diff --git a/.changelog/unreleased/SDK/2729-ibc-token-query.md b/.changelog/unreleased/SDK/2729-ibc-token-query.md new file mode 100644 index 0000000000..fa0edf93c6 --- /dev/null +++ b/.changelog/unreleased/SDK/2729-ibc-token-query.md @@ -0,0 +1,2 @@ +- move query_ibc_tokens and lookup_ibc_token_alias to sdk + ([\#2729](https://github.com/anoma/namada/issues/2729)) \ No newline at end of file diff --git a/.changelog/unreleased/bug-fixes/2667-fix-last-update-usage.md b/.changelog/unreleased/bug-fixes/2667-fix-last-update-usage.md new file mode 100644 index 0000000000..2f769a5358 --- /dev/null +++ b/.changelog/unreleased/bug-fixes/2667-fix-last-update-usage.md @@ -0,0 +1,2 @@ +- Fix the setting of the last update field in an Epoched data structure. + ([\#2667](https://github.com/anoma/namada/pull/2667)) \ No newline at end of file diff --git a/.changelog/unreleased/bug-fixes/2685-replace-hash-data-structs.md b/.changelog/unreleased/bug-fixes/2685-replace-hash-data-structs.md new file mode 100644 index 0000000000..e62f215e3b --- /dev/null +++ b/.changelog/unreleased/bug-fixes/2685-replace-hash-data-structs.md @@ -0,0 +1,3 @@ +- Use `indexmap` maps and sets in favor of `std` collections, to avoid + iteration order related bugs in the state machine code of Namada. + ([\#2685](https://github.com/anoma/namada/pull/2685)) \ No newline at end of file diff --git a/.changelog/unreleased/bug-fixes/2735-fix-vps-api.md b/.changelog/unreleased/bug-fixes/2735-fix-vps-api.md new file mode 100644 index 0000000000..4385f92473 --- /dev/null +++ b/.changelog/unreleased/bug-fixes/2735-fix-vps-api.md @@ -0,0 +1,2 @@ +- Fixed the `StorageRead` implementation and vp host functions to ignore + temporary writes. 
([\#2735](https://github.com/anoma/namada/pull/2735)) \ No newline at end of file diff --git a/.changelog/unreleased/bug-fixes/2809-ibc-token-query.md b/.changelog/unreleased/bug-fixes/2809-ibc-token-query.md new file mode 100644 index 0000000000..c5ba2a6a4f --- /dev/null +++ b/.changelog/unreleased/bug-fixes/2809-ibc-token-query.md @@ -0,0 +1,2 @@ +- Fix the balance query not to show ibc tokens as default + ([\#2809](https://github.com/anoma/namada/issues/2809)) \ No newline at end of file diff --git a/.changelog/unreleased/bug-fixes/2819-allowlist-fix.md b/.changelog/unreleased/bug-fixes/2819-allowlist-fix.md new file mode 100644 index 0000000000..81bfabeb83 --- /dev/null +++ b/.changelog/unreleased/bug-fixes/2819-allowlist-fix.md @@ -0,0 +1,2 @@ +- Adjusts the tx allowlist check to not prevent fee payment. + ([\#2819](https://github.com/anoma/namada/pull/2819)) \ No newline at end of file diff --git a/.changelog/unreleased/bug-fixes/2848-ibc-tx-simplify.md b/.changelog/unreleased/bug-fixes/2848-ibc-tx-simplify.md new file mode 100644 index 0000000000..9190f91040 --- /dev/null +++ b/.changelog/unreleased/bug-fixes/2848-ibc-tx-simplify.md @@ -0,0 +1,3 @@ +- Reduce the gas cost of prefix iterator in IBC transactions + to match the cost of prefix iterator elsewhere. + ([\#2848](https://github.com/anoma/namada/pull/2848)) \ No newline at end of file diff --git a/.changelog/unreleased/bug-fixes/2877-fix-val-voting-no-self-bonds.md b/.changelog/unreleased/bug-fixes/2877-fix-val-voting-no-self-bonds.md new file mode 100644 index 0000000000..2456aa6992 --- /dev/null +++ b/.changelog/unreleased/bug-fixes/2877-fix-val-voting-no-self-bonds.md @@ -0,0 +1,3 @@ +- Fix client bug that now ensures that a validator with + delegations but no self-bonds can vote in governance. 
+ ([\#2877](https://github.com/anoma/namada/pull/2877)) \ No newline at end of file diff --git a/.changelog/unreleased/bug-fixes/2928-fst-addr-vps.md b/.changelog/unreleased/bug-fixes/2928-fst-addr-vps.md new file mode 100644 index 0000000000..93a750e4fc --- /dev/null +++ b/.changelog/unreleased/bug-fixes/2928-fst-addr-vps.md @@ -0,0 +1,3 @@ +- Only use addresses from first storage key segments to + determine which VPs should be triggered by storage changes. + ([\#2928](https://github.com/anoma/namada/pull/2928)) \ No newline at end of file diff --git a/.changelog/unreleased/bug-fixes/2964-fix-non-persisted-diffs.md b/.changelog/unreleased/bug-fixes/2964-fix-non-persisted-diffs.md new file mode 100644 index 0000000000..a8bb541d41 --- /dev/null +++ b/.changelog/unreleased/bug-fixes/2964-fix-non-persisted-diffs.md @@ -0,0 +1,4 @@ +- Replaced DB key-val diffs pruning of non-persisted keys that searched for the + last diffs and was degrading throughput with a separate DB column family that + is pruned on every block. 
+ ([\#2964](https://github.com/anoma/namada/pull/2964)) \ No newline at end of file diff --git a/.changelog/unreleased/features/2316-ibc-nft.md b/.changelog/unreleased/features/2316-ibc-nft.md new file mode 100644 index 0000000000..57133d02b4 --- /dev/null +++ b/.changelog/unreleased/features/2316-ibc-nft.md @@ -0,0 +1,2 @@ +- Support NFT transfer over IBC + ([\#2316](https://github.com/anoma/namada/issues/2316)) \ No newline at end of file diff --git a/.changelog/unreleased/features/2552-ibc-rate-limit.md b/.changelog/unreleased/features/2552-ibc-rate-limit.md new file mode 100644 index 0000000000..b3d618f735 --- /dev/null +++ b/.changelog/unreleased/features/2552-ibc-rate-limit.md @@ -0,0 +1,2 @@ +- IBC rate limit for deposits into and withdrawals out of Namada + ([\#2552](https://github.com/anoma/namada/issues/2552)) \ No newline at end of file diff --git a/.changelog/unreleased/features/2729-ibc-token-query.md b/.changelog/unreleased/features/2729-ibc-token-query.md new file mode 100644 index 0000000000..cfb83bd07b --- /dev/null +++ b/.changelog/unreleased/features/2729-ibc-token-query.md @@ -0,0 +1,2 @@ +- Add ibc-token command to get a list of IBC tokens + ([\#2729](https://github.com/anoma/namada/issues/2729)) \ No newline at end of file diff --git a/.changelog/unreleased/features/2803-remove-offline-governance.md b/.changelog/unreleased/features/2803-remove-offline-governance.md new file mode 100644 index 0000000000..f49fef71ec --- /dev/null +++ b/.changelog/unreleased/features/2803-remove-offline-governance.md @@ -0,0 +1,2 @@ +- Removes offline governance as a proposal option. 
+ ([\#2803](https://github.com/anoma/namada/pull/2803)) \ No newline at end of file diff --git a/.changelog/unreleased/features/2842-nam-transferable.md b/.changelog/unreleased/features/2842-nam-transferable.md new file mode 100644 index 0000000000..4f3561b107 --- /dev/null +++ b/.changelog/unreleased/features/2842-nam-transferable.md @@ -0,0 +1,2 @@ +- Add a parameter to enable/disable native token transfers + ([\#2842](https://github.com/anoma/namada/issues/2842)) \ No newline at end of file diff --git a/.changelog/unreleased/features/2931-generate-tx-schema.md b/.changelog/unreleased/features/2931-generate-tx-schema.md new file mode 100644 index 0000000000..a298b9b492 --- /dev/null +++ b/.changelog/unreleased/features/2931-generate-tx-schema.md @@ -0,0 +1,2 @@ +- Added a utility to generate Borsh schemas for external collaborators. + ([\#2931](https://github.com/anoma/namada/pull/2931)) \ No newline at end of file diff --git a/.changelog/unreleased/features/3004-jailed-inactive-vals-no-voting.md b/.changelog/unreleased/features/3004-jailed-inactive-vals-no-voting.md new file mode 100644 index 0000000000..ef31f88356 --- /dev/null +++ b/.changelog/unreleased/features/3004-jailed-inactive-vals-no-voting.md @@ -0,0 +1,2 @@ +- Prohibit jailed or inactive validators from voting in governance. 
+ ([\#3004](https://github.com/anoma/namada/pull/3004)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/2449-ibc-hermes-shielded.md b/.changelog/unreleased/improvements/2449-ibc-hermes-shielded.md new file mode 100644 index 0000000000..ad54573257 --- /dev/null +++ b/.changelog/unreleased/improvements/2449-ibc-hermes-shielded.md @@ -0,0 +1,2 @@ +- IBC shielded transfer with Hermes support + ([\#2449](https://github.com/anoma/namada/issues/2449)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/2559-remove-pos-inflation-from-params.md b/.changelog/unreleased/improvements/2559-remove-pos-inflation-from-params.md new file mode 100644 index 0000000000..80cf23d175 --- /dev/null +++ b/.changelog/unreleased/improvements/2559-remove-pos-inflation-from-params.md @@ -0,0 +1,3 @@ +- Remove last staked ratio and pos inflation amount from + Parameters and initialize them in pos genesis initialization. + ([\#2559](https://github.com/anoma/namada/pull/2559)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/2574-imrpove-slash-queue.md b/.changelog/unreleased/improvements/2574-imrpove-slash-queue.md new file mode 100644 index 0000000000..ac35c5d5a9 --- /dev/null +++ b/.changelog/unreleased/improvements/2574-imrpove-slash-queue.md @@ -0,0 +1,2 @@ +- Only process 1 slash per validator per block height. + ([\#2574](https://github.com/anoma/namada/pull/2574)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/2575-refactor-pd-controller.md b/.changelog/unreleased/improvements/2575-refactor-pd-controller.md new file mode 100644 index 0000000000..4ea62bfef1 --- /dev/null +++ b/.changelog/unreleased/improvements/2575-refactor-pd-controller.md @@ -0,0 +1,3 @@ +- Refactor inflation with a standalone PD controller module. Then the + PoS and shielded inflation modules are wrappers around this controller. 
+ ([\#2575](https://github.com/anoma/namada/pull/2575)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/2627-remove-tx-queue.md b/.changelog/unreleased/improvements/2627-remove-tx-queue.md new file mode 100644 index 0000000000..989d7824e8 --- /dev/null +++ b/.changelog/unreleased/improvements/2627-remove-tx-queue.md @@ -0,0 +1,3 @@ +- Instead of having every user tx be executed across two blocks, the first executing a wrapper and the + second executing the main payload, this change makes it so that the entire tx is executed in a single + block (or rejected). ([\#2627](https://github.com/anoma/namada/pull/2627)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/2630-refactor-proposal-loading.md b/.changelog/unreleased/improvements/2630-refactor-proposal-loading.md new file mode 100644 index 0000000000..af68020e2c --- /dev/null +++ b/.changelog/unreleased/improvements/2630-refactor-proposal-loading.md @@ -0,0 +1,3 @@ +- Only load governance proposals on a new epoch right before execution. + Decoupled the logic from the Shell and implemented in the gov crate. + ([\#2630](https://github.com/anoma/namada/pull/2630)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/2781-addr-generator-randomness.md b/.changelog/unreleased/improvements/2781-addr-generator-randomness.md new file mode 100644 index 0000000000..43c884f8ab --- /dev/null +++ b/.changelog/unreleased/improvements/2781-addr-generator-randomness.md @@ -0,0 +1,3 @@ +- Adds a transaction's code and data section hashes as additional + sources of entropy, to compute an established account's address. 
+ ([\#2781](https://github.com/anoma/namada/pull/2781)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/2787-type-safe-events.md b/.changelog/unreleased/improvements/2787-type-safe-events.md new file mode 100644 index 0000000000..7bed5218cc --- /dev/null +++ b/.changelog/unreleased/improvements/2787-type-safe-events.md @@ -0,0 +1,3 @@ +- Refactor the events code in Namada. Now, we emit events + with type safe wrappers around the events' attributes. + ([\#2787](https://github.com/anoma/namada/pull/2787)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/2795-improve-wasm-build.md b/.changelog/unreleased/improvements/2795-improve-wasm-build.md new file mode 100644 index 0000000000..ab843164a1 --- /dev/null +++ b/.changelog/unreleased/improvements/2795-improve-wasm-build.md @@ -0,0 +1,2 @@ +- Split up WASM transaction and validity-predicates into individual crates to + improve build time. ([\#2795](https://github.com/anoma/namada/pull/2795)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/2813-read-from-buf.md b/.changelog/unreleased/improvements/2813-read-from-buf.md new file mode 100644 index 0000000000..12591606ae --- /dev/null +++ b/.changelog/unreleased/improvements/2813-read-from-buf.md @@ -0,0 +1,2 @@ +- Avoid reconstructing wasm result buffer with unsafe code. + ([\#2813](https://github.com/anoma/namada/pull/2813)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/2838-runtime-gas-meter.md b/.changelog/unreleased/improvements/2838-runtime-gas-meter.md new file mode 100644 index 0000000000..204fb01605 --- /dev/null +++ b/.changelog/unreleased/improvements/2838-runtime-gas-meter.md @@ -0,0 +1,2 @@ +- Improved the gas metering system to track gas at runtime in wasm. 
+ ([\#2838](https://github.com/anoma/namada/pull/2838)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/2845-limit-metadata-size.md b/.changelog/unreleased/improvements/2845-limit-metadata-size.md new file mode 100644 index 0000000000..ff436b85ab --- /dev/null +++ b/.changelog/unreleased/improvements/2845-limit-metadata-size.md @@ -0,0 +1,2 @@ +- Limit the character length of the validator metadata strings. + ([\#2845](https://github.com/anoma/namada/pull/2845)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/2849-lint-datetime.md b/.changelog/unreleased/improvements/2849-lint-datetime.md new file mode 100644 index 0000000000..e5fb5e1bec --- /dev/null +++ b/.changelog/unreleased/improvements/2849-lint-datetime.md @@ -0,0 +1,2 @@ +- Use clippy to disallow usage of fns to get current date/time unless explicitly + allowed. ([\#2849](https://github.com/anoma/namada/pull/2849)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/2852-vps-use-storage-err.md b/.changelog/unreleased/improvements/2852-vps-use-storage-err.md new file mode 100644 index 0000000000..2304ac0f52 --- /dev/null +++ b/.changelog/unreleased/improvements/2852-vps-use-storage-err.md @@ -0,0 +1,2 @@ +- Replace `eyre!()` errors with `namada_storage` errors. + ([\#2852](https://github.com/anoma/namada/pull/2852)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/2926-commit-only-merkle-tree.md b/.changelog/unreleased/improvements/2926-commit-only-merkle-tree.md new file mode 100644 index 0000000000..2b04195e07 --- /dev/null +++ b/.changelog/unreleased/improvements/2926-commit-only-merkle-tree.md @@ -0,0 +1,2 @@ +- Commit gas costs of applied transactions to new commit-only merkle tree store. 
+ ([\#2926](https://github.com/anoma/namada/pull/2926)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/2932-fix-ledger-significant-digits.md b/.changelog/unreleased/improvements/2932-fix-ledger-significant-digits.md new file mode 100644 index 0000000000..4b5b3c73ba --- /dev/null +++ b/.changelog/unreleased/improvements/2932-fix-ledger-significant-digits.md @@ -0,0 +1,2 @@ +- Remove unnecessary decimal digits in Ledger test vectors. + ([\#2932](https://github.com/anoma/namada/pull/2932)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/2933-migrated-e2e-to-int.md b/.changelog/unreleased/improvements/2933-migrated-e2e-to-int.md new file mode 100644 index 0000000000..c94c24861b --- /dev/null +++ b/.changelog/unreleased/improvements/2933-migrated-e2e-to-int.md @@ -0,0 +1,4 @@ + - Closes [\#2661](https://github.com/anoma/namada/issues/2661). Closes [\#2927](https://github.com/anoma/namada/issues/2927). + This PR moves many e2e tests over to integration test. In the future, it may be possible to move more + tests over. Moving some of these tests over revealed issues and these have also been resolved, + including \#2927. ([\#2933](https://github.com/anoma/namada/pull/2933)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/2934-tx-actions.md b/.changelog/unreleased/improvements/2934-tx-actions.md new file mode 100644 index 0000000000..27d9b46aa9 --- /dev/null +++ b/.changelog/unreleased/improvements/2934-tx-actions.md @@ -0,0 +1,4 @@ +- Some transactions now use temporary storage (only kept for the duration of + the tx execution and VPs validation) to indicate what actions were applied to + validity predicates that use the information to decide who has to authorize + the transaction. 
([\#2934](https://github.com/anoma/namada/pull/2934)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/2940-wasm-errs-ux.md b/.changelog/unreleased/improvements/2940-wasm-errs-ux.md new file mode 100644 index 0000000000..88d59c9293 --- /dev/null +++ b/.changelog/unreleased/improvements/2940-wasm-errs-ux.md @@ -0,0 +1,4 @@ +- Change the return type of a VP's predicate function to a Result of unit or + some error. In case Namada users perform invalid state changes, they should + be met with more descriptive error messages explaining the cause of their tx's + rejection. ([\#2940](https://github.com/anoma/namada/pull/2940)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/2958-tx-errs.md b/.changelog/unreleased/improvements/2958-tx-errs.md new file mode 100644 index 0000000000..49298b8012 --- /dev/null +++ b/.changelog/unreleased/improvements/2958-tx-errs.md @@ -0,0 +1,2 @@ +- Return error messages from tx execution, instead of aborting execution with no + context. ([\#2958](https://github.com/anoma/namada/pull/2958)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/2982-gas-minor-adjustments.md b/.changelog/unreleased/improvements/2982-gas-minor-adjustments.md new file mode 100644 index 0000000000..ef852f7f78 --- /dev/null +++ b/.changelog/unreleased/improvements/2982-gas-minor-adjustments.md @@ -0,0 +1,2 @@ +- Simplified gas metering for code compilation and validation. 
 + ([\#2982](https://github.com/anoma/namada/pull/2982)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/3002-remove-from-total-supply.md b/.changelog/unreleased/improvements/3002-remove-from-total-supply.md new file mode 100644 index 0000000000..fa121f31dc --- /dev/null +++ b/.changelog/unreleased/improvements/3002-remove-from-total-supply.md @@ -0,0 +1,3 @@ +- For inflation computations and the relevant RPC, don't + include the PGF balance in the total native supply + ([\#3002](https://github.com/anoma/namada/pull/3002)) \ No newline at end of file diff --git a/.changelog/unreleased/miscellaneous/2602-comptime-eth-bridge.md b/.changelog/unreleased/miscellaneous/2602-comptime-eth-bridge.md new file mode 100644 index 0000000000..8fcb938775 --- /dev/null +++ b/.changelog/unreleased/miscellaneous/2602-comptime-eth-bridge.md @@ -0,0 +1,2 @@ +- Disable Ethereum Bridge functionality at compile time. + ([\#2602](https://github.com/anoma/namada/pull/2602)) \ No newline at end of file diff --git a/.changelog/unreleased/sdk/3037-sdk-change-consensus-keys.md b/.changelog/unreleased/sdk/3037-sdk-change-consensus-keys.md new file mode 100644 index 0000000000..49740212d2 --- /dev/null +++ b/.changelog/unreleased/sdk/3037-sdk-change-consensus-keys.md @@ -0,0 +1,2 @@ +- Add a new method to the sdk to change a validator consensus key. + ([\#3037](https://github.com/anoma/namada/pull/3037)) \ No newline at end of file diff --git a/.changelog/unreleased/sdk/3039-sdk-update-account.md b/.changelog/unreleased/sdk/3039-sdk-update-account.md new file mode 100644 index 0000000000..1dca12e059 --- /dev/null +++ b/.changelog/unreleased/sdk/3039-sdk-update-account.md @@ -0,0 +1,2 @@ +- Improve the function to update an established address via the sdk. 
+ ([\#3039](https://github.com/anoma/namada/pull/3039)) \ No newline at end of file diff --git a/.changelog/unreleased/testing/2914-fee-unit-tests.md b/.changelog/unreleased/testing/2914-fee-unit-tests.md new file mode 100644 index 0000000000..58dc5e454b --- /dev/null +++ b/.changelog/unreleased/testing/2914-fee-unit-tests.md @@ -0,0 +1,2 @@ +- Improved unit tests for fee payment. + ([\#2914](https://github.com/anoma/namada/pull/2914)) \ No newline at end of file diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml index eada61b27a..cf89e5e2ed 100644 --- a/.github/workflows/build-and-test.yml +++ b/.github/workflows/build-and-test.yml @@ -200,7 +200,10 @@ jobs: nightly_version: [nightly-2024-02-10] mold_version: [2.4.0] make: - - name: ABCI + - name: Run unit tests without Ethereum bridge enabled + command: make test-unit-with-coverage + - name: Run unit tests with Ethereum bridge enabled + command: make test-unit-with-eth-bridge env: RUSTC_WRAPPER: sccache @@ -264,8 +267,8 @@ jobs: - uses: taiki-e/install-action@cargo-llvm-cov - name: Check crates build with default features run: make check-crates - - name: Run unit tests - run: make test-unit-with-coverage + - name: ${{ matrix.make.name }} + run: ${{ matrix.make.command }} env: NAMADA_MASP_PARAMS_DIR: /home/runner/.masp-params RUSTFLAGS: "-C linker=clang -C link-arg=-fuse-ld=/usr/local/bin/mold" diff --git a/.github/workflows/scripts/e2e.json b/.github/workflows/scripts/e2e.json index 8e5abce2ad..b27559b4ef 100644 --- a/.github/workflows/scripts/e2e.json +++ b/.github/workflows/scripts/e2e.json @@ -4,21 +4,14 @@ "e2e::ibc_tests::run_ledger_ibc_with_hermes": 130, "e2e::ibc_tests::pgf_over_ibc_with_hermes": 240, "e2e::ibc_tests::proposal_ibc_token_inflation": 600, + "e2e::ibc_tests::ibc_rate_limit": 500, "e2e::eth_bridge_tests::test_add_to_bridge_pool": 10, "e2e::ledger_tests::double_signing_gets_slashed": 12, - "e2e::ledger_tests::invalid_transactions": 13, 
"e2e::ledger_tests::ledger_many_txs_in_a_block": 55, - "e2e::ledger_tests::ledger_txs_and_queries": 30, - "e2e::ledger_tests::masp_txs_and_queries": 82, "e2e::ledger_tests::pos_bonds": 77, - "e2e::ledger_tests::implicit_account_reveal_pk": 30, "e2e::ledger_tests::pos_init_validator": 40, - "e2e::ledger_tests::proposal_offline": 21, "e2e::ledger_tests::rollback": 21, - "e2e::ledger_tests::pgf_governance_proposal": 320, - "e2e::ledger_tests::proposal_submission": 200, "e2e::ledger_tests::proposal_change_shielded_reward": 200, - "e2e::pgf_steward_change_commissions": 30, "e2e::ledger_tests::run_ledger": 5, "e2e::ledger_tests::run_ledger_load_state_and_reset": 23, "e2e::ledger_tests::test_namada_shuts_down_if_tendermint_dies": 2, @@ -27,10 +20,7 @@ "e2e::ledger_tests::test_epoch_sleep": 12, "e2e::ledger_tests::wrapper_disposable_signer": 28, "e2e::ledger_tests::deactivate_and_reactivate_validator": 67, - "e2e::ledger_tests::change_validator_metadata": 31, - "e2e::ledger_tests::pos_rewards": 44, "e2e::ledger_tests::test_invalid_validator_txs": 73, - "e2e::ledger_tests::test_bond_queries": 95, "e2e::ledger_tests::suspend_ledger": 30, "e2e::ledger_tests::stop_ledger_at_height": 18, "e2e::ledger_tests::change_consensus_key": 91, @@ -38,4 +28,4 @@ "e2e::wallet_tests::wallet_encrypted_key_cmds": 1, "e2e::wallet_tests::wallet_encrypted_key_cmds_env_var": 1, "e2e::wallet_tests::wallet_unencrypted_key_cmds": 1 -} +} \ No newline at end of file diff --git a/.github/workflows/scripts/hermes.txt b/.github/workflows/scripts/hermes.txt index d57a2d003e..fc44e0ce9b 100644 --- a/.github/workflows/scripts/hermes.txt +++ b/.github/workflows/scripts/hermes.txt @@ -1 +1 @@ -1.7.4-namada-beta7 \ No newline at end of file +1.7.4-namada-beta8-rc2 diff --git a/Cargo.lock b/Cargo.lock index c4a2302c93..fac7864f0b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -339,7 +339,7 @@ dependencies = [ "bitflags 1.3.2", "bytes", "futures-util", - "http", + "http 0.2.11", "http-body", "hyper", "itoa", @@ 
-365,7 +365,7 @@ dependencies = [ "async-trait", "bytes", "futures-util", - "http", + "http 0.2.11", "http-body", "mime", "rustversion", @@ -414,9 +414,9 @@ checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] name = "base64" -version = "0.21.5" +version = "0.21.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35636a1494ede3b646cc98f74f8e62c773a38a659ebc777a2cf26b9b74171df9" +checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" [[package]] name = "base64ct" @@ -445,7 +445,7 @@ dependencies = [ "bitvec", "blake2s_simd", "byteorder", - "crossbeam-channel 0.5.8", + "crossbeam-channel 0.5.12", "ff", "group", "lazy_static", @@ -481,10 +481,10 @@ version = "0.69.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a00dc851838a2120612785d195287475a3ac45514741da670b735818822129a0" dependencies = [ - "bitflags 2.4.1", + "bitflags 2.5.0", "cexpr", "clang-sys", - "itertools 0.10.5", + "itertools 0.11.0", "lazy_static", "lazycell", "proc-macro2", @@ -532,9 +532,12 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.4.1" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "327762f6e5a765692301e5bb513e0d9fef63be86bbc14528052b1cd3e6f03e07" +checksum = "cf4b9d6a944f767f8e5e0db018570623c85f3d925ac718db4e06d0187adb21c1" +dependencies = [ + "serde 1.0.193", +] [[package]] name = "bitvec" @@ -645,16 +648,39 @@ dependencies = [ "subtle 2.4.1", ] +[[package]] +name = "borsh" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4114279215a005bc675e386011e594e1d9b800918cea18fcadadcce864a2046b" +dependencies = [ + "borsh-derive 0.10.3", + "hashbrown 0.12.3", +] + [[package]] name = "borsh" version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"9897ef0f1bd2362169de6d7e436ea2237dc1085d7d1e4db75f4be34d86f309d1" dependencies = [ - "borsh-derive", + "borsh-derive 1.2.1", "cfg_aliases", ] +[[package]] +name = "borsh-derive" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0754613691538d51f329cce9af41d7b7ca150bc973056f1156611489475f54f7" +dependencies = [ + "borsh-derive-internal", + "borsh-schema-derive-internal", + "proc-macro-crate 0.1.5", + "proc-macro2", + "syn 1.0.109", +] + [[package]] name = "borsh-derive" version = "1.2.1" @@ -669,12 +695,34 @@ dependencies = [ "syn_derive", ] +[[package]] +name = "borsh-derive-internal" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "afb438156919598d2c7bad7e1c0adf3d26ed3840dbc010db1a882a65583ca2fb" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "borsh-ext" version = "1.2.0" source = "git+https://github.com/heliaxdev/borsh-ext?tag=v1.2.0#a62fee3e847e512cad9ac0f1fd5a900e5db9ba37" dependencies = [ - "borsh", + "borsh 1.2.1", +] + +[[package]] +name = "borsh-schema-derive-internal" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "634205cc43f74a1b9046ef87c4540ebda95696ec0f315024860cad7c5b0f5ccd" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", ] [[package]] @@ -1022,7 +1070,7 @@ version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5286a0843c21f8367f7be734f89df9b822e0321d8bcce8d6e735aadff7d74979" dependencies = [ - "base64 0.21.5", + "base64 0.21.7", "bech32 0.9.1", "bs58", "digest 0.10.7", @@ -1303,12 +1351,11 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.8" +version = "0.5.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a33c2bf77f2df06183c3aa30d1e96c0695a313d4f9c453cc3762a6db39f99200" +checksum = "ab3db02a9c5b5121e1e42fbdb1aeb65f5e02624cc58c43f2884c6ccac0b82f95" 
dependencies = [ - "cfg-if 1.0.0", - "crossbeam-utils 0.8.16", + "crossbeam-utils 0.8.19", ] [[package]] @@ -1319,7 +1366,7 @@ checksum = "ce6fd6f855243022dcecf8702fef0c297d4338e226845fe067f6341ad9fa0cef" dependencies = [ "cfg-if 1.0.0", "crossbeam-epoch", - "crossbeam-utils 0.8.16", + "crossbeam-utils 0.8.19", ] [[package]] @@ -1330,7 +1377,7 @@ checksum = "ae211234986c545741a7dc064309f67ee1e5ad243d0e48335adc0484d960bcc7" dependencies = [ "autocfg", "cfg-if 1.0.0", - "crossbeam-utils 0.8.16", + "crossbeam-utils 0.8.19", "memoffset 0.9.0", "scopeguard", ] @@ -1348,12 +1395,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.16" +version = "0.8.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a22b2d63d4d1dc0b7f1b6b2747dd0088008a9be28b6ddf0b1e7d335e3037294" -dependencies = [ - "cfg-if 1.0.0", -] +checksum = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345" [[package]] name = "crunchy" @@ -1485,7 +1529,6 @@ dependencies = [ "ident_case", "proc-macro2", "quote", - "strsim", "syn 2.0.52", ] @@ -1632,6 +1675,12 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" +[[package]] +name = "drain_filter_polyfill" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "669a445ee724c5c69b1b06fe0b63e70a1c84bc9bb7d9696cd4f4e3ec45050408" + [[package]] name = "dtoa" version = "0.4.8" @@ -1644,6 +1693,12 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "56ce8c6da7551ec6c462cbaf3bfbc75131ebbfa1c944aeaa9dab51ca1c5f0c3b" +[[package]] +name = "dyn-clone" +version = "1.0.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "545b22097d44f8a9581187cdf93de7a71e4722bf51200cfaba810865b49a495d" + [[package]] name = "dynasm" version = "1.2.3" @@ -1778,7 +1833,7 @@ version = "0.9.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "fe81b5c06ecfdbc71dd845216f225f53b62a10cb8a16c946836a3467f701d05b" dependencies = [ - "base64 0.21.5", + "base64 0.21.7", "bytes", "hex", "k256", @@ -2134,7 +2189,7 @@ checksum = "25d6c0c9455d93d4990c06e049abf9b30daf148cf461ee939c11d88907c60816" dependencies = [ "async-trait", "auto_impl", - "base64 0.21.5", + "base64 0.21.7", "bytes", "const-hex", "enr", @@ -2143,7 +2198,7 @@ dependencies = [ "futures-timer", "futures-util", "hashers", - "http", + "http 0.2.11", "instant", "jsonwebtoken", "once_cell", @@ -2542,7 +2597,7 @@ version = "0.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fbf97ba92db08df386e10c8ede66a2a0369bd277090afd8710e19e38de9ec0cd" dependencies = [ - "bitflags 2.4.1", + "bitflags 2.5.0", "libc", "libgit2-sys", "log", @@ -2610,7 +2665,7 @@ dependencies = [ "futures-core", "futures-sink", "futures-util", - "http", + "http 0.2.11", "indexmap 2.1.0", "slab", "tokio", @@ -2673,10 +2728,10 @@ version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "06683b93020a07e3dbcf5f8c0f6d40080d725bea7936fc01ad345c01b97dc270" dependencies = [ - "base64 0.21.5", + "base64 0.21.7", "bytes", "headers-core", - "http", + "http 0.2.11", "httpdate", "mime", "sha1", @@ -2688,7 +2743,7 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e7f66481bfee273957b1f20485a4ff3362987f85b2c236580d81b4eb7a326429" dependencies = [ - "http", + "http 0.2.11", ] [[package]] @@ -2787,6 +2842,17 @@ dependencies = [ "itoa", ] +[[package]] +name = "http" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b32afd38673a8016f7c9ae69e5af41a58f81b1d31689040f2f1959594ce194ea" +dependencies = [ + "bytes", + "fnv", + "itoa", +] + [[package]] name = "http-body" version = "0.4.5" @@ -2794,7 +2860,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" dependencies = [ "bytes", - "http", + "http 0.2.11", "pin-project-lite", ] @@ -2821,7 +2887,7 @@ dependencies = [ "futures-core", "futures-util", "h2", - "http", + "http 0.2.11", "http-body", "httparse", "httpdate", @@ -2841,7 +2907,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" dependencies = [ "futures-util", - "http", + "http 0.2.11", "hyper", "rustls", "tokio", @@ -2898,9 +2964,9 @@ dependencies = [ [[package]] name = "ibc" -version = "0.48.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "429b6aca6624a9364878e28c90311438c2621a8270942d80732b2651ac38ac74" +checksum = "8057203ab04368297a31ecd5d059bec7108c069d636bcfc9ab20e82d89b480b8" dependencies = [ "ibc-apps", "ibc-clients", @@ -2910,11 +2976,43 @@ dependencies = [ "ibc-primitives", ] +[[package]] +name = "ibc-app-nft-transfer" +version = "0.50.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e931737b69129ae417598fe29eace3e41a9ce32b8649abe3937495973e5843f" +dependencies = [ + "ibc-app-nft-transfer-types", + "ibc-core", + "serde-json-wasm", +] + +[[package]] +name = "ibc-app-nft-transfer-types" +version = "0.50.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2019d3a6adf6b333c55630f52ca71ad8f61702ca1cf291aaf5ee40b7c6c27ba2" +dependencies = [ + "base64 0.21.7", + "borsh 0.10.3", + "derive_more", + "displaydoc", + "http 1.0.0", + "ibc-core", + "ibc-proto", + "mime", + "parity-scale-codec", + "scale-info", + "schemars", + "serde 1.0.193", + "serde-json-wasm", +] + [[package]] name = "ibc-app-transfer" -version = "0.48.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b177b343385d9654d99be4709b5ed1574d41f91dfa4044b2d26d688be4179d7c" +checksum = 
"2595e4cc14828a4141a28b86777040d8bfbabea43838a425137202cff0ee6329" dependencies = [ "ibc-app-transfer-types", "ibc-core", @@ -2923,9 +3021,9 @@ dependencies = [ [[package]] name = "ibc-app-transfer-types" -version = "0.48.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95f92a3eda225e5c86e7bb6501c95986583ac541c4369d3c528349d81390f947" +checksum = "0106c87ddcc619a6a5eac05da2b77287e3958f89dddf951daf9a2dfc470cb5f4" dependencies = [ "derive_more", "displaydoc", @@ -2938,18 +3036,19 @@ dependencies = [ [[package]] name = "ibc-apps" -version = "0.48.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4be40d55ed2dea9f2d05b902a3586f20850c723e4bdbfc4fb0ebe7a66ca5e40" +checksum = "b5738d8c842abce233f41d3be825d01e6ee075251b509c6947d05c75477eaeec" dependencies = [ + "ibc-app-nft-transfer", "ibc-app-transfer", ] [[package]] name = "ibc-client-tendermint" -version = "0.48.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "119aa5873214228bf69bded3f20022b9ae1bc35b6841d295afcd73e53db05ccf" +checksum = "81ef4eefb4fd88167335fee4d212b1ff2fa4dd4e4ce87a58bda1798be1d128ac" dependencies = [ "ibc-client-tendermint-types", "ibc-core-client", @@ -2957,7 +3056,6 @@ dependencies = [ "ibc-core-handler-types", "ibc-core-host", "ibc-primitives", - "prost 0.12.3", "serde 1.0.193", "tendermint", "tendermint-light-client-verifier", @@ -2965,38 +3063,52 @@ dependencies = [ [[package]] name = "ibc-client-tendermint-types" -version = "0.48.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f21679016931b332b295a761e65cc122dc6fbfb98444148b681ad3aaa474665" +checksum = "91a224a98b193810e1ef86316e9a08e677eeff6f98b22b9eb9806bd993d3753a" dependencies = [ - "bytes", "displaydoc", "ibc-core-client-types", "ibc-core-commitment-types", "ibc-core-host-types", "ibc-primitives", "ibc-proto", - "prost 0.12.3", "serde 
1.0.193", "tendermint", "tendermint-light-client-verifier", "tendermint-proto", ] +[[package]] +name = "ibc-client-wasm-types" +version = "0.50.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e1ea3be7ae201c99b6589c112a253f2fb3c9ae7322d8937a7303d1fbfe76d27" +dependencies = [ + "base64 0.21.7", + "displaydoc", + "ibc-core-client", + "ibc-core-host-types", + "ibc-primitives", + "ibc-proto", + "serde 1.0.193", +] + [[package]] name = "ibc-clients" -version = "0.48.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "685c660323e93107a136aa3dbc412b7fa2eafd315c2fe71184096a43800f8ca5" +checksum = "84fef481dd1ebe5ef69ee8e095c225cb3e51cd3895096ba2884b3f5b827a6ed6" dependencies = [ "ibc-client-tendermint", + "ibc-client-wasm-types", ] [[package]] name = "ibc-core" -version = "0.48.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "100d9d0aa67432c5078a8a1c818e3fc990a193be6d35ed0abeda5b340d16c1da" +checksum = "5aedd421bae80115f44b198bec9af45f234e1c8ff81ee9d5e7f60444d526d2b6" dependencies = [ "ibc-core-channel", "ibc-core-client", @@ -3005,14 +3117,15 @@ dependencies = [ "ibc-core-handler", "ibc-core-host", "ibc-core-router", + "ibc-derive", "ibc-primitives", ] [[package]] name = "ibc-core-channel" -version = "0.48.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ebaa37629ac029f914dfe552ab5dad01ddb240ec885ed0ae68221cbea4e9bfc" +checksum = "535048a8fe64101263e35a6a4503474811e379a115db72ee449df882b0f11b45" dependencies = [ "ibc-core-channel-types", "ibc-core-client", @@ -3022,15 +3135,15 @@ dependencies = [ "ibc-core-host", "ibc-core-router", "ibc-primitives", - "prost 0.12.3", ] [[package]] name = "ibc-core-channel-types" -version = "0.48.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"aa2ba72c56c411b1e0ce6dc3f5e1fa1de9e6c84891f425b7be8a9e1705964378" +checksum = "8d97396ccb1840f4ea6021bbf049a4a7e30a8f5b126f00023ec44b2a48d4dabc" dependencies = [ + "borsh 0.10.3", "derive_more", "displaydoc", "ibc-core-client-types", @@ -3039,7 +3152,9 @@ dependencies = [ "ibc-core-host-types", "ibc-primitives", "ibc-proto", - "prost 0.12.3", + "parity-scale-codec", + "scale-info", + "schemars", "serde 1.0.193", "sha2 0.10.8", "subtle-encoding", @@ -3048,9 +3163,9 @@ dependencies = [ [[package]] name = "ibc-core-client" -version = "0.48.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06c4fac8e05201795073dee8c93d5afe9dfeac9aec2412b4a2b0c5f0d1e1d725" +checksum = "15bcf0c59eaa935fa410497a56862f28c4df68317ea556724f0d0764b6c0307e" dependencies = [ "ibc-core-client-context", "ibc-core-client-types", @@ -3058,14 +3173,13 @@ dependencies = [ "ibc-core-handler-types", "ibc-core-host", "ibc-primitives", - "prost 0.12.3", ] [[package]] name = "ibc-core-client-context" -version = "0.48.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b82abd9489021730d59ab2d00e9438d3711e8e78ecba4d083b64f833301682b" +checksum = "2d37d88be3dc7fd82d45418c257d826244a6b29b7902c76cf9e68fd61f1e9173" dependencies = [ "derive_more", "displaydoc", @@ -3073,26 +3187,27 @@ dependencies = [ "ibc-core-commitment-types", "ibc-core-handler-types", "ibc-core-host-types", - "ibc-derive", "ibc-primitives", - "prost 0.12.3", "subtle-encoding", "tendermint", ] [[package]] name = "ibc-core-client-types" -version = "0.48.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bafdbf6db5dab4c8ad610b6940e23b4f8abd0a6ac5e8e2801415a95defd4a583" +checksum = "cb717b1296e6cda0990346ba5203fadd043d5159d7d7173b3765f72f263c29db" dependencies = [ + "borsh 0.10.3", "derive_more", "displaydoc", "ibc-core-commitment-types", "ibc-core-host-types", "ibc-primitives", "ibc-proto", - 
"prost 0.12.3", + "parity-scale-codec", + "scale-info", + "schemars", "serde 1.0.193", "subtle-encoding", "tendermint", @@ -3100,40 +3215,43 @@ dependencies = [ [[package]] name = "ibc-core-commitment-types" -version = "0.48.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed4256b0216fc49024bac7e01c61b9bb055e31914ffe9ce6f468d7ce496a9357" +checksum = "a10ff34bf57bf4bc668b55208dbfdf312d7907adc6a0e39da2377883f12efada" dependencies = [ + "borsh 0.10.3", "derive_more", "displaydoc", "ibc-primitives", "ibc-proto", "ics23", - "prost 0.12.3", + "parity-scale-codec", + "scale-info", + "schemars", "serde 1.0.193", "subtle-encoding", ] [[package]] name = "ibc-core-connection" -version = "0.48.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48e237b70b9ba0177a4e59ac9048fffac2ac44c334703cc0ae403ad221450850" +checksum = "de7f4f1e78e9ed5a63b09b1405f42713f3d076ba5e7889ec31a520cad4970344" dependencies = [ "ibc-core-client", "ibc-core-connection-types", "ibc-core-handler-types", "ibc-core-host", "ibc-primitives", - "prost 0.12.3", ] [[package]] name = "ibc-core-connection-types" -version = "0.48.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca841416fa29626423917099092f3698ae2735074cb3fe42936ddf6b2ccbf2f7" +checksum = "230d7f547e121147d136c563ae71707a9e3477a9bc1bc6c1dc29051e1408a381" dependencies = [ + "borsh 0.10.3", "derive_more", "displaydoc", "ibc-core-client-types", @@ -3141,7 +3259,9 @@ dependencies = [ "ibc-core-host-types", "ibc-primitives", "ibc-proto", - "prost 0.12.3", + "parity-scale-codec", + "scale-info", + "schemars", "serde 1.0.193", "subtle-encoding", "tendermint", @@ -3149,9 +3269,9 @@ dependencies = [ [[package]] name = "ibc-core-handler" -version = "0.48.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a47e5e5a006aa0fc87ec3f5fb1e0ef6dd5aeea5079fa927d799d526c44329987" +checksum = "c60a2d072d8f7d8d64503bbf3fb69ffcd973b92667af053617a36682fadddea5" dependencies = [ "ibc-core-channel", "ibc-core-client", @@ -3165,10 +3285,11 @@ dependencies = [ [[package]] name = "ibc-core-handler-types" -version = "0.48.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e3d59a8a5eb2069530c42783b4fef63472a89e0e9242334351df1bb58aaf542" +checksum = "7fae38340bffa42a74563a12703c994515cca4bab755a0c83089c18c3c1e481a" dependencies = [ + "borsh 0.10.3", "derive_more", "displaydoc", "ibc-core-channel-types", @@ -3179,7 +3300,9 @@ dependencies = [ "ibc-core-router-types", "ibc-primitives", "ibc-proto", - "prost 0.12.3", + "parity-scale-codec", + "scale-info", + "schemars", "serde 1.0.193", "subtle-encoding", "tendermint", @@ -3187,9 +3310,9 @@ dependencies = [ [[package]] name = "ibc-core-host" -version = "0.48.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7aa63c895c0e5a75e42fe859b8fd4250c12bfa8b9c6b114f94c927ecfad38a03" +checksum = "abaa0e2143855d12c19e814dab72a5e28daf5e31780afb1302e983614b248668" dependencies = [ "derive_more", "displaydoc", @@ -3201,15 +3324,14 @@ dependencies = [ "ibc-core-handler-types", "ibc-core-host-types", "ibc-primitives", - "prost 0.12.3", "subtle-encoding", ] [[package]] name = "ibc-core-host-cosmos" -version = "0.48.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a325862af6c20b0df3d27c072a2d802a7232dc1666214d738cdfbd9a9c99720" +checksum = "7e3c792be21a340e42344e5bede1695c2d21d62abcc21bbfc7662b5950ffe8d4" dependencies = [ "derive_more", "displaydoc", @@ -3223,7 +3345,6 @@ dependencies = [ "ibc-core-host-types", "ibc-primitives", "ibc-proto", - "prost 0.12.3", "serde 1.0.193", "sha2 0.10.8", "subtle-encoding", @@ -3232,21 +3353,25 @@ dependencies = [ [[package]] name = "ibc-core-host-types" -version = 
"0.48.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "616955da310febbe93c0569a2feebd9f57cafed3eee5a56b0c3bb953a75f6089" +checksum = "1c25ce3082e036836d60aea3cc24f46dfb248d7718516a9a48e1feb466ce10c1" dependencies = [ + "borsh 0.10.3", "derive_more", "displaydoc", "ibc-primitives", + "parity-scale-codec", + "scale-info", + "schemars", "serde 1.0.193", ] [[package]] name = "ibc-core-router" -version = "0.48.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31fe115da73e0616bdb44400fa6b11ca251648d070c4ff419d58e27804d30243" +checksum = "6c261fb7e9a7de7daafb6a38cb9abdce6e912230e30246eb2ef1bb5db32ba10f" dependencies = [ "derive_more", "displaydoc", @@ -3254,23 +3379,24 @@ dependencies = [ "ibc-core-host-types", "ibc-core-router-types", "ibc-primitives", - "prost 0.12.3", "subtle-encoding", ] [[package]] name = "ibc-core-router-types" -version = "0.48.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d1fbb0bbbdeafa7ac989ba1693ed46d22e0e3eb0bdae478544e31157a4fdba6" +checksum = "6f3b37bc4c11fdc60a328488f4be205106666edda20a4080484d599a8b0978d2" dependencies = [ + "borsh 0.10.3", "derive_more", "displaydoc", "ibc-core-host-types", "ibc-primitives", "ibc-proto", - "ics23", - "prost 0.12.3", + "parity-scale-codec", + "scale-info", + "schemars", "serde 1.0.193", "subtle-encoding", "tendermint", @@ -3278,11 +3404,10 @@ dependencies = [ [[package]] name = "ibc-derive" -version = "0.4.0" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df07bf5bc1e65e291506b7497633e07967e49b36a8db10cda77a8fd686eb4548" +checksum = "3de1e69ff9d7d6094b720a36bb26fc8078b5e1b0e216e2d0a92f602e6dc8016e" dependencies = [ - "darling", "proc-macro2", "quote", "syn 2.0.52", @@ -3290,14 +3415,18 @@ dependencies = [ [[package]] name = "ibc-primitives" -version = "0.48.1" +version = "0.50.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5edea4685267fd68514c87e7aa3a62712340c4cff6903f088a9ab571428a08a" +checksum = "af5524046e645bdfbd96ef932c8ceab6bb2391dc31dee626e274d13e7ac25ec2" dependencies = [ + "borsh 0.10.3", "derive_more", "displaydoc", "ibc-proto", + "parity-scale-codec", "prost 0.12.3", + "scale-info", + "schemars", "serde 1.0.193", "tendermint", "time", @@ -3305,15 +3434,20 @@ dependencies = [ [[package]] name = "ibc-proto" -version = "0.38.0" +version = "0.41.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93cbf4cbe9e5113cc7c70f3208a7029b2205c629502cbb2ae7ea0a09a97d3005" +checksum = "dd4ee32b22d3b06f31529b956f4928e5c9a068d71e46cf6abfa19c31ca550553" dependencies = [ - "base64 0.21.5", + "base64 0.21.7", + "borsh 0.10.3", "bytes", "flex-error", "ics23", + "informalsystems-pbjson 0.7.0", + "parity-scale-codec", "prost 0.12.3", + "scale-info", + "schemars", "serde 1.0.193", "subtle-encoding", "tendermint-proto", @@ -3321,18 +3455,15 @@ dependencies = [ [[package]] name = "ibc-testkit" -version = "0.48.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f550c91648f3db6474880e18cd2bd294096a99b30621aa01a9059b71e3612d98" +checksum = "3443c6ccc7551266dce6e842aa10c472bf73d7cc0c3140aafc55c942e85f530a" dependencies = [ - "bytes", "derive_more", "displaydoc", "ibc", "ibc-proto", "parking_lot", - "primitive-types", - "prost 0.12.3", "subtle-encoding", "tendermint", "tendermint-testgen", @@ -3349,7 +3480,7 @@ dependencies = [ "anyhow", "bytes", "hex", - "informalsystems-pbjson", + "informalsystems-pbjson 0.6.0", "prost 0.12.3", "ripemd", "serde 1.0.193", @@ -3442,7 +3573,7 @@ name = "index-set" version = "0.8.0" source = "git+https://github.com/heliaxdev/index-set?tag=v0.8.1#b0d928f83cf0d465ccda299d131e8df2859b5184" dependencies = [ - "borsh", + "borsh 1.2.1", "serde 1.0.193", ] @@ -3467,6 +3598,17 @@ dependencies = [ "hashbrown 0.14.3", ] +[[package]] 
+name = "indexmap" +version = "2.2.4" +source = "git+https://github.com/heliaxdev/indexmap?tag=2.2.4-heliax-1#b5b5b547bd6ab04bbb16e060326a50ddaeb6c909" +dependencies = [ + "borsh 1.2.1", + "equivalent", + "hashbrown 0.14.3", + "serde 1.0.193", +] + [[package]] name = "informalsystems-pbjson" version = "0.6.0" @@ -3477,6 +3619,16 @@ dependencies = [ "serde 1.0.193", ] +[[package]] +name = "informalsystems-pbjson" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9aa4a0980c8379295100d70854354e78df2ee1c6ca0f96ffe89afeb3140e3a3d" +dependencies = [ + "base64 0.21.7", + "serde 1.0.193", +] + [[package]] name = "inout" version = "0.1.3" @@ -3569,7 +3721,7 @@ version = "8.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6971da4d9c3aa03c3d8f3ff0f4155b534aad021292003895a469716b2a230378" dependencies = [ - "base64 0.21.5", + "base64 0.21.7", "pem", "ring 0.16.20", "serde 1.0.193", @@ -3752,7 +3904,7 @@ version = "0.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85c833ca1e66078851dba29046874e38f08b2c883700aa29a03ddd3b23814ee8" dependencies = [ - "bitflags 2.4.1", + "bitflags 2.5.0", "libc", "redox_syscall", ] @@ -3898,9 +4050,9 @@ dependencies = [ [[package]] name = "masp_note_encryption" version = "1.0.0" -source = "git+https://github.com/anoma/masp?rev=30492323d98b0531fd18b6285cd94afcaa4066d2#30492323d98b0531fd18b6285cd94afcaa4066d2" +source = "git+https://github.com/anoma/masp?rev=6cbc8bd90a71cc280492c44bc3415162093daa76#6cbc8bd90a71cc280492c44bc3415162093daa76" dependencies = [ - "borsh", + "borsh 1.2.1", "chacha20", "chacha20poly1305", "cipher", @@ -3911,7 +4063,7 @@ dependencies = [ [[package]] name = "masp_primitives" version = "1.0.0" -source = "git+https://github.com/anoma/masp?rev=30492323d98b0531fd18b6285cd94afcaa4066d2#30492323d98b0531fd18b6285cd94afcaa4066d2" +source = 
"git+https://github.com/anoma/masp?rev=6cbc8bd90a71cc280492c44bc3415162093daa76#6cbc8bd90a71cc280492c44bc3415162093daa76" dependencies = [ "aes", "bip0039", @@ -3919,7 +4071,7 @@ dependencies = [ "blake2b_simd", "blake2s_simd", "bls12_381", - "borsh", + "borsh 1.2.1", "byteorder", "ff", "fpe", @@ -3943,7 +4095,7 @@ dependencies = [ [[package]] name = "masp_proofs" version = "1.0.0" -source = "git+https://github.com/anoma/masp?rev=30492323d98b0531fd18b6285cd94afcaa4066d2#30492323d98b0531fd18b6285cd94afcaa4066d2" +source = "git+https://github.com/anoma/masp?rev=6cbc8bd90a71cc280492c44bc3415162093daa76#6cbc8bd90a71cc280492c44bc3415162093daa76" dependencies = [ "bellman", "blake2b_simd", @@ -4106,7 +4258,7 @@ dependencies = [ "bytes", "encoding_rs", "futures-util", - "http", + "http 0.2.11", "httparse", "log", "memchr", @@ -4129,7 +4281,7 @@ dependencies = [ "async-trait", "base58 0.2.0", "bimap", - "borsh", + "borsh 1.2.1", "borsh-ext", "byte-unit", "circular-queue", @@ -4213,7 +4365,7 @@ dependencies = [ name = "namada_account" version = "0.32.1" dependencies = [ - "borsh", + "borsh 1.2.1", "linkme", "namada_core", "namada_macros", @@ -4236,7 +4388,7 @@ dependencies = [ "bimap", "bit-set", "blake2b-rs", - "borsh", + "borsh 1.2.1", "borsh-ext", "byte-unit", "byteorder", @@ -4247,6 +4399,7 @@ dependencies = [ "data-encoding", "derivative", "directories", + "drain_filter_polyfill", "ed25519-consensus 1.2.1", "ethabi", "ethbridge-bridge-events", @@ -4321,16 +4474,29 @@ dependencies = [ name = "namada_benchmarks" version = "0.32.1" dependencies = [ - "borsh", + "borsh 1.2.1", "borsh-ext", "criterion", + "lazy_static", "masp_primitives", "namada", "namada_apps", + "prost 0.12.3", "rand 0.8.5", "rand_core 0.6.4", "sha2 0.9.9", "tempfile", + "wasm-instrument", + "wasmer", + "wasmer-compiler-singlepass", + "wasmer-engine-universal", +] + +[[package]] +name = "namada_controller" +version = "0.32.1" +dependencies = [ + "namada_core", ] [[package]] @@ -4338,7 +4504,7 @@ name = 
"namada_core" version = "0.32.1" dependencies = [ "bech32 0.8.1", - "borsh", + "borsh 1.2.1", "borsh-ext", "chrono", "data-encoding", @@ -4350,6 +4516,7 @@ dependencies = [ "ics23", "impl-num-traits", "index-set", + "indexmap 2.2.4", "k256", "linkme", "masp_primitives", @@ -4387,7 +4554,7 @@ dependencies = [ name = "namada_encoding_spec" version = "0.32.1" dependencies = [ - "borsh", + "borsh 1.2.1", "itertools 0.10.5", "lazy_static", "madato", @@ -4399,7 +4566,7 @@ name = "namada_ethereum_bridge" version = "0.32.1" dependencies = [ "assert_matches", - "borsh", + "borsh 1.2.1", "data-encoding", "ethabi", "ethers", @@ -4432,7 +4599,7 @@ dependencies = [ name = "namada_examples" version = "0.32.1" dependencies = [ - "borsh", + "borsh 1.2.1", "data-encoding", "masp_proofs", "namada_apps", @@ -4453,7 +4620,7 @@ name = "namada_gas" version = "0.32.1" dependencies = [ "assert_matches", - "borsh", + "borsh 1.2.1", "linkme", "namada_core", "namada_macros", @@ -4467,7 +4634,7 @@ dependencies = [ name = "namada_governance" version = "0.32.1" dependencies = [ - "borsh", + "borsh 1.2.1", "itertools 0.10.5", "linkme", "namada_core", @@ -4487,7 +4654,7 @@ dependencies = [ name = "namada_ibc" version = "0.32.1" dependencies = [ - "borsh", + "borsh 1.2.1", "ibc", "ibc-derive", "ibc-testkit", @@ -4503,6 +4670,7 @@ dependencies = [ "primitive-types", "proptest", "prost 0.12.3", + "serde_json", "sha2 0.9.9", "thiserror", "tracing", @@ -4512,7 +4680,7 @@ dependencies = [ name = "namada_light_sdk" version = "0.32.1" dependencies = [ - "borsh", + "borsh 1.2.1", "borsh-ext", "namada_sdk", "prost 0.12.3", @@ -4541,7 +4709,7 @@ name = "namada_merkle_tree" version = "0.32.1" dependencies = [ "assert_matches", - "borsh", + "borsh 1.2.1", "eyre", "ics23", "itertools 0.10.5", @@ -4559,7 +4727,7 @@ dependencies = [ name = "namada_migrations" version = "0.32.1" dependencies = [ - "borsh", + "borsh 1.2.1", "data-encoding", "lazy_static", "linkme", @@ -4571,7 +4739,7 @@ dependencies = [ name = 
"namada_parameters" version = "0.32.1" dependencies = [ - "borsh", + "borsh 1.2.1", "namada_core", "namada_macros", "namada_storage", @@ -4583,12 +4751,13 @@ name = "namada_proof_of_stake" version = "0.32.1" dependencies = [ "assert_matches", - "borsh", + "borsh 1.2.1", "data-encoding", "derivative", "itertools 0.10.5", "linkme", "namada_account", + "namada_controller", "namada_core", "namada_governance", "namada_macros", @@ -4626,7 +4795,7 @@ dependencies = [ "base58 0.2.0", "bimap", "bls12_381", - "borsh", + "borsh 1.2.1", "borsh-ext", "circular-queue", "data-encoding", @@ -4645,6 +4814,7 @@ dependencies = [ "namada_account", "namada_core", "namada_ethereum_bridge", + "namada_gas", "namada_governance", "namada_ibc", "namada_macros", @@ -4690,8 +4860,9 @@ dependencies = [ name = "namada_shielded_token" version = "0.32.1" dependencies = [ - "borsh", + "borsh 1.2.1", "masp_primitives", + "namada_controller", "namada_core", "namada_parameters", "namada_storage", @@ -4708,7 +4879,7 @@ name = "namada_state" version = "0.32.1" dependencies = [ "assert_matches", - "borsh", + "borsh 1.2.1", "chrono", "ics23", "itertools 0.10.5", @@ -4737,7 +4908,7 @@ dependencies = [ name = "namada_storage" version = "0.32.1" dependencies = [ - "borsh", + "borsh 1.2.1", "itertools 0.10.5", "linkme", "namada_core", @@ -4757,7 +4928,7 @@ dependencies = [ name = "namada_test_utils" version = "0.32.1" dependencies = [ - "borsh", + "borsh 1.2.1", "namada_core", "strum 0.24.1", ] @@ -4767,8 +4938,9 @@ name = "namada_tests" version = "0.32.1" dependencies = [ "assert_cmd", + "assert_matches", "async-trait", - "borsh", + "borsh 1.2.1", "borsh-ext", "chrono", "clap", @@ -4839,7 +5011,8 @@ version = "0.32.1" dependencies = [ "ark-bls12-381", "assert_matches", - "borsh", + "bitflags 2.5.0", + "borsh 1.2.1", "data-encoding", "linkme", "masp_primitives", @@ -4872,7 +5045,7 @@ dependencies = [ name = "namada_tx_prelude" version = "0.32.1" dependencies = [ - "borsh", + "borsh 1.2.1", "masp_primitives", 
"namada_account", "namada_core", @@ -4894,7 +5067,7 @@ dependencies = [ name = "namada_vm_env" version = "0.32.1" dependencies = [ - "borsh", + "borsh 1.2.1", "masp_primitives", "namada_core", ] @@ -4903,7 +5076,7 @@ dependencies = [ name = "namada_vote_ext" version = "0.32.1" dependencies = [ - "borsh", + "borsh 1.2.1", "data-encoding", "linkme", "namada_core", @@ -4920,6 +5093,7 @@ dependencies = [ "derivative", "masp_primitives", "namada_core", + "namada_ibc", "namada_storage", "namada_tx", "thiserror", @@ -4929,7 +5103,7 @@ dependencies = [ name = "namada_vp_prelude" version = "0.32.1" dependencies = [ - "borsh", + "borsh 1.2.1", "namada_account", "namada_core", "namada_governance", @@ -5294,7 +5468,7 @@ version = "0.10.61" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6b8419dc8cc6d866deb801274bba2e6f8f6108c1bb7fcc10ee5ab864931dbb45" dependencies = [ - "bitflags 2.4.1", + "bitflags 2.5.0", "cfg-if 1.0.0", "foreign-types", "libc", @@ -5748,6 +5922,15 @@ dependencies = [ "uint", ] +[[package]] +name = "proc-macro-crate" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d6ea3c4595b96363c13943497db34af4460fb474a95c43f4446ad341b8c9785" +dependencies = [ + "toml 0.5.11", +] + [[package]] name = "proc-macro-crate" version = "1.3.1" @@ -5809,7 +5992,7 @@ checksum = "31b476131c3c86cb68032fdc5cb6d5a1045e3e42d96b69fa599fd77701e1f5bf" dependencies = [ "bit-set", "bit-vec", - "bitflags 2.4.1", + "bitflags 2.5.0", "lazy_static", "num-traits 0.2.17", "rand 0.8.5", @@ -6065,7 +6248,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5ce3fb6ad83f861aac485e76e1985cd109d9a3713802152be56c3b1f0e0658ed" dependencies = [ "crossbeam-deque", - "crossbeam-utils 0.8.16", + "crossbeam-utils 0.8.19", ] [[package]] @@ -6201,13 +6384,13 @@ version = "0.11.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"046cd98826c46c2ac8ddecae268eb5c2e58628688a5fc7a2643704a73faba95b" dependencies = [ - "base64 0.21.5", + "base64 0.21.7", "bytes", "encoding_rs", "futures-core", "futures-util", "h2", - "http", + "http 0.2.11", "http-body", "hyper", "hyper-rustls", @@ -6300,7 +6483,7 @@ dependencies = [ "rkyv_derive", "seahash", "tinyvec", - "uuid 1.6.1", + "uuid 1.8.0", ] [[package]] @@ -6412,7 +6595,7 @@ version = "0.38.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9470c4bf8246c8daf25f9598dca807fb6510347b1e1cfa55749113850c79d88a" dependencies = [ - "bitflags 2.4.1", + "bitflags 2.5.0", "errno", "libc", "linux-raw-sys", @@ -6449,7 +6632,7 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" dependencies = [ - "base64 0.21.5", + "base64 0.21.7", ] [[package]] @@ -6537,6 +6720,30 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "schemars" +version = "0.8.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "45a28f4c49489add4ce10783f7911893516f15afe45d015608d41faca6bc4d29" +dependencies = [ + "dyn-clone", + "schemars_derive", + "serde 1.0.193", + "serde_json", +] + +[[package]] +name = "schemars_derive" +version = "0.8.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c767fd6fa65d9ccf9cf026122c1b555f2ef9a4f0cea69da4d7dbc3e258d30967" +dependencies = [ + "proc-macro2", + "quote", + "serde_derive_internals", + "syn 1.0.109", +] + [[package]] name = "scoped-tls" version = "1.0.1" @@ -6720,6 +6927,17 @@ dependencies = [ "syn 2.0.52", ] +[[package]] +name = "serde_derive_internals" +version = "0.26.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85bf8229e7920a9f636479437026331ce11aa132b4dde37d121944a44d6e5f3c" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "serde_json" version = "1.0.108" @@ 
-6982,7 +7200,7 @@ version = "0.3.1-pre" source = "git+https://github.com/heliaxdev/sparse-merkle-tree?rev=515687fe7884cb365067ac86c66ac3613de176bb#515687fe7884cb365067ac86c66ac3613de176bb" dependencies = [ "blake2b-rs", - "borsh", + "borsh 1.2.1", "cfg-if 1.0.0", "ics23", "sha2 0.9.9", @@ -7712,7 +7930,7 @@ dependencies = [ "futures-core", "futures-util", "h2", - "http", + "http 0.2.11", "http-body", "hyper", "hyper-timeout", @@ -7812,7 +8030,7 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3566e8ce28cc0a3fe42519fc80e6b4c943cc4c8cef275620eb8dac2d3d4e06cf" dependencies = [ - "crossbeam-channel 0.5.8", + "crossbeam-channel 0.5.12", "thiserror", "time", "tracing-subscriber", @@ -7926,7 +8144,7 @@ dependencies = [ "byteorder", "bytes", "data-encoding", - "http", + "http 0.2.11", "httparse", "log", "rand 0.8.5", @@ -8091,9 +8309,9 @@ dependencies = [ [[package]] name = "uuid" -version = "1.6.1" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e395fcf16a7a3d8127ec99782007af141946b4795001f876d54fb0d55978560" +checksum = "a183cf7feeba97b4dd1c0d46788634f6221d87fa961b305bed08c851829efcc0" [[package]] name = "valuable" @@ -8151,7 +8369,7 @@ dependencies = [ "futures-channel", "futures-util", "headers", - "http", + "http 0.2.11", "hyper", "log", "mime", diff --git a/Cargo.toml b/Cargo.toml index 322238fb7e..25c8c2f1c8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,6 +5,7 @@ members = [ "crates/account", "crates/apps", "crates/benches", + "crates/controller", "crates/core", "crates/encoding_spec", "crates/ethereum_bridge", @@ -69,6 +70,7 @@ base64 = "0.13.0" bech32 = "0.8.0" bimap = {version = "0.6.2", features = ["serde"]} bit-set = "0.5.2" +bitflags = { version = "2.5.0", features = ["serde"] } blake2b-rs = "0.2.0" bls12_381 = "0.8" byte-unit = "4.0.13" @@ -86,6 +88,7 @@ data-encoding = "2.3.2" derivation-path = "0.2.0" derivative = "2.2.0" directories = "4.0.1" 
+drain_filter_polyfill = "0.1.3" ed25519-consensus = "1.2.0" escargot = "0.5.7" ethabi = "18.0.0" @@ -101,11 +104,12 @@ flate2 = "1.0.22" fs_extra = "1.2.0" futures = "0.3" git2 = { version = "0.18.1", default-features = false } -ibc = {version = "0.48.1", default-features = false, features = ["serde"]} -ibc-derive = "0.4.0" -ibc-testkit = {version = "0.48.1", default-features = false} +ibc = {version = "0.50.0", features = ["serde"]} +ibc-derive = "0.6.0" +ibc-testkit = {version = "0.50.0", default-features = false} ics23 = "0.11.0" index-set = { git = "https://github.com/heliaxdev/index-set", tag = "v0.8.1", features = ["serialize-borsh", "serialize-serde"] } +indexmap = { git = "https://github.com/heliaxdev/indexmap", tag = "2.2.4-heliax-1", features = ["borsh-schema", "serde"] } itertools = "0.10.0" jubjub = "0.10" k256 = { version = "0.13.0", default-features = false, features = ["ecdsa", "pkcs8", "precomputed-tables", "serde", "std"]} @@ -116,8 +120,8 @@ libc = "0.2.97" libloading = "0.7.2" linkme = "0.3.24" # branch = "murisi/namada-integration" -masp_primitives = { git = "https://github.com/anoma/masp", rev = "30492323d98b0531fd18b6285cd94afcaa4066d2" } -masp_proofs = { git = "https://github.com/anoma/masp", rev = "30492323d98b0531fd18b6285cd94afcaa4066d2", default-features = false, features = ["local-prover"] } +masp_primitives = { git = "https://github.com/anoma/masp", rev = "6cbc8bd90a71cc280492c44bc3415162093daa76" } +masp_proofs = { git = "https://github.com/anoma/masp", rev = "6cbc8bd90a71cc280492c44bc3415162093daa76", default-features = false, features = ["local-prover"] } num256 = "0.3.5" num_cpus = "1.13.0" num-derive = "0.3.3" @@ -178,6 +182,10 @@ tracing-appender = "0.2.2" tracing-log = "0.1.2" tracing-subscriber = {version = "0.3.7", default-features = false, features = ["env-filter", "fmt"]} wasmparser = "0.107.0" +wasm-instrument = {version = "0.4.0", features = ["sign_ext"]} +wasmer = {git = "https://github.com/heliaxdev/wasmer", rev = 
"255054f7f58b7b4a525f2fee6b9b86422d1ca15b"} +wasmer-compiler-singlepass = { git = "https://github.com/heliaxdev/wasmer", rev = "255054f7f58b7b4a525f2fee6b9b86422d1ca15b" } +wasmer-engine-universal = { git = "https://github.com/heliaxdev/wasmer", rev = "255054f7f58b7b4a525f2fee6b9b86422d1ca15b" } winapi = "0.3.9" yansi = "0.5.1" zeroize = { version = "1.5.5", features = ["zeroize_derive"] } diff --git a/Makefile b/Makefile index 401875bb65..954d5afb82 100644 --- a/Makefile +++ b/Makefile @@ -17,10 +17,8 @@ debug-cargo := $(env) $(debug-env) cargo nightly := $(shell cat rust-nightly-version) # Path to the wasm source for the provided txs and VPs -wasms := wasm/wasm_source -wasms_for_tests := wasm_for_tests/wasm_source -# Paths for all the wasm templates -wasm_templates := wasm/tx_template wasm/vp_template +wasms := wasm +wasms_for_tests := wasm_for_tests ifdef JOBS jobs := -j $(JOBS) @@ -93,8 +91,7 @@ check-wasm = $(cargo) check --target wasm32-unknown-unknown --manifest-path $(wa check: $(cargo) check --workspace && \ make -C $(wasms) check && \ - make -C $(wasms_for_tests) check && \ - $(foreach wasm,$(wasm_templates),$(check-wasm) && ) true + make -C $(wasms_for_tests) check check-mainnet: $(cargo) check --workspace --features "mainnet" @@ -102,18 +99,21 @@ check-mainnet: # Check that every crate can be built with default features and that namada crate # can be built for wasm check-crates: + rustup target add --toolchain $(nightly) wasm32-unknown-unknown $(foreach p,$(crates), echo "Checking $(p)" && cargo +$(nightly) check -Z unstable-options --tests -p $(p) && ) \ + make -C $(wasms) check && \ make -C $(wasms_for_tests) check && \ cargo check --package namada --target wasm32-unknown-unknown --no-default-features --features "namada-sdk" && \ cargo check --package namada_sdk --all-features clippy-wasm = $(cargo) +$(nightly) clippy --manifest-path $(wasm)/Cargo.toml --all-targets -- -D warnings +# Need a separate command for benchmarks to prevent the "testing" 
feature flag from being activated clippy: - $(cargo) +$(nightly) clippy $(jobs) --all-targets -- -D warnings && \ + $(cargo) +$(nightly) clippy $(jobs) --all-targets --workspace --exclude namada_benchmarks -- -D warnings && \ + $(cargo) +$(nightly) clippy $(jobs) --all-targets --package namada_benchmarks -- -D warnings && \ make -C $(wasms) clippy && \ - make -C $(wasms_for_tests) clippy && \ - $(foreach wasm,$(wasm_templates),$(clippy-wasm) && ) true + make -C $(wasms_for_tests) clippy clippy-mainnet: $(cargo) +$(nightly) clippy --all-targets --features "mainnet" -- -D warnings @@ -190,6 +190,14 @@ test-unit: -- --skip e2e --skip integration --skip pos_state_machine_test \ -Z unstable-options --report-time +test-unit-with-eth-bridge: + $(cargo) +$(nightly) test \ + --features namada-eth-bridge \ + $(TEST_FILTER) \ + $(jobs) \ + -- --skip e2e --skip integration --skip pos_state_machine_test \ + -Z unstable-options --report-time + test-unit-with-coverage: $(cargo) +$(nightly) llvm-cov --output-path lcov.info \ --features namada/testing \ @@ -245,15 +253,11 @@ test-pos-sm: fmt-wasm = $(cargo) +$(nightly) fmt --manifest-path $(wasm)/Cargo.toml fmt: - $(cargo) +$(nightly) fmt --all && \ - make -C $(wasms) fmt && \ - $(foreach wasm,$(wasm_templates),$(fmt-wasm) && ) true + $(cargo) +$(nightly) fmt --all && make -C $(wasms) fmt fmt-check-wasm = $(cargo) +$(nightly) fmt --manifest-path $(wasm)/Cargo.toml -- --check fmt-check: - $(cargo) +$(nightly) fmt --all -- --check && \ - make -C $(wasms) fmt-check && \ - $(foreach wasm,$(wasm_templates),$(fmt-check-wasm) && ) true + $(cargo) +$(nightly) fmt --all -- --check && make -C $(wasms) fmt-check watch: $(cargo) watch @@ -262,7 +266,7 @@ clean: $(cargo) clean bench: - $(cargo) bench --package namada_benchmarks + $(cargo) bench --package namada_benchmarks build-doc: $(cargo) doc --no-deps @@ -282,7 +286,7 @@ debug-wasm-scripts-docker: build-wasm-image-docker # Build the validity predicate and transactions wasm 
build-wasm-scripts: - rm wasm/*.wasm || true + rm $(wasms)/*.wasm || true make -C $(wasms) make opt-wasm make checksum-wasm @@ -294,6 +298,18 @@ debug-wasm-scripts: make opt-wasm make checksum-wasm +# Build the validity predicate and transactions wasm for tests +build-wasm-tests-scripts: + rm $(wasms_for_tests)/*.wasm || true + make -C $(wasms_for_tests) + make opt-wasm-tests + +# Debug build the validity predicate and transactions wasm for tests +debug-wasm-tests-scripts: + rm $(wasms_for_tests)/*.wasm || true + make -C $(wasms_for_tests) debug + make opt-wasm-tests + # need python checksum-wasm: python3 wasm/checksums.py @@ -302,6 +318,9 @@ checksum-wasm: opt-wasm: @for file in $(shell ls wasm/*.wasm); do wasm-opt -Oz -o $${file} $${file}; done +opt-wasm-tests: + @for file in $(shell ls wasm_for_tests/*.wasm); do wasm-opt -Oz -o $${file} $${file}; done + clean-wasm-scripts: make -C $(wasms) clean diff --git a/clippy.toml b/clippy.toml new file mode 100644 index 0000000000..24f2f59ba3 --- /dev/null +++ b/clippy.toml @@ -0,0 +1,11 @@ +disallowed-types = [ + { path = "std::collections::HashMap", reason = "Non-deterministic iter - use indexmap::IndexMap instead" }, + { path = "std::collections::HashSet", reason = "Non-deterministic iter - use indexmap::IndexSet instead" }, +] +disallowed-methods = [ + { path = "std::time::Instant::now", reason = "Do not use current date/time in code that must be deterministic" }, + { path = "chrono::DateTime::now", reason = "Do not use current date/time in code that must be deterministic" }, + { path = "chrono::Utc::now", reason = "Do not use current date/time in code that must be deterministic" }, + { path = "namada_core::time::DateTimeUtc::now", reason = "Do not use current date/time in code that must be deterministic" }, + { path = "wasmtimer::std::Instant", reason = "Do not use current date/time in code that must be deterministic" }, +] diff --git a/crates/account/src/lib.rs b/crates/account/src/lib.rs index 
b1eda546ab..5b25e1d06e 100644 --- a/crates/account/src/lib.rs +++ b/crates/account/src/lib.rs @@ -53,4 +53,9 @@ impl Account { ) -> Option { self.public_keys_map.get_index_from_public_key(public_key) } + + /// Get all public keys of the account + pub fn get_all_public_keys(&self) -> Vec { + self.public_keys_map.pk_to_idx.keys().cloned().collect() + } } diff --git a/crates/apps/Cargo.toml b/crates/apps/Cargo.toml index b470bbbcaf..f3ce2af705 100644 --- a/crates/apps/Cargo.toml +++ b/crates/apps/Cargo.toml @@ -56,7 +56,7 @@ mainnet = [ ] # for integration tests and test utilities testing = ["namada_test_utils"] -benches = ["testing", "namada_test_utils"] +benches = ["namada_test_utils"] integration = [] jemalloc = ["rocksdb/jemalloc"] migrations = [ @@ -65,6 +65,10 @@ migrations = [ "namada/migrations", "linkme", ] +namada-eth-bridge = [ + "namada/namada-eth-bridge", + "namada_sdk/namada-eth-bridge", +] [dependencies] namada = {path = "../namada", features = ["multicore", "http-client", "tendermint-rpc", "std"]} @@ -93,6 +97,7 @@ config.workspace = true data-encoding.workspace = true derivative.workspace = true directories.workspace = true +drain_filter_polyfill.workspace = true ed25519-consensus = { workspace = true, features = ["std"] } ethabi.workspace = true ethbridge-bridge-events.workspace = true diff --git a/crates/apps/src/bin/namada-node/cli.rs b/crates/apps/src/bin/namada-node/cli.rs index 5e63a6bae2..8be47b4cfc 100644 --- a/crates/apps/src/bin/namada-node/cli.rs +++ b/crates/apps/src/bin/namada-node/cli.rs @@ -137,9 +137,9 @@ pub fn main() -> Result<()> { fn sleep_until(time: Option) { // Sleep until start time if needed if let Some(time) = time { - if let Ok(sleep_time) = - time.0.signed_duration_since(Utc::now()).to_std() - { + #[allow(clippy::disallowed_methods)] + let now = Utc::now(); + if let Ok(sleep_time) = time.0.signed_duration_since(now).to_std() { if !sleep_time.is_zero() { tracing::info!( "Waiting ledger start time: {:?}, time left: {:?}", 
diff --git a/crates/apps/src/lib/bench_utils.rs b/crates/apps/src/lib/bench_utils.rs index 766f73e20b..8dbb038312 100644 --- a/crates/apps/src/lib/bench_utils.rs +++ b/crates/apps/src/lib/bench_utils.rs @@ -27,7 +27,7 @@ use namada::core::time::DateTimeUtc; use namada::core::token::{Amount, DenominatedAmount, Transfer}; use namada::governance::storage::proposal::ProposalType; use namada::governance::InitProposalData; -use namada::ibc::apps::transfer::types::msgs::transfer::MsgTransfer; +use namada::ibc::apps::transfer::types::msgs::transfer::MsgTransfer as IbcMsgTransfer; use namada::ibc::apps::transfer::types::packet::PacketData; use namada::ibc::apps::transfer::types::PrefixedCoin; use namada::ibc::clients::tendermint::client_state::ClientState; @@ -52,15 +52,16 @@ use namada::ibc::core::connection::types::{ }; use namada::ibc::core::host::types::identifiers::{ ChainId as IbcChainId, ChannelId as NamadaChannelId, ChannelId, ClientId, - ClientType, ConnectionId, ConnectionId as NamadaConnectionId, - PortId as NamadaPortId, PortId, + ConnectionId, ConnectionId as NamadaConnectionId, PortId as NamadaPortId, + PortId, }; use namada::ibc::core::host::types::path::{ ClientConsensusStatePath, ClientStatePath, Path as IbcPath, }; use namada::ibc::primitives::proto::{Any, Protobuf}; -use namada::ibc::primitives::{Msg, Timestamp as IbcTimestamp}; -use namada::ibc::storage::port_key; +use namada::ibc::primitives::Timestamp as IbcTimestamp; +use namada::ibc::storage::{mint_limit_key, port_key, throughput_limit_key}; +use namada::ibc::MsgTransfer; use namada::io::StdIo; use namada::ledger::dry_run_tx; use namada::ledger::gas::TxGasMeter; @@ -71,8 +72,8 @@ use namada::ledger::queries::{ }; use namada::state::StorageRead; use namada::tx::data::pos::Bond; -use namada::tx::data::{TxResult, VpsResult}; -use namada::tx::{Code, Data, Section, Signature, Tx}; +use namada::tx::data::{Fee, TxResult, VpsResult}; +use namada::tx::{Authorization, Code, Data, Section, Tx}; use 
namada::vm::wasm::run; use namada::{proof_of_stake, tendermint}; use namada_sdk::masp::{ @@ -249,13 +250,12 @@ impl Default for BenchShell { let signed_tx = bench_shell.generate_tx( TX_INIT_PROPOSAL_WASM, InitProposalData { - id: 0, content: content_section.get_hash(), author: defaults::albert_address(), - r#type: ProposalType::Default(None), + r#type: ProposalType::Default, voting_start_epoch, voting_end_epoch: voting_start_epoch + 3_u64, - grace_epoch: voting_start_epoch + 9_u64, + activation_epoch: voting_start_epoch + 9_u64, }, None, Some(vec![content_section]), @@ -289,9 +289,7 @@ impl BenchShell { extra_sections: Option>, signers: Vec<&SecretKey>, ) -> Tx { - let mut tx = Tx::from_type(namada::tx::data::TxType::Decrypted( - namada::tx::data::DecryptedTx::Decrypted, - )); + let mut tx = Tx::from_type(namada::tx::data::TxType::Raw); // NOTE: here we use the code hash to avoid including the cost for the // wasm validation. The wasm codes (both txs and vps) are always @@ -319,7 +317,7 @@ impl BenchShell { } for signer in signers { - tx.add_section(Section::Signature(Signature::new( + tx.add_section(Section::Authorization(Authorization::new( vec![tx.raw_header_hash()], [(0, signer.clone())].into_iter().collect(), None, @@ -329,11 +327,9 @@ impl BenchShell { tx } - pub fn generate_ibc_tx(&self, wasm_code_path: &str, msg: impl Msg) -> Tx { + pub fn generate_ibc_tx(&self, wasm_code_path: &str, data: Vec) -> Tx { // This function avoid serializaing the tx data with Borsh - let mut tx = Tx::from_type(namada::tx::data::TxType::Decrypted( - namada::tx::data::DecryptedTx::Decrypted, - )); + let mut tx = Tx::from_type(namada::tx::data::TxType::Raw); let code_hash = self .read_storage_key(&Key::wasm_hash(wasm_code_path)) .unwrap(); @@ -342,10 +338,7 @@ impl BenchShell { Some(wasm_code_path.to_string()), )); - let mut data = vec![]; - prost::Message::encode(&msg.to_any(), &mut data).unwrap(); tx.set_data(Data::new(data)); - // NOTE: the Ibc VP doesn't actually check the 
signature tx } @@ -365,13 +358,14 @@ impl BenchShell { let timeout_height = TimeoutHeight::At(IbcHeight::new(0, 100).unwrap()); + #[allow(clippy::disallowed_methods)] let now: namada::tendermint::Time = DateTimeUtc::now().try_into().unwrap(); let now: IbcTimestamp = now.into(); let timeout_timestamp = (now + std::time::Duration::new(3600, 0)).unwrap(); - let msg = MsgTransfer { + let message = IbcMsgTransfer { port_id_on_a: PortId::transfer(), chan_id_on_a: ChannelId::new(5), packet_data: PacketData { @@ -384,10 +378,16 @@ impl BenchShell { timeout_timestamp_on_b: timeout_timestamp, }; - self.generate_ibc_tx(TX_IBC_WASM, msg) + let msg = MsgTransfer { + message, + transfer: None, + }; + + self.generate_ibc_tx(TX_IBC_WASM, msg.serialize_to_vec()) } - pub fn execute_tx(&mut self, tx: &Tx) { + /// Execute the tx and retur a set of verifiers inserted by the tx. + pub fn execute_tx(&mut self, tx: &Tx) -> BTreeSet
{ let gas_meter = RefCell::new(TxGasMeter::new_from_sub_limit(u64::MAX.into())); run::tx( @@ -398,7 +398,7 @@ impl BenchShell { &mut self.inner.vp_wasm_cache, &mut self.inner.tx_wasm_cache, ) - .unwrap(); + .unwrap() } pub fn advance_epoch(&mut self) { @@ -429,9 +429,7 @@ impl BenchShell { .set_header(get_dummy_header()) .unwrap(); // Set client state - let client_id = - ClientId::new(ClientType::new("01-tendermint").unwrap(), 1) - .unwrap(); + let client_id = ClientId::new("07-tendermint", 1).unwrap(); let client_state_key = addr_key.join(&Key::from( IbcPath::ClientState(ClientStatePath(client_id.clone())) .to_string() @@ -459,6 +457,7 @@ impl BenchShell { .expect("write failed"); // Set consensus state + #[allow(clippy::disallowed_methods)] let now: namada::tendermint::Time = DateTimeUtc::now().try_into().unwrap(); let consensus_key = addr_key.join(&Key::from( @@ -549,6 +548,21 @@ impl BenchShell { .unwrap(); } + pub fn enable_ibc_transfer(&mut self) { + let token = address::testing::nam(); + let mint_limit_key = mint_limit_key(&token); + self.state + .db_write(&mint_limit_key, Amount::max_signed().serialize_to_vec()) + .unwrap(); + let throughput_limit_key = throughput_limit_key(&token); + self.state + .db_write( + &throughput_limit_key, + Amount::max_signed().serialize_to_vec(), + ) + .unwrap(); + } + // Update the block height in state to guarantee a valid response to the // client queries pub fn commit_block(&mut self) { @@ -560,11 +574,27 @@ impl BenchShell { .unwrap(); self.inner.commit(); + self.inner + .state + .in_mem_mut() + .set_header(get_dummy_header()) + .unwrap(); } // Commit a masp transaction and cache the tx and the changed keys for // client queries - pub fn commit_masp_tx(&mut self, masp_tx: Tx) { + pub fn commit_masp_tx(&mut self, mut masp_tx: Tx) { + use namada::core::key::RefTo; + masp_tx.add_wrapper( + Fee { + amount_per_gas_unit: DenominatedAmount::native(0.into()), + token: self.state.in_mem().native_token.clone(), + }, + 
defaults::albert_keypair().ref_to(), + self.state.in_mem().last_epoch, + 0.into(), + None, + ); self.last_block_masp_txs .push((masp_tx, self.state.write_log().get_keys())); self.state.commit_tx(); @@ -575,9 +605,7 @@ pub fn generate_foreign_key_tx(signer: &SecretKey) -> Tx { let wasm_code = std::fs::read("../../wasm_for_tests/tx_write.wasm").unwrap(); - let mut tx = Tx::from_type(namada::tx::data::TxType::Decrypted( - namada::tx::data::DecryptedTx::Decrypted, - )); + let mut tx = Tx::from_type(namada::tx::data::TxType::Raw); tx.set_code(Code::new(wasm_code, None)); tx.set_data(Data::new( TxWriteData { @@ -586,7 +614,7 @@ pub fn generate_foreign_key_tx(signer: &SecretKey) -> Tx { } .serialize_to_vec(), )); - tx.add_section(Section::Signature(Signature::new( + tx.add_section(Section::Authorization(Authorization::new( vec![tx.raw_header_hash()], [(0, signer.clone())].into_iter().collect(), None, @@ -857,6 +885,7 @@ impl Client for BenchShell { .map(|(idx, (_tx, changed_keys))| { let tx_result = TxResult { gas_used: 0.into(), + wrapper_changed_keys: Default::default(), changed_keys: changed_keys.to_owned(), vps_result: VpsResult::default(), initialized_accounts: vec![], @@ -1071,4 +1100,68 @@ impl BenchShieldedCtx { }; (ctx, tx) } + + pub fn generate_shielded_action( + self, + amount: Amount, + source: TransferSource, + target: TransferTarget, + ) -> (Self, Tx) { + let (ctx, tx) = self.generate_masp_tx( + amount, + source.clone(), + TransferTarget::Address(Address::Internal(InternalAddress::Ibc)), + ); + + let token = PrefixedCoin { + denom: address::testing::nam().to_string().parse().unwrap(), + amount: amount + .to_string_native() + .split('.') + .next() + .unwrap() + .to_string() + .parse() + .unwrap(), + }; + let timeout_height = TimeoutHeight::At(IbcHeight::new(0, 100).unwrap()); + + #[allow(clippy::disallowed_methods)] + let now: namada::tendermint::Time = + DateTimeUtc::now().try_into().unwrap(); + let now: IbcTimestamp = now.into(); + let timeout_timestamp = + 
(now + std::time::Duration::new(3600, 0)).unwrap(); + let msg = IbcMsgTransfer { + port_id_on_a: PortId::transfer(), + chan_id_on_a: ChannelId::new(5), + packet_data: PacketData { + token, + sender: source.effective_address().to_string().into(), + receiver: target.effective_address().to_string().into(), + memo: "".parse().unwrap(), + }, + timeout_height_on_b: timeout_height, + timeout_timestamp_on_b: timeout_timestamp, + }; + + let transfer = + Transfer::deserialize(&mut tx.data().unwrap().as_slice()).unwrap(); + let masp_tx = tx + .get_section(&transfer.shielded.unwrap()) + .unwrap() + .masp_tx() + .unwrap(); + let msg = MsgTransfer { + message: msg, + transfer: Some(transfer), + }; + + let mut ibc_tx = ctx + .shell + .generate_ibc_tx(TX_IBC_WASM, msg.serialize_to_vec()); + ibc_tx.add_masp_tx_section(masp_tx); + + (ctx, ibc_tx) + } } diff --git a/crates/apps/src/lib/cli.rs b/crates/apps/src/lib/cli.rs index f405644a6b..dc03d33d21 100644 --- a/crates/apps/src/lib/cli.rs +++ b/crates/apps/src/lib/cli.rs @@ -251,6 +251,7 @@ pub mod cmds { .subcommand(QueryMaspRewardTokens::def().display_order(5)) .subcommand(QueryBlock::def().display_order(5)) .subcommand(QueryBalance::def().display_order(5)) + .subcommand(QueryIbcToken::def().display_order(5)) .subcommand(QueryBonds::def().display_order(5)) .subcommand(QueryBondedStake::def().display_order(5)) .subcommand(QuerySlashes::def().display_order(5)) @@ -270,7 +271,6 @@ pub mod cmds { // Actions .subcommand(SignTx::def().display_order(6)) .subcommand(ShieldedSync::def().display_order(6)) - .subcommand(GenIbcShieldedTransfer::def().display_order(6)) // Utils .subcommand(Utils::def().display_order(7)) } @@ -325,6 +325,7 @@ pub mod cmds { Self::parse_with_ctx(matches, QueryMaspRewardTokens); let query_block = Self::parse_with_ctx(matches, QueryBlock); let query_balance = Self::parse_with_ctx(matches, QueryBalance); + let query_ibc_token = Self::parse_with_ctx(matches, QueryIbcToken); let query_bonds = 
Self::parse_with_ctx(matches, QueryBonds); let query_bonded_stake = Self::parse_with_ctx(matches, QueryBondedStake); @@ -353,8 +354,6 @@ pub mod cmds { Self::parse_with_ctx(matches, AddToEthBridgePool); let sign_tx = Self::parse_with_ctx(matches, SignTx); let shielded_sync = Self::parse_with_ctx(matches, ShieldedSync); - let gen_ibc_shielded = - Self::parse_with_ctx(matches, GenIbcShieldedTransfer); let utils = SubCmd::parse(matches).map(Self::WithoutContext); tx_custom .or(tx_transfer) @@ -388,6 +387,7 @@ pub mod cmds { .or(query_masp_reward_tokens) .or(query_block) .or(query_balance) + .or(query_ibc_token) .or(query_bonds) .or(query_bonded_stake) .or(query_slashes) @@ -407,7 +407,6 @@ pub mod cmds { .or(query_account) .or(sign_tx) .or(shielded_sync) - .or(gen_ibc_shielded) .or(utils) } } @@ -479,6 +478,7 @@ pub mod cmds { QueryMaspRewardTokens(QueryMaspRewardTokens), QueryBlock(QueryBlock), QueryBalance(QueryBalance), + QueryIbcToken(QueryIbcToken), QueryBonds(QueryBonds), QueryBondedStake(QueryBondedStake), QueryCommissionRate(QueryCommissionRate), @@ -496,7 +496,6 @@ pub mod cmds { QueryRewards(QueryRewards), SignTx(SignTx), ShieldedSync(ShieldedSync), - GenIbcShieldedTransfer(GenIbcShieldedTransfer), } #[allow(clippy::large_enum_variant)] @@ -1723,6 +1722,25 @@ pub mod cmds { } } + #[derive(Clone, Debug)] + pub struct QueryIbcToken(pub args::QueryIbcToken); + + impl SubCmd for QueryIbcToken { + const CMD: &'static str = "ibc-token"; + + fn parse(matches: &ArgMatches) -> Option { + matches.subcommand_matches(Self::CMD).map(|matches| { + QueryIbcToken(args::QueryIbcToken::parse(matches)) + }) + } + + fn def() -> App { + App::new(Self::CMD) + .about("Query IBC token(s).") + .add_args::>() + } + } + #[derive(Clone, Debug)] pub struct QueryBonds(pub args::QueryBonds); @@ -2167,29 +2185,6 @@ pub mod cmds { } } - #[derive(Clone, Debug)] - pub struct GenIbcShieldedTransfer( - pub args::GenIbcShieldedTransfer, - ); - - impl SubCmd for GenIbcShieldedTransfer { - const 
CMD: &'static str = "ibc-gen-shielded"; - - fn parse(matches: &ArgMatches) -> Option { - matches.subcommand_matches(Self::CMD).map(|matches| { - GenIbcShieldedTransfer(args::GenIbcShieldedTransfer::parse( - matches, - )) - }) - } - - fn def() -> App { - App::new(Self::CMD) - .about("Generate shielded transfer for IBC.") - .add_args::>() - } - } - #[derive(Clone, Debug)] pub struct EpochSleep(pub args::Query); @@ -3002,7 +2997,6 @@ pub mod cmds { } pub mod args { - use std::collections::HashMap; use std::env; use std::net::SocketAddr; use std::path::PathBuf; @@ -3011,6 +3005,7 @@ pub mod args { use data_encoding::HEXUPPER; use namada::core::address::{Address, EstablishedAddress}; use namada::core::chain::{ChainId, ChainIdPrefix}; + use namada::core::collections::HashMap; use namada::core::dec::Dec; use namada::core::ethereum_events::EthAddress; use namada::core::keccak::KeccakHash; @@ -3217,7 +3212,6 @@ pub mod args { pub const PROPOSAL_ETH: ArgFlag = flag("eth"); pub const PROPOSAL_PGF_STEWARD: ArgFlag = flag("pgf-stewards"); pub const PROPOSAL_PGF_FUNDING: ArgFlag = flag("pgf-funding"); - pub const PROPOSAL_OFFLINE: ArgFlag = flag("offline"); pub const PROTOCOL_KEY: ArgOpt = arg_opt("protocol-key"); pub const PRE_GENESIS_PATH: ArgOpt = arg_opt("pre-genesis-path"); pub const PUBLIC_KEY: Arg = arg("public-key"); @@ -3242,6 +3236,9 @@ pub mod args { pub const RAW_PUBLIC_KEY_HASH_OPT: ArgOpt = RAW_PUBLIC_KEY_HASH.opt(); pub const RECEIVER: Arg = arg("receiver"); + pub const REFUND: ArgFlag = flag("refund"); + pub const REFUND_TARGET: ArgOpt = + arg_opt("refund-target"); pub const RELAYER: Arg
= arg("relayer"); pub const SAFE_MODE: ArgFlag = flag("safe-mode"); pub const SCHEME: ArgDefault = @@ -3250,6 +3247,7 @@ pub mod args { arg("self-bond-amount"); pub const SENDER: Arg = arg("sender"); pub const SHIELDED: ArgFlag = flag("shielded"); + pub const SHOW_IBC_TOKENS: ArgFlag = flag("show-ibc-tokens"); pub const SIGNER: ArgOpt = arg_opt("signer"); pub const SIGNING_KEYS: ArgMulti = arg_multi("signing-keys"); @@ -3267,6 +3265,7 @@ pub mod args { pub const TIMEOUT_SEC_OFFSET: ArgOpt = arg_opt("timeout-sec-offset"); pub const TM_ADDRESS: ArgOpt = arg_opt("tm-address"); pub const TOKEN_OPT: ArgOpt = TOKEN.opt(); + pub const TOKEN_STR_OPT: ArgOpt = TOKEN_STR.opt(); pub const TOKEN: Arg = arg("token"); pub const TOKEN_STR: Arg = arg("token"); pub const TRANSFER_SOURCE: Arg = arg("source"); @@ -4222,6 +4221,7 @@ pub mod args { channel_id: self.channel_id, timeout_height: self.timeout_height, timeout_sec_offset: self.timeout_sec_offset, + refund_target: chain_ctx.get_opt(&self.refund_target), memo: self.memo, tx_code_path: self.tx_code_path.to_path_buf(), } @@ -4239,6 +4239,7 @@ pub mod args { let channel_id = CHANNEL_ID.parse(matches); let timeout_height = TIMEOUT_HEIGHT.parse(matches); let timeout_sec_offset = TIMEOUT_SEC_OFFSET.parse(matches); + let refund_target = REFUND_TARGET.parse(matches); let memo = IBC_TRANSFER_MEMO_PATH.parse(matches).map(|path| { std::fs::read_to_string(path) .expect("Expected a file at given path") @@ -4254,6 +4255,7 @@ pub mod args { channel_id, timeout_height, timeout_sec_offset, + refund_target, memo, tx_code_path, } @@ -4278,6 +4280,10 @@ pub mod args { .help("The timeout height of the destination chain."), ) .arg(TIMEOUT_SEC_OFFSET.def().help("The timeout as seconds.")) + .arg(REFUND_TARGET.def().help( + "The refund target address when IBC shielded transfer \ + failure.", + )) .arg( IBC_TRANSFER_MEMO_PATH .def() @@ -4906,7 +4912,6 @@ pub mod args { InitProposal:: { tx: self.tx.to_sdk(ctx), proposal_data: 
std::fs::read(self.proposal_data).expect(""), - is_offline: self.is_offline, is_pgf_stewards: self.is_pgf_stewards, is_pgf_funding: self.is_pgf_funding, tx_code_path: self.tx_code_path, @@ -4918,7 +4923,6 @@ pub mod args { fn parse(matches: &ArgMatches) -> Self { let tx = Tx::parse(matches); let proposal_data = DATA_PATH.parse(matches); - let is_offline = PROPOSAL_OFFLINE.parse(matches); let is_pgf_stewards = PROPOSAL_PGF_STEWARD.parse(matches); let is_pgf_funding = PROPOSAL_PGF_FUNDING.parse(matches); let tx_code_path = PathBuf::from(TX_INIT_PROPOSAL); @@ -4927,7 +4931,6 @@ pub mod args { tx, proposal_data, tx_code_path, - is_offline, is_pgf_stewards, is_pgf_funding, } @@ -4938,19 +4941,6 @@ pub mod args { .arg(DATA_PATH.def().help( "The data path file (json) that describes the proposal.", )) - .arg( - PROPOSAL_OFFLINE - .def() - .help( - "Flag if the proposal should be serialized \ - offline (only for default types).", - ) - .conflicts_with_all([ - PROPOSAL_PGF_FUNDING.name, - PROPOSAL_PGF_STEWARD.name, - PROPOSAL_ETH.name, - ]), - ) .arg( PROPOSAL_ETH .def() @@ -4993,12 +4983,9 @@ pub mod args { tx: self.tx.to_sdk(ctx), proposal_id: self.proposal_id, vote: self.vote, - voter: ctx.borrow_chain_or_exit().get(&self.voter), - is_offline: self.is_offline, - proposal_data: self.proposal_data.map(|path| { - std::fs::read(path) - .expect("Should be able to read the file.") - }), + voter_address: ctx + .borrow_chain_or_exit() + .get(&self.voter_address), tx_code_path: self.tx_code_path.to_path_buf(), } } @@ -5007,54 +4994,26 @@ pub mod args { impl Args for VoteProposal { fn parse(matches: &ArgMatches) -> Self { let tx = Tx::parse(matches); - let proposal_id = PROPOSAL_ID_OPT.parse(matches); + let proposal_id = PROPOSAL_ID.parse(matches); let vote = PROPOSAL_VOTE.parse(matches); - let voter = ADDRESS.parse(matches); - let is_offline = PROPOSAL_OFFLINE.parse(matches); - let proposal_data = DATA_PATH_OPT.parse(matches); + let voter_address = ADDRESS.parse(matches); let 
tx_code_path = PathBuf::from(TX_VOTE_PROPOSAL); Self { tx, proposal_id, vote, - is_offline, - voter, - proposal_data, + voter_address, tx_code_path, } } fn def(app: App) -> App { app.add_args::>() - .arg( - PROPOSAL_ID_OPT - .def() - .help("The proposal identifier.") - .conflicts_with_all([ - PROPOSAL_OFFLINE.name, - DATA_PATH_OPT.name, - ]), - ) + .arg(PROPOSAL_ID_OPT.def().help("The proposal identifier.")) .arg(PROPOSAL_VOTE.def().help( "The vote for the proposal. Either yay, nay, or abstain.", )) - .arg( - PROPOSAL_OFFLINE - .def() - .help("Flag if the proposal vote should run offline.") - .conflicts_with(PROPOSAL_ID.name), - ) - .arg( - DATA_PATH_OPT - .def() - .help( - "The data path file (json) that describes the \ - proposal.", - ) - .requires(PROPOSAL_OFFLINE.name) - .conflicts_with(PROPOSAL_ID.name), - ) .arg(ADDRESS.def().help("The address of the voter.")) } } @@ -5142,11 +5101,7 @@ pub mod args { /// Common query args pub query: Query, /// Proposal id - pub proposal_id: Option, - /// Flag if proposal result should be run on offline data - pub offline: bool, - /// The folder containing the proposal and votes - pub proposal_folder: Option, + pub proposal_id: u64, } impl CliToSdk> for QueryProposalResult { @@ -5154,8 +5109,6 @@ pub mod args { QueryProposalResult:: { query: self.query.to_sdk(ctx), proposal_id: self.proposal_id, - offline: self.offline, - proposal_folder: self.proposal_folder, } } } @@ -5163,49 +5116,14 @@ pub mod args { impl Args for QueryProposalResult { fn parse(matches: &ArgMatches) -> Self { let query = Query::parse(matches); - let proposal_id = PROPOSAL_ID_OPT.parse(matches); - let offline = PROPOSAL_OFFLINE.parse(matches); - let proposal_folder = DATA_PATH_OPT.parse(matches); + let proposal_id = PROPOSAL_ID.parse(matches); - Self { - query, - proposal_id, - offline, - proposal_folder, - } + Self { query, proposal_id } } fn def(app: App) -> App { app.add_args::>() - .arg( - PROPOSAL_ID_OPT - .def() - .help("The proposal identifier.") - 
.conflicts_with_all([ - PROPOSAL_OFFLINE.name, - DATA_PATH_OPT.name, - ]), - ) - .arg( - PROPOSAL_OFFLINE - .def() - .help( - "Flag if the proposal result should run on \ - offline data.", - ) - .conflicts_with(PROPOSAL_ID.name) - .requires(DATA_PATH_OPT.name), - ) - .arg( - DATA_PATH_OPT - .def() - .help( - "The path to the folder containing the proposal \ - and votes files in json format.", - ) - .conflicts_with(PROPOSAL_ID.name) - .requires(PROPOSAL_OFFLINE.name), - ) + .arg(PROPOSAL_ID.def().help("The proposal identifier.")) } } @@ -5401,6 +5319,7 @@ pub mod args { owner: self.owner.map(|x| chain_ctx.get_cached(&x)), token: self.token.map(|x| chain_ctx.get(&x)), no_conversions: self.no_conversions, + show_ibc_tokens: self.show_ibc_tokens, } } } @@ -5411,11 +5330,13 @@ pub mod args { let owner = BALANCE_OWNER.parse(matches); let token = TOKEN_OPT.parse(matches); let no_conversions = NO_CONVERSIONS.parse(matches); + let show_ibc_tokens = SHOW_IBC_TOKENS.parse(matches); Self { query, owner, token, no_conversions, + show_ibc_tokens, } } @@ -5436,6 +5357,45 @@ pub mod args { "Whether not to automatically perform conversions.", ), ) + .arg(SHOW_IBC_TOKENS.def().help( + "Show IBC tokens. 
When the given token is an IBC denom, \ + IBC tokens will be shown even if this flag is false.", + )) + } + } + + impl CliToSdk> for QueryIbcToken { + fn to_sdk(self, ctx: &mut Context) -> QueryIbcToken { + let query = self.query.to_sdk(ctx); + let chain_ctx = ctx.borrow_mut_chain_or_exit(); + QueryIbcToken:: { + query, + token: self.token, + owner: self.owner.map(|x| chain_ctx.get_cached(&x)), + } + } + } + + impl Args for QueryIbcToken { + fn parse(matches: &ArgMatches) -> Self { + let query = Query::parse(matches); + let token = TOKEN_STR_OPT.parse(matches); + let owner = BALANCE_OWNER.parse(matches); + Self { + query, + owner, + token, + } + } + + fn def(app: App) -> App { + app.add_args::>() + .arg(TOKEN_STR_OPT.def().help("The base token to query.")) + .arg( + BALANCE_OWNER + .def() + .help("The account address whose token to query."), + ) } } @@ -5955,6 +5915,7 @@ pub mod args { amount: self.amount, port_id: self.port_id, channel_id: self.channel_id, + refund: self.refund, } } } @@ -5968,6 +5929,7 @@ pub mod args { let amount = InputAmount::Unvalidated(AMOUNT.parse(matches)); let port_id = PORT_ID.parse(matches); let channel_id = CHANNEL_ID.parse(matches); + let refund = REFUND.parse(matches); Self { query, output_folder, @@ -5976,6 +5938,7 @@ pub mod args { amount, port_id, channel_id, + refund, } } @@ -5997,6 +5960,11 @@ pub mod args { "The channel ID via which the token is received.", ), ) + .arg( + REFUND + .def() + .help("Generate the shielded transfer for refunding."), + ) } } diff --git a/crates/apps/src/lib/cli/client.rs b/crates/apps/src/lib/cli/client.rs index a344c41b21..1f1616887d 100644 --- a/crates/apps/src/lib/cli/client.rs +++ b/crates/apps/src/lib/cli/client.rs @@ -285,25 +285,8 @@ impl CliApi { }); client.wait_until_node_is_synced(&io).await?; let args = args.to_sdk(&mut ctx); - let cli::context::ChainContext { - wallet, - mut config, - shielded, - native_token, - } = ctx.take_chain_or_exit(); - let namada = NamadaImpl::native_new( - 
client, - wallet, - shielded, - io, - native_token, - ); - tx::submit_change_consensus_key( - &namada, - &mut config, - args, - ) - .await?; + let namada = ctx.to_sdk(client, io); + tx::submit_change_consensus_key(&namada, args).await?; } Sub::TxMetadataChange(TxMetadataChange(args)) => { let chain_ctx = ctx.borrow_mut_chain_or_exit(); @@ -352,7 +335,7 @@ impl CliApi { ) .await?; } - // Eth bridge + #[cfg(feature = "namada-eth-bridge")] Sub::AddToEthBridgePool(args) => { let args = args.0; let chain_ctx = ctx.borrow_mut_chain_or_exit(); @@ -366,6 +349,13 @@ impl CliApi { let namada = ctx.to_sdk(client, io); tx::submit_bridge_pool_tx(&namada, args).await?; } + #[cfg(not(feature = "namada-eth-bridge"))] + Sub::AddToEthBridgePool(_) => { + display_line!( + &io, + "The Namada Ethereum bridge is disabled" + ); + } Sub::TxUnjailValidator(TxUnjailValidator(args)) => { let chain_ctx = ctx.borrow_mut_chain_or_exit(); let ledger_address = @@ -533,6 +523,18 @@ impl CliApi { let namada = ctx.to_sdk(client, io); rpc::query_balance(&namada, args).await; } + Sub::QueryIbcToken(QueryIbcToken(args)) => { + let chain_ctx = ctx.borrow_mut_chain_or_exit(); + let ledger_address = + chain_ctx.get(&args.query.ledger_address); + let client = client.unwrap_or_else(|| { + C::from_tendermint_address(&ledger_address) + }); + client.wait_until_node_is_synced(&io).await?; + let args = args.to_sdk(&mut ctx); + let namada = ctx.to_sdk(client, io); + rpc::query_ibc_tokens(&namada, args).await; + } Sub::QueryBonds(QueryBonds(args)) => { let chain_ctx = ctx.borrow_mut_chain_or_exit(); let ledger_address = @@ -742,20 +744,6 @@ impl CliApi { let namada = ctx.to_sdk(client, io); tx::sign_tx(&namada, args).await?; } - Sub::GenIbcShieldedTransfer(GenIbcShieldedTransfer( - args, - )) => { - let chain_ctx = ctx.borrow_mut_chain_or_exit(); - let ledger_address = - chain_ctx.get(&args.query.ledger_address); - let client = client.unwrap_or_else(|| { - C::from_tendermint_address(&ledger_address) - }); - 
client.wait_until_node_is_synced(&io).await?; - let args = args.to_sdk(&mut ctx); - let namada = ctx.to_sdk(client, io); - tx::gen_ibc_shielded_transfer(&namada, args).await?; - } } } cli::NamadaClient::WithoutContext(cmd, global_args) => match cmd { diff --git a/crates/apps/src/lib/cli/context.rs b/crates/apps/src/lib/cli/context.rs index 2cb586bfa0..bf4d0569e4 100644 --- a/crates/apps/src/lib/cli/context.rs +++ b/crates/apps/src/lib/cli/context.rs @@ -9,7 +9,7 @@ use color_eyre::eyre::Result; use namada::core::address::{Address, InternalAddress}; use namada::core::chain::ChainId; use namada::core::ethereum_events::EthAddress; -use namada::core::ibc::is_ibc_denom; +use namada::core::ibc::{is_ibc_denom, is_nft_trace}; use namada::core::key::*; use namada::core::masp::*; use namada::io::Io; @@ -439,6 +439,11 @@ impl ArgFromContext for Address { }) .ok_or(Skip) }) + .or_else(|_| { + is_nft_trace(raw) + .map(|(_, _, _)| ibc_token(raw)) + .ok_or(Skip) + }) // Or it can be an alias that may be found in the wallet .or_else(|_| { ctx.wallet diff --git a/crates/apps/src/lib/cli/relayer.rs b/crates/apps/src/lib/cli/relayer.rs index 37f0cada62..6e4391190a 100644 --- a/crates/apps/src/lib/cli/relayer.rs +++ b/crates/apps/src/lib/cli/relayer.rs @@ -1,14 +1,26 @@ use color_eyre::eyre::Result; use namada::io::Io; -use namada_sdk::eth_bridge::{bridge_pool, validator_set}; use crate::cli; use crate::cli::api::{CliApi, CliClient}; -use crate::cli::args::{CliToSdk, CliToSdkCtxless}; -use crate::cli::cmds::*; -use crate::cli::utils::get_eth_rpc_client; impl CliApi { + #[cfg(not(feature = "namada-eth-bridge"))] + pub async fn handle_relayer_command( + _client: Option, + _cmd: cli::NamadaRelayer, + io: impl Io, + ) -> Result<()> + where + C: CliClient, + { + use namada_sdk::display_line; + + display_line!(&io, "The Namada Ethereum bridge is disabled"); + Ok(()) + } + + #[cfg(feature = "namada-eth-bridge")] pub async fn handle_relayer_command( client: Option, cmd: cli::NamadaRelayer, @@ 
-17,6 +29,12 @@ impl CliApi { where C: CliClient, { + use namada_sdk::eth_bridge::{bridge_pool, validator_set}; + + use crate::cli::args::{CliToSdk, CliToSdkCtxless}; + use crate::cli::cmds::*; + use crate::cli::utils::get_eth_rpc_client; + match cmd { cli::NamadaRelayer::EthBridgePoolWithCtx(boxed) => { let (sub, mut ctx) = *boxed; diff --git a/crates/apps/src/lib/cli/utils.rs b/crates/apps/src/lib/cli/utils.rs index 5faee0318c..ea61718a5c 100644 --- a/crates/apps/src/lib/cli/utils.rs +++ b/crates/apps/src/lib/cli/utils.rs @@ -22,6 +22,7 @@ use crate::cli::context::FromContext; /// keys are stored. // TODO: remove this in favor of getting eth keys from // namadaw, ledger, or something more secure +#[cfg_attr(not(feature = "namada-eth-bridge"), allow(dead_code))] const RELAYER_KEY_ENV_VAR: &str = "NAMADA_RELAYER_KEY"; // We only use static strings @@ -431,6 +432,7 @@ pub fn safe_exit(_: i32) -> ! { } /// Load an Ethereum wallet from the environment. +#[cfg_attr(not(feature = "namada-eth-bridge"), allow(dead_code))] fn get_eth_signer_from_env(chain_id: u64) -> Option { let relayer_key = std::env::var(RELAYER_KEY_ENV_VAR).ok()?; let relayer_key = HEXLOWER_PERMISSIVE.decode(relayer_key.as_ref()).ok()?; @@ -443,6 +445,7 @@ fn get_eth_signer_from_env(chain_id: u64) -> Option { } /// Return an Ethereum RPC client. +#[cfg_attr(not(feature = "namada-eth-bridge"), allow(dead_code))] pub async fn get_eth_rpc_client(url: &str) -> Arc { let client = Provider::::try_from(url) .expect("Failed to instantiate Ethereum RPC client"); diff --git a/crates/apps/src/lib/client/rpc.rs b/crates/apps/src/lib/client/rpc.rs index 6ae61f5534..307c12b516 100644 --- a/crates/apps/src/lib/client/rpc.rs +++ b/crates/apps/src/lib/client/rpc.rs @@ -1,8 +1,7 @@ //! 
Client RPC queries use std::cmp::Ordering; -use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet}; -use std::fs::{self, read_dir}; +use std::collections::{BTreeMap, BTreeSet}; use std::io; use std::str::FromStr; @@ -14,19 +13,15 @@ use masp_primitives::merkle_tree::MerklePath; use masp_primitives::sapling::{Node, ViewingKey}; use masp_primitives::transaction::components::I128Sum; use masp_primitives::zip32::ExtendedFullViewingKey; -use namada::core::address::{Address, InternalAddress, MASP}; +use namada::core::address::{Address, InternalAddress, MASP, MULTITOKEN}; +use namada::core::collections::{HashMap, HashSet}; use namada::core::hash::Hash; -use namada::core::ibc::{is_ibc_denom, IbcTokenHash}; use namada::core::key::*; use namada::core::masp::{BalanceOwner, ExtendedViewingKey, PaymentAddress}; use namada::core::storage::{ BlockHeight, BlockResults, Epoch, IndexedTx, Key, KeySeg, }; use namada::core::token::{Change, MaspDigitPos}; -use namada::governance::cli::offline::{ - find_offline_proposal, find_offline_votes, read_offline_files, - OfflineSignedProposal, OfflineVote, -}; use namada::governance::parameters::GovernanceParameters; use namada::governance::pgf::parameters::PgfParameters; use namada::governance::pgf::storage::steward::StewardDetail; @@ -34,14 +29,11 @@ use namada::governance::storage::keys as governance_storage; use namada::governance::storage::proposal::{ StoragePgfFunding, StorageProposal, }; -use namada::governance::utils::{ - compute_proposal_result, ProposalVotes, TallyType, TallyVote, VotePower, -}; +use namada::governance::utils::{ProposalVotes, VotePower}; +use namada::governance::ProposalVote; use namada::io::Io; use namada::ledger::events::Event; -use namada::ledger::ibc::storage::{ - ibc_denom_key, ibc_denom_key_prefix, is_ibc_denom_key, -}; +use namada::ledger::ibc::storage::ibc_trace_key; use namada::ledger::parameters::{storage as param_storage, EpochDuration}; use namada::ledger::pos::types::{CommissionPair, Slash}; use 
namada::ledger::pos::PosParams; @@ -58,7 +50,7 @@ use namada_sdk::rpc::{ self, enriched_bonds_and_unbonds, query_epoch, TxResponse, }; use namada_sdk::tendermint_rpc::endpoint::status; -use namada_sdk::tx::{display_inner_resp, display_wrapper_resp_and_get_result}; +use namada_sdk::tx::display_inner_resp; use namada_sdk::wallet::AddressVpType; use namada_sdk::{display, display_line, edisplay_line, error, prompt, Namada}; use tokio::time::Instant; @@ -177,7 +169,8 @@ pub async fn query_transfers( let mut shielded = context.shielded_mut().await; let _ = shielded.load().await; // Precompute asset types to increase chances of success in decoding - let token_map = query_tokens(context, None, None).await; + let token_map = + query_tokens(context, query_token.as_ref(), None, true).await; let tokens = token_map.values().collect(); let _ = shielded .precompute_asset_types(context.client(), tokens) @@ -201,8 +194,12 @@ pub async fn query_transfers( .map(|fvk| (ExtendedFullViewingKey::from(*fvk).fvk.vk, fvk)) .collect(); // Now display historical shielded and transparent transactions - for (IndexedTx { height, index: idx }, (epoch, tfer_delta, tx_delta)) in - transfers + for ( + IndexedTx { + height, index: idx, .. 
+ }, + (epoch, tfer_delta, tx_delta), + ) in transfers { // Check if this transfer pertains to the supplied owner let mut relevant = match &query_owner { @@ -378,15 +375,16 @@ pub async fn query_transparent_balance( context: &impl Namada, args: args::QueryBalance, ) { - let prefix = Key::from( - Address::Internal(namada::core::address::InternalAddress::Multitoken) - .to_db_key(), - ); match (args.token, args.owner) { (Some(base_token), Some(owner)) => { let owner = owner.address().unwrap(); - let tokens = - query_tokens(context, Some(&base_token), Some(&owner)).await; + let tokens = query_tokens( + context, + Some(&base_token), + Some(&owner), + args.show_ibc_tokens, + ) + .await; for (token_alias, token) in tokens { let balance_key = token::storage_key::balance_key(&token, &owner); @@ -425,7 +423,9 @@ pub async fn query_transparent_balance( } (None, Some(owner)) => { let owner = owner.address().unwrap(); - let tokens = query_tokens(context, None, Some(&owner)).await; + let tokens = + query_tokens(context, None, Some(&owner), args.show_ibc_tokens) + .await; for (token_alias, token) in tokens { let balance = get_token_balance(context.client(), &token, &owner).await; @@ -436,7 +436,13 @@ pub async fn query_transparent_balance( } } (Some(base_token), None) => { - let tokens = query_tokens(context, Some(&base_token), None).await; + let tokens = query_tokens( + context, + Some(&base_token), + None, + args.show_ibc_tokens, + ) + .await; for (_, token) in tokens { let prefix = token::storage_key::balance_prefix(&token); let balances = @@ -448,6 +454,7 @@ pub async fn query_transparent_balance( } } (None, None) => { + let prefix = Key::from(MULTITOKEN.to_db_key()); let balances = query_storage_prefix(context, &prefix).await; if let Some(balances) = balances { print_balances(context, balances, None, None).await; @@ -481,7 +488,9 @@ pub async fn query_pinned_balance( .collect(); let _ = context.shielded_mut().await.load().await; // Precompute asset types to increase chances 
of success in decoding - let token_map = query_tokens(context, None, None).await; + let token_map = + query_tokens(context, args.token.as_ref(), None, args.show_ibc_tokens) + .await; let tokens = token_map.values().collect(); let _ = context .shielded_mut() @@ -552,8 +561,13 @@ pub async fn query_pinned_balance( ) } (Ok((balance, _undecoded, epoch)), Some(base_token)) => { - let tokens = - query_tokens(context, Some(base_token), None).await; + let tokens = query_tokens( + context, + Some(base_token), + None, + args.show_ibc_tokens, + ) + .await; for (token_alias, token) in &tokens { let total_balance = balance .0 @@ -725,17 +739,26 @@ async fn lookup_token_alias( token: &Address, owner: &Address, ) -> String { - if let Address::Internal(InternalAddress::IbcToken(trace_hash)) = token { - let ibc_denom_key = - ibc_denom_key(owner.to_string(), trace_hash.to_string()); - match query_storage_value::<_, String>(context.client(), &ibc_denom_key) + match token { + Address::Internal(InternalAddress::IbcToken(trace_hash)) => { + let ibc_trace_key = + ibc_trace_key(owner.to_string(), trace_hash.to_string()); + match query_storage_value::<_, String>( + context.client(), + &ibc_trace_key, + ) .await - { - Ok(ibc_denom) => get_ibc_denom_alias(context, ibc_denom).await, - Err(_) => token.to_string(), + { + Ok(ibc_trace) => { + context.wallet().await.lookup_ibc_token_alias(ibc_trace) + } + Err(_) => token.to_string(), + } } - } else { - context.wallet().await.lookup_alias(token) + Address::Internal(InternalAddress::Erc20(eth_addr)) => { + eth_addr.to_canonical() + } + _ => context.wallet().await.lookup_alias(token), } } @@ -744,79 +767,88 @@ async fn query_tokens( context: &impl Namada, base_token: Option<&Address>, owner: Option<&Address>, + show_ibc_tokens: bool, ) -> BTreeMap { let wallet = context.wallet().await; - let mut base_token = base_token; - // Base tokens - let mut tokens = match base_token { - Some(base_token) => { - let mut map = BTreeMap::new(); - if let 
Some(alias) = wallet.find_alias(base_token) { - map.insert(alias.to_string(), base_token.clone()); + let mut tokens = BTreeMap::new(); + match base_token { + Some(token) + if matches!( + token, + Address::Internal(InternalAddress::IbcToken(_)) + ) => + { + let ibc_denom = + rpc::query_ibc_denom(context, token.to_string(), owner).await; + let alias = + context.wallet().await.lookup_ibc_token_alias(ibc_denom); + tokens.insert(alias, token.clone()); + // we don't need to check other IBC prefixes + return tokens; + } + Some(token) => { + if let Address::Internal(InternalAddress::Erc20(eth_addr)) = token { + tokens.insert(eth_addr.to_string(), token.clone()); + } else { + let alias = wallet + .find_alias(token) + .map(|alias| alias.to_string()) + .unwrap_or(token.to_string()); + tokens.insert(alias, token.clone()); } - map } - None => wallet.tokens_with_aliases(), - }; + None => tokens = wallet.tokens_with_aliases(), + } - // Check all IBC denoms if the token isn't an pre-existing token - if tokens.is_empty() { - base_token = None; + if !show_ibc_tokens { + return tokens; } - let prefixes = match (base_token, owner) { - (Some(base_token), Some(owner)) => vec![ - ibc_denom_key_prefix(Some(base_token.to_string())), - ibc_denom_key_prefix(Some(owner.to_string())), - ], - (Some(base_token), None) => { - vec![ibc_denom_key_prefix(Some(base_token.to_string()))] - } - (None, Some(_)) => { - // Check all IBC denoms because the owner might not know IBC token - // transfers in the same chain - vec![ibc_denom_key_prefix(None)] - } - (None, None) => vec![ibc_denom_key_prefix(None)], - }; - for prefix in prefixes { - let ibc_denoms = query_storage_prefix::(context, &prefix).await; - if let Some(ibc_denoms) = ibc_denoms { - for (key, ibc_denom) in ibc_denoms { - if let Some((_, hash)) = is_ibc_denom_key(&key) { - let ibc_denom_alias = - get_ibc_denom_alias(context, ibc_denom).await; - let hash: IbcTokenHash = hash.parse().expect( - "Parsing an IBC token hash from storage shouldn't 
fail", - ); - let ibc_token = - Address::Internal(InternalAddress::IbcToken(hash)); - tokens.insert(ibc_denom_alias, ibc_token); - } + match rpc::query_ibc_tokens( + context, + base_token.map(|t| t.to_string()), + owner, + ) + .await + { + Ok(ibc_tokens) => { + for (trace, addr) in ibc_tokens { + let ibc_trace_alias = + context.wallet().await.lookup_ibc_token_alias(trace); + tokens.insert(ibc_trace_alias, addr); } } + Err(e) => { + edisplay_line!(context.io(), "IBC token query failed: {}", e); + } } tokens } -async fn get_ibc_denom_alias( +pub async fn query_ibc_tokens( context: &impl Namada, - ibc_denom: impl AsRef, -) -> String { + args: args::QueryIbcToken, +) { let wallet = context.wallet().await; - is_ibc_denom(&ibc_denom) - .map(|(trace_path, base_token)| { - let base_token_alias = match Address::decode(&base_token) { - Ok(base_token) => wallet.lookup_alias(&base_token), - Err(_) => base_token, - }; - if trace_path.is_empty() { - base_token_alias - } else { - format!("{}/{}", trace_path, base_token_alias) + let token = args.token.map(|t| { + wallet + .find_address(&t) + .map(|addr| addr.to_string()) + .unwrap_or(t) + }); + let owner = args.owner.map(|o| o.address().unwrap_or(MASP)); + match rpc::query_ibc_tokens(context, token, owner.as_ref()).await { + Ok(ibc_tokens) => { + for (trace, addr) in ibc_tokens { + let alias = + context.wallet().await.lookup_ibc_token_alias(trace); + display_line!(context.io(), "{}: {}", alias, addr); } - }) - .unwrap_or(ibc_denom.as_ref().to_string()) + } + Err(e) => { + edisplay_line!(context.io(), "IBC token query failed: {}", e); + } + } } /// Query votes for the given proposal @@ -931,7 +963,13 @@ pub async fn query_shielded_balance( let mut shielded = context.shielded_mut().await; let _ = shielded.load().await; // Precompute asset types to increase chances of success in decoding - let token_map = query_tokens(context, None, None).await; + let token_map = query_tokens( + context, + args.token.as_ref(), + None, + 
args.show_ibc_tokens, + ) + .await; let tokens = token_map.values().collect(); let _ = shielded .precompute_asset_types(context.client(), tokens) @@ -945,8 +983,13 @@ pub async fn query_shielded_balance( match (args.token, owner.is_some()) { // Here the user wants to know the balance for a specific token (Some(base_token), true) => { - let tokens = - query_tokens(context, Some(&base_token), Some(&MASP)).await; + let tokens = query_tokens( + context, + Some(&base_token), + Some(&MASP), + args.show_ibc_tokens, + ) + .await; for (token_alias, token) in tokens { // Query the multi-asset balance at the given spending key let viewing_key = @@ -1072,7 +1115,13 @@ pub async fn query_shielded_balance( // Here the user wants to know the balance for a specific token across // users (Some(base_token), false) => { - let tokens = query_tokens(context, Some(&base_token), None).await; + let tokens = query_tokens( + context, + Some(&base_token), + None, + args.show_ibc_tokens, + ) + .await; for (token_alias, token) in tokens { let mut found_any = false; display_line!(context.io(), "Shielded Token {}:", token_alias); @@ -1251,133 +1300,43 @@ pub async fn query_proposal_result( context: &impl Namada, args: args::QueryProposalResult, ) { - if args.proposal_id.is_some() { - let proposal_id = - args.proposal_id.expect("Proposal id should be defined."); + let proposal_id = args.proposal_id; - let current_epoch = query_epoch(context.client()).await.unwrap(); - let proposal_result = namada_sdk::rpc::query_proposal_result( - context.client(), - proposal_id, - ) - .await; - let proposal_query = namada_sdk::rpc::query_proposal_by_id( - context.client(), - proposal_id, - ) - .await; + let current_epoch = query_epoch(context.client()).await.unwrap(); + let proposal_result = + namada_sdk::rpc::query_proposal_result(context.client(), proposal_id) + .await; + let proposal_query = + namada_sdk::rpc::query_proposal_by_id(context.client(), proposal_id) + .await; - if let (Ok(Some(proposal_result)), 
Ok(Some(proposal_query))) = - (proposal_result, proposal_query) - { - display_line!(context.io(), "Proposal Id: {} ", proposal_id); - if current_epoch > proposal_query.voting_end_epoch { - display_line!(context.io(), "{:4}{}", "", proposal_result); + if let (Ok(Some(proposal_result)), Ok(Some(proposal_query))) = + (proposal_result, proposal_query) + { + display_line!(context.io(), "Proposal Id: {} ", proposal_id); + if current_epoch > proposal_query.voting_end_epoch { + display_line!(context.io(), "{:4}{}", "", proposal_result); + } else { + display_line!( + context.io(), + "{:4}Still voting until epoch {}", + "", + proposal_query.voting_end_epoch + ); + let res = format!("{}", proposal_result); + if let Some(idx) = res.find(' ') { + let slice = &res[idx..]; + display_line!(context.io(), "{:4}Currently{}", "", slice); } else { display_line!( context.io(), - "{:4}Still voting until epoch {}", + "{:4}Error parsing the result string", "", - proposal_query.voting_end_epoch ); - let res = format!("{}", proposal_result); - if let Some(idx) = res.find(' ') { - let slice = &res[idx..]; - display_line!(context.io(), "{:4}Currently{}", "", slice); - } else { - display_line!( - context.io(), - "{:4}Error parsing the result string", - "", - ); - } } - } else { - edisplay_line!(context.io(), "Proposal {} not found.", proposal_id); - }; + } } else { - let proposal_folder = args.proposal_folder.expect( - "The argument --proposal-folder is required with --offline.", - ); - let data_directory = read_dir(&proposal_folder).unwrap_or_else(|_| { - panic!( - "Should be able to read {} directory.", - proposal_folder.to_string_lossy() - ) - }); - let files = read_offline_files(data_directory); - let proposal_path = find_offline_proposal(&files); - - let proposal = if let Some(path) = proposal_path { - let proposal_file = - fs::File::open(path).expect("file should open read only"); - let proposal: OfflineSignedProposal = - serde_json::from_reader(proposal_file) - .expect("file should be 
proper JSON"); - - let author_account = rpc::get_account_info( - context.client(), - &proposal.proposal.author, - ) - .await - .unwrap() - .expect("Account should exist."); - - let proposal = proposal.validate( - &author_account.public_keys_map, - author_account.threshold, - false, - ); - - if let Ok(proposal) = proposal { - proposal - } else { - edisplay_line!( - context.io(), - "The offline proposal is not valid." - ); - return; - } - } else { - edisplay_line!( - context.io(), - "Couldn't find a file name offline_proposal_*.json." - ); - return; - }; - - let votes = find_offline_votes(&files) - .iter() - .map(|path| { - let vote_file = fs::File::open(path).expect(""); - let vote: OfflineVote = - serde_json::from_reader(vote_file).expect(""); - vote - }) - .collect::>(); - - let proposal_votes = - compute_offline_proposal_votes(context, &proposal, votes.clone()) - .await; - let total_voting_power = get_total_staked_tokens( - context.client(), - proposal.proposal.tally_epoch, - ) - .await; - - let proposal_result = compute_proposal_result( - proposal_votes, - total_voting_power, - TallyType::TwoThirds, - ); - - display_line!( - context.io(), - "Proposal offline: {}", - proposal.proposal.hash() - ); - display_line!(context.io(), "Parsed {} votes.", votes.len()); - display_line!(context.io(), "{:4}{}", "", proposal_result); + edisplay_line!(context.io(), "Proposal {} not found.", proposal_id); } } @@ -2769,23 +2728,10 @@ pub async fn query_result(context: &impl Namada, args: args::QueryResult) { Ok(resp) => { display_inner_resp(context, &resp); } - Err(err1) => { - // If this fails then instead look for an acceptance event. 
- let wrapper_resp = query_tx_response( - context.client(), - namada_sdk::rpc::TxEventQuery::Accepted(&args.tx_hash), - ) - .await; - match wrapper_resp { - Ok(resp) => { - display_wrapper_resp_and_get_result(context, &resp); - } - Err(err2) => { - // Print the errors that caused the lookups to fail - edisplay_line!(context.io(), "{}\n{}", err1, err2); - cli::safe_exit(1) - } - } + Err(err) => { + // Print the errors that caused the lookups to fail + edisplay_line!(context.io(), "{}", err); + cli::safe_exit(1) } } } @@ -2895,69 +2841,6 @@ fn unwrap_client_response( }) } -pub async fn compute_offline_proposal_votes( - context: &impl Namada, - proposal: &OfflineSignedProposal, - votes: Vec, -) -> ProposalVotes { - let mut validators_vote: HashMap = HashMap::default(); - let mut validator_voting_power: HashMap = - HashMap::default(); - let mut delegators_vote: HashMap = HashMap::default(); - let mut delegator_voting_power: HashMap< - Address, - HashMap, - > = HashMap::default(); - for vote in votes { - let is_validator = is_validator(context.client(), &vote.address).await; - let is_delegator = is_delegator(context.client(), &vote.address).await; - if is_validator { - let validator_stake = get_validator_stake( - context.client(), - proposal.proposal.tally_epoch, - &vote.address, - ) - .await - .unwrap_or_default(); - validators_vote.insert(vote.address.clone(), vote.clone().into()); - validator_voting_power - .insert(vote.address.clone(), validator_stake); - } else if is_delegator { - let validators = get_delegators_delegation_at( - context.client(), - &vote.address.clone(), - proposal.proposal.tally_epoch, - ) - .await; - - for validator in vote.delegations.clone() { - let delegator_stake = - validators.get(&validator).cloned().unwrap_or_default(); - - delegators_vote - .insert(vote.address.clone(), vote.clone().into()); - delegator_voting_power - .entry(vote.address.clone()) - .or_default() - .insert(validator, delegator_stake); - } - } else { - display_line!( - 
context.io(), - "Skipping vote, not a validator/delegator at epoch {}.", - proposal.proposal.tally_epoch - ); - } - } - - ProposalVotes { - validators_vote, - validator_voting_power, - delegators_vote, - delegator_voting_power, - } -} - pub async fn compute_proposal_votes< C: namada::ledger::queries::Client + Sync, >( @@ -2969,10 +2852,12 @@ pub async fn compute_proposal_votes< .await .unwrap(); - let mut validators_vote: HashMap = HashMap::default(); + let mut validators_vote: HashMap = + HashMap::default(); let mut validator_voting_power: HashMap = HashMap::default(); - let mut delegators_vote: HashMap = HashMap::default(); + let mut delegators_vote: HashMap = + HashMap::default(); let mut delegator_voting_power: HashMap< Address, HashMap, @@ -2985,7 +2870,7 @@ pub async fn compute_proposal_votes< .await .unwrap_or_default(); - validators_vote.insert(vote.validator.clone(), vote.data.into()); + validators_vote.insert(vote.validator.clone(), vote.data); validator_voting_power.insert(vote.validator, validator_stake); } else { let delegator_stake = get_bond_amount_at( @@ -2997,8 +2882,7 @@ pub async fn compute_proposal_votes< .await; if let Some(stake) = delegator_stake { - delegators_vote - .insert(vote.delegator.clone(), vote.data.into()); + delegators_vote.insert(vote.delegator.clone(), vote.data); delegator_voting_power .entry(vote.delegator.clone()) .or_default() diff --git a/crates/apps/src/lib/client/tx.rs b/crates/apps/src/lib/client/tx.rs index 30d41f17c7..5c67d3aef4 100644 --- a/crates/apps/src/lib/client/tx.rs +++ b/crates/apps/src/lib/client/tx.rs @@ -1,6 +1,4 @@ -use std::collections::HashSet; use std::fs::File; -use std::io::Write; use borsh::BorshDeserialize; use borsh_ext::BorshSerializeExt; @@ -8,22 +6,16 @@ use ledger_namada_rs::{BIP44Path, NamadaApp}; use ledger_transport_hid::hidapi::HidApi; use ledger_transport_hid::TransportNativeHID; use namada::core::address::{Address, ImplicitAddress}; -use namada::core::dec::Dec; -use 
namada::core::key::{self, *}; -use namada::governance::cli::offline::{ - OfflineProposal, OfflineSignedProposal, OfflineVote, -}; +use namada::core::collections::HashSet; +use namada::core::key::*; use namada::governance::cli::onchain::{ DefaultProposal, PgfFundingProposal, PgfStewardProposal, }; -use namada::governance::ProposalVote; -use namada::ibc::apps::transfer::types::Memo; use namada::io::Io; use namada::state::EPOCH_SWITCH_BLOCKS_DELAY; -use namada::tx::data::pos::{BecomeValidator, ConsensusKeyChange}; use namada::tx::{CompressedSignature, Section, Signer, Tx}; +use namada_sdk::args::TxBecomeValidator; use namada_sdk::rpc::{InnerTxResult, TxBroadcastData, TxResponse}; -use namada_sdk::signing::validate_fee_and_gen_unshield; use namada_sdk::wallet::alias::validator_consensus_key; use namada_sdk::wallet::{Wallet, WalletIo}; use namada_sdk::{display_line, edisplay_line, error, signing, tx, Namada}; @@ -32,10 +24,7 @@ use tokio::sync::RwLock; use super::rpc; use crate::cli::{args, safe_exit}; -use crate::client::rpc::query_wasm_code_hash; -use crate::client::tx::signing::{ - default_sign, init_validator_signing_data, SigningTxData, -}; +use crate::client::tx::signing::{default_sign, SigningTxData}; use crate::client::tx::tx::ProcessTxResponse; use crate::config::TendermintMode; use crate::facade::tendermint_rpc::endpoint::broadcast::tx_sync::Response; @@ -133,7 +122,7 @@ pub async fn with_hardware_wallet<'a, U: WalletIo + Clone>( }; // Expand out the signature before adding it to the // transaction - tx.add_section(Section::Signature(compressed.expand(&tx))); + tx.add_section(Section::Authorization(compressed.expand(&tx))); } // Sign the fee header if that is requested if parts.contains(&signing::Signable::FeeHeader) { @@ -151,7 +140,7 @@ pub async fn with_hardware_wallet<'a, U: WalletIo + Clone>( }; // Expand out the signature before adding it to the // transaction - tx.add_section(Section::Signature(compressed.expand(&tx))); + 
tx.add_section(Section::Authorization(compressed.expand(&tx))); } Ok(tx) } @@ -217,7 +206,8 @@ pub async fn submit_reveal_aux( if tx::is_reveal_pk_needed(context.client(), address, args.force) .await? { - println!( + display_line!( + context.io(), "Submitting a tx to reveal the public key for address \ {address}..." ); @@ -320,22 +310,10 @@ where pub async fn submit_change_consensus_key( namada: &impl Namada, - config: &mut crate::config::Config, - args::ConsensusKeyChange { - tx: tx_args, - validator, - consensus_key, - unsafe_dont_encrypt, - tx_code_path: _, - }: args::ConsensusKeyChange, + args: args::ConsensusKeyChange, ) -> Result<(), error::Error> { - let tx_args = args::Tx { - chain_id: tx_args - .clone() - .chain_id - .or_else(|| Some(config.ledger.chain_id.clone())), - ..tx_args.clone() - }; + let validator = args.validator; + let consensus_key = args.consensus_key; // Determine the alias for the new key let mut wallet = namada.wallet_mut().await; @@ -369,13 +347,13 @@ pub async fn submit_change_consensus_key( .unwrap_or_else(|| { display_line!(namada.io(), "Generating new consensus key..."); let password = - read_and_confirm_encryption_password(unsafe_dont_encrypt); + read_and_confirm_encryption_password(args.unsafe_dont_encrypt); wallet .gen_store_secret_key( // Note that TM only allows ed25519 for consensus key SchemeType::Ed25519, Some(consensus_key_alias.clone()), - tx_args.wallet_alias_force, + args.tx.wallet_alias_force, password, &mut OsRng, ) @@ -383,66 +361,25 @@ pub async fn submit_change_consensus_key( .1 .ref_to() }); + // To avoid wallet deadlocks in following operations drop(wallet); - // Check that the new consensus key is unique - let consensus_keys = rpc::query_consensus_keys(namada.client()).await; - - if consensus_keys.contains(&new_key) { - edisplay_line!(namada.io(), "The consensus key is already being used."); - safe_exit(1) - } - - let tx_code_hash = - query_wasm_code_hash(namada, args::TX_CHANGE_CONSENSUS_KEY_WASM) - .await - 
.unwrap(); - - let chain_id = tx_args.chain_id.clone().unwrap(); - let mut tx = Tx::new(chain_id, tx_args.expiration); - - let data = ConsensusKeyChange { + let args = namada::sdk::args::ConsensusKeyChange { validator: validator.clone(), - consensus_key: new_key.clone(), - }; - - tx.add_code_from_hash( - tx_code_hash, - Some(args::TX_CHANGE_CONSENSUS_KEY_WASM.to_string()), - ) - .add_data(data); - - if let Some(memo) = &tx_args.memo { - tx.add_memo(memo); + consensus_key: Some(new_key.clone()), + ..args }; - let signing_data = - init_validator_signing_data(namada, &tx_args, vec![new_key]).await?; - let (fee_amount, _, unshield) = validate_fee_and_gen_unshield( - namada, - &tx_args, - &signing_data.fee_payer, - ) - .await?; - - tx::prepare_tx( - namada.client(), - &tx_args, - &mut tx, - unshield, - fee_amount, - signing_data.fee_payer.clone(), - ) - .await?; + let (mut tx, signing_data) = args.build(namada).await?; - if tx_args.dump_tx { - tx::dump_tx(namada.io(), &tx_args, tx); + if args.tx.dump_tx { + tx::dump_tx(namada.io(), &args.tx, tx); } else { - sign(namada, &mut tx, &tx_args, signing_data).await?; - let resp = namada.submit(tx, &tx_args).await?; + sign(namada, &mut tx, &args.tx, signing_data).await?; + let resp = namada.submit(tx, &args.tx).await?; - if !tx_args.dry_run { + if !args.tx.dry_run { if resp.is_applied_and_valid().is_some() { namada.wallet_mut().await.save().unwrap_or_else(|err| { edisplay_line!(namada.io(), "{}", err) @@ -471,106 +408,10 @@ pub async fn submit_change_consensus_key( pub async fn submit_become_validator( namada: &impl Namada, config: &mut crate::config::Config, - args::TxBecomeValidator { - tx: tx_args, - address, - scheme, - consensus_key, - eth_cold_key, - eth_hot_key, - protocol_key, - commission_rate, - max_commission_rate_change, - email, - website, - description, - discord_handle, - avatar, - unsafe_dont_encrypt, - tx_code_path, - }: args::TxBecomeValidator, + args: args::TxBecomeValidator, ) -> Result<(), error::Error> { - 
let tx_args = args::Tx { - chain_id: tx_args - .clone() - .chain_id - .or_else(|| Some(config.ledger.chain_id.clone())), - ..tx_args.clone() - }; - - // Check that the address is established - if !address.is_established() { - edisplay_line!( - namada.io(), - "The given address {address} is not established. Only an \ - established address can become a validator.", - ); - if !tx_args.force { - safe_exit(1) - } - }; - - // Check that the address is not already a validator - if rpc::is_validator(namada.client(), &address).await { - edisplay_line!( - namada.io(), - "The given address {address} is already a validator", - ); - if !tx_args.force { - safe_exit(1) - } - }; - - // If the address is not yet a validator, it cannot have self-bonds, but it - // may have delegations. It has to unbond those before it can become a - // validator. - if rpc::has_bonds(namada.client(), &address).await { - edisplay_line!( - namada.io(), - "The given address {address} has delegations and therefore cannot \ - become a validator. To become a validator, you have to unbond \ - your delegations first.", - ); - if !tx_args.force { - safe_exit(1) - } - } - - // Validate the commission rate data - if commission_rate > Dec::one() || commission_rate < Dec::zero() { - edisplay_line!( - namada.io(), - "The validator commission rate must not exceed 1.0 or 100%, and \ - it must be 0 or positive." - ); - if !tx_args.force { - safe_exit(1) - } - } - if max_commission_rate_change > Dec::one() - || max_commission_rate_change < Dec::zero() - { - edisplay_line!( - namada.io(), - "The validator maximum change in commission rate per epoch must \ - not exceed 1.0 or 100%, and it must be 0 or positive." - ); - if !tx_args.force { - safe_exit(1) - } - } - // Validate the email - if email.is_empty() { - edisplay_line!( - namada.io(), - "The validator email must not be an empty string." 
- ); - if !tx_args.force { - safe_exit(1) - } - } - - let alias = tx_args + let alias = args + .tx .initialized_account_alias .as_ref() .cloned() @@ -583,7 +424,9 @@ pub async fn submit_become_validator( let eth_cold_key_alias = format!("{}-eth-cold-key", alias); let mut wallet = namada.wallet_mut().await; - let consensus_key = consensus_key + let consensus_key = args + .consensus_key + .clone() .map(|key| match key { common::PublicKey::Ed25519(_) => key, common::PublicKey::Secp256k1(_) => { @@ -597,13 +440,13 @@ pub async fn submit_become_validator( .unwrap_or_else(|| { display_line!(namada.io(), "Generating consensus key..."); let password = - read_and_confirm_encryption_password(unsafe_dont_encrypt); + read_and_confirm_encryption_password(args.unsafe_dont_encrypt); wallet .gen_store_secret_key( // Note that TM only allows ed25519 for consensus key SchemeType::Ed25519, Some(consensus_key_alias.clone().into()), - tx_args.wallet_alias_force, + args.tx.wallet_alias_force, password, &mut OsRng, ) @@ -612,7 +455,9 @@ pub async fn submit_become_validator( .ref_to() }); - let eth_cold_pk = eth_cold_key + let eth_cold_pk = args + .eth_cold_key + .clone() .map(|key| match key { common::PublicKey::Secp256k1(_) => key, common::PublicKey::Ed25519(_) => { @@ -626,13 +471,13 @@ pub async fn submit_become_validator( .unwrap_or_else(|| { display_line!(namada.io(), "Generating Eth cold key..."); let password = - read_and_confirm_encryption_password(unsafe_dont_encrypt); + read_and_confirm_encryption_password(args.unsafe_dont_encrypt); wallet .gen_store_secret_key( // Note that ETH only allows secp256k1 SchemeType::Secp256k1, Some(eth_cold_key_alias.clone()), - tx_args.wallet_alias_force, + args.tx.wallet_alias_force, password, &mut OsRng, ) @@ -641,7 +486,9 @@ pub async fn submit_become_validator( .ref_to() }); - let eth_hot_pk = eth_hot_key + let eth_hot_pk = args + .eth_hot_key + .clone() .map(|key| match key { common::PublicKey::Secp256k1(_) => key, 
common::PublicKey::Ed25519(_) => { @@ -655,13 +502,13 @@ pub async fn submit_become_validator( .unwrap_or_else(|| { display_line!(namada.io(), "Generating Eth hot key..."); let password = - read_and_confirm_encryption_password(unsafe_dont_encrypt); + read_and_confirm_encryption_password(args.unsafe_dont_encrypt); wallet .gen_store_secret_key( // Note that ETH only allows secp256k1 SchemeType::Secp256k1, Some(eth_hot_key_alias.clone()), - tx_args.wallet_alias_force, + args.tx.wallet_alias_force, password, &mut OsRng, ) @@ -672,30 +519,40 @@ pub async fn submit_become_validator( // To avoid wallet deadlocks in following operations drop(wallet); - if protocol_key.is_none() { + if args.protocol_key.is_none() { display_line!(namada.io(), "Generating protocol signing key..."); } + // Generate the validator keys let validator_keys = gen_validator_keys( &mut *namada.wallet_mut().await, Some(eth_hot_pk.clone()), - protocol_key, - scheme, + args.protocol_key.clone(), + args.scheme, ) .unwrap(); let protocol_sk = validator_keys.get_protocol_keypair(); let protocol_key = protocol_sk.to_public(); + let args = TxBecomeValidator { + consensus_key: Some(consensus_key.clone()), + eth_cold_key: Some(eth_cold_pk), + eth_hot_key: Some(eth_hot_pk), + protocol_key: Some(protocol_key), + ..args + }; + // Store the protocol key in the wallet so that we can sign the tx with it // to verify ownership display_line!(namada.io(), "Storing protocol key in the wallet..."); - let password = read_and_confirm_encryption_password(unsafe_dont_encrypt); + let password = + read_and_confirm_encryption_password(args.unsafe_dont_encrypt); namada .wallet_mut() .await .insert_keypair( protocol_key_alias, - tx_args.wallet_alias_force, + args.tx.wallet_alias_force, protocol_sk.clone(), password, None, @@ -705,152 +562,84 @@ pub async fn submit_become_validator( "Failed to store the keypair.", )))?; - let tx_code_hash = - query_wasm_code_hash(namada, tx_code_path.to_string_lossy()) - .await - .unwrap(); + let 
(mut tx, signing_data) = args.build(namada).await?; - let chain_id = tx_args.chain_id.clone().unwrap(); - let mut tx = Tx::new(chain_id, tx_args.expiration); - let data = BecomeValidator { - address: address.clone(), - consensus_key: consensus_key.clone(), - eth_cold_key: key::secp256k1::PublicKey::try_from_pk(ð_cold_pk) - .unwrap(), - eth_hot_key: key::secp256k1::PublicKey::try_from_pk(ð_hot_pk) - .unwrap(), - protocol_key, - commission_rate, - max_commission_rate_change, - email, - description, - website, - discord_handle, - avatar, - }; + if args.tx.dump_tx { + tx::dump_tx(namada.io(), &args.tx, tx); + } else { + sign(namada, &mut tx, &args.tx, signing_data).await?; + let resp = namada.submit(tx, &args.tx).await?; - // Put together all the PKs that we have to sign with to verify ownership - let account = namada_sdk::rpc::get_account_info(namada.client(), &address) - .await? - .unwrap_or_else(|| { - edisplay_line!( + if args.tx.dry_run { + display_line!( namada.io(), - "Unable to query account keys for address {address}." + "Transaction dry run. No key or addresses have been saved." 
); - safe_exit(1) - }); - let mut all_pks: Vec<_> = - account.public_keys_map.pk_to_idx.into_keys().collect(); - all_pks.push(consensus_key.clone()); - all_pks.push(eth_cold_pk); - all_pks.push(eth_hot_pk); - all_pks.push(data.protocol_key.clone()); - - tx.add_code_from_hash( - tx_code_hash, - Some(args::TX_BECOME_VALIDATOR_WASM.to_string()), - ) - .add_data(data); - - if let Some(memo) = &tx_args.memo { - tx.add_memo(memo); - }; - - let signing_data = - init_validator_signing_data(namada, &tx_args, all_pks).await?; - let (fee_amount, _, unshield) = validate_fee_and_gen_unshield( - namada, - &tx_args, - &signing_data.fee_payer, - ) - .await?; - - tx::prepare_tx( - namada.client(), - &tx_args, - &mut tx, - unshield, - fee_amount, - signing_data.fee_payer.clone(), - ) - .await?; - - if tx_args.dump_tx { - tx::dump_tx(namada.io(), &tx_args, tx); - } else { - sign(namada, &mut tx, &tx_args, signing_data).await?; - let resp = namada.submit(tx, &tx_args).await?; - - if !tx_args.dry_run { - if resp.is_applied_and_valid().is_some() { - // add validator address and keys to the wallet - let mut wallet = namada.wallet_mut().await; - wallet.add_validator_data(address.clone(), validator_keys); - wallet.save().unwrap_or_else(|err| { - edisplay_line!(namada.io(), "{}", err) - }); - - let tendermint_home = config.ledger.cometbft_dir(); - tendermint_node::write_validator_key( - &tendermint_home, - &wallet.find_key_by_pk(&consensus_key, None).expect( - "unable to find consensus key pair in the wallet", - ), - ) - .unwrap(); - // To avoid wallet deadlocks in following operations - drop(wallet); - tendermint_node::write_validator_state(tendermint_home) - .unwrap(); - - // Write Namada config stuff or figure out how to do the above - // tendermint_node things two epochs in the future!!! 
- config.ledger.shell.tendermint_mode = TendermintMode::Validator; - config - .write( - &config.ledger.shell.base_dir, - &config.ledger.chain_id, - true, - ) - .unwrap(); - - let pos_params = - rpc::query_pos_parameters(namada.client()).await; + safe_exit(0) + } - display_line!(namada.io(), ""); - display_line!( - namada.io(), - "The keys for validator \"{alias}\" were stored in the \ - wallet:" - ); - display_line!( - namada.io(), - " Validator account key \"{}\"", - validator_key_alias - ); - display_line!( - namada.io(), - " Consensus key \"{}\"", - consensus_key_alias - ); - display_line!( - namada.io(), - "The ledger node has been setup to use this validator's \ - address and consensus key." - ); - display_line!( - namada.io(), - "Your validator will be active in {} epochs. Be sure to \ - restart your node for the changes to take effect!", - pos_params.pipeline_len - ); - } - } else { + if resp.is_applied_and_valid().is_none() { display_line!( namada.io(), - "Transaction dry run. No key or addresses have been saved." + "Transaction failed. No key or addresses have been saved." ); + safe_exit(1) } + + // add validator address and keys to the wallet + let mut wallet = namada.wallet_mut().await; + wallet.add_validator_data(args.address.clone(), validator_keys); + wallet + .save() + .unwrap_or_else(|err| edisplay_line!(namada.io(), "{}", err)); + + let tendermint_home = config.ledger.cometbft_dir(); + tendermint_node::write_validator_key( + &tendermint_home, + &wallet + .find_key_by_pk(&consensus_key, None) + .expect("unable to find consensus key pair in the wallet"), + ) + .unwrap(); + // To avoid wallet deadlocks in following operations + drop(wallet); + tendermint_node::write_validator_state(tendermint_home).unwrap(); + + // Write Namada config stuff or figure out how to do the above + // tendermint_node things two epochs in the future!!! 
+ config.ledger.shell.tendermint_mode = TendermintMode::Validator; + config + .write(&config.ledger.shell.base_dir, &config.ledger.chain_id, true) + .unwrap(); + + let pos_params = rpc::query_pos_parameters(namada.client()).await; + + display_line!(namada.io(), ""); + display_line!( + namada.io(), + "The keys for validator \"{alias}\" were stored in the wallet:" + ); + display_line!( + namada.io(), + " Validator account key \"{}\"", + validator_key_alias + ); + display_line!( + namada.io(), + " Consensus key \"{}\"", + consensus_key_alias + ); + display_line!( + namada.io(), + "The ledger node has been setup to use this validator's address \ + and consensus key." + ); + display_line!( + namada.io(), + "Your validator will be active in {} epochs. Be sure to restart \ + your node for the changes to take effect!", + pos_params.pipeline_len + ); } Ok(()) } @@ -897,7 +686,7 @@ pub async fn submit_init_validator( "Cannot proceed to become validator in dry-run as no account has \ been created" ); - safe_exit(1); + safe_exit(1) } let address = address.unwrap_or_else(|| { eprintln!( @@ -1022,52 +811,7 @@ where let current_epoch = rpc::query_and_print_epoch(namada).await; let governance_parameters = rpc::query_governance_parameters(namada.client()).await; - let (mut tx_builder, signing_data) = if args.is_offline { - let proposal = OfflineProposal::try_from(args.proposal_data.as_ref()) - .map_err(|e| { - error::TxSubmitError::FailedGovernaneProposalDeserialize( - e.to_string(), - ) - })? 
- .validate(current_epoch, args.tx.force) - .map_err(|e| { - error::TxSubmitError::InvalidProposal(e.to_string()) - })?; - - let default_signer = Some(proposal.author.clone()); - let signing_data = aux_signing_data( - namada, - &args.tx, - Some(proposal.author.clone()), - default_signer, - ) - .await?; - - let mut wallet = namada.wallet_mut().await; - let signed_offline_proposal = proposal.sign( - args.tx - .signing_keys - .iter() - .map(|pk| wallet.find_key_by_pk(pk, None)) - .collect::>() - .expect("secret keys corresponding to public keys not found"), - &signing_data.account_public_keys_map.unwrap(), - ); - let output_file_path = signed_offline_proposal - .serialize(args.tx.output_folder) - .map_err(|e| { - error::TxSubmitError::FailedGovernaneProposalDeserialize( - e.to_string(), - ) - })?; - - display_line!( - namada.io(), - "Proposal serialized to: {}", - output_file_path - ); - return Ok(()); - } else if args.is_pgf_funding { + let (mut tx_builder, signing_data) = if args.is_pgf_funding { let proposal = PgfFundingProposal::try_from(args.proposal_data.as_ref()) .map_err(|e| { @@ -1162,69 +906,7 @@ pub async fn submit_vote_proposal( where ::Error: std::fmt::Display, { - let (mut tx_builder, signing_data) = if args.is_offline { - let default_signer = Some(args.voter.clone()); - let signing_data = aux_signing_data( - namada, - &args.tx, - Some(args.voter.clone()), - default_signer.clone(), - ) - .await?; - - let proposal_vote = ProposalVote::try_from(args.vote) - .map_err(|_| error::TxSubmitError::InvalidProposalVote)?; - - let proposal = OfflineSignedProposal::try_from( - args.proposal_data.clone().unwrap().as_ref(), - ) - .map_err(|e| error::TxSubmitError::InvalidProposal(e.to_string()))? 
- .validate( - &signing_data.account_public_keys_map.clone().unwrap(), - signing_data.threshold, - args.tx.force, - ) - .map_err(|e| error::TxSubmitError::InvalidProposal(e.to_string()))?; - let delegations = rpc::get_delegators_delegation_at( - namada.client(), - &args.voter, - proposal.proposal.tally_epoch, - ) - .await - .keys() - .cloned() - .collect::>(); - - let offline_vote = OfflineVote::new( - &proposal, - proposal_vote, - args.voter.clone(), - delegations, - ); - - let mut wallet = namada.wallet_mut().await; - let offline_signed_vote = offline_vote.sign( - args.tx - .signing_keys - .iter() - .map(|pk| wallet.find_key_by_pk(pk, None)) - .collect::>() - .expect("secret keys corresponding to public keys not found"), - &signing_data.account_public_keys_map.unwrap(), - ); - let output_file_path = offline_signed_vote - .serialize(args.tx.output_folder) - .expect("Should be able to serialize the offline proposal"); - - display_line!( - namada.io(), - "Proposal vote serialized to: {}", - output_file_path - ); - return Ok(()); - } else { - args.build(namada).await? - }; + let (mut tx_builder, signing_data) = args.build(namada).await?; if args.tx.dump_tx { tx::dump_tx(namada.io(), &args.tx, tx_builder); @@ -1612,30 +1294,3 @@ pub async fn submit_tx( ) -> Result { tx::submit_tx(namada, to_broadcast).await } - -pub async fn gen_ibc_shielded_transfer( - context: &impl Namada, - args: args::GenIbcShieldedTransfer, -) -> Result<(), error::Error> { - if let Some(shielded_transfer) = - tx::gen_ibc_shielded_transfer(context, args.clone()).await? 
- { - let tx_id = shielded_transfer.masp_tx.txid().to_string(); - let filename = format!("ibc_shielded_transfer_{}.memo", tx_id); - let output_path = match &args.output_folder { - Some(path) => path.join(filename), - None => filename.into(), - }; - let mut out = File::create(&output_path) - .expect("Should be able to create the out file."); - out.write_all(Memo::from(shielded_transfer).as_ref().as_bytes()) - .expect("IBC memo should be deserializable."); - println!( - "Output IBC shielded transfer for {tx_id} to {}", - output_path.to_string_lossy() - ); - } else { - eprintln!("No shielded transfer for this IBC transfer.") - } - Ok(()) -} diff --git a/crates/apps/src/lib/config/genesis.rs b/crates/apps/src/lib/config/genesis.rs index 62b31afa52..a71f842ea5 100644 --- a/crates/apps/src/lib/config/genesis.rs +++ b/crates/apps/src/lib/config/genesis.rs @@ -6,7 +6,7 @@ pub mod templates; pub mod transactions; pub mod utils; -use std::collections::{BTreeMap, HashMap}; +use std::collections::BTreeMap; use std::fmt::{Display, Formatter}; use std::str::FromStr; @@ -14,6 +14,7 @@ use borsh::{BorshDeserialize, BorshSerialize}; use derivative::Derivative; use namada::core::address::{Address, EstablishedAddress}; use namada::core::chain::ProposalBytes; +use namada::core::collections::HashMap; use namada::core::key::*; use namada::core::storage; use namada::core::string_encoding::StringEncoded; @@ -310,10 +311,6 @@ pub struct Parameters { pub epochs_per_year: u64, /// Maximum amount of signatures per transaction pub max_signatures_per_transaction: u8, - /// PoS staked ratio (read + write for every epoch) - pub staked_ratio: Dec, - /// PoS inflation amount from the last epoch (read + write for every epoch) - pub pos_inflation_amount: token::Amount, /// Fee unshielding gas limit pub fee_unshielding_gas_limit: u64, /// Fee unshielding descriptions limit @@ -361,6 +358,7 @@ pub fn make_dev_genesis( let mut genesis = finalize( templates, ChainIdPrefix::from_str("test").unwrap(), + 
#[allow(clippy::disallowed_methods)] DateTimeUtc::now(), Duration::from_secs(30).into(), ); diff --git a/crates/apps/src/lib/config/genesis/chain.rs b/crates/apps/src/lib/config/genesis/chain.rs index cfe4659bb6..aea50c5889 100644 --- a/crates/apps/src/lib/config/genesis/chain.rs +++ b/crates/apps/src/lib/config/genesis/chain.rs @@ -9,7 +9,6 @@ use namada::core::address::{ Address, EstablishedAddress, EstablishedAddressGen, }; use namada::core::chain::{ChainId, ChainIdPrefix}; -use namada::core::dec::Dec; use namada::core::hash::Hash; use namada::core::key::{common, RefTo}; use namada::core::time::{DateTimeUtc, DurationNanos, Rfc3339String}; @@ -295,6 +294,7 @@ impl Finalized { max_block_gas, minimum_gas_price, max_tx_bytes, + is_native_token_transferable, .. } = self.parameters.parameters.clone(); @@ -321,8 +321,6 @@ impl Finalized { .into(); let vp_allowlist = vp_allowlist.unwrap_or_default(); let tx_allowlist = tx_allowlist.unwrap_or_default(); - let staked_ratio = Dec::zero(); - let pos_inflation_amount = 0; namada::ledger::parameters::Parameters { max_tx_bytes, @@ -332,8 +330,6 @@ impl Finalized { tx_allowlist, implicit_vp_code_hash, epochs_per_year, - staked_ratio, - pos_inflation_amount: Amount::native_whole(pos_inflation_amount), max_proposal_bytes, max_signatures_per_transaction, fee_unshielding_gas_limit, @@ -348,6 +344,7 @@ impl Finalized { ) }) .collect(), + is_native_token_transferable, } } @@ -444,6 +441,17 @@ impl Finalized { } } + pub fn get_ibc_params(&self) -> namada::ibc::parameters::IbcParameters { + let templates::IbcParams { + default_mint_limit, + default_per_epoch_throughput_limit, + } = self.parameters.ibc_params.clone(); + namada::ibc::parameters::IbcParameters { + default_mint_limit, + default_per_epoch_throughput_limit, + } + } + pub fn get_token_address(&self, alias: &Alias) -> Option<&Address> { self.tokens.token.get(alias).map(|token| &token.address) } @@ -713,6 +721,7 @@ pub struct FinalizedParameters { pub gov_params: 
templates::GovernanceParams, pub pgf_params: namada::governance::pgf::parameters::PgfParameters, pub eth_bridge_params: Option, + pub ibc_params: templates::IbcParams, } impl FinalizedParameters { @@ -723,6 +732,7 @@ impl FinalizedParameters { gov_params, pgf_params, eth_bridge_params, + ibc_params, }: templates::Parameters, ) -> Self { use namada::governance::pgf::parameters::PgfParameters; @@ -737,6 +747,7 @@ impl FinalizedParameters { gov_params, pgf_params: finalized_pgf_params, eth_bridge_params, + ibc_params, } } } diff --git a/crates/apps/src/lib/config/genesis/templates.rs b/crates/apps/src/lib/config/genesis/templates.rs index 217871138c..3c602beb70 100644 --- a/crates/apps/src/lib/config/genesis/templates.rs +++ b/crates/apps/src/lib/config/genesis/templates.rs @@ -240,6 +240,7 @@ pub struct Parameters { pub gov_params: GovernanceParams, pub pgf_params: PgfParams, pub eth_bridge_params: Option, + pub ibc_params: IbcParams, } #[derive( @@ -259,6 +260,8 @@ pub struct ChainParams { /// Name of the native token - this must one of the tokens from /// `tokens.toml` file pub native_token: Alias, + /// Enable the native token transfer if it is true + pub is_native_token_transferable: bool, /// Minimum number of blocks per epoch. // TODO: u64 only works with values up to i64::MAX with toml-rs! 
pub min_num_of_blocks: u64, @@ -310,6 +313,7 @@ impl ChainParams { let ChainParams { max_tx_bytes, native_token, + is_native_token_transferable, min_num_of_blocks, max_expected_time_per_block, max_proposal_bytes, @@ -355,6 +359,7 @@ impl ChainParams { Ok(ChainParams { max_tx_bytes, native_token, + is_native_token_transferable, min_num_of_blocks, max_expected_time_per_block, max_proposal_bytes, @@ -445,7 +450,7 @@ pub struct GovernanceParams { pub max_proposal_period: u64, /// Maximum number of characters in the proposal content pub max_proposal_content_size: u64, - /// Minimum number of epoch between end and grace epoch + /// Minimum number of epochs between the end and activation epochs pub min_proposal_grace_epochs: u64, } @@ -500,6 +505,23 @@ pub struct EthBridgeParams { pub contracts: Contracts, } +#[derive( + Clone, + Debug, + Deserialize, + Serialize, + BorshDeserialize, + BorshSerialize, + PartialEq, + Eq, +)] +pub struct IbcParams { + /// Default supply limit of each token + pub default_mint_limit: token::Amount, + /// Default per-epoch throughput limit of each token + pub default_per_epoch_throughput_limit: token::Amount, +} + impl TokenBalances { pub fn get(&self, addr: &GenesisAddress) -> Option { self.0.get(addr).map(|amt| amt.amount()) @@ -873,6 +895,7 @@ pub fn validate_parameters( gov_params, pgf_params, eth_bridge_params, + ibc_params, } = parameters; match parameters.denominate(tokens) { Err(e) => { @@ -890,6 +913,7 @@ pub fn validate_parameters( valid: Default::default(), }, eth_bridge_params, + ibc_params, }), } } diff --git a/crates/apps/src/lib/config/genesis/transactions.rs b/crates/apps/src/lib/config/genesis/transactions.rs index 1f00d62a8d..bb0eb4af0c 100644 --- a/crates/apps/src/lib/config/genesis/transactions.rs +++ b/crates/apps/src/lib/config/genesis/transactions.rs @@ -1,6 +1,6 @@ //! 
Genesis transactions -use std::collections::{BTreeMap, BTreeSet, HashSet}; +use std::collections::{BTreeMap, BTreeSet}; use std::fmt::Debug; use std::net::SocketAddr; @@ -13,6 +13,7 @@ use ledger_transport_hid::TransportNativeHID; use namada::account::AccountPublicKeysMap; use namada::core::address::{Address, EstablishedAddress}; use namada::core::chain::ChainId; +use namada::core::collections::HashSet; use namada::core::dec::Dec; use namada::core::key::{ common, ed25519, RefTo, SerializeWithBorsh, SigScheme, @@ -23,6 +24,7 @@ use namada::core::token; use namada::core::token::{DenominatedAmount, NATIVE_MAX_DECIMAL_PLACES}; use namada::ledger::pos::common::PublicKey; use namada::ledger::pos::types::ValidatorMetaData; +use namada::proof_of_stake::parameters::MAX_VALIDATOR_METADATA_LEN; use namada::tx::data::{pos, Fee, TxType}; use namada::tx::{ verify_standalone_sig, Code, Commitment, Data, Section, SignatureIndex, Tx, @@ -802,7 +804,7 @@ impl Signed { .sections .into_iter() .find_map(|sec| { - if let Section::Signature(signatures) = sec { + if let Section::Authorization(signatures) = sec { if [raw_header_hash] == signatures.targets.as_slice() { Some(signatures) } else { @@ -1361,6 +1363,54 @@ pub fn validate_validator_account( ); } + // Check that the validator metadata is not too large + let metadata = &signed_tx.data.metadata; + if metadata.email.len() as u64 > MAX_VALIDATOR_METADATA_LEN { + panic!( + "The email metadata of the validator with address {} is too long, \ + must be within {MAX_VALIDATOR_METADATA_LEN} characters", + signed_tx.data.address + ); + } + if let Some(description) = metadata.description.as_ref() { + if description.len() as u64 > MAX_VALIDATOR_METADATA_LEN { + panic!( + "The description metadata of the validator with address {} is \ + too long, must be within {MAX_VALIDATOR_METADATA_LEN} \ + characters", + signed_tx.data.address + ); + } + } + if let Some(website) = metadata.website.as_ref() { + if website.len() as u64 > 
MAX_VALIDATOR_METADATA_LEN { + panic!( + "The website metadata of the validator with address {} is too \ + long, must be within {MAX_VALIDATOR_METADATA_LEN} characters", + signed_tx.data.address + ); + } + } + if let Some(discord_handle) = metadata.discord_handle.as_ref() { + if discord_handle.len() as u64 > MAX_VALIDATOR_METADATA_LEN { + panic!( + "The discord handle metadata of the validator with address {} \ + is too long, must be within {MAX_VALIDATOR_METADATA_LEN} \ + characters", + signed_tx.data.address + ); + } + } + if let Some(avatar) = metadata.avatar.as_ref() { + if avatar.len() as u64 > MAX_VALIDATOR_METADATA_LEN { + panic!( + "The avatar metadata of the validator with address {} is too \ + long, must be within {MAX_VALIDATOR_METADATA_LEN} characters", + signed_tx.data.address + ); + } + } + // Check signature let mut is_valid = { let maybe_threshold = { diff --git a/crates/apps/src/lib/config/genesis/utils.rs b/crates/apps/src/lib/config/genesis/utils.rs index a67f259db0..ea1acd9cbd 100644 --- a/crates/apps/src/lib/config/genesis/utils.rs +++ b/crates/apps/src/lib/config/genesis/utils.rs @@ -1,9 +1,9 @@ -use std::collections::HashSet; use std::path::Path; use eyre::Context; use ledger_namada_rs::NamadaApp; use ledger_transport_hid::TransportNativeHID; +use namada::core::collections::HashSet; use namada::core::key::common; use namada::tx::Tx; use namada_sdk::wallet::Wallet; diff --git a/crates/apps/src/lib/config/mod.rs b/crates/apps/src/lib/config/mod.rs index 782ffee77d..018783debc 100644 --- a/crates/apps/src/lib/config/mod.rs +++ b/crates/apps/src/lib/config/mod.rs @@ -5,13 +5,13 @@ pub mod genesis; pub mod global; pub mod utils; -use std::collections::HashMap; use std::fs::{create_dir_all, File}; use std::io::Write; use std::path::{Path, PathBuf}; use directories::ProjectDirs; use namada::core::chain::ChainId; +use namada::core::collections::HashMap; use namada::core::storage::BlockHeight; use namada::core::time::Rfc3339String; use 
serde::{Deserialize, Serialize}; diff --git a/crates/apps/src/lib/mod.rs b/crates/apps/src/lib/mod.rs index 9bfb5be0ef..22cb4d78d5 100644 --- a/crates/apps/src/lib/mod.rs +++ b/crates/apps/src/lib/mod.rs @@ -5,7 +5,7 @@ #![deny(rustdoc::broken_intra_doc_links)] #![deny(rustdoc::private_intra_doc_links)] -#[cfg(feature = "testing")] +#[cfg(feature = "benches")] pub mod bench_utils; pub mod cli; pub mod client; diff --git a/crates/apps/src/lib/node/ledger/broadcaster.rs b/crates/apps/src/lib/node/ledger/broadcaster.rs index c2b6a38096..5bf038c182 100644 --- a/crates/apps/src/lib/node/ledger/broadcaster.rs +++ b/crates/apps/src/lib/node/ledger/broadcaster.rs @@ -33,8 +33,10 @@ impl Broadcaster { /// by the receiver async fn run_loop(&mut self, genesis_time: DateTimeUtc) { // wait for start time if necessary + #[allow(clippy::disallowed_methods)] + let now = Utc::now(); if let Ok(sleep_time) = - genesis_time.0.signed_duration_since(Utc::now()).to_std() + genesis_time.0.signed_duration_since(now).to_std() { if !sleep_time.is_zero() { tokio::time::sleep(sleep_time).await; @@ -57,7 +59,10 @@ impl Broadcaster { strategy: time::Constant(time::Duration::from_secs(1)), } .timeout( - time::Instant::now() + time::Duration::from_secs(timeout), + { + #[allow(clippy::disallowed_methods)] + time::Instant::now() + } + time::Duration::from_secs(timeout), || async { match self.client.status().await { Ok(status) => ControlFlow::Break(status), diff --git a/crates/apps/src/lib/node/ledger/ethereum_oracle/mod.rs b/crates/apps/src/lib/node/ledger/ethereum_oracle/mod.rs index 777d133769..9bd964c120 100644 --- a/crates/apps/src/lib/node/ledger/ethereum_oracle/mod.rs +++ b/crates/apps/src/lib/node/ledger/ethereum_oracle/mod.rs @@ -470,6 +470,7 @@ async fn process_events_in_block( let last_processed_block_ref = oracle.last_processed_block.borrow(); let last_processed_block = last_processed_block_ref.as_ref(); let backoff = oracle.backoff; + #[allow(clippy::disallowed_methods)] let deadline = 
Instant::now() + oracle.ceiling; let latest_block = match oracle .client diff --git a/crates/apps/src/lib/node/ledger/mod.rs b/crates/apps/src/lib/node/ledger/mod.rs index 28abdc0567..605e2eae6a 100644 --- a/crates/apps/src/lib/node/ledger/mod.rs +++ b/crates/apps/src/lib/node/ledger/mod.rs @@ -9,16 +9,14 @@ pub mod tendermint_node; use std::convert::TryInto; use std::net::SocketAddr; use std::path::PathBuf; -use std::str::FromStr; use std::thread; use byte_unit::Byte; use data_encoding::HEXUPPER; use futures::future::TryFutureExt; -use namada::core::storage::{BlockHeight, Key}; +use namada::core::storage::BlockHeight; use namada::core::time::DateTimeUtc; use namada::eth_bridge::ethers::providers::{Http, Provider}; -use namada::governance::storage::keys as governance_storage; use namada::state::DB; use namada::storage::DbColFam; use namada::tendermint::abci::request::CheckTxKind; @@ -68,41 +66,17 @@ const ENV_VAR_RAYON_THREADS: &str = "NAMADA_RAYON_THREADS"; // } //``` impl Shell { - fn load_proposals(&mut self) { - let proposals_key = governance_storage::get_commiting_proposals_prefix( - self.state.in_mem().last_epoch.0, - ); - - let (proposal_iter, _) = self.state.db_iter_prefix(&proposals_key); - for (key, _, _) in proposal_iter { - let key = - Key::from_str(key.as_str()).expect("Key should be parsable"); - if governance_storage::get_commit_proposal_epoch(&key).unwrap() - != self.state.in_mem().last_epoch.0 - { - // NOTE: `iter_prefix` iterate over the matching prefix. In this - // case a proposal with grace_epoch 110 will be - // matched by prefixes 1, 11 and 110. Thus we - // have to skip to the next iteration of - // the cycle for all the prefixes that don't actually match - // the desired epoch. 
- continue; - } - - let proposal_id = governance_storage::get_commit_proposal_id(&key); - if let Some(id) = proposal_id { - self.proposal_data.insert(id); - } - } - } - fn call(&mut self, req: Request) -> Result { match req { Request::InitChain(init) => { tracing::debug!("Request InitChain"); self.init_chain( init, - #[cfg(any(test, feature = "testing"))] + #[cfg(any( + test, + feature = "testing", + feature = "benches" + ))] 1, ) .map(Response::InitChain) @@ -131,7 +105,6 @@ impl Shell { } Request::FinalizeBlock(finalize) => { tracing::debug!("Request FinalizeBlock"); - self.load_proposals(); self.finalize_block(finalize).map(Response::FinalizeBlock) } Request::Commit => { @@ -235,7 +208,7 @@ pub fn dump_db( #[cfg(feature = "migrations")] pub fn query_db( config: config::Ledger, - key: &Key, + key: &namada::core::storage::Key, type_hash: &[u8; 32], cf: &DbColFam, ) { diff --git a/crates/apps/src/lib/node/ledger/shell/block_alloc.rs b/crates/apps/src/lib/node/ledger/shell/block_alloc.rs index 09bb6d7847..625712cfe0 100644 --- a/crates/apps/src/lib/node/ledger/shell/block_alloc.rs +++ b/crates/apps/src/lib/node/ledger/shell/block_alloc.rs @@ -16,22 +16,17 @@ //! In the current implementation, we allocate space for transactions //! in the following order of preference: //! -//! - First, we allot space for DKG encrypted txs. We allow DKG encrypted txs to -//! take up at most 1/3 of the total block space. -//! - Next, we allot space for DKG decrypted txs. Decrypted txs take up as much -//! space as needed. We will see, shortly, why in practice this is fine. -//! - Finally, we allot space for protocol txs. Protocol txs get half of the -//! remaining block space allotted to them. +//! - First, we allot space for protocol txs. We allow them to take up at most +//! 1/2 of the total block space unless there is extra room due to a lack of +//! user txs. +//! - Next, we allot space for user submitted txs until the block is filled. +//! 
- If we cannot fill the block with normal txs, we try to fill it with +//! protocol txs that were not allocated in the initial phase. //! -//! Since at some fixed height `H` decrypted txs only take up as -//! much space as the encrypted txs from height `H - 1`, and we -//! restrict the space of encrypted txs to at most 1/3 of the -//! total block space, we roughly divide the Tendermint block -//! space in 3, for each major type of tx. //! //! # How gas is allocated //! -//! Gas is only relevant to DKG encrypted txs. Every encrypted tx defines its +//! Gas is only relevant to non-protocol txs. Every such tx defines its //! gas limit. We take this entire gas limit as the amount of gas requested by //! the tx. @@ -48,11 +43,6 @@ pub mod states; // and alloc space for large tx right at the start. the problem with // this is that then we may not have enough space for decrypted txs -// TODO: panic if we don't have enough space reserved for a -// decrypted tx; in theory, we should always have enough space -// reserved for decrypted txs, given the invariants of the state -// machine - use std::marker::PhantomData; use namada::proof_of_stake::pos_queries::PosQueries; @@ -60,6 +50,7 @@ use namada::state::{self, WlState}; #[allow(unused_imports)] use crate::facade::tendermint_proto::abci::RequestPrepareProposal; +use crate::node::ledger::shell::block_alloc::states::WithNormalTxs; /// Block allocation failure status responses. #[derive(Debug, Copy, Clone, Eq, PartialEq)] @@ -121,11 +112,10 @@ impl Resource for BlockGas { /// /// We keep track of the current space utilized by: /// -/// - DKG encrypted transactions. -/// - DKG decrypted transactions. +/// - normal transactions. /// - Protocol transactions. /// -/// Gas usage of DKG encrypted txs is also tracked. +/// Gas usage of normal txs is also tracked. #[derive(Debug, Default)] pub struct BlockAllocator { /// The current state of the [`BlockAllocator`] state machine. 
@@ -135,14 +125,12 @@ pub struct BlockAllocator { block: TxBin, /// The current space utilized by protocol transactions. protocol_txs: TxBin, - /// The current space and gas utilized by DKG encrypted transactions. - encrypted_txs: EncryptedTxsBins, - /// The current space utilized by DKG decrypted transactions. - decrypted_txs: TxBin, + /// The current space and gas utilized by normal user transactions. + normal_txs: NormalTxsBins, } -impl From<&WlState> - for BlockAllocator> +impl From<&WlState> + for BlockAllocator> where D: 'static + state::DB + for<'iter> state::DBIter<'iter>, H: 'static + state::StorageHasher, @@ -156,7 +144,29 @@ where } } -impl BlockAllocator> { +impl BlockAllocator> { + /// Construct a new [`BlockAllocator`], with an upper bound + /// on the max size of all txs in a block defined by CometBFT and an upper + /// bound on the max gas in a block. + #[inline] + pub fn init( + cometbft_max_block_space_in_bytes: u64, + max_block_gas: u64, + ) -> Self { + let max = cometbft_max_block_space_in_bytes; + Self { + _state: PhantomData, + block: TxBin::init(max), + protocol_txs: { + let allotted_space_in_bytes = threshold::ONE_HALF.over(max); + TxBin::init(allotted_space_in_bytes) + }, + normal_txs: NormalTxsBins::new(max_block_gas), + } + } +} + +impl BlockAllocator { /// Construct a new [`BlockAllocator`], with an upper bound /// on the max size of all txs in a block defined by Tendermint and an upper /// bound on the max gas in a block. @@ -170,8 +180,10 @@ impl BlockAllocator> { _state: PhantomData, block: TxBin::init(max), protocol_txs: TxBin::default(), - encrypted_txs: EncryptedTxsBins::new(max, max_block_gas), - decrypted_txs: TxBin::default(), + normal_txs: NormalTxsBins { + space: TxBin::init(tendermint_max_block_space_in_bytes), + gas: TxBin::init(max_block_gas), + }, } } } @@ -184,10 +196,9 @@ impl BlockAllocator { /// block space for a given round and the sum of the allotted space /// to each [`TxBin`] instance in a [`BlockAllocator`]. 
#[inline] - fn uninitialized_space_in_bytes(&self) -> u64 { - let total_bin_space = self.protocol_txs.allotted - + self.encrypted_txs.space.allotted - + self.decrypted_txs.allotted; + fn unoccupied_space_in_bytes(&self) -> u64 { + let total_bin_space = + self.protocol_txs.occupied + self.normal_txs.space.occupied; self.block.allotted - total_bin_space } } @@ -256,16 +267,15 @@ impl TxBin { } #[derive(Debug, Default)] -pub struct EncryptedTxsBins { +pub struct NormalTxsBins { space: TxBin, gas: TxBin, } -impl EncryptedTxsBins { - pub fn new(max_bytes: u64, max_gas: u64) -> Self { - let allotted_space_in_bytes = threshold::ONE_THIRD.over(max_bytes); +impl NormalTxsBins { + pub fn new(max_gas: u64) -> Self { Self { - space: TxBin::init(allotted_space_in_bytes), + space: TxBin::default(), gas: TxBin::init(max_gas), } } @@ -273,10 +283,10 @@ impl EncryptedTxsBins { pub fn try_dump(&mut self, tx: &[u8], gas: u64) -> Result<(), String> { self.space.try_dump(tx).map_err(|e| match e { AllocFailure::Rejected { .. } => { - "No more space left in the block for wrapper txs".to_string() + "No more space left in the block for normal txs".to_string() } AllocFailure::OverflowsBin { .. } => "The given wrapper tx is \ - larger than 1/3 of the \ + larger than the remaining \ available block space" .to_string(), })?; @@ -316,33 +326,30 @@ pub mod threshold { } } - /// Divide free space in three. - pub const ONE_THIRD: Threshold = Threshold::new(1, 3); + /// Divide free space in half. 
+ pub const ONE_HALF: Threshold = Threshold::new(1, 2); } #[cfg(test)] mod tests { - use std::cell::RefCell; use assert_matches::assert_matches; use proptest::prelude::*; use super::states::{ - BuildingEncryptedTxBatch, NextState, TryAlloc, WithEncryptedTxs, - WithoutEncryptedTxs, + BuildingNormalTxBatch, BuildingProtocolTxBatch, NextState, TryAlloc, }; use super::*; use crate::node::ledger::shims::abcipp_shim_types::shim::TxBytes; - /// Convenience alias for a block space allocator at a state with encrypted + /// Convenience alias for a block space allocator at a state with protocol /// txs. - type BsaWrapperTxs = - BlockAllocator>; + type BsaInitialProtocolTxs = + BlockAllocator>; - /// Convenience alias for a block space allocator at a state without - /// encrypted txs. - type BsaNoWrapperTxs = - BlockAllocator>; + /// Convenience alias for a block allocator at a state with protocol + /// txs. + type BsaNormalTxs = BlockAllocator; /// Proptest generated txs. #[derive(Debug)] @@ -350,45 +357,46 @@ mod tests { tendermint_max_block_space_in_bytes: u64, max_block_gas: u64, protocol_txs: Vec, - encrypted_txs: Vec, - decrypted_txs: Vec, + normal_txs: Vec, } - /// Check that at most 1/3 of the block space is + /// Check that at most 1/2 of the block space is /// reserved for each kind of tx type, in the - /// allocator's common path. + /// allocator's common path. 
Further check that + /// if not enough normal txs are present, the rest + /// is filled with protocol txs #[test] - fn test_txs_are_evenly_split_across_block() { + fn test_filling_up_with_protocol() { const BLOCK_SIZE: u64 = 60; const BLOCK_GAS: u64 = 1_000; - // reserve block space for encrypted txs - let mut alloc = BsaWrapperTxs::init(BLOCK_SIZE, BLOCK_GAS); + // reserve block space for protocol txs + let mut alloc = BsaInitialProtocolTxs::init(BLOCK_SIZE, BLOCK_GAS); - // allocate ~1/3 of the block space to encrypted txs - assert!(alloc.try_alloc(BlockResources::new(&[0; 18], 0)).is_ok()); + // allocate ~1/2 of the block space to encrypted txs + assert!(alloc.try_alloc(&[0; 29]).is_ok()); - // reserve block space for decrypted txs + // reserve block space for normal txs let mut alloc = alloc.next_state(); - // the space we allotted to encrypted txs was shrunk to + // the space we allotted to protocol txs was shrunk to // the total space we actually used up - assert_eq!(alloc.encrypted_txs.space.allotted, 18); + assert_eq!(alloc.protocol_txs.allotted, 29); - // check that the allotted space for decrypted txs is correct - assert_eq!(alloc.decrypted_txs.allotted, BLOCK_SIZE - 18); + // check that the allotted space for normal txs is correct + assert_eq!(alloc.normal_txs.space.allotted, BLOCK_SIZE - 29); - // add about ~1/3 worth of decrypted txs - assert!(alloc.try_alloc(&[0; 17]).is_ok()); + // add about ~1/3 worth of normal txs + assert!(alloc.try_alloc(BlockResources::new(&[0; 17], 0)).is_ok()); - // reserve block space for protocol txs + // fill the rest of the block with protocol txs let mut alloc = alloc.next_state(); // check that space was shrunk - assert_eq!(alloc.protocol_txs.allotted, BLOCK_SIZE - (18 + 17)); + assert_eq!(alloc.protocol_txs.allotted, BLOCK_SIZE - (29 + 17)); // add protocol txs to the block space allocator - assert!(alloc.try_alloc(&[0; 25]).is_ok()); + assert!(alloc.try_alloc(&[0; 14]).is_ok()); // the block should be full at this 
point assert_matches!( @@ -397,15 +405,42 @@ mod tests { ); } - // Test that we cannot include encrypted txs in a block - // when the state invariants banish them from inclusion. + /// Test that if less than half of the block can be initially filled + /// with protocol txs, the rest if filled with normal txs. #[test] - fn test_encrypted_txs_are_rejected() { - let mut alloc = BsaNoWrapperTxs::init(1234, 1_000); + fn test_less_than_half_protocol() { + const BLOCK_SIZE: u64 = 60; + const BLOCK_GAS: u64 = 1_000; + + // reserve block space for protocol txs + let mut alloc = BsaInitialProtocolTxs::init(BLOCK_SIZE, BLOCK_GAS); + + // allocate ~1/3 of the block space to protocol txs + assert!(alloc.try_alloc(&[0; 18]).is_ok()); + + // reserve block space for normal txs + let mut alloc = alloc.next_state(); + + // the space we allotted to protocol txs was shrunk to + // the total space we actually used up + assert_eq!(alloc.protocol_txs.allotted, 18); + + // check that the allotted space for normal txs is correct + assert_eq!(alloc.normal_txs.space.allotted, BLOCK_SIZE - 18); + + // add about ~2/3 worth of normal txs + assert!(alloc.try_alloc(BlockResources::new(&[0; 42], 0)).is_ok()); + // the block should be full at this point assert_matches!( alloc.try_alloc(BlockResources::new(&[0; 1], 0)), Err(AllocFailure::Rejected { .. }) ); + + let mut alloc = alloc.next_state(); + assert_matches!( + alloc.try_alloc(&[0; 1]), + Err(AllocFailure::OverflowsBin { .. }) + ); } proptest! 
{ @@ -436,21 +471,21 @@ mod tests { tendermint_max_block_space_in_bytes: u64, ) { let mut bins = - BsaWrapperTxs::init(tendermint_max_block_space_in_bytes, 1_000); + BsaNormalTxs::init(tendermint_max_block_space_in_bytes, 1_000); - // fill the entire bin of encrypted txs - bins.encrypted_txs.space.occupied = bins.encrypted_txs.space.allotted; + // fill the entire bin of protocol txs + bins.normal_txs.space.occupied = bins.normal_txs.space.allotted; - // make sure we can't dump any new encrypted txs in the bin + // make sure we can't dump any new protocol txs in the bin assert_matches!( bins.try_alloc(BlockResources::new(b"arbitrary tx bytes", 0)), Err(AllocFailure::Rejected { .. }) ); // Reset space bin - bins.encrypted_txs.space.occupied = 0; + bins.normal_txs.space.occupied = 0; // Fill the entire gas bin - bins.encrypted_txs.gas.occupied = bins.encrypted_txs.gas.allotted; + bins.normal_txs.gas.occupied = bins.normal_txs.gas.allotted; // Make sure we can't dump any new wncrypted txs in the bin assert_matches!( @@ -461,11 +496,16 @@ mod tests { /// Implementation of [`test_initial_bin_capacity`]. fn proptest_initial_bin_capacity(tendermint_max_block_space_in_bytes: u64) { - let bins = - BsaWrapperTxs::init(tendermint_max_block_space_in_bytes, 1_000); - let expected = tendermint_max_block_space_in_bytes - - threshold::ONE_THIRD.over(tendermint_max_block_space_in_bytes); - assert_eq!(expected, bins.uninitialized_space_in_bytes()); + let bins = BsaInitialProtocolTxs::init( + tendermint_max_block_space_in_bytes, + 1_000, + ); + let expected = tendermint_max_block_space_in_bytes; + assert_eq!( + bins.protocol_txs.allotted, + threshold::ONE_HALF.over(tendermint_max_block_space_in_bytes) + ); + assert_eq!(expected, bins.unoccupied_space_in_bytes()); } /// Implementation of [`test_tx_dump_doesnt_fill_up_bin`]. 
@@ -474,8 +514,7 @@ mod tests { tendermint_max_block_space_in_bytes, max_block_gas, protocol_txs, - encrypted_txs, - decrypted_txs, + normal_txs, } = args; // produce new txs until the moment we would have @@ -484,41 +523,57 @@ mod tests { // iterate over the produced txs to make sure we can keep // dumping new txs without filling up the bins - let bins = RefCell::new(BsaWrapperTxs::init( + let mut bins = BsaInitialProtocolTxs::init( tendermint_max_block_space_in_bytes, max_block_gas, - )); - let encrypted_txs = encrypted_txs.into_iter().take_while(|tx| { - let bin = bins.borrow().encrypted_txs.space; - let new_size = bin.occupied + tx.len() as u64; - new_size < bin.allotted - }); - for tx in encrypted_txs { - assert!( - bins.borrow_mut() - .try_alloc(BlockResources::new(&tx, 0)) - .is_ok() - ); + ); + let mut protocol_tx_iter = protocol_txs.iter(); + let mut allocated_txs = vec![]; + let mut new_size = 0; + for tx in protocol_tx_iter.by_ref() { + let bin = bins.protocol_txs; + if new_size + tx.len() as u64 >= bin.allotted { + break; + } else { + new_size += tx.len() as u64; + allocated_txs.push(tx); + } + } + for tx in allocated_txs { + assert!(bins.try_alloc(tx).is_ok()); } - let bins = RefCell::new(bins.into_inner().next_state()); - let decrypted_txs = decrypted_txs.into_iter().take_while(|tx| { - let bin = bins.borrow().decrypted_txs; - let new_size = bin.occupied + tx.len() as u64; - new_size < bin.allotted - }); + let mut bins = bins.next_state(); + let mut new_size = bins.normal_txs.space.allotted; + let mut decrypted_txs = vec![]; + for tx in normal_txs { + let bin = bins.normal_txs.space; + if (new_size + tx.len() as u64) < bin.allotted { + new_size += tx.len() as u64; + decrypted_txs.push(tx); + } else { + break; + } + } for tx in decrypted_txs { - assert!(bins.borrow_mut().try_alloc(&tx).is_ok()); + assert!(bins.try_alloc(BlockResources::new(&tx, 0)).is_ok()); + } + + let mut bins = bins.next_state(); + let mut allocated_txs = vec![]; + let mut new_size 
= bins.protocol_txs.allotted; + for tx in protocol_tx_iter.by_ref() { + let bin = bins.protocol_txs; + if new_size + tx.len() as u64 >= bin.allotted { + break; + } else { + new_size += tx.len() as u64; + allocated_txs.push(tx); + } } - let bins = RefCell::new(bins.into_inner().next_state()); - let protocol_txs = protocol_txs.into_iter().take_while(|tx| { - let bin = bins.borrow().protocol_txs; - let new_size = bin.occupied + tx.len() as u64; - new_size < bin.allotted - }); - for tx in protocol_txs { - assert!(bins.borrow_mut().try_alloc(&tx).is_ok()); + for tx in allocated_txs { + assert!(bins.try_alloc(tx).is_ok()); } } @@ -527,7 +582,7 @@ mod tests { fn arb_transactions() // create base strategies ( - (tendermint_max_block_space_in_bytes, max_block_gas, protocol_tx_max_bin_size, encrypted_tx_max_bin_size, + (tendermint_max_block_space_in_bytes, max_block_gas, protocol_tx_max_bin_size, decrypted_tx_max_bin_size) in arb_max_bin_sizes(), ) // compose strategies @@ -535,36 +590,30 @@ mod tests { tendermint_max_block_space_in_bytes in Just(tendermint_max_block_space_in_bytes), max_block_gas in Just(max_block_gas), protocol_txs in arb_tx_list(protocol_tx_max_bin_size), - encrypted_txs in arb_tx_list(encrypted_tx_max_bin_size), - decrypted_txs in arb_tx_list(decrypted_tx_max_bin_size), + normal_txs in arb_tx_list(decrypted_tx_max_bin_size), ) -> PropTx { PropTx { tendermint_max_block_space_in_bytes, max_block_gas, protocol_txs: protocol_txs.into_iter().map(prost::bytes::Bytes::from).collect(), - encrypted_txs: encrypted_txs.into_iter().map(prost::bytes::Bytes::from).collect(), - decrypted_txs: decrypted_txs.into_iter().map(prost::bytes::Bytes::from).collect(), + normal_txs: normal_txs.into_iter().map(prost::bytes::Bytes::from).collect(), } } } /// Return random bin sizes for a [`BlockAllocator`]. 
- fn arb_max_bin_sizes() - -> impl Strategy { + fn arb_max_bin_sizes() -> impl Strategy { const MAX_BLOCK_SIZE_BYTES: u64 = 1000; (1..=MAX_BLOCK_SIZE_BYTES).prop_map( |tendermint_max_block_space_in_bytes| { ( tendermint_max_block_space_in_bytes, tendermint_max_block_space_in_bytes, - threshold::ONE_THIRD - .over(tendermint_max_block_space_in_bytes) - as usize, - threshold::ONE_THIRD + threshold::ONE_HALF .over(tendermint_max_block_space_in_bytes) as usize, - threshold::ONE_THIRD + threshold::ONE_HALF .over(tendermint_max_block_space_in_bytes) as usize, ) diff --git a/crates/apps/src/lib/node/ledger/shell/block_alloc/states.rs b/crates/apps/src/lib/node/ledger/shell/block_alloc/states.rs index 7163cdf877..ed5d5e3004 100644 --- a/crates/apps/src/lib/node/ledger/shell/block_alloc/states.rs +++ b/crates/apps/src/lib/node/ledger/shell/block_alloc/states.rs @@ -1,4 +1,4 @@ -//! All the states of the [`BlockAllocator`] state machine, +//! All the states of the `BlockAllocator` state machine, //! over the extent of a Tendermint consensus round //! block proposal. //! @@ -6,73 +6,52 @@ //! //! The state machine moves through the following state DAG: //! -//! 1. [`BuildingEncryptedTxBatch`] - the initial state. In this state, we -//! populate a block with DKG encrypted txs. This state supports two modes of -//! operation, which you can think of as two sub-states: -//! * [`WithoutEncryptedTxs`] - When this mode is active, no encrypted txs are -//! included in a block proposal. -//! * [`WithEncryptedTxs`] - When this mode is active, we are able to include -//! encrypted txs in a block proposal. -//! 2. [`BuildingDecryptedTxBatch`] - the second state. In this state, we -//! populate a block with DKG decrypted txs. -//! 3. [`BuildingProtocolTxBatch`] - the third state. In this state, we populate -//! a block with protocol txs. +//! 1. [`BuildingProtocolTxBatch`] - the initial state. In this state, we +//! populate a block with protocol txs. +//! 2. 
[`BuildingNormalTxBatch`] - the second state. In this state, we populate +//! a block with non-protocol txs. +//! 3. [`BuildingProtocolTxBatch`] - we return to this state to fill up any +//! remaining block space if possible. -mod decrypted_txs; -mod encrypted_txs; +mod normal_txs; mod protocol_txs; -use super::{AllocFailure, BlockAllocator}; - -/// Convenience wrapper for a [`BlockAllocator`] state that allocates -/// encrypted transactions. -#[allow(dead_code)] -pub enum EncryptedTxBatchAllocator { - WithEncryptedTxs( - BlockAllocator>, - ), - WithoutEncryptedTxs( - BlockAllocator>, - ), -} +use super::AllocFailure; /// The leader of the current Tendermint round is building -/// a new batch of DKG decrypted transactions. +/// a new batch of protocol txs. /// -/// For more info, read the module docs of -/// [`crate::node::ledger::shell::block_alloc::states`]. -pub enum BuildingDecryptedTxBatch {} - -/// The leader of the current Tendermint round is building -/// a new batch of Namada protocol transactions. +/// This happens twice, in the first stage, we fill up to 1/2 +/// of the block. At the end of allocating user txs, we fill +/// up any remaining space with un-allocated protocol txs. /// /// For more info, read the module docs of /// [`crate::node::ledger::shell::block_alloc::states`]. -pub enum BuildingProtocolTxBatch {} +pub struct BuildingProtocolTxBatch { + /// One of [`WithNormalTxs`] and [`WithoutNormalTxs`]. + _mode: Mode, +} -/// The leader of the current Tendermint round is building -/// a new batch of DKG encrypted transactions. +/// Allow block proposals to include user submitted txs. /// /// For more info, read the module docs of /// [`crate::node::ledger::shell::block_alloc::states`]. -pub struct BuildingEncryptedTxBatch { - /// One of [`WithEncryptedTxs`] and [`WithoutEncryptedTxs`]. - _mode: Mode, -} +pub enum WithNormalTxs {} /// Allow block proposals to include encrypted txs. 
/// /// For more info, read the module docs of /// [`crate::node::ledger::shell::block_alloc::states`]. -pub enum WithEncryptedTxs {} +pub enum WithoutNormalTxs {} -/// Prohibit block proposals from including encrypted txs. +/// The leader of the current Tendermint round is building +/// a new batch of user submitted (non-protocol) transactions. /// /// For more info, read the module docs of /// [`crate::node::ledger::shell::block_alloc::states`]. -pub enum WithoutEncryptedTxs {} +pub struct BuildingNormalTxBatch {} -/// Try to allocate a new transaction on a [`BlockAllocator`] state. +/// Try to allocate a new transaction on a `BlockAllocator` state. /// /// For more info, read the module docs of /// [`crate::node::ledger::shell::block_alloc::states`]. @@ -86,7 +65,7 @@ pub trait TryAlloc { ) -> Result<(), AllocFailure>; } -/// Represents a state transition in the [`BlockAllocator`] state machine. +/// Represents a state transition in the `BlockAllocator` state machine. /// /// This trait should not be used directly. Instead, consider using /// [`NextState`]. @@ -94,10 +73,10 @@ pub trait TryAlloc { /// For more info, read the module docs of /// [`crate::node::ledger::shell::block_alloc::states`]. pub trait NextStateImpl { - /// The next state in the [`BlockAllocator`] state machine. + /// The next state in the `BlockAllocator` state machine. type Next; - /// Transition to the next state in the [`BlockAllocator`] state + /// Transition to the next state in the `BlockAllocator`] state /// machine. fn next_state_impl(self) -> Self::Next; } @@ -108,7 +87,7 @@ pub trait NextStateImpl { /// For more info, read the module docs of /// [`crate::node::ledger::shell::block_alloc::states`]. pub trait NextState: NextStateImpl { - /// Transition to the next state in the [`BlockAllocator`] state, + /// Transition to the next state in the `BlockAllocator` state, /// using a null transiiton function. 
#[inline] fn next_state(self) -> Self::Next diff --git a/crates/apps/src/lib/node/ledger/shell/block_alloc/states/decrypted_txs.rs b/crates/apps/src/lib/node/ledger/shell/block_alloc/states/decrypted_txs.rs deleted file mode 100644 index 7d7cc51d90..0000000000 --- a/crates/apps/src/lib/node/ledger/shell/block_alloc/states/decrypted_txs.rs +++ /dev/null @@ -1,48 +0,0 @@ -use std::marker::PhantomData; - -use super::super::{AllocFailure, BlockAllocator, TxBin}; -use super::{ - BuildingDecryptedTxBatch, BuildingProtocolTxBatch, NextStateImpl, TryAlloc, -}; - -impl TryAlloc for BlockAllocator { - type Resources<'tx> = &'tx [u8]; - - #[inline] - fn try_alloc( - &mut self, - tx: Self::Resources<'_>, - ) -> Result<(), AllocFailure> { - self.decrypted_txs.try_dump(tx) - } -} - -impl NextStateImpl for BlockAllocator { - type Next = BlockAllocator; - - #[inline] - fn next_state_impl(mut self) -> Self::Next { - self.decrypted_txs.shrink_to_fit(); - - // the remaining space is allocated to protocol txs - let remaining_free_space = self.uninitialized_space_in_bytes(); - self.protocol_txs = TxBin::init(remaining_free_space); - - // cast state - let Self { - block, - protocol_txs, - encrypted_txs, - decrypted_txs, - .. 
- } = self; - - BlockAllocator { - _state: PhantomData, - block, - protocol_txs, - encrypted_txs, - decrypted_txs, - } - } -} diff --git a/crates/apps/src/lib/node/ledger/shell/block_alloc/states/encrypted_txs.rs b/crates/apps/src/lib/node/ledger/shell/block_alloc/states/encrypted_txs.rs deleted file mode 100644 index 05f74d1d56..0000000000 --- a/crates/apps/src/lib/node/ledger/shell/block_alloc/states/encrypted_txs.rs +++ /dev/null @@ -1,127 +0,0 @@ -use std::marker::PhantomData; - -use super::super::{AllocFailure, BlockAllocator, TxBin}; -use super::{ - BuildingDecryptedTxBatch, BuildingEncryptedTxBatch, - EncryptedTxBatchAllocator, NextStateImpl, TryAlloc, WithEncryptedTxs, - WithoutEncryptedTxs, -}; -use crate::node::ledger::shell::block_alloc::BlockResources; - -impl TryAlloc for BlockAllocator> { - type Resources<'tx> = BlockResources<'tx>; - - #[inline] - fn try_alloc( - &mut self, - resource_required: Self::Resources<'_>, - ) -> Result<(), AllocFailure> { - self.encrypted_txs.space.try_dump(resource_required.tx)?; - self.encrypted_txs.gas.try_dump(resource_required.gas) - } -} - -impl NextStateImpl - for BlockAllocator> -{ - type Next = BlockAllocator; - - #[inline] - fn next_state_impl(self) -> Self::Next { - next_state(self) - } -} - -impl TryAlloc - for BlockAllocator> -{ - type Resources<'tx> = BlockResources<'tx>; - - #[inline] - fn try_alloc( - &mut self, - _resource_required: Self::Resources<'_>, - ) -> Result<(), AllocFailure> { - Err(AllocFailure::Rejected { - bin_resource_left: 0, - }) - } -} - -impl NextStateImpl - for BlockAllocator> -{ - type Next = BlockAllocator; - - #[inline] - fn next_state_impl(self) -> Self::Next { - next_state(self) - } -} - -#[inline] -fn next_state( - mut alloc: BlockAllocator>, -) -> BlockAllocator { - alloc.encrypted_txs.space.shrink_to_fit(); - - // decrypted txs can use as much space as they need - which - // in practice will only be, at most, 1/3 of the block space - // used by encrypted txs at the prev height - 
let remaining_free_space = alloc.uninitialized_space_in_bytes(); - alloc.decrypted_txs = TxBin::init(remaining_free_space); - - // cast state - let BlockAllocator { - block, - protocol_txs, - encrypted_txs, - decrypted_txs, - .. - } = alloc; - - BlockAllocator { - _state: PhantomData, - block, - protocol_txs, - encrypted_txs, - decrypted_txs, - } -} - -impl TryAlloc for EncryptedTxBatchAllocator { - type Resources<'tx> = BlockResources<'tx>; - - #[inline] - fn try_alloc( - &mut self, - resource_required: Self::Resources<'_>, - ) -> Result<(), AllocFailure> { - match self { - EncryptedTxBatchAllocator::WithEncryptedTxs(state) => { - state.try_alloc(resource_required) - } - EncryptedTxBatchAllocator::WithoutEncryptedTxs(state) => { - // NOTE: this operation will cause the allocator to - // run out of memory immediately - state.try_alloc(resource_required) - } - } - } -} - -impl NextStateImpl for EncryptedTxBatchAllocator { - type Next = BlockAllocator; - - #[inline] - fn next_state_impl(self) -> Self::Next { - match self { - EncryptedTxBatchAllocator::WithEncryptedTxs(state) => { - state.next_state_impl() - } - EncryptedTxBatchAllocator::WithoutEncryptedTxs(state) => { - state.next_state_impl() - } - } - } -} diff --git a/crates/apps/src/lib/node/ledger/shell/block_alloc/states/normal_txs.rs b/crates/apps/src/lib/node/ledger/shell/block_alloc/states/normal_txs.rs new file mode 100644 index 0000000000..e15333216f --- /dev/null +++ b/crates/apps/src/lib/node/ledger/shell/block_alloc/states/normal_txs.rs @@ -0,0 +1,45 @@ +use std::marker::PhantomData; + +use super::super::{AllocFailure, BlockAllocator, TxBin}; +use super::{ + BuildingNormalTxBatch, BuildingProtocolTxBatch, NextStateImpl, TryAlloc, + WithoutNormalTxs, +}; +use crate::node::ledger::shell::block_alloc::BlockResources; + +impl TryAlloc for BlockAllocator { + type Resources<'tx> = BlockResources<'tx>; + + #[inline] + fn try_alloc( + &mut self, + resource_required: Self::Resources<'_>, + ) -> Result<(), 
AllocFailure> { + self.normal_txs.space.try_dump(resource_required.tx)?; + self.normal_txs.gas.try_dump(resource_required.gas) + } +} + +impl NextStateImpl for BlockAllocator { + type Next = BlockAllocator>; + + #[inline] + fn next_state_impl(mut self) -> Self::Next { + let remaining_free_space = self.unoccupied_space_in_bytes(); + self.protocol_txs = TxBin::init(remaining_free_space); + // cast state + let Self { + block, + protocol_txs, + normal_txs, + .. + } = self; + + BlockAllocator { + _state: PhantomData, + block, + protocol_txs, + normal_txs, + } + } +} diff --git a/crates/apps/src/lib/node/ledger/shell/block_alloc/states/protocol_txs.rs b/crates/apps/src/lib/node/ledger/shell/block_alloc/states/protocol_txs.rs index aba289113e..302dc83824 100644 --- a/crates/apps/src/lib/node/ledger/shell/block_alloc/states/protocol_txs.rs +++ b/crates/apps/src/lib/node/ledger/shell/block_alloc/states/protocol_txs.rs @@ -1,7 +1,13 @@ +use std::marker::PhantomData; + use super::super::{AllocFailure, BlockAllocator}; -use super::{BuildingProtocolTxBatch, TryAlloc}; +use super::{ + BuildingNormalTxBatch, BuildingProtocolTxBatch, NextStateImpl, TryAlloc, + WithNormalTxs, +}; +use crate::node::ledger::shell::block_alloc::TxBin; -impl TryAlloc for BlockAllocator { +impl TryAlloc for BlockAllocator> { type Resources<'tx> = &'tx [u8]; #[inline] @@ -12,3 +18,28 @@ impl TryAlloc for BlockAllocator { self.protocol_txs.try_dump(tx) } } + +impl NextStateImpl for BlockAllocator> { + type Next = BlockAllocator; + + #[inline] + fn next_state_impl(mut self) -> Self::Next { + self.protocol_txs.shrink_to_fit(); + let remaining_free_space = self.unoccupied_space_in_bytes(); + self.normal_txs.space = TxBin::init(remaining_free_space); + // cast state + let BlockAllocator { + block, + protocol_txs, + normal_txs, + .. 
+ } = self; + + BlockAllocator { + _state: PhantomData, + block, + protocol_txs, + normal_txs, + } + } +} diff --git a/crates/apps/src/lib/node/ledger/shell/finalize_block.rs b/crates/apps/src/lib/node/ledger/shell/finalize_block.rs index f4376ba0f5..f2a61a809b 100644 --- a/crates/apps/src/lib/node/ledger/shell/finalize_block.rs +++ b/crates/apps/src/lib/node/ledger/shell/finalize_block.rs @@ -4,9 +4,13 @@ use data_encoding::HEXUPPER; use masp_primitives::merkle_tree::CommitmentTree; use masp_primitives::sapling::Node; use namada::core::storage::{BlockHash, BlockResults, Epoch, Header}; +use namada::gas::event::WithGasUsed; use namada::governance::pgf::inflation as pgf_inflation; -use namada::ledger::events::EventType; +use namada::hash::Hash; +use namada::ledger::events::extend::{ComposeEvent, Height, Info, ValidMaspTx}; +use namada::ledger::events::EmitEvents; use namada::ledger::gas::GasMetering; +use namada::ledger::ibc; use namada::ledger::pos::namada_proof_of_stake; use namada::ledger::protocol::WrapperArgs; use namada::proof_of_stake; @@ -16,9 +20,11 @@ use namada::proof_of_stake::storage::{ use namada::state::write_log::StorageModification; use namada::state::{ResultExt, StorageWrite, EPOCH_SWITCH_BLOCKS_DELAY}; use namada::tx::data::protocol::ProtocolTxType; +use namada::tx::data::VpStatusFlags; +use namada::tx::event::{Code, InnerTx}; +use namada::tx::new_tx_event; use namada::vote_ext::ethereum_events::MultiSignedEthEvent; use namada::vote_ext::ethereum_tx_data_variants; -use namada_sdk::tx::new_tx_event; use super::*; use crate::facade::tendermint::abci::types::VoteInfo; @@ -101,7 +107,12 @@ where // Sub-system updates: // - Governance - applied first in case a proposal changes any of the // other syb-systems - governance::finalize_block(self, emit_events, new_epoch)?; + governance::finalize_block( + self, + emit_events, + current_epoch, + new_epoch, + )?; // - Token token::finalize_block(&mut self.state, emit_events, new_epoch)?; // - PoS @@ -114,19 
+125,12 @@ where votes, req.byzantine_validators, )?; + // - IBC + ibc::finalize_block(&mut self.state, emit_events, new_epoch)?; if new_epoch { // Apply PoS and PGF inflation - self.apply_inflation(current_epoch)?; - - // Take IBC events that may be emitted from PGF - for ibc_event in self.state.write_log_mut().take_ibc_events() { - let mut event = Event::from(ibc_event.clone()); - // Add the height for IBC event query - let height = self.state.in_mem().get_last_block_height() + 1; - event["height"] = height.to_string(); - response.events.push(event); - } + self.apply_inflation(current_epoch, emit_events)?; } let mut stats = InternalStats::default(); @@ -156,12 +160,14 @@ where ); continue; }; + + let result_code = ResultCode::from_u32(processed_tx.result.code) + .expect("Result code conversion should not fail"); + // If [`process_proposal`] rejected a Tx due to invalid signature, // emit an event here and move on to next tx. - if ResultCode::from_u32(processed_tx.result.code).unwrap() - == ResultCode::InvalidSig - { - let mut tx_event = match tx.header().tx_type { + if result_code == ResultCode::InvalidSig { + let base_event = match tx.header().tx_type { TxType::Wrapper(_) | TxType::Protocol(_) => { new_tx_event(&tx, height.0) } @@ -175,11 +181,15 @@ where continue; } }; - tx_event["code"] = processed_tx.result.code.to_string(); - tx_event["info"] = - format!("Tx rejected: {}", &processed_tx.result.info); - tx_event["gas_used"] = "0".into(); - response.events.push(tx_event); + response.events.emit( + base_event + .with(Code(result_code)) + .with(Info(format!( + "Tx rejected: {}", + &processed_tx.result.info + ))) + .with(WithGasUsed(0.into())), + ); continue; } @@ -193,232 +203,176 @@ where let tx_header = tx.header(); // If [`process_proposal`] rejected a Tx, emit an event here and // move on to next tx - if ResultCode::from_u32(processed_tx.result.code).unwrap() - != ResultCode::Ok - { - let mut tx_event = new_tx_event(&tx, height.0); - tx_event["code"] = 
processed_tx.result.code.to_string(); - tx_event["info"] = - format!("Tx rejected: {}", &processed_tx.result.info); - tx_event["gas_used"] = "0".into(); - response.events.push(tx_event); - // if the rejected tx was decrypted, remove it - // from the queue of txs to be processed - if let TxType::Decrypted(_) = &tx_header.tx_type { - self.state - .in_mem_mut() - .tx_queue - .pop() - .expect("Missing wrapper tx in queue"); - } - + if result_code != ResultCode::Ok { + response.events.emit( + new_tx_event(&tx, height.0) + .with(Code(result_code)) + .with(Info(format!( + "Tx rejected: {}", + &processed_tx.result.info + ))) + .with(WithGasUsed(0.into())), + ); continue; } - let ( - mut tx_event, - embedding_wrapper, - tx_gas_meter, - wrapper, - mut wrapper_args, - ) = match &tx_header.tx_type { - TxType::Wrapper(wrapper) => { - stats.increment_wrapper_txs(); - let tx_event = new_tx_event(&tx, height.0); - let gas_meter = TxGasMeter::new(wrapper.gas_limit); - ( - tx_event, - None, - gas_meter, - Some(tx.clone()), - Some(WrapperArgs { - block_proposer: &native_block_proposer_address, - is_committed_fee_unshield: false, - }), - ) - } - TxType::Decrypted(inner) => { - // We remove the corresponding wrapper tx from the queue - let tx_in_queue = self - .state - .in_mem_mut() - .tx_queue - .pop() - .expect("Missing wrapper tx in queue"); - let mut event = new_tx_event(&tx, height.0); - - match inner { - DecryptedTx::Decrypted => { - if let Some(code_sec) = tx - .get_section(tx.code_sechash()) - .and_then(|x| Section::code_sec(x.as_ref())) - { - stats.increment_tx_type( - code_sec.code.hash().to_string(), - ); - } - } - DecryptedTx::Undecryptable => { - tracing::info!( - "Tx with hash {} was un-decryptable", - tx_in_queue.tx.header_hash() + let (mut tx_event, tx_gas_meter, mut wrapper_args) = + match &tx_header.tx_type { + TxType::Wrapper(wrapper) => { + stats.increment_wrapper_txs(); + let tx_event = new_tx_event(&tx, height.0); + let gas_meter = 
TxGasMeter::new(wrapper.gas_limit); + if let Some(code_sec) = tx + .get_section(tx.code_sechash()) + .and_then(|x| Section::code_sec(x.as_ref())) + { + stats.increment_tx_type( + code_sec.code.hash().to_string(), ); - event["info"] = "Transaction is invalid.".into(); - event["log"] = - "Transaction could not be decrypted.".into(); - event["code"] = ResultCode::Undecryptable.into(); - response.events.push(event); - continue; } + ( + tx_event, + gas_meter, + Some(WrapperArgs { + block_proposer: &native_block_proposer_address, + is_committed_fee_unshield: false, + }), + ) } - - ( - event, - Some(tx_in_queue.tx), - TxGasMeter::new_from_sub_limit(tx_in_queue.gas), - None, - None, - ) - } - TxType::Raw => { - tracing::error!( - "Internal logic error: FinalizeBlock received a \ - TxType::Raw transaction" - ); - continue; - } - TxType::Protocol(protocol_tx) => match protocol_tx.tx { - ProtocolTxType::BridgePoolVext - | ProtocolTxType::BridgePool - | ProtocolTxType::ValSetUpdateVext - | ProtocolTxType::ValidatorSetUpdate => ( - new_tx_event(&tx, height.0), - None, - TxGasMeter::new_from_sub_limit(0.into()), - None, - None, - ), - ProtocolTxType::EthEventsVext => { - let ext = + TxType::Raw => { + tracing::error!( + "Internal logic error: FinalizeBlock received a \ + TxType::Raw transaction" + ); + continue; + } + TxType::Protocol(protocol_tx) => match protocol_tx.tx { + ProtocolTxType::BridgePoolVext + | ProtocolTxType::BridgePool + | ProtocolTxType::ValSetUpdateVext + | ProtocolTxType::ValidatorSetUpdate => ( + new_tx_event(&tx, height.0), + TxGasMeter::new_from_sub_limit(0.into()), + None, + ), + ProtocolTxType::EthEventsVext => { + let ext = ethereum_tx_data_variants::EthEventsVext::try_from( &tx, ) .unwrap(); - if self - .mode - .get_validator_address() - .map(|validator| { - validator == &ext.data.validator_addr - }) - .unwrap_or(false) - { - for event in ext.data.ethereum_events.iter() { - self.mode.dequeue_eth_event(event); + if self + .mode + 
.get_validator_address() + .map(|validator| { + validator == &ext.data.validator_addr + }) + .unwrap_or(false) + { + for event in ext.data.ethereum_events.iter() { + self.mode.dequeue_eth_event(event); + } } + ( + new_tx_event(&tx, height.0), + TxGasMeter::new_from_sub_limit(0.into()), + None, + ) } - ( - new_tx_event(&tx, height.0), - None, - TxGasMeter::new_from_sub_limit(0.into()), - None, - None, - ) - } - ProtocolTxType::EthereumEvents => { - let digest = + ProtocolTxType::EthereumEvents => { + let digest = ethereum_tx_data_variants::EthereumEvents::try_from( &tx, ).unwrap(); - if let Some(address) = - self.mode.get_validator_address().cloned() - { - let this_signer = &( - address, - self.state.in_mem().get_last_block_height(), - ); - for MultiSignedEthEvent { event, signers } in - &digest.events + if let Some(address) = + self.mode.get_validator_address().cloned() { - if signers.contains(this_signer) { - self.mode.dequeue_eth_event(event); + let this_signer = &( + address, + self.state.in_mem().get_last_block_height(), + ); + for MultiSignedEthEvent { event, signers } in + &digest.events + { + if signers.contains(this_signer) { + self.mode.dequeue_eth_event(event); + } } } + ( + new_tx_event(&tx, height.0), + TxGasMeter::new_from_sub_limit(0.into()), + None, + ) } - ( - new_tx_event(&tx, height.0), - None, - TxGasMeter::new_from_sub_limit(0.into()), - None, - None, - ) - } - }, - }; + }, + }; + let replay_protection_hashes = + if matches!(tx_header.tx_type, TxType::Wrapper(_)) { + Some(ReplayProtectionHashes { + raw_header_hash: tx.raw_header_hash(), + header_hash: tx.header_hash(), + }) + } else { + None + }; let tx_gas_meter = RefCell::new(tx_gas_meter); - let tx_result = protocol::check_tx_allowed(&tx, &self.state) - .and_then(|()| { - protocol::dispatch_tx( - tx, - processed_tx.tx.as_ref(), - TxIndex( - tx_index - .try_into() - .expect("transaction index out of bounds"), - ), - &tx_gas_meter, - &mut self.state, - &mut self.vp_wasm_cache, - &mut 
self.tx_wasm_cache, - wrapper_args.as_mut(), - ) - }) - .map_err(Error::TxApply); + let tx_result = protocol::dispatch_tx( + tx.clone(), + processed_tx.tx.as_ref(), + TxIndex( + tx_index + .try_into() + .expect("transaction index out of bounds"), + ), + &tx_gas_meter, + &mut self.state, + &mut self.vp_wasm_cache, + &mut self.tx_wasm_cache, + wrapper_args.as_mut(), + ) + .map_err(Error::TxApply); let tx_gas_meter = tx_gas_meter.into_inner(); + + // save the gas cost + let tx_hash = tx.header_hash(); + self.update_tx_gas( + tx_hash, + tx_gas_meter.get_tx_consumed_gas().into(), + ); + match tx_result { Ok(result) => { if result.is_accepted() { - if let EventType::Accepted = tx_event.event_type { - // Wrapper transaction - tracing::trace!( - "Wrapper transaction {} was accepted", - tx_event["hash"] - ); - if wrapper_args - .expect("Missing required wrapper arguments") - .is_committed_fee_unshield - { - tx_event["is_valid_masp_tx"] = - format!("{}", tx_index); - } - self.state.in_mem_mut().tx_queue.push(TxInQueue { - tx: wrapper.expect("Missing expected wrapper"), - gas: tx_gas_meter.get_available_gas(), - }); - } else { - tracing::trace!( - "all VPs accepted transaction {} storage \ - modification {:#?}", - tx_event["hash"], - result - ); - if result.vps_result.accepted_vps.contains( + if wrapper_args + .map(|args| args.is_committed_fee_unshield) + .unwrap_or_default() + || result.vps_result.accepted_vps.contains( &Address::Internal( address::InternalAddress::Masp, ), - ) { - tx_event["is_valid_masp_tx"] = - format!("{}", tx_index); - } - changed_keys - .extend(result.changed_keys.iter().cloned()); - stats.increment_successful_txs(); - if let Some(wrapper) = embedding_wrapper { - self.commit_inner_tx_hash(wrapper); - } + ) + { + tx_event.extend(ValidMaspTx(tx_index)); } + tracing::trace!( + "all VPs accepted transaction {} storage \ + modification {:#?}", + tx_event["hash"], + result + ); + + changed_keys + .extend(result.changed_keys.iter().cloned()); + 
changed_keys.extend( + result.wrapper_changed_keys.iter().cloned(), + ); + stats.increment_successful_txs(); + self.commit_inner_tx_hash(replay_protection_hashes); + self.state.commit_tx(); if !tx_event.contains_key("code") { - tx_event["code"] = ResultCode::Ok.into(); + tx_event.extend(Code(ResultCode::Ok)); self.state .in_mem_mut() .block @@ -433,10 +387,7 @@ where .iter() .cloned() .map(|ibc_event| { - // Add the IBC event besides the tx_event - let mut event = Event::from(ibc_event); - event["height"] = height.to_string(); - event + ibc_event.with(Height(height)).into() }) // eth bridge events .chain( @@ -447,29 +398,54 @@ where ), ); } else { + // this branch can only be reached by inner txs tracing::trace!( "some VPs rejected transaction {} storage \ modification {:#?}", tx_event["hash"], result.vps_result.rejected_vps ); + // The fee unshield operation could still have been + // committed + if wrapper_args + .map(|args| args.is_committed_fee_unshield) + .unwrap_or_default() + { + tx_event.extend(ValidMaspTx(tx_index)); + } - if let Some(wrapper) = embedding_wrapper { - // If decrypted tx failed for any reason but invalid - // signature, commit its hash to storage, otherwise - // allow for a replay - if !result.vps_result.invalid_sig { - self.commit_inner_tx_hash(wrapper); - } + // If an inner tx failed for any reason but invalid + // signature, commit its hash to storage, otherwise + // allow for a replay + if !result + .vps_result + .status_flags + .contains(VpStatusFlags::INVALID_SIGNATURE) + { + self.commit_inner_tx_hash(replay_protection_hashes); } stats.increment_rejected_txs(); self.state.drop_tx(); - tx_event["code"] = ResultCode::InvalidTx.into(); + tx_event.extend(Code(ResultCode::InvalidTx)); } - tx_event["gas_used"] = result.gas_used.to_string(); - tx_event["info"] = "Check inner_tx for result.".to_string(); - tx_event["inner_tx"] = result.to_string(); + tx_event + .extend(WithGasUsed(result.gas_used)) + .extend(Info("Check inner_tx for 
result.".to_string())) + .extend(InnerTx(&result)); + } + Err(Error::TxApply(protocol::Error::WrapperRunnerError( + msg, + ))) => { + tracing::info!( + "Wrapper transaction {} failed with: {}", + tx_event["hash"], + msg, + ); + tx_event + .extend(WithGasUsed(tx_gas_meter.get_tx_consumed_gas())) + .extend(Info(msg.to_string())) + .extend(Code(ResultCode::InvalidTx)); } Err(msg) => { tracing::info!( @@ -478,10 +454,10 @@ where msg ); - // If transaction type is Decrypted and didn't fail + // If user transaction didn't fail // because of out of gas nor invalid // section commitment, commit its hash to prevent replays - if let Some(wrapper) = embedding_wrapper { + if matches!(tx_header.tx_type, TxType::Wrapper(_)) { if !matches!( msg, Error::TxApply(protocol::Error::GasError(_)) @@ -492,7 +468,7 @@ where protocol::Error::ReplayAttempt(_) ) ) { - self.commit_inner_tx_hash(wrapper); + self.commit_inner_tx_hash(replay_protection_hashes); } else if let Error::TxApply( protocol::Error::ReplayAttempt(_), ) = msg @@ -501,38 +477,34 @@ where // hash. 
A replay of the wrapper is impossible since // the inner tx hash is committed to storage and // we validate the wrapper against that hash too - self.state - .delete_tx_hash(wrapper.header_hash()) - .expect( - "Error while deleting tx hash from storage", - ); + let header_hash = replay_protection_hashes + .expect("This cannot fail") + .header_hash; + self.state.delete_tx_hash(header_hash); } } stats.increment_errored_txs(); self.state.drop_tx(); - tx_event["gas_used"] = - tx_gas_meter.get_tx_consumed_gas().to_string(); - tx_event["info"] = msg.to_string(); - if let EventType::Accepted = tx_event.event_type { - // If wrapper, invalid tx error code - tx_event["code"] = ResultCode::InvalidTx.into(); - // The fee unshield operation could still have been - // committed - if wrapper_args - .expect("Missing required wrapper arguments") - .is_committed_fee_unshield - { - tx_event["is_valid_masp_tx"] = - format!("{}", tx_index); - } - } else { - tx_event["code"] = ResultCode::WasmRuntimeError.into(); + tx_event + .extend(WithGasUsed(tx_gas_meter.get_tx_consumed_gas())) + .extend(Info(msg.to_string())); + + // If wrapper, invalid tx error code + tx_event.extend(Code(ResultCode::InvalidTx)); + // The fee unshield operation could still have been + // committed + if wrapper_args + .map(|args| args.is_committed_fee_unshield) + .unwrap_or_default() + { + tx_event.extend(ValidMaspTx(tx_index)); } + tx_event.extend(Code(ResultCode::WasmRuntimeError)); } } - response.events.push(tx_event); + response.events.emit(tx_event); } stats.set_tx_cache_size( @@ -572,7 +544,7 @@ where native_block_proposer_address, )?; - self.event_log_mut().log_events(response.events.clone()); + self.event_log_mut().emit_many(response.events.clone()); tracing::debug!("End finalize_block {height} of epoch {current_epoch}"); Ok(response) @@ -608,6 +580,10 @@ where (height, new_epoch) } + fn update_tx_gas(&mut self, tx_hash: Hash, gas: u64) { + self.state.in_mem_mut().add_tx_gas(tx_hash, gas); + } + /// If a new 
epoch begins, we update the response to include /// changes to the validator sets and consensus parameters fn update_epoch(&mut self, response: &mut shim::response::FinalizeBlock) { @@ -631,7 +607,11 @@ where /// account, then update the reward products of the validators. This is /// executed while finalizing the first block of a new epoch and is applied /// with respect to the previous epoch. - fn apply_inflation(&mut self, current_epoch: Epoch) -> Result<()> { + fn apply_inflation( + &mut self, + current_epoch: Epoch, + events: &mut impl EmitEvents, + ) -> Result<()> { let last_epoch = current_epoch.prev(); // Get the number of blocks in the last epoch @@ -655,6 +635,16 @@ where namada::ibc::transfer_over_ibc, )?; + // Take IBC events that may be emitted from PGF + for ibc_event in self.state.write_log_mut().take_ibc_events() { + // Add the height for IBC event query + events.emit( + ibc_event.with(Height( + self.state.in_mem().get_last_block_height() + 1, + )), + ); + } + Ok(()) } @@ -662,17 +652,26 @@ where // hash since it's redundant (we check the inner tx hash too when validating // the wrapper). Requires the wrapper transaction as argument to recover // both the hashes. - fn commit_inner_tx_hash(&mut self, wrapper_tx: Tx) { - self.state - .write_tx_hash(wrapper_tx.raw_header_hash()) - .expect("Error while writing tx hash to storage"); + fn commit_inner_tx_hash(&mut self, hashes: Option) { + if let Some(ReplayProtectionHashes { + raw_header_hash, + header_hash, + }) = hashes + { + self.state + .write_tx_hash(raw_header_hash) + .expect("Error while writing tx hash to storage"); - self.state - .delete_tx_hash(wrapper_tx.header_hash()) - .expect("Error while deleting tx hash from storage"); + self.state.delete_tx_hash(header_hash) + } } } +struct ReplayProtectionHashes { + raw_header_hash: Hash, + header_hash: Hash, +} + /// Convert ABCI vote info to PoS vote info. Any info which fails the conversion /// will be skipped and errors logged. 
/// @@ -738,10 +737,11 @@ fn pos_votes_from_abci( /// are covered by the e2e tests. #[cfg(test)] mod test_finalize_block { - use std::collections::{BTreeMap, HashMap, HashSet}; + use std::collections::BTreeMap; use std::num::NonZeroU64; use std::str::FromStr; + use namada::core::collections::{HashMap, HashSet}; use namada::core::dec::{Dec, POS_DECIMAL_PRECISION}; use namada::core::ethereum_events::{EthAddress, Uint as ethUint}; use namada::core::hash::Hash; @@ -753,6 +753,7 @@ mod test_finalize_block { use namada::eth_bridge::storage::bridge_pool::{ self, get_key_from_hash, get_nonce_key, get_signed_root_key, }; + use namada::eth_bridge::storage::eth_bridge_queries::is_bridge_comptime_enabled; use namada::eth_bridge::storage::min_confirmations_key; use namada::ethereum_bridge::storage::wrapped_erc20s; use namada::governance::storage::keys::get_proposal_execution_key; @@ -780,7 +781,7 @@ mod test_finalize_block { use namada::tendermint::abci::types::{Misbehavior, MisbehaviorKind}; use namada::token::{Amount, DenominatedAmount, NATIVE_MAX_DECIMAL_PLACES}; use namada::tx::data::Fee; - use namada::tx::{Code, Data, Signature}; + use namada::tx::{Authorization, Code, Data}; use namada::vote_ext::ethereum_events; use namada_sdk::eth_bridge::MinimumConfirmations; use namada_sdk::governance::ProposalVote; @@ -788,7 +789,6 @@ mod test_finalize_block { liveness_missed_votes_handle, liveness_sum_missed_votes_handle, read_consensus_validator_set_addresses, }; - use namada_sdk::validity_predicate::VpSentinel; use namada_test_utils::tx_data::TxWriteData; use namada_test_utils::TestWasms; use test_log::test; @@ -801,7 +801,7 @@ mod test_finalize_block { FinalizeBlock, ProcessedTx, }; - const GAS_LIMIT_MULTIPLIER: u64 = 100_000_000; + const WRAPPER_GAS_LIMIT: u64 = 20_000; /// Make a wrapper tx and a processed tx from the wrapped tx that can be /// added to `FinalizeBlock` request. 
@@ -809,6 +809,7 @@ mod test_finalize_block { shell: &TestShell, keypair: &common::SecretKey, ) -> (Tx, ProcessedTx) { + let tx_code = TestWasms::TxNoOp.read_bytes(); let mut wrapper_tx = Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { @@ -817,15 +818,15 @@ mod test_finalize_block { }, keypair.ref_to(), Epoch(0), - GAS_LIMIT_MULTIPLIER.into(), + WRAPPER_GAS_LIMIT.into(), None, )))); wrapper_tx.header.chain_id = shell.chain_id.clone(); - wrapper_tx.set_code(Code::new("wasm_code".as_bytes().to_owned(), None)); wrapper_tx.set_data(Data::new( "Encrypted transaction data".as_bytes().to_owned(), )); - wrapper_tx.add_section(Section::Signature(Signature::new( + wrapper_tx.set_code(Code::new(tx_code, None)); + wrapper_tx.add_section(Section::Authorization(Authorization::new( wrapper_tx.sechashes(), [(0, keypair.clone())].into_iter().collect(), None, @@ -843,44 +844,6 @@ mod test_finalize_block { ) } - /// Make a wrapper tx and a processed tx from the wrapped tx that can be - /// added to `FinalizeBlock` request. 
- fn mk_decrypted_tx( - shell: &mut TestShell, - keypair: &common::SecretKey, - ) -> ProcessedTx { - let tx_code = TestWasms::TxNoOp.read_bytes(); - let mut outer_tx = - Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( - Fee { - amount_per_gas_unit: DenominatedAmount::native(1.into()), - token: shell.state.in_mem().native_token.clone(), - }, - keypair.ref_to(), - Epoch(0), - GAS_LIMIT_MULTIPLIER.into(), - None, - )))); - outer_tx.header.chain_id = shell.chain_id.clone(); - outer_tx.set_code(Code::new(tx_code, None)); - outer_tx.set_data(Data::new( - "Decrypted transaction data".as_bytes().to_owned(), - )); - let gas_limit = - Gas::from(outer_tx.header().wrapper().unwrap().gas_limit) - .checked_sub(Gas::from(outer_tx.to_bytes().len() as u64)) - .unwrap(); - shell.enqueue_tx(outer_tx.clone(), gas_limit); - outer_tx.update_header(TxType::Decrypted(DecryptedTx::Decrypted)); - ProcessedTx { - tx: outer_tx.to_bytes().into(), - result: TxResult { - code: ResultCode::Ok.into(), - info: "".into(), - }, - } - } - /// Check that if a wrapper tx was rejected by [`process_proposal`], /// check that the correct event is returned. 
Check that it does /// not appear in the queue of txs to be decrypted @@ -889,9 +852,8 @@ mod test_finalize_block { let (mut shell, _, _, _) = setup(); let keypair = gen_keypair(); let mut processed_txs = vec![]; - let mut valid_wrappers = vec![]; - // Add unshielded balance for fee paymenty + // Add unshielded balance for fee payment let balance_key = token::storage_key::balance_key( &shell.state.in_mem().native_token, &Address::from(&keypair.ref_to()), @@ -902,30 +864,10 @@ mod test_finalize_block { .unwrap(); // create some wrapper txs - for i in 1u64..5 { - let (wrapper, mut processed_tx) = mk_wrapper_tx(&shell, &keypair); - if i > 1 { - processed_tx.result.code = - u32::try_from(i.rem_euclid(2)).unwrap(); - processed_txs.push(processed_tx); - } else { - let wrapper_info = - if let TxType::Wrapper(w) = wrapper.header().tx_type { - w - } else { - panic!("Unexpected tx type"); - }; - shell.enqueue_tx( - wrapper.clone(), - Gas::from(wrapper_info.gas_limit) - .checked_sub(Gas::from(wrapper.to_bytes().len() as u64)) - .unwrap(), - ); - } - - if i != 3 { - valid_wrappers.push(wrapper) - } + for i in 0u64..4 { + let (_, mut processed_tx) = mk_wrapper_tx(&shell, &keypair); + processed_tx.result.code = u32::try_from(i.rem_euclid(2)).unwrap(); + processed_txs.push(processed_tx); } // check that the correct events were created @@ -937,209 +879,11 @@ mod test_finalize_block { .expect("Test failed") .iter() .enumerate() - { - assert_eq!(event.event_type.to_string(), String::from("accepted")); - let code = event.attributes.get("code").expect("Test failed"); - assert_eq!(code, &index.rem_euclid(2).to_string()); - } - // verify that the queue of wrapper txs to be processed is correct - let mut valid_tx = valid_wrappers.iter(); - let mut counter = 0; - for wrapper in shell.iter_tx_queue() { - // we cannot easily implement the PartialEq trait for WrapperTx - // so we check the hashes of the inner txs for equality - let valid_tx = valid_tx.next().expect("Test failed"); - 
assert_eq!(wrapper.tx.header.code_hash, *valid_tx.code_sechash()); - assert_eq!(wrapper.tx.header.data_hash, *valid_tx.data_sechash()); - counter += 1; - } - assert_eq!(counter, 3); - } - - /// Check that if a decrypted tx was rejected by [`process_proposal`], - /// the correct event is returned. Check that it is still - /// removed from the queue of txs to be included in the next block - /// proposal - #[test] - fn test_process_proposal_rejected_decrypted_tx() { - let (mut shell, _, _, _) = setup(); - let keypair = gen_keypair(); - let mut outer_tx = - Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( - Fee { - amount_per_gas_unit: DenominatedAmount::native( - Default::default(), - ), - token: shell.state.in_mem().native_token.clone(), - }, - keypair.ref_to(), - Epoch(0), - GAS_LIMIT_MULTIPLIER.into(), - None, - )))); - outer_tx.header.chain_id = shell.chain_id.clone(); - outer_tx.set_code(Code::new("wasm_code".as_bytes().to_owned(), None)); - outer_tx.set_data(Data::new( - String::from("transaction data").as_bytes().to_owned(), - )); - let gas_limit = - Gas::from(outer_tx.header().wrapper().unwrap().gas_limit) - .checked_sub(Gas::from(outer_tx.to_bytes().len() as u64)) - .unwrap(); - shell.enqueue_tx(outer_tx.clone(), gas_limit); - - outer_tx.update_header(TxType::Decrypted(DecryptedTx::Decrypted)); - let processed_tx = ProcessedTx { - tx: outer_tx.to_bytes().into(), - result: TxResult { - code: ResultCode::InvalidTx.into(), - info: "".into(), - }, - }; - - // check that the decrypted tx was not applied - for event in shell - .finalize_block(FinalizeBlock { - txs: vec![processed_tx], - ..Default::default() - }) - .expect("Test failed") - { - assert_eq!(event.event_type.to_string(), String::from("applied")); - let code = event.attributes.get("code").expect("Test failed"); - assert_eq!(code, &String::from(ResultCode::InvalidTx)); - } - // check that the corresponding wrapper tx was removed from the queue - assert!(shell.state.in_mem().tx_queue.is_empty()); - } 
- - /// Test that if a tx is undecryptable, it is applied - /// but the tx result contains the appropriate error code. - #[test] - fn test_undecryptable_returns_error_code() { - let (mut shell, _, _, _) = setup(); - - let keypair = crate::wallet::defaults::daewon_keypair(); - // not valid tx bytes - let wrapper = Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( - Fee { - amount_per_gas_unit: DenominatedAmount::native(0.into()), - token: shell.state.in_mem().native_token.clone(), - }, - keypair.ref_to(), - Epoch(0), - GAS_LIMIT_MULTIPLIER.into(), - None, - )))); - let processed_tx = ProcessedTx { - tx: Tx::from_type(TxType::Decrypted(DecryptedTx::Undecryptable)) - .to_bytes() - .into(), - result: TxResult { - code: ResultCode::Ok.into(), - info: "".into(), - }, - }; - - let gas_limit = - Gas::from(wrapper.header().wrapper().unwrap().gas_limit) - .checked_sub(Gas::from(wrapper.to_bytes().len() as u64)) - .unwrap(); - shell.enqueue_tx(wrapper, gas_limit); - - // check that correct error message is returned - for event in shell - .finalize_block(FinalizeBlock { - txs: vec![processed_tx], - ..Default::default() - }) - .expect("Test failed") { assert_eq!(event.event_type.to_string(), String::from("applied")); let code = event.attributes.get("code").expect("Test failed"); - assert_eq!(code, &String::from(ResultCode::Undecryptable)); - let log = event.attributes.get("log").expect("Test failed"); - assert!(log.contains("Transaction could not be decrypted.")) - } - // check that the corresponding wrapper tx was removed from the queue - assert!(shell.state.in_mem().tx_queue.is_empty()); - } - - /// Test that the wrapper txs are queued in the order they - /// are received from the block. Tests that the previously - /// decrypted txs are de-queued. 
- #[test] - fn test_mixed_txs_queued_in_correct_order() { - let (mut shell, _, _, _) = setup(); - let keypair = gen_keypair(); - let mut processed_txs = vec![]; - let mut valid_txs = vec![]; - - // Add unshielded balance for fee payment - let balance_key = token::storage_key::balance_key( - &shell.state.in_mem().native_token, - &Address::from(&keypair.ref_to()), - ); - shell - .state - .write(&balance_key, Amount::native_whole(1000)) - .unwrap(); - - // create two decrypted txs - for _ in 0..2 { - processed_txs.push(mk_decrypted_tx(&mut shell, &keypair)); - } - // create two wrapper txs - for _ in 0..2 { - let (tx, processed_tx) = mk_wrapper_tx(&shell, &keypair); - valid_txs.push(tx.clone()); - processed_txs.push(processed_tx); - } - // Put the wrapper txs in front of the decrypted txs - processed_txs.rotate_left(2); - // check that the correct events were created - for (index, event) in shell - .finalize_block(FinalizeBlock { - txs: processed_txs, - ..Default::default() - }) - .expect("Test failed") - .iter() - .enumerate() - { - if index < 2 { - // these should be accepted wrapper txs - assert_eq!( - event.event_type.to_string(), - String::from("accepted") - ); - let code = - event.attributes.get("code").expect("Test failed").as_str(); - assert_eq!(code, String::from(ResultCode::Ok).as_str()); - } else { - // these should be accepted decrypted txs - assert_eq!( - event.event_type.to_string(), - String::from("applied") - ); - let code = - event.attributes.get("code").expect("Test failed").as_str(); - assert_eq!(code, String::from(ResultCode::Ok).as_str()); - } - } - - // check that the applied decrypted txs were dequeued and the - // accepted wrappers were enqueued in correct order - let mut txs = valid_txs.iter(); - - let mut counter = 0; - for wrapper in shell.iter_tx_queue() { - let next = txs.next().expect("Test failed"); - assert_eq!(wrapper.tx.header.code_hash, *next.code_sechash()); - assert_eq!(wrapper.tx.header.data_hash, *next.data_sechash()); - counter 
+= 1; + assert_eq!(code, &index.rem_euclid(2).to_string()); } - assert_eq!(counter, 2); } /// Test if a rejected protocol tx is applied and emits @@ -1328,6 +1072,11 @@ mod test_finalize_block { where F: FnOnce(&mut TestShell) -> (Tx, TestBpAction), { + if !is_bridge_comptime_enabled() { + // NOTE: this test doesn't work if the ethereum bridge + // is disabled at compile time. + return; + } let (mut shell, _, _, _) = setup_at_height(1u64); namada::eth_bridge::test_utils::commit_bridge_pool_root_at_height( &mut shell.state, @@ -1496,7 +1245,10 @@ mod test_finalize_block { ) .unwrap(); shell.state.in_mem_mut().next_epoch_min_start_height = BlockHeight(5); - shell.state.in_mem_mut().next_epoch_min_start_time = DateTimeUtc::now(); + shell.state.in_mem_mut().next_epoch_min_start_time = { + #[allow(clippy::disallowed_methods)] + DateTimeUtc::now() + }; let txs_key = gen_keypair(); // Add unshielded balance for fee payment @@ -1512,16 +1264,14 @@ mod test_finalize_block { // Add a proposal to be executed on next epoch change. 
let mut add_proposal = |proposal_id, vote| { let validator = shell.mode.get_validator_address().unwrap().clone(); - shell.proposal_data.insert(proposal_id); let proposal = InitProposalData { - id: proposal_id, content: Hash::default(), author: validator.clone(), voting_start_epoch: Epoch::default(), voting_end_epoch: Epoch::default().next(), - grace_epoch: Epoch::default().next(), - r#type: ProposalType::Default(None), + activation_epoch: Epoch::default().next(), + r#type: ProposalType::Default, }; namada::governance::init_proposal( @@ -1605,10 +1355,6 @@ mod test_finalize_block { for _ in 0..20 { // Add some txs let mut txs = vec![]; - // create two decrypted txs - for _ in 0..2 { - txs.push(mk_decrypted_tx(&mut shell, &txs_key)); - } // create two wrapper txs for _ in 0..2 { let (_tx, processed_tx) = mk_wrapper_tx(&shell, &txs_key); @@ -2704,15 +2450,8 @@ mod test_finalize_block { ..Default::default() }) .expect("Test failed")[0]; - assert_eq!(event.event_type.to_string(), String::from("accepted")); - let code = event - .attributes - .get("code") - .expect( - "Test - failed", - ) - .as_str(); + assert_eq!(event.event_type.to_string(), String::from("applied")); + let code = event.attributes.get("code").expect("Test failed").as_str(); assert_eq!(code, String::from(ResultCode::Ok).as_str()); // the merkle tree root should not change after finalize_block @@ -2725,7 +2464,7 @@ mod test_finalize_block { .shell .state .write_log() - .has_replay_protection_entry(&wrapper_tx.header_hash()) + .has_replay_protection_entry(&wrapper_tx.raw_header_hash()) .unwrap_or_default() ); // Check that the hash is present in the merkle tree @@ -2741,14 +2480,13 @@ mod test_finalize_block { ); } - /// Test that a decrypted tx that has already been applied in the same block + /// Test that a tx that has already been applied in the same block /// doesn't get reapplied #[test] - fn test_duplicated_decrypted_tx_same_block() { + fn test_duplicated_tx_same_block() { let (mut shell, _, _, _) = 
setup(); - let keypair = gen_keypair(); - let keypair_2 = gen_keypair(); - let mut batch = namada::state::testing::TestState::batch(); + let keypair = crate::wallet::defaults::albert_keypair(); + let keypair_2 = crate::wallet::defaults::bertha_keypair(); let tx_code = TestWasms::TxNoOp.read_bytes(); let mut wrapper = @@ -2759,14 +2497,12 @@ mod test_finalize_block { }, keypair.ref_to(), Epoch(0), - GAS_LIMIT_MULTIPLIER.into(), + WRAPPER_GAS_LIMIT.into(), None, )))); wrapper.header.chain_id = shell.chain_id.clone(); wrapper.set_code(Code::new(tx_code, None)); - wrapper.set_data(Data::new( - "Decrypted transaction data".as_bytes().to_owned(), - )); + wrapper.set_data(Data::new("transaction data".as_bytes().to_owned())); let mut new_wrapper = wrapper.clone(); new_wrapper.update_header(TxType::Wrapper(Box::new(WrapperTx::new( @@ -2776,40 +2512,24 @@ mod test_finalize_block { }, keypair_2.ref_to(), Epoch(0), - GAS_LIMIT_MULTIPLIER.into(), + WRAPPER_GAS_LIMIT.into(), None, )))); - new_wrapper.add_section(Section::Signature(Signature::new( + new_wrapper.add_section(Section::Authorization(Authorization::new( new_wrapper.sechashes(), [(0, keypair_2)].into_iter().collect(), None, ))); - wrapper.add_section(Section::Signature(Signature::new( + wrapper.add_section(Section::Authorization(Authorization::new( wrapper.sechashes(), [(0, keypair)].into_iter().collect(), None, ))); - let mut inner = wrapper.clone(); - let mut new_inner = new_wrapper.clone(); - - for inner in [&mut inner, &mut new_inner] { - inner.update_header(TxType::Decrypted(DecryptedTx::Decrypted)); - } - - // Write wrapper hashes in storage - for tx in [&wrapper, &new_wrapper] { - let hash_subkey = replay_protection::last_key(&tx.header_hash()); - shell - .state - .write_replay_protection_entry(&mut batch, &hash_subkey) - .expect("Test failed"); - } - let mut processed_txs: Vec = vec![]; - for inner in [&inner, &new_inner] { + for tx in [&wrapper, &new_wrapper] { processed_txs.push(ProcessedTx { - tx: 
inner.to_bytes().into(), + tx: tx.to_bytes().into(), result: TxResult { code: ResultCode::Ok.into(), info: "".into(), @@ -2817,8 +2537,6 @@ mod test_finalize_block { }) } - shell.enqueue_tx(wrapper.clone(), GAS_LIMIT_MULTIPLIER.into()); - shell.enqueue_tx(new_wrapper.clone(), GAS_LIMIT_MULTIPLIER.into()); // merkle tree root before finalize_block let root_pre = shell.shell.state.in_mem().block.tree.root(); @@ -2840,12 +2558,12 @@ mod test_finalize_block { let code = event[1].attributes.get("code").unwrap().as_str(); assert_eq!(code, String::from(ResultCode::WasmRuntimeError).as_str()); - for (inner, wrapper) in [(inner, wrapper), (new_inner, new_wrapper)] { + for wrapper in [&wrapper, &new_wrapper] { assert!( shell .state .write_log() - .has_replay_protection_entry(&inner.raw_header_hash()) + .has_replay_protection_entry(&wrapper.raw_header_hash()) .unwrap_or_default() ); assert!( @@ -2858,23 +2576,48 @@ mod test_finalize_block { } } - /// Test that if a decrypted transaction fails because of out-of-gas, - /// undecryptable, invalid signature or wrong section commitment, its hash + /// Test that if a transaction fails because of out-of-gas, + /// invalid signature or wrong section commitment, its hash /// is not committed to storage. Also checks that a tx failing for other /// reason has its hash written to storage. 
#[test] fn test_tx_hash_handling() { let (mut shell, _, _, _) = setup(); - let keypair = gen_keypair(); - let mut batch = namada::state::testing::TestState::batch(); + let keypair = crate::wallet::defaults::bertha_keypair(); + let mut out_of_gas_wrapper = { + let tx_code = TestWasms::TxNoOp.read_bytes(); + let mut wrapper_tx = + Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( + Fee { + amount_per_gas_unit: DenominatedAmount::native( + 1.into(), + ), + token: shell.state.in_mem().native_token.clone(), + }, + keypair.ref_to(), + Epoch(0), + 0.into(), + None, + )))); + wrapper_tx.header.chain_id = shell.chain_id.clone(); + wrapper_tx.set_data(Data::new( + "Encrypted transaction data".as_bytes().to_owned(), + )); + wrapper_tx.set_code(Code::new(tx_code, None)); + wrapper_tx.add_section(Section::Authorization(Authorization::new( + wrapper_tx.sechashes(), + [(0, keypair.clone())].into_iter().collect(), + None, + ))); + wrapper_tx + }; - let (out_of_gas_wrapper, _) = mk_wrapper_tx(&shell, &keypair); - let (undecryptable_wrapper, _) = mk_wrapper_tx(&shell, &keypair); let mut wasm_path = top_level_directory(); // Write a key to trigger the vp to validate the signature wasm_path.push("wasm_for_tests/tx_write.wasm"); let tx_code = std::fs::read(wasm_path) .expect("Expected a file at given code path"); + let mut unsigned_wrapper = Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { @@ -2885,11 +2628,13 @@ mod test_finalize_block { }, keypair.ref_to(), Epoch(0), - GAS_LIMIT_MULTIPLIER.into(), + WRAPPER_GAS_LIMIT.into(), None, )))); unsigned_wrapper.header.chain_id = shell.chain_id.clone(); + let mut failing_wrapper = unsigned_wrapper.clone(); + unsigned_wrapper.set_code(Code::new(tx_code, None)); let addr = Address::from(&keypair.to_public()); let key = Key::from(addr.to_db_key()) @@ -2901,6 +2646,7 @@ mod test_finalize_block { }) .unwrap(), )); + let mut wasm_path = top_level_directory(); wasm_path.push("wasm_for_tests/tx_fail.wasm"); let tx_code = 
std::fs::read(wasm_path) @@ -2909,55 +2655,38 @@ mod test_finalize_block { failing_wrapper.set_data(Data::new( "Encrypted transaction data".as_bytes().to_owned(), )); - let mut wrong_commitment_wrapper = failing_wrapper.clone(); - wrong_commitment_wrapper.set_code_sechash(Hash::default()); - let mut out_of_gas_inner = out_of_gas_wrapper.clone(); - let mut undecryptable_inner = undecryptable_wrapper.clone(); - let mut unsigned_inner = unsigned_wrapper.clone(); - let mut wrong_commitment_inner = failing_wrapper.clone(); + let mut wrong_commitment_wrapper = failing_wrapper.clone(); + let tx_code = TestWasms::TxInvalidData.read_bytes(); + wrong_commitment_wrapper.set_code(Code::new(tx_code, None)); + wrong_commitment_wrapper + .sections + .retain(|sec| !matches!(sec, Section::Data(_))); // Add some extra data to avoid having the same Tx hash as the // `failing_wrapper` - wrong_commitment_inner.add_memo(&[0_u8]); - let mut failing_inner = failing_wrapper.clone(); - - undecryptable_inner - .update_header(TxType::Decrypted(DecryptedTx::Undecryptable)); - for inner in [ - &mut out_of_gas_inner, - &mut unsigned_inner, - &mut wrong_commitment_inner, - &mut failing_inner, - ] { - inner.update_header(TxType::Decrypted(DecryptedTx::Decrypted)); - } + wrong_commitment_wrapper.add_memo(&[0_u8]); - // Write wrapper hashes in storage - for wrapper in [ - &out_of_gas_wrapper, - &undecryptable_wrapper, - &unsigned_wrapper, - &wrong_commitment_wrapper, - &failing_wrapper, + let mut processed_txs: Vec = vec![]; + for tx in [ + &mut out_of_gas_wrapper, + &mut wrong_commitment_wrapper, + &mut failing_wrapper, ] { - let hash_subkey = - replay_protection::last_key(&wrapper.header_hash()); - shell - .state - .write_replay_protection_entry(&mut batch, &hash_subkey) - .unwrap(); + tx.sign_raw( + vec![keypair.clone()], + vec![keypair.ref_to()].into_iter().collect(), + None, + ); } - - let mut processed_txs: Vec = vec![]; - for inner in [ - &out_of_gas_inner, - &undecryptable_inner, - 
&unsigned_inner, - &wrong_commitment_inner, - &failing_inner, + for tx in [ + &mut out_of_gas_wrapper, + &mut unsigned_wrapper, + &mut wrong_commitment_wrapper, + &mut failing_wrapper, ] { + tx.sign_wrapper(keypair.clone()); processed_txs.push(ProcessedTx { - tx: inner.to_bytes().into(), + tx: tx.to_bytes().into(), result: TxResult { code: ResultCode::Ok.into(), info: "".into(), @@ -2965,17 +2694,6 @@ mod test_finalize_block { }) } - shell.enqueue_tx(out_of_gas_wrapper.clone(), Gas::default()); - shell.enqueue_tx( - undecryptable_wrapper.clone(), - GAS_LIMIT_MULTIPLIER.into(), - ); - shell.enqueue_tx(unsigned_wrapper.clone(), u64::MAX.into()); // Prevent out of gas which would still make the test pass - shell.enqueue_tx( - wrong_commitment_wrapper.clone(), - GAS_LIMIT_MULTIPLIER.into(), - ); - shell.enqueue_tx(failing_wrapper.clone(), GAS_LIMIT_MULTIPLIER.into()); // merkle tree root before finalize_block let root_pre = shell.shell.state.in_mem().block.tree.root(); @@ -2992,38 +2710,35 @@ mod test_finalize_block { assert_eq!(event[0].event_type.to_string(), String::from("applied")); let code = event[0].attributes.get("code").unwrap().as_str(); - assert_eq!(code, String::from(ResultCode::WasmRuntimeError).as_str()); + assert_eq!(code, String::from(ResultCode::InvalidTx).as_str()); assert_eq!(event[1].event_type.to_string(), String::from("applied")); let code = event[1].attributes.get("code").unwrap().as_str(); - assert_eq!(code, String::from(ResultCode::Undecryptable).as_str()); + assert_eq!(code, String::from(ResultCode::InvalidTx).as_str()); assert_eq!(event[2].event_type.to_string(), String::from("applied")); let code = event[2].attributes.get("code").unwrap().as_str(); - assert_eq!(code, String::from(ResultCode::InvalidTx).as_str()); + assert_eq!(code, String::from(ResultCode::WasmRuntimeError).as_str()); assert_eq!(event[3].event_type.to_string(), String::from("applied")); let code = event[3].attributes.get("code").unwrap().as_str(); assert_eq!(code, 
String::from(ResultCode::WasmRuntimeError).as_str()); - assert_eq!(event[4].event_type.to_string(), String::from("applied")); - let code = event[4].attributes.get("code").unwrap().as_str(); - assert_eq!(code, String::from(ResultCode::WasmRuntimeError).as_str()); - for (invalid_inner, valid_wrapper) in [ - (out_of_gas_inner, out_of_gas_wrapper), - (undecryptable_inner, undecryptable_wrapper), - (unsigned_inner, unsigned_wrapper), - (wrong_commitment_inner, wrong_commitment_wrapper), + for valid_wrapper in [ + out_of_gas_wrapper, + unsigned_wrapper, + wrong_commitment_wrapper, ] { assert!( !shell .state .write_log() .has_replay_protection_entry( - &invalid_inner.raw_header_hash() + &valid_wrapper.raw_header_hash() ) .unwrap_or_default() ); assert!( shell .state + .write_log() .has_replay_protection_entry(&valid_wrapper.header_hash()) .unwrap_or_default() ); @@ -3032,7 +2747,7 @@ mod test_finalize_block { shell .state .write_log() - .has_replay_protection_entry(&failing_inner.raw_header_hash()) + .has_replay_protection_entry(&failing_wrapper.raw_header_hash()) .expect("test failed") ); assert!( @@ -3046,8 +2761,8 @@ mod test_finalize_block { #[test] /// Test that the hash of the wrapper transaction is committed to storage - /// even if the wrapper tx fails. The inner transaction hash must instead be - /// removed + /// even if the wrapper tx fails. 
The inner transaction hash must not be + /// inserted fn test_commits_hash_if_wrapper_failure() { let (mut shell, _, _, _) = setup(); let keypair = gen_keypair(); @@ -3068,7 +2783,7 @@ mod test_finalize_block { wrapper.set_data(Data::new( "Encrypted transaction data".as_bytes().to_owned(), )); - wrapper.add_section(Section::Signature(Signature::new( + wrapper.add_section(Section::Authorization(Authorization::new( wrapper.sechashes(), [(0, keypair)].into_iter().collect(), None, @@ -3099,7 +2814,7 @@ mod test_finalize_block { let root_post = shell.shell.state.in_mem().block.tree.root(); assert_eq!(root_pre.0, root_post.0); - assert_eq!(event[0].event_type.to_string(), String::from("accepted")); + assert_eq!(event[0].event_type.to_string(), String::from("applied")); let code = event[0] .attributes .get("code") @@ -3123,6 +2838,79 @@ mod test_finalize_block { ); } + // Test that the fees are paid even if the inner transaction fails and its + // modifications are dropped + #[test] + fn test_fee_payment_if_invalid_inner_tx() { + let (mut shell, _, _, _) = setup(); + let keypair = crate::wallet::defaults::albert_keypair(); + + let mut wrapper = + Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( + Fee { + amount_per_gas_unit: DenominatedAmount::native(100.into()), + token: shell.state.in_mem().native_token.clone(), + }, + keypair.ref_to(), + Epoch(0), + WRAPPER_GAS_LIMIT.into(), + None, + )))); + wrapper.header.chain_id = shell.chain_id.clone(); + // Set no code to let the inner tx fail + wrapper.add_section(Section::Authorization(Authorization::new( + wrapper.sechashes(), + [(0, keypair.clone())].into_iter().collect(), + None, + ))); + + let fee_amount = + wrapper.header().wrapper().unwrap().get_tx_fee().unwrap(); + let fee_amount = namada::token::denom_to_amount( + fee_amount, + &wrapper.header().wrapper().unwrap().fee.token, + &shell.state, + ) + .unwrap(); + let signer_balance = namada::token::read_balance( + &shell.state, + &shell.state.in_mem().native_token, + 
&wrapper.header().wrapper().unwrap().fee_payer(), + ) + .unwrap(); + + let processed_tx = ProcessedTx { + tx: wrapper.to_bytes().into(), + result: TxResult { + code: ResultCode::Ok.into(), + info: "".into(), + }, + }; + + let event = &shell + .finalize_block(FinalizeBlock { + txs: vec![processed_tx], + ..Default::default() + }) + .expect("Test failed")[0]; + + // Check balance of fee payer + assert_eq!(event.event_type.to_string(), String::from("applied")); + let code = event.attributes.get("code").expect("Test failed").as_str(); + assert_eq!(code, String::from(ResultCode::WasmRuntimeError).as_str()); + + let new_signer_balance = namada::token::read_balance( + &shell.state, + &shell.state.in_mem().native_token, + &wrapper.header().wrapper().unwrap().fee_payer(), + ) + .unwrap(); + assert_eq!( + new_signer_balance, + signer_balance.checked_sub(fee_amount).unwrap() + ) + } + // Test that if the fee payer doesn't have enough funds for fee payment the // ledger drains their balance. Note that because of the checks in process // proposal this scenario should never happen @@ -3130,16 +2918,34 @@ mod test_finalize_block { fn test_fee_payment_if_insufficient_balance() { let (mut shell, _, _, _) = setup(); let keypair = gen_keypair(); + let native_token = shell.state.in_mem().native_token.clone(); + + // Credit some tokens for fee payment + let initial_balance = token::Amount::native_whole(1); + namada::token::credit_tokens( + &mut shell.state, + &native_token, + &Address::from(&keypair.to_public()), + initial_balance, + ) + .unwrap(); + let balance_key = token::storage_key::balance_key( + &native_token, + &Address::from(&keypair.to_public()), + ); + let balance: Amount = + shell.state.read(&balance_key).unwrap().unwrap_or_default(); + assert_eq!(balance, initial_balance); let mut wrapper = Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { amount_per_gas_unit: DenominatedAmount::native(100.into()), - token: shell.state.in_mem().native_token.clone(), + token: 
native_token.clone(), }, keypair.ref_to(), Epoch(0), - GAS_LIMIT_MULTIPLIER.into(), + WRAPPER_GAS_LIMIT.into(), None, )))); wrapper.header.chain_id = shell.chain_id.clone(); @@ -3147,12 +2953,24 @@ mod test_finalize_block { wrapper.set_data(Data::new( "Encrypted transaction data".as_bytes().to_owned(), )); - wrapper.add_section(Section::Signature(Signature::new( + wrapper.add_section(Section::Authorization(Authorization::new( wrapper.sechashes(), [(0, keypair.clone())].into_iter().collect(), None, ))); + // Check that the fees are higher than the initial balance of the fee + // payer + let fee_amount = + wrapper.header().wrapper().unwrap().get_tx_fee().unwrap(); + let fee_amount = namada::token::denom_to_amount( + fee_amount, + &wrapper.header().wrapper().unwrap().fee.token, + &shell.state, + ) + .unwrap(); + assert!(fee_amount > initial_balance); + let processed_tx = ProcessedTx { tx: wrapper.to_bytes().into(), result: TxResult { @@ -3169,13 +2987,9 @@ mod test_finalize_block { .expect("Test failed")[0]; // Check balance of fee payer is 0 - assert_eq!(event.event_type.to_string(), String::from("accepted")); - let code = event.attributes.get("code").expect("Testfailed").as_str(); + assert_eq!(event.event_type.to_string(), String::from("applied")); + let code = event.attributes.get("code").expect("Test failed").as_str(); assert_eq!(code, String::from(ResultCode::InvalidTx).as_str()); - let balance_key = token::storage_key::balance_key( - &shell.state.in_mem().native_token, - &Address::from(&keypair.to_public()), - ); let balance: Amount = shell.state.read(&balance_key).unwrap().unwrap_or_default(); @@ -3227,10 +3041,8 @@ mod test_finalize_block { )))); wrapper.header.chain_id = shell.chain_id.clone(); wrapper.set_code(Code::new(tx_code, None)); - wrapper.set_data(Data::new( - "Enxrypted transaction data".as_bytes().to_owned(), - )); - wrapper.add_section(Section::Signature(Signature::new( + wrapper.set_data(Data::new("Transaction data".as_bytes().to_owned())); + 
wrapper.add_section(Section::Authorization(Authorization::new( wrapper.sechashes(), [(0, crate::wallet::defaults::albert_keypair())] .into_iter() @@ -3270,7 +3082,7 @@ mod test_finalize_block { .expect("Test failed")[0]; // Check fee payment - assert_eq!(event.event_type.to_string(), String::from("accepted")); + assert_eq!(event.event_type.to_string(), String::from("applied")); let code = event.attributes.get("code").expect("Test failed").as_str(); assert_eq!(code, String::from(ResultCode::Ok).as_str()); @@ -3954,7 +3766,7 @@ mod test_finalize_block { let enqueued_slash = enqueued_slashes_handle() .at(&processing_epoch) .at(&val1.address) - .front(&shell.state) + .get(&shell.state, &height.0) .unwrap() .unwrap(); assert_eq!(enqueued_slash.epoch, misbehavior_epoch); @@ -3991,7 +3803,7 @@ mod test_finalize_block { address: pkh1, power: Default::default(), }, - height: height.try_into().unwrap(), + height: height.next_height().try_into().unwrap(), time: tendermint::Time::unix_epoch(), total_voting_power: Default::default(), }, @@ -4024,8 +3836,13 @@ mod test_finalize_block { .at(&processing_epoch.next()) .at(&val1.address); - assert_eq!(enqueued_slashes_8.len(&shell.state).unwrap(), 2_u64); - assert_eq!(enqueued_slashes_9.len(&shell.state).unwrap(), 1_u64); + let num_enqueued_8 = + enqueued_slashes_8.iter(&shell.state).unwrap().count(); + let num_enqueued_9 = + enqueued_slashes_9.iter(&shell.state).unwrap().count(); + + assert_eq!(num_enqueued_8, 2); + assert_eq!(num_enqueued_9, 1); let last_slash = namada_proof_of_stake::storage::read_validator_last_slash_epoch( &shell.state, @@ -4699,12 +4516,9 @@ mod test_finalize_block { )?; assert_eq!( consensus_vals, - HashSet::from_iter([ - val1.clone(), - val2.clone(), - val3.clone(), - val4.clone() - ]) + [val1.clone(), val2.clone(), val3.clone(), val4.clone()] + .into_iter() + .collect::>(), ); for offset in 1..=params.pipeline_len { let consensus_vals = read_consensus_validator_set_addresses( @@ -4713,7 +4527,9 @@ mod 
test_finalize_block { )?; assert_eq!( consensus_vals, - HashSet::from_iter([val1.clone(), val3.clone(), val4.clone()]) + [val1.clone(), val3.clone(), val4.clone()] + .into_iter() + .collect::>() ); let val2_state = validator_state_handle(&val2) .get(&shell.state, current_epoch + offset, ¶ms)? @@ -4891,6 +4707,11 @@ mod test_finalize_block { /// Test that updating the ethereum bridge params via governance works. #[tokio::test] async fn test_eth_bridge_param_updates() { + if !is_bridge_comptime_enabled() { + // NOTE: this test doesn't work if the ethereum bridge + // is disabled at compile time. + return; + } let (mut shell, _broadcaster, _, mut control_receiver) = setup_at_height(3u64); let proposal_execution_key = get_proposal_execution_key(0); @@ -4912,27 +4733,29 @@ mod test_finalize_block { )); let keys_changed = BTreeSet::from([min_confirmations_key()]); let verifiers = BTreeSet::default(); - let sentinel = RefCell::new(VpSentinel::default()); let ctx = namada::ledger::native_vp::Ctx::new( shell.mode.get_validator_address().expect("Test failed"), shell.state.read_only(), &tx, &TxIndex(0), &gas_meter, - &sentinel, &keys_changed, &verifiers, shell.vp_wasm_cache.clone(), ); let parameters = ParametersVp { ctx }; - let result = parameters - .validate_tx(&tx, &keys_changed, &verifiers) - .expect("Test failed"); - assert!(result); + assert!( + parameters + .validate_tx(&tx, &keys_changed, &verifiers) + .is_ok() + ); // we advance forward to the next epoch let mut req = FinalizeBlock::default(); - req.header.time = namada::core::time::DateTimeUtc::now(); + req.header.time = { + #[allow(clippy::disallowed_methods)] + namada::core::time::DateTimeUtc::now() + }; let current_decision_height = shell.get_current_decision_height(); if let Some(b) = shell.state.in_mem_mut().last_block.as_mut() { b.height = current_decision_height + 11; diff --git a/crates/apps/src/lib/node/ledger/shell/governance.rs b/crates/apps/src/lib/node/ledger/shell/governance.rs index 
e568f5e212..b72a7b889a 100644 --- a/crates/apps/src/lib/node/ledger/shell/governance.rs +++ b/crates/apps/src/lib/node/ledger/shell/governance.rs @@ -1,26 +1,29 @@ -use std::collections::HashMap; - +use namada::core::collections::HashMap; use namada::core::encode; use namada::core::event::EmitEvents; use namada::core::storage::Epoch; use namada::governance::pgf::storage::keys as pgf_storage; use namada::governance::pgf::storage::steward::StewardDetail; use namada::governance::pgf::{storage as pgf, ADDRESS}; -use namada::governance::storage::keys as gov_storage; use namada::governance::storage::proposal::{ AddRemove, PGFAction, PGFTarget, ProposalType, StoragePgfFunding, }; +use namada::governance::storage::{keys as gov_storage, load_proposals}; use namada::governance::utils::{ - compute_proposal_result, ProposalVotes, TallyResult, TallyType, TallyVote, - VotePower, + compute_proposal_result, ProposalVotes, TallyResult, TallyType, VotePower, +}; +use namada::governance::{ + storage as gov_api, ProposalVote, ADDRESS as gov_address, }; -use namada::governance::{storage as gov_api, ADDRESS as gov_address}; use namada::ibc; +use namada::ledger::events::extend::{ComposeEvent, Height}; use namada::ledger::governance::utils::ProposalEvent; -use namada::ledger::pos::BondId; use namada::proof_of_stake::bond_amount; use namada::proof_of_stake::parameters::PosParams; -use namada::proof_of_stake::storage::read_total_stake; +use namada::proof_of_stake::storage::{ + read_total_active_stake, validator_state_handle, +}; +use namada::proof_of_stake::types::{BondId, ValidatorState}; use namada::state::StorageWrite; use namada::tx::{Code, Data}; use namada_sdk::proof_of_stake::storage::read_validator_stake; @@ -31,6 +34,7 @@ use super::*; pub fn finalize_block( shell: &mut Shell, events: &mut impl EmitEvents, + current_epoch: Epoch, is_new_epoch: bool, ) -> Result<()> where @@ -38,7 +42,7 @@ where H: 'static + StorageHasher + Sync, { if is_new_epoch { - 
execute_governance_proposals(shell, events)?; + load_and_execute_governance_proposals(shell, events, current_epoch)?; } Ok(()) } @@ -49,9 +53,27 @@ pub struct ProposalsResult { rejected: Vec, } +pub fn load_and_execute_governance_proposals( + shell: &mut Shell, + events: &mut impl EmitEvents, + current_epoch: Epoch, +) -> Result +where + D: DB + for<'iter> DBIter<'iter> + Sync + 'static, + H: StorageHasher + Sync + 'static, +{ + let proposal_ids = load_proposals(&shell.state, current_epoch)?; + + let proposals_result = + execute_governance_proposals(shell, events, proposal_ids)?; + + Ok(proposals_result) +} + fn execute_governance_proposals( shell: &mut Shell, events: &mut impl EmitEvents, + proposal_ids: BTreeSet, ) -> Result where D: DB + for<'iter> DBIter<'iter> + Sync + 'static, @@ -59,7 +81,7 @@ where { let mut proposals_result = ProposalsResult::default(); - for id in std::mem::take(&mut shell.proposal_data) { + for id in proposal_ids { let proposal_funds_key = gov_storage::get_funds_key(id); let proposal_end_epoch_key = gov_storage::get_voting_end_epoch_key(id); let proposal_type_key = gov_storage::get_proposal_type_key(id); @@ -77,8 +99,8 @@ where let is_steward = pgf::is_steward(&shell.state, &proposal_author)?; let params = read_pos_params(&shell.state)?; - let total_voting_power = - read_total_stake(&shell.state, ¶ms, proposal_end_epoch)?; + let total_active_voting_power = + read_total_active_stake(&shell.state, ¶ms, proposal_end_epoch)?; let tally_type = TallyType::from(proposal_type.clone(), is_steward); let votes = compute_proposal_votes( @@ -87,14 +109,37 @@ where id, proposal_end_epoch, )?; - let proposal_result = - compute_proposal_result(votes, total_voting_power, tally_type); + let proposal_result = compute_proposal_result( + votes, + total_active_voting_power, + tally_type, + ); gov_api::write_proposal_result(&mut shell.state, id, proposal_result)?; let transfer_address = match proposal_result.result { TallyResult::Passed => { let proposal_event 
= match proposal_type { - ProposalType::Default(_) => { + ProposalType::Default => { + let proposal_code = + gov_api::get_proposal_code(&shell.state, id)?; + let result = execute_default_proposal( + shell, + id, + proposal_code.clone(), + )?; + tracing::info!( + "Default Governance proposal {} has been executed \ + and passed.", + id, + ); + + ProposalEvent::default_proposal_event( + id, + proposal_code.is_some(), + result, + ) + } + ProposalType::DefaultWithWasm(_) => { let proposal_code = gov_api::get_proposal_code(&shell.state, id)?; let result = execute_default_proposal( @@ -103,15 +148,10 @@ where proposal_code.clone(), )?; tracing::info!( - "Governance proposal (default {} wasm) {} has \ - been executed ({}) and passed.", - if proposal_code.is_some() { - "with" - } else { - "without" - }, + "DefaultWithWasm Governance proposal {} has been \ + executed and passed, wasm executiong was {}.", id, - result + if result { "successful" } else { "unsuccessful" } ); ProposalEvent::default_proposal_event( @@ -119,7 +159,6 @@ where proposal_code.is_some(), result, ) - .into() } ProposalType::PGFSteward(stewards) => { let result = execute_pgf_steward_proposal( @@ -133,7 +172,6 @@ where ); ProposalEvent::pgf_steward_proposal_event(id, result) - .into() } ProposalType::PGFPayment(payments) => { let native_token = &shell.state.get_native_token()?; @@ -149,25 +187,20 @@ where id ); - for ibc_event in - shell.state.write_log_mut().take_ibc_events() - { - let mut event = Event::from(ibc_event.clone()); - // Add the height for IBC event query - let height = - shell.state.in_mem().get_last_block_height() - + 1; - event["height"] = height.to_string(); - events.emit(event); - } - ProposalEvent::pgf_payments_proposal_event(id, result) - .into() } }; events.emit(proposal_event); proposals_result.passed.push(id); + // Take events that could have been emitted by PGF + // over IBC, governance proposal execution, etc + for event in shell.state.write_log_mut().take_ibc_events() { + 
events.emit(event.with(Height( + shell.state.in_mem().get_last_block_height() + 1, + ))); + } + gov_api::get_proposal_author(&shell.state, id)? } TallyResult::Rejected => { @@ -188,8 +221,7 @@ where ); } } - let proposal_event = - ProposalEvent::rejected_proposal_event(id).into(); + let proposal_event = ProposalEvent::rejected_proposal_event(id); events.emit(proposal_event); proposals_result.rejected.push(id); @@ -235,28 +267,40 @@ where { let votes = gov_api::get_proposal_votes(storage, proposal_id)?; - let mut validators_vote: HashMap = HashMap::default(); + let mut validators_vote: HashMap = + HashMap::default(); let mut validator_voting_power: HashMap = HashMap::default(); - let mut delegators_vote: HashMap = HashMap::default(); + let mut delegators_vote: HashMap = + HashMap::default(); let mut delegator_voting_power: HashMap< Address, HashMap, > = HashMap::default(); for vote in votes { + // Skip votes involving jailed or inactive validators + let validator = vote.validator.clone(); + let validator_state = + validator_state_handle(&validator).get(storage, epoch, params)?; + if matches!( + validator_state, + Some(ValidatorState::Jailed) | Some(ValidatorState::Inactive) + ) { + continue; + } + + // Tally the votes involving active validators if vote.is_validator() { - let validator = vote.validator.clone(); let vote_data = vote.data.clone(); let validator_stake = read_validator_stake(storage, params, &validator, epoch) .unwrap_or_default(); - validators_vote.insert(validator.clone(), vote_data.into()); + validators_vote.insert(validator.clone(), vote_data); validator_voting_power.insert(validator, validator_stake); } else { - let validator = vote.validator.clone(); let delegator = vote.delegator.clone(); let vote_data = vote.data.clone(); @@ -267,7 +311,7 @@ where let delegator_stake = bond_amount(storage, &bond_id, epoch); if let Ok(stake) = delegator_stake { - delegators_vote.insert(delegator.clone(), vote_data.into()); + 
delegators_vote.insert(delegator.clone(), vote_data); delegator_voting_power .entry(delegator) .or_default() @@ -299,7 +343,7 @@ where let pending_execution_key = gov_storage::get_proposal_execution_key(id); shell.state.write(&pending_execution_key, ())?; - let mut tx = Tx::from_type(TxType::Decrypted(DecryptedTx::Decrypted)); + let mut tx = Tx::from_type(TxType::Raw); tx.header.chain_id = shell.chain_id.clone(); tx.set_data(Data::new(encode(&id))); tx.set_code(Code::new(code, None)); diff --git a/crates/apps/src/lib/node/ledger/shell/init_chain.rs b/crates/apps/src/lib/node/ledger/shell/init_chain.rs index 2be20a23cf..1669a5b32d 100644 --- a/crates/apps/src/lib/node/ledger/shell/init_chain.rs +++ b/crates/apps/src/lib/node/ledger/shell/init_chain.rs @@ -1,11 +1,12 @@ //! Implementation of chain initialization for the Shell -use std::collections::{BTreeMap, HashMap}; +use std::collections::BTreeMap; use std::ops::ControlFlow; use masp_primitives::merkle_tree::CommitmentTree; use masp_primitives::sapling::Node; use masp_proofs::bls12_381; use namada::account::protocol_pk_key; +use namada::core::collections::HashMap; use namada::core::hash::Hash as CodeHash; use namada::core::time::{TimeZone, Utc}; use namada::ledger::parameters::Parameters; @@ -86,7 +87,8 @@ where pub fn init_chain( &mut self, init: request::InitChain, - #[cfg(any(test, feature = "testing"))] _num_validators: u64, + #[cfg(any(test, feature = "testing", feature = "benches"))] + _num_validators: u64, ) -> Result { let mut response = response::InitChain::default(); let chain_id = self.state.in_mem().chain_id.as_str(); @@ -249,6 +251,10 @@ where .unwrap(); } + // Initialize IBC parameters + let ibc_params = genesis.get_ibc_params(); + ibc_params.init_storage(&mut self.state).unwrap(); + // Depends on parameters being initialized self.state .in_mem_mut() @@ -308,7 +314,7 @@ where genesis: &genesis::chain::Finalized, vp_cache: &mut HashMap>, ) -> ControlFlow<(), Vec> { - use 
std::collections::hash_map::Entry; + use namada::core::collections::hash_map::Entry; let Some(vp_filename) = self .validate( genesis diff --git a/crates/apps/src/lib/node/ledger/shell/mod.rs b/crates/apps/src/lib/node/ledger/shell/mod.rs index fd5968ee5b..7c880e7693 100644 --- a/crates/apps/src/lib/node/ledger/shell/mod.rs +++ b/crates/apps/src/lib/node/ledger/shell/mod.rs @@ -10,6 +10,7 @@ mod finalize_block; mod governance; mod init_chain; pub use init_chain::InitChainValidation; +use namada::vm::wasm::run::check_tx_allowed; use namada_sdk::state::StateRead; use namada_sdk::tx::data::GasLimit; pub mod prepare_proposal; @@ -55,14 +56,14 @@ use namada::ledger::protocol::{ use namada::ledger::{parameters, protocol}; use namada::parameters::validate_tx_bytes; use namada::proof_of_stake::storage::read_pos_params; -use namada::state::tx_queue::{ExpiredTx, TxInQueue}; +use namada::state::tx_queue::ExpiredTx; use namada::state::{ DBIter, FullAccessState, Sha256Hasher, StorageHasher, StorageRead, TempWlState, WlState, DB, EPOCH_SWITCH_BLOCKS_DELAY, }; use namada::token; pub use namada::tx::data::ResultCode; -use namada::tx::data::{DecryptedTx, TxType, WrapperTx, WrapperTxErr}; +use namada::tx::data::{TxType, WrapperTx, WrapperTxErr}; use namada::tx::{Section, Tx}; use namada::vm::wasm::{TxCache, VpCache}; use namada::vm::{WasmCacheAccess, WasmCacheRwAccess}; @@ -349,8 +350,6 @@ where /// limit the how many block heights in the past can the storage be /// queried for reading values. storage_read_past_height_limit: Option, - /// Proposal execution tracking - pub proposal_data: BTreeSet, /// Log of events emitted by `FinalizeBlock` ABCI calls. 
event_log: EventLog, } @@ -526,7 +525,6 @@ where tx_wasm_compilation_cache as usize, ), storage_read_past_height_limit, - proposal_data: BTreeSet::new(), // TODO: config event log params event_log: EventLog::default(), }; @@ -546,15 +544,9 @@ where &mut self.event_log } - /// Iterate over the wrapper txs in order - #[allow(dead_code)] - fn iter_tx_queue(&mut self) -> impl Iterator { - self.state.in_mem().tx_queue.iter() - } - /// Load the Merkle root hash and the height of the last committed block, if /// any. This is returned when ABCI sends an `info` request. - pub fn last_state(&mut self) -> response::Info { + pub fn last_state(&self) -> response::Info { if ledger::migrating_state().is_some() { // When migrating state, return a height of 0, such // that CometBFT calls InitChain and subsequently @@ -1055,11 +1047,22 @@ where } }, TxType::Wrapper(wrapper) => { + // Tx allowlist + if let Err(err) = check_tx_allowed(&tx, &self.state) { + response.code = ResultCode::TxNotAllowlisted.into(); + response.log = format!( + "{INVALID_MSG}: Wrapper transaction code didn't pass \ + the allowlist checks {}", + err + ); + return response; + } + // Tx gas limit let mut gas_meter = TxGasMeter::new(wrapper.gas_limit); if gas_meter.add_wrapper_gas(tx_bytes).is_err() { response.code = ResultCode::TxGasLimit.into(); - response.log = "{INVALID_MSG}: Wrapper transactions \ + response.log = "{INVALID_MSG}: Wrapper transaction \ exceeds its gas limit" .to_string(); return response; @@ -1128,12 +1131,6 @@ where the mempool" ); } - TxType::Decrypted(_) => { - response.code = ResultCode::InvalidTx.into(); - response.log = format!( - "{INVALID_MSG}: Decrypted txs cannot be sent by clients" - ); - } } if response.code == ResultCode::Ok.into() { @@ -1420,16 +1417,11 @@ mod test_utils { use namada::core::keccak::KeccakHash; use namada::core::key::*; use namada::core::storage::{BlockHash, Epoch, Header}; - use namada::core::time::DurationSecs; - use namada::ledger::parameters::{EpochDuration, 
Parameters}; use namada::proof_of_stake::parameters::PosParams; use namada::proof_of_stake::storage::validator_consensus_key_handle; use namada::state::mockdb::MockDB; use namada::state::{LastBlock, StorageWrite}; use namada::tendermint::abci::types::VoteInfo; - use namada::token::conversion::update_allowed_conversions; - use namada::tx::data::Fee; - use namada::tx::{Code, Data}; use tempfile::tempdir; use tokio::sync::mpsc::{Sender, UnboundedReceiver}; @@ -1440,12 +1432,10 @@ mod test_utils { use crate::facade::tendermint_proto::v0_37::abci::{ RequestPrepareProposal, RequestProcessProposal, }; - use crate::node::ledger::shell::token::DenominatedAmount; use crate::node::ledger::shims::abcipp_shim_types; use crate::node::ledger::shims::abcipp_shim_types::shim::request::{ FinalizeBlock, ProcessedTx, }; - use crate::node::ledger::storage::{PersistentDB, PersistentStorageHasher}; #[derive(Error, Debug)] pub enum TestError { @@ -1644,6 +1634,7 @@ mod test_utils { &self, req: ProcessProposal, ) -> std::result::Result, TestError> { + #[allow(clippy::disallowed_methods)] let time = DateTimeUtc::now(); let (resp, tx_results) = self.shell.process_proposal(RequestProcessProposal { @@ -1713,30 +1704,25 @@ mod test_utils { self.shell.prepare_proposal(req) } - /// Add a wrapper tx to the queue of txs to be decrypted - /// in the current block proposal. Takes the length of the encoded - /// wrapper as parameter. - #[cfg(test)] - pub fn enqueue_tx(&mut self, tx: Tx, inner_tx_gas: Gas) { - self.shell.state.in_mem_mut().tx_queue.push(TxInQueue { - tx, - gas: inner_tx_gas, - }); - } - /// Start a counter for the next epoch in `num_blocks`. 
pub fn start_new_epoch_in(&mut self, num_blocks: u64) { self.state.in_mem_mut().next_epoch_min_start_height = self.state.in_mem().get_last_block_height() + num_blocks; - self.state.in_mem_mut().next_epoch_min_start_time = - DateTimeUtc::now(); + self.state.in_mem_mut().next_epoch_min_start_time = { + #[allow(clippy::disallowed_methods)] + DateTimeUtc::now() + }; } /// Simultaneously call the `FinalizeBlock` and /// `Commit` handlers. pub fn finalize_and_commit(&mut self, req: Option) { let mut req = req.unwrap_or_default(); - req.header.time = DateTimeUtc::now(); + req.header.time = { + #[allow(clippy::disallowed_methods)] + DateTimeUtc::now() + }; + self.finalize_block(req).expect("Test failed"); self.commit(); } @@ -1883,6 +1869,7 @@ mod test_utils { hash: BlockHash([0u8; 32]), header: Header { hash: Hash([0; 32]), + #[allow(clippy::disallowed_methods)] time: DateTimeUtc::now(), next_validators_hash: Hash([0; 32]), }, @@ -1911,130 +1898,6 @@ mod test_utils { .expect("Test failed"); } - /// We test that on shell shutdown, the tx queue gets persisted in a DB, and - /// on startup it is read successfully - #[test] - fn test_tx_queue_persistence() { - let base_dir = tempdir().unwrap().as_ref().canonicalize().unwrap(); - // we have to use RocksDB for this test - let (sender, _) = tokio::sync::mpsc::unbounded_channel(); - let (_, eth_receiver) = - tokio::sync::mpsc::channel(ORACLE_CHANNEL_BUFFER_SIZE); - let (control_sender, _) = oracle::control::channel(); - let (_, last_processed_block_receiver) = - last_processed_block::channel(); - let eth_oracle = EthereumOracleChannels::new( - eth_receiver, - control_sender, - last_processed_block_receiver, - ); - let vp_wasm_compilation_cache = 50 * 1024 * 1024; // 50 kiB - let tx_wasm_compilation_cache = 50 * 1024 * 1024; // 50 kiB - let native_token = address::testing::nam(); - let mut shell = Shell::::new( - config::Ledger::new( - base_dir.clone(), - Default::default(), - TendermintMode::Validator, - ), - 
top_level_directory().join("wasm"), - sender.clone(), - Some(eth_oracle), - None, - vp_wasm_compilation_cache, - tx_wasm_compilation_cache, - ); - shell - .state - .in_mem_mut() - .begin_block(BlockHash::default(), BlockHeight(1)) - .expect("begin_block failed"); - let keypair = gen_keypair(); - // enqueue a wrapper tx - let mut wrapper = - Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( - Fee { - amount_per_gas_unit: DenominatedAmount::native(0.into()), - token: native_token, - }, - keypair.ref_to(), - Epoch(0), - 300_000.into(), - None, - )))); - wrapper.header.chain_id = shell.chain_id.clone(); - wrapper.set_code(Code::new("wasm_code".as_bytes().to_owned(), None)); - wrapper.set_data(Data::new("transaction data".as_bytes().to_owned())); - - shell.state.in_mem_mut().tx_queue.push(TxInQueue { - tx: wrapper, - gas: u64::MAX.into(), - }); - // Artificially increase the block height so that chain - // will read the new block when restarted - shell - .state - .in_mem_mut() - .block - .pred_epochs - .new_epoch(BlockHeight(1)); - // initialize parameter storage - let params = Parameters { - max_tx_bytes: 1024 * 1024, - epoch_duration: EpochDuration { - min_num_of_blocks: 1, - min_duration: DurationSecs(3600), - }, - max_expected_time_per_block: DurationSecs(3600), - max_proposal_bytes: Default::default(), - max_block_gas: 100, - vp_allowlist: vec![], - tx_allowlist: vec![], - implicit_vp_code_hash: Default::default(), - epochs_per_year: 365, - max_signatures_per_transaction: 10, - staked_ratio: Default::default(), - pos_inflation_amount: Default::default(), - fee_unshielding_gas_limit: 0, - fee_unshielding_descriptions_limit: 0, - minimum_gas_price: Default::default(), - }; - parameters::init_storage(¶ms, &mut shell.state) - .expect("Test failed"); - // make state to update conversion for a new epoch - update_allowed_conversions(&mut shell.state) - .expect("update conversions failed"); - shell.state.commit_block().expect("commit failed"); - - // Drop the shell - 
std::mem::drop(shell); - let (_, eth_receiver) = - tokio::sync::mpsc::channel(ORACLE_CHANNEL_BUFFER_SIZE); - let (control_sender, _) = oracle::control::channel(); - let (_, last_processed_block_receiver) = - last_processed_block::channel(); - let eth_oracle = EthereumOracleChannels::new( - eth_receiver, - control_sender, - last_processed_block_receiver, - ); - // Reboot the shell and check that the queue was restored from DB - let shell = Shell::::new( - config::Ledger::new( - base_dir, - Default::default(), - TendermintMode::Validator, - ), - top_level_directory().join("wasm"), - sender, - Some(eth_oracle), - None, - vp_wasm_compilation_cache, - tx_wasm_compilation_cache, - ); - assert!(!shell.state.in_mem().tx_queue.is_empty()); - } - pub(super) fn get_pkh_from_address( storage: &S, params: &PosParams, @@ -2081,11 +1944,12 @@ mod test_utils { #[cfg(test)] mod shell_tests { use namada::core::storage::Epoch; + use namada::eth_bridge::storage::eth_bridge_queries::is_bridge_comptime_enabled; use namada::replay_protection; use namada::token::read_denom; use namada::tx::data::protocol::{ProtocolTx, ProtocolTxType}; use namada::tx::data::Fee; - use namada::tx::{Code, Data, Signature, Signed}; + use namada::tx::{Authorization, Code, Data, Signed}; use namada::vote_ext::{ bridge_pool_roots, ethereum_events, ethereum_tx_data_variants, }; @@ -2101,6 +1965,12 @@ mod shell_tests { /// because the bridge is disabled). #[tokio::test] async fn test_broadcast_valset_upd_inspite_oracle_off() { + if !is_bridge_comptime_enabled() { + // NOTE: this test doesn't work if the ethereum bridge + // is disabled at compile time. + return; + } + // this height should result in a validator set // update being broadcasted let (mut shell, mut broadcaster_rx, _, _) = @@ -2138,6 +2008,12 @@ mod shell_tests { /// as expected. 
#[test] fn test_commit_broadcasts_expired_eth_events() { + if !is_bridge_comptime_enabled() { + // NOTE: this test doesn't work if the ethereum bridge + // is disabled at compile time. + return; + } + let (mut shell, mut broadcaster_rx, _, _) = test_utils::setup_at_height(5); @@ -2186,6 +2062,12 @@ mod shell_tests { const LAST_HEIGHT: BlockHeight = BlockHeight(3); + if !is_bridge_comptime_enabled() { + // NOTE: this test doesn't work if the ethereum bridge + // is disabled at compile time. + return; + } + let (mut shell, _recv, _, _) = test_utils::setup_at_height(LAST_HEIGHT); shell .state @@ -2314,6 +2196,12 @@ mod shell_tests { fn test_mempool_eth_events_vext_normal_op() { const LAST_HEIGHT: BlockHeight = BlockHeight(3); + if !is_bridge_comptime_enabled() { + // NOTE: this test doesn't work if the ethereum bridge + // is disabled at compile time. + return; + } + let (shell, _recv, _, _) = test_utils::setup_at_height(LAST_HEIGHT); let (protocol_key, _) = wallet::defaults::validator_keys(); @@ -2375,7 +2263,7 @@ mod shell_tests { // invalid tx type, it doesn't match the // tx type declared in the header tx.set_data(Data::new(ext.serialize_to_vec())); - tx.add_section(Section::Signature(Signature::new( + tx.add_section(Section::Authorization(Authorization::new( tx.sechashes(), [(0, protocol_key)].into_iter().collect(), None, @@ -2452,11 +2340,13 @@ mod shell_tests { .set_code(Code::new("wasm_code".as_bytes().to_owned(), None)); invalid_wrapper .set_data(Data::new("transaction data".as_bytes().to_owned())); - invalid_wrapper.add_section(Section::Signature(Signature::new( - invalid_wrapper.sechashes(), - [(0, keypair)].into_iter().collect(), - None, - ))); + invalid_wrapper.add_section(Section::Authorization( + Authorization::new( + invalid_wrapper.sechashes(), + [(0, keypair)].into_iter().collect(), + None, + ), + )); // we mount a malleability attack to try and remove the fee let mut new_wrapper = @@ -2522,7 +2412,7 @@ mod shell_tests { wrapper.header.chain_id = 
shell.chain_id.clone(); wrapper.set_code(Code::new("wasm_code".as_bytes().to_owned(), None)); wrapper.set_data(Data::new("transaction data".as_bytes().to_owned())); - wrapper.add_section(Section::Signature(Signature::new( + wrapper.add_section(Section::Authorization(Authorization::new( wrapper.sechashes(), [(0, keypair)].into_iter().collect(), None, @@ -2675,7 +2565,7 @@ mod shell_tests { wrapper.header.chain_id = shell.chain_id.clone(); wrapper.set_code(Code::new("wasm_code".as_bytes().to_owned(), None)); wrapper.set_data(Data::new("transaction data".as_bytes().to_owned())); - wrapper.add_section(Section::Signature(Signature::new( + wrapper.add_section(Section::Authorization(Authorization::new( wrapper.sechashes(), [(0, keypair)].into_iter().collect(), None, @@ -2708,7 +2598,7 @@ mod shell_tests { wrapper.header.chain_id = shell.chain_id.clone(); wrapper.set_code(Code::new("wasm_code".as_bytes().to_owned(), None)); wrapper.set_data(Data::new("transaction data".as_bytes().to_owned())); - wrapper.add_section(Section::Signature(Signature::new( + wrapper.add_section(Section::Authorization(Authorization::new( wrapper.sechashes(), [(0, keypair)].into_iter().collect(), None, @@ -2747,7 +2637,7 @@ mod shell_tests { wrapper.header.chain_id = shell.chain_id.clone(); wrapper.set_code(Code::new("wasm_code".as_bytes().to_owned(), None)); wrapper.set_data(Data::new("transaction data".as_bytes().to_owned())); - wrapper.add_section(Section::Signature(Signature::new( + wrapper.add_section(Section::Authorization(Authorization::new( wrapper.sechashes(), [(0, crate::wallet::defaults::albert_keypair())] .into_iter() @@ -2782,7 +2672,7 @@ mod shell_tests { wrapper.header.chain_id = shell.chain_id.clone(); wrapper.set_code(Code::new("wasm_code".as_bytes().to_owned(), None)); wrapper.set_data(Data::new("transaction data".as_bytes().to_owned())); - wrapper.add_section(Section::Signature(Signature::new( + wrapper.add_section(Section::Authorization(Authorization::new( wrapper.sechashes(), 
[(0, crate::wallet::defaults::albert_keypair())] .into_iter() @@ -2818,7 +2708,7 @@ mod shell_tests { wrapper.header.chain_id = shell.chain_id.clone(); wrapper.set_code(Code::new("wasm_code".as_bytes().to_owned(), None)); wrapper.set_data(Data::new("transaction data".as_bytes().to_owned())); - wrapper.add_section(Section::Signature(Signature::new( + wrapper.add_section(Section::Authorization(Authorization::new( wrapper.sechashes(), [(0, crate::wallet::defaults::albert_keypair())] .into_iter() @@ -2854,7 +2744,7 @@ mod shell_tests { wrapper.header.chain_id = shell.chain_id.clone(); wrapper.set_code(Code::new("wasm_code".as_bytes().to_owned(), None)); wrapper.set_data(Data::new("transaction data".as_bytes().to_owned())); - wrapper.add_section(Section::Signature(Signature::new( + wrapper.add_section(Section::Authorization(Authorization::new( wrapper.sechashes(), [(0, crate::wallet::defaults::albert_keypair())] .into_iter() @@ -2902,7 +2792,7 @@ mod shell_tests { wrapper .set_code(Code::new("wasm_code".as_bytes().to_owned(), None)); wrapper.set_data(Data::new(vec![0; size as usize])); - wrapper.add_section(Section::Signature(Signature::new( + wrapper.add_section(Section::Authorization(Authorization::new( wrapper.sechashes(), [(0, keypair)].into_iter().collect(), None, diff --git a/crates/apps/src/lib/node/ledger/shell/prepare_proposal.rs b/crates/apps/src/lib/node/ledger/shell/prepare_proposal.rs index b08c2bec59..0affc3e1ce 100644 --- a/crates/apps/src/lib/node/ledger/shell/prepare_proposal.rs +++ b/crates/apps/src/lib/node/ledger/shell/prepare_proposal.rs @@ -2,22 +2,20 @@ use masp_primitives::transaction::Transaction; use namada::core::address::Address; -use namada::core::hints; use namada::core::key::tm_raw_hash_to_string; use namada::gas::TxGasMeter; use namada::ledger::protocol; -use namada::ledger::storage::tx_queue::TxInQueue; use namada::proof_of_stake::storage::find_validator_by_raw_hash; use namada::state::{DBIter, StorageHasher, TempWlState, DB}; -use 
namada::tx::data::{DecryptedTx, TxType, WrapperTx}; +use namada::tx::data::{TxType, WrapperTx}; use namada::tx::Tx; use namada::vm::wasm::{TxCache, VpCache}; use namada::vm::WasmCacheAccess; use super::super::*; use super::block_alloc::states::{ - BuildingDecryptedTxBatch, BuildingProtocolTxBatch, - EncryptedTxBatchAllocator, NextState, TryAlloc, + BuildingNormalTxBatch, BuildingProtocolTxBatch, NextState, TryAlloc, + WithNormalTxs, WithoutNormalTxs, }; use super::block_alloc::{AllocFailure, BlockAllocator, BlockResources}; use crate::config::ValidatorLocalConfig; @@ -38,17 +36,21 @@ where /// /// INVARIANT: Any changes applied in this method must be reverted if /// the proposal is rejected (unless we can simply overwrite - /// them in the next block). + /// them in the next block). Furthermore, protocol transactions cannot + /// affect the ability of a tx to pay its wrapper fees. pub fn prepare_proposal( &self, - req: RequestPrepareProposal, + mut req: RequestPrepareProposal, ) -> response::PrepareProposal { let txs = if let ShellMode::Validator { ref local_config, .. 
} = self.mode { // start counting allotted space for txs - let alloc = self.get_encrypted_txs_allocator(); + let alloc = self.get_protocol_txs_allocator(); + // add initial protocol txs + let (alloc, mut txs) = + self.build_protocol_tx_with_normal_txs(alloc, &mut req.txs); // add encrypted txs let tm_raw_hash_string = @@ -60,22 +62,17 @@ where "Unable to find native validator address of block \ proposer from tendermint raw hash", ); - let (encrypted_txs, alloc) = self.build_encrypted_txs( + let (mut normal_txs, alloc) = self.build_normal_txs( alloc, &req.txs, req.time, &block_proposer, local_config.as_ref(), ); - let mut txs = encrypted_txs; - // decrypt the wrapper txs included in the previous block - let (mut decrypted_txs, alloc) = self.build_decrypted_txs(alloc); - txs.append(&mut decrypted_txs); - - // add vote extension protocol txs - let mut protocol_txs = self.build_protocol_txs(alloc, &req.txs); - txs.append(&mut protocol_txs); - + txs.append(&mut normal_txs); + let mut remaining_txs = + self.build_protocol_tx_without_normal_txs(alloc, &mut req.txs); + txs.append(&mut remaining_txs); txs } else { vec![] @@ -90,47 +87,28 @@ where response::PrepareProposal { txs } } - /// Depending on the current block height offset within the epoch, - /// transition state accordingly, return a block space allocator - /// with or without encrypted txs. - /// - /// # How to determine which path to take in the states DAG - /// - /// If we are at the second or third block height offset within an - /// epoch, we do not allow encrypted transactions to be included in - /// a block, therefore we return an allocator wrapped in an - /// [`EncryptedTxBatchAllocator::WithoutEncryptedTxs`] value. - /// Otherwise, we return an allocator wrapped in an - /// [`EncryptedTxBatchAllocator::WithEncryptedTxs`] value. + /// Get the first state of the block allocator. This is for protocol + /// transactions. 
#[inline] - fn get_encrypted_txs_allocator(&self) -> EncryptedTxBatchAllocator { - let is_2nd_height_off = self.is_deciding_offset_within_epoch(1); - let is_3rd_height_off = self.is_deciding_offset_within_epoch(2); - - if hints::unlikely(is_2nd_height_off || is_3rd_height_off) { - tracing::warn!( - proposal_height = - ?self.state.in_mem().block.height, - "No mempool txs are being included in the current proposal" - ); - EncryptedTxBatchAllocator::WithoutEncryptedTxs( - (&*self.state).into(), - ) - } else { - EncryptedTxBatchAllocator::WithEncryptedTxs((&*self.state).into()) - } + fn get_protocol_txs_allocator( + &self, + ) -> BlockAllocator> { + self.state.read_only().into() } /// Builds a batch of encrypted transactions, retrieved from - /// Tendermint's mempool. - fn build_encrypted_txs( + /// CometBFT's mempool. + fn build_normal_txs( &self, - mut alloc: EncryptedTxBatchAllocator, + mut alloc: BlockAllocator, txs: &[TxBytes], block_time: Option, block_proposer: &Address, proposer_local_config: Option<&ValidatorLocalConfig>, - ) -> (Vec, BlockAllocator) { + ) -> ( + Vec, + BlockAllocator>, + ) { let block_time = block_time.and_then(|block_time| { // If error in conversion, default to last block datetime, it's // valid because of mempool check @@ -191,85 +169,46 @@ where (txs, alloc) } - /// Builds a batch of DKG decrypted transactions. - // NOTE: we won't have frontrunning protection until V2 of the - // Anoma protocol; Namada runs V1, therefore this method is - // essentially a NOOP - // - // sources: - // - https://specs.namada.net/main/releases/v2.html - // - https://github.com/anoma/ferveo - fn build_decrypted_txs( + /// Allocate an initial set of protocol txs and advance to the + /// next allocation state. 
+ fn build_protocol_tx_with_normal_txs( &self, - mut alloc: BlockAllocator, - ) -> (Vec, BlockAllocator) { - let txs = self - .state - .in_mem() - .tx_queue - .iter() - .map( - |TxInQueue { - tx, - gas: _, - }| { - let mut tx = tx.clone(); - tx.update_header(TxType::Decrypted(DecryptedTx::Decrypted)); - tx.to_bytes().into() - }, - ) - // TODO: make sure all decrypted txs are accepted - .take_while(|tx_bytes: &TxBytes| { - alloc.try_alloc(&tx_bytes[..]).map_or_else( - |status| match status { - AllocFailure::Rejected { bin_resource_left: bin_space_left } => { - tracing::warn!( - ?tx_bytes, - bin_space_left, - proposal_height = - ?self.get_current_decision_height(), - "Dropping decrypted tx from the current proposal", - ); - false - } - AllocFailure::OverflowsBin { bin_resource: bin_size } => { - tracing::warn!( - ?tx_bytes, - bin_size, - proposal_height = - ?self.get_current_decision_height(), - "Dropping large decrypted tx from the current proposal", - ); - true - } - }, - |()| true, - ) - }) - .collect(); - let alloc = alloc.next_state(); + alloc: BlockAllocator>, + txs: &mut Vec, + ) -> (BlockAllocator, Vec) { + let (alloc, txs) = self.build_protocol_txs(alloc, txs); + (alloc.next_state(), txs) + } - (txs, alloc) + /// Allocate protocol txs into any remaining space. After this, no + /// more allocation will take place. + fn build_protocol_tx_without_normal_txs( + &self, + alloc: BlockAllocator>, + txs: &mut Vec, + ) -> Vec { + let (_, txs) = self.build_protocol_txs(alloc, txs); + txs } /// Builds a batch of protocol transactions. - fn build_protocol_txs( + fn build_protocol_txs( &self, - mut alloc: BlockAllocator, - txs: &[TxBytes], - ) -> Vec { + mut alloc: BlockAllocator>, + txs: &mut Vec, + ) -> (BlockAllocator>, Vec) { if self.state.in_mem().last_block.is_none() { // genesis should not contain vote extensions. 
// // this is because we have not decided any block through // consensus yet (hence height 0), which in turn means we // have not committed any vote extensions to a block either. - return vec![]; + return (alloc, vec![]); } - let deserialized_iter = self.deserialize_vote_extensions(txs); + let mut deserialized_iter = self.deserialize_vote_extensions(txs); - deserialized_iter.take_while(|tx_bytes| + let taken = deserialized_iter.by_ref().take_while(|tx_bytes| alloc.try_alloc(&tx_bytes[..]) .map_or_else( |status| match status { @@ -307,7 +246,10 @@ where |()| true, ) ) - .collect() + .collect(); + // avoid dropping the txs that couldn't be included in the block + deserialized_iter.keep_rest(); + (alloc, taken) } } @@ -424,12 +366,10 @@ where mod test_prepare_proposal { use std::collections::BTreeSet; - use borsh_ext::BorshSerializeExt; use namada::core::address; use namada::core::ethereum_events::EthereumEvent; use namada::core::key::RefTo; use namada::core::storage::{BlockHeight, InnerEthEventsQueue}; - use namada::ledger::gas::Gas; use namada::ledger::pos::PosQueries; use namada::proof_of_stake::storage::{ consensus_validator_set_handle, @@ -440,7 +380,7 @@ mod test_prepare_proposal { use namada::state::collections::lazy_map::{NestedSubKey, SubKey}; use namada::token::{read_denom, Amount, DenominatedAmount}; use namada::tx::data::Fee; - use namada::tx::{Code, Data, Header, Section, Signature, Signed}; + use namada::tx::{Authorization, Code, Data, Section, Signed}; use namada::vote_ext::{ethereum_events, ethereum_tx_data_variants}; use namada::{replay_protection, token}; use namada_sdk::storage::StorageWrite; @@ -476,7 +416,7 @@ mod test_prepare_proposal { #[test] fn test_prepare_proposal_rejects_non_wrapper_tx() { let (shell, _recv, _, _) = test_utils::setup(); - let mut tx = Tx::from_type(TxType::Decrypted(DecryptedTx::Decrypted)); + let mut tx = Tx::from_type(TxType::Raw); tx.header.chain_id = shell.chain_id.clone(); let req = RequestPrepareProposal { txs: 
vec![tx.to_bytes().into()], @@ -764,100 +704,6 @@ mod test_prepare_proposal { assert_eq!(signed_eth_ev_vote_extension, rsp_ext.0); } - /// Test that the decrypted txs are included - /// in the proposal in the same order as their - /// corresponding wrappers - #[test] - fn test_decrypted_txs_in_correct_order() { - let (mut shell, _recv, _, _) = test_utils::setup(); - let keypair = gen_keypair(); - let mut expected_wrapper = vec![]; - let mut expected_decrypted = vec![]; - - // Load some tokens to tx signer to pay fees - let balance_key = token::storage_key::balance_key( - &shell.state.in_mem().native_token, - &Address::from(&keypair.ref_to()), - ); - shell - .state - .db_write( - &balance_key, - Amount::native_whole(1_000).serialize_to_vec(), - ) - .unwrap(); - - let mut req = RequestPrepareProposal { - txs: vec![], - ..Default::default() - }; - // create a request with two new wrappers from mempool and - // two wrappers from the previous block to be decrypted - for i in 0..2 { - let mut tx = - Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( - Fee { - amount_per_gas_unit: DenominatedAmount::native( - 1.into(), - ), - token: shell.state.in_mem().native_token.clone(), - }, - keypair.ref_to(), - Epoch(0), - GAS_LIMIT_MULTIPLIER.into(), - None, - )))); - tx.header.chain_id = shell.chain_id.clone(); - tx.set_code(Code::new("wasm_code".as_bytes().to_owned(), None)); - tx.set_data(Data::new( - format!("transaction data: {}", i).as_bytes().to_owned(), - )); - tx.add_section(Section::Signature(Signature::new( - tx.sechashes(), - [(0, keypair.clone())].into_iter().collect(), - None, - ))); - - let gas = Gas::from( - tx.header().wrapper().expect("Wrong tx type").gas_limit, - ) - .checked_sub(Gas::from(tx.to_bytes().len() as u64)) - .unwrap(); - shell.enqueue_tx(tx.clone(), gas); - expected_wrapper.push(tx.clone()); - req.txs.push(tx.to_bytes().into()); - tx.update_header(TxType::Decrypted(DecryptedTx::Decrypted)); - expected_decrypted.push(tx.clone()); - } - // we 
extract the inner data from the txs for testing - // equality since otherwise changes in timestamps would - // fail the test - let expected_txs: Vec
= expected_wrapper - .into_iter() - .chain(expected_decrypted) - .map(|tx| tx.header) - .collect(); - let received: Vec
= shell - .prepare_proposal(req) - .txs - .into_iter() - .map(|tx_bytes| { - Tx::try_from(tx_bytes.as_ref()).expect("Test failed").header - }) - .collect(); - // check that the order of the txs is correct - assert_eq!( - received - .iter() - .map(|x| x.serialize_to_vec()) - .collect::>(), - expected_txs - .iter() - .map(|x| x.serialize_to_vec()) - .collect::>(), - ); - } - /// Test that if the unsigned wrapper tx hash is known (replay attack), the /// transaction is not included in the block #[test] @@ -879,7 +725,7 @@ mod test_prepare_proposal { wrapper.header.chain_id = shell.chain_id.clone(); wrapper.set_code(Code::new("wasm_code".as_bytes().to_owned(), None)); wrapper.set_data(Data::new("transaction data".as_bytes().to_owned())); - wrapper.add_section(Section::Signature(Signature::new( + wrapper.add_section(Section::Authorization(Authorization::new( wrapper.sechashes(), [(0, keypair)].into_iter().collect(), None, @@ -923,7 +769,7 @@ mod test_prepare_proposal { wrapper.header.chain_id = shell.chain_id.clone(); wrapper.set_code(Code::new("wasm_code".as_bytes().to_owned(), None)); wrapper.set_data(Data::new("transaction data".as_bytes().to_owned())); - wrapper.add_section(Section::Signature(Signature::new( + wrapper.add_section(Section::Authorization(Authorization::new( wrapper.sechashes(), [(0, keypair)].into_iter().collect(), None, @@ -960,7 +806,7 @@ mod test_prepare_proposal { wrapper.header.chain_id = shell.chain_id.clone(); wrapper.set_code(Code::new("wasm_code".as_bytes().to_owned(), None)); wrapper.set_data(Data::new("transaction data".as_bytes().to_owned())); - wrapper.add_section(Section::Signature(Signature::new( + wrapper.add_section(Section::Authorization(Authorization::new( wrapper.sechashes(), [(0, keypair)].into_iter().collect(), None, @@ -1008,7 +854,7 @@ mod test_prepare_proposal { let tx_data = Data::new("transaction data".as_bytes().to_owned()); wrapper.set_data(tx_data); let mut new_wrapper = wrapper.clone(); - 
wrapper.add_section(Section::Signature(Signature::new( + wrapper.add_section(Section::Authorization(Authorization::new( wrapper.sechashes(), [(0, keypair)].into_iter().collect(), None, @@ -1024,7 +870,7 @@ mod test_prepare_proposal { GAS_LIMIT_MULTIPLIER.into(), None, )))); - new_wrapper.add_section(Section::Signature(Signature::new( + new_wrapper.add_section(Section::Authorization(Authorization::new( new_wrapper.sechashes(), [(0, keypair_2)].into_iter().collect(), None, @@ -1059,12 +905,13 @@ mod test_prepare_proposal { wrapper_tx.set_code(Code::new("wasm_code".as_bytes().to_owned(), None)); wrapper_tx .set_data(Data::new("transaction data".as_bytes().to_owned())); - wrapper_tx.add_section(Section::Signature(Signature::new( + wrapper_tx.add_section(Section::Authorization(Authorization::new( wrapper_tx.sechashes(), [(0, keypair)].into_iter().collect(), None, ))); + #[allow(clippy::disallowed_methods)] let time = DateTimeUtc::now(); let block_time = namada::core::tendermint_proto::google::protobuf::Timestamp { @@ -1107,7 +954,7 @@ mod test_prepare_proposal { wrapper_tx.set_code(Code::new("wasm_code".as_bytes().to_owned(), None)); wrapper_tx .set_data(Data::new("transaction data".as_bytes().to_owned())); - wrapper_tx.add_section(Section::Signature(Signature::new( + wrapper_tx.add_section(Section::Authorization(Authorization::new( wrapper_tx.sechashes(), [(0, keypair)].into_iter().collect(), None, @@ -1147,7 +994,7 @@ mod test_prepare_proposal { wrapper_tx.set_code(Code::new("wasm_code".as_bytes().to_owned(), None)); wrapper_tx .set_data(Data::new("transaction data".as_bytes().to_owned())); - wrapper_tx.add_section(Section::Signature(Signature::new( + wrapper_tx.add_section(Section::Authorization(Authorization::new( wrapper_tx.sechashes(), [(0, keypair)].into_iter().collect(), None, @@ -1173,10 +1020,9 @@ mod test_prepare_proposal { if let ShellMode::Validator { local_config, .. 
} = &mut shell.mode { // Remove the allowed btc *local_config = Some(ValidatorLocalConfig { - accepted_gas_tokens: std::collections::HashMap::from([( - namada::core::address::testing::nam(), - Amount::from(1), - )]), + accepted_gas_tokens: namada::core::collections::HashMap::from( + [(namada::core::address::testing::nam(), Amount::from(1))], + ), }); } @@ -1203,7 +1049,7 @@ mod test_prepare_proposal { wrapper_tx.set_code(Code::new("wasm_code".as_bytes().to_owned(), None)); wrapper_tx .set_data(Data::new("transaction data".as_bytes().to_owned())); - wrapper_tx.add_section(Section::Signature(Signature::new( + wrapper_tx.add_section(Section::Authorization(Authorization::new( wrapper_tx.sechashes(), [(0, crate::wallet::defaults::albert_keypair())] .into_iter() @@ -1251,7 +1097,7 @@ mod test_prepare_proposal { wrapper_tx.set_code(Code::new("wasm_code".as_bytes().to_owned(), None)); wrapper_tx .set_data(Data::new("transaction data".as_bytes().to_owned())); - wrapper_tx.add_section(Section::Signature(Signature::new( + wrapper_tx.add_section(Section::Authorization(Authorization::new( wrapper_tx.sechashes(), [(0, crate::wallet::defaults::albert_keypair())] .into_iter() @@ -1279,10 +1125,12 @@ mod test_prepare_proposal { if let ShellMode::Validator { local_config, .. 
} = &mut shell.mode { // Remove btc and increase minimum for nam *local_config = Some(ValidatorLocalConfig { - accepted_gas_tokens: std::collections::HashMap::from([( - namada::core::address::testing::nam(), - Amount::from(100), - )]), + accepted_gas_tokens: namada::core::collections::HashMap::from( + [( + namada::core::address::testing::nam(), + Amount::from(100), + )], + ), }); } @@ -1301,7 +1149,7 @@ mod test_prepare_proposal { wrapper_tx.set_code(Code::new("wasm_code".as_bytes().to_owned(), None)); wrapper_tx .set_data(Data::new("transaction data".as_bytes().to_owned())); - wrapper_tx.add_section(Section::Signature(Signature::new( + wrapper_tx.add_section(Section::Authorization(Authorization::new( wrapper_tx.sechashes(), [(0, crate::wallet::defaults::albert_keypair())] .into_iter() @@ -1341,7 +1189,7 @@ mod test_prepare_proposal { wrapper_tx.set_code(Code::new("wasm_code".as_bytes().to_owned(), None)); wrapper_tx .set_data(Data::new("transaction data".as_bytes().to_owned())); - wrapper_tx.add_section(Section::Signature(Signature::new( + wrapper_tx.add_section(Section::Authorization(Authorization::new( wrapper_tx.sechashes(), [(0, crate::wallet::defaults::albert_keypair())] .into_iter() @@ -1382,7 +1230,7 @@ mod test_prepare_proposal { wrapper_tx.set_code(Code::new("wasm_code".as_bytes().to_owned(), None)); wrapper_tx .set_data(Data::new("transaction data".as_bytes().to_owned())); - wrapper_tx.add_section(Section::Signature(Signature::new( + wrapper_tx.add_section(Section::Authorization(Authorization::new( wrapper_tx.sechashes(), [(0, crate::wallet::defaults::albert_keypair())] .into_iter() @@ -1423,7 +1271,7 @@ mod test_prepare_proposal { wrapper_tx.set_code(Code::new("wasm_code".as_bytes().to_owned(), None)); wrapper_tx .set_data(Data::new("transaction data".as_bytes().to_owned())); - wrapper_tx.add_section(Section::Signature(Signature::new( + wrapper_tx.add_section(Section::Authorization(Authorization::new( wrapper_tx.sechashes(), [(0, 
crate::wallet::defaults::albert_keypair())] .into_iter() diff --git a/crates/apps/src/lib/node/ledger/shell/process_proposal.rs b/crates/apps/src/lib/node/ledger/shell/process_proposal.rs index 73648c7207..f4d1f35019 100644 --- a/crates/apps/src/lib/node/ledger/shell/process_proposal.rs +++ b/crates/apps/src/lib/node/ledger/shell/process_proposal.rs @@ -7,7 +7,7 @@ use namada::proof_of_stake::storage::find_validator_by_raw_hash; use namada::tx::data::protocol::ProtocolTxType; use namada::vote_ext::ethereum_tx_data_variants; -use super::block_alloc::{BlockSpace, EncryptedTxsBins}; +use super::block_alloc::{BlockGas, BlockSpace}; use super::*; use crate::facade::tendermint_proto::v0_37::abci::RequestProcessProposal; use crate::node::ledger::shell::block_alloc::{AllocFailure, TxBin}; @@ -18,19 +18,10 @@ use crate::node::ledger::shims::abcipp_shim_types::shim::TxBytes; /// transaction numbers, in a block proposal. #[derive(Default)] pub struct ValidationMeta { - /// Space and gas utilized by encrypted txs. - pub encrypted_txs_bins: EncryptedTxsBins, - /// Vote extension digest counters. + /// Gas emitted by users. + pub user_gas: TxBin, /// Space utilized by all txs. pub txs_bin: TxBin, - /// Check if the decrypted tx queue has any elements - /// left. - /// - /// This field will only evaluate to true if a block - /// proposer didn't include all decrypted txs in a block. - pub decrypted_queue_has_remaining_txs: bool, - /// Check if a block has decrypted txs. 
- pub has_decrypted_txs: bool, } impl From<&WlState> for ValidationMeta @@ -43,15 +34,10 @@ where state.pos_queries().get_max_proposal_bytes().get(); let max_block_gas = namada::parameters::get_max_block_gas(state).unwrap(); - let encrypted_txs_bin = - EncryptedTxsBins::new(max_proposal_bytes, max_block_gas); + + let user_gas = TxBin::init(max_block_gas); let txs_bin = TxBin::init(max_proposal_bytes); - Self { - decrypted_queue_has_remaining_txs: false, - has_decrypted_txs: false, - encrypted_txs_bins: encrypted_txs_bin, - txs_bin, - } + Self { user_gas, txs_bin } } } @@ -94,7 +80,7 @@ where ) }; - let (tx_results, meta) = self.process_txs( + let tx_results = self.process_txs( &req.txs, req.time .expect("Missing timestamp in proposed block") @@ -104,9 +90,8 @@ where ); // Erroneous transactions were detected when processing - // the leader's proposal. We allow txs that do not - // deserialize properly, that have invalid signatures - // and that have invalid wasm code to reach FinalizeBlock. + // the leader's proposal. We allow txs that are invalid at runtime + // (wasm) to reach FinalizeBlock. 
let invalid_txs = tx_results.iter().any(|res| { let error = ResultCode::from_u32(res.code).expect( "All error codes returned from process_single_tx are valid", @@ -121,22 +106,8 @@ where "Found invalid transactions, proposed block will be rejected" ); } - - let has_remaining_decrypted_txs = - meta.decrypted_queue_has_remaining_txs; - if has_remaining_decrypted_txs { - tracing::warn!( - proposer = ?HEXUPPER.encode(&req.proposer_address), - height = req.height, - hash = ?HEXUPPER.encode(&req.hash), - "Not all decrypted txs from the previous height were included in - the proposal, the block will be rejected" - ); - } - - let will_reject_proposal = invalid_txs || has_remaining_decrypted_txs; ( - if will_reject_proposal { + if invalid_txs { ProcessProposal::Reject } else { ProcessProposal::Accept @@ -157,8 +128,7 @@ where txs: &[TxBytes], block_time: DateTimeUtc, block_proposer: &Address, - ) -> (Vec, ValidationMeta) { - let mut tx_queue_iter = self.state.in_mem().tx_queue.iter(); + ) -> Vec { let mut temp_state = self.state.with_temp_write_log(); let mut metadata = ValidationMeta::from(self.state.read_only()); let mut vp_wasm_cache = self.vp_wasm_cache.clone(); @@ -169,7 +139,6 @@ where .map(|tx_bytes| { let result = self.check_proposal_tx( tx_bytes, - &mut tx_queue_iter, &mut metadata, &mut temp_state, block_time, @@ -192,10 +161,7 @@ where result }) .collect(); - metadata.decrypted_queue_has_remaining_txs = - !self.state.in_mem().tx_queue.is_empty() - && tx_queue_iter.next().is_some(); - (tx_results, metadata) + tx_results } /// Checks if the Tx can be deserialized from bytes. Checks the fees and @@ -221,10 +187,9 @@ where /// proposal is rejected (unless we can simply overwrite them in the /// next block). 
#[allow(clippy::too_many_arguments)] - pub fn check_proposal_tx<'a, CA>( + pub fn check_proposal_tx( &self, tx_bytes: &[u8], - tx_queue_iter: &mut impl Iterator, metadata: &mut ValidationMeta, temp_state: &mut TempWlState, block_time: DateTimeUtc, @@ -437,75 +402,14 @@ where }, } } - TxType::Decrypted(tx_header) => { - metadata.has_decrypted_txs = true; - match tx_queue_iter.next() { - Some(wrapper) => { - if wrapper.tx.raw_header_hash() != tx.raw_header_hash() - { - TxResult { - code: ResultCode::InvalidOrder.into(), - info: "Process proposal rejected a decrypted \ - transaction that violated the tx order \ - determined in the previous block" - .into(), - } - } else if matches!( - tx_header, - DecryptedTx::Undecryptable - ) { - // DKG is disabled, txs are not actually encrypted - TxResult { - code: ResultCode::InvalidTx.into(), - info: "The encrypted payload of tx was \ - incorrectly marked as un-decryptable" - .into(), - } - } else { - match tx.header().expiration { - Some(tx_expiration) - if block_time > tx_expiration => - { - TxResult { - code: ResultCode::ExpiredDecryptedTx - .into(), - info: format!( - "Tx expired at {:#?}, block time: \ - {:#?}", - tx_expiration, block_time - ), - } - } - _ => TxResult { - code: ResultCode::Ok.into(), - info: "Process Proposal accepted this \ - transaction" - .into(), - }, - } - } - } - None => TxResult { - code: ResultCode::ExtraTxs.into(), - info: "Received more decrypted txs than expected" - .into(), - }, - } - } TxType::Wrapper(wrapper) => { - // Account for gas and space. 
This is done even if the - // transaction is later deemed invalid, to - // incentivize the proposer to include only - // valid transaction and avoid wasting block - // resources (ABCI only) + // Account for the tx's resources + let allocated_gas = + metadata.user_gas.try_dump(u64::from(wrapper.gas_limit)); let mut tx_gas_meter = TxGasMeter::new(wrapper.gas_limit); - if tx_gas_meter.add_wrapper_gas(tx_bytes).is_err() { - // Account for the tx's resources even in case of an error. - // Ignore any allocation error - let _ = metadata - .encrypted_txs_bins - .try_dump(tx_bytes, u64::from(wrapper.gas_limit)); - + if tx_gas_meter.add_wrapper_gas(tx_bytes).is_err() + || allocated_gas.is_err() + { return TxResult { code: ResultCode::TxGasLimit.into(), info: "Wrapper transactions exceeds its gas limit" @@ -513,31 +417,14 @@ where }; } - // try to allocate space and gas for this encrypted tx - if let Err(e) = metadata - .encrypted_txs_bins - .try_dump(tx_bytes, u64::from(wrapper.gas_limit)) - { - return TxResult { - code: ResultCode::AllocationError.into(), - info: e, - }; - } - // decrypted txs shouldn't show up before wrapper txs - if metadata.has_decrypted_txs { + // Tx allowlist + if let Err(err) = check_tx_allowed(&tx, &self.state) { return TxResult { - code: ResultCode::InvalidTx.into(), - info: "Decrypted txs should not be proposed before \ - wrapper txs" - .into(), - }; - } - if hints::unlikely(self.encrypted_txs_not_allowed()) { - return TxResult { - code: ResultCode::AllocationError.into(), - info: "Wrapper txs not allowed at the current block \ - height" - .into(), + code: ResultCode::TxNotAllowlisted.into(), + info: format!( + "Tx code didn't pass the allowlist check: {}", + err + ), }; } @@ -604,14 +491,6 @@ where ) -> shim::response::RevertProposal { Default::default() } - - /// Checks if it is not possible to include encrypted txs at the current - /// block height. 
- pub(super) fn encrypted_txs_not_allowed(&self) -> bool { - let is_2nd_height_off = self.is_deciding_offset_within_epoch(1); - let is_3rd_height_off = self.is_deciding_offset_within_epoch(2); - is_2nd_height_off || is_3rd_height_off - } } fn process_proposal_fee_check( @@ -656,12 +535,17 @@ where mod test_process_proposal { use namada::core::key::*; use namada::core::storage::Epoch; + use namada::eth_bridge::storage::eth_bridge_queries::{ + is_bridge_comptime_enabled, EthBridgeQueries, + }; use namada::replay_protection; use namada::state::StorageWrite; use namada::token::{read_denom, Amount, DenominatedAmount}; use namada::tx::data::Fee; - use namada::tx::{Code, Data, Signature, Signed}; - use namada::vote_ext::{bridge_pool_roots, ethereum_events}; + use namada::tx::{Authorization, Code, Data, Signed}; + use namada::vote_ext::{ + bridge_pool_roots, ethereum_events, validator_set_update, + }; use super::*; use crate::node::ledger::shell::test_utils::{ @@ -673,6 +557,69 @@ mod test_process_proposal { const GAS_LIMIT_MULTIPLIER: u64 = 100_000; + /// Check that we reject a validator set update protocol tx + /// if the bridge is not active. 
+ #[test] + fn check_rejected_valset_upd_bridge_inactive() { + if is_bridge_comptime_enabled() { + // NOTE: validator set updates are always signed + // when the bridge is enabled at compile time + return; + } + + let (shell, _, _, _) = test_utils::setup_at_height(3); + let ext = { + let eth_hot_key = + shell.mode.get_eth_bridge_keypair().expect("Test failed"); + let signing_epoch = shell.state.in_mem().get_current_epoch().0; + let next_epoch = signing_epoch.next(); + let voting_powers = shell + .state + .ethbridge_queries() + .get_consensus_eth_addresses(Some(next_epoch)) + .iter() + .map(|(eth_addr_book, _, voting_power)| { + (eth_addr_book, voting_power) + }) + .collect(); + let validator_addr = shell + .mode + .get_validator_address() + .expect("Test failed") + .clone(); + let ext = validator_set_update::Vext { + voting_powers, + validator_addr, + signing_epoch, + }; + ext.sign(eth_hot_key) + }; + let request = { + let protocol_key = + shell.mode.get_protocol_key().expect("Test failed"); + let tx = EthereumTxData::ValSetUpdateVext(ext) + .sign(protocol_key, shell.chain_id.clone()) + .to_bytes(); + ProcessProposal { txs: vec![tx] } + }; + + let response = if let Err(TestError::RejectProposal(resp)) = + shell.process_proposal(request) + { + if let [resp] = resp.as_slice() { + resp.clone() + } else { + panic!("Test failed") + } + } else { + panic!("Test failed") + }; + assert_eq!( + response.result.code, + u32::from(ResultCode::InvalidVoteExtension) + ); + } + /// Check that we reject an eth events protocol tx /// if the bridge is not active. 
#[test] @@ -695,13 +642,15 @@ mod test_process_proposal { .to_bytes(); let request = ProcessProposal { txs: vec![tx] }; - let [resp]: [ProcessedTx; 1] = shell - .process_proposal(request.clone()) - .expect("Test failed") - .try_into() - .expect("Test failed"); - assert_eq!(resp.result.code, u32::from(ResultCode::Ok)); - deactivate_bridge(&mut shell); + if is_bridge_comptime_enabled() { + let [resp]: [ProcessedTx; 1] = shell + .process_proposal(request.clone()) + .expect("Test failed") + .try_into() + .expect("Test failed"); + assert_eq!(resp.result.code, u32::from(ResultCode::Ok)); + deactivate_bridge(&mut shell); + } let response = if let Err(TestError::RejectProposal(resp)) = shell.process_proposal(request) { @@ -746,14 +695,16 @@ mod test_process_proposal { .to_bytes(); let request = ProcessProposal { txs: vec![tx] }; - let [resp]: [ProcessedTx; 1] = shell - .process_proposal(request.clone()) - .expect("Test failed") - .try_into() - .expect("Test failed"); + if is_bridge_comptime_enabled() { + let [resp]: [ProcessedTx; 1] = shell + .process_proposal(request.clone()) + .expect("Test failed") + .try_into() + .expect("Test failed"); - assert_eq!(resp.result.code, u32::from(ResultCode::Ok)); - deactivate_bridge(&mut shell); + assert_eq!(resp.result.code, u32::from(ResultCode::Ok)); + deactivate_bridge(&mut shell); + } let response = if let Err(TestError::RejectProposal(resp)) = shell.process_proposal(request) { @@ -958,7 +909,7 @@ mod test_process_proposal { outer_tx.header.chain_id = shell.chain_id.clone(); outer_tx.set_code(Code::new("wasm_code".as_bytes().to_owned(), None)); outer_tx.set_data(Data::new("transaction data".as_bytes().to_owned())); - outer_tx.add_section(Section::Signature(Signature::new( + outer_tx.add_section(Section::Authorization(Authorization::new( outer_tx.sechashes(), [(0, keypair)].into_iter().collect(), None, @@ -1032,7 +983,7 @@ mod test_process_proposal { outer_tx.header.chain_id = shell.chain_id.clone(); 
outer_tx.set_code(Code::new("wasm_code".as_bytes().to_owned(), None)); outer_tx.set_data(Data::new("transaction data".as_bytes().to_owned())); - outer_tx.add_section(Section::Signature(Signature::new( + outer_tx.add_section(Section::Authorization(Authorization::new( outer_tx.sechashes(), [(0, keypair)].into_iter().collect(), None, @@ -1100,7 +1051,7 @@ mod test_process_proposal { outer_tx.header.chain_id = shell.chain_id.clone(); outer_tx.set_code(Code::new("wasm_code".as_bytes().to_owned(), None)); outer_tx.set_data(Data::new("transaction data".as_bytes().to_owned())); - outer_tx.add_section(Section::Signature(Signature::new( + outer_tx.add_section(Section::Authorization(Authorization::new( outer_tx.sechashes(), [(0, keypair)].into_iter().collect(), None, @@ -1134,190 +1085,6 @@ mod test_process_proposal { ); } - /// Test that if the expected order of decrypted txs is - /// validated, [`process_proposal`] rejects it - #[test] - fn test_decrypted_txs_out_of_order() { - let (mut shell, _recv, _, _) = test_utils::setup_at_height(3u64); - let keypair = gen_keypair(); - let mut txs = vec![]; - for i in 0..3 { - let mut outer_tx = - Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( - Fee { - amount_per_gas_unit: DenominatedAmount::native( - Amount::native_whole(i as u64), - ), - token: shell.state.in_mem().native_token.clone(), - }, - keypair.ref_to(), - Epoch(0), - GAS_LIMIT_MULTIPLIER.into(), - None, - )))); - outer_tx.header.chain_id = shell.chain_id.clone(); - outer_tx - .set_code(Code::new("wasm_code".as_bytes().to_owned(), None)); - outer_tx.set_data(Data::new( - format!("transaction data: {}", i).as_bytes().to_owned(), - )); - let gas_limit = - Gas::from(outer_tx.header().wrapper().unwrap().gas_limit) - .checked_sub(Gas::from(outer_tx.to_bytes().len() as u64)) - .unwrap(); - shell.enqueue_tx(outer_tx.clone(), gas_limit); - - outer_tx.update_header(TxType::Decrypted(DecryptedTx::Decrypted)); - txs.push(outer_tx); - } - let response = { - let request = 
ProcessProposal { - txs: vec![ - txs[0].to_bytes(), - txs[2].to_bytes(), - txs[1].to_bytes(), - ], - }; - if let Err(TestError::RejectProposal(mut resp)) = - shell.process_proposal(request) - { - assert_eq!(resp.len(), 3); - resp.remove(1) - } else { - panic!("Test failed") - } - }; - assert_eq!(response.result.code, u32::from(ResultCode::InvalidOrder)); - assert_eq!( - response.result.info, - String::from( - "Process proposal rejected a decrypted transaction that \ - violated the tx order determined in the previous block" - ), - ); - } - - /// Test that a block containing a tx incorrectly labelled as undecryptable - /// is rejected by [`process_proposal`] - #[test] - fn test_incorrectly_labelled_as_undecryptable() { - let (mut shell, _recv, _, _) = test_utils::setup_at_height(3u64); - let keypair = gen_keypair(); - - let mut tx = Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( - Fee { - amount_per_gas_unit: DenominatedAmount::native( - Default::default(), - ), - token: shell.state.in_mem().native_token.clone(), - }, - keypair.ref_to(), - Epoch(0), - GAS_LIMIT_MULTIPLIER.into(), - None, - )))); - tx.header.chain_id = shell.chain_id.clone(); - tx.set_code(Code::new("wasm_code".as_bytes().to_owned(), None)); - tx.set_data(Data::new("transaction data".as_bytes().to_owned())); - let gas_limit = Gas::from(tx.header().wrapper().unwrap().gas_limit) - .checked_sub(Gas::from(tx.to_bytes().len() as u64)) - .unwrap(); - shell.enqueue_tx(tx.clone(), gas_limit); - - tx.header.tx_type = TxType::Decrypted(DecryptedTx::Undecryptable); - - let response = { - let request = ProcessProposal { - txs: vec![tx.to_bytes()], - }; - if let Err(TestError::RejectProposal(resp)) = - shell.process_proposal(request) - { - if let [resp] = resp.as_slice() { - resp.clone() - } else { - panic!("Test failed") - } - } else { - panic!("Test failed") - } - }; - assert_eq!(response.result.code, u32::from(ResultCode::InvalidTx)); - assert_eq!( - response.result.info, - String::from( - "The 
encrypted payload of tx was incorrectly marked as \ - un-decryptable" - ), - ) - } - - /// Test that if a wrapper tx contains marked undecryptable the proposal is - /// rejected - #[test] - fn test_undecryptable() { - let (mut shell, _recv, _, _) = test_utils::setup_at_height(3u64); - let keypair = crate::wallet::defaults::daewon_keypair(); - // not valid tx bytes - let wrapper = WrapperTx { - fee: Fee { - amount_per_gas_unit: DenominatedAmount::native( - Default::default(), - ), - token: shell.state.in_mem().native_token.clone(), - }, - pk: keypair.ref_to(), - epoch: Epoch(0), - gas_limit: GAS_LIMIT_MULTIPLIER.into(), - unshield_section_hash: None, - }; - - let tx = Tx::from_type(TxType::Wrapper(Box::new(wrapper))); - let mut decrypted = tx.clone(); - decrypted.update_header(TxType::Decrypted(DecryptedTx::Undecryptable)); - - let gas_limit = Gas::from(tx.header().wrapper().unwrap().gas_limit) - .checked_sub(Gas::from(tx.to_bytes().len() as u64)) - .unwrap(); - shell.enqueue_tx(tx, gas_limit); - - let request = ProcessProposal { - txs: vec![decrypted.to_bytes()], - }; - shell.process_proposal(request).expect_err("Test failed"); - } - - /// Test that if more decrypted txs are submitted to - /// [`process_proposal`] than expected, they are rejected - #[test] - fn test_too_many_decrypted_txs() { - let (shell, _recv, _, _) = test_utils::setup_at_height(3u64); - let mut tx = Tx::from_type(TxType::Decrypted(DecryptedTx::Decrypted)); - tx.header.chain_id = shell.chain_id.clone(); - tx.set_code(Code::new("wasm_code".as_bytes().to_owned(), None)); - tx.set_data(Data::new("transaction data".as_bytes().to_owned())); - - let request = ProcessProposal { - txs: vec![tx.to_bytes()], - }; - let response = if let Err(TestError::RejectProposal(resp)) = - shell.process_proposal(request) - { - if let [resp] = resp.as_slice() { - resp.clone() - } else { - panic!("Test failed") - } - } else { - panic!("Test failed") - }; - assert_eq!(response.result.code, 
u32::from(ResultCode::ExtraTxs)); - assert_eq!( - response.result.info, - String::from("Received more decrypted txs than expected"), - ); - } - /// Process Proposal should reject a block containing a RawTx, but not panic #[test] fn test_raw_tx_rejected() { @@ -1380,7 +1147,7 @@ mod test_process_proposal { wrapper.header.chain_id = shell.chain_id.clone(); wrapper.set_data(Data::new("transaction data".as_bytes().to_owned())); wrapper.set_code(Code::new("wasm_code".as_bytes().to_owned(), None)); - wrapper.add_section(Section::Signature(Signature::new( + wrapper.add_section(Section::Authorization(Authorization::new( wrapper.sechashes(), [(0, keypair)].into_iter().collect(), None, @@ -1450,7 +1217,7 @@ mod test_process_proposal { wrapper.header.chain_id = shell.chain_id.clone(); wrapper.set_code(Code::new("wasm_code".as_bytes().to_owned(), None)); wrapper.set_data(Data::new("transaction data".as_bytes().to_owned())); - wrapper.add_section(Section::Signature(Signature::new( + wrapper.add_section(Section::Authorization(Authorization::new( wrapper.sechashes(), [(0, keypair)].into_iter().collect(), None, @@ -1504,7 +1271,7 @@ mod test_process_proposal { wrapper.header.chain_id = shell.chain_id.clone(); wrapper.set_code(Code::new("wasm_code".as_bytes().to_owned(), None)); wrapper.set_data(Data::new("transaction data".as_bytes().to_owned())); - wrapper.add_section(Section::Signature(Signature::new( + wrapper.add_section(Section::Authorization(Authorization::new( wrapper.sechashes(), [(0, keypair)].into_iter().collect(), None, @@ -1565,7 +1332,7 @@ mod test_process_proposal { wrapper.set_code(Code::new("wasm_code".as_bytes().to_owned(), None)); wrapper.set_data(Data::new("transaction data".as_bytes().to_owned())); let mut new_wrapper = wrapper.clone(); - wrapper.add_section(Section::Signature(Signature::new( + wrapper.add_section(Section::Authorization(Authorization::new( wrapper.sechashes(), [(0, keypair)].into_iter().collect(), None, @@ -1581,7 +1348,7 @@ mod 
test_process_proposal { GAS_LIMIT_MULTIPLIER.into(), None, )))); - new_wrapper.add_section(Section::Signature(Signature::new( + new_wrapper.add_section(Section::Authorization(Authorization::new( new_wrapper.sechashes(), [(0, keypair_2)].into_iter().collect(), None, @@ -1621,7 +1388,7 @@ mod test_process_proposal { wrapper.header.chain_id = wrong_chain_id.clone(); wrapper.set_code(Code::new("wasm_code".as_bytes().to_owned(), None)); wrapper.set_data(Data::new("transaction data".as_bytes().to_owned())); - wrapper.add_section(Section::Signature(Signature::new( + wrapper.add_section(Section::Authorization(Authorization::new( wrapper.sechashes(), [(0, keypair)].into_iter().collect(), None, @@ -1683,7 +1450,7 @@ mod test_process_proposal { wrapper.header.expiration = Some(DateTimeUtc::default()); wrapper.set_code(Code::new("wasm_code".as_bytes().to_owned(), None)); wrapper.set_data(Data::new("transaction data".as_bytes().to_owned())); - wrapper.add_section(Section::Signature(Signature::new( + wrapper.add_section(Section::Authorization(Authorization::new( wrapper.sechashes(), [(0, keypair)].into_iter().collect(), None, @@ -1704,55 +1471,6 @@ mod test_process_proposal { } } - /// Test that an expired decrypted transaction is marked as rejected but - /// still allows the block to be accepted - #[test] - fn test_expired_decrypted() { - let (mut shell, _recv, _, _) = test_utils::setup(); - let keypair = crate::wallet::defaults::daewon_keypair(); - - let mut wrapper = - Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( - Fee { - amount_per_gas_unit: DenominatedAmount::native(1.into()), - token: shell.state.in_mem().native_token.clone(), - }, - keypair.ref_to(), - Epoch(0), - GAS_LIMIT_MULTIPLIER.into(), - None, - )))); - wrapper.header.chain_id = shell.chain_id.clone(); - wrapper.header.expiration = Some(DateTimeUtc::default()); - wrapper.set_code(Code::new("wasm_code".as_bytes().to_owned(), None)); - wrapper.set_data(Data::new("transaction data".as_bytes().to_owned())); 
- wrapper.add_section(Section::Signature(Signature::new( - wrapper.sechashes(), - [(0, keypair)].into_iter().collect(), - None, - ))); - - shell.enqueue_tx(wrapper.clone(), GAS_LIMIT_MULTIPLIER.into()); - - let decrypted = - wrapper.update_header(TxType::Decrypted(DecryptedTx::Decrypted)); - - // Run validation - let request = ProcessProposal { - txs: vec![decrypted.to_bytes()], - }; - match shell.process_proposal(request) { - Ok(txs) => { - assert_eq!(txs.len(), 1); - assert_eq!( - txs[0].result.code, - u32::from(ResultCode::ExpiredDecryptedTx) - ); - } - Err(_) => panic!("Test failed"), - } - } - /// Check that a tx requiring more gas than the block limit causes a block /// rejection #[test] @@ -1777,7 +1495,7 @@ mod test_process_proposal { wrapper.header.chain_id = shell.chain_id.clone(); wrapper.set_code(Code::new("wasm_code".as_bytes().to_owned(), None)); wrapper.set_data(Data::new("transaction data".as_bytes().to_owned())); - wrapper.add_section(Section::Signature(Signature::new( + wrapper.add_section(Section::Authorization(Authorization::new( wrapper.sechashes(), [(0, keypair)].into_iter().collect(), None, @@ -1792,7 +1510,7 @@ mod test_process_proposal { Err(TestError::RejectProposal(response)) => { assert_eq!( response[0].result.code, - u32::from(ResultCode::AllocationError) + u32::from(ResultCode::TxGasLimit) ); } } @@ -1819,7 +1537,7 @@ mod test_process_proposal { wrapper.header.chain_id = shell.chain_id.clone(); wrapper.set_code(Code::new("wasm_code".as_bytes().to_owned(), None)); wrapper.set_data(Data::new("transaction data".as_bytes().to_owned())); - wrapper.add_section(Section::Signature(Signature::new( + wrapper.add_section(Section::Authorization(Authorization::new( wrapper.sechashes(), [(0, keypair)].into_iter().collect(), None, @@ -1867,7 +1585,7 @@ mod test_process_proposal { wrapper.header.chain_id = shell.chain_id.clone(); wrapper.set_code(Code::new("wasm_code".as_bytes().to_owned(), None)); wrapper.set_data(Data::new("transaction 
data".as_bytes().to_owned())); - wrapper.add_section(Section::Signature(Signature::new( + wrapper.add_section(Section::Authorization(Authorization::new( wrapper.sechashes(), [(0, crate::wallet::defaults::albert_keypair())] .into_iter() @@ -1910,7 +1628,7 @@ mod test_process_proposal { wrapper.header.chain_id = shell.chain_id.clone(); wrapper.set_code(Code::new("wasm code".as_bytes().to_owned(), None)); wrapper.set_data(Data::new("transaction data".as_bytes().to_owned())); - wrapper.add_section(Section::Signature(Signature::new( + wrapper.add_section(Section::Authorization(Authorization::new( wrapper.sechashes(), [(0, crate::wallet::defaults::albert_keypair())] .into_iter() @@ -1955,7 +1673,7 @@ mod test_process_proposal { wrapper.header.chain_id = shell.chain_id.clone(); wrapper.set_code(Code::new("wasm code".as_bytes().to_owned(), None)); wrapper.set_data(Data::new("transaction data".as_bytes().to_owned())); - wrapper.add_section(Section::Signature(Signature::new( + wrapper.add_section(Section::Authorization(Authorization::new( wrapper.sechashes(), [(0, crate::wallet::defaults::albert_keypair())] .into_iter() @@ -2000,7 +1718,7 @@ mod test_process_proposal { wrapper.header.chain_id = shell.chain_id.clone(); wrapper.set_code(Code::new("wasm code".as_bytes().to_owned(), None)); wrapper.set_data(Data::new("transaction data".as_bytes().to_owned())); - wrapper.add_section(Section::Signature(Signature::new( + wrapper.add_section(Section::Authorization(Authorization::new( wrapper.sechashes(), [(0, crate::wallet::defaults::albert_keypair())] .into_iter() @@ -2023,65 +1741,6 @@ mod test_process_proposal { } } - /// Test if we reject wrapper txs when they shouldn't be included in blocks. - /// - /// Currently, the conditions to reject wrapper - /// txs are simply to check if we are at the 2nd - /// or 3rd height offset within an epoch. 
- #[test] - fn test_include_only_protocol_txs() { - let (mut shell, _recv, _, _) = test_utils::setup_at_height(1u64); - let keypair = gen_keypair(); - let mut wrapper = - Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( - Fee { - amount_per_gas_unit: DenominatedAmount::native(0.into()), - token: shell.state.in_mem().native_token.clone(), - }, - keypair.ref_to(), - Epoch(0), - GAS_LIMIT_MULTIPLIER.into(), - None, - )))); - wrapper.header.chain_id = shell.chain_id.clone(); - wrapper.set_code(Code::new("wasm_code".as_bytes().to_owned(), None)); - wrapper.set_data(Data::new("transaction data".as_bytes().to_owned())); - wrapper.add_section(Section::Signature(Signature::new( - wrapper.sechashes(), - [(0, keypair)].into_iter().collect(), - None, - ))); - let wrapper = wrapper.to_bytes(); - for height in [1u64, 2] { - if let Some(b) = shell.state.in_mem_mut().last_block.as_mut() { - b.height = height.into(); - } - let response = { - let request = ProcessProposal { - txs: vec![wrapper.clone()], - }; - if let Err(TestError::RejectProposal(mut resp)) = - shell.process_proposal(request) - { - assert_eq!(resp.len(), 1); - resp.remove(0) - } else { - panic!("Test failed") - } - }; - assert_eq!( - response.result.code, - u32::from(ResultCode::AllocationError) - ); - assert_eq!( - response.result.info, - String::from( - "Wrapper txs not allowed at the current block height" - ), - ); - } - } - /// Test max tx bytes parameter in ProcessProposal #[test] fn test_max_tx_bytes_process_proposal() { @@ -2116,7 +1775,7 @@ mod test_process_proposal { wrapper .set_code(Code::new("wasm_code".as_bytes().to_owned(), None)); wrapper.set_data(Data::new(vec![0; size as usize])); - wrapper.add_section(Section::Signature(Signature::new( + wrapper.add_section(Section::Authorization(Authorization::new( wrapper.sechashes(), [(0, keypair)].into_iter().collect(), None, @@ -2158,6 +1817,12 @@ mod test_process_proposal { const LAST_HEIGHT: BlockHeight = BlockHeight(3); + if 
!is_bridge_comptime_enabled() { + // NOTE: this test doesn't work if the ethereum bridge + // is disabled at compile time. + return; + } + let (mut shell, _recv, _, _) = test_utils::setup_at_height(LAST_HEIGHT); shell .state diff --git a/crates/apps/src/lib/node/ledger/shell/queries.rs b/crates/apps/src/lib/node/ledger/shell/queries.rs index fd1ad1f221..672e221b2d 100644 --- a/crates/apps/src/lib/node/ledger/shell/queries.rs +++ b/crates/apps/src/lib/node/ledger/shell/queries.rs @@ -64,6 +64,7 @@ where #[cfg(test)] mod test_queries { use namada::core::storage::{BlockHash, Epoch}; + use namada::eth_bridge::storage::eth_bridge_queries::is_bridge_comptime_enabled; use namada::ledger::pos::PosQueries; use namada::proof_of_stake::storage::read_consensus_validator_set_addresses_with_stake; use namada::proof_of_stake::types::WeightedValidator; @@ -162,40 +163,48 @@ mod test_queries { }; } + const fn send_valset(value: bool) -> bool { + if !is_bridge_comptime_enabled() { + false + } else { + value + } + } + test_must_send_valset_upd! 
{ epoch_assertions: [ - // (current epoch, current block height, can send valset upd) + // (current epoch, current block height, must send valset upd) // NOTE: can send valset upd on every 2nd block of an epoch - (0, 1, false), - (0, 2, true), - (0, 3, false), - (0, 4, false), - (0, 5, false), - (0, 6, false), - (0, 7, false), - (0, 8, false), - (0, 9, false), + (0, 1, send_valset(false)), + (0, 2, send_valset(true)), + (0, 3, send_valset(false)), + (0, 4, send_valset(false)), + (0, 5, send_valset(false)), + (0, 6, send_valset(false)), + (0, 7, send_valset(false)), + (0, 8, send_valset(false)), + (0, 9, send_valset(false)), // we will change epoch here - (0, 10, false), - (1, 11, true), - (1, 12, false), - (1, 13, false), - (1, 14, false), - (1, 15, false), - (1, 16, false), - (1, 17, false), - (1, 18, false), - (1, 19, false), + (0, 10, send_valset(false)), + (1, 11, send_valset(true)), + (1, 12, send_valset(false)), + (1, 13, send_valset(false)), + (1, 14, send_valset(false)), + (1, 15, send_valset(false)), + (1, 16, send_valset(false)), + (1, 17, send_valset(false)), + (1, 18, send_valset(false)), + (1, 19, send_valset(false)), // we will change epoch here - (1, 20, false), - (2, 21, true), - (2, 22, false), - (2, 23, false), - (2, 24, false), - (2, 25, false), - (2, 26, false), - (2, 27, false), - (2, 28, false), + (1, 20, send_valset(false)), + (2, 21, send_valset(true)), + (2, 22, send_valset(false)), + (2, 23, send_valset(false)), + (2, 24, send_valset(false)), + (2, 25, send_valset(false)), + (2, 26, send_valset(false)), + (2, 27, send_valset(false)), + (2, 28, send_valset(false)), ], } } diff --git a/crates/apps/src/lib/node/ledger/shell/stats.rs b/crates/apps/src/lib/node/ledger/shell/stats.rs index 0a677ed576..9162bb792b 100644 --- a/crates/apps/src/lib/node/ledger/shell/stats.rs +++ b/crates/apps/src/lib/node/ledger/shell/stats.rs @@ -1,6 +1,7 @@ -use std::collections::HashMap; use std::fmt::Display; +use namada::core::collections::HashMap; + 
#[derive(Debug, Default)] pub struct InternalStats { successful_tx: u64, diff --git a/crates/apps/src/lib/node/ledger/shell/testing/node.rs b/crates/apps/src/lib/node/ledger/shell/testing/node.rs index f133082f8e..9e3e984f46 100644 --- a/crates/apps/src/lib/node/ledger/shell/testing/node.rs +++ b/crates/apps/src/lib/node/ledger/shell/testing/node.rs @@ -1,4 +1,4 @@ -use std::collections::HashMap; +use std::fmt::{Debug, Formatter}; use std::future::poll_fn; use std::mem::ManuallyDrop; use std::path::PathBuf; @@ -10,7 +10,9 @@ use color_eyre::eyre::{Report, Result}; use data_encoding::HEXUPPER; use itertools::Either; use lazy_static::lazy_static; +use namada::address::Address; use namada::control_flow::time::Duration; +use namada::core::collections::HashMap; use namada::core::ethereum_events::EthereumEvent; use namada::core::ethereum_structs; use namada::core::hash::Hash; @@ -29,7 +31,9 @@ use namada::proof_of_stake::storage::{ validator_consensus_key_handle, }; use namada::proof_of_stake::types::WeightedValidator; -use namada::state::{LastBlock, Sha256Hasher, EPOCH_SWITCH_BLOCKS_DELAY}; +use namada::state::{ + LastBlock, Sha256Hasher, StorageRead, EPOCH_SWITCH_BLOCKS_DELAY, +}; use namada::tendermint::abci::response::Info; use namada::tendermint::abci::types::VoteInfo; use namada_sdk::queries::Client; @@ -233,7 +237,7 @@ pub fn mock_services(cfg: MockServicesCfg) -> MockServicesPackage { } /// Status of tx -#[derive(Debug, Clone, PartialEq, Eq)] +#[derive(Debug, Clone, PartialEq, Eq, Hash)] pub enum NodeResults { /// Success Ok, @@ -253,6 +257,14 @@ pub struct MockNode { pub auto_drive_services: bool, } +impl Debug for MockNode { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + f.debug_struct("MockNode") + .field("shell", &self.shell) + .finish() + } +} + impl Drop for MockNode { fn drop(&mut self) { unsafe { @@ -330,8 +342,10 @@ impl MockNode { locked.state.in_mem().get_last_block_height() + 1; locked.state.in_mem_mut().next_epoch_min_start_height = 
next_epoch_height; - locked.state.in_mem_mut().next_epoch_min_start_time = - DateTimeUtc::now(); + locked.state.in_mem_mut().next_epoch_min_start_time = { + #[allow(clippy::disallowed_methods)] + DateTimeUtc::now() + }; let next_epoch_min_start_height = locked.state.in_mem().next_epoch_min_start_height; if let Some(LastBlock { height, .. }) = @@ -354,6 +368,11 @@ impl MockNode { .0 } + pub fn native_token(&self) -> Address { + let locked = self.shell.lock().unwrap(); + locked.state.get_native_token().unwrap() + } + /// Get the address of the block proposer and the votes for the block fn prepare_request(&self) -> (Vec, Vec) { let (val1, ck) = { @@ -426,6 +445,7 @@ impl MockNode { hash: BlockHash([0u8; 32]), header: Header { hash: Hash([0; 32]), + #[allow(clippy::disallowed_methods)] time: DateTimeUtc::now(), next_validators_hash: Hash([0; 32]), }, @@ -501,28 +521,13 @@ impl MockNode { ); } - /// Advance to a block height that allows - /// txs - fn advance_to_allowed_block(&self) { - loop { - let not_allowed = - { self.shell.lock().unwrap().encrypted_txs_not_allowed() }; - if not_allowed { - self.finalize_and_commit(); - } else { - break; - } - } - } - /// Send a tx through Process Proposal and Finalize Block /// and register the results. pub fn submit_txs(&self, txs: Vec>) { - // The block space allocator disallows encrypted txs in certain blocks. - // Advance to block height that allows txs. 
- self.advance_to_allowed_block(); + self.finalize_and_commit(); let (proposer_address, votes) = self.prepare_request(); + #[allow(clippy::disallowed_methods)] let time = DateTimeUtc::now(); let req = RequestProcessProposal { txs: txs.clone().into_iter().map(|tx| tx.into()).collect(), @@ -558,6 +563,7 @@ impl MockNode { hash: BlockHash([0u8; 32]), header: Header { hash: Hash([0; 32]), + #[allow(clippy::disallowed_methods)] time: DateTimeUtc::now(), next_validators_hash: Hash([0; 32]), }, @@ -649,6 +655,14 @@ impl MockNode { .all(|r| *r == NodeResults::Ok) } + /// Return a tx result if the tx failed in mempool + pub fn is_broadcast_err(&self) -> Option { + self.results.lock().unwrap().iter().find_map(|r| match r { + NodeResults::Ok | NodeResults::Failed(_) => None, + NodeResults::Rejected(tx_result) => Some(tx_result.clone()), + }) + } + pub fn clear_results(&self) { self.results.lock().unwrap().clear(); } @@ -772,28 +786,15 @@ impl<'a> Client for &'a MockNode { }; let tx_bytes: Vec = tx.into(); self.submit_txs(vec![tx_bytes]); - if !self.success() { - // TODO: submit_txs should return the correct error code + message - resp.code = 1337.into(); - return Ok(resp); - } else { - self.clear_results(); - } - let (proposer_address, _) = self.prepare_request(); - let req = RequestPrepareProposal { - proposer_address: proposer_address.into(), - ..Default::default() - }; - let txs: Vec> = { - let locked = self.shell.lock().unwrap(); - locked.prepare_proposal(req).txs - } - .into_iter() - .map(|tx| tx.into()) - .collect(); - if !txs.is_empty() { - self.submit_txs(txs); + + // If the error happened during broadcasting, attach its result to + // response + if let Some(TxResult { code, info }) = self.is_broadcast_err() { + resp.code = code.into(); + resp.log = info; } + + self.clear_results(); Ok(resp) } @@ -972,7 +973,7 @@ fn parse_tm_query( query: namada::tendermint_rpc::query::Query, ) -> dumb_queries::QueryMatcher { const QUERY_PARSING_REGEX_STR: &str = - 
r"^tm\.event='NewBlock' AND (accepted|applied)\.hash='([^']+)'$"; + r"^tm\.event='NewBlock' AND applied\.hash='([^']+)'$"; lazy_static! { /// Compiled regular expression used to parse Tendermint queries. @@ -983,13 +984,10 @@ fn parse_tm_query( let captures = QUERY_PARSING_REGEX.captures(&query).unwrap(); match captures.get(0).unwrap().as_str() { - "accepted" => dumb_queries::QueryMatcher::accepted( - captures.get(1).unwrap().as_str().try_into().unwrap(), - ), "applied" => dumb_queries::QueryMatcher::applied( captures.get(1).unwrap().as_str().try_into().unwrap(), ), - _ => unreachable!("We only query accepted or applied txs"), + _ => unreachable!("We only query applied txs"), } } diff --git a/crates/apps/src/lib/node/ledger/shell/testing/utils.rs b/crates/apps/src/lib/node/ledger/shell/testing/utils.rs index c742559b84..52485b9d24 100644 --- a/crates/apps/src/lib/node/ledger/shell/testing/utils.rs +++ b/crates/apps/src/lib/node/ledger/shell/testing/utils.rs @@ -1,3 +1,4 @@ +use std::fmt::Display; use std::ops::{Deref, DerefMut}; use std::path::{Path, PathBuf}; use std::pin::Pin; @@ -172,15 +173,29 @@ impl CapturedOutput { CapturedOutput::of(func) } - /// Check if the captured output contains the regex. - pub fn matches(&self, needle: regex::Regex) -> bool { - needle.captures(&self.output).is_some() + /// Return the first capture of the regex from the output. + pub fn matches(&self, needle: &str) -> Option<&str> { + let needle = regex::Regex::new(needle).unwrap(); + needle.find(&self.output).map(|x| x.as_str()) } /// Check if the captured output contains the string. 
pub fn contains(&self, needle: &str) -> bool { + self.matches(needle).is_some() + } +} + +impl CapturedOutput> { + pub fn err_contains(&self, needle: &str) -> bool { + if self.result.is_ok() { + return false; + } + let err_str = match self.result.as_ref() { + Ok(_) => unreachable!(), + Err(e) => e.to_string(), + }; let needle = regex::Regex::new(needle).unwrap(); - self.matches(needle) + needle.find(&err_str).is_some() } } diff --git a/crates/apps/src/lib/node/ledger/shell/vote_extensions.rs b/crates/apps/src/lib/node/ledger/shell/vote_extensions.rs index 9bf9f9bd1b..fa6fdbfcf9 100644 --- a/crates/apps/src/lib/node/ledger/shell/vote_extensions.rs +++ b/crates/apps/src/lib/node/ledger/shell/vote_extensions.rs @@ -4,6 +4,7 @@ pub mod bridge_pool_vext; pub mod eth_events; pub mod val_set_update; +use drain_filter_polyfill::DrainFilter; use namada::ethereum_bridge::protocol::transactions::bridge_pool_roots::sign_bridge_pool_root; use namada::ethereum_bridge::protocol::transactions::ethereum_events::sign_ethereum_events; use namada::ethereum_bridge::protocol::transactions::validator_set_update::sign_validator_set_update; @@ -115,9 +116,10 @@ where /// ones we could deserialize to vote extension protocol txs. pub fn deserialize_vote_extensions<'shell>( &'shell self, - txs: &'shell [TxBytes], - ) -> impl Iterator + 'shell { - txs.iter().filter_map(move |tx_bytes| { + txs: &'shell mut Vec, + ) -> DrainFilter<'shell, TxBytes, impl FnMut(&mut TxBytes) -> bool + 'shell> + { + drain_filter_polyfill::VecExt::drain_filter(txs, move |tx_bytes| { let tx = match Tx::try_from(tx_bytes.as_ref()) { Ok(tx) => tx, Err(err) => { @@ -126,25 +128,21 @@ where "Failed to deserialize tx in \ deserialize_vote_extensions" ); - return None; + return false; } }; - match (&tx).try_into().ok()? 
{ - EthereumTxData::BridgePoolVext(_) => Some(tx_bytes.clone()), - EthereumTxData::EthEventsVext(ext) => { + match (&tx).try_into().ok() { + Some(EthereumTxData::BridgePoolVext(_)) => true, + Some(EthereumTxData::EthEventsVext(ext)) => { // NB: only propose events with at least // one valid nonce - ext.data - .ethereum_events - .iter() - .any(|event| { - self.state - .ethbridge_queries() - .validate_eth_event_nonce(event) - }) - .then(|| tx_bytes.clone()) + ext.data.ethereum_events.iter().any(|event| { + self.state + .ethbridge_queries() + .validate_eth_event_nonce(event) + }) } - EthereumTxData::ValSetUpdateVext(ext) => { + Some(EthereumTxData::ValSetUpdateVext(ext)) => { // only include non-stale validator set updates // in block proposals. it might be sitting long // enough in the mempool for it to no longer be @@ -154,13 +152,13 @@ where // to remove it from the mempool this way, but it // will eventually be evicted, getting replaced // by newer txs. - (!self + let is_seen = self .state .ethbridge_queries() - .valset_upd_seen(ext.data.signing_epoch.next())) - .then(|| tx_bytes.clone()) + .valset_upd_seen(ext.data.signing_epoch.next()); + !is_seen } - _ => None, + _ => false, } }) } diff --git a/crates/apps/src/lib/node/ledger/shell/vote_extensions/bridge_pool_vext.rs b/crates/apps/src/lib/node/ledger/shell/vote_extensions/bridge_pool_vext.rs index c674d82fc9..de990eac74 100644 --- a/crates/apps/src/lib/node/ledger/shell/vote_extensions/bridge_pool_vext.rs +++ b/crates/apps/src/lib/node/ledger/shell/vote_extensions/bridge_pool_vext.rs @@ -58,6 +58,7 @@ mod test_bp_vote_extensions { use namada::core::key::*; use namada::core::storage::BlockHeight; use namada::core::token; + use namada::eth_bridge::storage::eth_bridge_queries::is_bridge_comptime_enabled; use namada::ethereum_bridge::protocol::validation::bridge_pool_roots::validate_bp_roots_vext; use namada::ethereum_bridge::storage::bridge_pool::get_key_from_hash; use 
namada::ethereum_bridge::storage::eth_bridge_queries::EthBridgeQueries; @@ -175,6 +176,11 @@ mod test_bp_vote_extensions { /// payload passes validation. #[test] fn test_happy_flow() { + if !is_bridge_comptime_enabled() { + // NOTE: this test doesn't work if the ethereum bridge + // is disabled at compile time. + return; + } let (mut shell, _broadcaster, _, _oracle_control_recv) = setup_at_height(1u64); let address = shell @@ -215,6 +221,11 @@ mod test_bp_vote_extensions { /// in a block proposal by validator address. #[test] fn test_vexts_are_de_duped() { + if !is_bridge_comptime_enabled() { + // NOTE: this test doesn't work if the ethereum bridge + // is disabled at compile time. + return; + } let (mut shell, _broadcaster, _, _oracle_control_recv) = setup_at_height(1u64); let address = shell @@ -250,6 +261,11 @@ mod test_bp_vote_extensions { /// even if the vext is signed by a validator #[test] fn test_bp_roots_must_be_signed_by_validator() { + if !is_bridge_comptime_enabled() { + // NOTE: this test doesn't work if the ethereum bridge + // is disabled at compile time. + return; + } let (mut shell, _broadcaster, _, _oracle_control_recv) = setup_at_height(1u64); let signing_key = gen_keypair(); @@ -284,6 +300,11 @@ mod test_bp_vote_extensions { /// are from the same validator. #[test] fn test_bp_root_sigs_from_same_validator() { + if !is_bridge_comptime_enabled() { + // NOTE: this test doesn't work if the ethereum bridge + // is disabled at compile time. + return; + } let (mut shell, _broadcaster, _, _oracle_control_recv) = setup_at_height(3u64); let address = shell @@ -343,6 +364,11 @@ mod test_bp_vote_extensions { /// block height as greater than the latest block height is rejected. #[test] fn test_block_height_too_high() { + if !is_bridge_comptime_enabled() { + // NOTE: this test doesn't work if the ethereum bridge + // is disabled at compile time. 
+ return; + } let (shell, _, _, _) = setup_at_height(3u64); reject_incorrect_block_number( shell.state.in_mem().get_last_block_height() + 1, @@ -354,6 +380,11 @@ mod test_bp_vote_extensions { /// issued at genesis. #[test] fn test_reject_genesis_vexts() { + if !is_bridge_comptime_enabled() { + // NOTE: this test doesn't work if the ethereum bridge + // is disabled at compile time. + return; + } let (shell, _, _, _) = setup(); reject_incorrect_block_number(0.into(), &shell); } @@ -362,6 +393,11 @@ mod test_bp_vote_extensions { /// if the nonce is incorrect. #[test] fn test_incorrect_nonce() { + if !is_bridge_comptime_enabled() { + // NOTE: this test doesn't work if the ethereum bridge + // is disabled at compile time. + return; + } let (shell, _, _, _) = setup(); let address = shell.mode.get_validator_address().unwrap().clone(); let to_sign = get_bp_bytes_to_sign(); @@ -390,6 +426,11 @@ mod test_bp_vote_extensions { /// if the root is incorrect. #[test] fn test_incorrect_root() { + if !is_bridge_comptime_enabled() { + // NOTE: this test doesn't work if the ethereum bridge + // is disabled at compile time. + return; + } let (shell, _, _, _) = setup(); let address = shell.mode.get_validator_address().unwrap().clone(); let to_sign = get_bp_bytes_to_sign(); @@ -418,6 +459,11 @@ mod test_bp_vote_extensions { /// prior. #[test] fn test_vext_for_old_height() { + if !is_bridge_comptime_enabled() { + // NOTE: this test doesn't work if the ethereum bridge + // is disabled at compile time. + return; + } let (mut shell, _recv, _, _oracle_control_recv) = setup_at_height(1u64); let address = shell.mode.get_validator_address().unwrap().clone(); shell.state.in_mem_mut().block.height = 2.into(); @@ -493,6 +539,11 @@ mod test_bp_vote_extensions { /// we reject. #[test] fn test_wrong_height_for_root() { + if !is_bridge_comptime_enabled() { + // NOTE: this test doesn't work if the ethereum bridge + // is disabled at compile time. 
+ return; + } let (mut shell, _recv, _, _oracle_control_recv) = setup_at_height(1u64); let address = shell.mode.get_validator_address().unwrap().clone(); shell.state.in_mem_mut().block.height = 2.into(); diff --git a/crates/apps/src/lib/node/ledger/shell/vote_extensions/eth_events.rs b/crates/apps/src/lib/node/ledger/shell/vote_extensions/eth_events.rs index 881e25e278..d70d0c9d03 100644 --- a/crates/apps/src/lib/node/ledger/shell/vote_extensions/eth_events.rs +++ b/crates/apps/src/lib/node/ledger/shell/vote_extensions/eth_events.rs @@ -1,8 +1,9 @@ //! Extend Tendermint votes with Ethereum events seen by a quorum of validators. -use std::collections::{BTreeMap, HashMap}; +use std::collections::BTreeMap; use namada::vote_ext::ethereum_events::MultiSignedEthEvent; +use namada_sdk::collections::HashMap; use super::*; @@ -135,7 +136,6 @@ where #[cfg(test)] mod test_vote_extensions { - use borsh_ext::BorshSerializeExt; use namada::core::address::testing::gen_established_address; use namada::core::ethereum_events::{ @@ -145,6 +145,7 @@ mod test_vote_extensions { use namada::core::key::*; use namada::core::storage::{Epoch, InnerEthEventsQueue}; use namada::eth_bridge::storage::bridge_pool; + use namada::eth_bridge::storage::eth_bridge_queries::is_bridge_comptime_enabled; use namada::ledger::eth_bridge::EthBridgeQueries; use namada::ledger::pos::PosQueries; use namada::proof_of_stake::storage::{ @@ -274,6 +275,11 @@ mod test_vote_extensions { /// done #[test] fn test_get_eth_events() { + if !is_bridge_comptime_enabled() { + // NOTE: this test doesn't work if the ethereum bridge + // is disabled at compile time. 
+ return; + } let (mut shell, _, oracle, _) = setup(); let event_1 = EthereumEvent::TransfersToEthereum { nonce: 0.into(), @@ -344,6 +350,11 @@ mod test_vote_extensions { /// Test that Ethereum events signed by a non-validator are rejected #[test] fn test_eth_events_must_be_signed_by_validator() { + if !is_bridge_comptime_enabled() { + // NOTE: this test doesn't work if the ethereum bridge + // is disabled at compile time. + return; + } let (shell, _, _, _) = setup_at_height(3u64); let signing_key = gen_keypair(); let address = shell @@ -383,6 +394,11 @@ mod test_vote_extensions { /// change to the validator set. #[test] fn test_validate_eth_events_vexts() { + if !is_bridge_comptime_enabled() { + // NOTE: this test doesn't work if the ethereum bridge + // is disabled at compile time. + return; + } let (mut shell, _recv, _, _oracle_control_recv) = setup_at_height(3u64); let signing_key = shell.mode.get_protocol_key().expect("Test failed").clone(); @@ -495,6 +511,11 @@ mod test_vote_extensions { /// greater than latest block height. #[test] fn reject_incorrect_block_number() { + if !is_bridge_comptime_enabled() { + // NOTE: this test doesn't work if the ethereum bridge + // is disabled at compile time. + return; + } let (shell, _, _, _) = setup_at_height(3u64); let address = shell.mode.get_validator_address().unwrap().clone(); #[allow(clippy::redundant_clone)] @@ -531,6 +552,11 @@ mod test_vote_extensions { /// issued at genesis #[test] fn test_reject_genesis_vexts() { + if !is_bridge_comptime_enabled() { + // NOTE: this test doesn't work if the ethereum bridge + // is disabled at compile time. 
+ return; + } let (shell, _, _, _) = setup(); let address = shell.mode.get_validator_address().unwrap().clone(); #[allow(clippy::redundant_clone)] diff --git a/crates/apps/src/lib/node/ledger/shell/vote_extensions/val_set_update.rs b/crates/apps/src/lib/node/ledger/shell/vote_extensions/val_set_update.rs index 1eba475c22..e71c3ba2cd 100644 --- a/crates/apps/src/lib/node/ledger/shell/vote_extensions/val_set_update.rs +++ b/crates/apps/src/lib/node/ledger/shell/vote_extensions/val_set_update.rs @@ -1,7 +1,7 @@ //! Extend Tendermint votes with validator set updates, to be relayed to //! Namada's Ethereum bridge smart contracts. -use std::collections::HashMap; +use namada::core::collections::HashMap; use super::*; @@ -110,6 +110,7 @@ where #[cfg(test)] mod test_vote_extensions { use namada::core::key::RefTo; + use namada::eth_bridge::storage::eth_bridge_queries::is_bridge_comptime_enabled; use namada::ledger::pos::PosQueries; use namada::proof_of_stake::storage::{ consensus_validator_set_handle, @@ -131,6 +132,11 @@ mod test_vote_extensions { /// epoch it was included on in a vote extension is rejected #[test] fn test_reject_incorrect_epoch() { + if !is_bridge_comptime_enabled() { + // NOTE: this test doesn't work if the ethereum bridge + // is disabled at compile time. + return; + } let (shell, _recv, _, _) = test_utils::setup(); let validator_addr = shell.mode.get_validator_address().unwrap().clone(); @@ -174,6 +180,11 @@ mod test_vote_extensions { /// a non-validator are rejected #[test] fn test_valset_upd_must_be_signed_by_validator() { + if !is_bridge_comptime_enabled() { + // NOTE: this test doesn't work if the ethereum bridge + // is disabled at compile time. + return; + } let (shell, _recv, _, _) = test_utils::setup(); let (eth_bridge_key, _protocol_key, validator_addr) = { let bertha_key = wallet::defaults::bertha_keypair(); @@ -216,6 +227,11 @@ mod test_vote_extensions { /// change to the validator set. 
#[test] fn test_validate_valset_upd_vexts() { + if !is_bridge_comptime_enabled() { + // NOTE: this test doesn't work if the ethereum bridge + // is disabled at compile time. + return; + } let (mut shell, _recv, _, _oracle_control_recv) = test_utils::setup(); // validators from the current epoch sign over validator @@ -341,6 +357,11 @@ mod test_vote_extensions { /// is rejected #[test] fn test_reject_bad_signatures() { + if !is_bridge_comptime_enabled() { + // NOTE: this test doesn't work if the ethereum bridge + // is disabled at compile time. + return; + } let (shell, _recv, _, _) = test_utils::setup(); let validator_addr = shell.mode.get_validator_address().unwrap().clone(); diff --git a/crates/apps/src/lib/node/ledger/shims/abcipp_shim.rs b/crates/apps/src/lib/node/ledger/shims/abcipp_shim.rs index 11788fe9d1..d7e1231ad1 100644 --- a/crates/apps/src/lib/node/ledger/shims/abcipp_shim.rs +++ b/crates/apps/src/lib/node/ledger/shims/abcipp_shim.rs @@ -143,7 +143,7 @@ impl AbcippShim { proposer from tendermint raw hash", ); - let (processing_results, _) = self.service.process_txs( + let processing_results = self.service.process_txs( &self.delivered_txs, block_time, &block_proposer, @@ -315,8 +315,10 @@ impl AbciService { .map(|res| { // emit a log line stating that we are sleeping until // genesis. 
+ #[allow(clippy::disallowed_methods)] + let now = Utc::now(); if let Some(Ok(sleep_time)) = genesis_time - .map(|t| t.0.signed_duration_since(Utc::now()).to_std()) + .map(|t| t.0.signed_duration_since(now).to_std()) { if !sleep_time.is_zero() { tracing::info!( diff --git a/crates/apps/src/lib/node/ledger/shims/abcipp_shim_types.rs b/crates/apps/src/lib/node/ledger/shims/abcipp_shim_types.rs index 2038beaf19..8f99c7c5bf 100644 --- a/crates/apps/src/lib/node/ledger/shims/abcipp_shim_types.rs +++ b/crates/apps/src/lib/node/ledger/shims/abcipp_shim_types.rs @@ -218,7 +218,7 @@ pub mod shim { #[derive(Debug, Default)] pub struct VerifyHeader; - #[derive(Debug, Default, Clone, PartialEq, Eq)] + #[derive(Debug, Default, Clone, PartialEq, Eq, Hash)] pub struct TxResult { pub code: u32, pub info: String, diff --git a/crates/apps/src/lib/node/ledger/storage/mod.rs b/crates/apps/src/lib/node/ledger/storage/mod.rs index 2e1c383ca4..8dcc303e37 100644 --- a/crates/apps/src/lib/node/ledger/storage/mod.rs +++ b/crates/apps/src/lib/node/ledger/storage/mod.rs @@ -53,11 +53,10 @@ fn new_blake2b() -> Blake2b { #[cfg(test)] mod tests { - use std::collections::HashMap; - use borsh::BorshDeserialize; use itertools::Itertools; use namada::core::chain::ChainId; + use namada::core::collections::HashMap; use namada::core::ethereum_events::Uint; use namada::core::hash::Hash; use namada::core::keccak::KeccakHash; @@ -167,11 +166,10 @@ mod tests { implicit_vp_code_hash: Default::default(), epochs_per_year: 365, max_signatures_per_transaction: 10, - staked_ratio: Default::default(), - pos_inflation_amount: Default::default(), fee_unshielding_gas_limit: 0, fee_unshielding_descriptions_limit: 0, minimum_gas_price: Default::default(), + is_native_token_transferable: true, }; parameters::init_storage(¶ms, &mut state).expect("Test failed"); // insert and commit @@ -473,11 +471,6 @@ mod tests { let mut batch = PersistentState::batch(); for (height, key, write_type) in blocks_write_type.clone() { if 
height != state.in_mem().block.height { - // to check the root later - roots.insert( - state.in_mem().block.height, - state.in_mem().merkle_root(), - ); if state.in_mem().block.height.0 % 5 == 0 { // new epoch every 5 heights state.in_mem_mut().block.epoch = @@ -486,6 +479,11 @@ mod tests { state.in_mem_mut().block.pred_epochs.new_epoch(height); } state.commit_block_from_batch(batch)?; + // to check the root later + roots.insert( + state.in_mem().block.height, + state.in_mem().merkle_root(), + ); let hash = BlockHash::default(); let next_height = state.in_mem().block.height.next_height(); state.in_mem_mut().begin_block(hash, next_height)?; @@ -779,10 +777,6 @@ mod tests { Key::parse("testing2").unwrap() } - fn merkle_tree_key_filter(key: &Key) -> bool { - key == &test_key_1() - } - #[test] fn test_persistent_storage_writing_without_merklizing_or_diffs() { let db_path = @@ -793,7 +787,8 @@ mod tests { ChainId::default(), address::testing::nam(), None, - merkle_tree_key_filter, + // Only merkelize and persist diffs for `test_key_1` + |key: &Key| -> bool { key == &test_key_1() }, ); // Start the first block let first_height = BlockHeight::first(); @@ -869,12 +864,12 @@ mod tests { // need to have diffs for at least 1 block for rollback purposes let res2 = state .db() - .read_diffs_val(&key2, first_height, true) + .read_rollback_val(&key2, first_height, true) .unwrap(); assert!(res2.is_none()); let res2 = state .db() - .read_diffs_val(&key2, first_height, false) + .read_rollback_val(&key2, first_height, false) .unwrap() .unwrap(); let res2 = u64::try_from_slice(&res2).unwrap(); @@ -932,12 +927,12 @@ mod tests { // Check that key-val-2 diffs don't exist for block 0 anymore let res2 = state .db() - .read_diffs_val(&key2, first_height, true) + .read_rollback_val(&key2, first_height, true) .unwrap(); assert!(res2.is_none()); let res2 = state .db() - .read_diffs_val(&key2, first_height, false) + .read_rollback_val(&key2, first_height, false) .unwrap(); 
assert!(res2.is_none()); @@ -945,14 +940,14 @@ mod tests { // val2 and no "new" value let res2 = state .db() - .read_diffs_val(&key2, second_height, true) + .read_rollback_val(&key2, second_height, true) .unwrap() .unwrap(); let res2 = u64::try_from_slice(&res2).unwrap(); assert_eq!(res2, val2); let res2 = state .db() - .read_diffs_val(&key2, second_height, false) + .read_rollback_val(&key2, second_height, false) .unwrap(); assert!(res2.is_none()); } diff --git a/crates/apps/src/lib/node/ledger/storage/rocksdb.rs b/crates/apps/src/lib/node/ledger/storage/rocksdb.rs index 9e7f642086..aadea16b28 100644 --- a/crates/apps/src/lib/node/ledger/storage/rocksdb.rs +++ b/crates/apps/src/lib/node/ledger/storage/rocksdb.rs @@ -7,24 +7,27 @@ //! - `eth_events_queue`: a queue of confirmed ethereum events to be processed //! in order //! - `height`: the last committed block height -//! - `tx_queue`: txs to be decrypted in the next block //! - `next_epoch_min_start_height`: minimum block height from which the next //! epoch can start //! - `next_epoch_min_start_time`: minimum block time from which the next //! epoch can start -//! - `update_epoch_blocks_delay`: number of missing blocks before updating -//! PoS with CometBFT //! - `pred`: predecessor values of the top-level keys of the same name -//! - `tx_queue` //! - `next_epoch_min_start_height` //! - `next_epoch_min_start_time` +//! - `commit_only_data_commitment` //! - `update_epoch_blocks_delay` //! - `conversion_state`: MASP conversion state //! - `subspace`: accounts sub-spaces //! - `{address}/{dyn}`: any byte data associated with accounts -//! - `diffs`: diffs in account subspaces' key-vals -//! - `new/{dyn}`: value set in block height `h` -//! - `old/{dyn}`: value from predecessor block height +//! - `diffs`: diffs in account subspaces' key-vals modified with `persist_diff +//! == true` +//! - `{height}/new/{dyn}`: value set in block height `h` +//! - `{height}/old/{dyn}`: value from predecessor block height +//! 
- `rollback`: diffs in account subspaces' key-vals for keys modified with +//! `persist_diff == false` which are only kept for 1 block to support +//! rollback +//! - `{height}/new/{dyn}`: value set in block height `h` +//! - `{height}/old/{dyn}`: value from predecessor block height //! - `block`: block state //! - `results/{h}`: block results at height `h` //! - `h`: for each block at height `h`: @@ -58,19 +61,22 @@ use namada::core::time::DateTimeUtc; use namada::core::{decode, encode, ethereum_events, ethereum_structs}; use namada::eth_bridge::storage::proof::BridgePoolRootProof; use namada::ledger::eth_bridge::storage::bridge_pool; -use namada::ledger::storage::tx_queue::TxQueue; use namada::replay_protection; -use namada::state::merkle_tree::{base_tree_key_prefix, subtree_key_prefix}; +use namada::state::merkle_tree::{ + tree_key_prefix_with_epoch, tree_key_prefix_with_height, +}; use namada::state::{ BlockStateRead, BlockStateWrite, DBIter, DBWriteBatch, DbError as Error, DbResult as Result, MerkleTreeStoresRead, PatternIterator, PrefixIterator, StoreType, DB, }; use namada::storage::{ - DbColFam, BLOCK_CF, DIFFS_CF, REPLAY_PROTECTION_CF, STATE_CF, SUBSPACE_CF, + DbColFam, BLOCK_CF, DIFFS_CF, REPLAY_PROTECTION_CF, ROLLBACK_CF, STATE_CF, + SUBSPACE_CF, }; use namada::token::ConversionState; use namada_sdk::migrations::DBUpdateVisitor; +use namada_sdk::storage::types::CommitOnlyData; use rayon::prelude::*; use regex::Regex; use rocksdb::{ @@ -160,6 +166,14 @@ pub fn open( diffs_cf_opts.set_block_based_table_factory(&table_opts); cfs.push(ColumnFamilyDescriptor::new(DIFFS_CF, diffs_cf_opts)); + // for non-persisted diffs for rollback (read/update-intensive) + let mut rollback_cf_opts = Options::default(); + rollback_cf_opts.set_compression_type(DBCompressionType::Zstd); + rollback_cf_opts.set_compression_options(0, 0, 0, 1024 * 1024); + rollback_cf_opts.set_compaction_style(DBCompactionStyle::Level); + rollback_cf_opts.set_block_based_table_factory(&table_opts); 
+ cfs.push(ColumnFamilyDescriptor::new(ROLLBACK_CF, rollback_cf_opts)); + // for the ledger state (update-intensive) let mut state_cf_opts = Options::default(); // No compression since the size of the state is small @@ -217,7 +231,11 @@ impl RocksDB { new_value: Option<&[u8]>, persist_diffs: bool, ) -> Result<()> { - let cf = self.get_column_family(DIFFS_CF)?; + let cf = if persist_diffs { + self.get_column_family(DIFFS_CF)? + } else { + self.get_column_family(ROLLBACK_CF)? + }; let (old_val_key, new_val_key) = old_and_new_diff_key(key, height)?; if let Some(old_value) = old_value { @@ -231,39 +249,6 @@ impl RocksDB { .put_cf(cf, new_val_key, new_value) .map_err(|e| Error::DBError(e.into_string()))?; } - - // If not persisting the diffs, remove the last diffs. - if !persist_diffs && height > BlockHeight::first() { - let mut height = height.prev_height(); - while height >= BlockHeight::first() { - let (old_diff_key, new_diff_key) = - old_and_new_diff_key(key, height)?; - let has_old_diff = self - .0 - .get_cf(cf, &old_diff_key) - .map_err(|e| Error::DBError(e.into_string()))? - .is_some(); - let has_new_diff = self - .0 - .get_cf(cf, &new_diff_key) - .map_err(|e| Error::DBError(e.into_string()))? - .is_some(); - if has_old_diff { - self.0 - .delete_cf(cf, old_diff_key) - .map_err(|e| Error::DBError(e.into_string()))?; - } - if has_new_diff { - self.0 - .delete_cf(cf, new_diff_key) - .map_err(|e| Error::DBError(e.into_string()))?; - } - if has_old_diff || has_new_diff { - break; - } - height = height.prev_height(); - } - } Ok(()) } @@ -278,7 +263,11 @@ impl RocksDB { new_value: Option<&[u8]>, persist_diffs: bool, ) -> Result<()> { - let cf = self.get_column_family(DIFFS_CF)?; + let cf = if persist_diffs { + self.get_column_family(DIFFS_CF)? + } else { + self.get_column_family(ROLLBACK_CF)? 
+ }; let (old_val_key, new_val_key) = old_and_new_diff_key(key, height)?; if let Some(old_value) = old_value { @@ -288,35 +277,6 @@ impl RocksDB { if let Some(new_value) = new_value { batch.0.put_cf(cf, new_val_key, new_value); } - - // If not persisting the diffs, remove the last diffs. - if !persist_diffs && height > BlockHeight::first() { - let mut height = height.prev_height(); - while height >= BlockHeight::first() { - let (old_diff_key, new_diff_key) = - old_and_new_diff_key(key, height)?; - let has_old_diff = self - .0 - .get_cf(cf, &old_diff_key) - .map_err(|e| Error::DBError(e.into_string()))? - .is_some(); - let has_new_diff = self - .0 - .get_cf(cf, &new_diff_key) - .map_err(|e| Error::DBError(e.into_string()))? - .is_some(); - if has_old_diff { - batch.0.delete_cf(cf, old_diff_key); - } - if has_new_diff { - batch.0.delete_cf(cf, new_diff_key); - } - if has_old_diff || has_new_diff { - break; - } - height = height.prev_height(); - } - } Ok(()) } @@ -516,8 +476,8 @@ impl RocksDB { for metadata_key in [ "next_epoch_min_start_height", "next_epoch_min_start_time", + "commit_only_data_commitment", "update_epoch_blocks_delay", - "tx_queue", ] { let previous_key = format!("pred/{}", metadata_key); let previous_value = self @@ -602,6 +562,10 @@ impl RocksDB { }, )?; + let mut batch = batch.into_inner().unwrap(); + + let subspace_cf = self.get_column_family(SUBSPACE_CF)?; + let diffs_cf = self.get_column_family(DIFFS_CF)?; // Look for diffs in this block to find what has been deleted let diff_new_key_prefix = Key { segments: vec![ @@ -609,24 +573,46 @@ impl RocksDB { NEW_DIFF_PREFIX.to_string().to_db_key(), ], }; + for (key_str, val, _) in + iter_diffs_prefix(self, diffs_cf, last_block.height, None, true) { - let mut batch_guard = batch.lock().unwrap(); - let subspace_cf = self.get_column_family(SUBSPACE_CF)?; - for (key, val, _) in - iter_diffs_prefix(self, last_block.height, None, true) - { - let key = Key::parse(key).unwrap(); - let diff_new_key = 
diff_new_key_prefix.join(&key); - if self.read_subspace_val(&diff_new_key)?.is_none() { - // If there is no new value, it has been deleted in this - // block and we have to restore it - batch_guard.put_cf(subspace_cf, key.to_string(), val) - } + let key = Key::parse(&key_str).unwrap(); + let diff_new_key = diff_new_key_prefix.join(&key); + if self.read_subspace_val(&diff_new_key)?.is_none() { + // If there is no new value, it has been deleted in this + // block and we have to restore it + batch.put_cf(subspace_cf, key_str, val) + } + } + + // Look for non-persisted diffs for rollback + let rollback_cf = self.get_column_family(ROLLBACK_CF)?; + + // We don't need deterministic iter order + #[allow(clippy::disallowed_types)] + // Iterate the old keys first and keep a set of keys that have old val + let mut keys_with_old_value = + std::collections::HashSet::::new(); + for (key_str, val, _) in + iter_diffs_prefix(self, rollback_cf, last_block.height, None, true) + { + // If there is no new value, it has been deleted in this + // block and we have to restore it + keys_with_old_value.insert(key_str.clone()); + batch.put_cf(subspace_cf, key_str, val) + } + // Then the new keys + for (key_str, _val, _) in + iter_diffs_prefix(self, rollback_cf, last_block.height, None, false) + { + if !keys_with_old_value.contains(&key_str) { + // If there was no old value it means that the key was newly + // written in the last block and we have to delete it + batch.delete_cf(subspace_cf, key_str) } } tracing::info!("Deleting keys prepended with the last height"); - let mut batch = batch.into_inner().unwrap(); let prefix = last_block.height.to_string(); let mut delete_keys = |cf: &ColumnFamily| { let read_opts = make_iter_read_opts(Some(prefix.clone())); @@ -652,6 +638,27 @@ impl RocksDB { tracing::info!("Flushing restored state to disk"); self.exec_batch(batch) } + + /// Read diffs of non-persisted key-vals that are only kept for rollback of + /// one block height. 
+ #[cfg(test)] + pub fn read_rollback_val( + &self, + key: &Key, + height: BlockHeight, + is_old: bool, + ) -> Result>> { + let rollback_cf = self.get_column_family(ROLLBACK_CF)?; + let key = if is_old { + old_and_new_diff_key(key, height)?.0 + } else { + old_and_new_diff_key(key, height)?.1 + }; + + self.0 + .get_cf(rollback_cf, key) + .map_err(|e| Error::DBError(e.into_string())) + } } impl DB for RocksDB { @@ -741,25 +748,27 @@ impl DB for RocksDB { return Ok(None); } }; - let conversion_state: ConversionState = match self + let commit_only_data: CommitOnlyData = match self .0 - .get_cf(state_cf, "conversion_state") + .get_cf(state_cf, "commit_only_data_commitment") .map_err(|e| Error::DBError(e.into_string()))? { Some(bytes) => decode(bytes).map_err(Error::CodingError)?, None => { - tracing::error!("Couldn't load conversion state from the DB"); + tracing::error!( + "Couldn't load commit only data commitment from the DB" + ); return Ok(None); } }; - let tx_queue: TxQueue = match self + let conversion_state: ConversionState = match self .0 - .get_cf(state_cf, "tx_queue") + .get_cf(state_cf, "conversion_state") .map_err(|e| Error::DBError(e.into_string()))? 
{ Some(bytes) => decode(bytes).map_err(Error::CodingError)?, None => { - tracing::error!("Couldn't load tx queue from the DB"); + tracing::error!("Couldn't load conversion state from the DB"); return Ok(None); } }; @@ -823,7 +832,8 @@ impl DB for RocksDB { path.split(KEY_SEGMENT_SEPARATOR).collect(); match segments.get(1) { Some(prefix) => match *prefix { - // Restore the base tree of Merkle tree + // Restore the base tree and the CommitData tree of Merkle + // tree "tree" => match segments.get(2) { Some(s) => { let st = StoreType::from_str(s)?; @@ -868,7 +878,11 @@ impl DB for RocksDB { // Restore subtrees of Merkle tree if let Some(epoch) = epoch { for st in StoreType::iter_subtrees() { - let key_prefix = subtree_key_prefix(st, epoch); + if *st == StoreType::CommitData { + // CommitData tree has been already restored + continue; + } + let key_prefix = tree_key_prefix_with_epoch(st, epoch); let root_key = key_prefix.clone().with_segment("root".to_owned()); if let Some(bytes) = self @@ -911,9 +925,9 @@ impl DB for RocksDB { next_epoch_min_start_time, update_epoch_blocks_delay, address_gen, - tx_queue, ethereum_height, eth_events_queue, + commit_only_data, })), _ => Err(Error::Temporary { error: "Essential data couldn't be read from the DB" @@ -942,9 +956,9 @@ impl DB for RocksDB { address_gen, results, conversion_state, - tx_queue, ethereum_height, eth_events_queue, + commit_only_data, }: BlockStateWrite = state; // Epoch start height and time @@ -1002,6 +1016,25 @@ impl DB for RocksDB { encode(&update_epoch_blocks_delay), ); + // Commitment to the only data commit + if let Some(current_value) = self + .0 + .get_cf(state_cf, "commit_only_data_commitment") + .map_err(|e| Error::DBError(e.into_string()))? 
+ { + // Write the predecessor value for rollback + batch.0.put_cf( + state_cf, + "pred/commit_only_data_commitment", + current_value, + ); + } + batch.0.put_cf( + state_cf, + "commit_only_data_commitment", + commit_only_data.serialize(), + ); + // Save the conversion state when the epoch is updated if is_full_commit { if let Some(current_value) = self @@ -1023,16 +1056,6 @@ impl DB for RocksDB { ); } - // Tx queue - if let Some(pred_tx_queue) = self - .0 - .get_cf(state_cf, "tx_queue") - .map_err(|e| Error::DBError(e.into_string()))? - { - // Write the predecessor value for rollback - batch.0.put_cf(state_cf, "pred/tx_queue", pred_tx_queue); - } - batch.0.put_cf(state_cf, "tx_queue", encode(&tx_queue)); batch .0 .put_cf(state_cf, "ethereum_height", encode(ðereum_height)); @@ -1041,15 +1064,18 @@ impl DB for RocksDB { .put_cf(state_cf, "eth_events_queue", encode(ð_events_queue)); let block_cf = self.get_column_family(BLOCK_CF)?; - let prefix_key = Key::from(height.to_db_key()); // Merkle tree { for st in StoreType::iter() { - if *st == StoreType::Base || is_full_commit { - let key_prefix = if *st == StoreType::Base { - base_tree_key_prefix(height) - } else { - subtree_key_prefix(st, epoch) + if *st == StoreType::Base + || *st == StoreType::CommitData + || is_full_commit + { + let key_prefix = match st { + StoreType::Base | StoreType::CommitData => { + tree_key_prefix_with_height(st, height) + } + _ => tree_key_prefix_with_epoch(st, epoch), }; let root_key = key_prefix.clone().with_segment("root".to_owned()); @@ -1067,7 +1093,9 @@ impl DB for RocksDB { } } } + // Block header + let prefix_key = Key::from(height.to_db_key()); { if let Some(h) = header { let key = prefix_key @@ -1162,10 +1190,11 @@ impl DB for RocksDB { .map(|st| Either::Left(std::iter::once(st))) .unwrap_or_else(|| Either::Right(StoreType::iter())); for st in store_types { - let key_prefix = if *st == StoreType::Base { - base_tree_key_prefix(base_height) - } else { - subtree_key_prefix(st, epoch) + 
let key_prefix = match st { + StoreType::Base | StoreType::CommitData => { + tree_key_prefix_with_height(st, base_height) + } + _ => tree_key_prefix_with_epoch(st, epoch), }; let root_key = key_prefix.clone().with_segment("root".to_owned()); let bytes = self @@ -1498,7 +1527,7 @@ impl DB for RocksDB { epoch: Epoch, ) -> Result<()> { let block_cf = self.get_column_family(BLOCK_CF)?; - let key_prefix = subtree_key_prefix(store_type, epoch); + let key_prefix = tree_key_prefix_with_epoch(store_type, epoch); let root_key = key_prefix.clone().with_segment("root".to_owned()); batch.0.delete_cf(block_cf, root_key.to_string()); let store_key = key_prefix.with_segment("store".to_owned()); @@ -1572,6 +1601,39 @@ impl DB for RocksDB { Ok(()) } + fn prune_non_persisted_diffs( + &mut self, + batch: &mut Self::WriteBatch, + height: BlockHeight, + ) -> Result<()> { + let rollback_cf = self.get_column_family(ROLLBACK_CF)?; + + let diff_old_key_prefix = Key { + segments: vec![ + height.to_db_key(), + OLD_DIFF_PREFIX.to_string().to_db_key(), + ], + }; + for (key_str, _val, _) in + iter_prefix(self, rollback_cf, None, Some(&diff_old_key_prefix)) + { + batch.0.delete_cf(rollback_cf, key_str) + } + + let diff_new_key_prefix = Key { + segments: vec![ + height.to_db_key(), + NEW_DIFF_PREFIX.to_string().to_db_key(), + ], + }; + for (key_str, _val, _) in + iter_prefix(self, rollback_cf, None, Some(&diff_new_key_prefix)) + { + batch.0.delete_cf(rollback_cf, key_str) + } + Ok(()) + } + #[inline] fn overwrite_entry( &self, @@ -1765,7 +1827,10 @@ impl<'iter> DBIter<'iter> for RocksDB { height: BlockHeight, prefix: Option<&'iter Key>, ) -> PersistentPrefixIterator<'iter> { - iter_diffs_prefix(self, height, prefix, true) + let diffs_cf = self + .get_column_family(DIFFS_CF) + .expect("{DIFFS_CF} column family should exist"); + iter_diffs_prefix(self, diffs_cf, height, prefix, true) } fn iter_new_diffs( @@ -1773,7 +1838,10 @@ impl<'iter> DBIter<'iter> for RocksDB { height: BlockHeight, prefix: 
Option<&'iter Key>, ) -> PersistentPrefixIterator<'iter> { - iter_diffs_prefix(self, height, prefix, false) + let diffs_cf = self + .get_column_family(DIFFS_CF) + .expect("{DIFFS_CF} column family should exist"); + iter_diffs_prefix(self, diffs_cf, height, prefix, false) } fn iter_replay_protection(&'iter self) -> Self::PrefixIter { @@ -1820,13 +1888,11 @@ fn iter_subspace_pattern<'iter>( fn iter_diffs_prefix<'a>( db: &'a RocksDB, + cf: &'a ColumnFamily, height: BlockHeight, prefix: Option<&Key>, is_old: bool, ) -> PersistentPrefixIterator<'a> { - let diffs_cf = db - .get_column_family(DIFFS_CF) - .expect("{DIFFS_CF} column family should exist"); let kind = if is_old { OLD_DIFF_PREFIX } else { @@ -1838,7 +1904,7 @@ fn iter_diffs_prefix<'a>( .unwrap(), ); // get keys without the `stripped_prefix` - iter_prefix(db, diffs_cf, stripped_prefix.as_ref(), prefix) + iter_prefix(db, cf, stripped_prefix.as_ref(), prefix) } /// Create an iterator over key-vals in the given CF matching the given @@ -2237,174 +2303,191 @@ mod test { #[test] fn test_rollback() { - let dir = tempdir().unwrap(); - let mut db = open(dir.path(), None).unwrap(); - - // A key that's gonna be added on a second block - let add_key = Key::parse("add").unwrap(); - // A key that's gonna be deleted on a second block - let delete_key = Key::parse("delete").unwrap(); - // A key that's gonna be overwritten on a second block - let overwrite_key = Key::parse("overwrite").unwrap(); - - // Write first block - let mut batch = RocksDB::batch(); - let height_0 = BlockHeight(100); - let mut pred_epochs = Epochs::default(); - pred_epochs.new_epoch(height_0); - let conversion_state_0 = ConversionState::default(); - let to_delete_val = vec![1_u8, 1, 0, 0]; - let to_overwrite_val = vec![1_u8, 1, 1, 0]; - db.batch_write_subspace_val( - &mut batch, - height_0, - &delete_key, - &to_delete_val, - true, - ) - .unwrap(); - db.batch_write_subspace_val( - &mut batch, - height_0, - &overwrite_key, - &to_overwrite_val, - true, - ) 
- .unwrap(); - for tx in [b"tx1", b"tx2"] { - db.write_replay_protection_entry( + for persist_diffs in [true, false] { + println!("Running with persist_diffs: {persist_diffs}"); + + let dir = tempdir().unwrap(); + let mut db = open(dir.path(), None).unwrap(); + + // A key that's gonna be added on a second block + let add_key = Key::parse("add").unwrap(); + // A key that's gonna be deleted on a second block + let delete_key = Key::parse("delete").unwrap(); + // A key that's gonna be overwritten on a second block + let overwrite_key = Key::parse("overwrite").unwrap(); + + // Write first block + let mut batch = RocksDB::batch(); + let height_0 = BlockHeight(100); + let mut pred_epochs = Epochs::default(); + pred_epochs.new_epoch(height_0); + let conversion_state_0 = ConversionState::default(); + let to_delete_val = vec![1_u8, 1, 0, 0]; + let to_overwrite_val = vec![1_u8, 1, 1, 0]; + db.batch_write_subspace_val( &mut batch, - &replay_protection::all_key(&Hash::sha256(tx)), + height_0, + &delete_key, + &to_delete_val, + persist_diffs, ) .unwrap(); - db.write_replay_protection_entry( + db.batch_write_subspace_val( &mut batch, - &replay_protection::buffer_key(&Hash::sha256(tx)), + height_0, + &overwrite_key, + &to_overwrite_val, + persist_diffs, ) .unwrap(); - } + for tx in [b"tx1", b"tx2"] { + db.write_replay_protection_entry( + &mut batch, + &replay_protection::all_key(&Hash::sha256(tx)), + ) + .unwrap(); + db.write_replay_protection_entry( + &mut batch, + &replay_protection::buffer_key(&Hash::sha256(tx)), + ) + .unwrap(); + } - for tx in [b"tx3", b"tx4"] { - db.write_replay_protection_entry( + for tx in [b"tx3", b"tx4"] { + db.write_replay_protection_entry( + &mut batch, + &replay_protection::last_key(&Hash::sha256(tx)), + ) + .unwrap(); + } + + add_block_to_batch( + &db, &mut batch, - &replay_protection::last_key(&Hash::sha256(tx)), + height_0, + Epoch(1), + pred_epochs.clone(), + &conversion_state_0, ) .unwrap(); - } - - add_block_to_batch( - &db, - &mut batch, - 
height_0, - Epoch(1), - pred_epochs.clone(), - &conversion_state_0, - ) - .unwrap(); - db.exec_batch(batch.0).unwrap(); - - // Write second block - let mut batch = RocksDB::batch(); - let height_1 = BlockHeight(101); - pred_epochs.new_epoch(height_1); - let conversion_state_1 = ConversionState::default(); - let add_val = vec![1_u8, 0, 0, 0]; - let overwrite_val = vec![1_u8, 1, 1, 1]; - db.batch_write_subspace_val( - &mut batch, height_1, &add_key, &add_val, true, - ) - .unwrap(); - db.batch_write_subspace_val( - &mut batch, - height_1, - &overwrite_key, - &overwrite_val, - true, - ) - .unwrap(); - db.batch_delete_subspace_val(&mut batch, height_1, &delete_key, true) + db.exec_batch(batch.0).unwrap(); + + // Write second block + let mut batch = RocksDB::batch(); + let height_1 = BlockHeight(101); + pred_epochs.new_epoch(height_1); + let conversion_state_1 = ConversionState::default(); + let add_val = vec![1_u8, 0, 0, 0]; + let overwrite_val = vec![1_u8, 1, 1, 1]; + db.batch_write_subspace_val( + &mut batch, + height_1, + &add_key, + &add_val, + persist_diffs, + ) .unwrap(); - - db.prune_replay_protection_buffer(&mut batch).unwrap(); - db.write_replay_protection_entry( - &mut batch, - &replay_protection::all_key(&Hash::sha256(b"tx3")), - ) - .unwrap(); - - for tx in [b"tx3", b"tx4"] { - db.delete_replay_protection_entry( + db.batch_write_subspace_val( &mut batch, - &replay_protection::last_key(&Hash::sha256(tx)), + height_1, + &overwrite_key, + &overwrite_val, + persist_diffs, ) .unwrap(); - db.write_replay_protection_entry( + db.batch_delete_subspace_val( &mut batch, - &replay_protection::buffer_key(&Hash::sha256(tx)), + height_1, + &delete_key, + persist_diffs, ) .unwrap(); - } - for tx in [b"tx5", b"tx6"] { + db.prune_replay_protection_buffer(&mut batch).unwrap(); db.write_replay_protection_entry( &mut batch, - &replay_protection::last_key(&Hash::sha256(tx)), + &replay_protection::all_key(&Hash::sha256(b"tx3")), ) .unwrap(); - } - - add_block_to_batch( - &db, - 
&mut batch, - height_1, - Epoch(2), - pred_epochs, - &conversion_state_1, - ) - .unwrap(); - db.exec_batch(batch.0).unwrap(); - - // Check that the values are as expected from second block - let added = db.read_subspace_val(&add_key).unwrap(); - assert_eq!(added, Some(add_val)); - let overwritten = db.read_subspace_val(&overwrite_key).unwrap(); - assert_eq!(overwritten, Some(overwrite_val)); - let deleted = db.read_subspace_val(&delete_key).unwrap(); - assert_eq!(deleted, None); - for tx in [b"tx1", b"tx2", b"tx3", b"tx5", b"tx6"] { - assert!(db.has_replay_protection_entry(&Hash::sha256(tx)).unwrap()); - } - assert!( - !db.has_replay_protection_entry(&Hash::sha256(b"tx4")) - .unwrap() - ); + for tx in [b"tx3", b"tx4"] { + db.delete_replay_protection_entry( + &mut batch, + &replay_protection::last_key(&Hash::sha256(tx)), + ) + .unwrap(); + db.write_replay_protection_entry( + &mut batch, + &replay_protection::buffer_key(&Hash::sha256(tx)), + ) + .unwrap(); + } - // Rollback to the first block height - db.rollback(height_0).unwrap(); - - // Check that the values are back to the state at the first block - let added = db.read_subspace_val(&add_key).unwrap(); - assert_eq!(added, None); - let overwritten = db.read_subspace_val(&overwrite_key).unwrap(); - assert_eq!(overwritten, Some(to_overwrite_val)); - let deleted = db.read_subspace_val(&delete_key).unwrap(); - assert_eq!(deleted, Some(to_delete_val)); - // Check the conversion state - let state_cf = db.get_column_family(STATE_CF).unwrap(); - let conversion_state = - db.0.get_cf(state_cf, "conversion_state".as_bytes()) - .unwrap() + for tx in [b"tx5", b"tx6"] { + db.write_replay_protection_entry( + &mut batch, + &replay_protection::last_key(&Hash::sha256(tx)), + ) .unwrap(); - assert_eq!(conversion_state, encode(&conversion_state_0)); - for tx in [b"tx1", b"tx2", b"tx3", b"tx4"] { - assert!(db.has_replay_protection_entry(&Hash::sha256(tx)).unwrap()); - } + } - for tx in [b"tx5", b"tx6"] { + add_block_to_batch( + &db, + 
&mut batch, + height_1, + Epoch(2), + pred_epochs, + &conversion_state_1, + ) + .unwrap(); + db.exec_batch(batch.0).unwrap(); + + // Check that the values are as expected from second block + let added = db.read_subspace_val(&add_key).unwrap(); + assert_eq!(added, Some(add_val)); + let overwritten = db.read_subspace_val(&overwrite_key).unwrap(); + assert_eq!(overwritten, Some(overwrite_val)); + let deleted = db.read_subspace_val(&delete_key).unwrap(); + assert_eq!(deleted, None); + + for tx in [b"tx1", b"tx2", b"tx3", b"tx5", b"tx6"] { + assert!( + db.has_replay_protection_entry(&Hash::sha256(tx)).unwrap() + ); + } assert!( - !db.has_replay_protection_entry(&Hash::sha256(tx)).unwrap() + !db.has_replay_protection_entry(&Hash::sha256(b"tx4")) + .unwrap() ); + + // Rollback to the first block height + db.rollback(height_0).unwrap(); + + // Check that the values are back to the state at the first block + let added = db.read_subspace_val(&add_key).unwrap(); + assert_eq!(added, None); + let overwritten = db.read_subspace_val(&overwrite_key).unwrap(); + assert_eq!(overwritten, Some(to_overwrite_val)); + let deleted = db.read_subspace_val(&delete_key).unwrap(); + assert_eq!(deleted, Some(to_delete_val)); + // Check the conversion state + let state_cf = db.get_column_family(STATE_CF).unwrap(); + let conversion_state = + db.0.get_cf(state_cf, "conversion_state".as_bytes()) + .unwrap() + .unwrap(); + assert_eq!(conversion_state, encode(&conversion_state_0)); + for tx in [b"tx1", b"tx2", b"tx3", b"tx4"] { + assert!( + db.has_replay_protection_entry(&Hash::sha256(tx)).unwrap() + ); + } + + for tx in [b"tx5", b"tx6"] { + assert!( + !db.has_replay_protection_entry(&Hash::sha256(tx)).unwrap() + ); + } } } @@ -2442,18 +2525,21 @@ mod test { { let diffs_cf = db.get_column_family(DIFFS_CF).unwrap(); + let rollback_cf = db.get_column_family(ROLLBACK_CF).unwrap(); - // Diffs new key for `key_with_diffs` at height_0 must be present + // Diffs new key for `key_with_diffs` at height_0 must 
be + // present let (old_with_h0, new_with_h0) = old_and_new_diff_key(&key_with_diffs, height_0).unwrap(); assert!(db.0.get_cf(diffs_cf, old_with_h0).unwrap().is_none()); assert!(db.0.get_cf(diffs_cf, new_with_h0).unwrap().is_some()); - // Diffs new key for `key_without_diffs` at height_0 must be present + // Diffs new key for `key_without_diffs` at height_0 must be + // present let (old_wo_h0, new_wo_h0) = old_and_new_diff_key(&key_without_diffs, height_0).unwrap(); - assert!(db.0.get_cf(diffs_cf, old_wo_h0).unwrap().is_none()); - assert!(db.0.get_cf(diffs_cf, new_wo_h0).unwrap().is_some()); + assert!(db.0.get_cf(rollback_cf, old_wo_h0).unwrap().is_none()); + assert!(db.0.get_cf(rollback_cf, new_wo_h0).unwrap().is_some()); } // Write second block @@ -2475,10 +2561,12 @@ mod test { false, ) .unwrap(); + db.prune_non_persisted_diffs(&mut batch, height_0).unwrap(); db.exec_batch(batch.0).unwrap(); { let diffs_cf = db.get_column_family(DIFFS_CF).unwrap(); + let rollback_cf = db.get_column_family(ROLLBACK_CF).unwrap(); // Diffs keys for `key_with_diffs` at height_0 must be present let (old_with_h0, new_with_h0) = @@ -2489,8 +2577,8 @@ mod test { // Diffs keys for `key_without_diffs` at height_0 must be gone let (old_wo_h0, new_wo_h0) = old_and_new_diff_key(&key_without_diffs, height_0).unwrap(); - assert!(db.0.get_cf(diffs_cf, old_wo_h0).unwrap().is_none()); - assert!(db.0.get_cf(diffs_cf, new_wo_h0).unwrap().is_none()); + assert!(db.0.get_cf(rollback_cf, old_wo_h0).unwrap().is_none()); + assert!(db.0.get_cf(rollback_cf, new_wo_h0).unwrap().is_none()); // Diffs keys for `key_with_diffs` at height_1 must be present let (old_with_h1, new_with_h1) = @@ -2498,11 +2586,12 @@ mod test { assert!(db.0.get_cf(diffs_cf, old_with_h1).unwrap().is_some()); assert!(db.0.get_cf(diffs_cf, new_with_h1).unwrap().is_some()); - // Diffs keys for `key_without_diffs` at height_1 must be present + // Diffs keys for `key_without_diffs` at height_1 must be + // present let (old_wo_h1, 
new_wo_h1) = old_and_new_diff_key(&key_without_diffs, height_1).unwrap(); - assert!(db.0.get_cf(diffs_cf, old_wo_h1).unwrap().is_some()); - assert!(db.0.get_cf(diffs_cf, new_wo_h1).unwrap().is_some()); + assert!(db.0.get_cf(rollback_cf, old_wo_h1).unwrap().is_some()); + assert!(db.0.get_cf(rollback_cf, new_wo_h1).unwrap().is_some()); } // Write third block @@ -2524,10 +2613,12 @@ mod test { false, ) .unwrap(); + db.prune_non_persisted_diffs(&mut batch, height_1).unwrap(); db.exec_batch(batch.0).unwrap(); { let diffs_cf = db.get_column_family(DIFFS_CF).unwrap(); + let rollback_cf = db.get_column_family(ROLLBACK_CF).unwrap(); // Diffs keys for `key_with_diffs` at height_1 must be present let (old_with_h1, new_with_h1) = @@ -2538,8 +2629,8 @@ mod test { // Diffs keys for `key_without_diffs` at height_1 must be gone let (old_wo_h1, new_wo_h1) = old_and_new_diff_key(&key_without_diffs, height_1).unwrap(); - assert!(db.0.get_cf(diffs_cf, old_wo_h1).unwrap().is_none()); - assert!(db.0.get_cf(diffs_cf, new_wo_h1).unwrap().is_none()); + assert!(db.0.get_cf(rollback_cf, old_wo_h1).unwrap().is_none()); + assert!(db.0.get_cf(rollback_cf, new_wo_h1).unwrap().is_none()); // Diffs keys for `key_with_diffs` at height_2 must be present let (old_with_h2, new_with_h2) = @@ -2547,11 +2638,12 @@ mod test { assert!(db.0.get_cf(diffs_cf, old_with_h2).unwrap().is_some()); assert!(db.0.get_cf(diffs_cf, new_with_h2).unwrap().is_some()); - // Diffs keys for `key_without_diffs` at height_2 must be present + // Diffs keys for `key_without_diffs` at height_2 must be + // present let (old_wo_h2, new_wo_h2) = old_and_new_diff_key(&key_without_diffs, height_2).unwrap(); - assert!(db.0.get_cf(diffs_cf, old_wo_h2).unwrap().is_some()); - assert!(db.0.get_cf(diffs_cf, new_wo_h2).unwrap().is_some()); + assert!(db.0.get_cf(rollback_cf, old_wo_h2).unwrap().is_some()); + assert!(db.0.get_cf(rollback_cf, new_wo_h2).unwrap().is_some()); } } @@ -2567,14 +2659,16 @@ mod test { let merkle_tree = 
MerkleTree::::default(); let merkle_tree_stores = merkle_tree.stores(); let hash = BlockHash::default(); + #[allow(clippy::disallowed_methods)] let time = DateTimeUtc::now(); let next_epoch_min_start_height = BlockHeight::default(); + #[allow(clippy::disallowed_methods)] let next_epoch_min_start_time = DateTimeUtc::now(); let update_epoch_blocks_delay = None; let address_gen = EstablishedAddressGen::new("whatever"); - let tx_queue = TxQueue::default(); let results = BlockResults::default(); let eth_events_queue = EthEventsQueue::default(); + let commit_only_data = CommitOnlyData::default(); let block = BlockStateWrite { merkle_tree_stores, header: None, @@ -2589,9 +2683,9 @@ mod test { next_epoch_min_start_time, update_epoch_blocks_delay, address_gen: &address_gen, - tx_queue: &tx_queue, ethereum_height: None, eth_events_queue: ð_events_queue, + commit_only_data: &commit_only_data, }; db.add_block_to_batch(block, batch, true) diff --git a/crates/apps/src/lib/wallet/defaults.rs b/crates/apps/src/lib/wallet/defaults.rs index 1885c2ade7..2393b28c6b 100644 --- a/crates/apps/src/lib/wallet/defaults.rs +++ b/crates/apps/src/lib/wallet/defaults.rs @@ -10,13 +10,12 @@ pub use dev::{ #[cfg(any(test, feature = "testing", feature = "benches"))] mod dev { - use std::collections::HashMap; - use lazy_static::lazy_static; use namada::core::address::testing::{ apfel, btc, dot, eth, kartoffel, nam, schnitzel, }; use namada::core::address::Address; + use namada::core::collections::HashMap; use namada::core::key::*; use namada::ledger::{governance, pgf, pos}; use namada_sdk::wallet::alias::Alias; diff --git a/crates/apps/src/lib/wasm_loader/mod.rs b/crates/apps/src/lib/wasm_loader/mod.rs index f7248d74e4..99e29be69a 100644 --- a/crates/apps/src/lib/wasm_loader/mod.rs +++ b/crates/apps/src/lib/wasm_loader/mod.rs @@ -1,12 +1,12 @@ //! A module for loading WASM files and downloading pre-built WASMs. 
use core::borrow::Borrow; -use std::collections::HashMap; use std::fs; use std::path::Path; use data_encoding::HEXLOWER; use eyre::{eyre, WrapErr}; use futures::future::join_all; +use namada::core::collections::HashMap; use serde::{Deserialize, Serialize}; use sha2::{Digest, Sha256}; use thiserror::Error; diff --git a/crates/benches/Cargo.toml b/crates/benches/Cargo.toml index fb556b2e80..3fde0768d6 100644 --- a/crates/benches/Cargo.toml +++ b/crates/benches/Cargo.toml @@ -37,16 +37,34 @@ name = "host_env" harness = false path = "host_env.rs" +[[bench]] +name = "wasm_opcodes" +harness = false +path = "wasm_opcodes.rs" + +[features] +namada-eth-bridge = [ + "namada/namada-eth-bridge", + "namada_apps/namada-eth-bridge", +] + [dependencies] +# NOTE: this crate MUST NOT import any dependency with testing features to prevent benchmarking non-production code [dev-dependencies] -namada = { path = "../namada", features = ["rand", "testing"] } +namada = { path = "../namada", features = ["rand", "benches"] } namada_apps = { path = "../apps", features = ["benches"] } masp_primitives.workspace = true borsh.workspace = true borsh-ext.workspace = true criterion = { version = "0.5", features = ["html_reports"] } +lazy_static.workspace= true +prost.workspace = true rand_core.workspace = true rand.workspace = true tempfile.workspace = true sha2.workspace = true +wasm-instrument.workspace = true +wasmer-compiler-singlepass.workspace = true +wasmer-engine-universal.workspace = true +wasmer.workspace = true diff --git a/crates/benches/host_env.rs b/crates/benches/host_env.rs index a3955d44c4..8b29e75098 100644 --- a/crates/benches/host_env.rs +++ b/crates/benches/host_env.rs @@ -1,11 +1,10 @@ -use std::collections::{HashMap, HashSet}; - use criterion::{criterion_group, criterion_main, Criterion}; use namada::core::account::AccountPublicKeysMap; use namada::core::address; +use namada::core::collections::{HashMap, HashSet}; use namada::ledger::storage::DB; use namada::token::{Amount, 
Transfer}; -use namada::tx::Signature; +use namada::tx::Authorization; use namada::vm::wasm::TxCache; use namada_apps::bench_utils::{ BenchShell, TX_INIT_PROPOSAL_WASM, TX_REVEAL_PK_WASM, TX_TRANSFER_WASM, @@ -39,7 +38,7 @@ fn tx_section_signature_validation(c: &mut Criterion) { defaults::albert_keypair().to_public() ]); - let multisig = Signature::new( + let multisig = Authorization::new( vec![section_hash], pkim.index_secret_keys(vec![defaults::albert_keypair()]), None, diff --git a/crates/benches/native_vps.rs b/crates/benches/native_vps.rs index e9c9708b50..6092900eab 100644 --- a/crates/benches/native_vps.rs +++ b/crates/benches/native_vps.rs @@ -1,13 +1,18 @@ use std::cell::RefCell; -use std::collections::{BTreeSet, HashMap}; +use std::collections::BTreeSet; +use std::ops::Deref; use std::rc::Rc; use std::str::FromStr; -use criterion::{criterion_group, criterion_main, Criterion}; +use criterion::{criterion_group, criterion_main, BatchSize, Criterion}; use masp_primitives::sapling::Node; +use masp_primitives::transaction::sighash::{signature_hash, SignableInput}; +use masp_primitives::transaction::txid::TxIdDigester; use namada::core::address::{self, Address, InternalAddress}; +use namada::core::collections::HashMap; use namada::core::eth_bridge_pool::{GasFee, PendingTransfer}; use namada::core::masp::{TransferSource, TransferTarget}; +use namada::eth_bridge::storage::eth_bridge_queries::is_bridge_comptime_enabled; use namada::eth_bridge::storage::whitelist; use namada::governance::pgf::storage::steward::StewardDetail; use namada::governance::storage::proposal::ProposalType; @@ -21,9 +26,10 @@ use namada::ibc::core::connection::types::msgs::MsgConnectionOpenInit; use namada::ibc::core::connection::types::version::Version; use namada::ibc::core::connection::types::Counterparty; use namada::ibc::core::host::types::identifiers::{ - ClientId, ClientType, ConnectionId, PortId, + ClientId, ConnectionId, PortId, }; -use namada::ibc::{IbcActions, TransferModule}; +use 
namada::ibc::primitives::ToProto; +use namada::ibc::{IbcActions, NftTransferModule, TransferModule}; use namada::ledger::eth_bridge::read_native_erc20_address; use namada::ledger::gas::{TxGasMeter, VpGasMeter}; use namada::ledger::governance::GovernanceVp; @@ -40,13 +46,16 @@ use namada::ledger::pgf::PgfVp; use namada::ledger::pos::PosVP; use namada::proof_of_stake; use namada::proof_of_stake::KeySeg; -use namada::sdk::masp::verify_shielded_tx; +use namada::sdk::masp::{ + check_convert, check_output, check_spend, partial_deauthorize, + preload_verifying_keys, PVKs, +}; use namada::sdk::masp_primitives::merkle_tree::CommitmentTree; use namada::sdk::masp_primitives::transaction::Transaction; +use namada::sdk::masp_proofs::sapling::SaplingVerificationContext; use namada::state::{Epoch, StorageRead, StorageWrite, TxIndex}; use namada::token::{Amount, Transfer}; use namada::tx::{Code, Section, Tx}; -use namada::validity_predicate::VpSentinel; use namada_apps::bench_utils::{ generate_foreign_key_tx, BenchShell, BenchShieldedCtx, ALBERT_PAYMENT_ADDRESS, ALBERT_SPENDING_KEY, BERTHA_PAYMENT_ADDRESS, @@ -119,13 +128,12 @@ fn governance(c: &mut Criterion) { shell.generate_tx( TX_INIT_PROPOSAL_WASM, InitProposalData { - id: 0, content: content_section.get_hash(), author: defaults::albert_address(), - r#type: ProposalType::Default(None), + r#type: ProposalType::Default, voting_start_epoch, voting_end_epoch: voting_start_epoch + 3_u64, - grace_epoch: voting_start_epoch + 9_u64, + activation_epoch: voting_start_epoch + 9_u64, }, None, Some(vec![content_section]), @@ -171,15 +179,14 @@ fn governance(c: &mut Criterion) { shell.generate_tx( TX_INIT_PROPOSAL_WASM, InitProposalData { - id: 1, content: content_section.get_hash(), author: defaults::albert_address(), - r#type: ProposalType::Default(Some( + r#type: ProposalType::DefaultWithWasm( wasm_code_section.get_hash(), - )), + ), voting_start_epoch, voting_end_epoch: voting_start_epoch + 3_u64, - grace_epoch: voting_start_epoch + 
9_u64, + activation_epoch: voting_start_epoch + 9_u64, }, None, Some(vec![content_section, wasm_code_section]), @@ -190,17 +197,16 @@ fn governance(c: &mut Criterion) { }; // Run the tx to validate - shell.execute_tx(&signed_tx); + let verifiers_from_tx = shell.execute_tx(&signed_tx); let (verifiers, keys_changed) = shell .state .write_log() - .verifiers_and_changed_keys(&BTreeSet::default()); + .verifiers_and_changed_keys(&verifiers_from_tx); let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(u64::MAX.into()), )); - let sentinel = RefCell::new(VpSentinel::default()); let governance = GovernanceVp { ctx: Ctx::new( &Address::Internal(InternalAddress::Governance), @@ -208,7 +214,6 @@ fn governance(c: &mut Criterion) { &signed_tx, &TxIndex(0), &gas_meter, - &sentinel, &keys_changed, &verifiers, shell.vp_wasm_cache.clone(), @@ -224,7 +229,7 @@ fn governance(c: &mut Criterion) { governance.ctx.keys_changed, governance.ctx.verifiers, ) - .unwrap() + .is_ok() ) }) }); @@ -251,7 +256,7 @@ fn governance(c: &mut Criterion) { // r#type: ProposalType::Default(None), // voting_start_epoch: 12.into(), // voting_end_epoch: 15.into(), -// grace_epoch: 18.into(), +// activation_epoch: 18.into(), // }, // None, // Some(vec![content_section]), @@ -265,12 +270,12 @@ fn governance(c: &mut Criterion) { // let mut shell = BenchShell::default(); // // Run the tx to validate -// shell.execute_tx(&tx); +// let verifiers_from_tx = shell.execute_tx(&tx); // let (verifiers, keys_changed) = shell // .state // .write_log -// .verifiers_and_changed_keys(&BTreeSet::default()); +// .verifiers_and_changed_keys(&verifiers_from_tx); // let slash_fund = SlashFundVp { // ctx: Ctx::new( @@ -306,88 +311,132 @@ fn governance(c: &mut Criterion) { // group.finish(); // } +fn prepare_ibc_tx_and_ctx(bench_name: &str) -> (BenchShieldedCtx, Tx) { + match bench_name { + "open_connection" => { + let mut shielded_ctx = BenchShieldedCtx::default(); + let _ = 
shielded_ctx.shell.init_ibc_client_state( + namada::core::storage::Key::from( + Address::Internal(InternalAddress::Ibc).to_db_key(), + ), + ); + let msg = MsgConnectionOpenInit { + client_id_on_a: ClientId::new("07-tendermint", 1).unwrap(), + counterparty: Counterparty::new( + ClientId::from_str("07-tendermint-1").unwrap(), + None, + CommitmentPrefix::try_from(b"ibc".to_vec()).unwrap(), + ), + version: Some(Version::default()), + delay_period: std::time::Duration::new(100, 0), + signer: defaults::albert_address().to_string().into(), + }; + let mut data = vec![]; + prost::Message::encode(&msg.to_any(), &mut data).unwrap(); + let open_connection = + shielded_ctx.shell.generate_ibc_tx(TX_IBC_WASM, data); + + (shielded_ctx, open_connection) + } + "open_channel" => { + let mut shielded_ctx = BenchShieldedCtx::default(); + let _ = shielded_ctx.shell.init_ibc_connection(); + // Channel handshake + let msg = MsgChannelOpenInit { + port_id_on_a: PortId::transfer(), + connection_hops_on_a: vec![ConnectionId::new(1)], + port_id_on_b: PortId::transfer(), + ordering: Order::Unordered, + signer: defaults::albert_address().to_string().into(), + version_proposal: ChannelVersion::new("ics20-1".to_string()), + }; + + // Avoid serializing the data again with borsh + let mut data = vec![]; + prost::Message::encode(&msg.to_any(), &mut data).unwrap(); + let open_channel = + shielded_ctx.shell.generate_ibc_tx(TX_IBC_WASM, data); + + (shielded_ctx, open_channel) + } + "outgoing_transfer" => { + let mut shielded_ctx = BenchShieldedCtx::default(); + shielded_ctx.shell.init_ibc_channel(); + shielded_ctx.shell.enable_ibc_transfer(); + let outgoing_transfer = + shielded_ctx.shell.generate_ibc_transfer_tx(); + + (shielded_ctx, outgoing_transfer) + } + "outgoing_shielded_action" => { + let mut shielded_ctx = BenchShieldedCtx::default(); + shielded_ctx.shell.init_ibc_channel(); + shielded_ctx.shell.enable_ibc_transfer(); + + let albert_payment_addr = shielded_ctx + .wallet + 
.find_payment_addr(ALBERT_PAYMENT_ADDRESS) + .unwrap() + .to_owned(); + let albert_spending_key = shielded_ctx + .wallet + .find_spending_key(ALBERT_SPENDING_KEY, None) + .unwrap() + .to_owned(); + // Shield some tokens for Albert + let (mut shielded_ctx, shield_tx) = shielded_ctx.generate_masp_tx( + Amount::native_whole(500), + TransferSource::Address(defaults::albert_address()), + TransferTarget::PaymentAddress(albert_payment_addr), + ); + shielded_ctx.shell.execute_tx(&shield_tx); + shielded_ctx.shell.commit_masp_tx(shield_tx); + shielded_ctx.shell.commit_block(); + shielded_ctx.generate_shielded_action( + Amount::native_whole(10), + TransferSource::ExtendedSpendingKey(albert_spending_key), + TransferTarget::Address(defaults::bertha_address()), + ) + } + _ => panic!("Unexpected bench test"), + } +} + fn ibc(c: &mut Criterion) { let mut group = c.benchmark_group("vp_ibc"); - let shell = BenchShell::default(); // NOTE: Ibc encompass a variety of different messages that can be executed, // here we only benchmark a few of those Connection handshake - let msg = MsgConnectionOpenInit { - client_id_on_a: ClientId::new( - ClientType::new("01-tendermint").unwrap(), - 1, - ) - .unwrap(), - counterparty: Counterparty::new( - ClientId::from_str("01-tendermint-1").unwrap(), - None, - CommitmentPrefix::try_from(b"ibc".to_vec()).unwrap(), - ), - version: Some(Version::default()), - delay_period: std::time::Duration::new(100, 0), - signer: defaults::albert_address().to_string().into(), - }; - let open_connection = shell.generate_ibc_tx(TX_IBC_WASM, msg); - - // Channel handshake - let msg = MsgChannelOpenInit { - port_id_on_a: PortId::transfer(), - connection_hops_on_a: vec![ConnectionId::new(1)], - port_id_on_b: PortId::transfer(), - ordering: Order::Unordered, - signer: defaults::albert_address().to_string().into(), - version_proposal: ChannelVersion::new("ics20-1".to_string()), - }; - - // Avoid serializing the data again with borsh - let open_channel = 
shell.generate_ibc_tx(TX_IBC_WASM, msg); - // Ibc transfer - let outgoing_transfer = shell.generate_ibc_transfer_tx(); - - for (signed_tx, bench_name) in - [open_connection, open_channel, outgoing_transfer] - .iter() - .zip(["open_connection", "open_channel", "outgoing_transfer"]) - { - let mut shell = BenchShell::default(); + for bench_name in [ + "open_connection", + "open_channel", + "outgoing_transfer", + "outgoing_shielded_action", + ] { // Initialize the state according to the target tx - match bench_name { - "open_connection" => { - let _ = shell.init_ibc_client_state( - namada::core::storage::Key::from( - Address::Internal(InternalAddress::Ibc).to_db_key(), - ), - ); - } - "open_channel" => { - let _ = shell.init_ibc_connection(); - } - "outgoing_transfer" => shell.init_ibc_channel(), - _ => panic!("Unexpected bench test"), - } + let (mut shielded_ctx, signed_tx) = prepare_ibc_tx_and_ctx(bench_name); - shell.execute_tx(signed_tx); - let (verifiers, keys_changed) = shell + let verifiers_from_tx = shielded_ctx.shell.execute_tx(&signed_tx); + let (verifiers, keys_changed) = shielded_ctx + .shell .state .write_log() - .verifiers_and_changed_keys(&BTreeSet::default()); + .verifiers_and_changed_keys(&verifiers_from_tx); let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(u64::MAX.into()), )); - let sentinel = RefCell::new(VpSentinel::default()); let ibc = Ibc { ctx: Ctx::new( &Address::Internal(InternalAddress::Ibc), - &shell.state, - signed_tx, + &shielded_ctx.shell.state, + &signed_tx, &TxIndex(0), &gas_meter, - &sentinel, &keys_changed, &verifiers, - shell.vp_wasm_cache.clone(), + shielded_ctx.shell.vp_wasm_cache.clone(), ), }; @@ -395,11 +444,11 @@ fn ibc(c: &mut Criterion) { b.iter(|| { assert!( ibc.validate_tx( - signed_tx, + &signed_tx, ibc.ctx.keys_changed, ibc.ctx.verifiers, ) - .unwrap() + .is_ok() ) }) }); @@ -435,16 +484,15 @@ fn vp_multitoken(c: &mut Criterion) { .zip(["foreign_key_write", "transfer"]) { let 
mut shell = BenchShell::default(); - shell.execute_tx(signed_tx); + let verifiers_from_tx = shell.execute_tx(signed_tx); let (verifiers, keys_changed) = shell .state .write_log() - .verifiers_and_changed_keys(&BTreeSet::default()); + .verifiers_and_changed_keys(&verifiers_from_tx); let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(u64::MAX.into()), )); - let sentinel = RefCell::new(VpSentinel::default()); let multitoken = MultitokenVp { ctx: Ctx::new( &Address::Internal(InternalAddress::Multitoken), @@ -452,7 +500,6 @@ fn vp_multitoken(c: &mut Criterion) { signed_tx, &TxIndex(0), &gas_meter, - &sentinel, &keys_changed, &verifiers, shell.vp_wasm_cache.clone(), @@ -468,17 +515,18 @@ fn vp_multitoken(c: &mut Criterion) { multitoken.ctx.keys_changed, multitoken.ctx.verifiers, ) - .unwrap() + .is_ok() ) }) }); } } -// Generate and run masp transaction to be verified +// Generate and run masp transaction to be verified. Returns the verifier set +// from tx and the tx. fn setup_storage_for_masp_verification( bench_name: &str, -) -> (BenchShieldedCtx, Tx) { +) -> (BenchShieldedCtx, BTreeSet
, Tx) { let amount = Amount::native_whole(500); let mut shielded_ctx = BenchShieldedCtx::default(); @@ -504,6 +552,7 @@ fn setup_storage_for_masp_verification( TransferSource::Address(defaults::albert_address()), TransferTarget::PaymentAddress(albert_payment_addr), ); + shielded_ctx.shell.execute_tx(&shield_tx); shielded_ctx.shell.commit_masp_tx(shield_tx); @@ -535,9 +584,9 @@ fn setup_storage_for_masp_verification( ), _ => panic!("Unexpected bench test"), }; - shielded_ctx.shell.execute_tx(&signed_tx); + let verifiers_from_tx = shielded_ctx.shell.execute_tx(&signed_tx); - (shielded_ctx, signed_tx) + (shielded_ctx, verifiers_from_tx, signed_tx) } fn masp(c: &mut Criterion) { @@ -545,18 +594,17 @@ fn masp(c: &mut Criterion) { for bench_name in ["shielding", "unshielding", "shielded"] { group.bench_function(bench_name, |b| { - let (shielded_ctx, signed_tx) = + let (shielded_ctx, verifiers_from_tx, signed_tx) = setup_storage_for_masp_verification(bench_name); let (verifiers, keys_changed) = shielded_ctx .shell .state .write_log() - .verifiers_and_changed_keys(&BTreeSet::default()); + .verifiers_and_changed_keys(&verifiers_from_tx); let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(u64::MAX.into()), )); - let sentinel = RefCell::new(VpSentinel::default()); let masp = MaspVp { ctx: Ctx::new( &Address::Internal(InternalAddress::Masp), @@ -564,7 +612,6 @@ fn masp(c: &mut Criterion) { &signed_tx, &TxIndex(0), &gas_meter, - &sentinel, &keys_changed, &verifiers, shielded_ctx.shell.vp_wasm_cache.clone(), @@ -578,7 +625,7 @@ fn masp(c: &mut Criterion) { masp.ctx.keys_changed, masp.ctx.verifiers, ) - .unwrap() + .is_ok() ); }) }); @@ -587,32 +634,187 @@ fn masp(c: &mut Criterion) { group.finish(); } -fn masp_verify_shielded_tx(c: &mut Criterion) { - let mut group = c.benchmark_group("vp_masp_verify_shielded_tx"); +fn masp_check_spend(c: &mut Criterion) { + let spend_vk = &preload_verifying_keys().spend_vk; + + 
c.bench_function("vp_masp_check_spend", |b| { + b.iter_batched_ref( + || { + let (_, _verifiers_from_tx, signed_tx) = + setup_storage_for_masp_verification("shielded"); + + let transaction = signed_tx + .sections + .into_iter() + .filter_map(|section| match section { + Section::MaspTx(transaction) => Some(transaction), + _ => None, + }) + .collect::>() + .first() + .unwrap() + .to_owned(); + let spend = transaction + .sapling_bundle() + .unwrap() + .shielded_spends + .first() + .unwrap() + .to_owned(); + let ctx = SaplingVerificationContext::new(true); + let tx_data = transaction.deref(); + // Partially deauthorize the transparent bundle + let unauth_tx_data = partial_deauthorize(tx_data).unwrap(); + let txid_parts = unauth_tx_data.digest(TxIdDigester); + let sighash = signature_hash( + &unauth_tx_data, + &SignableInput::Shielded, + &txid_parts, + ); - for bench_name in ["shielding", "unshielding", "shielded"] { - group.bench_function(bench_name, |b| { - let (_, signed_tx) = - setup_storage_for_masp_verification(bench_name); + (ctx, spend, sighash) + }, + |(ctx, spend, sighash)| { + assert!(check_spend(spend, sighash.as_ref(), ctx, spend_vk)); + }, + BatchSize::SmallInput, + ) + }); +} - let transaction = signed_tx - .sections - .into_iter() - .filter_map(|section| match section { - Section::MaspTx(transaction) => Some(transaction), - _ => None, - }) - .collect::>() - .first() - .unwrap() - .to_owned(); - b.iter(|| { - assert!(verify_shielded_tx(&transaction)); - }) - }); - } +fn masp_check_convert(c: &mut Criterion) { + let convert_vk = &preload_verifying_keys().convert_vk; + + c.bench_function("vp_masp_check_convert", |b| { + b.iter_batched_ref( + || { + let (_, _verifiers_from_tx, signed_tx) = + setup_storage_for_masp_verification("shielded"); + + let transaction = signed_tx + .sections + .into_iter() + .filter_map(|section| match section { + Section::MaspTx(transaction) => Some(transaction), + _ => None, + }) + .collect::>() + .first() + .unwrap() + 
.to_owned(); + let convert = transaction + .sapling_bundle() + .unwrap() + .shielded_converts + .first() + .unwrap() + .to_owned(); + let ctx = SaplingVerificationContext::new(true); - group.finish(); + (ctx, convert) + }, + |(ctx, convert)| { + assert!(check_convert(convert, ctx, convert_vk)); + }, + BatchSize::SmallInput, + ) + }); +} + +fn masp_check_output(c: &mut Criterion) { + let output_vk = &preload_verifying_keys().output_vk; + + c.bench_function("masp_vp_check_output", |b| { + b.iter_batched_ref( + || { + let (_, _verifiers_from_tx, signed_tx) = + setup_storage_for_masp_verification("shielded"); + + let transaction = signed_tx + .sections + .into_iter() + .filter_map(|section| match section { + Section::MaspTx(transaction) => Some(transaction), + _ => None, + }) + .collect::>() + .first() + .unwrap() + .to_owned(); + let output = transaction + .sapling_bundle() + .unwrap() + .shielded_outputs + .first() + .unwrap() + .to_owned(); + let ctx = SaplingVerificationContext::new(true); + + (ctx, output) + }, + |(ctx, output)| { + assert!(check_output(output, ctx, output_vk)); + }, + BatchSize::SmallInput, + ) + }); +} + +fn masp_final_check(c: &mut Criterion) { + let PVKs { + spend_vk, + convert_vk, + output_vk, + } = preload_verifying_keys(); + + let (_, _verifiers_from_tx, signed_tx) = + setup_storage_for_masp_verification("shielded"); + + let transaction = signed_tx + .sections + .into_iter() + .filter_map(|section| match section { + Section::MaspTx(transaction) => Some(transaction), + _ => None, + }) + .collect::>() + .first() + .unwrap() + .to_owned(); + let sapling_bundle = transaction.sapling_bundle().unwrap(); + let mut ctx = SaplingVerificationContext::new(true); + // Partially deauthorize the transparent bundle + let unauth_tx_data = partial_deauthorize(transaction.deref()).unwrap(); + let txid_parts = unauth_tx_data.digest(TxIdDigester); + let sighash = + signature_hash(&unauth_tx_data, &SignableInput::Shielded, &txid_parts); + + // Check spends, 
converts and outputs before the final check + assert!(sapling_bundle.shielded_spends.iter().all(|spend| { + check_spend(spend, sighash.as_ref(), &mut ctx, spend_vk) + })); + assert!( + sapling_bundle + .shielded_converts + .iter() + .all(|convert| check_convert(convert, &mut ctx, convert_vk)) + ); + assert!( + sapling_bundle + .shielded_outputs + .iter() + .all(|output| check_output(output, &mut ctx, output_vk)) + ); + + c.bench_function("vp_masp_final_check", |b| { + b.iter(|| { + assert!(ctx.final_check( + sapling_bundle.value_balance.clone(), + sighash.as_ref(), + sapling_bundle.authorization.binding_sig + )) + }) + }); } fn pgf(c: &mut Criterion) { @@ -663,17 +865,16 @@ fn pgf(c: &mut Criterion) { }; // Run the tx to validate - shell.execute_tx(&signed_tx); + let verifiers_from_tx = shell.execute_tx(&signed_tx); let (verifiers, keys_changed) = shell .state .write_log() - .verifiers_and_changed_keys(&BTreeSet::default()); + .verifiers_and_changed_keys(&verifiers_from_tx); let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(u64::MAX.into()), )); - let sentinel = RefCell::new(VpSentinel::default()); let pgf = PgfVp { ctx: Ctx::new( &Address::Internal(InternalAddress::Pgf), @@ -681,7 +882,6 @@ fn pgf(c: &mut Criterion) { &signed_tx, &TxIndex(0), &gas_meter, - &sentinel, &keys_changed, &verifiers, shell.vp_wasm_cache.clone(), @@ -696,7 +896,7 @@ fn pgf(c: &mut Criterion) { pgf.ctx.keys_changed, pgf.ctx.verifiers, ) - .unwrap() + .is_ok() ) }) }); @@ -706,6 +906,10 @@ fn pgf(c: &mut Criterion) { } fn eth_bridge_nut(c: &mut Criterion) { + if !is_bridge_comptime_enabled() { + return; + } + let mut shell = BenchShell::default(); let native_erc20_addres = read_native_erc20_address(&shell.state).unwrap(); @@ -735,19 +939,18 @@ fn eth_bridge_nut(c: &mut Criterion) { }; // Run the tx to validate - shell.execute_tx(&signed_tx); + let verifiers_from_tx = shell.execute_tx(&signed_tx); let (verifiers, keys_changed) = shell .state 
.write_log() - .verifiers_and_changed_keys(&BTreeSet::default()); + .verifiers_and_changed_keys(&verifiers_from_tx); let vp_address = Address::Internal(InternalAddress::Nut(native_erc20_addres)); let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(u64::MAX.into()), )); - let sentinel = RefCell::new(VpSentinel::default()); let nut = NonUsableTokens { ctx: Ctx::new( &vp_address, @@ -755,7 +958,6 @@ fn eth_bridge_nut(c: &mut Criterion) { &signed_tx, &TxIndex(0), &gas_meter, - &sentinel, &keys_changed, &verifiers, shell.vp_wasm_cache.clone(), @@ -770,13 +972,17 @@ fn eth_bridge_nut(c: &mut Criterion) { nut.ctx.keys_changed, nut.ctx.verifiers, ) - .unwrap() + .is_ok() ) }) }); } fn eth_bridge(c: &mut Criterion) { + if !is_bridge_comptime_enabled() { + return; + } + let mut shell = BenchShell::default(); let native_erc20_addres = read_native_erc20_address(&shell.state).unwrap(); @@ -806,18 +1012,17 @@ fn eth_bridge(c: &mut Criterion) { }; // Run the tx to validate - shell.execute_tx(&signed_tx); + let verifiers_from_tx = shell.execute_tx(&signed_tx); let (verifiers, keys_changed) = shell .state .write_log() - .verifiers_and_changed_keys(&BTreeSet::default()); + .verifiers_and_changed_keys(&verifiers_from_tx); let vp_address = Address::Internal(InternalAddress::EthBridge); let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(u64::MAX.into()), )); - let sentinel = RefCell::new(VpSentinel::default()); let eth_bridge = EthBridge { ctx: Ctx::new( &vp_address, @@ -825,7 +1030,6 @@ fn eth_bridge(c: &mut Criterion) { &signed_tx, &TxIndex(0), &gas_meter, - &sentinel, &keys_changed, &verifiers, shell.vp_wasm_cache.clone(), @@ -841,13 +1045,17 @@ fn eth_bridge(c: &mut Criterion) { eth_bridge.ctx.keys_changed, eth_bridge.ctx.verifiers, ) - .unwrap() + .is_ok() ) }) }); } fn eth_bridge_pool(c: &mut Criterion) { + if !is_bridge_comptime_enabled() { + return; + } + // NOTE: this vp is one of the most 
expensive but its cost comes from the // numerous accesses to storage that we already account for, so no need to // benchmark specific sections of it like for the ibc native vp @@ -902,18 +1110,17 @@ fn eth_bridge_pool(c: &mut Criterion) { }; // Run the tx to validate - shell.execute_tx(&signed_tx); + let verifiers_from_tx = shell.execute_tx(&signed_tx); let (verifiers, keys_changed) = shell .state .write_log() - .verifiers_and_changed_keys(&BTreeSet::default()); + .verifiers_and_changed_keys(&verifiers_from_tx); let vp_address = Address::Internal(InternalAddress::EthBridgePool); let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(u64::MAX.into()), )); - let sentinel = RefCell::new(VpSentinel::default()); let bridge_pool = BridgePoolVp { ctx: Ctx::new( &vp_address, @@ -921,7 +1128,6 @@ fn eth_bridge_pool(c: &mut Criterion) { &signed_tx, &TxIndex(0), &gas_meter, - &sentinel, &keys_changed, &verifiers, shell.vp_wasm_cache.clone(), @@ -937,7 +1143,7 @@ fn eth_bridge_pool(c: &mut Criterion) { bridge_pool.ctx.keys_changed, bridge_pool.ctx.verifiers, ) - .unwrap() + .is_ok() ) }) }); @@ -949,12 +1155,12 @@ fn parameters(c: &mut Criterion) { for bench_name in ["foreign_key_write", "parameter_change"] { let mut shell = BenchShell::default(); - let signed_tx = match bench_name { + let (verifiers_from_tx, signed_tx) = match bench_name { "foreign_key_write" => { let tx = generate_foreign_key_tx(&defaults::albert_keypair()); // Run the tx to validate - shell.execute_tx(&tx); - tx + let verifiers_from_tx = shell.execute_tx(&tx); + (verifiers_from_tx, tx) } "parameter_change" => { // Simulate governance proposal to modify a parameter @@ -966,12 +1172,10 @@ fn parameters(c: &mut Criterion) { shell.state.write(&proposal_key, 0).unwrap(); // Return a dummy tx for validation - let mut tx = - Tx::from_type(namada::tx::data::TxType::Decrypted( - namada::tx::data::DecryptedTx::Decrypted, - )); + let mut tx = 
Tx::from_type(namada::tx::data::TxType::Raw); tx.set_data(namada::tx::Data::new(borsh::to_vec(&0).unwrap())); - tx + let verifiers_from_tx = BTreeSet::default(); + (verifiers_from_tx, tx) } _ => panic!("Unexpected bench test"), }; @@ -979,13 +1183,12 @@ fn parameters(c: &mut Criterion) { let (verifiers, keys_changed) = shell .state .write_log() - .verifiers_and_changed_keys(&BTreeSet::default()); + .verifiers_and_changed_keys(&verifiers_from_tx); let vp_address = Address::Internal(InternalAddress::Parameters); let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(u64::MAX.into()), )); - let sentinel = RefCell::new(VpSentinel::default()); let parameters = ParametersVp { ctx: Ctx::new( &vp_address, @@ -993,7 +1196,6 @@ fn parameters(c: &mut Criterion) { &signed_tx, &TxIndex(0), &gas_meter, - &sentinel, &keys_changed, &verifiers, shell.vp_wasm_cache.clone(), @@ -1009,7 +1211,7 @@ fn parameters(c: &mut Criterion) { parameters.ctx.keys_changed, parameters.ctx.verifiers, ) - .unwrap() + .is_ok() ) }) }); @@ -1024,12 +1226,12 @@ fn pos(c: &mut Criterion) { for bench_name in ["foreign_key_write", "parameter_change"] { let mut shell = BenchShell::default(); - let signed_tx = match bench_name { + let (verifiers_from_tx, signed_tx) = match bench_name { "foreign_key_write" => { let tx = generate_foreign_key_tx(&defaults::albert_keypair()); // Run the tx to validate - shell.execute_tx(&tx); - tx + let verifiers_from_tx = shell.execute_tx(&tx); + (verifiers_from_tx, tx) } "parameter_change" => { // Simulate governance proposal to modify a parameter @@ -1041,12 +1243,10 @@ fn pos(c: &mut Criterion) { shell.state.write(&proposal_key, 0).unwrap(); // Return a dummy tx for validation - let mut tx = - Tx::from_type(namada::tx::data::TxType::Decrypted( - namada::tx::data::DecryptedTx::Decrypted, - )); + let mut tx = Tx::from_type(namada::tx::data::TxType::Raw); tx.set_data(namada::tx::Data::new(borsh::to_vec(&0).unwrap())); - tx + let 
verifiers_from_tx = BTreeSet::default(); + (verifiers_from_tx, tx) } _ => panic!("Unexpected bench test"), }; @@ -1054,13 +1254,12 @@ fn pos(c: &mut Criterion) { let (verifiers, keys_changed) = shell .state .write_log() - .verifiers_and_changed_keys(&BTreeSet::default()); + .verifiers_and_changed_keys(&verifiers_from_tx); let vp_address = Address::Internal(InternalAddress::PoS); let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(u64::MAX.into()), )); - let sentinel = RefCell::new(VpSentinel::default()); let pos = PosVP { ctx: Ctx::new( &vp_address, @@ -1068,7 +1267,6 @@ fn pos(c: &mut Criterion) { &signed_tx, &TxIndex(0), &gas_meter, - &sentinel, &keys_changed, &verifiers, shell.vp_wasm_cache.clone(), @@ -1083,7 +1281,7 @@ fn pos(c: &mut Criterion) { pos.ctx.keys_changed, pos.ctx.verifiers, ) - .unwrap() + .is_ok() ) }) }); @@ -1094,95 +1292,51 @@ fn pos(c: &mut Criterion) { fn ibc_vp_validate_action(c: &mut Criterion) { let mut group = c.benchmark_group("vp_ibc_validate_action"); - let shell = BenchShell::default(); - - // Connection handshake - let msg = MsgConnectionOpenInit { - client_id_on_a: ClientId::new( - ClientType::new("01-tendermint").unwrap(), - 1, - ) - .unwrap(), - counterparty: Counterparty::new( - ClientId::from_str("01-tendermint-1").unwrap(), - None, - CommitmentPrefix::try_from(b"ibc".to_vec()).unwrap(), - ), - version: Some(Version::default()), - delay_period: std::time::Duration::new(100, 0), - signer: defaults::albert_address().to_string().into(), - }; - let open_connection = shell.generate_ibc_tx(TX_IBC_WASM, msg); - - // Channel handshake - let msg = MsgChannelOpenInit { - port_id_on_a: PortId::transfer(), - connection_hops_on_a: vec![ConnectionId::new(1)], - port_id_on_b: PortId::transfer(), - ordering: Order::Unordered, - signer: defaults::albert_address().to_string().into(), - version_proposal: ChannelVersion::new("ics20-1".to_string()), - }; - // Avoid serializing the data again with borsh - let 
open_channel = shell.generate_ibc_tx(TX_IBC_WASM, msg); - - // Ibc transfer - let outgoing_transfer = shell.generate_ibc_transfer_tx(); - - for (signed_tx, bench_name) in - [open_connection, open_channel, outgoing_transfer] - .iter() - .zip(["open_connection", "open_channel", "outgoing_transfer"]) - { - let mut shell = BenchShell::default(); - // Initialize the state according to the target tx - match bench_name { - "open_connection" => { - let _ = shell.init_ibc_client_state( - namada::core::storage::Key::from( - Address::Internal(InternalAddress::Ibc).to_db_key(), - ), - ); - } - "open_channel" => { - let _ = shell.init_ibc_connection(); - } - "outgoing_transfer" => shell.init_ibc_channel(), - _ => panic!("Unexpected bench test"), - } + for bench_name in [ + "open_connection", + "open_channel", + "outgoing_transfer", + "outgoing_shielded_action", + ] { + let (mut shielded_ctx, signed_tx) = prepare_ibc_tx_and_ctx(bench_name); - shell.execute_tx(signed_tx); + let verifiers_from_tx = shielded_ctx.shell.execute_tx(&signed_tx); let tx_data = signed_tx.data().unwrap(); - let (verifiers, keys_changed) = shell + let (verifiers, keys_changed) = shielded_ctx + .shell .state .write_log() - .verifiers_and_changed_keys(&BTreeSet::default()); + .verifiers_and_changed_keys(&verifiers_from_tx); let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(u64::MAX.into()), )); - let sentinel = RefCell::new(VpSentinel::default()); let ibc = Ibc { ctx: Ctx::new( &Address::Internal(InternalAddress::Ibc), - &shell.state, - signed_tx, + &shielded_ctx.shell.state, + &signed_tx, &TxIndex(0), &gas_meter, - &sentinel, &keys_changed, &verifiers, - shell.vp_wasm_cache.clone(), + shielded_ctx.shell.vp_wasm_cache.clone(), ), }; + // Use an empty verifiers set placeholder for validation, this is only + // needed in actual txs to addresses whose VPs should be triggered + let verifiers = Rc::new(RefCell::new(BTreeSet::
::new())); + let exec_ctx = PseudoExecutionContext::new(ibc.ctx.pre()); let ctx = Rc::new(RefCell::new(exec_ctx)); - let mut actions = IbcActions::new(ctx.clone()); + let mut actions = IbcActions::new(ctx.clone(), verifiers.clone()); actions.set_validation_params(ibc.validation_params().unwrap()); - let module = TransferModule::new(ctx); - actions.add_transfer_module(module.module_id(), module); + let module = TransferModule::new(ctx.clone(), verifiers); + actions.add_transfer_module(module); + let module = NftTransferModule::new(ctx); + actions.add_transfer_module(module); group.bench_function(bench_name, |b| { b.iter(|| actions.validate(&tx_data).unwrap()) @@ -1194,95 +1348,52 @@ fn ibc_vp_validate_action(c: &mut Criterion) { fn ibc_vp_execute_action(c: &mut Criterion) { let mut group = c.benchmark_group("vp_ibc_execute_action"); - let shell = BenchShell::default(); - - // Connection handshake - let msg = MsgConnectionOpenInit { - client_id_on_a: ClientId::new( - ClientType::new("01-tendermint").unwrap(), - 1, - ) - .unwrap(), - counterparty: Counterparty::new( - ClientId::from_str("01-tendermint-1").unwrap(), - None, - CommitmentPrefix::try_from(b"ibc".to_vec()).unwrap(), - ), - version: Some(Version::default()), - delay_period: std::time::Duration::new(100, 0), - signer: defaults::albert_address().to_string().into(), - }; - let open_connection = shell.generate_ibc_tx(TX_IBC_WASM, msg); - - // Channel handshake - let msg = MsgChannelOpenInit { - port_id_on_a: PortId::transfer(), - connection_hops_on_a: vec![ConnectionId::new(1)], - port_id_on_b: PortId::transfer(), - ordering: Order::Unordered, - signer: defaults::albert_address().to_string().into(), - version_proposal: ChannelVersion::new("ics20-1".to_string()), - }; - // Avoid serializing the data again with borsh - let open_channel = shell.generate_ibc_tx(TX_IBC_WASM, msg); - - // Ibc transfer - let outgoing_transfer = shell.generate_ibc_transfer_tx(); - - for (signed_tx, bench_name) in - [open_connection, 
open_channel, outgoing_transfer] - .iter() - .zip(["open_connection", "open_channel", "outgoing_transfer"]) - { - let mut shell = BenchShell::default(); - // Initialize the state according to the target tx - match bench_name { - "open_connection" => { - let _ = shell.init_ibc_client_state( - namada::core::storage::Key::from( - Address::Internal(InternalAddress::Ibc).to_db_key(), - ), - ); - } - "open_channel" => { - let _ = shell.init_ibc_connection(); - } - "outgoing_transfer" => shell.init_ibc_channel(), - _ => panic!("Unexpected bench test"), - } + for bench_name in [ + "open_connection", + "open_channel", + "outgoing_transfer", + "outgoing_shielded_action", + ] { + let (mut shielded_ctx, signed_tx) = prepare_ibc_tx_and_ctx(bench_name); - shell.execute_tx(signed_tx); + let verifiers_from_tx = shielded_ctx.shell.execute_tx(&signed_tx); let tx_data = signed_tx.data().unwrap(); - let (verifiers, keys_changed) = shell + let (verifiers, keys_changed) = shielded_ctx + .shell .state .write_log() - .verifiers_and_changed_keys(&BTreeSet::default()); + .verifiers_and_changed_keys(&verifiers_from_tx); let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(u64::MAX.into()), )); - let sentinel = RefCell::new(VpSentinel::default()); let ibc = Ibc { ctx: Ctx::new( &Address::Internal(InternalAddress::Ibc), - &shell.state, - signed_tx, + &shielded_ctx.shell.state, + &signed_tx, &TxIndex(0), &gas_meter, - &sentinel, &keys_changed, &verifiers, - shell.vp_wasm_cache.clone(), + shielded_ctx.shell.vp_wasm_cache.clone(), ), }; + // Use an empty verifiers set placeholder for validation, this is only + // needed in actual txs to addresses whose VPs should be triggered + let verifiers = Rc::new(RefCell::new(BTreeSet::
::new())); + let exec_ctx = PseudoExecutionContext::new(ibc.ctx.pre()); let ctx = Rc::new(RefCell::new(exec_ctx)); - let mut actions = IbcActions::new(ctx.clone()); + + let mut actions = IbcActions::new(ctx.clone(), verifiers.clone()); actions.set_validation_params(ibc.validation_params().unwrap()); - let module = TransferModule::new(ctx); - actions.add_transfer_module(module.module_id(), module); + let module = TransferModule::new(ctx.clone(), verifiers); + actions.add_transfer_module(module); + let module = NftTransferModule::new(ctx); + actions.add_transfer_module(module); group.bench_function(bench_name, |b| { b.iter(|| actions.execute(&tx_data).unwrap()) @@ -1298,7 +1409,10 @@ criterion_group!( // slash_fund, ibc, masp, - masp_verify_shielded_tx, + masp_check_spend, + masp_check_convert, + masp_check_output, + masp_final_check, vp_multitoken, pgf, eth_bridge_nut, diff --git a/crates/benches/process_wrapper.rs b/crates/benches/process_wrapper.rs index da26796e78..a15539b9f5 100644 --- a/crates/benches/process_wrapper.rs +++ b/crates/benches/process_wrapper.rs @@ -5,7 +5,7 @@ use namada::core::storage::BlockHeight; use namada::core::time::DateTimeUtc; use namada::token::{Amount, DenominatedAmount, Transfer}; use namada::tx::data::{Fee, WrapperTx}; -use namada::tx::Signature; +use namada::tx::Authorization; use namada_apps::bench_utils::{BenchShell, TX_TRANSFER_WASM}; use namada_apps::node::ledger::shell::process_proposal::ValidationMeta; use namada_apps::wallet::defaults; @@ -46,20 +46,20 @@ fn process_tx(c: &mut Criterion) { None, ), ))); - tx.add_section(namada::tx::Section::Signature(Signature::new( + tx.add_section(namada::tx::Section::Authorization(Authorization::new( tx.sechashes(), [(0, defaults::albert_keypair())].into_iter().collect(), None, ))); let wrapper = tx.to_bytes(); + #[allow(clippy::disallowed_methods)] let datetime = DateTimeUtc::now(); c.bench_function("wrapper_tx_validation", |b| { b.iter_batched( || { ( - 
shell.state.in_mem().tx_queue.clone(), // Prevent block out of gas and replay protection shell.state.with_temp_write_log(), ValidationMeta::from(shell.state.read_only()), @@ -69,7 +69,6 @@ fn process_tx(c: &mut Criterion) { ) }, |( - tx_queue, mut temp_state, mut validation_meta, mut vp_wasm_cache, @@ -81,7 +80,6 @@ fn process_tx(c: &mut Criterion) { shell .check_proposal_tx( &wrapper, - &mut tx_queue.iter(), &mut validation_meta, &mut temp_state, datetime, diff --git a/crates/benches/txs.rs b/crates/benches/txs.rs index 04a40f74b7..ad68d3e1ab 100644 --- a/crates/benches/txs.rs +++ b/crates/benches/txs.rs @@ -1,9 +1,9 @@ -use std::collections::HashMap; use std::str::FromStr; use criterion::{criterion_group, criterion_main, Criterion}; use namada::account::{InitAccount, UpdateAccount}; use namada::core::address::{self, Address}; +use namada::core::collections::HashMap; use namada::core::eth_bridge_pool::{GasFee, PendingTransfer}; use namada::core::hash::Hash; use namada::core::key::{ @@ -12,6 +12,7 @@ use namada::core::key::{ }; use namada::core::masp::{TransferSource, TransferTarget}; use namada::core::storage::Key; +use namada::eth_bridge::storage::eth_bridge_queries::is_bridge_comptime_enabled; use namada::governance::pgf::storage::steward::StewardDetail; use namada::governance::storage::proposal::ProposalType; use namada::governance::storage::vote::ProposalVote; @@ -24,8 +25,9 @@ use namada::ibc::core::connection::types::msgs::MsgConnectionOpenInit; use namada::ibc::core::connection::types::version::Version; use namada::ibc::core::connection::types::Counterparty; use namada::ibc::core::host::types::identifiers::{ - ClientId, ClientType, ConnectionId, PortId, + ClientId, ConnectionId, PortId, }; +use namada::ibc::primitives::ToProto; use namada::ledger::eth_bridge::read_native_erc20_address; use namada::proof_of_stake::storage::read_pos_params; use namada::proof_of_stake::types::SlashType; @@ -459,13 +461,12 @@ fn init_proposal(c: &mut Criterion) { 
shell.generate_tx( TX_INIT_PROPOSAL_WASM, InitProposalData { - id: 0, content: content_section.get_hash(), author: defaults::albert_address(), - r#type: ProposalType::Default(None), + r#type: ProposalType::Default, voting_start_epoch: 12.into(), voting_end_epoch: 15.into(), - grace_epoch: 18.into(), + activation_epoch: 18.into(), }, None, Some(vec![content_section]), @@ -509,15 +510,14 @@ fn init_proposal(c: &mut Criterion) { shell.generate_tx( TX_INIT_PROPOSAL_WASM, InitProposalData { - id: 1, content: content_section.get_hash(), author: defaults::albert_address(), - r#type: ProposalType::Default(Some( + r#type: ProposalType::DefaultWithWasm( wasm_code_section.get_hash(), - )), + ), voting_start_epoch: 12.into(), voting_end_epoch: 15.into(), - grace_epoch: 18.into(), + activation_epoch: 18.into(), }, None, Some(vec![content_section, wasm_code_section]), @@ -705,7 +705,7 @@ fn change_consensus_key(c: &mut Criterion) { b.iter_batched_ref( BenchShell::default, |shell| shell.execute_tx(&signed_tx), - criterion::BatchSize::LargeInput, + criterion::BatchSize::SmallInput, ) }); } @@ -735,78 +735,107 @@ fn change_validator_metadata(c: &mut Criterion) { b.iter_batched_ref( BenchShell::default, |shell| shell.execute_tx(&signed_tx), - criterion::BatchSize::LargeInput, + criterion::BatchSize::SmallInput, ) }); } fn ibc(c: &mut Criterion) { let mut group = c.benchmark_group("tx_ibc"); - let shell = BenchShell::default(); - - // Connection handshake - let msg = MsgConnectionOpenInit { - client_id_on_a: ClientId::new( - ClientType::new("01-tendermint").unwrap(), - 1, - ) - .unwrap(), - counterparty: Counterparty::new( - ClientId::from_str("01-tendermint-1").unwrap(), - None, - CommitmentPrefix::try_from(b"ibc".to_vec()).unwrap(), - ), - version: Some(Version::default()), - delay_period: std::time::Duration::new(100, 0), - signer: defaults::albert_address().to_string().into(), - }; - let open_connection = shell.generate_ibc_tx(TX_IBC_WASM, msg); - - // Channel handshake - let msg 
= MsgChannelOpenInit { - port_id_on_a: PortId::transfer(), - connection_hops_on_a: vec![ConnectionId::new(1)], - port_id_on_b: PortId::transfer(), - ordering: Order::Unordered, - signer: defaults::albert_address().to_string().into(), - version_proposal: ChannelVersion::new("ics20-1".to_string()), - }; - - // Avoid serializing the data again with borsh - let open_channel = shell.generate_ibc_tx(TX_IBC_WASM, msg); - - // Ibc transfer - let outgoing_transfer = shell.generate_ibc_transfer_tx(); // NOTE: Ibc encompass a variety of different messages that can be executed, // here we only benchmark a few of those - for (signed_tx, bench_name) in - [open_connection, open_channel, outgoing_transfer] - .iter() - .zip(["open_connection", "open_channel", "outgoing_transfer"]) - { + for bench_name in [ + "open_connection", + "open_channel", + "outgoing_transfer", + "outgoing_shielded_action", + ] { group.bench_function(bench_name, |b| { b.iter_batched_ref( || { - let mut shell = BenchShell::default(); + let mut shielded_ctx = BenchShieldedCtx::default(); // Initialize the state according to the target tx - match bench_name { + let (shielded_ctx, signed_tx) = match bench_name { "open_connection" => { - let _ = shell.init_ibc_client_state( + let _ = shielded_ctx.shell.init_ibc_client_state( namada::core::storage::Key::from( Address::Internal(namada::core::address::InternalAddress::Ibc).to_db_key(), ), ); + // Connection handshake + let msg = MsgConnectionOpenInit { + client_id_on_a: ClientId::new("07-tendermint", 1).unwrap(), + counterparty: Counterparty::new( + ClientId::from_str("07-tendermint-1").unwrap(), + None, + CommitmentPrefix::try_from(b"ibc".to_vec()).unwrap(), + ), + version: Some(Version::default()), + delay_period: std::time::Duration::new(100, 0), + signer: defaults::albert_address().to_string().into(), + }; + let mut data = vec![]; + prost::Message::encode(&msg.to_any(), &mut data).unwrap(); + let open_connection = shielded_ctx.shell.generate_ibc_tx(TX_IBC_WASM, 
data); + (shielded_ctx, open_connection) } "open_channel" => { - let _ = shell.init_ibc_connection(); + let _ = shielded_ctx.shell.init_ibc_connection(); + // Channel handshake + let msg = MsgChannelOpenInit { + port_id_on_a: PortId::transfer(), + connection_hops_on_a: vec![ConnectionId::new(1)], + port_id_on_b: PortId::transfer(), + ordering: Order::Unordered, + signer: defaults::albert_address().to_string().into(), + version_proposal: ChannelVersion::new("ics20-1".to_string()), + }; + + // Avoid serializing the data again with borsh + let mut data = vec![]; + prost::Message::encode(&msg.to_any(), &mut data).unwrap(); + let open_channel = shielded_ctx.shell.generate_ibc_tx(TX_IBC_WASM, data); + (shielded_ctx, open_channel) + } + "outgoing_transfer" => { + shielded_ctx.shell.init_ibc_channel(); + let outgoing_transfer = shielded_ctx.shell.generate_ibc_transfer_tx(); + (shielded_ctx, outgoing_transfer) + } + "outgoing_shielded_action" => { + shielded_ctx.shell.init_ibc_channel(); + let albert_payment_addr = shielded_ctx + .wallet + .find_payment_addr(ALBERT_PAYMENT_ADDRESS) + .unwrap() + .to_owned(); + let albert_spending_key = shielded_ctx + .wallet + .find_spending_key(ALBERT_SPENDING_KEY, None) + .unwrap() + .to_owned(); + // Shield some tokens for Albert + let (mut shielded_ctx, shield_tx) = shielded_ctx.generate_masp_tx( + Amount::native_whole(500), + TransferSource::Address(defaults::albert_address()), + TransferTarget::PaymentAddress(albert_payment_addr), + ); + shielded_ctx.shell.execute_tx(&shield_tx); + shielded_ctx.shell.commit_masp_tx(shield_tx); + shielded_ctx.shell.commit_block(); + + shielded_ctx.generate_shielded_action( + Amount::native_whole(10), + TransferSource::ExtendedSpendingKey(albert_spending_key), + TransferTarget::Address(defaults::bertha_address()), + ) } - "outgoing_transfer" => shell.init_ibc_channel(), _ => panic!("Unexpected bench test"), - } - shell + }; + (shielded_ctx, signed_tx) }, - |shell| shell.execute_tx(signed_tx), + 
|(shielded_ctx, signed_tx)| shielded_ctx.shell.execute_tx(signed_tx), criterion::BatchSize::SmallInput, ) }); @@ -862,6 +891,10 @@ fn unjail_validator(c: &mut Criterion) { } fn tx_bridge_pool(c: &mut Criterion) { + if !is_bridge_comptime_enabled() { + return; + } + let shell = BenchShell::default(); let data = PendingTransfer { @@ -973,7 +1006,7 @@ fn deactivate_validator(c: &mut Criterion) { b.iter_batched_ref( BenchShell::default, |shell| shell.execute_tx(&signed_tx), - criterion::BatchSize::LargeInput, + criterion::BatchSize::SmallInput, ) }); } @@ -1013,7 +1046,7 @@ fn reactivate_validator(c: &mut Criterion) { shell }, |shell| shell.execute_tx(&signed_tx), - criterion::BatchSize::LargeInput, + criterion::BatchSize::SmallInput, ) }); } @@ -1066,7 +1099,7 @@ fn claim_rewards(c: &mut Criterion) { shell }, |shell| shell.execute_tx(signed_tx), - criterion::BatchSize::LargeInput, + criterion::BatchSize::SmallInput, ) }); } diff --git a/crates/benches/vps.rs b/crates/benches/vps.rs index 48e9229443..1faf6ba5c9 100644 --- a/crates/benches/vps.rs +++ b/crates/benches/vps.rs @@ -1,5 +1,4 @@ use std::cell::RefCell; -use std::collections::BTreeSet; use criterion::{criterion_group, criterion_main, Criterion}; use namada::account::UpdateAccount; @@ -27,150 +26,6 @@ use sha2::Digest; const VP_IMPLICIT_WASM: &str = "vp_implicit.wasm"; -fn vp_user(c: &mut Criterion) { - let mut group = c.benchmark_group("vp_user"); - let shell = BenchShell::default(); - let vp_code_hash: Hash = shell - .read_storage_key(&Key::wasm_hash(VP_USER_WASM)) - .unwrap(); - - let foreign_key_write = - generate_foreign_key_tx(&defaults::albert_keypair()); - - let transfer = shell.generate_tx( - TX_TRANSFER_WASM, - Transfer { - source: defaults::albert_address(), - target: defaults::bertha_address(), - token: address::testing::nam(), - amount: Amount::native_whole(1000).native_denominated(), - key: None, - shielded: None, - }, - None, - None, - vec![&defaults::albert_keypair()], - ); - - let 
received_transfer = shell.generate_tx( - TX_TRANSFER_WASM, - Transfer { - source: defaults::bertha_address(), - target: defaults::albert_address(), - token: address::testing::nam(), - amount: Amount::native_whole(1000).native_denominated(), - key: None, - shielded: None, - }, - None, - None, - vec![&defaults::bertha_keypair()], - ); - - let vp_validator_hash = shell - .read_storage_key(&Key::wasm_hash(VP_USER_WASM)) - .unwrap(); - let extra_section = Section::ExtraData(Code::from_hash( - vp_validator_hash, - Some(VP_USER_WASM.to_string()), - )); - let data = UpdateAccount { - addr: defaults::albert_address(), - vp_code_hash: Some(Hash( - extra_section - .hash(&mut sha2::Sha256::new()) - .finalize_reset() - .into(), - )), - public_keys: vec![defaults::albert_keypair().to_public()], - threshold: None, - }; - let vp = shell.generate_tx( - TX_UPDATE_ACCOUNT_WASM, - data, - None, - Some(vec![extra_section]), - vec![&defaults::albert_keypair()], - ); - - let vote = shell.generate_tx( - TX_VOTE_PROPOSAL_WASM, - VoteProposalData { - id: 0, - vote: ProposalVote::Yay, - voter: defaults::albert_address(), - delegations: vec![defaults::validator_address()], - }, - None, - None, - vec![&defaults::albert_keypair()], - ); - - let pos = shell.generate_tx( - TX_UNBOND_WASM, - Bond { - validator: defaults::validator_address(), - amount: Amount::native_whole(1000), - source: Some(defaults::albert_address()), - }, - None, - None, - vec![&defaults::albert_keypair()], - ); - - for (signed_tx, bench_name) in [ - foreign_key_write, - transfer, - received_transfer, - vote, - pos, - vp, - ] - .iter() - .zip([ - "foreign_key_write", - "transfer", - "received_transfer", - "governance_vote", - "pos", - "vp", - ]) { - let mut shell = BenchShell::default(); - shell.execute_tx(signed_tx); - let (verifiers, keys_changed) = shell - .state - .write_log() - .verifiers_and_changed_keys(&BTreeSet::default()); - - group.bench_function(bench_name, |b| { - b.iter(|| { - let gas_meter = 
RefCell::new(VpGasMeter::new_from_tx_meter( - &TxGasMeter::new_from_sub_limit(u64::MAX.into()), - )); - assert!( - // NOTE: the wasm code is always in cache so we don't - // include here the cost to read and compile the vp code - run::vp( - vp_code_hash, - signed_tx, - &TxIndex(0), - &defaults::albert_address(), - &shell.state, - &gas_meter, - &keys_changed, - &verifiers, - shell.vp_wasm_cache.clone(), - ) - .unwrap(), - "VP \"{bench_name}\" bench call failed" - ); - }) - }); - } - - group.finish(); -} - fn vp_implicit(c: &mut Criterion) { let mut group = c.benchmark_group("vp_implicit"); @@ -284,17 +139,17 @@ fn vp_implicit(c: &mut Criterion) { } // Run the tx to validate - shell.execute_tx(tx); + let verifiers_from_tx = shell.execute_tx(tx); let (verifiers, keys_changed) = shell .state .write_log() - .verifiers_and_changed_keys(&BTreeSet::default()); + .verifiers_and_changed_keys(&verifiers_from_tx); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(u64::MAX.into()), + )); group.bench_function(bench_name, |b| { b.iter(|| { - let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( - &TxGasMeter::new_from_sub_limit(u64::MAX.into()), - )); assert!( run::vp( vp_code_hash, @@ -307,7 +162,7 @@ fn vp_implicit(c: &mut Criterion) { &verifiers, shell.vp_wasm_cache.clone(), ) - .unwrap() + .is_ok() ) }) }); @@ -316,12 +171,12 @@ fn vp_implicit(c: &mut Criterion) { group.finish(); } -fn vp_validator(c: &mut Criterion) { +fn vp_user(c: &mut Criterion) { let shell = BenchShell::default(); let vp_code_hash: Hash = shell .read_storage_key(&Key::wasm_hash(VP_USER_WASM)) .unwrap(); - let mut group = c.benchmark_group("vp_validator"); + let mut group = c.benchmark_group("vp_user"); let foreign_key_write = generate_foreign_key_tx(&defaults::validator_account_keypair()); @@ -436,17 +291,19 @@ fn vp_validator(c: &mut Criterion) { ]) { let mut shell = BenchShell::default(); - shell.execute_tx(signed_tx); + let verifiers_from_tx = 
shell.execute_tx(signed_tx); let (verifiers, keys_changed) = shell .state .write_log() - .verifiers_and_changed_keys(&BTreeSet::default()); + .verifiers_and_changed_keys(&verifiers_from_tx); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(u64::MAX.into()), + )); group.bench_function(bench_name, |b| { b.iter(|| { - let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( - &TxGasMeter::new_from_sub_limit(u64::MAX.into()), - )); + // NOTE: the wasm code is always in cache so we don't + // include here the cost to read and compile the vp code assert!( run::vp( vp_code_hash, @@ -459,7 +316,7 @@ fn vp_validator(c: &mut Criterion) { &verifiers, shell.vp_wasm_cache.clone(), ) - .unwrap() + .is_ok() ); }) }); @@ -468,5 +325,5 @@ fn vp_validator(c: &mut Criterion) { group.finish(); } -criterion_group!(allowed_vps, vp_user, vp_implicit, vp_validator,); +criterion_group!(allowed_vps, vp_user, vp_implicit); criterion_main!(allowed_vps); diff --git a/crates/benches/wasm_opcodes.rs b/crates/benches/wasm_opcodes.rs new file mode 100644 index 0000000000..b9ce3bfdb2 --- /dev/null +++ b/crates/benches/wasm_opcodes.rs @@ -0,0 +1,767 @@ +//! Module to benchmark the wasm instructions. To do so we: +//! - Generate a benchmark for an empty module to serve as a reference since +//! we expect the function call itself to represent the majority of the +//! cost +//! - All instruction (expect the empty function call) must be repeated a +//! certain amount of time because the default iteratrions of criterion +//! don't apply in this case +//! - Some operations require some other instructions to run correctly, in +//! this case we need to subtract these costs +//! 
- From all operations we must subtract the cost of the empy function call + +use std::fmt::Display; + +use criterion::{criterion_group, criterion_main, Criterion}; +use lazy_static::lazy_static; +use wasm_instrument::parity_wasm::elements::Instruction::*; +use wasm_instrument::parity_wasm::elements::{ + BlockType, BrTableData, SignExtInstruction, +}; +use wasmer::{imports, Instance, Module, Store, Value}; + +// Don't reduce this value too much or it will be impossible to see the +// differences in execution times between the diffent instructions +const ITERATIONS: u64 = 10_000; +const ENTRY_POINT: &str = "op"; + +lazy_static! { + static ref WASM_OPTS: Vec = vec![ + // Unreachable unconditionally traps, so no need to divide its cost by ITERATIONS because we only execute it once + Unreachable, + Nop, + Block(BlockType::NoResult), + Loop(BlockType::NoResult), + // remove the cost of i32.const and nop + If(BlockType::NoResult), + // Use 0 to exit the current block (branching in a block goes to the end of it, i.e. exits). Remove the cost of block + Br(0u32), + // Use 0 to exit the current block. Remove the cost of block and i32.const + BrIf(0u32), + // If 0 on top of the stack exit the current block. 
Remove the cost of block and i32.const: + BrTable(Box::new(BrTableData { + table: Box::new([1, 0]), + default: 0u32, + })), + // remove cost of call, i32.const and drop + Return, + // remove the cost of nop + Call(0u32), + // remove cost of i32.const + CallIndirect(0u32, 0u8), + // remove cost of i32.const + Drop, + // remove cost of three i32.const and a drop + Select, + // remove cost of drop + GetLocal(0u32), + // remove the cost of i32.const + SetLocal(0u32), + // remove the cost of i32.const and drop + TeeLocal(0u32), + // remove cost of drop + GetGlobal(0u32), + // remove cost of i32.const + SetGlobal(0u32), + // remove the cost of i32.const and drop + I32Load(0u32, 0u32), + // remove the cost of i32.const and drop + I64Load(0u32, 0u32), + // remove the cost of i32.const and drop + F32Load(0u32, 0u32), + // remove the cost of i32.const and drop + F64Load(0u32, 0u32), + // remove the cost of i32.const and drop + I32Load8S(0u32, 0u32), + // remove the cost of i32.const and drop + I32Load8U(0u32, 0u32), + // remove the cost of i32.const and drop + I32Load16S(0u32, 0u32), + // remove the cost of i32.const and drop + I32Load16U(0u32, 0u32), + // remove the cost of i32.const and drop + I64Load8S(0u32, 0u32), + // remove the cost of i32.const and drop + I64Load8U(0u32, 0u32), + // remove the cost of i32.const and drop + I64Load16S(0u32, 0u32), + // remove the cost of i32.const and drop + I64Load16U(0u32, 0u32), + // remove the cost of i32.const and drop + I64Load32S(0u32, 0u32), + // remove the cost of i32.const and drop + I64Load32U(0u32, 0u32), + // remove the cost of two i32.const + I32Store(0u32, 0u32), + // remove the cost of a i32.const and a i64.const + I64Store(0u32, 0u32), + // remove the cost of a i32.const and a f32.const + F32Store(0u32, 0u32), + // remove the cost of a i32.const and a f64.const + F64Store(0u32, 0u32), + // remove the cost of two i32.const + I32Store8(0u32, 0u32), + // remove the cost of two i32.const + I32Store16(0u32, 0u32), + // 
remove the cost of a i32.const and a i64.const + I64Store8(0u32, 0u32), + // remove the cost of a i32.const and a i64.const + I64Store16(0u32, 0u32), + // remove the cost of a i32.const and a i64.const + I64Store32(0u32, 0u32), + // remove cost of a drop + CurrentMemory(0u8), + // remove the cost of a i32.const and a drop + GrowMemory(0u8), + // remove the cost of a drop + I32Const(0i32), + // remove the cost of a drop + I64Const(0i64), + // remove the cost of a drop + F32Const(0u32), + // remove the cost of a drop + F64Const(0u64), + // remove the cost of a i32.const and a drop + I32Eqz, + // remove the cost of two i32.const and a drop + I32Eq, + // remove the cost of two i32.const and a drop + I32Ne, + // remove the cost of two i32.const and a drop + I32LtS, + // remove the cost of two i32.const and a drop + I32LtU, + // remove the cost of two i32.const and a drop + I32GtS, + // remove the cost of two i32.const and a drop + I32GtU, + // remove the cost of two i32.const and a drop + I32LeS, + // remove the cost of two i32.const and a drop + I32LeU, + // remove the cost of two i32.const and a drop + I32GeS, + // remove the cost of two i32.const and a drop + I32GeU, + // remove the cost of a i64.const and a drop + I64Eqz, + // remove the cost of two i64.const and a drop + I64Eq, + // remove the cost of two i64.const and a drop + I64Ne, + // remove the cost of two i64.const and a drop + I64LtS, + // remove the cost of two i64.const and a drop + I64LtU, + // remove the cost of two i64.const and a drop + I64GtS, + // remove the cost of two i64.const and a drop + I64GtU, + // remove the cost of two i64.const and a drop + I64LeS, + // remove the cost of two i64.const and a drop + I64LeU, + // remove the cost of two i64.const and a drop + I64GeS, + // remove the cost of two i64.const and a drop + I64GeU, + // remove the cost of two f32.const and a drop + F32Eq, + // remove the cost of two f32.const and a drop + F32Ne, + // remove the cost of two f32.const and a drop + 
F32Lt, + // remove the cost of two f32.const and a drop + F32Gt, + // remove the cost of two f32.const and a drop + F32Le, + // remove the cost of two f32.const and a drop + F32Ge, + // remove the cost of two f64.const and a drop + F64Eq, + // remove the cost of two f64.const and a drop + F64Ne, + // remove the cost of two f64.const and a drop + F64Lt, + // remove the cost of two f64.const and a drop + F64Gt, + // remove the cost of two f64.const and a drop + F64Le, + // remove the cost of two f64.const and a drop + F64Ge, + // remove the cost of i32.const and a drop + I32Clz, + // remove the cost of i32.const and a drop + I32Ctz, + // remove the cost of i32.const and a drop + I32Popcnt, + // remove the cost of two i32.const and a drop + I32Add, + // remove the cost of two i32.const and a drop + I32Sub, + // remove the cost of two i32.const and a drop + I32Mul, + // remove the cost of two i32.const and a drop + I32DivS, + // remove the cost of two i32.const and a drop + I32DivU, + // remove the cost of two i32.const and a drop + I32RemS, + // remove the cost of two i32.const and a drop + I32RemU, + // remove the cost of two i32.const and a drop + I32And, + // remove the cost of two i32.const and a drop + I32Or, + // remove the cost of two i32.const and a drop + I32Xor, + // remove the cost of two i32.const and a drop + I32Shl, + // remove the cost of two i32.const and a drop + I32ShrS, + // remove the cost of two i32.const and a drop + I32ShrU, + // remove the cost of two i32.const and a drop + I32Rotl, + // remove the cost of two i32.const and a drop + I32Rotr, + // remove cost of i64.const and a drop + I64Clz, + // remove cost of i64.const and a drop + I64Ctz, + // remove cost of i64.const and a drop + I64Popcnt, + // remove cost of two i64.const and a drop + I64Add, + // remove cost of two i64.const and a drop + I64Sub, + // remove cost of two i64.const and a drop + I64Mul, + // remove cost of two i64.const and a drop + I64DivS, + // remove cost of two i64.const 
and a drop + I64DivU, + // remove cost of two i64.const and a drop + I64RemS, + // remove cost of two i64.const and a drop + I64RemU, + // remove cost of two i64.const and a drop + I64And, + // remove cost of two i64.const and a drop + I64Or, + // remove cost of two i64.const and a drop + I64Xor, + // remove cost of two i64.const and a drop + I64Shl, + // remove cost of two i64.const and a drop + I64ShrS, + // remove cost of two i64.const and a drop + I64ShrU, + // remove cost of two i64.const and a drop + I64Rotl, + // remove cost of two i64.const and a drop + I64Rotr, + // remove cost of a f32.const and a drop + F32Abs, + // remove cost of a f32.const and a drop + F32Neg, + // remove cost of a f32.const and a drop + F32Ceil, + // remove cost of a f32.const and a drop + F32Floor, + // remove cost of a f32.const and a drop + F32Trunc, + // remove cost of a f32.const and a drop + F32Nearest, + // remove cost of a f32.const and a drop + F32Sqrt, + // remove cost of two f32.const and a drop + F32Add, + // remove cost of two f32.const and a drop + F32Sub, + // remove cost of two f32.const and a drop + F32Mul, + // remove cost of two f32.const and a drop + F32Div, + // remove cost of two f32.const and a drop + F32Min, + // remove cost of two f32.const and a drop + F32Max, + // remove cost of two f32.const and a drop + F32Copysign, + // remove cost of a f64.const and a drop + F64Abs, + // remove cost of a f64.const and a drop + F64Neg, + // remove cost of a f64.const and a drop + F64Ceil, + // remove cost of a f64.const and a drop + F64Floor, + // remove cost of a f64.const and a drop + F64Trunc, + // remove cost of a f64.const and a drop + F64Nearest, + // remove cost of a f64.const and a drop + F64Sqrt, + // remove cost of two f64.const and a drop + F64Add, + // remove cost of two f64.const and a drop + F64Sub, + // remove cost of two f64.const and a drop + F64Mul, + // remove cost of two f64.const and a drop + F64Div, + // remove cost of two f64.const and a drop + 
F64Min, + // remove cost of two f64.const and a drop + F64Max, + // remove cost of two f64.const and a drop + F64Copysign, + // remove the cost of a i64.const and a drop + I32WrapI64, + // remove the cost of a f32.const and a drop + I32TruncSF32, + // remove the cost of a f32.const and a drop + I32TruncUF32, + // remove the cost of a f64.const and a drop + I32TruncSF64, + // remove the cost of a f64.const and a drop + I32TruncUF64, + // remove the cost of a i32.const and a drop + I64ExtendSI32, + // remove the cost of a i32.const and a drop + I64ExtendUI32, + // remove the cost of a f32.const and a drop + I64TruncSF32, + // remove the cost of a f32.const and a drop + I64TruncUF32, + // remove the cost of a f64.const and a drop + I64TruncSF64, + // remove the cost of a f64.const and a drop + I64TruncUF64, + // remove the cost of a i32.const and a drop + F32ConvertSI32, + // remove the cost of a i32.const and a drop + F32ConvertUI32, + // remove the cost of a i64.const and a drop + F32ConvertSI64, + // remove the cost of a i64.const and a drop + F32ConvertUI64, + // remove the cost of a f64.const and a drop + F32DemoteF64, + // remove the cost of a i32.const and a drop + F64ConvertSI32, + // remove the cost of a i32.const and a drop + F64ConvertUI32, + // remove the cost of a i64.const and a drop + F64ConvertSI64, + // remove the cost of a i64.const and a drop + F64ConvertUI64, + // remove the cost of a f32.const and a drop + F64PromoteF32, + // remove the cost of a f32.const and a drop + I32ReinterpretF32, + // remove the cost of a f64.const and a drop + I64ReinterpretF64, + // remove the cost of a i32.const and a drop + F32ReinterpretI32, + // remove the cost of a i64.const and a drop + F64ReinterpretI64, + // remove the cost of a i32.load8_s, a i32.const and a drop + SignExt(SignExtInstruction::I32Extend8S), + // remove the cost of a i32.load16_s, a i32.const and a drop + SignExt(SignExtInstruction::I32Extend16S), + // remove the cost of a i64.load8_s, a i32.const 
and a drop + SignExt(SignExtInstruction::I64Extend8S), + // remove the cost of a i64.load16_s, a i32.cons and a drop + SignExt(SignExtInstruction::I64Extend16S), + // remove the cost of a i64.load32_s, a i32.const and a drop + SignExt(SignExtInstruction::I64Extend32S), +]; + } + +struct WatBuilder { + wat: String, + instruction: wasm_instrument::parity_wasm::elements::Instruction, +} + +impl Display for WatBuilder { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + writeln!( + f, + r#" + (module + (func $f0 nop) + (func $f1 (result i32) i32.const 1 return) + (table 1 funcref) + (elem (i32.const 0) $f0) + (global $iter (mut i32) (i32.const 0)) + (memory 1) + (func (export "{ENTRY_POINT}") (param $local_var i32)"# + )?; + + for _ in 0..ITERATIONS { + writeln!(f, r#"{}"#, self.wat)?; + } + write!(f, r#"))"#) + } +} + +// Use singlepass compiler (the same one used in protocol) to prevent +// optimizations that would compile out the benchmarks since most of them are +// trivial operations +fn get_wasm_store() -> Store { + wasmer::Store::new( + &wasmer_engine_universal::Universal::new( + wasmer_compiler_singlepass::Singlepass::default(), + ) + .engine(), + ) +} + +// An empty wasm module to serve as the base reference for all the other +// instructions since the bigger part of the cost is the function call itself +fn empty_module(c: &mut Criterion) { + let module_wat = format!( + r#" + (module + (func (export "{ENTRY_POINT}") (param $local_var i32)) + ) + "#, + ); + let module = Module::new(&get_wasm_store(), module_wat).unwrap(); + let instance = Instance::new(&module, &imports! 
{}).unwrap(); + let function = instance.exports.get_function(ENTRY_POINT).unwrap(); + + c.bench_function("empty_module", |b| { + b.iter(|| function.call(&[Value::I32(0)]).unwrap()); + }); +} + +fn ops(c: &mut Criterion) { + let mut group = c.benchmark_group("wasm_opts"); + + for builder in bench_functions() { + let module = + Module::new(&get_wasm_store(), builder.to_string()).unwrap(); + let instance = Instance::new(&module, &imports! {}).unwrap(); + let function = instance.exports.get_function(ENTRY_POINT).unwrap(); + + group.bench_function(format!("{}", builder.instruction), |b| { + if let Unreachable = builder.instruction { + b.iter(|| function.call(&[Value::I32(0)]).unwrap_err()); + } else { + b.iter(|| function.call(&[Value::I32(0)]).unwrap()); + } + }); + } + + group.finish(); +} + +fn bench_functions() -> Vec { + let instructions = + WASM_OPTS + .clone() + .into_iter() + .map(|instruction| match instruction { + Unreachable | Nop => WatBuilder { + wat: format!(r#"{instruction}"#), + instruction, + }, + Block(_) | Loop(_) => WatBuilder { + wat: format!(r#"({instruction})"#), + instruction, + }, + If(_) => WatBuilder { + wat: format!( + r#" + i32.const 1 + ({instruction} + (then + nop + ) + )"# + ), + instruction, + }, + Br(_) => WatBuilder { + wat: format!( + r#" + (block {instruction}) + "# + ), + instruction, + }, + BrIf(_) | BrTable(_) => WatBuilder { + wat: format!( + r#" + (block + i32.const 1 + {instruction} + ) + "# + ), + instruction, + }, + Return => { + // To benchmark the return opcode we need to call a function + // that returns something and then subtract the cost of + // everything. 
This way we can run the return + // opcode ITERATIONS times + WatBuilder { + wat: r#" + call $f1 + drop + "# + .to_string(), + instruction, + } + } + Call(_) => WatBuilder { + wat: r#" + call $f0 + "# + .to_string(), + instruction, + }, + CallIndirect(_, _) | Drop => WatBuilder { + wat: format!( + r#" + i32.const 0 + {instruction} + "# + ), + instruction, + }, + Select => WatBuilder { + wat: format!( + r#" + i32.const 10 + i32.const 20 + i32.const 0 + {instruction} + drop + "# + ), + instruction, + }, + GetLocal(_) | GetGlobal(_) | CurrentMemory(_) | I32Const(_) + | I64Const(_) | F32Const(_) | F64Const(_) => WatBuilder { + wat: format!( + r#" + {instruction} + drop + "# + ), + instruction, + }, + SetLocal(_) | SetGlobal(_) => WatBuilder { + wat: format!( + r#" + i32.const 10 + {instruction} + "# + ), + instruction, + }, + I32Load(_, _) + | I64Load(_, _) + | F32Load(_, _) + | F64Load(_, _) + | I32Load8S(_, _) + | I32Load8U(_, _) + | I32Load16S(_, _) + | I32Load16U(_, _) + | I64Load8S(_, _) + | I64Load8U(_, _) + | I64Load16S(_, _) + | I64Load16U(_, _) + | I64Load32S(_, _) + | I64Load32U(_, _) + | TeeLocal(_) + | GrowMemory(_) => WatBuilder { + wat: format!( + r#" + i32.const 1 + {instruction} + drop + "# + ), + instruction, + }, + I32Store(_, _) + | I64Store(_, _) + | F32Store(_, _) + | F64Store(_, _) + | I32Store8(_, _) + | I32Store16(_, _) + | I64Store8(_, _) + | I64Store16(_, _) + | I64Store32(_, _) => { + let ty = match instruction { + I32Store(_, _) | I32Store8(_, _) | I32Store16(_, _) => { + "i32" + } + I64Store(_, _) + | I64Store8(_, _) + | I64Store16(_, _) + | I64Store32(_, _) => "i64", + F32Store(_, _) => "f32", + F64Store(_, _) => "f64", + _ => unreachable!(), + }; + + WatBuilder { + wat: format!( + r#" + i32.const 0 + {ty}.const 10000 + {instruction} + "# + ), + instruction, + } + } + I32Eqz | I64Eqz | I32Clz | I32Ctz | I32Popcnt | I64Clz + | I64Ctz | I64Popcnt | F32Abs | F64Abs | F32Neg | F32Ceil + | F32Floor | F32Trunc | F32Nearest | F32Sqrt | F64Neg + | 
F64Ceil | F64Floor | F64Trunc | F64Nearest | F64Sqrt + | I32WrapI64 | I32TruncSF32 | I32TruncUF32 | I32TruncSF64 + | I32TruncUF64 | I64ExtendSI32 | I64ExtendUI32 + | I64TruncSF32 | I64TruncUF32 | I64TruncSF64 | I64TruncUF64 + | F32ConvertSI32 | F32ConvertUI32 | F32ConvertSI64 + | F32ConvertUI64 | F32DemoteF64 | F64ConvertSI32 + | F64ConvertUI32 | F64ConvertSI64 | F64ConvertUI64 + | F64PromoteF32 | I32ReinterpretF32 | I64ReinterpretF64 + | F32ReinterpretI32 | F64ReinterpretI64 => { + let ty = + match instruction { + I32Eqz | I32Clz | I32Ctz | I32Popcnt => "i32", + I64Eqz | I32WrapI64 => "i64", + I64Clz | I64Ctz | I64Popcnt => "i64", + F32Abs | F32Neg | F32Ceil | F32Floor | F32Trunc + | F32Nearest | F32Sqrt => "f32", + F64Abs | F64Neg | F64Ceil | F64Floor | F64Trunc + | F64Nearest | F64Sqrt => "f64", + I32TruncSF32 | I32TruncUF32 | I32ReinterpretF32 => { + "f32" + } + I32TruncSF64 | I32TruncUF64 => "f64", + I64ExtendSI32 | I64ExtendUI32 => "i32", + I64TruncSF32 | I64TruncUF32 => "f32", + I64TruncSF64 | I64TruncUF64 | I64ReinterpretF64 => { + "f64" + } + F32ConvertSI32 | F32ConvertUI32 + | F32ReinterpretI32 => "i32", + F32ConvertSI64 | F32ConvertUI64 => "i64", + F32DemoteF64 => "f64", + F64ConvertSI32 | F64ConvertUI32 => "i32", + F64ConvertSI64 | F64ConvertUI64 + | F64ReinterpretI64 => "i64", + F64PromoteF32 => "f32", + _ => unreachable!(), + }; + WatBuilder { + wat: format!( + r#" + {ty}.const 1000 + {instruction} + drop + "# + ), + instruction, + } + } + I32Eq | I64Eq | F32Eq | F64Eq | I32Ne | I64Ne | F32Ne + | F64Ne | I32LtS | I64LtS | F32Lt | F64Lt | I32LtU | I32GtS + | I32GtU | I32LeS | I32LeU | I32GeS | I32GeU | I64LtU + | I64GtS | I64GtU | I64LeS | I64LeU | I64GeS | I64GeU + | F32Gt | F32Le | F32Ge | F64Gt | F64Le | F64Ge | I32Add + | I64Add | I32Sub | I64Sub | I32Mul | I32DivS | I32DivU + | I32RemS | I32RemU | I32And | I32Or | I32Xor | I32Shl + | I32ShrS | I32ShrU | I32Rotl | I32Rotr | I64Mul | I64DivS + | I64DivU | I64RemS | I64RemU | I64And | I64Or | I64Xor 
+ | I64Shl | I64ShrS | I64ShrU | I64Rotl | I64Rotr | F32Add + | F32Sub | F32Mul | F32Div | F32Min | F32Max | F32Copysign + | F64Add | F64Sub | F64Mul | F64Div | F64Min | F64Max + | F64Copysign => { + let ty = match instruction { + I32Eq | I32Ne | I32LtS | I32LtU | I32GtS | I32GtU + | I32LeS | I32LeU | I32GeS | I32GeU | I32Add + | I32Sub | I32Mul | I32DivS | I32DivU | I32RemS + | I32RemU | I32And | I32Or | I32Xor | I32Shl + | I32ShrS | I32ShrU | I32Rotl | I32Rotr => "i32", + I64Eq | I64Ne | I64LtS | I64LtU | I64GtS | I64GtU + | I64LeS | I64LeU | I64GeS | I64GeU => "i64", + F32Eq | F32Ne | F32Lt | F32Gt | F32Le | F32Ge => "f32", + F64Eq | F64Ne | F64Lt | F64Gt | F64Le | F64Ge => "f64", + I64Add | I64Sub | I64Mul | I64DivS | I64DivU + | I64RemS | I64RemU | I64And | I64Or | I64Xor + | I64Shl | I64ShrS | I64ShrU | I64Rotl | I64Rotr => { + "i64" + } + F32Add | F32Sub | F32Mul | F32Div | F32Min | F32Max + | F32Copysign => "f32", + F64Add | F64Sub | F64Mul | F64Div | F64Min | F64Max + | F64Copysign => "f64", + _ => unreachable!(), + }; + WatBuilder { + wat: format!( + r#" + {ty}.const 2000 + {ty}.const 1000 + {instruction} + drop + "# + ), + instruction, + } + } + + SignExt(SignExtInstruction::I32Extend8S) + | SignExt(SignExtInstruction::I32Extend16S) + | SignExt(SignExtInstruction::I64Extend8S) + | SignExt(SignExtInstruction::I64Extend16S) + | SignExt(SignExtInstruction::I64Extend32S) => { + let load = match instruction { + SignExt(SignExtInstruction::I32Extend8S) => { + "i32.load8_s" + } + SignExt(SignExtInstruction::I32Extend16S) => { + "i32.load16_s" + } + SignExt(SignExtInstruction::I64Extend8S) => { + "i64.load8_s" + } + SignExt(SignExtInstruction::I64Extend16S) => { + "i64.load16_s" + } + SignExt(SignExtInstruction::I64Extend32S) => { + "i64.load32_s" + } + _ => unreachable!(), + }; + WatBuilder { + wat: format!( + r#" + i32.const 1000 + {load} + {instruction} + drop + "# + ), + instruction, + } + } + _ => { + panic!("Found an instruction not covered by the 
benchmarks") + } + }); + + instructions.collect() +} + +criterion_group!(wasm_opcodes, ops, empty_module); +criterion_main!(wasm_opcodes); diff --git a/crates/controller/Cargo.toml b/crates/controller/Cargo.toml new file mode 100644 index 0000000000..925766a900 --- /dev/null +++ b/crates/controller/Cargo.toml @@ -0,0 +1,18 @@ +[package] +name = "namada_controller" +description = "Namada controller" +resolver = "2" +authors.workspace = true +edition.workspace = true +documentation.workspace = true +homepage.workspace = true +keywords.workspace = true +license.workspace = true +readme.workspace = true +repository.workspace = true +version.workspace = true + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +namada_core = { path = "../core" } diff --git a/crates/controller/src/lib.rs b/crates/controller/src/lib.rs new file mode 100644 index 0000000000..4f752ac16e --- /dev/null +++ b/crates/controller/src/lib.rs @@ -0,0 +1,94 @@ +use namada_core::dec::Dec; +use namada_core::uint::Uint; + +#[derive(Clone, Debug)] +pub struct PDController { + total_native_amount: Uint, + max_reward_rate: Dec, + last_inflation_amount: Uint, + p_gain_nom: Dec, + d_gain_nom: Dec, + epochs_per_year: u64, + target_metric: Dec, + last_metric: Dec, +} + +impl PDController { + #[allow(clippy::too_many_arguments)] + pub fn new( + total_native_amount: Uint, + max_reward_rate: Dec, + last_inflation_amount: Uint, + p_gain_nom: Dec, + d_gain_nom: Dec, + epochs_per_year: u64, + target_metric: Dec, + last_metric: Dec, + ) -> PDController { + PDController { + total_native_amount, + max_reward_rate, + last_inflation_amount, + p_gain_nom, + d_gain_nom, + epochs_per_year, + target_metric, + last_metric, + } + } + + pub fn compute_inflation( + &self, + control_coeff: Dec, + current_metric: Dec, + ) -> Uint { + let control = self.compute_control(control_coeff, current_metric); + self.compute_inflation_aux(control) + } + + pub fn 
get_total_native_dec(&self) -> Dec { + Dec::try_from(self.total_native_amount) + .expect("Should not fail to convert Uint to Dec") + } + + pub fn get_epochs_per_year(&self) -> u64 { + self.epochs_per_year + } + + fn get_max_inflation(&self) -> Uint { + let total_native = self.get_total_native_dec(); + let epochs_py: Dec = self.epochs_per_year.into(); + + let max_inflation = total_native * self.max_reward_rate / epochs_py; + max_inflation + .to_uint() + .expect("Should not fail to convert Dec to Uint") + } + + // TODO: could possibly use I256 instead of Dec here (need to account for + // negative vals) + fn compute_inflation_aux(&self, control: Dec) -> Uint { + let last_inflation_amount = Dec::try_from(self.last_inflation_amount) + .expect("Should not fail to convert Uint to Dec"); + let new_inflation_amount = last_inflation_amount + control; + let new_inflation_amount = if new_inflation_amount.is_negative() { + Uint::zero() + } else { + new_inflation_amount + .to_uint() + .expect("Should not fail to convert Dec to Uint") + }; + + let max_inflation = self.get_max_inflation(); + std::cmp::min(new_inflation_amount, max_inflation) + } + + // NOTE: This formula is the comactification of all the old intermediate + // computations that were done in multiple steps (as in the specs) + fn compute_control(&self, coeff: Dec, current_metric: Dec) -> Dec { + let val = current_metric * (self.d_gain_nom - self.p_gain_nom) + + (self.target_metric * self.p_gain_nom) + - (self.last_metric * self.d_gain_nom); + coeff * val + } +} diff --git a/crates/core/Cargo.toml b/crates/core/Cargo.toml index f6a7a4ff5f..14f29989d8 100644 --- a/crates/core/Cargo.toml +++ b/crates/core/Cargo.toml @@ -29,6 +29,7 @@ migrations = [ "namada_migrations", "linkme", ] +benches = ["proptest"] [dependencies] namada_macros = {path = "../macros"} @@ -47,6 +48,7 @@ ibc.workspace = true ics23.workspace = true impl-num-traits = "0.1.2" index-set.workspace = true +indexmap.workspace = true k256.workspace = true 
linkme = {workspace = true, optional = true} masp_primitives.workspace = true diff --git a/crates/core/src/account.rs b/crates/core/src/account.rs index 3d0eedf35a..66fb9094fa 100644 --- a/crates/core/src/account.rs +++ b/crates/core/src/account.rs @@ -1,6 +1,6 @@ //! Account types -use std::collections::{BTreeMap, HashMap}; +use std::collections::BTreeMap; use borsh::{BorshDeserialize, BorshSerialize}; use namada_macros::BorshDeserializer; @@ -9,6 +9,7 @@ use namada_migrations::*; use serde::{Deserialize, Serialize}; use super::key::{common, RefTo}; +use crate::collections::HashMap; use crate::hints; #[derive( diff --git a/crates/core/src/address.rs b/crates/core/src/address.rs index 71d66ac664..b4427a8bd5 100644 --- a/crates/core/src/address.rs +++ b/crates/core/src/address.rs @@ -8,7 +8,6 @@ use std::hash::Hash; use std::str::FromStr; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; -use borsh_ext::BorshSerializeExt; use data_encoding::HEXUPPER; use namada_macros::BorshDeserializer; #[cfg(feature = "migrations")] @@ -60,12 +59,18 @@ pub const POS_SLASH_POOL: Address = Address::Internal(InternalAddress::PosSlashPool); /// Internal Governance address pub const GOV: Address = Address::Internal(InternalAddress::Governance); +/// Internal Public Goods funding address +pub const PGF: Address = Address::Internal(InternalAddress::Pgf); /// Internal MASP address pub const MASP: Address = Address::Internal(InternalAddress::Masp); /// Internal Multitoken address pub const MULTITOKEN: Address = Address::Internal(InternalAddress::Multitoken); /// Internal Eth bridge address pub const ETH_BRIDGE: Address = Address::Internal(InternalAddress::EthBridge); +/// Address with temporary storage is used to pass data from txs to VPs which is +/// never committed to DB +pub const TEMP_STORAGE: Address = + Address::Internal(InternalAddress::TempStorage); /// Error from decoding address from string pub type DecodeError = string_encoding::DecodeError; @@ -135,6 +140,9 @@ impl 
From> for Address { InternalAddress::IbcToken(IbcTokenHash(*raw_addr.data())), ), raw::Discriminant::Masp => Address::Internal(InternalAddress::Masp), + raw::Discriminant::TempStorage => { + Address::Internal(InternalAddress::TempStorage) + } } } } @@ -229,6 +237,11 @@ impl<'addr> From<&'addr Address> for raw::Address<'addr, raw::Validated> { .validate() .expect("This raw address is valid") } + Address::Internal(InternalAddress::TempStorage) => { + raw::Address::from_discriminant(raw::Discriminant::TempStorage) + .validate() + .expect("This raw address is valid") + } } } } @@ -303,6 +316,11 @@ impl Address { pub fn is_implicit(&self) -> bool { matches!(self, Address::Implicit(_)) } + + /// If the address internal? + pub fn is_internal(&self) -> bool { + matches!(self, Address::Internal(_)) + } } impl string_encoding::Format for Address { @@ -472,14 +490,14 @@ impl EstablishedAddressGen { &mut self, rng_source: impl AsRef<[u8]>, ) -> Address { - let gen_bytes = self.serialize_to_vec(); - let bytes = [&gen_bytes, rng_source.as_ref()].concat(); - let full_hash = Sha256::digest(&bytes); - // take first 20 bytes of the hash - let mut hash: [u8; HASH_LEN] = Default::default(); - hash.copy_from_slice(&full_hash[..HASH_LEN]); - self.last_hash = full_hash.into(); - Address::Established(EstablishedAddress { hash }) + self.last_hash = { + let mut hasher_state = Sha256::new(); + hasher_state.update(self.last_hash); + hasher_state.update(rng_source); + hasher_state.finalize() + } + .into(); + Address::Established(self.last_hash.into()) } } @@ -557,6 +575,9 @@ pub enum InternalAddress { Pgf, /// Masp Masp, + /// Address with temporary storage is used to pass data from txs to VPs + /// which is never committed to DB + TempStorage, } impl Display for InternalAddress { @@ -578,6 +599,7 @@ impl Display for InternalAddress { Self::Multitoken => "Multitoken".to_string(), Self::Pgf => "PublicGoodFundings".to_string(), Self::Masp => "MASP".to_string(), + Self::TempStorage => 
"TempStorage".to_string(), } ) } @@ -600,6 +622,7 @@ impl InternalAddress { #[cfg(test)] pub mod tests { + use borsh_ext::BorshSerializeExt; use proptest::prelude::*; use super::*; @@ -666,17 +689,13 @@ pub fn gen_established_address(seed: impl AsRef) -> Address { use rand::prelude::ThreadRng; use rand::{thread_rng, RngCore}; - let mut key_gen = EstablishedAddressGen::new(seed); + EstablishedAddressGen::new(seed).generate_address({ + let mut thread_local_rng: ThreadRng = thread_rng(); + let mut buffer = [0u8; 32]; - let mut rng: ThreadRng = thread_rng(); - let mut rng_bytes = [0u8; 32]; - rng.fill_bytes(&mut rng_bytes[..]); - let rng_source = rng_bytes - .iter() - .map(|b| format!("{:02X}", b)) - .collect::>() - .join(""); - key_gen.generate_address(rng_source) + thread_local_rng.fill_bytes(&mut buffer[..]); + buffer + }) } /// Generate a new established address. Unlike `gen_established_address`, this @@ -687,13 +706,12 @@ pub fn gen_deterministic_established_address(seed: impl AsRef) -> Address { } /// Helpers for testing with addresses. -#[cfg(any(test, feature = "testing"))] +#[cfg(any(test, feature = "testing", feature = "benches"))] pub mod testing { - use std::collections::HashMap; - use proptest::prelude::*; use super::*; + use crate::collections::HashMap; use crate::key::*; use crate::token::Denomination; @@ -802,8 +820,9 @@ pub mod testing { InternalAddress::Nut(_) => {} InternalAddress::Pgf => {} InternalAddress::Masp => {} - InternalAddress::Multitoken => {} /* Add new addresses in the - * `prop_oneof` below. */ + InternalAddress::Multitoken => {} + InternalAddress::TempStorage => {} /* Add new addresses in the + * `prop_oneof` below. 
*/ }; prop_oneof![ Just(InternalAddress::PoS), @@ -819,6 +838,7 @@ pub mod testing { Just(InternalAddress::Multitoken), Just(InternalAddress::Pgf), Just(InternalAddress::Masp), + Just(InternalAddress::TempStorage), ] } diff --git a/crates/core/src/address/raw.rs b/crates/core/src/address/raw.rs index d975d219f9..c252559655 100644 --- a/crates/core/src/address/raw.rs +++ b/crates/core/src/address/raw.rs @@ -65,6 +65,8 @@ pub enum Discriminant { IbcToken = 13, /// MASP raw address. Masp = 14, + /// Temporary storage address. + TempStorage = 15, } /// Raw address representation. diff --git a/crates/core/src/booleans.rs b/crates/core/src/booleans.rs new file mode 100644 index 0000000000..8b96e2a2aa --- /dev/null +++ b/crates/core/src/booleans.rs @@ -0,0 +1,62 @@ +//! Boolean related functionality. + +/// Extend [`bool`] values with the possibility to create +/// [`Result`] values of unit and some error type. +pub trait BoolResultUnitExt { + /// Return `Ok(())` if true, or `error` if false. + fn ok_or(self, error: E) -> Result<(), E>; + + /// Return `Ok(())` if true, or the value returned by + /// `handle_err` if false. + fn ok_or_else(self, handle_err: F) -> Result<(), E> + where + F: FnOnce() -> E; +} + +impl BoolResultUnitExt for bool { + #[inline] + fn ok_or(self, error: E) -> Result<(), E> { + if self { Ok(()) } else { Err(error) } + } + + #[inline] + fn ok_or_else(self, handle_err: F) -> Result<(), E> + where + F: FnOnce() -> E, + { + if self { Ok(()) } else { Err(handle_err()) } + } +} + +/// Extend [`Result`] of [`bool`] values with the possibility to +/// create [`Result`] values of unit and some error type. +pub trait ResultBoolExt { + /// Return `Ok(())` if `Ok(true)`, `Err(error)` if `Ok(false)` + /// or pass back the error if `Err(_)`. + fn true_or(self, error: E) -> Result<(), E>; + + /// Return `Ok(())` if `Ok(true)`, `Err(handle_err())` if `Ok(false)` + /// or pass back the error if `Err(_)`. 
+ fn true_or_else(self, handle_err: F) -> Result<(), E> + where + F: FnOnce() -> E; +} + +impl ResultBoolExt for Result { + /// Return `Ok(())` if `Ok(true)`, `Err(error)` if `Ok(false)` + /// or pass back the error if `Err(_)`. + #[inline] + fn true_or(self, error: E) -> Result<(), E> { + self.and_then(|ok| ok.ok_or(error)) + } + + /// Return `Ok(())` if `Ok(true)`, `Err(handle_err())` if `Ok(false)` + /// or pass back the error if `Err(_)`. + #[inline] + fn true_or_else(self, handle_err: F) -> Result<(), E> + where + F: FnOnce() -> E, + { + self.and_then(|ok| ok.ok_or_else(handle_err)) + } +} diff --git a/crates/core/src/ethereum_events.rs b/crates/core/src/ethereum_events.rs index 61d42c6f15..ec6984ac42 100644 --- a/crates/core/src/ethereum_events.rs +++ b/crates/core/src/ethereum_events.rs @@ -453,7 +453,7 @@ pub mod tests { #[allow(missing_docs)] /// Test helpers -#[cfg(any(test, feature = "testing"))] +#[cfg(any(test, feature = "testing", feature = "benches"))] pub mod testing { use proptest::prop_compose; diff --git a/crates/core/src/event.rs b/crates/core/src/event.rs index 3ae864aa7b..a49cd0f491 100644 --- a/crates/core/src/event.rs +++ b/crates/core/src/event.rs @@ -1,6 +1,7 @@ //! Ledger events -use std::collections::HashMap; +pub mod extend; + use std::fmt::{self, Display}; use std::ops::{Index, IndexMut}; use std::str::FromStr; @@ -11,18 +12,40 @@ use namada_migrations::*; use thiserror::Error; use crate::borsh::{BorshDeserialize, BorshSerialize}; +use crate::collections::HashMap; use crate::ethereum_structs::{BpTransferStatus, EthBridgeEvent}; use crate::ibc::IbcEvent; /// Used in sub-systems that may emit events. pub trait EmitEvents { - /// Emit an event - fn emit(&mut self, value: Event); + /// Emit a single [event](Event). + fn emit(&mut self, event: E) + where + E: Into; + + /// Emit a batch of [events](Event). 
+ fn emit_many(&mut self, event_batch: B) + where + B: IntoIterator, + E: Into; } impl EmitEvents for Vec { - fn emit(&mut self, value: Event) { - Vec::push(self, value) + #[inline] + fn emit(&mut self, event: E) + where + E: Into, + { + self.push(event.into()); + } + + /// Emit a batch of [events](Event). + fn emit_many(&mut self, event_batch: B) + where + B: IntoIterator, + E: Into, + { + self.extend(event_batch.into_iter().map(Into::into)); } } @@ -44,6 +67,19 @@ pub enum EventLevel { Tx, } +impl Display for EventLevel { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + f, + "{}", + match self { + EventLevel::Block => "block", + EventLevel::Tx => "tx", + } + ) + } +} + /// Custom events that can be queried from Tendermint /// using a websocket client #[derive( @@ -65,6 +101,13 @@ pub struct Event { pub attributes: HashMap, } +impl Display for Event { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + // TODO: print attributes, too + write!(f, "{} in {}", self.event_type, self.level) + } +} + /// The two types of custom events we currently use #[derive( Clone, @@ -76,11 +119,10 @@ pub struct Event { BorshDeserializer, )] pub enum EventType { - /// The transaction was accepted to be included in a block - Accepted, /// The transaction was applied during block finalization Applied, /// The IBC transaction was applied during block finalization + // TODO: create type-safe wrapper for all ibc event kinds Ibc(String), /// The proposal that has been executed Proposal, @@ -93,7 +135,6 @@ pub enum EventType { impl Display for EventType { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { - EventType::Accepted => write!(f, "accepted"), EventType::Applied => write!(f, "applied"), EventType::Ibc(t) => write!(f, "{}", t), EventType::Proposal => write!(f, "proposal"), @@ -109,16 +150,16 @@ impl FromStr for EventType { fn from_str(s: &str) -> Result { match s { - "accepted" => Ok(EventType::Accepted), "applied" => 
Ok(EventType::Applied), "proposal" => Ok(EventType::Proposal), "pgf_payments" => Ok(EventType::PgfPayment), - // IBC + // "update_client" => Ok(EventType::Ibc("update_client".to_string())), "send_packet" => Ok(EventType::Ibc("send_packet".to_string())), "write_acknowledgement" => { Ok(EventType::Ibc("write_acknowledgement".to_string())) } + // "ethereum_bridge" => Ok(EventType::EthereumBridge), _ => Err(EventError::InvalidEventType), } @@ -143,6 +184,15 @@ pub enum EventError { } impl Event { + /// Create an applied tx event with empty attributes. + pub fn applied_tx() -> Self { + Self { + event_type: EventType::Applied, + level: EventLevel::Tx, + attributes: HashMap::new(), + } + } + /// Check if the events keys contains a given string pub fn contains_key(&self, key: &str) -> bool { self.attributes.contains_key(key) @@ -153,6 +203,16 @@ impl Event { pub fn get(&self, key: &str) -> Option<&String> { self.attributes.get(key) } + + /// Extend this [`Event`] with additional data. + #[inline] + pub fn extend(&mut self, data: DATA) -> &mut Self + where + DATA: extend::ExtendEvent, + { + data.extend_event(self); + self + } } impl From for Event { @@ -196,10 +256,8 @@ impl Index<&str> for Event { impl IndexMut<&str> for Event { fn index_mut(&mut self, index: &str) -> &mut Self::Output { - if !self.attributes.contains_key(index) { - self.attributes.insert(String::from(index), String::new()); - } - self.attributes.get_mut(index).unwrap() + let entry = self.attributes.entry(index.into()).or_default(); + &mut *entry } } diff --git a/crates/core/src/event/extend.rs b/crates/core/src/event/extend.rs new file mode 100644 index 0000000000..768090eb56 --- /dev/null +++ b/crates/core/src/event/extend.rs @@ -0,0 +1,180 @@ +//! Extend [events](Event) with additional fields. + +use super::*; +use crate::hash::Hash; +use crate::storage::BlockHeight; + +/// Provides event composition routines. +pub trait ComposeEvent { + /// Compose an [event](Event) with new data. 
+ fn with(self, data: NEW) -> CompositeEvent + where + Self: Sized; +} + +impl ComposeEvent for E +where + E: Into, +{ + #[inline(always)] + fn with(self, data: NEW) -> CompositeEvent { + CompositeEvent::new(self, data) + } +} + +/// Event composed of various other event extensions. +#[derive(Clone, Debug)] +pub struct CompositeEvent { + base_event: E, + data: DATA, +} + +impl CompositeEvent { + /// Create a new composed event. + pub const fn new(base_event: E, data: DATA) -> Self { + Self { base_event, data } + } +} + +impl From> for Event +where + E: Into, + DATA: ExtendEvent, +{ + #[inline] + fn from(composite: CompositeEvent) -> Event { + let CompositeEvent { base_event, data } = composite; + + let mut base_event = base_event.into(); + data.extend_event(&mut base_event); + + base_event + } +} + +/// Extend an [event](Event) with additional fields. +pub trait ExtendEvent { + /// Add additional fields to the specified `event`. + fn extend_event(self, event: &mut Event); +} + +/// Leaves an [`Event`] as is. +pub struct WithNoOp; + +impl ExtendEvent for WithNoOp { + #[inline] + fn extend_event(self, _: &mut Event) {} +} + +/// Extend an [`Event`] with block height information. +pub struct Height(pub BlockHeight); + +impl ExtendEvent for Height { + #[inline] + fn extend_event(self, event: &mut Event) { + let Self(height) = self; + event["height"] = height.to_string(); + } +} + +/// Extend an [`Event`] with transaction hash information. +pub struct TxHash(pub Hash); + +impl ExtendEvent for TxHash { + #[inline] + fn extend_event(self, event: &mut Event) { + let Self(hash) = self; + event["hash"] = hash.to_string(); + } +} + +/// Extend an [`Event`] with log data. +pub struct Log(pub String); + +impl ExtendEvent for Log { + #[inline] + fn extend_event(self, event: &mut Event) { + let Self(log) = self; + event["log"] = log; + } +} + +/// Extend an [`Event`] with info data. 
+pub struct Info(pub String); + +impl ExtendEvent for Info { + #[inline] + fn extend_event(self, event: &mut Event) { + let Self(info) = self; + event["info"] = info; + } +} + +/// Extend an [`Event`] with `is_valid_masp_tx` data. +pub struct ValidMaspTx(pub usize); + +impl ExtendEvent for ValidMaspTx { + #[inline] + fn extend_event(self, event: &mut Event) { + let Self(masp_tx_index) = self; + event["is_valid_masp_tx"] = masp_tx_index.to_string(); + } +} + +#[cfg(test)] +mod event_composition_tests { + use super::*; + + #[test] + fn test_event_compose_basic() { + let expected_attrs = { + let mut attrs = HashMap::new(); + attrs.insert("log".to_string(), "this is sparta!".to_string()); + attrs.insert("height".to_string(), "300".to_string()); + attrs.insert("hash".to_string(), Hash::default().to_string()); + attrs + }; + + let base_event: Event = Event::applied_tx() + .with(Log("this is sparta!".to_string())) + .with(Height(300.into())) + .with(TxHash(Hash::default())) + .into(); + + assert_eq!(base_event.attributes, expected_attrs); + } + + #[test] + fn test_event_compose_repeated() { + let expected_attrs = { + let mut attrs = HashMap::new(); + attrs.insert("log".to_string(), "dejavu".to_string()); + attrs + }; + + let base_event: Event = Event::applied_tx() + .with(Log("dejavu".to_string())) + .with(Log("dejavu".to_string())) + .with(Log("dejavu".to_string())) + .into(); + + assert_eq!(base_event.attributes, expected_attrs); + } + + #[test] + fn test_event_compose_last_one_kept() { + let expected_attrs = { + let mut attrs = HashMap::new(); + attrs.insert("log".to_string(), "last".to_string()); + attrs + }; + + let base_event: Event = Event::applied_tx() + .with(Log("fist".to_string())) + .with(Log("second".to_string())) + .with(Log("last".to_string())) + .into(); + + assert_eq!(base_event.attributes, expected_attrs); + } +} diff --git a/crates/core/src/ibc.rs b/crates/core/src/ibc.rs index 42d97f014d..1b880b779d 100644 --- a/crates/core/src/ibc.rs +++ 
b/crates/core/src/ibc.rs @@ -1,12 +1,10 @@ //! IBC-related data types use std::cmp::Ordering; -use std::collections::HashMap; use std::str::FromStr; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; -use borsh_ext::BorshSerializeExt; -use data_encoding::{DecodePartial, HEXLOWER, HEXLOWER_PERMISSIVE, HEXUPPER}; +use data_encoding::{DecodePartial, HEXLOWER, HEXLOWER_PERMISSIVE}; pub use ibc::*; use namada_macros::BorshDeserializer; #[cfg(feature = "migrations")] @@ -14,21 +12,39 @@ use namada_migrations::*; use serde::{Deserialize, Serialize}; use thiserror::Error; -use super::address::HASH_LEN; -use crate::ibc::apps::transfer::types::msgs::transfer::MsgTransfer; -use crate::ibc::apps::transfer::types::{Memo, PrefixedDenom, TracePath}; +use super::address::{Address, InternalAddress, HASH_LEN}; +use crate::collections::HashMap; +use crate::ibc::apps::nft_transfer::context::{NftClassContext, NftContext}; +use crate::ibc::apps::nft_transfer::types::error::NftTransferError; +use crate::ibc::apps::nft_transfer::types::msgs::transfer::MsgTransfer as IbcMsgNftTransfer; +use crate::ibc::apps::nft_transfer::types::{ + ClassData, ClassId, ClassUri, PrefixedClassId, TokenData, TokenId, + TokenUri, TracePath as NftTracePath, +}; +use crate::ibc::apps::transfer::types::msgs::transfer::MsgTransfer as IbcMsgTransfer; +use crate::ibc::apps::transfer::types::{PrefixedDenom, TracePath}; +use crate::ibc::core::channel::types::msgs::{ + MsgAcknowledgement as IbcMsgAcknowledgement, + MsgRecvPacket as IbcMsgRecvPacket, MsgTimeout as IbcMsgTimeout, +}; use crate::ibc::core::handler::types::events::{ Error as IbcEventError, IbcEvent as RawIbcEvent, }; +use crate::ibc::core::handler::types::msgs::MsgEnvelope; use crate::ibc::primitives::proto::Protobuf; -use crate::masp::PaymentAddress; use crate::tendermint::abci::Event as AbciEvent; use crate::token::Transfer; /// The event type defined in ibc-rs for receiving a token pub const EVENT_TYPE_PACKET: &str = "fungible_token_packet"; 
-/// The event type defined in ibc-rs for IBC denom -pub const EVENT_TYPE_DENOM_TRACE: &str = "denomination_trace"; +/// The event type defined in ibc-rs for receiving an NFT +pub const EVENT_TYPE_NFT_PACKET: &str = "non_fungible_token_packet"; +/// The event attribute key defined in ibc-rs for receiving result +pub const EVENT_ATTRIBUTE_SUCCESS: &str = "success"; +/// The event attribute value defined in ibc-rs for receiving success +pub const EVENT_VALUE_SUCCESS: &str = "true"; +/// The escrow address for IBC transfer +pub const IBC_ESCROW_ADDRESS: Address = Address::Internal(InternalAddress::Ibc); /// IBC token hash derived from a denomination. #[derive( @@ -115,39 +131,85 @@ impl std::fmt::Display for IbcEvent { } } -/// IBC transfer message to send from a shielded address +/// The different variants of an Ibc message +pub enum IbcMessage { + /// Ibc Envelop + Envelope(Box), + /// Ibc transaprent transfer + Transfer(MsgTransfer), + /// NFT transfer + NftTransfer(MsgNftTransfer), + /// Receiving a packet + RecvPacket(MsgRecvPacket), + /// Acknowledgement + AckPacket(MsgAcknowledgement), + /// Timeout + Timeout(MsgTimeout), +} + +/// IBC transfer message with `Transfer` #[derive(Debug, Clone)] -pub struct MsgShieldedTransfer { +pub struct MsgTransfer { /// IBC transfer message - pub message: MsgTransfer, - /// MASP tx with token transfer - pub shielded_transfer: IbcShieldedTransfer, + pub message: IbcMsgTransfer, + /// Shieleded transfer for MASP transaction + pub transfer: Option, } -impl BorshSerialize for MsgShieldedTransfer { +impl BorshSerialize for MsgTransfer { fn serialize( &self, writer: &mut W, ) -> std::io::Result<()> { let encoded_msg = self.message.clone().encode_vec(); - let members = (encoded_msg, self.shielded_transfer.clone()); + let members = (encoded_msg, self.transfer.clone()); BorshSerialize::serialize(&members, writer) } } -impl BorshDeserialize for MsgShieldedTransfer { +impl BorshDeserialize for MsgTransfer { fn deserialize_reader( reader: 
&mut R, ) -> std::io::Result { use std::io::{Error, ErrorKind}; - let (msg, shielded_transfer): (Vec, IbcShieldedTransfer) = + let (msg, transfer): (Vec, Option) = BorshDeserialize::deserialize_reader(reader)?; - let message = MsgTransfer::decode_vec(&msg) + let message = IbcMsgTransfer::decode_vec(&msg) .map_err(|err| Error::new(ErrorKind::InvalidData, err))?; - Ok(Self { - message, - shielded_transfer, - }) + Ok(Self { message, transfer }) + } +} + +/// IBC NFT transfer message with `Transfer` +#[derive(Debug, Clone)] +pub struct MsgNftTransfer { + /// IBC NFT transfer message + pub message: IbcMsgNftTransfer, + /// Shieleded transfer for MASP transaction + pub transfer: Option, +} + +impl BorshSerialize for MsgNftTransfer { + fn serialize( + &self, + writer: &mut W, + ) -> std::io::Result<()> { + let encoded_msg = self.message.clone().encode_vec(); + let members = (encoded_msg, self.transfer.clone()); + BorshSerialize::serialize(&members, writer) + } +} + +impl BorshDeserialize for MsgNftTransfer { + fn deserialize_reader( + reader: &mut R, + ) -> std::io::Result { + use std::io::{Error, ErrorKind}; + let (msg, transfer): (Vec, Option) = + BorshDeserialize::deserialize_reader(reader)?; + let message = IbcMsgNftTransfer::decode_vec(&msg) + .map_err(|err| Error::new(ErrorKind::InvalidData, err))?; + Ok(Self { message, transfer }) } } @@ -160,6 +222,107 @@ pub struct IbcShieldedTransfer { pub masp_tx: masp_primitives::transaction::Transaction, } +/// IBC receiving packet message with `Transfer` +#[derive(Debug, Clone)] +pub struct MsgRecvPacket { + /// IBC receiving packet message + pub message: IbcMsgRecvPacket, + /// Shieleded transfer for MASP transaction + pub transfer: Option, +} + +impl BorshSerialize for MsgRecvPacket { + fn serialize( + &self, + writer: &mut W, + ) -> std::io::Result<()> { + let encoded_msg = self.message.clone().encode_vec(); + let members = (encoded_msg, self.transfer.clone()); + BorshSerialize::serialize(&members, writer) + } +} + +impl 
BorshDeserialize for MsgRecvPacket { + fn deserialize_reader( + reader: &mut R, + ) -> std::io::Result { + use std::io::{Error, ErrorKind}; + let (msg, transfer): (Vec, Option) = + BorshDeserialize::deserialize_reader(reader)?; + let message = IbcMsgRecvPacket::decode_vec(&msg) + .map_err(|err| Error::new(ErrorKind::InvalidData, err))?; + Ok(Self { message, transfer }) + } +} + +/// IBC acknowledgement message with `Transfer` for refunding to a shielded +/// address +#[derive(Debug, Clone)] +pub struct MsgAcknowledgement { + /// IBC acknowledgement message + pub message: IbcMsgAcknowledgement, + /// Shieleded transfer for MASP transaction + pub transfer: Option, +} + +impl BorshSerialize for MsgAcknowledgement { + fn serialize( + &self, + writer: &mut W, + ) -> std::io::Result<()> { + let encoded_msg = self.message.clone().encode_vec(); + let members = (encoded_msg, self.transfer.clone()); + BorshSerialize::serialize(&members, writer) + } +} + +impl BorshDeserialize for MsgAcknowledgement { + fn deserialize_reader( + reader: &mut R, + ) -> std::io::Result { + use std::io::{Error, ErrorKind}; + let (msg, transfer): (Vec, Option) = + BorshDeserialize::deserialize_reader(reader)?; + let message = IbcMsgAcknowledgement::decode_vec(&msg) + .map_err(|err| Error::new(ErrorKind::InvalidData, err))?; + Ok(Self { message, transfer }) + } +} + +/// IBC timeout packet message with `Transfer` for refunding to a shielded +/// address +#[derive(Debug, Clone)] +pub struct MsgTimeout { + /// IBC timeout message + pub message: IbcMsgTimeout, + /// Shieleded transfer for MASP transaction + pub transfer: Option, +} + +impl BorshSerialize for MsgTimeout { + fn serialize( + &self, + writer: &mut W, + ) -> std::io::Result<()> { + let encoded_msg = self.message.clone().encode_vec(); + let members = (encoded_msg, self.transfer.clone()); + BorshSerialize::serialize(&members, writer) + } +} + +impl BorshDeserialize for MsgTimeout { + fn deserialize_reader( + reader: &mut R, + ) -> 
std::io::Result { + use std::io::{Error, ErrorKind}; + let (msg, transfer): (Vec, Option) = + BorshDeserialize::deserialize_reader(reader)?; + let message = IbcMsgTimeout::decode_vec(&msg) + .map_err(|err| Error::new(ErrorKind::InvalidData, err))?; + Ok(Self { message, transfer }) + } +} + #[allow(missing_docs)] #[derive(Error, Debug)] pub enum Error { @@ -206,47 +369,207 @@ pub fn is_ibc_denom(denom: impl AsRef) -> Option<(TracePath, String)> { )) } -impl From for Memo { - fn from(shielded: IbcShieldedTransfer) -> Self { - let bytes = shielded.serialize_to_vec(); - HEXUPPER.encode(&bytes).into() +/// Returns the trace path and the token string if the trace is an NFT one +pub fn is_nft_trace( + trace: impl AsRef, +) -> Option<(NftTracePath, String, String)> { + // The trace should be {port}/{channel}/.../{class_id}/{token_id} + if let Some((class_id, token_id)) = trace.as_ref().rsplit_once('/') { + let prefixed_class_id = PrefixedClassId::from_str(class_id).ok()?; + // The base token isn't decoded because it could be non Namada token + Some(( + prefixed_class_id.trace_path, + prefixed_class_id.base_class_id.to_string(), + token_id.to_string(), + )) + } else { + None } } -impl TryFrom for IbcShieldedTransfer { - type Error = Error; +/// NFT class +#[derive(Clone, Debug)] +pub struct NftClass { + /// NFT class ID + pub class_id: PrefixedClassId, + /// NFT class URI + pub class_uri: Option, + /// NFT class data + pub class_data: Option, +} - fn try_from(memo: Memo) -> Result { - let bytes = HEXUPPER - .decode(memo.as_ref().as_bytes()) - .map_err(Error::DecodingHex)?; - Self::try_from_slice(&bytes).map_err(Error::DecodingShieldedTransfer) +impl BorshSerialize for NftClass { + fn serialize( + &self, + writer: &mut W, + ) -> std::io::Result<()> { + BorshSerialize::serialize(&self.class_id.to_string(), writer)?; + match &self.class_uri { + Some(uri) => { + BorshSerialize::serialize(&true, writer)?; + BorshSerialize::serialize(&uri.to_string(), writer)?; + } + None => 
BorshSerialize::serialize(&false, writer)?, + } + match &self.class_data { + Some(data) => { + BorshSerialize::serialize(&true, writer)?; + BorshSerialize::serialize(&data.to_string(), writer) + } + None => BorshSerialize::serialize(&false, writer), + } } } -/// Get the shielded transfer from the memo -pub fn get_shielded_transfer( - event: &IbcEvent, -) -> Result> { - if event.event_type != EVENT_TYPE_PACKET { - // This event is not for receiving a token - return Ok(None); +impl BorshDeserialize for NftClass { + fn deserialize_reader( + reader: &mut R, + ) -> std::io::Result { + use std::io::{Error, ErrorKind}; + let class_id: String = BorshDeserialize::deserialize_reader(reader)?; + let class_id = class_id.parse().map_err(|e: NftTransferError| { + Error::new(ErrorKind::InvalidData, e.to_string()) + })?; + + let is_uri: bool = BorshDeserialize::deserialize_reader(reader)?; + let class_uri = if is_uri { + let uri_str: String = BorshDeserialize::deserialize_reader(reader)?; + Some(uri_str.parse().map_err(|e: NftTransferError| { + Error::new(ErrorKind::InvalidData, e.to_string()) + })?) + } else { + None + }; + + let is_data: bool = BorshDeserialize::deserialize_reader(reader)?; + let class_data = if is_data { + let data_str: String = + BorshDeserialize::deserialize_reader(reader)?; + Some(data_str.parse().map_err(|e: NftTransferError| { + Error::new(ErrorKind::InvalidData, e.to_string()) + })?) 
+ } else { + None + }; + + Ok(Self { + class_id, + class_uri, + class_data, + }) } - let is_success = - event.attributes.get("success") == Some(&"true".to_string()); - let receiver = event.attributes.get("receiver"); - let is_shielded = if let Some(receiver) = receiver { - PaymentAddress::from_str(receiver).is_ok() - } else { - false - }; - if !is_success || !is_shielded { - return Ok(None); +} + +impl NftClassContext for NftClass { + fn get_id(&self) -> &ClassId { + &self.class_id.base_class_id + } + + fn get_uri(&self) -> Option<&ClassUri> { + self.class_uri.as_ref() + } + + fn get_data(&self) -> Option<&ClassData> { + self.class_data.as_ref() + } +} + +/// NFT metadata +#[derive(Clone, Debug)] +pub struct NftMetadata { + /// NFT class ID + pub class_id: PrefixedClassId, + /// NFT ID + pub token_id: TokenId, + /// NFT URI + pub token_uri: Option, + /// NFT data + pub token_data: Option, +} + +impl BorshSerialize for NftMetadata { + fn serialize( + &self, + writer: &mut W, + ) -> std::io::Result<()> { + BorshSerialize::serialize(&self.class_id.to_string(), writer)?; + BorshSerialize::serialize(&self.token_id.to_string(), writer)?; + match &self.token_uri { + Some(uri) => { + BorshSerialize::serialize(&true, writer)?; + BorshSerialize::serialize(&uri.to_string(), writer)?; + } + None => BorshSerialize::serialize(&false, writer)?, + } + match &self.token_data { + Some(data) => { + BorshSerialize::serialize(&true, writer)?; + BorshSerialize::serialize(&data.to_string(), writer) + } + None => BorshSerialize::serialize(&false, writer), + } } +} - event - .attributes - .get("memo") - .map(|memo| IbcShieldedTransfer::try_from(Memo::from(memo.clone()))) - .transpose() +impl BorshDeserialize for NftMetadata { + fn deserialize_reader( + reader: &mut R, + ) -> std::io::Result { + use std::io::{Error, ErrorKind}; + let class_id: String = BorshDeserialize::deserialize_reader(reader)?; + let class_id = class_id.parse().map_err(|e: NftTransferError| { + 
Error::new(ErrorKind::InvalidData, e.to_string()) + })?; + + let token_id: String = BorshDeserialize::deserialize_reader(reader)?; + let token_id = token_id.parse().map_err(|e: NftTransferError| { + Error::new(ErrorKind::InvalidData, e.to_string()) + })?; + + let is_uri: bool = BorshDeserialize::deserialize_reader(reader)?; + let token_uri = if is_uri { + let uri_str: String = BorshDeserialize::deserialize_reader(reader)?; + Some(uri_str.parse().map_err(|e: NftTransferError| { + Error::new(ErrorKind::InvalidData, e.to_string()) + })?) + } else { + None + }; + + let is_data: bool = BorshDeserialize::deserialize_reader(reader)?; + let token_data = if is_data { + let data_str: String = + BorshDeserialize::deserialize_reader(reader)?; + Some(data_str.parse().map_err(|e: NftTransferError| { + Error::new(ErrorKind::InvalidData, e.to_string()) + })?) + } else { + None + }; + + Ok(Self { + class_id, + token_id, + token_uri, + token_data, + }) + } +} + +impl NftContext for NftMetadata { + fn get_class_id(&self) -> &ClassId { + &self.class_id.base_class_id + } + + fn get_id(&self) -> &TokenId { + &self.token_id + } + + fn get_uri(&self) -> Option<&TokenUri> { + self.token_uri.as_ref() + } + + fn get_data(&self) -> Option<&TokenData> { + self.token_data.as_ref() + } } diff --git a/crates/core/src/internal.rs b/crates/core/src/internal.rs index 3399823f4b..5b6cca8440 100644 --- a/crates/core/src/internal.rs +++ b/crates/core/src/internal.rs @@ -39,6 +39,23 @@ impl HostEnvResult { pub fn is_fail(int: i64) -> bool { int == Self::Fail.to_i64() } + + /// Expect [`HostEnvResult::Success`]. + pub fn success_or_else(int: i64, or_else: F) -> Result<(), E> + where + F: FnOnce() -> E, + { + if Self::is_success(int) { + Ok(()) + } else { + Err(or_else()) + } + } + + /// Expect [`HostEnvResult::Success`]. 
+ pub fn success_or(int: i64, or_else: E) -> Result<(), E> { + Self::success_or_else(int, || or_else) + } } impl From for HostEnvResult { diff --git a/crates/core/src/key/common.rs b/crates/core/src/key/common.rs index f73e99a3e8..a88132b010 100644 --- a/crates/core/src/key/common.rs +++ b/crates/core/src/key/common.rs @@ -36,13 +36,16 @@ use crate::{impl_display_and_from_str_via_format, string_encoding}; BorshDeserializer, BorshSchema, )] -pub enum PublicKey { +pub enum CommonPublicKey { /// Encapsulate Ed25519 public keys Ed25519(ed25519::PublicKey), /// Encapsulate Secp256k1 public keys Secp256k1(secp256k1::PublicKey), } +/// Public key +pub type PublicKey = CommonPublicKey; + const ED25519_PK_PREFIX: &str = "ED25519_PK_PREFIX"; const SECP256K1_PK_PREFIX: &str = "SECP256K1_PK_PREFIX"; @@ -311,13 +314,16 @@ impl FromStr for SecretKey { BorshDeserializer, BorshSchema, )] -pub enum Signature { +pub enum CommonSignature { /// Encapsulate Ed25519 signatures Ed25519(ed25519::Signature), /// Encapsulate Secp256k1 signatures Secp256k1(secp256k1::Signature), } +/// Signature +pub type Signature = CommonSignature; + impl string_encoding::Format for Signature { type EncodedBytes<'a> = Vec; diff --git a/crates/core/src/key/mod.rs b/crates/core/src/key/mod.rs index 23af1c05a6..fdfe0195d5 100644 --- a/crates/core/src/key/mod.rs +++ b/crates/core/src/key/mod.rs @@ -487,7 +487,7 @@ impl SignableBytes for &crate::keccak::KeccakHash { } /// Helpers for testing with keys. 
-#[cfg(any(test, feature = "testing"))] +#[cfg(any(test, feature = "testing", feature = "benches"))] pub mod testing { use proptest::prelude::*; use rand::prelude::{StdRng, ThreadRng}; diff --git a/crates/core/src/ledger/mod.rs b/crates/core/src/ledger/mod.rs new file mode 100644 index 0000000000..e69de29bb2 diff --git a/crates/core/src/lib.rs b/crates/core/src/lib.rs index 667aae67aa..0af456c454 100644 --- a/crates/core/src/lib.rs +++ b/crates/core/src/lib.rs @@ -17,8 +17,25 @@ pub mod borsh { pub use borsh_ext::*; } +#[allow(missing_docs)] +pub mod collections { + //! Re-exports of collection types. + + pub mod hash_map { + pub use indexmap::map::{IndexMap as HashMap, *}; + } + + pub mod hash_set { + pub use indexmap::set::{IndexSet as HashSet, *}; + } + + pub use hash_map::HashMap; + pub use hash_set::HashSet; +} + pub mod account; pub mod address; +pub mod booleans; pub mod chain; pub mod dec; pub mod eth_abi; diff --git a/crates/core/src/parameters.rs b/crates/core/src/parameters.rs index 38840e3fd3..f63cd10877 100644 --- a/crates/core/src/parameters.rs +++ b/crates/core/src/parameters.rs @@ -8,7 +8,6 @@ use namada_migrations::*; use super::address::Address; use super::chain::ProposalBytes; -use super::dec::Dec; use super::hash::Hash; use super::time::DurationSecs; use super::token; @@ -49,16 +48,14 @@ pub struct Parameters { pub epochs_per_year: u64, /// Maximum number of signature per transaction pub max_signatures_per_transaction: u8, - /// PoS staked ratio (read + write for every epoch) - pub staked_ratio: Dec, - /// PoS inflation amount from the last epoch (read + write for every epoch) - pub pos_inflation_amount: token::Amount, /// Fee unshielding gas limit pub fee_unshielding_gas_limit: u64, /// Fee unshielding descriptions limit pub fee_unshielding_descriptions_limit: u64, /// Map of the cost per gas unit for every token allowed for fee payment pub minimum_gas_price: BTreeMap, + /// Enable the native token transfer if it is true + pub 
is_native_token_transferable: bool, } /// Epoch duration. A new epoch begins as soon as both the `min_num_of_blocks` diff --git a/crates/core/src/storage.rs b/crates/core/src/storage.rs index 51f5ce3d53..5023ff9f4a 100644 --- a/crates/core/src/storage.rs +++ b/crates/core/src/storage.rs @@ -95,6 +95,8 @@ pub enum DbColFam { STATE, /// Diffs DIFFS, + /// Diffs for rollback (only kept for 1 block) + ROLLBACK, /// Replay protection REPLAYPROT, } @@ -103,6 +105,8 @@ pub enum DbColFam { pub const SUBSPACE_CF: &str = "subspace"; /// Diffs column family name pub const DIFFS_CF: &str = "diffs"; +/// Diffs for rollback (only kept for 1 block) column family name +pub const ROLLBACK_CF: &str = "rollback"; /// State column family name pub const STATE_CF: &str = "state"; /// Block column family name @@ -118,6 +122,7 @@ impl DbColFam { DbColFam::BLOCK => BLOCK_CF, DbColFam::STATE => STATE_CF, DbColFam::DIFFS => DIFFS_CF, + DbColFam::ROLLBACK => ROLLBACK_CF, DbColFam::REPLAYPROT => REPLAY_PROTECTION_CF, } } @@ -130,6 +135,7 @@ impl FromStr for DbColFam { match s.to_lowercase().as_str() { SUBSPACE_CF => Ok(Self::SUBSPACE), DIFFS_CF => Ok(Self::DIFFS), + ROLLBACK_CF => Ok(Self::ROLLBACK), STATE_CF => Ok(Self::STATE), REPLAY_PROTECTION_CF => Ok(Self::REPLAYPROT), BLOCK_CF => Ok(Self::BLOCK), @@ -376,6 +382,11 @@ impl BlockHeight { pub fn prev_height(&self) -> BlockHeight { BlockHeight(self.0 - 1) } + + /// Get the height of the previous block if it won't underflow + pub fn checked_prev(&self) -> Option { + Some(BlockHeight(self.0.checked_sub(1)?)) + } } impl TryFrom<&[u8]> for BlockHash { @@ -653,6 +664,14 @@ impl Key { self.iter_addresses().cloned().collect() } + /// Returns the address from the first key segment if it's an address. 
+ pub fn fst_address(&self) -> Option<&Address> { + self.segments.first().and_then(|s| match s { + DbKeySeg::AddressSeg(addr) => Some(addr), + DbKeySeg::StringSeg(_) => None, + }) + } + /// Iterates over all addresses in the key segments pub fn iter_addresses<'k, 'this: 'k>( &'this self, @@ -1565,6 +1584,9 @@ pub struct IndexedTx { pub height: BlockHeight, /// The index in the block of the tx pub index: TxIndex, + /// A transaction can have up to two shielded transfers. + /// This indicates if the wrapper contained a shielded transfer. + pub is_wrapper: bool, } #[cfg(test)] diff --git a/crates/core/src/time.rs b/crates/core/src/time.rs index 2cffc8188f..38f07ccaf6 100644 --- a/crates/core/src/time.rs +++ b/crates/core/src/time.rs @@ -155,7 +155,10 @@ impl Display for DateTimeUtc { impl DateTimeUtc { /// Returns a DateTimeUtc which corresponds to the current date. pub fn now() -> Self { - Self(Utc::now()) + Self( + #[allow(clippy::disallowed_methods)] + Utc::now(), + ) } /// Returns a [`DateTimeUtc`] corresponding to the provided Unix timestamp. diff --git a/crates/core/src/validity_predicate.rs b/crates/core/src/validity_predicate.rs index 1ea8b3b494..3b5b25eb3a 100644 --- a/crates/core/src/validity_predicate.rs +++ b/crates/core/src/validity_predicate.rs @@ -1,36 +1,31 @@ //! Types that are used in validity predicates. -/// Sentinel used in validity predicates to signal events that require special -/// replay protection handling back to the protocol. 
-#[derive(Debug, Default)] -pub enum VpSentinel { - /// No action required - #[default] - None, - /// Exceeded gas limit - OutOfGas, - /// Found invalid transaction signature - InvalidSignature, -} +use thiserror::Error; -impl VpSentinel { - /// Check if the Vp ran out of gas - pub fn is_out_of_gas(&self) -> bool { - matches!(self, Self::OutOfGas) - } +use crate::borsh::{BorshDeserialize, BorshSerialize}; - /// Check if the Vp found an invalid signature - pub fn is_invalid_signature(&self) -> bool { - matches!(self, Self::InvalidSignature) - } +/// Helper trait for converting between result types. +pub trait VpErrorExtResult { + /// Convert to a [`Result`] with [`VpError`] errors. + fn into_vp_error(self) -> Result; +} - /// Set the sentinel for an out of gas error - pub fn set_out_of_gas(&mut self) { - *self = Self::OutOfGas +impl VpErrorExtResult for Result +where + E: core::fmt::Display, +{ + #[inline] + fn into_vp_error(self) -> Result { + self.map_err(|err| VpError::Erased(err.to_string())) } +} - /// Set the sentinel for an invalid signature error - pub fn set_invalid_signature(&mut self) { - *self = Self::InvalidSignature - } +/// Error result returned by validity predicates. 
+#[allow(missing_docs)] +#[derive(Debug, Error, BorshSerialize, BorshDeserialize)] +pub enum VpError { + #[error("Transaction rejected")] + Unspecified, + #[error("{0}")] + Erased(String), // type erased error } diff --git a/crates/encoding_spec/Cargo.toml b/crates/encoding_spec/Cargo.toml index a8ce18b607..487b7916da 100644 --- a/crates/encoding_spec/Cargo.toml +++ b/crates/encoding_spec/Cargo.toml @@ -14,6 +14,7 @@ version.workspace = true [features] default = [] +namada-eth-bridge = ["namada/namada-eth-bridge"] [dependencies] namada = { path = "../namada", features = ["rand", "tendermint-rpc", "download-params"] } diff --git a/crates/encoding_spec/src/main.rs b/crates/encoding_spec/src/main.rs index 70bb8db141..ea5858ae3f 100644 --- a/crates/encoding_spec/src/main.rs +++ b/crates/encoding_spec/src/main.rs @@ -15,7 +15,7 @@ #![deny(rustdoc::broken_intra_doc_links)] #![deny(rustdoc::private_intra_doc_links)] -use std::collections::{BTreeMap, HashSet}; +use std::collections::BTreeMap; use std::io::Write; use borsh::schema::{BorshSchemaContainer, Declaration, Definition}; @@ -25,6 +25,7 @@ use lazy_static::lazy_static; use madato::types::TableRow; use namada::account; use namada::core::address::Address; +use namada::core::collections::HashSet; use namada::core::key::ed25519::{PublicKey, Signature}; use namada::core::storage::{self, Epoch}; use namada::core::token; diff --git a/crates/ethereum_bridge/Cargo.toml b/crates/ethereum_bridge/Cargo.toml index c74d8a3e8f..281113b984 100644 --- a/crates/ethereum_bridge/Cargo.toml +++ b/crates/ethereum_bridge/Cargo.toml @@ -14,6 +14,7 @@ version.workspace = true [features] default = [] +namada-eth-bridge = [] testing = [ "namada_account", "namada_core/testing", diff --git a/crates/ethereum_bridge/src/protocol/transactions/bridge_pool_roots.rs b/crates/ethereum_bridge/src/protocol/transactions/bridge_pool_roots.rs index 088806bffa..a8dc6ede33 100644 --- a/crates/ethereum_bridge/src/protocol/transactions/bridge_pool_roots.rs +++ 
b/crates/ethereum_bridge/src/protocol/transactions/bridge_pool_roots.rs @@ -1,7 +1,6 @@ -use std::collections::{HashMap, HashSet}; - use eyre::Result; use namada_core::address::Address; +use namada_core::collections::{HashMap, HashSet}; use namada_core::keccak::keccak_hash; use namada_core::key::{common, SignableEthMessage}; use namada_core::storage::BlockHeight; diff --git a/crates/ethereum_bridge/src/protocol/transactions/ethereum_events/events.rs b/crates/ethereum_bridge/src/protocol/transactions/ethereum_events/events.rs index 818ec66f10..873a8e0375 100644 --- a/crates/ethereum_bridge/src/protocol/transactions/ethereum_events/events.rs +++ b/crates/ethereum_bridge/src/protocol/transactions/ethereum_events/events.rs @@ -1,11 +1,12 @@ //! Logic for acting on events -use std::collections::{BTreeSet, HashSet}; +use std::collections::BTreeSet; use std::str::FromStr; use borsh::BorshDeserialize; use eyre::{Result, WrapErr}; use namada_core::address::Address; +use namada_core::collections::HashSet; use namada_core::eth_abi::Encode; use namada_core::eth_bridge_pool::{ erc20_nut_address, erc20_token_address, PendingTransfer, @@ -355,7 +356,7 @@ where balance.spend(&pending_transfer.gas_fee.amount) })?; state.delete(&key)?; - _ = pending_keys.remove(&key); + _ = pending_keys.swap_remove(&key); _ = changed_keys.insert(key); _ = changed_keys.insert(pool_balance_key); _ = changed_keys.insert(relayer_rewards_key); @@ -568,11 +569,10 @@ where #[cfg(test)] mod tests { - use std::collections::HashMap; - use assert_matches::assert_matches; use namada_core::address::gen_established_address; use namada_core::address::testing::{gen_implicit_address, nam, wnam}; + use namada_core::collections::HashMap; use namada_core::eth_bridge_pool::GasFee; use namada_core::ethereum_events::testing::{ arbitrary_keccak_hash, arbitrary_nonce, DAI_ERC20_ETH_ADDRESS, diff --git a/crates/ethereum_bridge/src/protocol/transactions/ethereum_events/mod.rs 
b/crates/ethereum_bridge/src/protocol/transactions/ethereum_events/mod.rs index 99dec94cb4..6ad568aa3f 100644 --- a/crates/ethereum_bridge/src/protocol/transactions/ethereum_events/mod.rs +++ b/crates/ethereum_bridge/src/protocol/transactions/ethereum_events/mod.rs @@ -3,12 +3,13 @@ mod eth_msgs; mod events; -use std::collections::{BTreeSet, HashMap, HashSet}; +use std::collections::BTreeSet; use borsh::BorshDeserialize; use eth_msgs::EthMsgUpdate; use eyre::Result; use namada_core::address::Address; +use namada_core::collections::{HashMap, HashSet}; use namada_core::ethereum_events::EthereumEvent; use namada_core::ethereum_structs::EthBridgeEvent; use namada_core::key::common; diff --git a/crates/ethereum_bridge/src/protocol/transactions/utils.rs b/crates/ethereum_bridge/src/protocol/transactions/utils.rs index 5f57f11849..995df8125d 100644 --- a/crates/ethereum_bridge/src/protocol/transactions/utils.rs +++ b/crates/ethereum_bridge/src/protocol/transactions/utils.rs @@ -1,8 +1,9 @@ -use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet}; +use std::collections::{BTreeMap, BTreeSet}; use eyre::eyre; use itertools::Itertools; use namada_core::address::Address; +use namada_core::collections::{HashMap, HashSet}; use namada_core::storage::BlockHeight; use namada_core::token; use namada_proof_of_stake::pos_queries::PosQueries; diff --git a/crates/ethereum_bridge/src/protocol/transactions/validator_set_update/mod.rs b/crates/ethereum_bridge/src/protocol/transactions/validator_set_update/mod.rs index 13de4cd87a..de1f817705 100644 --- a/crates/ethereum_bridge/src/protocol/transactions/validator_set_update/mod.rs +++ b/crates/ethereum_bridge/src/protocol/transactions/validator_set_update/mod.rs @@ -1,9 +1,8 @@ //! Code for handling validator set update protocol txs. 
-use std::collections::{HashMap, HashSet}; - use eyre::Result; use namada_core::address::Address; +use namada_core::collections::{HashMap, HashSet}; use namada_core::key::common; use namada_core::storage::{BlockHeight, Epoch}; use namada_core::token::Amount; diff --git a/crates/ethereum_bridge/src/protocol/transactions/votes.rs b/crates/ethereum_bridge/src/protocol/transactions/votes.rs index 3e49cba9d3..91388206dd 100644 --- a/crates/ethereum_bridge/src/protocol/transactions/votes.rs +++ b/crates/ethereum_bridge/src/protocol/transactions/votes.rs @@ -1,11 +1,12 @@ //! Logic and data types relating to tallying validators' votes for pieces of //! data stored in the ledger, where those pieces of data should only be acted //! on once they have received enough votes -use std::collections::{BTreeMap, BTreeSet, HashMap}; +use std::collections::{BTreeMap, BTreeSet}; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; use eyre::{eyre, Result}; use namada_core::address::Address; +use namada_core::collections::HashMap; use namada_core::storage::{BlockHeight, Epoch}; use namada_core::token; use namada_core::voting_power::FractionalVotingPower; diff --git a/crates/ethereum_bridge/src/protocol/transactions/votes/update.rs b/crates/ethereum_bridge/src/protocol/transactions/votes/update.rs index d2dc8c9dc5..a1da975531 100644 --- a/crates/ethereum_bridge/src/protocol/transactions/votes/update.rs +++ b/crates/ethereum_bridge/src/protocol/transactions/votes/update.rs @@ -1,8 +1,9 @@ -use std::collections::{BTreeSet, HashMap, HashSet}; +use std::collections::BTreeSet; use borsh::BorshDeserialize; use eyre::{eyre, Result}; use namada_core::address::Address; +use namada_core::collections::{HashMap, HashSet}; use namada_core::storage::BlockHeight; use namada_core::token; use namada_proof_of_stake::pos_queries::PosQueries; @@ -60,7 +61,7 @@ impl NewVotes { let mut inner = self.inner; let mut removed = HashSet::default(); for voter in voters { - if inner.remove(voter).is_some() { 
+ if inner.swap_remove(voter).is_some() { removed.insert(voter); } } @@ -69,7 +70,7 @@ impl NewVotes { } impl IntoIterator for NewVotes { - type IntoIter = std::collections::hash_set::IntoIter; + type IntoIter = namada_core::collections::hash_set::IntoIter; type Item = (Address, BlockHeight, token::Amount); fn into_iter(self) -> Self::IntoIter { diff --git a/crates/ethereum_bridge/src/protocol/validation/validator_set_update.rs b/crates/ethereum_bridge/src/protocol/validation/validator_set_update.rs index a63326ab4c..59b093eea4 100644 --- a/crates/ethereum_bridge/src/protocol/validation/validator_set_update.rs +++ b/crates/ethereum_bridge/src/protocol/validation/validator_set_update.rs @@ -6,7 +6,9 @@ use namada_state::{DBIter, StorageHasher, WlState, DB}; use namada_vote_ext::validator_set_update; use super::VoteExtensionError; -use crate::storage::eth_bridge_queries::EthBridgeQueries; +use crate::storage::eth_bridge_queries::{ + is_bridge_comptime_enabled, EthBridgeQueries, +}; /// Validates a validator set update vote extension issued at the /// epoch provided as an argument. 
@@ -35,13 +37,21 @@ where D: 'static + DB + for<'iter> DBIter<'iter>, H: 'static + StorageHasher, { + let signing_epoch = ext.data.signing_epoch; + if !is_bridge_comptime_enabled() { + tracing::debug!( + vext_epoch = ?signing_epoch, + "The Ethereum bridge was not enabled when the validator set \ + update's vote extension was cast", + ); + return Err(VoteExtensionError::EthereumBridgeInactive); + } if state.in_mem().last_block.is_none() { tracing::debug!( "Dropping validator set update vote extension issued at genesis" ); return Err(VoteExtensionError::UnexpectedBlockHeight); } - let signing_epoch = ext.data.signing_epoch; if signing_epoch > last_epoch { tracing::debug!( vext_epoch = ?signing_epoch, @@ -137,12 +147,19 @@ mod tests { use namada_vote_ext::validator_set_update::{EthAddrBook, VotingPowersMap}; use super::*; + use crate::storage::eth_bridge_queries::is_bridge_comptime_enabled; use crate::test_utils; /// Test that we reject vote extensions containing a superset of the /// next validator set in storage. #[test] fn test_superset_valsetupd_rejected() { + if !is_bridge_comptime_enabled() { + // NOTE: this test doesn't work if the ethereum bridge + // is disabled at compile time. + return; + } + let (state, keys) = test_utils::setup_default_storage(); let (validator, validator_stake) = test_utils::default_validator(); diff --git a/crates/ethereum_bridge/src/storage/eth_bridge_queries.rs b/crates/ethereum_bridge/src/storage/eth_bridge_queries.rs index 5875078638..3d54fd8ab8 100644 --- a/crates/ethereum_bridge/src/storage/eth_bridge_queries.rs +++ b/crates/ethereum_bridge/src/storage/eth_bridge_queries.rs @@ -25,6 +25,40 @@ use namada_vote_ext::validator_set_update::{ use crate::storage::proof::BridgePoolRootProof; use crate::storage::{active_key, bridge_pool, vote_tallies, whitelist}; +/// Check if the Ethereum Bridge has been enabled at compile time. 
+pub const fn is_bridge_comptime_enabled() -> bool { + cfg!(feature = "namada-eth-bridge") +} + +/// Check if the bridge is disabled, enabled, or scheduled to be +/// enabled at a specified [`Epoch`]. +pub fn check_bridge_status( + storage: &S, +) -> namada_storage::Result { + if !is_bridge_comptime_enabled() { + return Ok(EthBridgeStatus::Disabled); + } + let status = storage + .read(&active_key())? + .expect("The Ethereum bridge active key should be in storage"); + Ok(status) +} + +/// Returns a boolean indicating whether the bridge is +/// currently active at the specified [`Epoch`]. +pub fn is_bridge_active_at( + storage: &S, + queried_epoch: Epoch, +) -> namada_storage::Result { + Ok(match check_bridge_status(storage)? { + EthBridgeStatus::Disabled => false, + EthBridgeStatus::Enabled(EthBridgeEnabled::AtGenesis) => true, + EthBridgeStatus::Enabled(EthBridgeEnabled::AtEpoch(enabled_epoch)) => { + queried_epoch >= enabled_epoch + } + }) +} + /// This enum is used as a parameter to /// [`EthBridgeQueriesHook::must_send_valset_upd`]. pub enum SendValsetUpd { @@ -145,36 +179,31 @@ where /// Check if the bridge is disabled, enabled, or /// scheduled to be enabled at a specified epoch. + #[inline] pub fn check_bridge_status(self) -> EthBridgeStatus { - BorshDeserialize::try_from_slice( - self.state - .read_bytes(&active_key()) - .expect( - "Reading the Ethereum bridge active key shouldn't fail.", - ) - .expect("The Ethereum bridge active key should be in storage") - .as_slice(), + check_bridge_status(self.state).expect( + "Failed to read Ethereum bridge activation status from storage", ) - .expect("Deserializing the Ethereum bridge active key shouldn't fail.") } /// Returns a boolean indicating whether the bridge is /// currently active. 
#[inline] pub fn is_bridge_active(self) -> bool { - self.is_bridge_active_at(self.state.in_mem().get_current_epoch().0) + is_bridge_active_at( + self.state, + self.state.in_mem().get_current_epoch().0, + ) + .expect("Failed to read Ethereum bridge activation status from storage") } /// Behaves exactly like [`Self::is_bridge_active`], but performs /// the check at the given [`Epoch`]. + #[inline] pub fn is_bridge_active_at(self, queried_epoch: Epoch) -> bool { - match self.check_bridge_status() { - EthBridgeStatus::Disabled => false, - EthBridgeStatus::Enabled(EthBridgeEnabled::AtGenesis) => true, - EthBridgeStatus::Enabled(EthBridgeEnabled::AtEpoch( - enabled_epoch, - )) => queried_epoch >= enabled_epoch, - } + is_bridge_active_at(self.state, queried_epoch).expect( + "Failed to read Ethereum bridge activation status from storage", + ) } /// Get the nonce of the next transfers to Namada event to be processed. @@ -267,7 +296,11 @@ where /// extension at the provided [`BlockHeight`] in [`SendValsetUpd`]. #[inline] pub fn must_send_valset_upd(self, can_send: SendValsetUpd) -> bool { - if matches!(can_send, SendValsetUpd::AtPrevHeight) { + if !is_bridge_comptime_enabled() { + // the bridge is disabled at compile time, therefore + // we must never submit validator set updates + false + } else if matches!(can_send, SendValsetUpd::AtPrevHeight) { // when checking vote extensions in Prepare // and ProcessProposal, we simply return true true diff --git a/crates/ethereum_bridge/src/storage/parameters.rs b/crates/ethereum_bridge/src/storage/parameters.rs index 36121b7a6b..4d01d38ed6 100644 --- a/crates/ethereum_bridge/src/storage/parameters.rs +++ b/crates/ethereum_bridge/src/storage/parameters.rs @@ -1,7 +1,6 @@ //! 
Parameters for configuring the Ethereum bridge use std::num::NonZeroU64; -use eyre::{eyre, Result}; use namada_core::borsh::{BorshDeserialize, BorshSerialize}; use namada_core::ethereum_events::EthAddress; use namada_core::ethereum_structs; @@ -11,7 +10,7 @@ use namada_macros::BorshDeserializer; #[cfg(feature = "migrations")] use namada_migrations::*; use namada_state::{DBIter, StorageHasher, WlState, DB}; -use namada_storage::{StorageRead, StorageWrite}; +use namada_storage::{Error, Result, StorageRead, StorageWrite}; use serde::{Deserialize, Serialize}; use super::whitelist; @@ -332,17 +331,10 @@ where S: StorageRead, { let native_erc20 = bridge_storage::native_erc20_key(); - match StorageRead::read(storage, &native_erc20) { - Ok(Some(eth_address)) => Ok(eth_address), - Ok(None) => { - Err(eyre!("The Ethereum bridge storage is not initialized")) - } - Err(e) => Err(eyre!( - "Failed to read storage when fetching the native ERC20 address \ - with: {}", - e.to_string() - )), - } + + storage.read(&native_erc20)?.ok_or_else(|| { + Error::SimpleMessage("The Ethereum bridge storage is not initialized") + }) } /// Reads the value of `key` from `storage` and deserializes it, or panics @@ -371,8 +363,10 @@ where #[cfg(test)] mod tests { use namada_state::testing::TestState; + use namada_storage::ResultExt; use super::*; + use crate::storage::eth_bridge_queries::is_bridge_comptime_enabled; /// Ensure we can serialize and deserialize a [`Config`] struct to and from /// TOML. 
This can fail if complex fields are ordered before simple fields @@ -391,8 +385,9 @@ mod tests { }, }, }; - let serialized = toml::to_string(&config)?; - let deserialized: EthereumBridgeParams = toml::from_str(&serialized)?; + let serialized = toml::to_string(&config).into_storage_result()?; + let deserialized: EthereumBridgeParams = + toml::from_str(&serialized).into_storage_result()?; assert_eq!(config, deserialized); Ok(()) @@ -400,6 +395,12 @@ mod tests { #[test] fn test_ethereum_bridge_config_read_write_storage() { + if !is_bridge_comptime_enabled() { + // NOTE: this test doesn't work if the ethereum bridge + // is disabled at compile time. + return; + } + let mut state = TestState::default(); let config = EthereumBridgeParams { erc20_whitelist: vec![], @@ -430,6 +431,7 @@ mod tests { } #[test] + #[cfg_attr(not(feature = "namada-eth-bridge"), ignore)] #[should_panic(expected = "Could not read")] fn test_ethereum_bridge_config_storage_corrupt() { let mut state = TestState::default(); @@ -456,6 +458,7 @@ mod tests { } #[test] + #[cfg_attr(not(feature = "namada-eth-bridge"), ignore)] #[should_panic( expected = "Ethereum bridge appears to be only partially configured!" )] diff --git a/crates/ethereum_bridge/src/storage/proof.rs b/crates/ethereum_bridge/src/storage/proof.rs index 2f5895f363..4fee82a3ae 100644 --- a/crates/ethereum_bridge/src/storage/proof.rs +++ b/crates/ethereum_bridge/src/storage/proof.rs @@ -1,9 +1,8 @@ //! Proofs over some arbitrary data. 
-use std::collections::HashMap; - use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; use ethers::abi::Tokenizable; +use namada_core::collections::HashMap; use namada_core::eth_abi::Encode; use namada_core::ethereum_events::Uint; use namada_core::keccak::KeccakHash; diff --git a/crates/ethereum_bridge/src/test_utils.rs b/crates/ethereum_bridge/src/test_utils.rs index 28d42921a6..cc678a79b2 100644 --- a/crates/ethereum_bridge/src/test_utils.rs +++ b/crates/ethereum_bridge/src/test_utils.rs @@ -1,11 +1,11 @@ //! Test utilities for the Ethereum bridge crate. -use std::collections::HashMap; use std::num::NonZeroU64; use namada_account::protocol_pk_key; use namada_core::address::testing::wnam; use namada_core::address::{self, Address}; +use namada_core::collections::HashMap; use namada_core::dec::Dec; use namada_core::ethereum_events::EthAddress; use namada_core::keccak::KeccakHash; diff --git a/crates/gas/src/event.rs b/crates/gas/src/event.rs new file mode 100644 index 0000000000..25a7fb06e7 --- /dev/null +++ b/crates/gas/src/event.rs @@ -0,0 +1,17 @@ +//! Gas related events. + +use namada_core::event::extend::ExtendEvent; +use namada_core::event::Event; + +use super::Gas; + +/// Extend an [`Event`] with gas used data. +pub struct WithGasUsed(pub Gas); + +impl ExtendEvent for WithGasUsed { + #[inline] + fn extend_event(self, event: &mut Event) { + let Self(gas_used) = self; + event["gas_used"] = gas_used.to_string(); + } +} diff --git a/crates/gas/src/lib.rs b/crates/gas/src/lib.rs index f950209b96..9c651e0cfb 100644 --- a/crates/gas/src/lib.rs +++ b/crates/gas/src/lib.rs @@ -1,10 +1,14 @@ //! Gas accounting module to track the gas usage in a block for transactions and //! validity predicates triggered by transactions. 
+pub mod event; +pub mod storage; + use std::fmt::Display; use std::ops::Div; use namada_core::borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; +use namada_core::hints; use namada_macros::BorshDeserializer; #[cfg(feature = "migrations")] use namada_migrations::*; @@ -22,47 +26,56 @@ pub enum Error { GasOverflow, } -const COMPILE_GAS_PER_BYTE: u64 = 24; +const COMPILE_GAS_PER_BYTE: u64 = 1_955; const PARALLEL_GAS_DIVIDER: u64 = 10; -const WASM_CODE_VALIDATION_GAS_PER_BYTE: u64 = 1; -const WRAPPER_TX_VALIDATION_GAS: u64 = 58_371; +const WASM_CODE_VALIDATION_GAS_PER_BYTE: u64 = 67; +const WRAPPER_TX_VALIDATION_GAS: u64 = 3_245_500; const STORAGE_OCCUPATION_GAS_PER_BYTE: u64 = - 100 + PHYSICAL_STORAGE_LATENCY_PER_BYTE; + 100_000 + PHYSICAL_STORAGE_LATENCY_PER_BYTE; // NOTE: this accounts for the latency of a physical drive access. For read // accesses we have no way to tell if data was in cache or in storage. Moreover, // the latency shouldn't really be accounted per single byte but rather per // storage blob but this would make it more tedious to compute gas in the // codebase. 
For these two reasons we just set an arbitrary value (based on // actual SSDs latency) per byte here -const PHYSICAL_STORAGE_LATENCY_PER_BYTE: u64 = 75; +const PHYSICAL_STORAGE_LATENCY_PER_BYTE: u64 = 1_000_000; // This is based on the global average bandwidth -const NETWORK_TRANSMISSION_GAS_PER_BYTE: u64 = 13; +const NETWORK_TRANSMISSION_GAS_PER_BYTE: u64 = 848; /// The cost of accessing data from memory (both read and write mode), per byte -pub const MEMORY_ACCESS_GAS_PER_BYTE: u64 = 2; +pub const MEMORY_ACCESS_GAS_PER_BYTE: u64 = 104; /// The cost of accessing data from storage, per byte pub const STORAGE_ACCESS_GAS_PER_BYTE: u64 = - 3 + PHYSICAL_STORAGE_LATENCY_PER_BYTE; + 163 + PHYSICAL_STORAGE_LATENCY_PER_BYTE; /// The cost of writing data to storage, per byte pub const STORAGE_WRITE_GAS_PER_BYTE: u64 = - MEMORY_ACCESS_GAS_PER_BYTE + 848 + STORAGE_OCCUPATION_GAS_PER_BYTE; + MEMORY_ACCESS_GAS_PER_BYTE + 69_634 + STORAGE_OCCUPATION_GAS_PER_BYTE; /// The cost of verifying a single signature of a transaction -pub const VERIFY_TX_SIG_GAS: u64 = 9_793; +pub const VERIFY_TX_SIG_GAS: u64 = 594_290; /// The cost for requesting one more page in wasm (64KiB) pub const WASM_MEMORY_PAGE_GAS: u32 = MEMORY_ACCESS_GAS_PER_BYTE as u32 * 64 * 1_024; /// The cost to validate an Ibc action -pub const IBC_ACTION_VALIDATE_GAS: u64 = 7_511; +pub const IBC_ACTION_VALIDATE_GAS: u64 = 1_472_023; /// The cost to execute an Ibc action -pub const IBC_ACTION_EXECUTE_GAS: u64 = 47_452; -/// The cost to execute a masp tx verification -pub const MASP_VERIFY_SHIELDED_TX_GAS: u64 = 62_381_957; +pub const IBC_ACTION_EXECUTE_GAS: u64 = 3_678_745; +/// The cost to execute an ibc transaction TODO: remove once ibc tx goes back to +/// wasm +pub const IBC_TX_GAS: u64 = 111_825_500; +/// The cost to verify a masp spend note +pub const MASP_VERIFY_SPEND_GAS: u64 = 66_822_000; +/// The cost to verify a masp convert note +pub const MASP_VERIFY_CONVERT_GAS: u64 = 45_240_000; +/// The cost to verify a 
masp output note +pub const MASP_VERIFY_OUTPUT_GAS: u64 = 55_023_000; +/// The cost to run the final masp verification +pub const MASP_VERIFY_FINAL_GAS: u64 = 3_475_200; /// Gas module result for functions that may fail pub type Result = std::result::Result; /// Decimal scale of Gas units -const SCALE: u64 = 10_000; +const SCALE: u64 = 100_000_000; /// Representation of gas in sub-units. This effectively decouples gas metering /// from fee payment, allowing higher resolution when accounting for gas while, @@ -186,6 +199,8 @@ pub trait GasMetering { /// Gas metering in a transaction #[derive(Debug)] pub struct TxGasMeter { + /// Track gas overflow + gas_overflow: bool, /// The gas limit for a transaction pub tx_gas_limit: Gas, transaction_gas: Gas, @@ -194,6 +209,8 @@ pub struct TxGasMeter { /// Gas metering in a validity predicate #[derive(Debug, Clone)] pub struct VpGasMeter { + /// Track gas overflow + gas_overflow: bool, /// The transaction gas limit tx_gas_limit: Gas, /// The gas consumed by the transaction before the Vp @@ -221,10 +238,19 @@ pub struct VpsGas { impl GasMetering for TxGasMeter { fn consume(&mut self, gas: u64) -> Result<()> { + if self.gas_overflow { + hints::cold(); + return Err(Error::GasOverflow); + } + self.transaction_gas = self .transaction_gas .checked_add(gas.into()) - .ok_or(Error::GasOverflow)?; + .ok_or_else(|| { + hints::cold(); + self.gas_overflow = true; + Error::GasOverflow + })?; if self.transaction_gas > self.tx_gas_limit { return Err(Error::TransactionGasExceededError); @@ -234,7 +260,12 @@ impl GasMetering for TxGasMeter { } fn get_tx_consumed_gas(&self) -> Gas { - self.transaction_gas + if !self.gas_overflow { + self.transaction_gas + } else { + hints::cold(); + u64::MAX.into() + } } fn get_gas_limit(&self) -> Gas { @@ -247,6 +278,7 @@ impl TxGasMeter { /// wrapper transaction pub fn new(tx_gas_limit: impl Into) -> Self { Self { + gas_overflow: false, tx_gas_limit: tx_gas_limit.into(), transaction_gas: Gas::default(), } @@ 
-256,6 +288,7 @@ impl TxGasMeter { /// units pub fn new_from_sub_limit(tx_gas_limit: Gas) -> Self { Self { + gas_overflow: false, tx_gas_limit, transaction_gas: Gas::default(), } @@ -295,10 +328,17 @@ impl TxGasMeter { impl GasMetering for VpGasMeter { fn consume(&mut self, gas: u64) -> Result<()> { - self.current_gas = self - .current_gas - .checked_add(gas.into()) - .ok_or(Error::GasOverflow)?; + if self.gas_overflow { + hints::cold(); + return Err(Error::GasOverflow); + } + + self.current_gas = + self.current_gas.checked_add(gas.into()).ok_or_else(|| { + hints::cold(); + self.gas_overflow = true; + Error::GasOverflow + })?; let current_total = self .initial_gas @@ -313,7 +353,12 @@ impl GasMetering for VpGasMeter { } fn get_tx_consumed_gas(&self) -> Gas { - self.initial_gas + if !self.gas_overflow { + self.initial_gas + } else { + hints::cold(); + u64::MAX.into() + } } fn get_gas_limit(&self) -> Gas { @@ -325,6 +370,7 @@ impl VpGasMeter { /// Initialize a new VP gas meter from the `TxGasMeter` pub fn new_from_tx_meter(tx_gas_meter: &TxGasMeter) -> Self { Self { + gas_overflow: false, tx_gas_limit: tx_gas_meter.tx_gas_limit, initial_gas: tx_gas_meter.transaction_gas, current_gas: Gas::default(), @@ -399,10 +445,11 @@ mod tests { proptest! 
{ #[test] fn test_vp_gas_meter_add(gas in 0..BLOCK_GAS_LIMIT) { - let tx_gas_meter = TxGasMeter { - tx_gas_limit: BLOCK_GAS_LIMIT.into(), - transaction_gas: Gas::default(), - }; + let tx_gas_meter = TxGasMeter { + gas_overflow: false, + tx_gas_limit: BLOCK_GAS_LIMIT.into(), + transaction_gas: Gas::default(), + }; let mut meter = VpGasMeter::new_from_tx_meter(&tx_gas_meter); meter.consume(gas).expect("cannot add the gas"); } @@ -412,6 +459,7 @@ mod tests { #[test] fn test_vp_gas_overflow() { let tx_gas_meter = TxGasMeter { + gas_overflow: false, tx_gas_limit: BLOCK_GAS_LIMIT.into(), transaction_gas: (TX_GAS_LIMIT - 1).into(), }; @@ -425,6 +473,7 @@ mod tests { #[test] fn test_vp_gas_limit() { let tx_gas_meter = TxGasMeter { + gas_overflow: false, tx_gas_limit: TX_GAS_LIMIT.into(), transaction_gas: (TX_GAS_LIMIT - 1).into(), }; diff --git a/crates/gas/src/storage.rs b/crates/gas/src/storage.rs new file mode 100644 index 0000000000..e86a08c8a8 --- /dev/null +++ b/crates/gas/src/storage.rs @@ -0,0 +1,15 @@ +//! 
Gas storage keys + +use namada_core::storage::Key; + +const ERROR_MSG: &str = "Cannot obtain a valid db key"; + +/// Get the transaction hash prefix under the `all` subkey +pub fn pred_prefix() -> Key { + Key::parse("pred/gas").expect(ERROR_MSG) +} + +/// Get the full transaction hash prefix under the `last` subkey +pub fn gas_prefix() -> Key { + Key::parse("gas").expect(ERROR_MSG) +} diff --git a/crates/governance/src/cli/mod.rs b/crates/governance/src/cli/mod.rs index 45b839d1f4..365f44d087 100644 --- a/crates/governance/src/cli/mod.rs +++ b/crates/governance/src/cli/mod.rs @@ -1,5 +1,3 @@ -/// CLi governance offline structures -pub mod offline; /// CLi governance on chain structures pub mod onchain; /// CLi governance validation diff --git a/crates/governance/src/cli/offline.rs b/crates/governance/src/cli/offline.rs deleted file mode 100644 index f508fef4ca..0000000000 --- a/crates/governance/src/cli/offline.rs +++ /dev/null @@ -1,409 +0,0 @@ -use std::collections::{BTreeMap, BTreeSet}; -use std::fs::{File, ReadDir}; -use std::path::PathBuf; - -use namada_core::account::AccountPublicKeysMap; -use namada_core::address::Address; -use namada_core::borsh::{BorshDeserialize, BorshSerialize, BorshSerializeExt}; -use namada_core::hash::Hash; -use namada_core::key::{common, RefTo, SigScheme}; -use namada_core::sign::SignatureIndex; -use namada_core::storage::Epoch; -use namada_macros::BorshDeserializer; -#[cfg(feature = "migrations")] -use namada_migrations::*; -use serde::{Deserialize, Serialize}; - -use super::validation::{is_valid_tally_epoch, ProposalValidation}; -use crate::storage::vote::ProposalVote; - -#[derive( - Debug, - Clone, - BorshSerialize, - BorshDeserialize, - BorshDeserializer, - Serialize, - Deserialize, -)] -/// The offline proposal structure -pub struct OfflineProposal { - /// The proposal content - pub content: BTreeMap, - /// The proposal author address - pub author: Address, - /// The epoch from which this changes are executed - pub tally_epoch: 
Epoch, -} - -impl OfflineProposal { - /// Validate the offline proposal - pub fn validate( - self, - current_epoch: Epoch, - force: bool, - ) -> Result { - if force { - return Ok(self); - } - is_valid_tally_epoch(self.tally_epoch, current_epoch)?; - - Ok(self) - } - - /// Hash an offline proposal - pub fn hash(&self) -> Hash { - let content_serialized = serde_json::to_vec(&self.content) - .expect("Conversion to bytes shouldn't fail."); - let author_serialized = serde_json::to_vec(&self.author) - .expect("Conversion to bytes shouldn't fail."); - let tally_epoch_serialized = serde_json::to_vec(&self.tally_epoch) - .expect("Conversion to bytes shouldn't fail."); - let proposal_serialized = &[ - content_serialized, - author_serialized, - tally_epoch_serialized, - ] - .concat(); - Hash::sha256(proposal_serialized) - } - - /// Sign an offline proposal - pub fn sign( - self, - signing_keys: Vec, - account_public_keys_map: &AccountPublicKeysMap, - ) -> OfflineSignedProposal { - let proposal_hash = self.hash(); - - let signatures_index = compute_signatures_index( - &signing_keys, - account_public_keys_map, - &proposal_hash, - ); - - OfflineSignedProposal { - proposal: self, - signatures: signatures_index, - } - } -} - -impl TryFrom<&[u8]> for OfflineProposal { - type Error = serde_json::Error; - - fn try_from(value: &[u8]) -> Result { - serde_json::from_slice(value) - } -} - -#[derive( - Debug, - Clone, - BorshSerialize, - BorshDeserialize, - BorshDeserializer, - Serialize, - Deserialize, -)] -/// The signed offline proposal structure -pub struct OfflineSignedProposal { - /// The proposal content - pub proposal: OfflineProposal, - /// The signatures over proposal data - pub signatures: BTreeSet, -} - -impl TryFrom<&[u8]> for OfflineSignedProposal { - type Error = serde_json::Error; - - fn try_from(value: &[u8]) -> Result { - serde_json::from_slice(value) - } -} - -impl OfflineSignedProposal { - /// Serialize the proposal to file. Returns the filename if successful. 
- pub fn serialize( - &self, - output_folder: Option, - ) -> Result { - let proposal_filename = - format!("offline_proposal_{}.json", self.proposal.hash()); - - let filepath = match output_folder { - Some(base_path) => base_path - .join(proposal_filename) - .to_str() - .unwrap() - .to_owned(), - None => proposal_filename, - }; - - let out = - File::create(&filepath).expect("Should be able to create a file."); - serde_json::to_writer_pretty(out, self)?; - - Ok(filepath) - } - - /// Check whether the signature is valid or not - fn check_signature( - &self, - account_public_keys_map: &AccountPublicKeysMap, - threshold: u8, - ) -> bool { - let proposal_hash = self.proposal.hash(); - if self.signatures.len() < threshold as usize { - return false; - } - - let valid_signatures = compute_total_valid_signatures( - &self.signatures, - account_public_keys_map, - &proposal_hash, - ); - - valid_signatures >= threshold - } - - /// Validate an offline proposal - pub fn validate( - self, - account_public_keys_map: &AccountPublicKeysMap, - threshold: u8, - force: bool, - ) -> Result { - if force { - return Ok(self); - } - let valid_signature = - self.check_signature(account_public_keys_map, threshold); - if !valid_signature { - Err(ProposalValidation::OkNoSignature) - } else { - Ok(self) - } - } -} - -#[derive( - Debug, - Clone, - BorshSerialize, - BorshDeserialize, - BorshDeserializer, - Serialize, - Deserialize, -)] -/// The offline proposal structure -pub struct OfflineVote { - /// The proposal data hash - pub proposal_hash: Hash, - /// The proposal vote - pub vote: ProposalVote, - /// The signature over proposal data - pub signatures: BTreeSet, - /// The address corresponding to the signature pk - pub address: Address, - /// The validators address to which this address delegated to - pub delegations: Vec
, -} - -impl OfflineVote { - /// Create an offline vote for a proposal - pub fn new( - proposal: &OfflineSignedProposal, - vote: ProposalVote, - address: Address, - delegations: Vec
, - ) -> Self { - let proposal_hash = proposal.proposal.hash(); - - Self { - proposal_hash, - vote, - delegations, - signatures: BTreeSet::default(), - address, - } - } - - /// Sign the offline vote - pub fn sign( - self, - keypairs: Vec, - account_public_keys_map: &AccountPublicKeysMap, - ) -> Self { - let proposal_vote_data = self.vote.serialize_to_vec(); - let delegations_hash = self.delegations.serialize_to_vec(); - - let vote_hash = Hash::sha256( - [ - self.proposal_hash.to_vec(), - proposal_vote_data, - delegations_hash, - ] - .concat(), - ); - - let signatures = compute_signatures_index( - &keypairs, - account_public_keys_map, - &vote_hash, - ); - - Self { signatures, ..self } - } - - /// Check if the vote is yay - pub fn is_yay(&self) -> bool { - self.vote.is_yay() - } - - /// Check if the vote is nay - pub fn is_nay(&self) -> bool { - self.vote.is_nay() - } - - /// Check if the vote is abstain - pub fn is_abstain(&self) -> bool { - self.vote.is_abstain() - } - - /// compute the hash of a proposal - pub fn compute_hash(&self) -> Hash { - let proposal_hash_data = self.proposal_hash.serialize_to_vec(); - let proposal_vote_data = self.vote.serialize_to_vec(); - let delegations_hash = self.delegations.serialize_to_vec(); - let vote_serialized = - &[proposal_hash_data, proposal_vote_data, delegations_hash] - .concat(); - - Hash::sha256(vote_serialized) - } - - /// Check whether the signature is valid or not - pub fn check_signature( - &self, - account_public_keys_map: &AccountPublicKeysMap, - threshold: u8, - ) -> bool { - if self.signatures.len() < threshold as usize { - return false; - } - let vote_data_hash = self.compute_hash(); - - let valid_signatures = compute_total_valid_signatures( - &self.signatures, - account_public_keys_map, - &vote_data_hash, - ); - - valid_signatures >= threshold - } - - /// Serialize the proposal to file. Returns the filename if successful. 
- pub fn serialize( - &self, - output_folder: Option, - ) -> Result { - let vote_filename = format!( - "offline_vote_{}_{}.json", - self.proposal_hash, self.address - ); - let filepath = match output_folder { - Some(base_path) => { - base_path.join(vote_filename).to_str().unwrap().to_owned() - } - None => vote_filename, - }; - let out = File::create(&filepath).unwrap(); - serde_json::to_writer_pretty(out, self)?; - - Ok(filepath) - } -} - -/// Compute the signatures index -fn compute_signatures_index( - keys: &[common::SecretKey], - account_public_keys_map: &AccountPublicKeysMap, - hashed_data: &Hash, -) -> BTreeSet { - account_public_keys_map - .index_secret_keys(keys.to_vec()) - .values() - .map(|signing_key| { - let public_key = signing_key.ref_to(); - let signature = common::SigScheme::sign(signing_key, hashed_data); - SignatureIndex::from_single_signature(public_key, signature) - }) - .collect::>() -} - -/// Compute the total amount of signatures -fn compute_total_valid_signatures( - signatures: &BTreeSet, - account_public_keys_map: &AccountPublicKeysMap, - hashed_data: &Hash, -) -> u8 { - signatures.iter().fold(0_u8, |acc, signature_index| { - if account_public_keys_map - .get_index_from_public_key(&signature_index.pubkey) - .is_some() - { - let sig_check = common::SigScheme::verify_signature( - &signature_index.pubkey, - hashed_data, - &signature_index.signature, - ); - if sig_check.is_ok() { acc + 1 } else { acc } - } else { - acc - } - }) -} - -/// Read all offline files from a folder -pub fn read_offline_files(path: ReadDir) -> Vec { - path.filter_map(|path| { - if let Ok(path) = path { - let file_type = path.file_type(); - if let Ok(file_type) = file_type { - if file_type.is_file() - && path.file_name().to_string_lossy().contains("offline_") - { - Some(path.path()) - } else { - None - } - } else { - None - } - } else { - None - } - }) - .collect::>() -} - -/// Find offline votes from a folder -pub fn find_offline_proposal(files: &[PathBuf]) -> Option { - 
files - .iter() - .filter(|path| path.to_string_lossy().contains("offline_proposal_")) - .cloned() - .collect::>() - .first() - .cloned() -} - -/// Find offline votes from a folder -pub fn find_offline_votes(files: &[PathBuf]) -> Vec { - files - .iter() - .filter(|path| path.to_string_lossy().contains("offline_vote_")) - .cloned() - .collect::>() -} diff --git a/crates/governance/src/cli/onchain.rs b/crates/governance/src/cli/onchain.rs index b56169e3f0..9c4a1d8452 100644 --- a/crates/governance/src/cli/onchain.rs +++ b/crates/governance/src/cli/onchain.rs @@ -11,10 +11,10 @@ use namada_migrations::*; use serde::{Deserialize, Serialize}; use super::validation::{ - is_valid_author_balance, is_valid_content, is_valid_default_proposal_data, - is_valid_end_epoch, is_valid_grace_epoch, is_valid_pgf_funding_data, - is_valid_pgf_stewards_data, is_valid_proposal_period, is_valid_start_epoch, - ProposalValidation, + is_valid_activation_epoch, is_valid_author_balance, is_valid_content, + is_valid_default_proposal_data, is_valid_end_epoch, + is_valid_pgf_funding_data, is_valid_pgf_stewards_data, + is_valid_proposal_period, is_valid_start_epoch, ProposalValidation, }; use crate::parameters::GovernanceParameters; use crate::storage::proposal::PGFTarget; @@ -30,18 +30,16 @@ use crate::storage::proposal::PGFTarget; )] /// The proposal structure pub struct OnChainProposal { - /// The proposal id - pub id: u64, /// The proposal content pub content: BTreeMap, /// The proposal author address pub author: Address, - /// The epoch from which voting is allowed + /// The epoch in which voting begins pub voting_start_epoch: Epoch, - /// The epoch from which voting is stopped + /// The final epoch in which voting is allowed pub voting_end_epoch: Epoch, - /// The epoch from which this changes are executed - pub grace_epoch: Epoch, + /// The epoch in which any changes are executed and become active + pub activation_epoch: Epoch, } /// Pgf default proposal @@ -86,14 +84,14 @@ impl 
DefaultProposal { governance_parameters.min_proposal_voting_period, governance_parameters.max_proposal_period, )?; - is_valid_grace_epoch( - self.proposal.grace_epoch, + is_valid_activation_epoch( + self.proposal.activation_epoch, self.proposal.voting_end_epoch, governance_parameters.min_proposal_grace_epochs, )?; is_valid_proposal_period( self.proposal.voting_start_epoch, - self.proposal.grace_epoch, + self.proposal.activation_epoch, governance_parameters.max_proposal_period, )?; is_valid_author_balance( @@ -164,14 +162,14 @@ impl PgfStewardProposal { governance_parameters.min_proposal_voting_period, governance_parameters.max_proposal_period, )?; - is_valid_grace_epoch( - self.proposal.grace_epoch, + is_valid_activation_epoch( + self.proposal.activation_epoch, self.proposal.voting_end_epoch, governance_parameters.min_proposal_grace_epochs, )?; is_valid_proposal_period( self.proposal.voting_start_epoch, - self.proposal.grace_epoch, + self.proposal.activation_epoch, governance_parameters.max_proposal_period, )?; is_valid_author_balance( @@ -237,14 +235,14 @@ impl PgfFundingProposal { governance_parameters.min_proposal_voting_period, governance_parameters.max_proposal_period, )?; - is_valid_grace_epoch( - self.proposal.grace_epoch, + is_valid_activation_epoch( + self.proposal.activation_epoch, self.proposal.voting_end_epoch, governance_parameters.min_proposal_grace_epochs, )?; is_valid_proposal_period( self.proposal.voting_start_epoch, - self.proposal.grace_epoch, + self.proposal.activation_epoch, governance_parameters.max_proposal_period, )?; is_valid_content( diff --git a/crates/governance/src/cli/validation.rs b/crates/governance/src/cli/validation.rs index 07efc82e93..be775b80e6 100644 --- a/crates/governance/src/cli/validation.rs +++ b/crates/governance/src/cli/validation.rs @@ -26,16 +26,16 @@ pub enum ProposalValidation { a multiple of {0}" )] InvalidStartEndDifference(u64, u64), - /// The proposal difference between end and grace epoch is invalid + /// The 
proposal difference between end and activation epoch is invalid #[error( - "Invalid proposal grace epoch: difference between proposal grace and \ - end epoch must be at least {0}, but found {1}" + "Invalid proposal activation epoch: difference between proposal \ + activation and end epoch must be at least {0}, but found {1}" )] - InvalidEndGraceDifference(u64, u64), - /// The proposal difference between end and grace epoch is invalid + InvalidEndActivationDifference(u64, u64), + /// The proposal difference between end and activation epoch is invalid #[error( - "Invalid proposal period: difference between proposal start and grace \ - epoch must be at most {1}, but found {0}" + "Invalid proposal period: difference between proposal start and \ + activation epoch must be at most {1}, but found {0}" )] InvalidProposalPeriod(u64, u64), /// The proposal author does not have enough balance to pay for proposal @@ -51,12 +51,6 @@ pub enum ProposalValidation { but maximum is {1}" )] InvalidContentLength(u64, u64), - /// Invalid offline proposal tally epoch - #[error( - "Invalid proposal tally epoch: tally epoch ({0}) must be less than \ - current epoch ({1})" - )] - InvalidTallyEPoch(Epoch, Epoch), /// The proposal wasm code is not valid #[error( "Invalid proposal extra data: file doesn't exist or content size \ @@ -130,18 +124,18 @@ pub fn is_valid_end_epoch( } } -pub fn is_valid_grace_epoch( - proposal_grace_epoch: Epoch, +pub fn is_valid_activation_epoch( + proposal_activation_epoch: Epoch, proposal_end_epoch: Epoch, - min_proposal_grace_epoch: u64, + min_proposal_grace_epochs: u64, ) -> Result<(), ProposalValidation> { - let grace_period = proposal_grace_epoch.0 - proposal_end_epoch.0; + let grace_period = proposal_activation_epoch.0 - proposal_end_epoch.0; - if grace_period > 0 && grace_period >= min_proposal_grace_epoch { + if grace_period > 0 && grace_period >= min_proposal_grace_epochs { Ok(()) } else { - Err(ProposalValidation::InvalidEndGraceDifference( - 
min_proposal_grace_epoch, + Err(ProposalValidation::InvalidEndActivationDifference( + min_proposal_grace_epochs, grace_period, )) } @@ -149,10 +143,10 @@ pub fn is_valid_grace_epoch( pub fn is_valid_proposal_period( proposal_start_epoch: Epoch, - proposal_grace_epoch: Epoch, + proposal_activation_epoch: Epoch, max_proposal_period: u64, ) -> Result<(), ProposalValidation> { - let proposal_period = proposal_grace_epoch.0 - proposal_start_epoch.0; + let proposal_period = proposal_activation_epoch.0 - proposal_start_epoch.0; if proposal_period > 0 && proposal_period <= max_proposal_period { Ok(()) @@ -187,20 +181,6 @@ pub fn is_valid_content( } } -pub fn is_valid_tally_epoch( - tally_epoch: Epoch, - current_epoch: Epoch, -) -> Result<(), ProposalValidation> { - if tally_epoch <= current_epoch { - Ok(()) - } else { - Err(ProposalValidation::InvalidTallyEPoch( - tally_epoch, - current_epoch, - )) - } -} - pub fn is_valid_default_proposal_data( data: &Option>, max_extra_data_size: u64, diff --git a/crates/governance/src/lib.rs b/crates/governance/src/lib.rs index 49d6695a99..f163ffc295 100644 --- a/crates/governance/src/lib.rs +++ b/crates/governance/src/lib.rs @@ -6,6 +6,7 @@ use namada_core::address::{self, Address}; pub mod cli; /// governance parameters pub mod parameters; +/// governance public good fundings pub mod pgf; /// governance storage pub mod storage; diff --git a/crates/governance/src/parameters.rs b/crates/governance/src/parameters.rs index 23d923cedb..8e6cbe76eb 100644 --- a/crates/governance/src/parameters.rs +++ b/crates/governance/src/parameters.rs @@ -31,7 +31,7 @@ pub struct GovernanceParameters { pub max_proposal_period: u64, /// Maximum number of characters for proposal content pub max_proposal_content_size: u64, - /// Minimum epochs between end and grace epochs + /// Minimum number of epochs between the end and activation epochs pub min_proposal_grace_epochs: u64, } @@ -87,10 +87,10 @@ impl GovernanceParameters { storage 
.write(&max_proposal_content_size_key, max_proposal_content_size)?; - let min_proposal_grace_epoch_key = - goverance_storage::get_min_proposal_grace_epoch_key(); + let min_proposal_grace_epochs_key = + goverance_storage::get_min_proposal_grace_epochs_key(); storage - .write(&min_proposal_grace_epoch_key, min_proposal_grace_epochs)?; + .write(&min_proposal_grace_epochs_key, min_proposal_grace_epochs)?; let counter_key = goverance_storage::get_counter_key(); storage.write(&counter_key, u64::MIN) diff --git a/crates/governance/src/pgf/cli/steward.rs b/crates/governance/src/pgf/cli/steward.rs index bac7482603..c6e0196187 100644 --- a/crates/governance/src/pgf/cli/steward.rs +++ b/crates/governance/src/pgf/cli/steward.rs @@ -1,6 +1,5 @@ -use std::collections::HashMap; - use namada_core::address::Address; +use namada_core::collections::HashMap; use namada_core::dec::Dec; use serde::{Deserialize, Serialize}; diff --git a/crates/governance/src/pgf/inflation.rs b/crates/governance/src/pgf/inflation.rs index a77373e4f9..5ce016ddb7 100644 --- a/crates/governance/src/pgf/inflation.rs +++ b/crates/governance/src/pgf/inflation.rs @@ -1,11 +1,9 @@ //! PGF lib code. use namada_core::address::Address; -use namada_core::token; use namada_parameters::storage as params_storage; use namada_storage::{Result, StorageRead, StorageWrite}; -use namada_trans_token::credit_tokens; -use namada_trans_token::storage_key::minted_balance_key; +use namada_trans_token::{credit_tokens, get_effective_total_native_supply}; use crate::pgf::storage::{get_parameters, get_payments, get_stewards}; use crate::storage::proposal::{PGFIbcTarget, PGFTarget}; @@ -25,9 +23,7 @@ where let epochs_per_year: u64 = storage .read(¶ms_storage::get_epochs_per_year_key())? .expect("Epochs per year should exist in storage"); - let total_supply: token::Amount = storage - .read(&minted_balance_key(&staking_token))? 
- .expect("Total native token balance should exist in storage"); + let total_supply = get_effective_total_native_supply(storage)?; let pgf_inflation_amount = (pgf_parameters.pgf_inflation_rate * total_supply) / epochs_per_year; diff --git a/crates/governance/src/pgf/storage/mod.rs b/crates/governance/src/pgf/storage/mod.rs index 1b523ecf9a..7dbd54f2c1 100644 --- a/crates/governance/src/pgf/storage/mod.rs +++ b/crates/governance/src/pgf/storage/mod.rs @@ -5,9 +5,8 @@ pub mod keys; /// Pgf steward structures pub mod steward; -use std::collections::HashMap; - use namada_core::address::Address; +use namada_core::collections::HashMap; use namada_core::dec::Dec; use namada_storage::{Result, StorageRead, StorageWrite}; diff --git a/crates/governance/src/pgf/storage/steward.rs b/crates/governance/src/pgf/storage/steward.rs index 3b5c3648f6..9e39536e88 100644 --- a/crates/governance/src/pgf/storage/steward.rs +++ b/crates/governance/src/pgf/storage/steward.rs @@ -1,7 +1,6 @@ -use std::collections::HashMap; - use borsh::{BorshDeserialize, BorshSerialize}; use namada_core::address::Address; +use namada_core::collections::HashMap; use namada_core::dec::Dec; use namada_macros::BorshDeserializer; #[cfg(feature = "migrations")] diff --git a/crates/governance/src/storage/keys.rs b/crates/governance/src/storage/keys.rs index ded546af78..72cc36917a 100644 --- a/crates/governance/src/storage/keys.rs +++ b/crates/governance/src/storage/keys.rs @@ -14,7 +14,7 @@ struct Keys { content: &'static str, start_epoch: &'static str, end_epoch: &'static str, - grace_epoch: &'static str, + activation_epoch: &'static str, funds: &'static str, proposal_code: &'static str, committing_epoch: &'static str, @@ -23,7 +23,7 @@ struct Keys { min_period: &'static str, max_period: &'static str, max_content: &'static str, - min_grace_epoch: &'static str, + min_grace_epochs: &'static str, counter: &'static str, pending: &'static str, result: &'static str, @@ -90,17 +90,17 @@ pub fn is_proposal_code_key(key: 
&Key) -> bool { } } -/// Check if key is grace epoch key -pub fn is_grace_epoch_key(key: &Key) -> bool { +/// Check if key is activation epoch key +pub fn is_activation_epoch_key(key: &Key) -> bool { match &key.segments[..] { [ DbKeySeg::AddressSeg(addr), DbKeySeg::StringSeg(prefix), DbKeySeg::StringSeg(id), - DbKeySeg::StringSeg(grace_epoch), + DbKeySeg::StringSeg(activation_epoch), ] if addr == &ADDRESS && prefix == Keys::VALUES.proposal - && grace_epoch == Keys::VALUES.grace_epoch => + && activation_epoch == Keys::VALUES.activation_epoch => { id.parse::().is_ok() } @@ -247,7 +247,7 @@ pub fn is_max_proposal_period_key(key: &Key) -> bool { && max_proposal_period_param == Keys::VALUES.max_period) } -/// Check if key is a min grace epoch key +/// Check if key is a min grace epochs key pub fn is_commit_proposal_key(key: &Key) -> bool { matches!(&key.segments[..], [ DbKeySeg::AddressSeg(addr), @@ -262,12 +262,12 @@ pub fn is_commit_proposal_key(key: &Key) -> bool { } /// Check if key is a commit proposal key -pub fn is_min_grace_epoch_key(key: &Key) -> bool { +pub fn is_min_grace_epochs_key(key: &Key) -> bool { matches!(&key.segments[..], [ DbKeySeg::AddressSeg(addr), - DbKeySeg::StringSeg(min_grace_epoch_param), + DbKeySeg::StringSeg(min_grace_epochs_param), ] if addr == &ADDRESS - && min_grace_epoch_param == Keys::VALUES.min_grace_epoch) + && min_grace_epochs_param == Keys::VALUES.min_grace_epochs) } /// Check if key is parameter key @@ -277,7 +277,7 @@ pub fn is_parameter_key(key: &Key) -> bool { || is_max_proposal_code_size_key(key) || is_min_proposal_voting_period_key(key) || is_max_proposal_period_key(key) - || is_min_grace_epoch_key(key) + || is_min_grace_epochs_key(key) } /// Check if key is start epoch or end epoch key @@ -327,10 +327,10 @@ pub fn get_max_proposal_content_key() -> Key { .expect("Cannot obtain a storage key") } -/// Get min grace epoch proposal key -pub fn get_min_proposal_grace_epoch_key() -> Key { +/// Get min grace epochs proposal key +pub 
fn get_min_proposal_grace_epochs_key() -> Key { Key::from(ADDRESS.to_db_key()) - .push(&Keys::VALUES.min_grace_epoch.to_owned()) + .push(&Keys::VALUES.min_grace_epochs.to_owned()) .expect("Cannot obtain a storage key") } @@ -395,12 +395,12 @@ pub fn get_funds_key(id: u64) -> Key { .expect("Cannot obtain a storage key") } -/// Get proposal grace epoch key -pub fn get_grace_epoch_key(id: u64) -> Key { +/// Get proposal activation epoch key +pub fn get_activation_epoch_key(id: u64) -> Key { proposal_prefix() .push(&id.to_string()) .expect("Cannot obtain a storage key") - .push(&Keys::VALUES.grace_epoch.to_owned()) + .push(&Keys::VALUES.activation_epoch.to_owned()) .expect("Cannot obtain a storage key") } diff --git a/crates/governance/src/storage/mod.rs b/crates/governance/src/storage/mod.rs index a6cc8bc787..f37be28bf3 100644 --- a/crates/governance/src/storage/mod.rs +++ b/crates/governance/src/storage/mod.rs @@ -7,7 +7,7 @@ pub mod proposal; /// Vote structures pub mod vote; -use std::collections::BTreeMap; +use std::collections::{BTreeMap, BTreeSet}; use namada_core::address::Address; use namada_core::borsh::BorshDeserialize; @@ -48,15 +48,14 @@ where let proposal_type_key = governance_keys::get_proposal_type_key(proposal_id); match data.r#type { - ProposalType::Default(Some(_)) => { - // Remove wasm code and write it under a different subkey - storage.write(&proposal_type_key, ProposalType::Default(None))?; + ProposalType::DefaultWithWasm(_) => { + storage.write(&proposal_type_key, data.r#type.clone())?; let proposal_code_key = governance_keys::get_proposal_code_key(proposal_id); - let proposal_code = code - .clone() - .ok_or(Error::new_const("Missing proposal code"))?; - storage.write_bytes(&proposal_code_key, proposal_code)? 
+ + let proposal_code = + code.ok_or(Error::new_const("Missing proposal code"))?; + storage.write_bytes(&proposal_code_key, proposal_code)?; } _ => storage.write(&proposal_type_key, data.r#type.clone())?, } @@ -69,16 +68,9 @@ where governance_keys::get_voting_end_epoch_key(proposal_id); storage.write(&voting_end_epoch_key, data.voting_end_epoch)?; - let grace_epoch_key = governance_keys::get_grace_epoch_key(proposal_id); - storage.write(&grace_epoch_key, data.grace_epoch)?; - - if let ProposalType::Default(Some(_)) = data.r#type { - let proposal_code_key = - governance_keys::get_proposal_code_key(proposal_id); - let proposal_code = - code.ok_or(Error::new_const("Missing proposal code"))?; - storage.write_bytes(&proposal_code_key, proposal_code)?; - } + let activation_epoch_key = + governance_keys::get_activation_epoch_key(proposal_id); + storage.write(&activation_epoch_key, data.activation_epoch)?; storage.write(&counter_key, proposal_id + 1)?; @@ -93,7 +85,7 @@ where let committing_proposals_key = governance_keys::get_committing_proposals_key( proposal_id, - data.grace_epoch.0, + data.activation_epoch.0, ); storage.write(&committing_proposals_key, ())?; @@ -148,14 +140,15 @@ where let content = governance_keys::get_content_key(id); let start_epoch_key = governance_keys::get_voting_start_epoch_key(id); let end_epoch_key = governance_keys::get_voting_end_epoch_key(id); - let grace_epoch_key = governance_keys::get_grace_epoch_key(id); + let activation_epoch_key = governance_keys::get_activation_epoch_key(id); let proposal_type_key = governance_keys::get_proposal_type_key(id); let author: Option
= storage.read(&author_key)?; let content: Option> = storage.read(&content)?; let voting_start_epoch: Option = storage.read(&start_epoch_key)?; let voting_end_epoch: Option = storage.read(&end_epoch_key)?; - let grace_epoch: Option = storage.read(&grace_epoch_key)?; + let activation_epoch: Option = + storage.read(&activation_epoch_key)?; let proposal_type: Option = storage.read(&proposal_type_key)?; @@ -166,7 +159,7 @@ where r#type: proposal_type, voting_start_epoch: voting_start_epoch.unwrap(), voting_end_epoch: voting_end_epoch.unwrap(), - grace_epoch: grace_epoch.unwrap(), + activation_epoch: activation_epoch.unwrap(), }); Ok(proposal) @@ -263,7 +256,7 @@ where let min_proposal_fund: token::Amount = storage.read(&key)?.expect("Parameter should be defined."); - let key = governance_keys::get_min_proposal_grace_epoch_key(); + let key = governance_keys::get_min_proposal_grace_epochs_key(); let min_proposal_grace_epochs: u64 = storage.read(&key)?.expect("Parameter should be defined."); @@ -306,3 +299,28 @@ where let proposal_result: Option = storage.read(&key)?; Ok(proposal_result) } + +/// Load proposals for execution in the current epoch. +pub fn load_proposals( + storage: &S, + current_epoch: Epoch, +) -> Result> +where + S: StorageRead, +{ + let mut ids = BTreeSet::::new(); + let proposals_key = + governance_keys::get_commiting_proposals_prefix(current_epoch.0); + for key_val in namada_storage::iter_prefix_bytes(storage, &proposals_key)? 
{ + let (key, _) = key_val?; + let activation_epoch = governance_keys::get_commit_proposal_epoch(&key) + .expect("this key segment should correspond to an epoch number"); + if current_epoch.0 == activation_epoch { + let proposal_id = governance_keys::get_commit_proposal_id(&key) + .expect("this key segment should correspond to a proposal id"); + ids.insert(proposal_id); + } + } + + Ok(ids) +} diff --git a/crates/governance/src/storage/proposal.rs b/crates/governance/src/storage/proposal.rs index ade0da2448..2ced316360 100644 --- a/crates/governance/src/storage/proposal.rs +++ b/crates/governance/src/storage/proposal.rs @@ -40,27 +40,25 @@ pub enum ProposalError { Deserialize, )] pub struct InitProposalData { - /// The proposal id - pub id: u64, /// The proposal content pub content: Hash, /// The proposal author address pub author: Address, /// The proposal type pub r#type: ProposalType, - /// The epoch from which voting is allowed + /// The epoch in which voting begins pub voting_start_epoch: Epoch, - /// The epoch from which voting is stopped + /// The final epoch in which voting is allowed pub voting_end_epoch: Epoch, - /// The epoch from which this changes are executed - pub grace_epoch: Epoch, + /// The epoch in which any changes are executed and become active + pub activation_epoch: Epoch, } impl InitProposalData { /// Get the hash of the corresponding extra data section pub fn get_section_code_hash(&self) -> Option { match self.r#type { - ProposalType::Default(hash) => hash, + ProposalType::DefaultWithWasm(hash) => Some(hash), _ => None, } } @@ -93,13 +91,15 @@ impl TryFrom for InitProposalData { fn try_from(value: DefaultProposal) -> Result { Ok(InitProposalData { - id: value.proposal.id, content: Hash::default(), author: value.proposal.author, - r#type: ProposalType::Default(None), + r#type: match value.data { + Some(_) => ProposalType::DefaultWithWasm(Hash::default()), + None => ProposalType::Default, + }, voting_start_epoch: 
value.proposal.voting_start_epoch, voting_end_epoch: value.proposal.voting_end_epoch, - grace_epoch: value.proposal.grace_epoch, + activation_epoch: value.proposal.activation_epoch, }) } } @@ -112,13 +112,12 @@ impl TryFrom for InitProposalData { BTreeSet::>::try_from(value.data).unwrap(); Ok(InitProposalData { - id: value.proposal.id, content: Hash::default(), author: value.proposal.author, r#type: ProposalType::PGFSteward(extra_data), voting_start_epoch: value.proposal.voting_start_epoch, voting_end_epoch: value.proposal.voting_end_epoch, - grace_epoch: value.proposal.grace_epoch, + activation_epoch: value.proposal.activation_epoch, }) } } @@ -152,13 +151,12 @@ impl TryFrom for InitProposalData { continuous_fundings.extend(retro_fundings); Ok(InitProposalData { - id: value.proposal.id, content: Hash::default(), author: value.proposal.author, r#type: ProposalType::PGFPayment(continuous_fundings), /* here continuous_fundings also contains the retro funding */ voting_start_epoch: value.proposal.voting_start_epoch, voting_end_epoch: value.proposal.voting_end_epoch, - grace_epoch: value.proposal.grace_epoch, + activation_epoch: value.proposal.activation_epoch, }) } } @@ -202,8 +200,10 @@ impl StoragePgfFunding { Deserialize, )] pub enum ProposalType { - /// Default governance proposal with the optional wasm code - Default(Option), + /// Default governance proposal + Default, + /// Governance proposal with wasm code + DefaultWithWasm(Hash), /// PGF stewards proposal PGFSteward(BTreeSet>), /// PGF funding proposal @@ -427,13 +427,18 @@ pub enum PGFAction { impl ProposalType { /// Check if the proposal type is default pub fn is_default(&self) -> bool { - matches!(self, ProposalType::Default(_)) + matches!(self, ProposalType::Default) + } + + /// Check if the proposal type is default + pub fn is_default_with_wasm(&self) -> bool { + matches!(self, ProposalType::DefaultWithWasm(_)) } fn format_data(&self) -> String { match self { - ProposalType::Default(Some(hash)) => 
format!("Hash: {}", &hash), - ProposalType::Default(None) => "".to_string(), + ProposalType::DefaultWithWasm(hash) => format!("Hash: {}", &hash), + ProposalType::Default => "".to_string(), ProposalType::PGFSteward(addresses) => format!( "Addresses:{}", addresses @@ -455,7 +460,8 @@ impl ProposalType { impl Display for ProposalType { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { - ProposalType::Default(_) => write!(f, "Default"), + ProposalType::Default => write!(f, "Default"), + ProposalType::DefaultWithWasm(_) => write!(f, "DefaultWithWasm"), ProposalType::PGFSteward(_) => write!(f, "PGF steward"), ProposalType::PGFPayment(_) => write!(f, "PGF funding"), } @@ -542,7 +548,7 @@ pub struct StorageProposal { /// The epoch from which voting is stopped pub voting_end_epoch: Epoch, /// The epoch from which this changes are executed - pub grace_epoch: Epoch, + pub activation_epoch: Epoch, } impl StorageProposal { @@ -592,7 +598,7 @@ Author: {} Content: {:?} Start Epoch: {} End Epoch: {} -Grace Epoch: {} +Activation Epoch: {} Status: {} Data: {}", self.id, @@ -601,7 +607,7 @@ Data: {}", self.content, self.voting_start_epoch, self.voting_end_epoch, - self.grace_epoch, + self.activation_epoch, self.get_status(current_epoch), self.r#type.format_data() ) @@ -617,7 +623,7 @@ impl Display for StorageProposal { {:2}Author: {} {:2}Start Epoch: {} {:2}End Epoch: {} - {:2}Grace Epoch: {} + {:2}Activation Epoch: {} ", self.id, "", @@ -629,7 +635,7 @@ impl Display for StorageProposal { "", self.voting_end_epoch, "", - self.grace_epoch + self.activation_epoch ) } } @@ -642,7 +648,7 @@ pub mod testing { use namada_core::storage::testing::arb_epoch; use namada_core::token::testing::arb_amount; use proptest::prelude::*; - use proptest::{collection, option, prop_compose}; + use proptest::{collection, prop_compose}; use super::*; use crate::storage::vote::testing::arb_proposal_vote; @@ -726,7 +732,7 @@ pub mod testing { /// Generate an arbitrary proposal 
type pub fn arb_proposal_type() -> impl Strategy { prop_oneof![ - option::of(arb_hash()).prop_map(ProposalType::Default), + arb_hash().prop_map(ProposalType::DefaultWithWasm), collection::btree_set( arb_add_remove(arb_non_internal_address()), 0..10, @@ -740,22 +746,20 @@ pub mod testing { prop_compose! { /// Generate a proposal initialization pub fn arb_init_proposal()( - id: u64, content in arb_hash(), author in arb_non_internal_address(), r#type in arb_proposal_type(), voting_start_epoch in arb_epoch(), voting_end_epoch in arb_epoch(), - grace_epoch in arb_epoch(), + activation_epoch in arb_epoch(), ) -> InitProposalData { InitProposalData { - id, content, author, r#type, voting_start_epoch, voting_end_epoch, - grace_epoch, + activation_epoch, } } } diff --git a/crates/governance/src/storage/vote.rs b/crates/governance/src/storage/vote.rs index 2f5d6aa443..01327a3960 100644 --- a/crates/governance/src/storage/vote.rs +++ b/crates/governance/src/storage/vote.rs @@ -42,6 +42,18 @@ impl ProposalVote { pub fn is_abstain(&self) -> bool { matches!(self, ProposalVote::Abstain) } + + /// Check if two votes are equal, returns an error if the variants of the + /// two instances are different + #[allow(clippy::match_like_matches_macro)] + pub fn is_same_side(&self, other: &ProposalVote) -> bool { + match (self, other) { + (ProposalVote::Yay, ProposalVote::Yay) => true, + (ProposalVote::Nay, ProposalVote::Nay) => true, + (ProposalVote::Abstain, ProposalVote::Abstain) => true, + _ => false, + } + } } impl Display for ProposalVote { diff --git a/crates/governance/src/utils.rs b/crates/governance/src/utils.rs index bd4b086d8c..1b55ae495b 100644 --- a/crates/governance/src/utils.rs +++ b/crates/governance/src/utils.rs @@ -1,8 +1,8 @@ -use std::collections::HashMap; use std::fmt::Display; use namada_core::address::Address; use namada_core::borsh::{BorshDeserialize, BorshSerialize}; +use namada_core::collections::HashMap; use namada_core::dec::Dec; use namada_core::storage::Epoch; 
use namada_core::token; @@ -10,7 +10,6 @@ use namada_macros::BorshDeserializer; #[cfg(feature = "migrations")] use namada_migrations::*; -use super::cli::offline::OfflineVote; use super::storage::proposal::ProposalType; use super::storage::vote::ProposalVote; @@ -82,7 +81,8 @@ impl TallyType { /// Compute the type of tally for a proposal pub fn from(proposal_type: ProposalType, is_steward: bool) -> Self { match (proposal_type, is_steward) { - (ProposalType::Default(_), _) => TallyType::TwoThirds, + (ProposalType::Default, _) => TallyType::TwoThirds, + (ProposalType::DefaultWithWasm(_), _) => TallyType::TwoThirds, (ProposalType::PGFSteward(_), _) => TallyType::OneHalfOverOneThird, (ProposalType::PGFPayment(_), true) => { TallyType::LessOneHalfOverOneThirdNay @@ -243,77 +243,16 @@ impl Display for ProposalResult { } } -/// General representation of a vote -#[derive(Debug, Clone)] -pub enum TallyVote { - /// Represent a vote for a proposal onchain - OnChain(ProposalVote), - /// Represent a vote for a proposal offline - Offline(OfflineVote), -} - -impl From for TallyVote { - fn from(vote: ProposalVote) -> Self { - Self::OnChain(vote) - } -} - -impl From for TallyVote { - fn from(vote: OfflineVote) -> Self { - Self::Offline(vote) - } -} - -impl TallyVote { - /// Check if a vote is yay - pub fn is_yay(&self) -> bool { - match self { - TallyVote::OnChain(vote) => vote.is_yay(), - TallyVote::Offline(vote) => vote.is_yay(), - } - } - - /// Check if a vote is nay - pub fn is_nay(&self) -> bool { - match self { - TallyVote::OnChain(vote) => vote.is_nay(), - TallyVote::Offline(vote) => vote.is_nay(), - } - } - - /// Check if a vote is abstain - pub fn is_abstain(&self) -> bool { - match self { - TallyVote::OnChain(vote) => vote.is_abstain(), - TallyVote::Offline(vote) => vote.is_abstain(), - } - } - - /// Check if two votes are equal, returns an error if the variants of the - /// two instances are different - pub fn is_same_side(&self, other: &TallyVote) -> bool { - match 
(self, other) { - (TallyVote::OnChain(vote), TallyVote::OnChain(other_vote)) => { - vote == other_vote - } - (TallyVote::Offline(vote), TallyVote::Offline(other_vote)) => { - vote.vote == other_vote.vote - } - _ => false, - } - } -} - /// Proposal structure holding votes information necessary to compute the /// outcome #[derive(Default, Debug, Clone)] pub struct ProposalVotes { /// Map from validator address to vote - pub validators_vote: HashMap, + pub validators_vote: HashMap, /// Map from validator to their voting power pub validator_voting_power: HashMap, /// Map from delegation address to their vote - pub delegators_vote: HashMap, + pub delegators_vote: HashMap, /// Map from delegator address to the corresponding validator voting power pub delegator_voting_power: HashMap>, } @@ -324,7 +263,7 @@ impl ProposalVotes { &mut self, address: &Address, voting_power: VotePower, - vote: TallyVote, + vote: ProposalVote, ) { match self.validators_vote.insert(address.clone(), vote) { None => { @@ -345,7 +284,7 @@ impl ProposalVotes { address: &Address, validator_address: &Address, voting_power: VotePower, - vote: TallyVote, + vote: ProposalVote, ) { self.delegator_voting_power .entry(address.clone()) @@ -506,7 +445,7 @@ mod test { proposal_votes.add_validator( &validator_address, validator_voting_power, - ProposalVote::Yay.into(), + ProposalVote::Yay, ); for tally_type in [ @@ -545,7 +484,7 @@ mod test { proposal_votes.add_validator( &validator_address, validator_voting_power, - ProposalVote::Yay.into(), + ProposalVote::Yay, ); let delegator_address = address::testing::established_address_2(); @@ -554,7 +493,7 @@ mod test { &delegator_address, &validator_address, delegator_voting_power, - ProposalVote::Yay.into(), + ProposalVote::Yay, ); for tally_type in [ @@ -593,7 +532,7 @@ mod test { proposal_votes.add_validator( &validator_address, validator_voting_power, - ProposalVote::Yay.into(), + ProposalVote::Yay, ); let delegator_address = 
address::testing::established_address_2(); @@ -602,7 +541,7 @@ mod test { &delegator_address, &validator_address, delegator_voting_power, - ProposalVote::Nay.into(), + ProposalVote::Nay, ); for tally_type in [ @@ -644,7 +583,7 @@ mod test { proposal_votes.add_validator( &validator_address, validator_voting_power, - ProposalVote::Yay.into(), + ProposalVote::Yay, ); let delegator_address = address::testing::established_address_2(); @@ -653,7 +592,7 @@ mod test { &delegator_address, &validator_address, delegator_voting_power, - ProposalVote::Nay.into(), + ProposalVote::Nay, ); let delegator_address_two = address::testing::established_address_3(); @@ -662,7 +601,7 @@ mod test { &delegator_address_two, &validator_address, delegator_voting_power_two, - ProposalVote::Abstain.into(), + ProposalVote::Abstain, ); for tally_type in [ @@ -711,7 +650,7 @@ mod test { proposal_votes.add_validator( &validator_address, validator_voting_power, - ProposalVote::Yay.into(), + ProposalVote::Yay, ); let delegator_address = address::testing::established_address_2(); @@ -720,7 +659,7 @@ mod test { &delegator_address, &validator_address, delegator_voting_power, - ProposalVote::Nay.into(), + ProposalVote::Nay, ); let delegator_address_two = address::testing::established_address_3(); @@ -729,7 +668,7 @@ mod test { &delegator_address_two, &validator_address, delegator_voting_power_two, - ProposalVote::Abstain.into(), + ProposalVote::Abstain, ); for tally_type in [ @@ -778,7 +717,7 @@ mod test { proposal_votes.add_validator( &validator_address, validator_voting_power, - ProposalVote::Yay.into(), + ProposalVote::Yay, ); let delegator_address_two = address::testing::established_address_3(); @@ -787,7 +726,7 @@ mod test { &delegator_address_two, &validator_address, delegator_voting_power_two, - ProposalVote::Abstain.into(), + ProposalVote::Abstain, ); for tally_type in [ @@ -835,7 +774,7 @@ mod test { proposal_votes.add_validator( &validator_address, validator_voting_power, - 
ProposalVote::Yay.into(), + ProposalVote::Yay, ); let validator_address_two = address::testing::established_address_2(); @@ -843,7 +782,7 @@ mod test { proposal_votes.add_validator( &validator_address_two, validator_voting_power_two, - ProposalVote::Nay.into(), + ProposalVote::Nay, ); for tally_type in [ @@ -898,7 +837,7 @@ mod test { proposal_votes.add_validator( &validator_address, validator_voting_power, - ProposalVote::Yay.into(), + ProposalVote::Yay, ); let validator_address_two = address::testing::established_address_2(); @@ -906,7 +845,7 @@ mod test { proposal_votes.add_validator( &validator_address_two, validator_voting_power_two, - ProposalVote::Nay.into(), + ProposalVote::Nay, ); let delegator_address_two = address::testing::established_address_3(); @@ -915,7 +854,7 @@ mod test { &delegator_address_two, &validator_address_two, delegator_voting_power_two, - ProposalVote::Abstain.into(), + ProposalVote::Abstain, ); for tally_type in [ @@ -970,7 +909,7 @@ mod test { proposal_votes.add_validator( &validator_address, validator_voting_power, - ProposalVote::Yay.into(), + ProposalVote::Yay, ); let validator_address_two = address::testing::established_address_2(); @@ -978,7 +917,7 @@ mod test { proposal_votes.add_validator( &validator_address_two, validator_voting_power_two, - ProposalVote::Yay.into(), + ProposalVote::Yay, ); let delegator_address_two = address::testing::established_address_3(); @@ -987,7 +926,7 @@ mod test { &delegator_address_two, &validator_address_two, delegator_voting_power_two, - ProposalVote::Abstain.into(), + ProposalVote::Abstain, ); let proposal_result = compute_proposal_result( @@ -1029,7 +968,7 @@ mod test { proposal_votes.add_validator( &validator_address, validator_voting_power, - ProposalVote::Yay.into(), + ProposalVote::Yay, ); let validator_address_two = address::testing::established_address_2(); @@ -1037,7 +976,7 @@ mod test { proposal_votes.add_validator( &validator_address_two, validator_voting_power_two, - 
ProposalVote::Yay.into(), + ProposalVote::Yay, ); let delegator_address_two = address::testing::established_address_3(); @@ -1046,7 +985,7 @@ mod test { &delegator_address_two, &validator_address_two, delegator_voting_power_two, - ProposalVote::Abstain.into(), + ProposalVote::Abstain, ); let delegator_address = address::testing::established_address_4(); @@ -1055,7 +994,7 @@ mod test { &delegator_address, &validator_address, delegator_voting_power, - ProposalVote::Nay.into(), + ProposalVote::Nay, ); let proposal_result = compute_proposal_result( @@ -1101,7 +1040,7 @@ mod test { &delegator_address_two, &validator_address_two, delegator_voting_power_two, - ProposalVote::Abstain.into(), + ProposalVote::Abstain, ); let delegator_address = address::testing::established_address_4(); @@ -1110,7 +1049,7 @@ mod test { &delegator_address, &validator_address, delegator_voting_power, - ProposalVote::Nay.into(), + ProposalVote::Nay, ); let proposal_result = compute_proposal_result( @@ -1153,7 +1092,7 @@ mod test { &delegator_address_two, &validator_address_two, delegator_voting_power_two, - ProposalVote::Yay.into(), + ProposalVote::Yay, ); let delegator_address = address::testing::established_address_4(); @@ -1162,7 +1101,7 @@ mod test { &delegator_address, &validator_address, delegator_voting_power, - ProposalVote::Yay.into(), + ProposalVote::Yay, ); let proposal_result = compute_proposal_result( @@ -1207,7 +1146,7 @@ mod test { &delegator_address_two, &validator_address_two, delegator_voting_power_two, - ProposalVote::Yay.into(), + ProposalVote::Yay, ); let delegator_address = address::testing::established_address_4(); @@ -1216,7 +1155,7 @@ mod test { &delegator_address, &validator_address, delegator_voting_power, - ProposalVote::Yay.into(), + ProposalVote::Yay, ); let proposal_result = compute_proposal_result( @@ -1261,7 +1200,7 @@ mod test { &delegator_address_two, &validator_address_two, delegator_voting_power_two, - ProposalVote::Yay.into(), + ProposalVote::Yay, ); let 
delegator_address = address::testing::established_address_4(); @@ -1270,7 +1209,7 @@ mod test { &delegator_address, &validator_address, delegator_voting_power, - ProposalVote::Yay.into(), + ProposalVote::Yay, ); let proposal_result = compute_proposal_result( @@ -1315,7 +1254,7 @@ mod test { &delegator_address_two, &validator_address_two, delegator_voting_power_two, - ProposalVote::Nay.into(), + ProposalVote::Nay, ); let delegator_address = address::testing::established_address_4(); @@ -1324,7 +1263,7 @@ mod test { &delegator_address, &validator_address, delegator_voting_power, - ProposalVote::Nay.into(), + ProposalVote::Nay, ); let proposal_result = compute_proposal_result( @@ -1371,7 +1310,7 @@ mod test { &delegator_address_two, &validator_address_two, delegator_voting_power_two, - ProposalVote::Nay.into(), + ProposalVote::Nay, ); let delegator_address = address::testing::established_address_4(); @@ -1380,7 +1319,7 @@ mod test { &delegator_address, &validator_address, delegator_voting_power, - ProposalVote::Nay.into(), + ProposalVote::Nay, ); let proposal_result = compute_proposal_result( diff --git a/crates/ibc/Cargo.toml b/crates/ibc/Cargo.toml index da368475fe..3d37da69fb 100644 --- a/crates/ibc/Cargo.toml +++ b/crates/ibc/Cargo.toml @@ -34,6 +34,7 @@ masp_primitives.workspace = true primitive-types.workspace = true proptest = { workspace = true, optional = true } prost.workspace = true +serde_json.workspace = true sha2.workspace = true thiserror.workspace = true tracing.workspace = true diff --git a/crates/ibc/src/actions.rs b/crates/ibc/src/actions.rs index 57f0ee5335..71bb4d8bde 100644 --- a/crates/ibc/src/actions.rs +++ b/crates/ibc/src/actions.rs @@ -1,24 +1,23 @@ //! 
Implementation of `IbcActions` with the protocol storage use std::cell::RefCell; +use std::collections::BTreeSet; use std::rc::Rc; use namada_core::address::{Address, InternalAddress}; -use namada_core::ibc::apps::transfer::types::msgs::transfer::MsgTransfer; +use namada_core::borsh::BorshSerializeExt; +use namada_core::ibc::apps::transfer::types::msgs::transfer::MsgTransfer as IbcMsgTransfer; use namada_core::ibc::apps::transfer::types::packet::PacketData; use namada_core::ibc::apps::transfer::types::PrefixedCoin; use namada_core::ibc::core::channel::types::timeout::TimeoutHeight; -use namada_core::ibc::primitives::Msg; -use namada_core::ibc::IbcEvent; +use namada_core::ibc::{IbcEvent, MsgTransfer}; use namada_core::tendermint::Time as TmTime; -use namada_core::token::DenominatedAmount; +use namada_core::token::Amount; use namada_governance::storage::proposal::PGFIbcTarget; use namada_parameters::read_epoch_duration_parameter; -use namada_state::write_log::WriteLog; use namada_state::{ - DBIter, Epochs, InMemory, ResultExt, State, StateRead, StorageError, - StorageHasher, StorageRead, StorageResult, StorageWrite, TxHostEnvState, - WlState, DB, + DBIter, Epochs, ResultExt, State, StateRead, StorageError, StorageHasher, + StorageRead, StorageResult, StorageWrite, TxHostEnvState, WlState, DB, }; use namada_token as token; @@ -117,181 +116,7 @@ where } } -/// Temporary wrapper to have gas cost compatible with v0.31.6. -// TODO: Delete this wrapper and use `TxHostEnvState` directly in a breaking -// release. Differs in `iter_next`. 
-#[derive(Debug)] -pub struct CompatibleIbcTxHostEnvState<'a, D, H>(pub TxHostEnvState<'a, D, H>) -where - D: DB + for<'iter> DBIter<'iter>, - H: StorageHasher; - -impl StorageRead for CompatibleIbcTxHostEnvState<'_, D, H> -where - D: DB + for<'iter> DBIter<'iter> + 'static, - H: StorageHasher + 'static, -{ - type PrefixIter<'iter> = namada_state::PrefixIter<'iter, D> where Self: 'iter; - - fn read_bytes( - &self, - key: &namada_storage::Key, - ) -> StorageResult>> { - self.0.read_bytes(key) - } - - fn has_key(&self, key: &namada_storage::Key) -> StorageResult { - self.0.has_key(key) - } - - fn iter_prefix<'iter>( - &'iter self, - prefix: &namada_storage::Key, - ) -> StorageResult> { - let (iter, gas) = namada_state::iter_prefix_post( - self.0.write_log(), - self.0.db(), - prefix, - ); - self.0.charge_gas(gas).into_storage_result()?; - Ok(iter) - } - - fn iter_next<'iter>( - &'iter self, - iter: &mut Self::PrefixIter<'iter>, - ) -> StorageResult)>> { - use namada_state::write_log; - let write_log = self.0.write_log(); - for (key, val, iter_gas) in iter.by_ref() { - let (log_val, log_gas) = write_log.read( - &namada_storage::Key::parse(key.clone()) - .into_storage_result()?, - ); - self.0 - .charge_gas(iter_gas + log_gas) - .into_storage_result()?; - match log_val { - Some(write_log::StorageModification::Write { ref value }) => { - return Ok(Some((key, value.clone()))); - } - Some(&write_log::StorageModification::Delete) => { - // check the next because the key has already deleted - continue; - } - Some(&write_log::StorageModification::InitAccount { - .. 
- }) => { - // a VP of a new account doesn't need to be iterated - continue; - } - Some(write_log::StorageModification::Temp { ref value }) => { - return Ok(Some((key, value.clone()))); - } - None => { - return Ok(Some((key, val))); - } - } - } - Ok(None) - } - - fn get_chain_id(&self) -> StorageResult { - self.0.get_chain_id() - } - - fn get_block_height(&self) -> StorageResult { - self.0.get_block_height() - } - - fn get_block_header( - &self, - height: namada_storage::BlockHeight, - ) -> StorageResult> { - StorageRead::get_block_header(&self.0, height) - } - - fn get_block_hash(&self) -> StorageResult { - self.0.get_block_hash() - } - - fn get_block_epoch(&self) -> StorageResult { - self.0.get_block_epoch() - } - - fn get_pred_epochs(&self) -> StorageResult { - self.0.get_pred_epochs() - } - - fn get_tx_index(&self) -> StorageResult { - self.0.get_tx_index() - } - - fn get_native_token(&self) -> StorageResult
{ - self.0.get_native_token() - } -} - -impl StorageWrite for CompatibleIbcTxHostEnvState<'_, D, H> -where - D: DB + for<'iter> DBIter<'iter> + 'static, - H: StorageHasher + 'static, -{ - fn write_bytes( - &mut self, - key: &namada_storage::Key, - val: impl AsRef<[u8]>, - ) -> StorageResult<()> { - self.0.write_bytes(key, val) - } - - fn delete(&mut self, key: &namada_storage::Key) -> StorageResult<()> { - self.0.delete(key) - } -} - -impl StateRead for CompatibleIbcTxHostEnvState<'_, D, H> -where - D: DB + for<'iter> DBIter<'iter> + 'static, - H: StorageHasher + 'static, -{ - type D = D; - type H = H; - - fn write_log(&self) -> &WriteLog { - self.0.write_log - } - - fn db(&self) -> &D { - self.0.db() - } - - fn in_mem(&self) -> &InMemory { - self.0.in_mem() - } - - fn charge_gas(&self, gas: u64) -> namada_state::Result<()> { - self.0.charge_gas(gas) - } -} - -impl State for CompatibleIbcTxHostEnvState<'_, D, H> -where - D: 'static + DB + for<'iter> DBIter<'iter>, - H: 'static + StorageHasher, -{ - fn write_log_mut(&mut self) -> &mut WriteLog { - self.0.write_log_mut() - } - - fn split_borrow( - &mut self, - ) -> (&mut WriteLog, &InMemory, &Self::D) { - self.0.split_borrow() - } -} - -impl IbcStorageContext for CompatibleIbcTxHostEnvState<'_, D, H> +impl IbcStorageContext for TxHostEnvState<'_, D, H> where D: 'static + DB + for<'iter> DBIter<'iter>, H: 'static + StorageHasher, @@ -320,9 +145,9 @@ where src: &Address, dest: &Address, token: &Address, - amount: DenominatedAmount, + amount: Amount, ) -> Result<(), StorageError> { - token::transfer(self, token, src, dest, amount.amount()) + token::transfer(self, token, src, dest, amount) } fn handle_masp_tx( @@ -338,9 +163,9 @@ where &mut self, target: &Address, token: &Address, - amount: DenominatedAmount, + amount: Amount, ) -> Result<(), StorageError> { - token::credit_tokens(self, token, target, amount.amount())?; + token::credit_tokens(self, token, target, amount)?; let minter_key = 
token::storage_key::minter_key(token); self.write(&minter_key, Address::Internal(InternalAddress::Ibc)) } @@ -349,9 +174,9 @@ where &mut self, target: &Address, token: &Address, - amount: DenominatedAmount, + amount: Amount, ) -> Result<(), StorageError> { - token::burn_tokens(self, token, target, amount.amount()) + token::burn_tokens(self, token, target, amount) } fn log_string(&self, message: String) { @@ -359,7 +184,7 @@ where } } -impl IbcCommonContext for CompatibleIbcTxHostEnvState<'_, D, H> +impl IbcCommonContext for TxHostEnvState<'_, D, H> where D: 'static + DB + for<'iter> DBIter<'iter>, H: 'static + StorageHasher, @@ -396,9 +221,9 @@ where src: &Address, dest: &Address, token: &Address, - amount: DenominatedAmount, + amount: Amount, ) -> Result<(), StorageError> { - token::transfer(self.state, token, src, dest, amount.amount()) + token::transfer(self.state, token, src, dest, amount) } /// Handle masp tx @@ -415,9 +240,9 @@ where &mut self, target: &Address, token: &Address, - amount: DenominatedAmount, + amount: Amount, ) -> Result<(), StorageError> { - token::credit_tokens(self.state, token, target, amount.amount())?; + token::credit_tokens(self.state, token, target, amount)?; let minter_key = token::storage_key::minter_key(token); self.state .write(&minter_key, Address::Internal(InternalAddress::Ibc)) @@ -428,9 +253,9 @@ where &mut self, target: &Address, token: &Address, - amount: DenominatedAmount, + amount: Amount, ) -> Result<(), StorageError> { - token::burn_tokens(self.state, token, target, amount.amount()) + token::burn_tokens(self.state, token, target, amount) } fn log_string(&self, message: String) { @@ -470,18 +295,26 @@ where + read_epoch_duration_parameter(state)?.min_duration; let timeout_timestamp = TmTime::try_from(timeout_timestamp).into_storage_result()?; - let ibc_message = MsgTransfer { + let message = IbcMsgTransfer { port_id_on_a: target.port_id.clone(), chan_id_on_a: target.channel_id.clone(), packet_data, timeout_height_on_b: 
TimeoutHeight::Never, timeout_timestamp_on_b: timeout_timestamp.into(), }; - let any_msg = ibc_message.to_any(); - let mut data = vec![]; - prost::Message::encode(&any_msg, &mut data).into_storage_result()?; + let data = MsgTransfer { + message, + transfer: None, + } + .serialize_to_vec(); let ctx = IbcProtocolContext { state }; - let mut actions = IbcActions::new(Rc::new(RefCell::new(ctx))); - actions.execute(&data).into_storage_result() + + // Use an empty verifiers set placeholder for validation, this is only + // needed in txs and not protocol + let verifiers = Rc::new(RefCell::new(BTreeSet::
::new())); + let mut actions = IbcActions::new(Rc::new(RefCell::new(ctx)), verifiers); + actions.execute(&data).into_storage_result()?; + + Ok(()) } diff --git a/crates/ibc/src/context/client.rs b/crates/ibc/src/context/client.rs index c17414be50..9d3783279c 100644 --- a/crates/ibc/src/context/client.rs +++ b/crates/ibc/src/context/client.rs @@ -1,6 +1,6 @@ //! AnyClientState and AnyConsensusState for IBC context -use ibc_derive::ConsensusState; +use ibc_derive::IbcConsensusState; #[cfg(feature = "testing")] use ibc_testkit::testapp::ibc::clients::mock::client_state::MockClientContext; #[cfg(feature = "testing")] @@ -85,7 +85,7 @@ impl TryFrom for AnyClientState { } /// ConsensusState for light clients -#[derive(ConsensusState)] +#[derive(IbcConsensusState)] pub enum AnyConsensusState { /// Tendermint consensus state Tendermint(TmConsensusState), @@ -162,7 +162,9 @@ impl TryFrom> for AnyConsensusState { fn try_from(bytes: Vec) -> Result { Any::decode(&bytes[..]) - .map_err(ClientError::Decode)? + .map_err(|e| ClientError::Other { + description: e.to_string(), + })? 
.try_into() } } diff --git a/crates/ibc/src/context/common.rs b/crates/ibc/src/context/common.rs index 1cb99c7fce..0da0a7a6b7 100644 --- a/crates/ibc/src/context/common.rs +++ b/crates/ibc/src/context/common.rs @@ -2,6 +2,8 @@ use core::time::Duration; +use namada_core::address::Address; +use namada_core::ibc::apps::nft_transfer::types::{PrefixedClassId, TokenId}; use namada_core::ibc::clients::tendermint::consensus_state::ConsensusState as TmConsensusState; use namada_core::ibc::clients::tendermint::types::ConsensusState as TmConsensusStateType; use namada_core::ibc::core::channel::types::channel::ChannelEnd; @@ -24,10 +26,13 @@ use namada_core::ibc::core::host::types::identifiers::{ }; use namada_core::ibc::primitives::proto::{Any, Protobuf}; use namada_core::ibc::primitives::Timestamp; +use namada_core::ibc::{NftClass, NftMetadata}; use namada_core::storage::{BlockHeight, Key}; use namada_core::tendermint::Time as TmTime; use namada_core::time::DurationSecs; use namada_parameters::storage::get_max_expected_time_per_block_key; +use namada_token::storage_key::balance_key; +use namada_token::Amount; use prost::Message; use sha2::Digest; @@ -45,7 +50,9 @@ pub trait IbcCommonContext: IbcStorageContext { let key = storage::client_state_key(client_id); match self.read_bytes(&key)? { Some(value) => Any::decode(&value[..]) - .map_err(ClientError::Decode)? + .map_err(|e| ClientError::Other { + description: e.to_string(), + })? .try_into() .map_err(ContextError::from), None => Err(ClientError::ClientStateNotFound { @@ -75,7 +82,9 @@ pub trait IbcCommonContext: IbcStorageContext { let key = storage::consensus_state_key(client_id, height); match self.read_bytes(&key)? { Some(value) => Any::decode(&value[..]) - .map_err(ClientError::Decode)? + .map_err(|e| ClientError::Other { + description: e.to_string(), + })? 
.try_into() .map_err(ContextError::from), None => Err(ClientError::ConsensusStateNotFound { @@ -114,7 +123,9 @@ pub trait IbcCommonContext: IbcStorageContext { consensus_state: Vec, ) -> Result { Any::decode(&consensus_state[..]) - .map_err(ClientError::Decode)? + .map_err(|e| ClientError::Other { + description: e.to_string(), + })? .try_into() .map_err(ContextError::from) } @@ -643,26 +654,115 @@ pub trait IbcCommonContext: IbcStorageContext { self.write(key, count).map_err(ContextError::from) } - /// Write the IBC denom. The given address could be a non-Namada token. - fn store_ibc_denom( + /// Write the IBC trace. The given address could be a non-Namada token. + fn store_ibc_trace( &mut self, addr: impl AsRef, trace_hash: impl AsRef, - denom: impl AsRef, + trace: impl AsRef, ) -> Result<()> { - let key = storage::ibc_denom_key(addr, trace_hash.as_ref()); + let key = storage::ibc_trace_key(addr, trace_hash.as_ref()); let has_key = self.has_key(&key).map_err(|_| ChannelError::Other { - description: format!("Reading the IBC denom failed: Key {key}"), + description: format!("Reading the IBC trace failed: Key {key}"), })?; if !has_key { - self.write(&key, denom.as_ref()).map_err(|_| { + self.write(&key, trace.as_ref()).map_err(|_| { ChannelError::Other { description: format!( - "Writing the denom failed: Key {key}", + "Writing the trace failed: Key {key}", ), } })?; } Ok(()) } + + /// Get the NFT class + fn nft_class( + &self, + class_id: &PrefixedClassId, + ) -> Result> { + let key = storage::nft_class_key(class_id); + self.read(&key).map_err(ContextError::from) + } + + /// Store the NFT class + fn store_nft_class(&mut self, class: NftClass) -> Result<()> { + let key = storage::nft_class_key(&class.class_id); + self.write(&key, class).map_err(ContextError::from) + } + + /// Get the NFT metadata + fn nft_metadata( + &self, + class_id: &PrefixedClassId, + token_id: &TokenId, + ) -> Result> { + let key = storage::nft_metadata_key(class_id, token_id); + 
self.read(&key).map_err(ContextError::from) + } + + /// Store the NFT metadata + fn store_nft_metadata(&mut self, metadata: NftMetadata) -> Result<()> { + let key = + storage::nft_metadata_key(&metadata.class_id, &metadata.token_id); + self.write(&key, metadata).map_err(ContextError::from) + } + + /// Return true if the NFT is owned by the owner + fn is_nft_owned( + &self, + class_id: &PrefixedClassId, + token_id: &TokenId, + owner: &Address, + ) -> Result { + let ibc_token = storage::ibc_token_for_nft(class_id, token_id); + let balance_key = balance_key(&ibc_token, owner); + let amount = self.read::(&balance_key)?; + Ok(amount == Some(Amount::from_u64(1))) + } + + /// Read the mint amount of the given token + fn mint_amount(&self, token: &Address) -> Result { + let key = storage::mint_amount_key(token); + Ok(self.read::(&key)?.unwrap_or_default()) + } + + /// Write the mint amount of the given token + fn store_mint_amount( + &mut self, + token: &Address, + amount: Amount, + ) -> Result<()> { + let key = storage::mint_amount_key(token); + self.write(&key, amount).map_err(ContextError::from) + } + + /// Read the per-epoch deposit of the given token + fn deposit(&self, token: &Address) -> Result { + let key = storage::deposit_key(token); + Ok(self.read::(&key)?.unwrap_or_default()) + } + + /// Write the per-epoch deposit of the given token + fn store_deposit(&mut self, token: &Address, amount: Amount) -> Result<()> { + let key = storage::deposit_key(token); + self.write(&key, amount).map_err(ContextError::from) + } + + /// Read the per-epoch withdraw of the given token + fn withdraw(&self, token: &Address) -> Result { + let key = storage::withdraw_key(token); + Ok(self.read::(&key)?.unwrap_or_default()) + } + + /// Write the per-epoch withdraw of the given token + fn store_withdraw( + &mut self, + token: &Address, + amount: Amount, + ) -> Result<()> { + let key = storage::withdraw_key(token); + self.write(&key, amount).map_err(ContextError::from) + } } diff --git 
a/crates/ibc/src/context/mod.rs b/crates/ibc/src/context/mod.rs index 60053a73db..47bf76606d 100644 --- a/crates/ibc/src/context/mod.rs +++ b/crates/ibc/src/context/mod.rs @@ -3,6 +3,8 @@ pub mod client; pub mod common; pub mod execution; +pub mod nft_transfer; +pub mod nft_transfer_mod; pub mod router; pub mod storage; pub mod token_transfer; diff --git a/crates/ibc/src/context/nft_transfer.rs b/crates/ibc/src/context/nft_transfer.rs new file mode 100644 index 0000000000..ef06b05ed7 --- /dev/null +++ b/crates/ibc/src/context/nft_transfer.rs @@ -0,0 +1,368 @@ +//! IBC Non-Fungible token transfer context + +use std::cell::RefCell; +use std::rc::Rc; + +use namada_core::address::Address; +use namada_core::ibc::apps::nft_transfer::context::{ + NftTransferExecutionContext, NftTransferValidationContext, +}; +use namada_core::ibc::apps::nft_transfer::types::error::NftTransferError; +use namada_core::ibc::apps::nft_transfer::types::{ + ClassData, ClassUri, Memo, PrefixedClassId, TokenData, TokenId, TokenUri, + PORT_ID_STR, +}; +use namada_core::ibc::core::handler::types::error::ContextError; +use namada_core::ibc::core::host::types::identifiers::{ChannelId, PortId}; +use namada_core::ibc::{NftClass, NftMetadata, IBC_ESCROW_ADDRESS}; +use namada_core::token::Amount; + +use super::common::IbcCommonContext; +use crate::storage; + +/// NFT transfer context to handle tokens +#[derive(Debug)] +pub struct NftTransferContext +where + C: IbcCommonContext, +{ + inner: Rc>, +} + +impl NftTransferContext +where + C: IbcCommonContext, +{ + /// Make new NFT transfer context + pub fn new(inner: Rc>) -> Self { + Self { inner } + } + + /// Update the mint amount of the token + fn update_mint_amount( + &self, + token: &Address, + is_minted: bool, + ) -> Result<(), NftTransferError> { + let mint = self.inner.borrow().mint_amount(token)?; + let updated_mint = if is_minted && mint.is_zero() { + Amount::from_u64(1) + } else if !is_minted && mint == Amount::from_u64(1) { + Amount::zero() + } 
else { + return Err(NftTransferError::Other( + "The mint amount was invalid".to_string(), + )); + }; + self.inner + .borrow_mut() + .store_mint_amount(token, updated_mint) + .map_err(NftTransferError::from) + } + + /// Add the amount to the per-epoch withdraw of the token + fn add_deposit(&self, token: &Address) -> Result<(), NftTransferError> { + let deposit = self.inner.borrow().deposit(token)?; + let added_deposit = + deposit.checked_add(Amount::from_u64(1)).ok_or_else(|| { + NftTransferError::Other( + "The per-epoch deposit overflowed".to_string(), + ) + })?; + self.inner + .borrow_mut() + .store_deposit(token, added_deposit) + .map_err(NftTransferError::from) + } + + /// Add the amount to the per-epoch withdraw of the token + fn add_withdraw(&self, token: &Address) -> Result<(), NftTransferError> { + let withdraw = self.inner.borrow().withdraw(token)?; + let added_withdraw = + withdraw.checked_add(Amount::from_u64(1)).ok_or_else(|| { + NftTransferError::Other( + "The per-epoch withdraw overflowed".to_string(), + ) + })?; + self.inner + .borrow_mut() + .store_withdraw(token, added_withdraw) + .map_err(NftTransferError::from) + } +} + +impl NftTransferValidationContext for NftTransferContext +where + C: IbcCommonContext, +{ + type AccountId = Address; + type Nft = NftMetadata; + type NftClass = NftClass; + + fn get_port(&self) -> Result { + Ok(PORT_ID_STR.parse().expect("the ID should be parsable")) + } + + fn can_send_nft(&self) -> Result<(), NftTransferError> { + Ok(()) + } + + fn can_receive_nft(&self) -> Result<(), NftTransferError> { + Ok(()) + } + + /// Validates that the NFT can be created or updated successfully. 
+ fn create_or_update_class_validate( + &self, + class_id: &PrefixedClassId, + _class_uri: Option<&ClassUri>, + _class_data: Option<&ClassData>, + ) -> Result<(), NftTransferError> { + match self.get_nft_class(class_id) { + Ok(_) | Err(NftTransferError::NftClassNotFound) => Ok(()), + Err(e) => Err(e), + } + } + + fn escrow_nft_validate( + &self, + from_account: &Self::AccountId, + _port_id: &PortId, + _channel_id: &ChannelId, + class_id: &PrefixedClassId, + token_id: &TokenId, + _memo: &Memo, + ) -> Result<(), NftTransferError> { + // The metadata should exist + self.get_nft(class_id, token_id)?; + + // Check the account owns the NFT + if self + .inner + .borrow() + .is_nft_owned(class_id, token_id, from_account)? + { + Ok(()) + } else { + Err(NftTransferError::Other(format!( + "The sender balance is invalid: sender {from_account}, \ + class_id {class_id}, token_id {token_id}" + ))) + } + // Balance changes will be validated by Multitoken VP + } + + fn unescrow_nft_validate( + &self, + _to_account: &Self::AccountId, + _port_id: &PortId, + _channel_id: &ChannelId, + class_id: &PrefixedClassId, + token_id: &TokenId, + ) -> Result<(), NftTransferError> { + // The metadata should exist + self.get_nft(class_id, token_id)?; + + // Check the NFT is escrowed + if self.inner.borrow().is_nft_owned( + class_id, + token_id, + &IBC_ESCROW_ADDRESS, + )? 
{ + Ok(()) + } else { + Err(NftTransferError::Other(format!( + "The escrow balance is invalid: class_id {class_id}, token_id \ + {token_id}" + ))) + } + // Balance changes will be validated by Multitoken VP + } + + fn mint_nft_validate( + &self, + _account: &Self::AccountId, + _class_id: &PrefixedClassId, + _token_id: &TokenId, + _token_uri: Option<&TokenUri>, + _token_data: Option<&TokenData>, + ) -> Result<(), NftTransferError> { + // Balance changes will be validated by Multitoken VP + Ok(()) + } + + fn burn_nft_validate( + &self, + account: &Self::AccountId, + class_id: &PrefixedClassId, + token_id: &TokenId, + _memo: &Memo, + ) -> Result<(), NftTransferError> { + // Metadata should exist + self.get_nft(class_id, token_id)?; + + // Check the account owns the NFT + if self + .inner + .borrow() + .is_nft_owned(class_id, token_id, account)? + { + Ok(()) + } else { + Err(NftTransferError::Other(format!( + "The sender balance is invalid: sender {account}, class_id \ + {class_id}, token_id {token_id}" + ))) + } + // Balance changes will be validated by Multitoken VP + } + + fn token_hash_string( + &self, + class_id: &PrefixedClassId, + token_id: &TokenId, + ) -> Option { + Some(storage::calc_hash(format!("{class_id}/{token_id}"))) + } + + /// Returns the NFT + fn get_nft( + &self, + class_id: &PrefixedClassId, + token_id: &TokenId, + ) -> Result { + match self.inner.borrow().nft_metadata(class_id, token_id) { + Ok(Some(nft)) => Ok(nft), + Ok(None) => Err(NftTransferError::NftNotFound), + Err(e) => Err(NftTransferError::ContextError(e)), + } + } + + /// Returns the NFT class + fn get_nft_class( + &self, + class_id: &PrefixedClassId, + ) -> Result { + match self.inner.borrow().nft_class(class_id) { + Ok(Some(class)) => Ok(class), + Ok(None) => Err(NftTransferError::NftClassNotFound), + Err(e) => Err(NftTransferError::ContextError(e)), + } + } +} + +impl NftTransferExecutionContext for NftTransferContext +where + C: IbcCommonContext, +{ + fn 
create_or_update_class_execute( + &self, + class_id: &PrefixedClassId, + class_uri: Option<&ClassUri>, + class_data: Option<&ClassData>, + ) -> Result<(), NftTransferError> { + let class = NftClass { + class_id: class_id.clone(), + class_uri: class_uri.cloned(), + class_data: class_data.cloned(), + }; + self.inner + .borrow_mut() + .store_nft_class(class) + .map_err(|e| e.into()) + } + + fn escrow_nft_execute( + &mut self, + from_account: &Self::AccountId, + _port_id: &PortId, + _channel_id: &ChannelId, + class_id: &PrefixedClassId, + token_id: &TokenId, + _memo: &Memo, + ) -> Result<(), NftTransferError> { + let ibc_token = storage::ibc_token_for_nft(class_id, token_id); + + self.add_withdraw(&ibc_token)?; + + self.inner + .borrow_mut() + .transfer_token( + from_account, + &IBC_ESCROW_ADDRESS, + &ibc_token, + Amount::from_u64(1), + ) + .map_err(|e| ContextError::from(e).into()) + } + + /// Executes the unescrow of the NFT in a user account. + fn unescrow_nft_execute( + &mut self, + to_account: &Self::AccountId, + _port_id: &PortId, + _channel_id: &ChannelId, + class_id: &PrefixedClassId, + token_id: &TokenId, + ) -> Result<(), NftTransferError> { + let ibc_token = storage::ibc_token_for_nft(class_id, token_id); + + self.add_deposit(&ibc_token)?; + + self.inner + .borrow_mut() + .transfer_token( + &IBC_ESCROW_ADDRESS, + to_account, + &ibc_token, + Amount::from_u64(1), + ) + .map_err(|e| ContextError::from(e).into()) + } + + fn mint_nft_execute( + &mut self, + account: &Self::AccountId, + class_id: &PrefixedClassId, + token_id: &TokenId, + token_uri: Option<&TokenUri>, + token_data: Option<&TokenData>, + ) -> Result<(), NftTransferError> { + let ibc_token = storage::ibc_token_for_nft(class_id, token_id); + + // create or update the metadata + let metadata = NftMetadata { + class_id: class_id.clone(), + token_id: token_id.clone(), + token_uri: token_uri.cloned(), + token_data: token_data.cloned(), + }; + self.inner.borrow_mut().store_nft_metadata(metadata)?; + + 
self.update_mint_amount(&ibc_token, true)?; + self.add_deposit(&ibc_token)?; + + self.inner + .borrow_mut() + .mint_token(account, &ibc_token, Amount::from_u64(1)) + .map_err(|e| ContextError::from(e).into()) + } + + fn burn_nft_execute( + &mut self, + account: &Self::AccountId, + class_id: &PrefixedClassId, + token_id: &TokenId, + _memo: &Memo, + ) -> Result<(), NftTransferError> { + let ibc_token = storage::ibc_token_for_nft(class_id, token_id); + + self.update_mint_amount(&ibc_token, false)?; + self.add_withdraw(&ibc_token)?; + + self.inner + .borrow_mut() + .burn_token(account, &ibc_token, Amount::from_u64(1)) + .map_err(|e| ContextError::from(e).into()) + } +} diff --git a/crates/ibc/src/context/nft_transfer_mod.rs b/crates/ibc/src/context/nft_transfer_mod.rs new file mode 100644 index 0000000000..e8af60b523 --- /dev/null +++ b/crates/ibc/src/context/nft_transfer_mod.rs @@ -0,0 +1,528 @@ +//! IBC module for token transfer + +use std::cell::RefCell; +use std::fmt::Debug; +use std::rc::Rc; + +use namada_core::ibc::apps::nft_transfer::context::NftTransferValidationContext; +use namada_core::ibc::apps::nft_transfer::module::{ + on_acknowledgement_packet_execute, on_acknowledgement_packet_validate, + on_chan_close_confirm_execute, on_chan_close_confirm_validate, + on_chan_close_init_execute, on_chan_close_init_validate, + on_chan_open_ack_execute, on_chan_open_ack_validate, + on_chan_open_confirm_execute, on_chan_open_confirm_validate, + on_chan_open_init_execute, on_chan_open_init_validate, + on_chan_open_try_execute, on_chan_open_try_validate, + on_recv_packet_execute, on_timeout_packet_execute, + on_timeout_packet_validate, +}; +use namada_core::ibc::apps::nft_transfer::types::error::NftTransferError; +use namada_core::ibc::apps::nft_transfer::types::MODULE_ID_STR; +use namada_core::ibc::core::channel::types::acknowledgement::Acknowledgement; +use namada_core::ibc::core::channel::types::channel::{Counterparty, Order}; +use 
namada_core::ibc::core::channel::types::error::{ + ChannelError, PacketError, +}; +use namada_core::ibc::core::channel::types::packet::Packet; +use namada_core::ibc::core::channel::types::Version; +use namada_core::ibc::core::host::types::identifiers::{ + ChannelId, ConnectionId, PortId, +}; +use namada_core::ibc::core::router::module::Module; +use namada_core::ibc::core::router::types::module::{ModuleExtras, ModuleId}; +use namada_core::ibc::primitives::Signer; + +use super::common::IbcCommonContext; +use super::nft_transfer::NftTransferContext; +use super::transfer_mod::ModuleWrapper; + +/// IBC module for NFT transfer +#[derive(Debug)] +pub struct NftTransferModule +where + C: IbcCommonContext, +{ + /// IBC actions + pub ctx: NftTransferContext, +} + +impl NftTransferModule +where + C: IbcCommonContext, +{ + /// Make a new module + pub fn new(ctx: Rc>) -> Self { + Self { + ctx: NftTransferContext::new(ctx), + } + } +} + +impl ModuleWrapper for NftTransferModule +where + C: IbcCommonContext + Debug, +{ + fn as_module(&self) -> &dyn Module { + self + } + + fn as_module_mut(&mut self) -> &mut dyn Module { + self + } + + fn module_id(&self) -> ModuleId { + ModuleId::new(MODULE_ID_STR.to_string()) + } + + fn port_id(&self) -> PortId { + self.ctx.get_port().expect("The port ID should be set") + } +} + +impl Module for NftTransferModule +where + C: IbcCommonContext + Debug, +{ + #[allow(clippy::too_many_arguments)] + fn on_chan_open_init_validate( + &self, + order: Order, + connection_hops: &[ConnectionId], + port_id: &PortId, + channel_id: &ChannelId, + counterparty: &Counterparty, + version: &Version, + ) -> Result { + on_chan_open_init_validate( + &self.ctx, + order, + connection_hops, + port_id, + channel_id, + counterparty, + version, + ) + .map_err(into_channel_error)?; + Ok(version.clone()) + } + + #[allow(clippy::too_many_arguments)] + fn on_chan_open_init_execute( + &mut self, + order: Order, + connection_hops: &[ConnectionId], + port_id: &PortId, + 
channel_id: &ChannelId, + counterparty: &Counterparty, + version: &Version, + ) -> Result<(ModuleExtras, Version), ChannelError> { + on_chan_open_init_execute( + &mut self.ctx, + order, + connection_hops, + port_id, + channel_id, + counterparty, + version, + ) + .map_err(into_channel_error) + } + + #[allow(clippy::too_many_arguments)] + fn on_chan_open_try_validate( + &self, + order: Order, + connection_hops: &[ConnectionId], + port_id: &PortId, + channel_id: &ChannelId, + counterparty: &Counterparty, + counterparty_version: &Version, + ) -> Result { + on_chan_open_try_validate( + &self.ctx, + order, + connection_hops, + port_id, + channel_id, + counterparty, + counterparty_version, + ) + .map_err(into_channel_error)?; + Ok(counterparty_version.clone()) + } + + #[allow(clippy::too_many_arguments)] + fn on_chan_open_try_execute( + &mut self, + order: Order, + connection_hops: &[ConnectionId], + port_id: &PortId, + channel_id: &ChannelId, + counterparty: &Counterparty, + counterparty_version: &Version, + ) -> Result<(ModuleExtras, Version), ChannelError> { + on_chan_open_try_execute( + &mut self.ctx, + order, + connection_hops, + port_id, + channel_id, + counterparty, + counterparty_version, + ) + .map_err(into_channel_error) + } + + fn on_chan_open_ack_validate( + &self, + port_id: &PortId, + channel_id: &ChannelId, + counterparty_version: &Version, + ) -> Result<(), ChannelError> { + on_chan_open_ack_validate( + &self.ctx, + port_id, + channel_id, + counterparty_version, + ) + .map_err(into_channel_error) + } + + fn on_chan_open_ack_execute( + &mut self, + port_id: &PortId, + channel_id: &ChannelId, + counterparty_version: &Version, + ) -> Result { + on_chan_open_ack_execute( + &mut self.ctx, + port_id, + channel_id, + counterparty_version, + ) + .map_err(into_channel_error) + } + + fn on_chan_open_confirm_validate( + &self, + port_id: &PortId, + channel_id: &ChannelId, + ) -> Result<(), ChannelError> { + on_chan_open_confirm_validate(&self.ctx, port_id, 
channel_id) + .map_err(into_channel_error) + } + + fn on_chan_open_confirm_execute( + &mut self, + port_id: &PortId, + channel_id: &ChannelId, + ) -> Result { + on_chan_open_confirm_execute(&mut self.ctx, port_id, channel_id) + .map_err(into_channel_error) + } + + fn on_chan_close_init_validate( + &self, + port_id: &PortId, + channel_id: &ChannelId, + ) -> Result<(), ChannelError> { + on_chan_close_init_validate(&self.ctx, port_id, channel_id) + .map_err(into_channel_error) + } + + fn on_chan_close_init_execute( + &mut self, + port_id: &PortId, + channel_id: &ChannelId, + ) -> Result { + on_chan_close_init_execute(&mut self.ctx, port_id, channel_id) + .map_err(into_channel_error) + } + + fn on_chan_close_confirm_validate( + &self, + port_id: &PortId, + channel_id: &ChannelId, + ) -> Result<(), ChannelError> { + on_chan_close_confirm_validate(&self.ctx, port_id, channel_id) + .map_err(into_channel_error) + } + + fn on_chan_close_confirm_execute( + &mut self, + port_id: &PortId, + channel_id: &ChannelId, + ) -> Result { + on_chan_close_confirm_execute(&mut self.ctx, port_id, channel_id) + .map_err(into_channel_error) + } + + fn on_recv_packet_execute( + &mut self, + packet: &Packet, + _relayer: &Signer, + ) -> (ModuleExtras, Acknowledgement) { + on_recv_packet_execute(&mut self.ctx, packet) + } + + fn on_acknowledgement_packet_validate( + &self, + packet: &Packet, + acknowledgement: &Acknowledgement, + relayer: &Signer, + ) -> Result<(), PacketError> { + on_acknowledgement_packet_validate( + &self.ctx, + packet, + acknowledgement, + relayer, + ) + .map_err(into_packet_error) + } + + fn on_acknowledgement_packet_execute( + &mut self, + packet: &Packet, + acknowledgement: &Acknowledgement, + relayer: &Signer, + ) -> (ModuleExtras, Result<(), PacketError>) { + let (extras, result) = on_acknowledgement_packet_execute( + &mut self.ctx, + packet, + acknowledgement, + relayer, + ); + (extras, result.map_err(into_packet_error)) + } + + fn on_timeout_packet_validate( + &self, 
+ packet: &Packet, + relayer: &Signer, + ) -> Result<(), PacketError> { + on_timeout_packet_validate(&self.ctx, packet, relayer) + .map_err(into_packet_error) + } + + fn on_timeout_packet_execute( + &mut self, + packet: &Packet, + relayer: &Signer, + ) -> (ModuleExtras, Result<(), PacketError>) { + let (extras, result) = + on_timeout_packet_execute(&mut self.ctx, packet, relayer); + (extras, result.map_err(into_packet_error)) + } +} + +fn into_channel_error(error: NftTransferError) -> ChannelError { + ChannelError::AppModule { + description: error.to_string(), + } +} + +fn into_packet_error(error: NftTransferError) -> PacketError { + PacketError::AppModule { + description: error.to_string(), + } +} + +/// Helpers for testing +#[cfg(any(test, feature = "testing"))] +pub mod testing { + use std::str::FromStr; + + use namada_core::ibc::apps::nft_transfer::types::{ + ack_success_b64, PORT_ID_STR, + }; + use namada_core::ibc::core::channel::types::acknowledgement::AcknowledgementStatus; + + use super::*; + + /// Dummy IBC module for token transfer + #[derive(Debug)] + pub struct DummyNftTransferModule {} + + impl ModuleWrapper for DummyNftTransferModule { + fn as_module(&self) -> &dyn Module { + self + } + + fn as_module_mut(&mut self) -> &mut dyn Module { + self + } + + fn module_id(&self) -> ModuleId { + ModuleId::new(MODULE_ID_STR.to_string()) + } + + fn port_id(&self) -> PortId { + PortId::from_str(PORT_ID_STR).unwrap() + } + } + + impl Module for DummyNftTransferModule { + #[allow(clippy::too_many_arguments)] + fn on_chan_open_init_validate( + &self, + _order: Order, + _connection_hops: &[ConnectionId], + _port_id: &PortId, + _channel_id: &ChannelId, + _counterparty: &Counterparty, + version: &Version, + ) -> Result { + Ok(version.clone()) + } + + #[allow(clippy::too_many_arguments)] + fn on_chan_open_init_execute( + &mut self, + _order: Order, + _connection_hops: &[ConnectionId], + _port_id: &PortId, + _channel_id: &ChannelId, + _counterparty: &Counterparty, + 
version: &Version, + ) -> Result<(ModuleExtras, Version), ChannelError> { + Ok((ModuleExtras::empty(), version.clone())) + } + + #[allow(clippy::too_many_arguments)] + fn on_chan_open_try_validate( + &self, + _order: Order, + _connection_hops: &[ConnectionId], + _port_id: &PortId, + _channel_id: &ChannelId, + _counterparty: &Counterparty, + counterparty_version: &Version, + ) -> Result { + Ok(counterparty_version.clone()) + } + + #[allow(clippy::too_many_arguments)] + fn on_chan_open_try_execute( + &mut self, + _order: Order, + _connection_hops: &[ConnectionId], + _port_id: &PortId, + _channel_id: &ChannelId, + _counterparty: &Counterparty, + counterparty_version: &Version, + ) -> Result<(ModuleExtras, Version), ChannelError> { + Ok((ModuleExtras::empty(), counterparty_version.clone())) + } + + fn on_chan_open_ack_validate( + &self, + _port_id: &PortId, + _channel_id: &ChannelId, + _counterparty_version: &Version, + ) -> Result<(), ChannelError> { + Ok(()) + } + + fn on_chan_open_ack_execute( + &mut self, + _port_id: &PortId, + _channel_id: &ChannelId, + _counterparty_version: &Version, + ) -> Result { + Ok(ModuleExtras::empty()) + } + + fn on_chan_open_confirm_validate( + &self, + _port_id: &PortId, + _channel_id: &ChannelId, + ) -> Result<(), ChannelError> { + Ok(()) + } + + fn on_chan_open_confirm_execute( + &mut self, + _port_id: &PortId, + _channel_id: &ChannelId, + ) -> Result { + Ok(ModuleExtras::empty()) + } + + fn on_chan_close_init_validate( + &self, + _port_id: &PortId, + _channel_id: &ChannelId, + ) -> Result<(), ChannelError> { + Ok(()) + } + + fn on_chan_close_init_execute( + &mut self, + _port_id: &PortId, + _channel_id: &ChannelId, + ) -> Result { + Ok(ModuleExtras::empty()) + } + + fn on_chan_close_confirm_validate( + &self, + _port_id: &PortId, + _channel_id: &ChannelId, + ) -> Result<(), ChannelError> { + Ok(()) + } + + fn on_chan_close_confirm_execute( + &mut self, + _port_id: &PortId, + _channel_id: &ChannelId, + ) -> Result { + 
Ok(ModuleExtras::empty()) + } + + fn on_recv_packet_execute( + &mut self, + _packet: &Packet, + _relayer: &Signer, + ) -> (ModuleExtras, Acknowledgement) { + ( + ModuleExtras::empty(), + AcknowledgementStatus::success(ack_success_b64()).into(), + ) + } + + fn on_acknowledgement_packet_validate( + &self, + _packet: &Packet, + _acknowledgement: &Acknowledgement, + _relayer: &Signer, + ) -> Result<(), PacketError> { + Ok(()) + } + + fn on_acknowledgement_packet_execute( + &mut self, + _packet: &Packet, + _acknowledgement: &Acknowledgement, + _relayer: &Signer, + ) -> (ModuleExtras, Result<(), PacketError>) { + (ModuleExtras::empty(), Ok(())) + } + + fn on_timeout_packet_validate( + &self, + _packet: &Packet, + _relayer: &Signer, + ) -> Result<(), PacketError> { + Ok(()) + } + + fn on_timeout_packet_execute( + &mut self, + _packet: &Packet, + _relayer: &Signer, + ) -> (ModuleExtras, Result<(), PacketError>) { + (ModuleExtras::empty(), Ok(())) + } + } +} diff --git a/crates/ibc/src/context/router.rs b/crates/ibc/src/context/router.rs index da7b90c31d..0833a6282a 100644 --- a/crates/ibc/src/context/router.rs +++ b/crates/ibc/src/context/router.rs @@ -1,8 +1,8 @@ //! 
Functions to handle IBC modules -use std::collections::HashMap; use std::rc::Rc; +use namada_core::collections::HashMap; use namada_core::ibc::core::host::types::identifiers::PortId; use namada_core::ibc::core::router::module::Module; use namada_core::ibc::core::router::router::Router; @@ -27,13 +27,11 @@ impl<'a> IbcRouter<'a> { } /// Add TokenTransfer route - pub fn add_transfer_module( - &mut self, - module_id: ModuleId, - module: impl ModuleWrapper + 'a, - ) { + pub fn add_transfer_module(&mut self, module: impl ModuleWrapper + 'a) { + let module_id = module.module_id(); + let port_id = module.port_id(); self.modules.insert(module_id.clone(), Rc::new(module)); - self.ports.insert(PortId::transfer(), module_id); + self.ports.insert(port_id, module_id); } } diff --git a/crates/ibc/src/context/storage.rs b/crates/ibc/src/context/storage.rs index c9d8218bd1..21c0dc4b1d 100644 --- a/crates/ibc/src/context/storage.rs +++ b/crates/ibc/src/context/storage.rs @@ -3,7 +3,7 @@ pub use ics23::ProofSpec; use namada_core::address::Address; use namada_core::ibc::IbcEvent; -use namada_core::token::DenominatedAmount; +use namada_core::token::Amount; use namada_storage::{Error, StorageRead, StorageWrite}; /// IBC context trait to be implemented in integration that can read and write @@ -23,7 +23,7 @@ pub trait IbcStorageContext: StorageRead + StorageWrite { src: &Address, dest: &Address, token: &Address, - amount: DenominatedAmount, + amount: Amount, ) -> Result<(), Error>; /// Handle masp tx @@ -38,7 +38,7 @@ pub trait IbcStorageContext: StorageRead + StorageWrite { &mut self, target: &Address, token: &Address, - amount: DenominatedAmount, + amount: Amount, ) -> Result<(), Error>; /// Burn token @@ -46,7 +46,7 @@ pub trait IbcStorageContext: StorageRead + StorageWrite { &mut self, target: &Address, token: &Address, - amount: DenominatedAmount, + amount: Amount, ) -> Result<(), Error>; /// Logging diff --git a/crates/ibc/src/context/token_transfer.rs 
b/crates/ibc/src/context/token_transfer.rs index 29f4fd7bc1..94410c5673 100644 --- a/crates/ibc/src/context/token_transfer.rs +++ b/crates/ibc/src/context/token_transfer.rs @@ -1,6 +1,7 @@ //! IBC token transfer context use std::cell::RefCell; +use std::collections::BTreeSet; use std::rc::Rc; use namada_core::address::{Address, InternalAddress}; @@ -8,13 +9,15 @@ use namada_core::ibc::apps::transfer::context::{ TokenTransferExecutionContext, TokenTransferValidationContext, }; use namada_core::ibc::apps::transfer::types::error::TokenTransferError; -use namada_core::ibc::apps::transfer::types::{PrefixedCoin, PrefixedDenom}; +use namada_core::ibc::apps::transfer::types::{ + Memo, PrefixedCoin, PrefixedDenom, +}; use namada_core::ibc::core::channel::types::error::ChannelError; use namada_core::ibc::core::handler::types::error::ContextError; use namada_core::ibc::core::host::types::identifiers::{ChannelId, PortId}; -use namada_core::token; +use namada_core::ibc::IBC_ESCROW_ADDRESS; use namada_core::uint::Uint; -use namada_token::read_denom; +use namada_token::{read_denom, Amount, Denomination}; use super::common::IbcCommonContext; use crate::storage; @@ -26,6 +29,7 @@ where C: IbcCommonContext, { inner: Rc>, + verifiers: Rc>>, } impl TokenTransferContext @@ -33,8 +37,16 @@ where C: IbcCommonContext, { /// Make new token transfer context - pub fn new(inner: Rc>) -> Self { - Self { inner } + pub fn new( + inner: Rc>, + verifiers: Rc>>, + ) -> Self { + Self { inner, verifiers } + } + + /// Insert a verifier address whose VP will verify the tx. + fn insert_verifier(&mut self, addr: &Address) { + self.verifiers.borrow_mut().insert(addr.clone()); } /// Get the token address and the amount from PrefixedCoin. 
If the base @@ -42,7 +54,7 @@ where fn get_token_amount( &self, coin: &PrefixedCoin, - ) -> Result<(Address, token::DenominatedAmount), TokenTransferError> { + ) -> Result<(Address, Amount), TokenTransferError> { let token = match Address::decode(coin.denom.base_denom.as_str()) { Ok(token_addr) if coin.denom.trace_path.is_empty() => token_addr, _ => storage::ibc_token(coin.denom.to_string()), @@ -51,23 +63,84 @@ where // Convert IBC amount to Namada amount for the token let denom = read_denom(&*self.inner.borrow(), &token) .map_err(ContextError::from)? - .unwrap_or(token::Denomination(0)); + .unwrap_or(Denomination(0)); let uint_amount = Uint(primitive_types::U256::from(coin.amount).0); - let amount = - token::Amount::from_uint(uint_amount, denom).map_err(|e| { - TokenTransferError::ContextError( - ChannelError::Other { - description: format!( - "The IBC amount is invalid: Coin {coin}, Error {e}", - ), - } - .into(), - ) - })?; - let amount = token::DenominatedAmount::new(amount, denom); + let amount = Amount::from_uint(uint_amount, denom).map_err(|e| { + TokenTransferError::ContextError( + ChannelError::Other { + description: format!( + "The IBC amount is invalid: Coin {coin}, Error {e}", + ), + } + .into(), + ) + })?; Ok((token, amount)) } + + /// Update the mint amount of the token + fn update_mint_amount( + &self, + token: &Address, + amount: Amount, + is_minted: bool, + ) -> Result<(), TokenTransferError> { + let mint = self.inner.borrow().mint_amount(token)?; + let updated_mint = if is_minted { + mint.checked_add(amount).ok_or_else(|| { + TokenTransferError::Other( + "The mint amount overflowed".to_string(), + ) + })? + } else { + mint.checked_sub(amount).ok_or_else(|| { + TokenTransferError::Other( + "The mint amount underflowed".to_string(), + ) + })? 
+ }; + self.inner + .borrow_mut() + .store_mint_amount(token, updated_mint) + .map_err(TokenTransferError::from) + } + + /// Add the amount to the per-epoch withdraw of the token + fn add_deposit( + &self, + token: &Address, + amount: Amount, + ) -> Result<(), TokenTransferError> { + let deposit = self.inner.borrow().deposit(token)?; + let added_deposit = deposit.checked_add(amount).ok_or_else(|| { + TokenTransferError::Other( + "The per-epoch deposit overflowed".to_string(), + ) + })?; + self.inner + .borrow_mut() + .store_deposit(token, added_deposit) + .map_err(TokenTransferError::from) + } + + /// Add the amount to the per-epoch withdraw of the token + fn add_withdraw( + &self, + token: &Address, + amount: Amount, + ) -> Result<(), TokenTransferError> { + let withdraw = self.inner.borrow().withdraw(token)?; + let added_withdraw = withdraw.checked_add(amount).ok_or_else(|| { + TokenTransferError::Other( + "The per-epoch withdraw overflowed".to_string(), + ) + })?; + self.inner + .borrow_mut() + .store_withdraw(token, added_withdraw) + .map_err(TokenTransferError::from) + } } impl TokenTransferValidationContext for TokenTransferContext @@ -80,14 +153,6 @@ where Ok(PortId::transfer()) } - fn get_escrow_account( - &self, - _port_id: &PortId, - _channel_id: &ChannelId, - ) -> Result { - Ok(Address::Internal(InternalAddress::Ibc)) - } - fn can_send_coins(&self) -> Result<(), TokenTransferError> { Ok(()) } @@ -96,13 +161,26 @@ where Ok(()) } - fn send_coins_validate( + fn escrow_coins_validate( &self, - _from: &Self::AccountId, - _to: &Self::AccountId, + _from_account: &Self::AccountId, + _port_id: &PortId, + _channel_id: &ChannelId, _coin: &PrefixedCoin, + _memo: &Memo, ) -> Result<(), TokenTransferError> { - // validated by IBC token VP + // validated by Multitoken VP + Ok(()) + } + + fn unescrow_coins_validate( + &self, + _to_account: &Self::AccountId, + _port_id: &PortId, + _channel_id: &ChannelId, + _coin: &PrefixedCoin, + ) -> Result<(), TokenTransferError> { + 
// validated by Multitoken VP Ok(()) } @@ -111,7 +189,7 @@ where _account: &Self::AccountId, _coin: &PrefixedCoin, ) -> Result<(), TokenTransferError> { - // validated by IBC token VP + // validated by Multitoken VP Ok(()) } @@ -119,8 +197,9 @@ where &self, _account: &Self::AccountId, _coin: &PrefixedCoin, + _memo: &Memo, ) -> Result<(), TokenTransferError> { - // validated by IBC token VP + // validated by Multitoken VP Ok(()) } @@ -133,19 +212,50 @@ impl TokenTransferExecutionContext for TokenTransferContext where C: IbcCommonContext, { - fn send_coins_execute( + fn escrow_coins_execute( + &mut self, + from_account: &Self::AccountId, + _port_id: &PortId, + _channel_id: &ChannelId, + coin: &PrefixedCoin, + _memo: &Memo, + ) -> Result<(), TokenTransferError> { + let (ibc_token, amount) = self.get_token_amount(coin)?; + + self.add_withdraw(&ibc_token, amount)?; + + // A transfer of NUT tokens must be verified by their VP + if ibc_token.is_internal() + && matches!(ibc_token, Address::Internal(InternalAddress::Nut(_))) + { + self.insert_verifier(&ibc_token); + } + + self.inner + .borrow_mut() + .transfer_token( + from_account, + &IBC_ESCROW_ADDRESS, + &ibc_token, + amount, + ) + .map_err(|e| ContextError::from(e).into()) + } + + fn unescrow_coins_execute( &mut self, - from: &Self::AccountId, - to: &Self::AccountId, + to_account: &Self::AccountId, + _port_id: &PortId, + _channel_id: &ChannelId, coin: &PrefixedCoin, ) -> Result<(), TokenTransferError> { - // Assumes that the coin denom is prefixed with "port-id/channel-id" or - // has no prefix let (ibc_token, amount) = self.get_token_amount(coin)?; + self.add_deposit(&ibc_token, amount)?; + self.inner .borrow_mut() - .transfer_token(from, to, &ibc_token, amount) + .transfer_token(&IBC_ESCROW_ADDRESS, to_account, &ibc_token, amount) .map_err(|e| ContextError::from(e).into()) } @@ -157,6 +267,16 @@ where // The trace path of the denom is already updated if receiving the token let (ibc_token, amount) = 
self.get_token_amount(coin)?; + self.update_mint_amount(&ibc_token, amount, true)?; + self.add_deposit(&ibc_token, amount)?; + + // A transfer of NUT tokens must be verified by their VP + if ibc_token.is_internal() + && matches!(ibc_token, Address::Internal(InternalAddress::Nut(_))) + { + self.insert_verifier(&ibc_token); + } + self.inner .borrow_mut() .mint_token(account, &ibc_token, amount) @@ -167,9 +287,20 @@ where &mut self, account: &Self::AccountId, coin: &PrefixedCoin, + _memo: &Memo, ) -> Result<(), TokenTransferError> { let (ibc_token, amount) = self.get_token_amount(coin)?; + self.update_mint_amount(&ibc_token, amount, false)?; + self.add_withdraw(&ibc_token, amount)?; + + // A transfer of NUT tokens must be verified by their VP + if ibc_token.is_internal() + && matches!(ibc_token, Address::Internal(InternalAddress::Nut(_))) + { + self.insert_verifier(&ibc_token); + } + // The burn is "unminting" from the minted balance self.inner .borrow_mut() diff --git a/crates/ibc/src/context/transfer_mod.rs b/crates/ibc/src/context/transfer_mod.rs index 35c0a1d872..4b88220213 100644 --- a/crates/ibc/src/context/transfer_mod.rs +++ b/crates/ibc/src/context/transfer_mod.rs @@ -1,9 +1,12 @@ //! 
IBC module for token transfer use std::cell::RefCell; +use std::collections::BTreeSet; use std::fmt::Debug; use std::rc::Rc; +use namada_core::address::Address; +use namada_core::ibc::apps::transfer::context::TokenTransferValidationContext; use namada_core::ibc::apps::transfer::module::{ on_acknowledgement_packet_execute, on_acknowledgement_packet_validate, on_chan_close_confirm_execute, on_chan_close_confirm_validate, @@ -41,6 +44,12 @@ pub trait ModuleWrapper: Module { /// Mutable reference of the module fn as_module_mut(&mut self) -> &mut dyn Module; + + /// Get the module ID + fn module_id(&self) -> ModuleId; + + /// Get the port ID + fn port_id(&self) -> PortId; } /// IBC module for token transfer @@ -58,16 +67,14 @@ where C: IbcCommonContext, { /// Make a new module - pub fn new(ctx: Rc>) -> Self { + pub fn new( + ctx: Rc>, + verifiers: Rc>>, + ) -> Self { Self { - ctx: TokenTransferContext::new(ctx), + ctx: TokenTransferContext::new(ctx, verifiers), } } - - /// Get the module ID - pub fn module_id(&self) -> ModuleId { - ModuleId::new(MODULE_ID_STR.to_string()) - } } impl ModuleWrapper for TransferModule @@ -81,6 +88,14 @@ where fn as_module_mut(&mut self) -> &mut dyn Module { self } + + fn module_id(&self) -> ModuleId { + ModuleId::new(MODULE_ID_STR.to_string()) + } + + fn port_id(&self) -> PortId { + self.ctx.get_port().expect("The port ID should be set") + } } impl Module for TransferModule @@ -334,7 +349,11 @@ fn into_packet_error(error: TokenTransferError) -> PacketError { /// Helpers for testing #[cfg(any(test, feature = "testing"))] pub mod testing { - use namada_core::ibc::apps::transfer::types::ack_success_b64; + use std::str::FromStr; + + use namada_core::ibc::apps::transfer::types::{ + ack_success_b64, PORT_ID_STR, + }; use namada_core::ibc::core::channel::types::acknowledgement::AcknowledgementStatus; use super::*; @@ -343,13 +362,6 @@ pub mod testing { #[derive(Debug)] pub struct DummyTransferModule {} - impl DummyTransferModule { - /// Get the 
module ID - pub fn module_id(&self) -> ModuleId { - ModuleId::new(MODULE_ID_STR.to_string()) - } - } - impl ModuleWrapper for DummyTransferModule { fn as_module(&self) -> &dyn Module { self @@ -358,6 +370,14 @@ pub mod testing { fn as_module_mut(&mut self) -> &mut dyn Module { self } + + fn module_id(&self) -> ModuleId { + ModuleId::new(MODULE_ID_STR.to_string()) + } + + fn port_id(&self) -> PortId { + PortId::from_str(PORT_ID_STR).unwrap() + } } impl Module for DummyTransferModule { diff --git a/crates/ibc/src/lib.rs b/crates/ibc/src/lib.rs index 765c03d1ca..dd686f5fe9 100644 --- a/crates/ibc/src/lib.rs +++ b/crates/ibc/src/lib.rs @@ -2,16 +2,20 @@ mod actions; pub mod context; +pub mod parameters; pub mod storage; use std::cell::RefCell; +use std::collections::BTreeSet; use std::fmt::Debug; use std::rc::Rc; use std::str::FromStr; -pub use actions::{transfer_over_ibc, CompatibleIbcTxHostEnvState}; +pub use actions::transfer_over_ibc; use borsh::BorshDeserialize; pub use context::common::IbcCommonContext; +pub use context::nft_transfer::NftTransferContext; +pub use context::nft_transfer_mod::NftTransferModule; use context::router::IbcRouter; pub use context::storage::{IbcStorageContext, ProofSpec}; pub use context::token_transfer::TokenTransferContext; @@ -19,25 +23,39 @@ pub use context::transfer_mod::{ModuleWrapper, TransferModule}; use context::IbcContext; pub use context::ValidationParams; use namada_core::address::{Address, MASP}; +use namada_core::ibc::apps::nft_transfer::handler::{ + send_nft_transfer_execute, send_nft_transfer_validate, +}; +use namada_core::ibc::apps::nft_transfer::types::error::NftTransferError; +use namada_core::ibc::apps::nft_transfer::types::packet::PacketData as NftPacketData; +use namada_core::ibc::apps::nft_transfer::types::{ + is_receiver_chain_source as is_nft_receiver_chain_source, PrefixedClassId, + TokenId, TracePrefix as NftTracePrefix, +}; use namada_core::ibc::apps::transfer::handler::{ send_transfer_execute, 
send_transfer_validate, }; use namada_core::ibc::apps::transfer::types::error::TokenTransferError; -use namada_core::ibc::apps::transfer::types::msgs::transfer::MsgTransfer; +use namada_core::ibc::apps::transfer::types::packet::PacketData; use namada_core::ibc::apps::transfer::types::{ - is_receiver_chain_source, PrefixedDenom, TracePrefix, + is_receiver_chain_source, TracePrefix, +}; +use namada_core::ibc::core::channel::types::acknowledgement::{ + Acknowledgement, AcknowledgementStatus, +}; +use namada_core::ibc::core::channel::types::msgs::{ + MsgRecvPacket as IbcMsgRecvPacket, PacketMsg, }; -use namada_core::ibc::core::channel::types::msgs::PacketMsg; use namada_core::ibc::core::entrypoint::{execute, validate}; use namada_core::ibc::core::handler::types::error::ContextError; use namada_core::ibc::core::handler::types::msgs::MsgEnvelope; use namada_core::ibc::core::host::types::error::IdentifierError; use namada_core::ibc::core::host::types::identifiers::{ChannelId, PortId}; use namada_core::ibc::core::router::types::error::RouterError; -use namada_core::ibc::core::router::types::module::ModuleId; use namada_core::ibc::primitives::proto::Any; pub use namada_core::ibc::*; use namada_core::masp::PaymentAddress; +use namada_token::Transfer; use prost::Message; use thiserror::Error; @@ -52,8 +70,10 @@ pub enum Error { Context(Box), #[error("IBC token transfer error: {0}")] TokenTransfer(TokenTransferError), - #[error("Denom error: {0}")] - Denom(String), + #[error("IBC NFT transfer error: {0}")] + NftTransfer(NftTransferError), + #[error("Trace error: {0}")] + Trace(String), #[error("Invalid chain ID: {0}")] ChainId(IdentifierError), #[error("Handling MASP transaction error: {0}")] @@ -68,6 +88,7 @@ where { ctx: IbcContext, router: IbcRouter<'a>, + verifiers: Rc>>, } impl<'a, C> IbcActions<'a, C> @@ -75,20 +96,20 @@ where C: IbcCommonContext + Debug, { /// Make new IBC actions - pub fn new(ctx: Rc>) -> Self { + pub fn new( + ctx: Rc>, + verifiers: Rc>>, + ) -> Self 
{ Self { ctx: IbcContext::new(ctx), router: IbcRouter::new(), + verifiers, } } - /// Add TokenTransfer route - pub fn add_transfer_module( - &mut self, - module_id: ModuleId, - module: impl ModuleWrapper + 'a, - ) { - self.router.add_transfer_module(module_id, module) + /// Add a transfer module to the router + pub fn add_transfer_module(&mut self, module: impl ModuleWrapper + 'a) { + self.router.add_transfer_module(module) } /// Set the validation parameters @@ -97,146 +118,229 @@ where } /// Execute according to the message in an IBC transaction or VP - pub fn execute(&mut self, tx_data: &[u8]) -> Result<(), Error> { + pub fn execute( + &mut self, + tx_data: &[u8], + ) -> Result, Error> { let message = decode_message(tx_data)?; match &message { IbcMessage::Transfer(msg) => { - let mut token_transfer_ctx = - TokenTransferContext::new(self.ctx.inner.clone()); + let mut token_transfer_ctx = TokenTransferContext::new( + self.ctx.inner.clone(), + self.verifiers.clone(), + ); send_transfer_execute( &mut self.ctx, &mut token_transfer_ctx, - msg.clone(), + msg.message.clone(), ) - .map_err(Error::TokenTransfer) + .map_err(Error::TokenTransfer)?; + Ok(msg.transfer.clone()) } - IbcMessage::ShieldedTransfer(msg) => { - let mut token_transfer_ctx = - TokenTransferContext::new(self.ctx.inner.clone()); - send_transfer_execute( + IbcMessage::NftTransfer(msg) => { + let mut nft_transfer_ctx = + NftTransferContext::new(self.ctx.inner.clone()); + send_nft_transfer_execute( &mut self.ctx, - &mut token_transfer_ctx, + &mut nft_transfer_ctx, msg.message.clone(), ) - .map_err(Error::TokenTransfer)?; - self.handle_masp_tx(message) + .map_err(Error::NftTransfer)?; + Ok(msg.transfer.clone()) + } + IbcMessage::RecvPacket(msg) => { + let envelope = + MsgEnvelope::Packet(PacketMsg::Recv(msg.message.clone())); + execute(&mut self.ctx, &mut self.router, envelope) + .map_err(|e| Error::Context(Box::new(e)))?; + let transfer = if self.is_receiving_success()? 
{ + // the current ibc-rs execution doesn't store the denom + // for the token hash when transfer with MsgRecvPacket + self.store_trace(&msg.message)?; + // For receiving the token to a shielded address + msg.transfer.clone() + } else { + None + }; + Ok(transfer) + } + IbcMessage::AckPacket(msg) => { + let envelope = + MsgEnvelope::Packet(PacketMsg::Ack(msg.message.clone())); + execute(&mut self.ctx, &mut self.router, envelope) + .map_err(|e| Error::Context(Box::new(e)))?; + let transfer = + if !is_ack_successful(&msg.message.acknowledgement)? { + // For refunding the token to a shielded address + msg.transfer.clone() + } else { + None + }; + Ok(transfer) + } + IbcMessage::Timeout(msg) => { + let envelope = MsgEnvelope::Packet(PacketMsg::Timeout( + msg.message.clone(), + )); + execute(&mut self.ctx, &mut self.router, envelope) + .map_err(|e| Error::Context(Box::new(e)))?; + Ok(msg.transfer.clone()) } IbcMessage::Envelope(envelope) => { - execute(&mut self.ctx, &mut self.router, envelope.clone()) + execute(&mut self.ctx, &mut self.router, *envelope.clone()) .map_err(|e| Error::Context(Box::new(e)))?; - // the current ibc-rs execution doesn't store the denom for the - // token hash when transfer with MsgRecvPacket - self.store_denom(envelope)?; - // For receiving the token to a shielded address - self.handle_masp_tx(message) + if let MsgEnvelope::Packet(PacketMsg::Recv(msg)) = &**envelope { + if self.is_receiving_success()? { + // the current ibc-rs execution doesn't store the denom + // for the token hash when transfer with MsgRecvPacket + self.store_trace(msg)?; + } + } + Ok(None) } } } - /// Store the denom when transfer with MsgRecvPacket - fn store_denom(&mut self, envelope: &MsgEnvelope) -> Result<(), Error> { - if let MsgEnvelope::Packet(PacketMsg::Recv(_)) = envelope { - if let Some((trace_hash, ibc_denom, receiver)) = - self.get_minted_token_info()? - { - // If the denomination trace event has the trace hash and - // the IBC denom, a token has been minted. 
The raw IBC denom - // including the port ID, the channel ID and the base token - // is stored to be restored from the trace hash. The amount - // denomination is also set for the minting. + /// Store the trace path when transfer with MsgRecvPacket + fn store_trace(&mut self, msg: &IbcMsgRecvPacket) -> Result<(), Error> { + // Get the IBC trace, and the receiver from the packet data + let minted_token_info = if let Ok(data) = + serde_json::from_slice::(&msg.packet.data) + { + let ibc_denom = received_ibc_trace( + data.token.denom.to_string(), + &msg.packet.port_id_on_a, + &msg.packet.chan_id_on_a, + &msg.packet.port_id_on_b, + &msg.packet.chan_id_on_b, + )?; + if !ibc_denom.contains('/') { + // Skip to store it because the token has been redeemed + return Ok(()); + } + let receiver = + if PaymentAddress::from_str(data.receiver.as_ref()).is_ok() { + MASP.to_string() + } else { + data.receiver.to_string() + }; + Some((vec![ibc_denom], receiver)) + } else if let Ok(data) = + serde_json::from_slice::(&msg.packet.data) + { + let ibc_traces: Result, _> = data + .token_ids + .0 + .iter() + .map(|id| { + let trace = format!("{}/{id}", data.class_id); + received_ibc_trace( + trace, + &msg.packet.port_id_on_a, + &msg.packet.chan_id_on_a, + &msg.packet.port_id_on_b, + &msg.packet.chan_id_on_b, + ) + }) + .collect(); + let receiver = + if PaymentAddress::from_str(data.receiver.as_ref()).is_ok() { + MASP.to_string() + } else { + data.receiver.to_string() + }; + Some((ibc_traces?, receiver)) + } else { + None + }; + + if let Some((ibc_traces, receiver)) = minted_token_info { + // If the trace event has the trace hash and the IBC denom or NFT + // IDs, a token has been minted. The raw IBC trace including the + // port ID, the channel ID and the base token is stored to be + // restored from the trace hash. 
+ for ibc_trace in ibc_traces { + let trace_hash = storage::calc_hash(&ibc_trace); self.ctx .inner .borrow_mut() - .store_ibc_denom(&receiver, &trace_hash, &ibc_denom) + .store_ibc_trace(&receiver, &trace_hash, &ibc_trace) .map_err(|e| { - Error::Denom(format!( - "Writing the IBC denom failed: {}", + Error::Trace(format!( + "Writing the IBC trace failed: {}", + e + )) + })?; + let base_token = if let Some((_, base_token)) = + is_ibc_denom(&ibc_trace) + { + base_token + } else if let Some((_, _, token_id)) = is_nft_trace(&ibc_trace) + { + token_id + } else { + // non-prefixed denom + continue; + }; + self.ctx + .inner + .borrow_mut() + .store_ibc_trace(base_token, trace_hash, &ibc_trace) + .map_err(|e| { + Error::Trace(format!( + "Writing the IBC trace failed: {}", e )) })?; - if let Some((_, base_token)) = is_ibc_denom(&ibc_denom) { - self.ctx - .inner - .borrow_mut() - .store_ibc_denom(base_token, trace_hash, &ibc_denom) - .map_err(|e| { - Error::Denom(format!( - "Writing the IBC denom failed: {}", - e - )) - })?; - } } } Ok(()) } - /// Get the minted IBC denom, the trace hash, and the receiver from IBC - /// events - fn get_minted_token_info( - &self, - ) -> Result, Error> { - let receive_event = self + /// Check the result of receiving the packet from IBC events + fn is_receiving_success(&self) -> Result { + let mut receive_event = self .ctx .inner .borrow() .get_ibc_events(EVENT_TYPE_PACKET) .map_err(|_| { - Error::Denom("Reading the IBC event failed".to_string()) + Error::Trace("Reading the IBC event failed".to_string()) })?; - // The receiving event should be only one in the single IBC transaction - let receiver = match receive_event + if receive_event.is_empty() { + // check the packet is for an NFT + receive_event = self + .ctx + .inner + .borrow() + .get_ibc_events(EVENT_TYPE_NFT_PACKET) + .map_err(|_| { + Error::Trace("Reading the IBC event failed".to_string()) + })?; + } + match receive_event .first() .as_ref() - .and_then(|event| 
event.attributes.get("receiver")) + .and_then(|event| event.attributes.get(EVENT_ATTRIBUTE_SUCCESS)) { - // Check the receiver address - Some(receiver) => Some( - Address::decode(receiver) - .or_else(|_| { - // Replace it with MASP address when the receiver is a - // payment address - PaymentAddress::from_str(receiver).map(|_| MASP) - }) - .map_err(|_| { - Error::Denom(format!( - "Decoding the receiver address failed: {:?}", - receive_event - )) - })? - .to_string(), - ), - None => None, - }; - let denom_event = self - .ctx - .inner - .borrow() - .get_ibc_events(EVENT_TYPE_DENOM_TRACE) - .map_err(|_| { - Error::Denom("Reading the IBC event failed".to_string()) - })?; - // The denom event should be only one in the single IBC transaction - Ok(denom_event.first().as_ref().and_then(|event| { - let trace_hash = event.attributes.get("trace_hash").cloned()?; - let denom = event.attributes.get("denom").cloned()?; - Some((trace_hash, denom, receiver?)) - })) + Some(success) if success == EVENT_VALUE_SUCCESS => Ok(true), + _ => Ok(false), + } } /// Validate according to the message in IBC VP pub fn validate(&self, tx_data: &[u8]) -> Result<(), Error> { + // Use an empty verifiers set placeholder for validation, this is only + // needed in actual txs to addresses whose VPs should be triggered + let verifiers = Rc::new(RefCell::new(BTreeSet::
::new())); + let message = decode_message(tx_data)?; match message { IbcMessage::Transfer(msg) => { - let token_transfer_ctx = - TokenTransferContext::new(self.ctx.inner.clone()); - send_transfer_validate(&self.ctx, &token_transfer_ctx, msg) - .map_err(Error::TokenTransfer) - } - IbcMessage::ShieldedTransfer(msg) => { - let token_transfer_ctx = - TokenTransferContext::new(self.ctx.inner.clone()); + let token_transfer_ctx = TokenTransferContext::new( + self.ctx.inner.clone(), + verifiers.clone(), + ); send_transfer_validate( &self.ctx, &token_transfer_ctx, @@ -244,111 +348,170 @@ where ) .map_err(Error::TokenTransfer) } + IbcMessage::NftTransfer(msg) => { + let nft_transfer_ctx = + NftTransferContext::new(self.ctx.inner.clone()); + send_nft_transfer_validate( + &self.ctx, + &nft_transfer_ctx, + msg.message, + ) + .map_err(Error::NftTransfer) + } + IbcMessage::RecvPacket(msg) => validate( + &self.ctx, + &self.router, + MsgEnvelope::Packet(PacketMsg::Recv(msg.message)), + ) + .map_err(|e| Error::Context(Box::new(e))), + IbcMessage::AckPacket(msg) => validate( + &self.ctx, + &self.router, + MsgEnvelope::Packet(PacketMsg::Ack(msg.message)), + ) + .map_err(|e| Error::Context(Box::new(e))), + IbcMessage::Timeout(msg) => validate( + &self.ctx, + &self.router, + MsgEnvelope::Packet(PacketMsg::Timeout(msg.message)), + ) + .map_err(|e| Error::Context(Box::new(e))), IbcMessage::Envelope(envelope) => { - validate(&self.ctx, &self.router, envelope) + validate(&self.ctx, &self.router, *envelope) .map_err(|e| Error::Context(Box::new(e))) } } } - - /// Handle the MASP transaction if needed - fn handle_masp_tx(&mut self, message: IbcMessage) -> Result<(), Error> { - let shielded_transfer = match message { - IbcMessage::Envelope(MsgEnvelope::Packet(PacketMsg::Recv(_))) => { - let event = self - .ctx - .inner - .borrow() - .get_ibc_events(EVENT_TYPE_PACKET) - .map_err(|_| { - Error::MaspTx( - "Reading the IBC event failed".to_string(), - ) - })?; - // The receiving event should be 
only one in the single IBC - // transaction - match event.first() { - Some(event) => get_shielded_transfer(event) - .map_err(|e| Error::MaspTx(e.to_string()))?, - None => return Ok(()), - } - } - IbcMessage::ShieldedTransfer(msg) => Some(msg.shielded_transfer), - _ => return Ok(()), - }; - if let Some(shielded_transfer) = shielded_transfer { - self.ctx - .inner - .borrow_mut() - .handle_masp_tx( - &shielded_transfer.masp_tx, - shielded_transfer.transfer.key.as_deref(), - ) - .map_err(|_| { - Error::MaspTx("Writing MASP components failed".to_string()) - })?; - } - Ok(()) - } } -/// The different variants of an Ibc message -pub enum IbcMessage { - /// Ibc Envelop - Envelope(MsgEnvelope), - /// Ibc transaprent transfer - Transfer(MsgTransfer), - /// Ibc shielded transfer - ShieldedTransfer(MsgShieldedTransfer), +fn is_ack_successful(ack: &Acknowledgement) -> Result { + let acknowledgement = serde_json::from_slice::( + ack.as_ref(), + ) + .map_err(|e| { + Error::TokenTransfer(TokenTransferError::Other(format!( + "Decoding the acknowledgement failed: {e}" + ))) + })?; + Ok(acknowledgement.is_successful()) } /// Tries to decode transaction data to an `IbcMessage` pub fn decode_message(tx_data: &[u8]) -> Result { // ibc-rs message if let Ok(any_msg) = Any::decode(tx_data) { - if let Ok(transfer_msg) = MsgTransfer::try_from(any_msg.clone()) { - return Ok(IbcMessage::Transfer(transfer_msg)); - } if let Ok(envelope) = MsgEnvelope::try_from(any_msg) { - return Ok(IbcMessage::Envelope(envelope)); + return Ok(IbcMessage::Envelope(Box::new(envelope))); } } - // Message with Transfer for the shielded transfer - if let Ok(msg) = MsgShieldedTransfer::try_from_slice(tx_data) { - return Ok(IbcMessage::ShieldedTransfer(msg)); + // Transfer message with `IbcShieldedTransfer` + if let Ok(msg) = MsgTransfer::try_from_slice(tx_data) { + return Ok(IbcMessage::Transfer(msg)); + } + + // NFT transfer message with `IbcShieldedTransfer` + if let Ok(msg) = 
MsgNftTransfer::try_from_slice(tx_data) { + return Ok(IbcMessage::NftTransfer(msg)); + } + + // Receiving packet message with `IbcShieldedTransfer` + if let Ok(msg) = MsgRecvPacket::try_from_slice(tx_data) { + return Ok(IbcMessage::RecvPacket(msg)); + } + + // Acknowledge packet message with `IbcShieldedTransfer` + if let Ok(msg) = MsgAcknowledgement::try_from_slice(tx_data) { + return Ok(IbcMessage::AckPacket(msg)); + } + // Timeout packet message with `IbcShieldedTransfer` + if let Ok(msg) = MsgTimeout::try_from_slice(tx_data) { + return Ok(IbcMessage::Timeout(msg)); } Err(Error::DecodingData) } +fn received_ibc_trace( + base_trace: impl AsRef, + src_port_id: &PortId, + src_channel_id: &ChannelId, + dest_port_id: &PortId, + dest_channel_id: &ChannelId, +) -> Result { + if *dest_port_id == PortId::transfer() { + let mut prefixed_denom = + base_trace.as_ref().parse().map_err(Error::TokenTransfer)?; + if is_receiver_chain_source( + src_port_id.clone(), + src_channel_id.clone(), + &prefixed_denom, + ) { + let prefix = + TracePrefix::new(src_port_id.clone(), src_channel_id.clone()); + prefixed_denom.remove_trace_prefix(&prefix); + } else { + let prefix = + TracePrefix::new(dest_port_id.clone(), dest_channel_id.clone()); + prefixed_denom.add_trace_prefix(prefix); + } + return Ok(prefixed_denom.to_string()); + } + + if let Some((trace_path, base_class_id, token_id)) = + is_nft_trace(&base_trace) + { + let mut class_id = PrefixedClassId { + trace_path, + base_class_id: base_class_id.parse().map_err(Error::NftTransfer)?, + }; + if is_nft_receiver_chain_source( + src_port_id.clone(), + src_channel_id.clone(), + &class_id, + ) { + let prefix = NftTracePrefix::new( + src_port_id.clone(), + src_channel_id.clone(), + ); + class_id.remove_trace_prefix(&prefix); + } else { + let prefix = NftTracePrefix::new( + dest_port_id.clone(), + dest_channel_id.clone(), + ); + class_id.add_trace_prefix(prefix); + } + let token_id: TokenId = token_id.parse().map_err(Error::NftTransfer)?; + 
return Ok(format!("{class_id}/{token_id}")); + } + + Err(Error::Trace(format!( + "Invalid IBC trace: {}", + base_trace.as_ref() + ))) +} + /// Get the IbcToken from the source/destination ports and channels pub fn received_ibc_token( - ibc_denom: &PrefixedDenom, + ibc_denom: impl AsRef, src_port_id: &PortId, src_channel_id: &ChannelId, dest_port_id: &PortId, dest_channel_id: &ChannelId, ) -> Result { - let mut ibc_denom = ibc_denom.clone(); - if is_receiver_chain_source( - src_port_id.clone(), - src_channel_id.clone(), - &ibc_denom, - ) { - let prefix = - TracePrefix::new(src_port_id.clone(), src_channel_id.clone()); - ibc_denom.remove_trace_prefix(&prefix); - } else { - let prefix = - TracePrefix::new(dest_port_id.clone(), dest_channel_id.clone()); - ibc_denom.add_trace_prefix(prefix); - } - if ibc_denom.trace_path.is_empty() { - Address::decode(ibc_denom.to_string()) - .map_err(|e| Error::Denom(format!("Invalid base denom: {e}"))) + let ibc_trace = received_ibc_trace( + ibc_denom, + src_port_id, + src_channel_id, + dest_port_id, + dest_channel_id, + )?; + if ibc_trace.contains('/') { + Ok(storage::ibc_token(ibc_trace)) } else { - Ok(storage::ibc_token(ibc_denom.to_string())) + Address::decode(ibc_trace) + .map_err(|e| Error::Trace(format!("Invalid base token: {e}"))) } } @@ -357,6 +520,7 @@ pub fn received_ibc_token( pub mod testing { use std::str::FromStr; + use ibc::apps::transfer::types::msgs::transfer::MsgTransfer; use ibc::apps::transfer::types::packet::PacketData; use ibc::apps::transfer::types::{ Amount, BaseDenom, Memo, PrefixedCoin, PrefixedDenom, TracePath, @@ -367,8 +531,7 @@ pub mod testing { use ibc::core::host::types::identifiers::{ChannelId, PortId}; use ibc::core::primitives::Signer; use ibc::primitives::proto::Any; - use ibc::primitives::{Msg, Timestamp}; - use namada_core::ibc::apps::transfer::types::msgs::transfer::MsgTransfer; + use ibc::primitives::{Timestamp, ToProto}; use proptest::prelude::{Just, Strategy}; use proptest::{collection, 
prop_compose, prop_oneof}; diff --git a/crates/ibc/src/parameters.rs b/crates/ibc/src/parameters.rs new file mode 100644 index 0000000000..f09f964aba --- /dev/null +++ b/crates/ibc/src/parameters.rs @@ -0,0 +1,34 @@ +//! IBC system parameters + +use namada_core::borsh::{BorshDeserialize, BorshSerialize}; +use namada_core::token::Amount; +use namada_state::{StorageResult, StorageWrite}; + +#[derive(Clone, Debug, BorshSerialize, BorshDeserialize)] +/// IBC parameters structure +pub struct IbcParameters { + /// Default supply limit of each token + pub default_mint_limit: Amount, + /// Default per-epoch throughput limit of each token + pub default_per_epoch_throughput_limit: Amount, +} + +impl Default for IbcParameters { + fn default() -> Self { + Self { + default_mint_limit: Amount::zero(), + default_per_epoch_throughput_limit: Amount::zero(), + } + } +} + +impl IbcParameters { + /// Initialize IBC parameters into storage + pub fn init_storage(&self, storage: &mut S) -> StorageResult<()> + where + S: StorageWrite, + { + let key = crate::storage::params_key(); + storage.write(&key, self) + } +} diff --git a/crates/ibc/src/storage.rs b/crates/ibc/src/storage.rs index c4aafc947d..e506cfbb0e 100644 --- a/crates/ibc/src/storage.rs +++ b/crates/ibc/src/storage.rs @@ -3,6 +3,7 @@ use std::str::FromStr; use namada_core::address::{Address, InternalAddress, HASH_LEN, SHA_HASH_LEN}; +use namada_core::ibc::apps::nft_transfer::types::{PrefixedClassId, TokenId}; use namada_core::ibc::core::client::types::Height; use namada_core::ibc::core::host::types::identifiers::{ ChannelId, ClientId, ConnectionId, PortId, Sequence, @@ -14,14 +15,26 @@ use namada_core::ibc::core::host::types::path::{ }; use namada_core::ibc::IbcTokenHash; use namada_core::storage::{DbKeySeg, Key, KeySeg}; +use namada_core::token::Amount; +use namada_state::{StorageRead, StorageResult}; use sha2::{Digest, Sha256}; use thiserror::Error; +use crate::parameters::IbcParameters; + const CLIENTS_COUNTER_PREFIX: 
&str = "clients"; const CONNECTIONS_COUNTER_PREFIX: &str = "connections"; const CHANNELS_COUNTER_PREFIX: &str = "channelEnds"; const COUNTER_SEG: &str = "counter"; -const DENOM: &str = "ibc_denom"; +const TRACE: &str = "ibc_trace"; +const NFT_CLASS: &str = "nft_class"; +const NFT_METADATA: &str = "nft_meta"; +const PARAMS: &str = "params"; +const MINT_LIMIT: &str = "mint_limit"; +const MINT: &str = "mint"; +const THROUGHPUT_LIMIT: &str = "throughput_limit"; +const DEPOSIT: &str = "deposit"; +const WITHDRAW: &str = "withdraw"; #[allow(missing_docs)] #[derive(Error, Debug)] @@ -32,10 +45,6 @@ pub enum Error { InvalidKey(String), #[error("Port capability error: {0}")] InvalidPortCapability(String), - #[error("Denom error: {0}")] - Denom(String), - #[error("IBS signer error: {0}")] - IbcSigner(String), } /// IBC storage functions result @@ -208,6 +217,20 @@ pub fn client_update_height_key(client_id: &ClientId) -> Key { ibc_key(path).expect("Creating a key for the ack shouldn't fail") } +/// Returns a key for the NFT class +pub fn nft_class_key(class_id: &PrefixedClassId) -> Key { + let ibc_token = ibc_token(class_id.to_string()); + let path = format!("{NFT_CLASS}/{ibc_token}"); + ibc_key(path).expect("Creating a key for the NFT class shouldn't fail") +} + +/// Returns a key for the NFT metadata +pub fn nft_metadata_key(class_id: &PrefixedClassId, token_id: &TokenId) -> Key { + let ibc_token = ibc_token_for_nft(class_id, token_id); + let path = format!("{NFT_METADATA}/{ibc_token}"); + ibc_key(path).expect("Creating a key for the NFT metadata shouldn't fail") +} + /// Returns a client ID from the given client key `#IBC/clients/` pub fn client_id(key: &Key) -> Result { match &key.segments[..] { @@ -367,12 +390,12 @@ pub fn port_id(key: &Key) -> Result { } } -/// The storage key prefix to get the denom name with the hashed IBC denom. The -/// address is given as string because the given address could be non-Namada -/// token. 
-pub fn ibc_denom_key_prefix(addr: Option) -> Key { +/// The storage key prefix to get the denom/class name with the hashed IBC +/// denom/class. The address is given as string because the given address could +/// be non-Namada token. +pub fn ibc_trace_key_prefix(addr: Option) -> Key { let prefix = Key::from(Address::Internal(InternalAddress::Ibc).to_db_key()) - .push(&DENOM.to_string().to_db_key()) + .push(&TRACE.to_string().to_db_key()) .expect("Cannot obtain a storage key"); if let Some(addr) = addr { @@ -386,11 +409,11 @@ pub fn ibc_denom_key_prefix(addr: Option) -> Key { /// The storage key to get the denom name with the hashed IBC denom. The address /// is given as string because the given address could be non-Namada token. -pub fn ibc_denom_key( +pub fn ibc_trace_key( addr: impl AsRef, token_hash: impl AsRef, ) -> Key { - ibc_denom_key_prefix(Some(addr.as_ref().to_string())) + ibc_trace_key_prefix(Some(addr.as_ref().to_string())) .push(&token_hash.as_ref().to_string().to_db_key()) .expect("Cannot obtain a storage key") } @@ -422,6 +445,14 @@ pub fn ibc_token(denom: impl AsRef) -> Address { Address::Internal(InternalAddress::IbcToken(hash)) } +/// Obtain the IbcToken with the hash from the given NFT class ID and NFT ID +pub fn ibc_token_for_nft( + class_id: &PrefixedClassId, + token_id: &TokenId, +) -> Address { + ibc_token(format!("{class_id}/{token_id}")) +} + /// Returns true if the given key is for IBC pub fn is_ibc_key(key: &Key) -> bool { matches!(&key.segments[0], @@ -429,7 +460,7 @@ pub fn is_ibc_key(key: &Key) -> bool { } /// Returns the owner and the token hash if the given key is the denom key -pub fn is_ibc_denom_key(key: &Key) -> Option<(String, String)> { +pub fn is_ibc_trace_key(key: &Key) -> Option<(String, String)> { match &key.segments[..] 
{ [ DbKeySeg::AddressSeg(addr), @@ -438,7 +469,7 @@ pub fn is_ibc_denom_key(key: &Key) -> Option<(String, String)> { DbKeySeg::StringSeg(hash), ] => { if addr == &Address::Internal(InternalAddress::Ibc) - && prefix == DENOM + && prefix == TRACE { Some((owner.clone(), hash.clone())) } else { @@ -460,3 +491,96 @@ pub fn is_ibc_counter_key(key: &Key) -> bool { || prefix == CHANNELS_COUNTER_PREFIX) && counter == COUNTER_SEG ) } + +/// Returns a key of IBC parameters +pub fn params_key() -> Key { + Key::from(Address::Internal(InternalAddress::Ibc).to_db_key()) + .push(&PARAMS.to_string().to_db_key()) + .expect("Cannot obtain a storage key") +} + +/// Returns a key of the mint limit for the token +pub fn mint_limit_key(token: &Address) -> Key { + Key::from(Address::Internal(InternalAddress::Ibc).to_db_key()) + .push(&MINT_LIMIT.to_string().to_db_key()) + .expect("Cannot obtain a storage key") + // Set as String to avoid checking the token address + .push(&token.to_string().to_db_key()) + .expect("Cannot obtain a storage key") +} + +/// Get the mint limit and the throughput limit for the token. If they don't +/// exist in the storage, the default limits are loaded from IBC parameters +pub fn get_limits( + storage: &S, + token: &Address, +) -> StorageResult<(Amount, Amount)> { + let mint_limit_key = mint_limit_key(token); + let mint_limit: Option = storage.read(&mint_limit_key)?; + let throughput_limit_key = throughput_limit_key(token); + let throughput_limit: Option = + storage.read(&throughput_limit_key)?; + Ok(match (mint_limit, throughput_limit) { + (Some(ml), Some(tl)) => (ml, tl), + _ => { + let params: IbcParameters = storage + .read(¶ms_key())? 
+ .expect("Parameters should be stored"); + ( + mint_limit.unwrap_or(params.default_mint_limit), + throughput_limit + .unwrap_or(params.default_per_epoch_throughput_limit), + ) + } + }) +} + +/// Returns a key of the IBC mint amount for the token +pub fn mint_amount_key(token: &Address) -> Key { + Key::from(Address::Internal(InternalAddress::Ibc).to_db_key()) + .push(&MINT.to_string().to_db_key()) + .expect("Cannot obtain a storage key") + // Set as String to avoid checking the token address + .push(&token.to_string().to_db_key()) + .expect("Cannot obtain a storage key") +} + +/// Returns a key of the per-epoch throughput limit for the token +pub fn throughput_limit_key(token: &Address) -> Key { + Key::from(Address::Internal(InternalAddress::Ibc).to_db_key()) + .push(&THROUGHPUT_LIMIT.to_string().to_db_key()) + .expect("Cannot obtain a storage key") + // Set as String to avoid checking the token address + .push(&token.to_string().to_db_key()) + .expect("Cannot obtain a storage key") +} + +/// Returns a prefix of the per-epoch deposit +pub fn deposit_prefix() -> Key { + Key::from(Address::Internal(InternalAddress::Ibc).to_db_key()) + .push(&DEPOSIT.to_string().to_db_key()) + .expect("Cannot obtain a storage key") +} + +/// Returns a key of the per-epoch deposit for the token +pub fn deposit_key(token: &Address) -> Key { + deposit_prefix() + // Set as String to avoid checking the token address + .push(&token.to_string().to_db_key()) + .expect("Cannot obtain a storage key") +} + +/// Returns a prefix of the per-epoch withdraw +pub fn withdraw_prefix() -> Key { + Key::from(Address::Internal(InternalAddress::Ibc).to_db_key()) + .push(&WITHDRAW.to_string().to_db_key()) + .expect("Cannot obtain a storage key") +} + +/// Returns a key of the per-epoch withdraw for the token +pub fn withdraw_key(token: &Address) -> Key { + withdraw_prefix() + // Set as String to avoid checking the token address + .push(&token.to_string().to_db_key()) + .expect("Cannot obtain a storage key") 
+} diff --git a/crates/light_sdk/Cargo.toml b/crates/light_sdk/Cargo.toml index 6f62c018c9..bad4cde8af 100644 --- a/crates/light_sdk/Cargo.toml +++ b/crates/light_sdk/Cargo.toml @@ -16,6 +16,7 @@ version.workspace = true [features] blocking = ["tokio"] +namada-eth-bridge = ["namada_sdk/namada-eth-bridge"] [dependencies] namada_sdk = { path = "../sdk" } diff --git a/crates/light_sdk/src/reading/asynchronous/pos.rs b/crates/light_sdk/src/reading/asynchronous/pos.rs index 9330db274d..c2e67c3f0a 100644 --- a/crates/light_sdk/src/reading/asynchronous/pos.rs +++ b/crates/light_sdk/src/reading/asynchronous/pos.rs @@ -1,5 +1,6 @@ -use std::collections::{BTreeSet, HashMap, HashSet}; +use std::collections::BTreeSet; +use namada_sdk::collections::{HashMap, HashSet}; use namada_sdk::key::common; use namada_sdk::proof_of_stake::types::{ BondsAndUnbondsDetails, CommissionPair, ValidatorMetaData, ValidatorState, diff --git a/crates/light_sdk/src/reading/blocking/pos.rs b/crates/light_sdk/src/reading/blocking/pos.rs index 4a070d36f8..231adf5349 100644 --- a/crates/light_sdk/src/reading/blocking/pos.rs +++ b/crates/light_sdk/src/reading/blocking/pos.rs @@ -1,6 +1,7 @@ -use std::collections::{BTreeSet, HashMap, HashSet}; +use std::collections::BTreeSet; use namada_sdk::address::Address; +use namada_sdk::collections::{HashMap, HashSet}; use namada_sdk::key::common; use namada_sdk::proof_of_stake::types::{ BondsAndUnbondsDetails, CommissionPair, ValidatorMetaData, ValidatorState, diff --git a/crates/light_sdk/src/transaction/account.rs b/crates/light_sdk/src/transaction/account.rs index fce8794bb6..b037238a2e 100644 --- a/crates/light_sdk/src/transaction/account.rs +++ b/crates/light_sdk/src/transaction/account.rs @@ -4,7 +4,7 @@ use namada_sdk::key::common; use namada_sdk::storage::Epoch; use namada_sdk::token::DenominatedAmount; use namada_sdk::tx::data::GasLimit; -use namada_sdk::tx::{Signature, Tx, TxError}; +use namada_sdk::tx::{Authorization, Tx, TxError}; use 
super::{attach_fee, attach_fee_signature, GlobalArgs}; use crate::transaction; @@ -90,7 +90,7 @@ impl InitAccount { } /// Validate this wrapper transaction - pub fn validate_tx(&self) -> Result, TxError> { + pub fn validate_tx(&self) -> Result, TxError> { self.0.validate_tx() } } @@ -162,7 +162,7 @@ impl RevealPk { } /// Validate this wrapper transaction - pub fn validate_tx(&self) -> Result, TxError> { + pub fn validate_tx(&self) -> Result, TxError> { self.0.validate_tx() } } @@ -246,7 +246,7 @@ impl UpdateAccount { } /// Validate this wrapper transaction - pub fn validate_tx(&self) -> Result, TxError> { + pub fn validate_tx(&self) -> Result, TxError> { self.0.validate_tx() } } diff --git a/crates/light_sdk/src/transaction/bridge.rs b/crates/light_sdk/src/transaction/bridge.rs index c1e01238c5..a8fb35c2ab 100644 --- a/crates/light_sdk/src/transaction/bridge.rs +++ b/crates/light_sdk/src/transaction/bridge.rs @@ -5,7 +5,7 @@ use namada_sdk::key::common; use namada_sdk::storage::Epoch; use namada_sdk::token::DenominatedAmount; use namada_sdk::tx::data::GasLimit; -use namada_sdk::tx::{Signature, Tx, TxError}; +use namada_sdk::tx::{Authorization, Tx, TxError}; use super::{attach_fee, attach_fee_signature, GlobalArgs}; use crate::transaction; @@ -85,7 +85,7 @@ impl BridgeTransfer { } /// Validate this wrapper transaction - pub fn validate_tx(&self) -> Result, TxError> { + pub fn validate_tx(&self) -> Result, TxError> { self.0.validate_tx() } } diff --git a/crates/light_sdk/src/transaction/governance.rs b/crates/light_sdk/src/transaction/governance.rs index a0ac5e96cc..e23e1e0d0c 100644 --- a/crates/light_sdk/src/transaction/governance.rs +++ b/crates/light_sdk/src/transaction/governance.rs @@ -5,7 +5,7 @@ use namada_sdk::key::common; use namada_sdk::storage::Epoch; use namada_sdk::token::DenominatedAmount; use namada_sdk::tx::data::GasLimit; -use namada_sdk::tx::{Signature, Tx, TxError}; +use namada_sdk::tx::{Authorization, Tx, TxError}; use super::{attach_fee, 
attach_fee_signature, GlobalArgs}; use crate::transaction; @@ -20,23 +20,21 @@ impl InitProposal { /// Build a raw InitProposal transaction from the given parameters #[allow(clippy::too_many_arguments)] pub fn new( - id: u64, content: Hash, author: Address, r#type: ProposalType, voting_start_epoch: Epoch, voting_end_epoch: Epoch, - grace_epoch: Epoch, + activation_epoch: Epoch, args: GlobalArgs, ) -> Self { let init_proposal = namada_sdk::governance::InitProposalData { - id, content, author, r#type, voting_start_epoch, voting_end_epoch, - grace_epoch, + activation_epoch, }; Self(transaction::build_tx( @@ -178,7 +176,7 @@ impl VoteProposal { } /// Validate this wrapper transaction - pub fn validate_tx(&self) -> Result, TxError> { + pub fn validate_tx(&self) -> Result, TxError> { self.0.validate_tx() } } diff --git a/crates/light_sdk/src/transaction/ibc.rs b/crates/light_sdk/src/transaction/ibc.rs index 2ecc9b3825..32afdb4c30 100644 --- a/crates/light_sdk/src/transaction/ibc.rs +++ b/crates/light_sdk/src/transaction/ibc.rs @@ -3,13 +3,13 @@ use std::str::FromStr; use namada_sdk::address::Address; use namada_sdk::hash::Hash; pub use namada_sdk::ibc::apps::transfer::types::msgs::transfer::MsgTransfer; -use namada_sdk::ibc::primitives::Msg; +use namada_sdk::ibc::primitives::ToProto; use namada_sdk::key::common; use namada_sdk::storage::Epoch; use namada_sdk::time::DateTimeUtc; use namada_sdk::token::DenominatedAmount; use namada_sdk::tx::data::GasLimit; -use namada_sdk::tx::{Signature, Tx, TxError}; +use namada_sdk::tx::{Authorization, Tx, TxError}; use super::{attach_fee, attach_fee_signature, GlobalArgs}; use crate::transaction; @@ -94,7 +94,7 @@ impl IbcTransfer { } /// Validate this wrapper transaction - pub fn validate_tx(&self) -> Result, TxError> { + pub fn validate_tx(&self) -> Result, TxError> { self.0.validate_tx() } } diff --git a/crates/light_sdk/src/transaction/mod.rs b/crates/light_sdk/src/transaction/mod.rs index 598a665381..6f8aebd5da 100644 --- 
a/crates/light_sdk/src/transaction/mod.rs +++ b/crates/light_sdk/src/transaction/mod.rs @@ -10,7 +10,7 @@ use namada_sdk::storage::Epoch; use namada_sdk::time::DateTimeUtc; use namada_sdk::token::DenominatedAmount; use namada_sdk::tx::data::{Fee, GasLimit}; -use namada_sdk::tx::{Section, Signature, Signer, Tx}; +use namada_sdk::tx::{Authorization, Section, Signer, Tx}; pub mod account; pub mod bridge; @@ -53,7 +53,7 @@ pub(in crate::transaction) fn get_sign_bytes(tx: &Tx) -> Vec { pub(in crate::transaction) fn get_wrapper_sign_bytes(tx: &Tx) -> Hash { let targets = tx.sechashes(); // Commit to the given targets - let partial = Signature { + let partial = Authorization { targets, signer: Signer::PubKeys(vec![]), signatures: BTreeMap::new(), @@ -67,7 +67,7 @@ pub(in crate::transaction) fn attach_raw_signatures( signature: common::Signature, ) -> Tx { tx.protocol_filter(); - tx.add_section(Section::Signature(Signature { + tx.add_section(Section::Authorization(Authorization { targets: vec![tx.raw_header_hash()], signer: Signer::PubKeys(vec![signer]), signatures: [(0, signature)].into_iter().collect(), @@ -102,7 +102,7 @@ pub(in crate::transaction) fn attach_fee_signature( signature: common::Signature, ) -> Tx { tx.protocol_filter(); - tx.add_section(Section::Signature(Signature { + tx.add_section(Section::Authorization(Authorization { targets: tx.sechashes(), signer: Signer::PubKeys(vec![signer]), signatures: [(0, signature)].into_iter().collect(), diff --git a/crates/light_sdk/src/transaction/pgf.rs b/crates/light_sdk/src/transaction/pgf.rs index 48afb38a68..1a492b331b 100644 --- a/crates/light_sdk/src/transaction/pgf.rs +++ b/crates/light_sdk/src/transaction/pgf.rs @@ -1,13 +1,12 @@ -use std::collections::HashMap; - use namada_sdk::address::Address; +use namada_sdk::collections::HashMap; use namada_sdk::dec::Dec; use namada_sdk::hash::Hash; use namada_sdk::key::common; use namada_sdk::storage::Epoch; use namada_sdk::token::DenominatedAmount; use 
namada_sdk::tx::data::GasLimit; -use namada_sdk::tx::{Signature, Tx, TxError}; +use namada_sdk::tx::{Authorization, Tx, TxError}; use super::{attach_fee, attach_fee_signature, GlobalArgs}; use crate::transaction; @@ -81,7 +80,7 @@ impl ResignSteward { } /// Validate this wrapper transaction - pub fn validate_tx(&self) -> Result, TxError> { + pub fn validate_tx(&self) -> Result, TxError> { self.0.validate_tx() } } @@ -163,7 +162,7 @@ impl UpdateStewardCommission { } /// Validate this wrapper transaction - pub fn validate_tx(&self) -> Result, TxError> { + pub fn validate_tx(&self) -> Result, TxError> { self.0.validate_tx() } } diff --git a/crates/light_sdk/src/transaction/pos.rs b/crates/light_sdk/src/transaction/pos.rs index 39ae504c7a..25b7a19f37 100644 --- a/crates/light_sdk/src/transaction/pos.rs +++ b/crates/light_sdk/src/transaction/pos.rs @@ -7,7 +7,7 @@ use namada_sdk::token; use namada_sdk::token::{Amount, DenominatedAmount}; use namada_sdk::tx::data::pos::Redelegation; use namada_sdk::tx::data::GasLimit; -use namada_sdk::tx::{Signature, Tx, TxError}; +use namada_sdk::tx::{Authorization, Tx, TxError}; use super::{attach_fee, attach_fee_signature, GlobalArgs}; use crate::transaction; @@ -76,7 +76,7 @@ impl Bond { } /// Validate this wrapper transaction - pub fn validate_tx(&self) -> Result, TxError> { + pub fn validate_tx(&self) -> Result, TxError> { self.0.validate_tx() } } @@ -132,7 +132,7 @@ impl Unbond { } /// Validate this wrapper transaction - pub fn validate_tx(&self) -> Result, TxError> { + pub fn validate_tx(&self) -> Result, TxError> { self.0.validate_tx() } } @@ -207,7 +207,7 @@ impl BecomeValidator { } /// Validate this wrapper transaction - pub fn validate_tx(&self) -> Result, TxError> { + pub fn validate_tx(&self) -> Result, TxError> { self.0.validate_tx() } } @@ -252,7 +252,7 @@ impl UnjailValidator { } /// Validate this wrapper transaction - pub fn validate_tx(&self) -> Result, TxError> { + pub fn validate_tx(&self) -> Result, TxError> { 
self.0.validate_tx() } } @@ -297,7 +297,7 @@ impl DeactivateValidator { } /// Validate this wrapper transaction - pub fn validate_tx(&self) -> Result, TxError> { + pub fn validate_tx(&self) -> Result, TxError> { self.0.validate_tx() } } @@ -368,7 +368,7 @@ impl ReactivateValidator { } /// Validate this wrapper transaction - pub fn validate_tx(&self) -> Result, TxError> { + pub fn validate_tx(&self) -> Result, TxError> { self.0.validate_tx() } } @@ -446,7 +446,7 @@ impl ClaimRewards { } /// Validate this wrapper transaction - pub fn validate_tx(&self) -> Result, TxError> { + pub fn validate_tx(&self) -> Result, TxError> { self.0.validate_tx() } } @@ -537,7 +537,7 @@ impl ChangeMetaData { } /// Validate this wrapper transaction - pub fn validate_tx(&self) -> Result, TxError> { + pub fn validate_tx(&self) -> Result, TxError> { self.0.validate_tx() } } @@ -617,7 +617,7 @@ impl ChangeConsensusKey { } /// Validate this wrapper transaction - pub fn validate_tx(&self) -> Result, TxError> { + pub fn validate_tx(&self) -> Result, TxError> { self.0.validate_tx() } } @@ -693,7 +693,7 @@ impl ChangeCommission { } /// Validate this wrapper transaction - pub fn validate_tx(&self) -> Result, TxError> { + pub fn validate_tx(&self) -> Result, TxError> { self.0.validate_tx() } } @@ -771,7 +771,7 @@ impl Withdraw { } /// Validate this wrapper transaction - pub fn validate_tx(&self) -> Result, TxError> { + pub fn validate_tx(&self) -> Result, TxError> { self.0.validate_tx() } } @@ -855,7 +855,7 @@ impl Redelegate { } /// Validate this wrapper transaction - pub fn validate_tx(&self) -> Result, TxError> { + pub fn validate_tx(&self) -> Result, TxError> { self.0.validate_tx() } } diff --git a/crates/light_sdk/src/transaction/transfer.rs b/crates/light_sdk/src/transaction/transfer.rs index b548665086..3a5d188a21 100644 --- a/crates/light_sdk/src/transaction/transfer.rs +++ b/crates/light_sdk/src/transaction/transfer.rs @@ -5,7 +5,7 @@ use namada_sdk::key::common; use 
namada_sdk::storage::Epoch; use namada_sdk::token::DenominatedAmount; use namada_sdk::tx::data::GasLimit; -use namada_sdk::tx::{Signature, Tx, TxError}; +use namada_sdk::tx::{Authorization, Tx, TxError}; use super::{attach_fee, attach_fee_signature, GlobalArgs}; use crate::transaction; @@ -96,7 +96,7 @@ impl Transfer { } /// Validate this wrapper transaction - pub fn validate_tx(&self) -> Result, TxError> { + pub fn validate_tx(&self) -> Result, TxError> { self.0.validate_tx() } } diff --git a/crates/macros/src/lib.rs b/crates/macros/src/lib.rs index 0fd7147598..d0dae31a04 100644 --- a/crates/macros/src/lib.rs +++ b/crates/macros/src/lib.rs @@ -11,17 +11,10 @@ use proc_macro2::{Span as Span2, Span, TokenStream as TokenStream2}; use quote::{quote, ToTokens}; use sha2::Digest; use syn::punctuated::Punctuated; -use syn::{ - parse_macro_input, ExprAssign, FnArg, ItemEnum, ItemFn, ItemStruct, - LitByte, Pat, -}; +use syn::{parse_macro_input, ItemEnum, ItemFn, ItemStruct, LitByte}; /// Generate WASM binding for a transaction main entrypoint function. /// -/// It expects an attribute in the form: `gas = u64`, so that a call to the gas -/// meter can be injected as the first instruction of the transaction to account -/// for the whitelisted gas amount. 
-/// /// This macro expects a function with signature: /// /// ```compiler_fail @@ -31,42 +24,19 @@ use syn::{ /// ) -> TxResult /// ``` #[proc_macro_attribute] -pub fn transaction(attr: TokenStream, input: TokenStream) -> TokenStream { +pub fn transaction(_attr: TokenStream, input: TokenStream) -> TokenStream { let ast = parse_macro_input!(input as ItemFn); - let ItemFn { - attrs, - vis, - sig, - block, - } = ast; - let stmts = &block.stmts; - let ident = &sig.ident; - let attr_ast = parse_macro_input!(attr as ExprAssign); - let gas = attr_ast.right; - let ctx = match sig.inputs.first() { - Some(FnArg::Typed(pat_type)) => { - if let Pat::Ident(pat_ident) = pat_type.pat.as_ref() { - &pat_ident.ident - } else { - panic!("Unexpected token, expected ctx ident") - } - } - _ => panic!("Unexpected token, expected ctx ident"), - }; + let ident = &ast.sig.ident; let gen = quote! { // Use `wee_alloc` as the global allocator. #[global_allocator] static ALLOC: wee_alloc::WeeAlloc = wee_alloc::WeeAlloc::INIT; - #(#attrs)* #vis #sig { - // Consume the whitelisted gas - #ctx.charge_gas(#gas)?; - #(#stmts)* - } + #ast // The module entrypoint callable by wasm runtime #[no_mangle] - extern "C" fn _apply_tx(tx_data_ptr: u64, tx_data_len: u64) { + extern "C" fn _apply_tx(tx_data_ptr: u64, tx_data_len: u64) -> u64 { let slice = unsafe { core::slice::from_raw_parts( tx_data_ptr as *const u8, @@ -82,10 +52,15 @@ pub fn transaction(attr: TokenStream, input: TokenStream) -> TokenStream { // to "fake" it. 
let mut ctx = unsafe { namada_tx_prelude::Ctx::new() }; - if let Err(err) = #ident(&mut ctx, tx_data) { - namada_tx_prelude::debug_log!("Transaction error: {}", err); - // crash the transaction to abort - panic!(); + match #ident(&mut ctx, tx_data) { + Ok(()) => 1, + Err(err) => { + namada_tx_prelude::debug_log!("Transaction error: {err}"); + // TODO: pass some proper error from txs, instead of a string + let err = err.to_string().serialize_to_vec(); + ctx.yield_value(err); + 0 + }, } } }; @@ -94,10 +69,6 @@ pub fn transaction(attr: TokenStream, input: TokenStream) -> TokenStream { /// Generate WASM binding for validity predicate main entrypoint function. /// -/// It expects an attribute in the form: `gas = u64`, so that a call to the gas -/// meter can be injected as the first instruction of the validity predicate to -/// account for the whitelisted gas amount. -/// /// This macro expects a function with signature: /// /// ```compiler_fail @@ -111,40 +82,17 @@ pub fn transaction(attr: TokenStream, input: TokenStream) -> TokenStream { /// ``` #[proc_macro_attribute] pub fn validity_predicate( - attr: TokenStream, + _attr: TokenStream, input: TokenStream, ) -> TokenStream { let ast = parse_macro_input!(input as ItemFn); - let ItemFn { - attrs, - vis, - sig, - block, - } = ast; - let stmts = &block.stmts; - let ident = &sig.ident; - let attr_ast = parse_macro_input!(attr as ExprAssign); - let gas = attr_ast.right; - let ctx = match sig.inputs.first() { - Some(FnArg::Typed(pat_type)) => { - if let Pat::Ident(pat_ident) = pat_type.pat.as_ref() { - &pat_ident.ident - } else { - panic!("Unexpected token, expected ctx ident") - } - } - _ => panic!("Unexpected token, expected ctx ident"), - }; + let ident = &ast.sig.ident; let gen = quote! { // Use `wee_alloc` as the global allocator. 
#[global_allocator] static ALLOC: wee_alloc::WeeAlloc = wee_alloc::WeeAlloc::INIT; - #(#attrs)* #vis #sig { - // Consume the whitelisted gas - #ctx.charge_gas(#gas)?; - #(#stmts)* - } + #ast // The module entrypoint callable by wasm runtime #[no_mangle] @@ -198,10 +146,11 @@ pub fn validity_predicate( // run validation with the concrete type(s) match #ident(&ctx, tx_data, addr, keys_changed, verifiers) { - Ok(true) => 1, - Ok(false) => 0, + Ok(()) => 1, Err(err) => { - namada_vp_prelude::debug_log!("Validity predicate error: {}", err); + namada_vp_prelude::debug_log!("Validity predicate error: {err}"); + let err = err.serialize_to_vec(); + ctx.yield_value(err); 0 }, } diff --git a/crates/merkle_tree/src/lib.rs b/crates/merkle_tree/src/lib.rs index 85e1603937..180e203fdb 100644 --- a/crates/merkle_tree/src/lib.rs +++ b/crates/merkle_tree/src/lib.rs @@ -149,6 +149,8 @@ pub enum StoreType { PoS, /// For the Ethereum bridge Pool transfers BridgePool, + /// For the commit only data + CommitData, } /// Backing storage for merkle trees @@ -163,6 +165,8 @@ pub enum Store { PoS(SmtStore), /// For the Ethereum bridge Pool transfers BridgePool(BridgePoolStore), + /// For the commit only data + CommitData, } impl Store { @@ -174,6 +178,7 @@ impl Store { Self::Ibc(store) => StoreRef::Ibc(store), Self::PoS(store) => StoreRef::PoS(store), Self::BridgePool(store) => StoreRef::BridgePool(store), + Self::CommitData => StoreRef::CommitData, } } } @@ -190,6 +195,8 @@ pub enum StoreRef<'a> { PoS(&'a SmtStore), /// For the Ethereum bridge Pool transfers BridgePool(&'a BridgePoolStore), + /// For commit only data + CommitData, } impl<'a> StoreRef<'a> { @@ -201,6 +208,7 @@ impl<'a> StoreRef<'a> { Self::Ibc(store) => Store::Ibc(store.to_owned()), Self::PoS(store) => Store::PoS(store.to_owned()), Self::BridgePool(store) => Store::BridgePool(store.to_owned()), + Self::CommitData => Store::CommitData, } } @@ -212,6 +220,7 @@ impl<'a> StoreRef<'a> { Self::Ibc(store) => 
store.serialize_to_vec(), Self::PoS(store) => store.serialize_to_vec(), Self::BridgePool(store) => store.serialize_to_vec(), + Self::CommitData => vec![], } } } @@ -219,23 +228,25 @@ impl<'a> StoreRef<'a> { impl StoreType { /// Get an iterator for the base tree and subtrees pub fn iter() -> std::slice::Iter<'static, Self> { - static SUB_TREE_TYPES: [StoreType; 5] = [ + static SUB_TREE_TYPES: [StoreType; 6] = [ StoreType::Base, StoreType::Account, StoreType::PoS, StoreType::Ibc, StoreType::BridgePool, + StoreType::CommitData, ]; SUB_TREE_TYPES.iter() } /// Get an iterator for subtrees pub fn iter_subtrees() -> std::slice::Iter<'static, Self> { - static SUB_TREE_TYPES: [StoreType; 4] = [ + static SUB_TREE_TYPES: [StoreType; 5] = [ StoreType::Account, StoreType::PoS, StoreType::Ibc, StoreType::BridgePool, + StoreType::CommitData, ]; SUB_TREE_TYPES.iter() } @@ -281,6 +292,9 @@ impl StoreType { _ => Ok((StoreType::Account, key.clone())), } } + Some(DbKeySeg::StringSeg(data)) if data.eq("commit_data") => { + Ok((StoreType::CommitData, key.clone())) + } // use the same key for Account _ => Ok((StoreType::Account, key.clone())), } @@ -310,6 +324,7 @@ impl StoreType { Self::Ibc => Ok(Store::Ibc(decode(bytes)?)), Self::PoS => Ok(Store::PoS(decode(bytes)?)), Self::BridgePool => Ok(Store::BridgePool(decode(bytes)?)), + Self::CommitData => Ok(Store::CommitData), } } } @@ -324,6 +339,7 @@ impl FromStr for StoreType { "ibc" => Ok(StoreType::Ibc), "pos" => Ok(StoreType::PoS), "eth_bridge_pool" => Ok(StoreType::BridgePool), + "commit_data" => Ok(StoreType::CommitData), _ => Err(Error::StoreType(s.to_string())), } } @@ -337,26 +353,43 @@ impl fmt::Display for StoreType { StoreType::Ibc => write!(f, "ibc"), StoreType::PoS => write!(f, "pos"), StoreType::BridgePool => write!(f, "eth_bridge_pool"), + StoreType::CommitData => write!(f, "commit_data"), } } } -/// Get the key prefix with which the base root and store are stored in the +/// Get the key prefix with which the tree root and 
store are stored in the /// storage -pub fn base_tree_key_prefix(height: BlockHeight) -> Key { +pub fn tree_key_prefix_with_height(st: &StoreType, height: BlockHeight) -> Key { Key::from(height.to_db_key()) .with_segment("tree".to_owned()) - .with_segment(StoreType::Base.to_string()) + .with_segment(st.to_string()) } -/// Get the key prefix with which the subtree root and store are stored in the +/// Get the key prefix with which the tree root and store are stored in the /// storage -pub fn subtree_key_prefix(st: &StoreType, epoch: Epoch) -> Key { +pub fn tree_key_prefix_with_epoch(st: &StoreType, epoch: Epoch) -> Key { Key::from(epoch.to_db_key()) .with_segment("tree".to_owned()) .with_segment(st.to_string()) } +/// Fake merkle tree just to commit extra data to the merkle tree +#[derive(Default)] +pub struct CommitDataRoot(Hash); + +impl From for CommitDataRoot { + fn from(value: Hash) -> Self { + Self(value) + } +} + +impl CommitDataRoot { + pub fn get_commit_data_key() -> Key { + Key::parse("commit_data").expect("Should be able to parse the key.") + } +} + /// Merkle tree storage #[derive(Default)] pub struct MerkleTree { @@ -365,6 +398,7 @@ pub struct MerkleTree { ibc: Amt, pos: Smt, bridge_pool: BridgePoolTree, + commit_data: CommitDataRoot, } impl core::fmt::Debug for MerkleTree { @@ -385,12 +419,15 @@ impl MerkleTree { let pos = Smt::new(stores.pos.0.into(), stores.pos.1); let bridge_pool = BridgePoolTree::new(stores.bridge_pool.0, stores.bridge_pool.1); + let commit_data = stores.commit.into(); + let tree = Self { base, account, ibc, pos, bridge_pool, + commit_data, }; // validate @@ -402,15 +439,19 @@ impl MerkleTree { let pos_root = tree.base.get(&pos_key.into())?; let bp_key = H::hash(StoreType::BridgePool.to_string()); let bp_root = tree.base.get(&bp_key.into())?; + let commit_data_key = H::hash(StoreType::CommitData.to_string()); + let commit_data_root = tree.base.get(&commit_data_key.into())?; if tree.base.root().is_zero() && 
tree.account.root().is_zero() && tree.ibc.root().is_zero() && tree.pos.root().is_zero() && tree.bridge_pool.root().is_zero() + && tree.commit_data.0.is_zero() || (account_root == tree.account.root().into() && ibc_root == tree.ibc.root().into() && pos_root == tree.pos.root().into() - && bp_root == tree.bridge_pool.root().into()) + && bp_root == tree.bridge_pool.root().into() + && commit_data_root == tree.commit_data.0) { Ok(tree) } else { @@ -428,12 +469,15 @@ impl MerkleTree { let pos = Smt::new(stores.pos.0.into(), stores.pos.1); let bridge_pool = BridgePoolTree::new(stores.bridge_pool.0, stores.bridge_pool.1); + let commit_data = stores.commit.into(); + Self { base, account, ibc, pos, bridge_pool, + commit_data, } } @@ -444,6 +488,7 @@ impl MerkleTree { StoreType::Ibc => Box::new(&self.ibc), StoreType::PoS => Box::new(&self.pos), StoreType::BridgePool => Box::new(&self.bridge_pool), + StoreType::CommitData => Box::new(&self.commit_data), } } @@ -457,6 +502,7 @@ impl MerkleTree { StoreType::Ibc => Box::new(&mut self.ibc), StoreType::PoS => Box::new(&mut self.pos), StoreType::BridgePool => Box::new(&mut self.bridge_pool), + StoreType::CommitData => Box::new(&mut self.commit_data), } } @@ -495,6 +541,14 @@ impl MerkleTree { self.update_tree(&store_type, &sub_key, value) } + /// Update the commit data subtree + pub fn update_commit_data( + &mut self, + value: impl AsRef<[u8]>, + ) -> Result<()> { + self.update(&CommitDataRoot::get_commit_data_key(), value) + } + /// Delete the value corresponding to the given key pub fn delete(&mut self, key: &Key) -> Result<()> { let (store_type, sub_key) = StoreType::sub_key(key)?; @@ -527,6 +581,7 @@ impl MerkleTree { self.bridge_pool.root().into(), self.bridge_pool.store(), ), + commit: self.commit_data.0, } } @@ -635,6 +690,12 @@ impl From for MerkleRoot { } } +impl From for MerkleRoot { + fn from(root: Hash) -> Self { + Self(root.0) + } +} + impl From<&H256> for MerkleRoot { fn from(root: &H256) -> Self { let root = *root; @@ 
-674,6 +735,7 @@ pub struct MerkleTreeStoresRead { ibc: (Hash, AmtStore), pos: (Hash, SmtStore), bridge_pool: (KeccakHash, BridgePoolStore), + commit: Hash, } impl MerkleTreeStoresRead { @@ -685,6 +747,7 @@ impl MerkleTreeStoresRead { StoreType::Ibc => self.ibc.0 = root, StoreType::PoS => self.pos.0 = root, StoreType::BridgePool => self.bridge_pool.0 = root.into(), + StoreType::CommitData => self.commit = root, } } @@ -696,6 +759,7 @@ impl MerkleTreeStoresRead { Store::Ibc(store) => self.ibc.1 = store, Store::PoS(store) => self.pos.1 = store, Store::BridgePool(store) => self.bridge_pool.1 = store, + Store::CommitData => (), } } @@ -707,6 +771,7 @@ impl MerkleTreeStoresRead { StoreType::Ibc => StoreRef::Ibc(&self.ibc.1), StoreType::PoS => StoreRef::PoS(&self.pos.1), StoreType::BridgePool => StoreRef::BridgePool(&self.bridge_pool.1), + StoreType::CommitData => StoreRef::CommitData, } } @@ -718,6 +783,7 @@ impl MerkleTreeStoresRead { StoreType::Ibc => self.ibc.0, StoreType::PoS => self.pos.0, StoreType::BridgePool => Hash(self.bridge_pool.0.0), + StoreType::CommitData => Hash(self.commit.0), } } } @@ -729,6 +795,7 @@ pub struct MerkleTreeStoresWrite<'a> { ibc: (Hash, &'a AmtStore), pos: (Hash, &'a SmtStore), bridge_pool: (Hash, &'a BridgePoolStore), + commit: Hash, } impl<'a> MerkleTreeStoresWrite<'a> { @@ -740,6 +807,7 @@ impl<'a> MerkleTreeStoresWrite<'a> { StoreType::Ibc => &self.ibc.0, StoreType::PoS => &self.pos.0, StoreType::BridgePool => &self.bridge_pool.0, + StoreType::CommitData => &self.commit, } } @@ -751,6 +819,7 @@ impl<'a> MerkleTreeStoresWrite<'a> { StoreType::Ibc => StoreRef::Ibc(self.ibc.1), StoreType::PoS => StoreRef::PoS(self.pos.1), StoreType::BridgePool => StoreRef::BridgePool(self.bridge_pool.1), + StoreType::CommitData => StoreRef::CommitData, } } } @@ -1003,6 +1072,50 @@ impl<'a> SubTreeWrite for &'a mut BridgePoolTree { } } +impl<'a> SubTreeRead for &'a CommitDataRoot { + fn root(&self) -> MerkleRoot { + self.0.into() + } + + fn 
subtree_has_key(&self, key: &Key) -> Result { + Ok(*key == CommitDataRoot::get_commit_data_key()) + } + + fn subtree_get(&self, key: &Key) -> Result> { + if self.subtree_has_key(key).unwrap() { + Ok(self.0.to_vec()) + } else { + Err(Error::MerkleTree( + "Invalid key for commit data subtree.".to_string(), + )) + } + } + + fn subtree_membership_proof( + &self, + _keys: &[Key], + _values: Vec, + ) -> Result { + unimplemented!("Commit data subspace hold only a single hash value.") + } +} + +impl<'a> SubTreeWrite for &'a mut CommitDataRoot { + fn subtree_update( + &mut self, + _key: &Key, + value: StorageBytes, + ) -> Result { + self.0 = Hash::sha256(value); + Ok(self.0) + } + + fn subtree_delete(&mut self, _key: &Key) -> Result { + self.0 = Hash::default(); + Ok(self.0) + } +} + #[cfg(test)] mod test { use ics23::HostFunctionsManager; diff --git a/crates/migrations/src/lib.rs b/crates/migrations/src/lib.rs index 5c3e0c7dd2..e94caeba96 100644 --- a/crates/migrations/src/lib.rs +++ b/crates/migrations/src/lib.rs @@ -1,3 +1,4 @@ +#![allow(clippy::disallowed_types)] use std::collections::HashMap; use std::sync::{Mutex, OnceLock}; diff --git a/crates/namada/Cargo.toml b/crates/namada/Cargo.toml index c09842abb3..7d3538cab5 100644 --- a/crates/namada/Cargo.toml +++ b/crates/namada/Cargo.toml @@ -50,6 +50,7 @@ http-client = ["tendermint-rpc/http-client"] testing = [ "namada_core/testing", "namada_ethereum_bridge/testing", + "namada_parameters/testing", "namada_proof_of_stake/testing", "namada_sdk/testing", "namada_state/testing", @@ -65,6 +66,11 @@ namada-sdk = [ "namada_sdk/rand", ] +namada-eth-bridge = [ + "namada_ethereum_bridge/namada-eth-bridge", + "namada_sdk/namada-eth-bridge", +] + multicore = [ "masp_proofs/multicore", "namada_sdk/multicore", @@ -77,6 +83,7 @@ migrations = [ "namada_migrations", "linkme", ] +benches = ["namada_core/benches", "namada_state/benches"] [dependencies] namada_account = { path = "../account" } @@ -141,14 +148,12 @@ tiny-bip39.workspace = 
true tiny-hderive.workspace = true toml.workspace = true tracing.workspace = true -wasm-instrument = { version = "0.4.0", features = [ - "sign_ext", -], optional = true } -wasmer = { git = "https://github.com/heliaxdev/wasmer", rev = "255054f7f58b7b4a525f2fee6b9b86422d1ca15b", optional = true } +wasm-instrument = { workspace = true, optional = true } +wasmer = { workspace = true, optional = true } wasmer-cache = { git = "https://github.com/heliaxdev/wasmer", rev = "255054f7f58b7b4a525f2fee6b9b86422d1ca15b", optional = true } -wasmer-compiler-singlepass = { git = "https://github.com/heliaxdev/wasmer", rev = "255054f7f58b7b4a525f2fee6b9b86422d1ca15b", optional = true } +wasmer-compiler-singlepass = { workspace = true, optional = true } wasmer-engine-dylib = { git = "https://github.com/heliaxdev/wasmer", rev = "255054f7f58b7b4a525f2fee6b9b86422d1ca15b", optional = true } -wasmer-engine-universal = { git = "https://github.com/heliaxdev/wasmer", rev = "255054f7f58b7b4a525f2fee6b9b86422d1ca15b", optional = true } +wasmer-engine-universal = { workspace = true, optional = true } wasmer-vm = { git = "https://github.com/heliaxdev/wasmer", rev = "255054f7f58b7b4a525f2fee6b9b86422d1ca15b", optional = true } # Greater versions break in `test_tx_stack_limiter` and `test_vp_stack_limiter` wat = "=1.0.71" @@ -166,6 +171,9 @@ wasmtimer = "0.2.0" namada_core = { path = "../core", default-features = false, features = [ "testing", ] } +namada_parameters = { path = "../parameters", default-features = false, features = [ + "testing", +] } namada_ethereum_bridge = { path = "../ethereum_bridge", default-features = false, features = [ "testing", ] } diff --git a/crates/namada/src/ledger/governance/mod.rs b/crates/namada/src/ledger/governance/mod.rs index b549cfb201..f12fb4184b 100644 --- a/crates/namada/src/ledger/governance/mod.rs +++ b/crates/namada/src/ledger/governance/mod.rs @@ -5,6 +5,7 @@ pub mod utils; use std::collections::BTreeSet; use borsh::BorshDeserialize; +use 
namada_core::booleans::{BoolResultUnitExt, ResultBoolExt}; use namada_governance::storage::proposal::{ AddRemove, PGFAction, ProposalType, }; @@ -14,6 +15,7 @@ use namada_governance::ProposalVote; use namada_proof_of_stake::is_validator; use namada_proof_of_stake::queries::find_delegations; use namada_state::{StateRead, StorageRead}; +use namada_tx::action::{Action, GovAction, Read}; use namada_tx::Tx; use namada_vp_env::VpEnv; use thiserror::Error; @@ -38,12 +40,12 @@ pub const MAX_PGF_ACTIONS: usize = 20; #[allow(missing_docs)] #[derive(Error, Debug)] pub enum Error { - #[error("Native VP error: {0}")] + #[error("Governance VP error: {0}")] NativeVpError(#[from] native_vp::Error), - #[error("Proposal field should not be empty: {0}")] - EmptyProposalField(String), - #[error("Vote key is not valid: {0}")] - InvalidVoteKey(String), + #[error( + "Action {0} not authorized by {1} which is not part of verifier set" + )] + Unauthorized(&'static str, Address), } /// Governance VP @@ -68,17 +70,70 @@ where tx_data: &Tx, keys_changed: &BTreeSet, verifiers: &BTreeSet
, - ) -> Result { + ) -> Result<()> { let (is_valid_keys_set, set_count) = self.is_valid_init_proposal_key_set(keys_changed)?; if !is_valid_keys_set { tracing::info!("Invalid changed governance key set"); - return Ok(false); + return Err(native_vp::Error::new_const( + "Invalid changed governance key set", + ) + .into()); }; let native_token = self.ctx.pre().get_native_token()?; - Ok(keys_changed.iter().all(|key| { + // Find the actions applied in the tx + let actions = self.ctx.read_actions()?; + + // There must be at least one action if any of the keys belong to gov + if actions.is_empty() + && keys_changed.iter().any(gov_storage::is_governance_key) + { + tracing::info!( + "Rejecting tx without any action written to temp storage" + ); + return Err(native_vp::Error::new_const( + "Rejecting tx without any action written to temp storage", + ) + .into()); + } + + // Check action authorization + for action in actions { + match action { + Action::Gov(gov_action) => match gov_action { + GovAction::InitProposal { author } => { + if !verifiers.contains(&author) { + tracing::info!( + "Unauthorized GovAction::InitProposal" + ); + return Err(Error::Unauthorized( + "InitProposal", + author, + )); + } + } + GovAction::VoteProposal { id: _, voter } => { + if !verifiers.contains(&voter) { + tracing::info!( + "Unauthorized GovAction::VoteProposal" + ); + return Err(Error::Unauthorized( + "VoteProposal", + voter, + )); + } + } + }, + _ => { + // Other actions are not relevant to PoS VP + continue; + } + } + } + + keys_changed.iter().try_for_each(|key| { let proposal_id = gov_storage::get_proposal_id(key); let key_type = KeyType::from_key(key, &native_token); @@ -95,8 +150,8 @@ where (KeyType::PROPOSAL_CODE, Some(proposal_id)) => { self.is_valid_proposal_code(proposal_id) } - (KeyType::GRACE_EPOCH, Some(proposal_id)) => { - self.is_valid_grace_epoch(proposal_id) + (KeyType::ACTIVATION_EPOCH, Some(proposal_id)) => { + self.is_valid_activation_epoch(proposal_id) } 
(KeyType::START_EPOCH, Some(proposal_id)) => { self.is_valid_start_epoch(proposal_id) @@ -116,19 +171,27 @@ where } (KeyType::PARAMETER, _) => self.is_valid_parameter(tx_data), (KeyType::BALANCE, _) => self.is_valid_balance(&native_token), - (KeyType::UNKNOWN_GOVERNANCE, _) => Ok(false), - (KeyType::UNKNOWN, _) => Ok(true), - _ => Ok(false), + (KeyType::UNKNOWN_GOVERNANCE, _) => { + Err(native_vp::Error::new_alloc(format!( + "Unkown governance key change: {key}" + )) + .into()) + } + (KeyType::UNKNOWN, _) => Ok(()), + _ => Err(native_vp::Error::new_alloc(format!( + "Unkown governance key change: {key}" + )) + .into()), }; - match &result { - Err(err) => tracing::info!( + + result.inspect_err(|err| { + tracing::info!( "Key {key_type:?} rejected with error: {err:#?}." - ), - Ok(false) => tracing::info!("Key {key_type:?} rejected"), - Ok(true) => {} - } - result.unwrap_or(false) - })) + ) + })?; + + Ok(()) + }) } } @@ -162,7 +225,7 @@ where gov_storage::get_funds_key(counter), gov_storage::get_voting_start_epoch_key(counter), gov_storage::get_voting_end_epoch_key(counter), - gov_storage::get_grace_epoch_key(counter), + gov_storage::get_activation_epoch_key(counter), ]); // Check that expected set is a subset of the actual one @@ -179,7 +242,7 @@ where proposal_id: u64, key: &Key, verifiers: &BTreeSet
, - ) -> Result { + ) -> Result<()> { let counter_key = gov_storage::get_counter_key(); let voting_start_epoch_key = gov_storage::get_voting_start_epoch_key(proposal_id); @@ -202,16 +265,23 @@ where (Some(voter_address), Some(delegator_address)) => { (voter_address, delegator_address) } - _ => return Err(Error::InvalidVoteKey(key.to_string())), + _ => { + return Err(native_vp::Error::new_alloc(format!( + "Vote key is not valid: {key}" + )) + .into()); + } }; // Invalid proposal id if pre_counter <= proposal_id { - tracing::info!( + let error = native_vp::Error::new_alloc(format!( "Invalid proposal ID. Expected {pre_counter} or lower, got \ - {proposal_id}." - ); - return Ok(false); + {proposal_id}" + )) + .into(); + tracing::info!("{error}"); + return Err(error); } let vote_key = gov_storage::get_vote_proposal_key( @@ -224,7 +294,10 @@ where .force_read::(&vote_key, ReadType::Post) .is_err() { - return Err(Error::InvalidVoteKey(key.to_string())); + return Err(native_vp::Error::new_alloc(format!( + "Vote key is not valid: {key}" + )) + .into()); } // TODO: We should refactor this by modifying the vote proposal tx @@ -232,7 +305,10 @@ where find_delegations(&self.ctx.pre(), voter_address, ¤t_epoch) { if delegations.is_empty() { - return Ok(false); + return Err(native_vp::Error::new_alloc(format!( + "No delegations found for {voter_address}" + )) + .into()); } else { delegations.iter().all(|(address, _)| { let vote_key = gov_storage::get_vote_proposal_key( @@ -244,10 +320,16 @@ where }) } } else { - return Ok(false); + return Err(native_vp::Error::new_alloc(format!( + "Failed to query delegations for {voter_address}" + )) + .into()); }; if !all_delegations_are_valid { - return Ok(false); + return Err(native_vp::Error::new_alloc(format!( + "Not all delegations of {voter_address} were deemed valid" + )) + .into()); } // Voted outside of voting window. 
We dont check for validator because @@ -259,52 +341,67 @@ where pre_voting_end_epoch, false, ) { - tracing::info!( + let error = native_vp::Error::new_alloc(format!( "Voted outside voting window. Current epoch: {current_epoch}, \ start: {pre_voting_start_epoch}, end: {pre_voting_end_epoch}." - ); - return Ok(false); + )) + .into(); + tracing::info!("{error}"); + return Err(error); } // first check if validator, then check if delegator - let is_validator = self - .is_validator( - pre_voting_start_epoch, - verifiers, - voter_address, - delegation_address, - ) - .unwrap_or(false); + let is_validator = + self.is_validator(verifiers, voter_address, delegation_address)?; if is_validator { - let valid_voting_period = is_valid_validator_voting_period( + return is_valid_validator_voting_period( current_epoch, pre_voting_start_epoch, pre_voting_end_epoch, - ); - return Ok(valid_voting_period); + ) + .ok_or_else(|| { + native_vp::Error::new_alloc(format!( + "Validator {voter_address} voted outside of the voting \ + period. Current epoch: {current_epoch}, pre voting start \ + epoch: {pre_voting_start_epoch}, pre voting end epoch: \ + {pre_voting_end_epoch}." + )) + .into() + }); } - let is_delegator = self - .is_delegator( - pre_voting_start_epoch, - verifiers, - voter_address, - delegation_address, - ) - .unwrap_or(false); - Ok(is_delegator) + let is_delegator = self.is_delegator( + pre_voting_start_epoch, + verifiers, + voter_address, + delegation_address, + )?; + + if !is_delegator { + return Err(native_vp::Error::new_alloc(format!( + "Address {voter_address} is neither a validator nor a \ + delegator." 
+ )) + .into()); + } + + Ok(()) } /// Validate a content key - pub fn is_valid_content_key(&self, proposal_id: u64) -> Result { + pub fn is_valid_content_key(&self, proposal_id: u64) -> Result<()> { let content_key: Key = gov_storage::get_content_key(proposal_id); let max_content_length_parameter_key = gov_storage::get_max_proposal_content_key(); let has_pre_content: bool = self.ctx.has_key_pre(&content_key)?; if has_pre_content { - return Ok(false); + return Err(native_vp::Error::new_alloc(format!( + "Proposal with id {proposal_id} already had content written \ + to storage." + )) + .into()); } let max_content_length: usize = @@ -314,16 +411,19 @@ where let is_valid = post_content.len() <= max_content_length; if !is_valid { - tracing::info!( + let error = native_vp::Error::new_alloc(format!( "Max content length {max_content_length}, got {}.", post_content.len() - ); + )) + .into(); + tracing::info!("{error}"); + return Err(error); } - Ok(is_valid) + Ok(()) } /// Validate the proposal type - pub fn is_valid_proposal_type(&self, proposal_id: u64) -> Result { + pub fn is_valid_proposal_type(&self, proposal_id: u64) -> Result<()> { let proposal_type_key = gov_storage::get_proposal_type_key(proposal_id); let proposal_type: ProposalType = self.force_read(&proposal_type_key, ReadType::Post)?; @@ -350,27 +450,66 @@ where // we allow only a single steward to be added if total_stewards_added > 1 { - Ok(false) + Err(native_vp::Error::new_const( + "Only one steward is allowed to be added per proposal", + ) + .into()) } else if total_stewards_added == 0 { let is_valid_total_pgf_actions = stewards.len() < MAX_PGF_ACTIONS; - return Ok(is_valid_total_pgf_actions); + + return if is_valid_total_pgf_actions { + Ok(()) + } else { + return Err(native_vp::Error::new_alloc(format!( + "Maximum number of steward actions \ + ({MAX_PGF_ACTIONS}) exceeded ({})", + stewards.len() + )) + .into()); + }; } else if let Some(address) = stewards_added.first() { let author_key = 
gov_storage::get_author_key(proposal_id); let author = self .force_read::
(&author_key, ReadType::Post)?; let is_valid_author = address.eq(&author); + if !is_valid_author { + return Err(native_vp::Error::new_alloc(format!( + "Author {author} does not match added steward \ + address {address}", + )) + .into()); + } + let stewards_addresses_are_unique = stewards.len() == all_pgf_action_addresses; + + if !stewards_addresses_are_unique { + return Err(native_vp::Error::new_const( + "Non-unique modified steward addresses", + ) + .into()); + } + let is_valid_total_pgf_actions = all_pgf_action_addresses < MAX_PGF_ACTIONS; - return Ok(is_valid_author - && stewards_addresses_are_unique - && is_valid_total_pgf_actions); + if !is_valid_total_pgf_actions { + return Err(native_vp::Error::new_alloc(format!( + "Maximum number of steward actions \ + ({MAX_PGF_ACTIONS}) exceeded \ + ({all_pgf_action_addresses})", + )) + .into()); + } + + return Ok(()); } else { - return Ok(false); + return Err(native_vp::Error::new_const( + "Invalid PGF proposal", + ) + .into()); } } ProposalType::PGFPayment(fundings) => { @@ -398,44 +537,71 @@ where }) .collect::>(); - let total_retro_targerts = fundings + let total_retro_targets = fundings .iter() .filter(|funding| matches!(funding, PGFAction::Retro(_))) .count(); let is_total_fundings_valid = fundings.len() < MAX_PGF_ACTIONS; + if !is_total_fundings_valid { + return Err(native_vp::Error::new_alloc(format!( + "Maximum number of funding actions \ + ({MAX_PGF_ACTIONS}) exceeded ({})", + fundings.len() + )) + .into()); + } + // check that they are unique by checking that the set of add // plus the set of remove plus the set of retro is equal to the // total fundings let are_continuous_fundings_unique = are_continuous_add_targets_unique.len() + are_continuous_remove_targets_unique.len() - + total_retro_targerts + + total_retro_targets == fundings.len(); + if !are_continuous_fundings_unique { + return Err(native_vp::Error::new_const( + "Non-unique modified fundings", + ) + .into()); + } + // can't remove and add the same 
target in the same proposal let are_targets_unique = are_continuous_add_targets_unique .intersection(&are_continuous_remove_targets_unique) .count() as u64 == 0; - Ok(is_total_fundings_valid - && are_continuous_fundings_unique - && are_targets_unique) + are_targets_unique.ok_or_else(|| { + native_vp::Error::new_const( + "One or more payment targets were added and removed \ + in the same proposal", + ) + .into() + }) } - _ => Ok(true), // default proposal + // Default proposal condition are checked already for all other + // proposals. + // default_with_wasm proposal needs to check only for valid code + _ => Ok(()), } } /// Validate a proposal code - pub fn is_valid_proposal_code(&self, proposal_id: u64) -> Result { + pub fn is_valid_proposal_code(&self, proposal_id: u64) -> Result<()> { let proposal_type_key = gov_storage::get_proposal_type_key(proposal_id); let proposal_type: ProposalType = self.force_read(&proposal_type_key, ReadType::Post)?; - if !proposal_type.is_default() { - return Ok(false); + if !proposal_type.is_default_with_wasm() { + return Err(native_vp::Error::new_alloc(format!( + "Proposal with id {proposal_id} modified a proposal code key, \ + but its type is not default.", + )) + .into()); } let code_key = gov_storage::get_proposal_code_key(proposal_id); @@ -444,7 +610,11 @@ where let has_pre_code: bool = self.ctx.has_key_pre(&code_key)?; if has_pre_code { - return Ok(false); + return Err(native_vp::Error::new_alloc(format!( + "Proposal with id {proposal_id} already had wasm code written \ + to storage in its slot.", + )) + .into()); } let max_proposal_length: usize = @@ -452,73 +622,98 @@ where let post_code: Vec = self.ctx.read_bytes_post(&code_key)?.unwrap_or_default(); - Ok(post_code.len() <= max_proposal_length) + let wasm_code_below_max_len = post_code.len() <= max_proposal_length; + + if !wasm_code_below_max_len { + return Err(native_vp::Error::new_alloc(format!( + "Proposal with id {proposal_id} wrote wasm code with length \ + {} to storage, 
but the max allowed length is \ + {max_proposal_length}.", + post_code.len(), + )) + .into()); + } + + Ok(()) } - /// Validate a grace_epoch key - pub fn is_valid_grace_epoch(&self, proposal_id: u64) -> Result { + /// Validate an activation_epoch key + pub fn is_valid_activation_epoch(&self, proposal_id: u64) -> Result<()> { let start_epoch_key = gov_storage::get_voting_start_epoch_key(proposal_id); let end_epoch_key = gov_storage::get_voting_end_epoch_key(proposal_id); - let grace_epoch_key = gov_storage::get_grace_epoch_key(proposal_id); + let activation_epoch_key = + gov_storage::get_activation_epoch_key(proposal_id); let max_proposal_period = gov_storage::get_max_proposal_period_key(); - let min_grace_epoch_key = - gov_storage::get_min_proposal_grace_epoch_key(); - - let has_pre_grace_epoch = self.ctx.has_key_pre(&grace_epoch_key)?; - if has_pre_grace_epoch { - return Ok(false); + let min_grace_epochs_key = + gov_storage::get_min_proposal_grace_epochs_key(); + + let has_pre_activation_epoch = + self.ctx.has_key_pre(&activation_epoch_key)?; + if has_pre_activation_epoch { + return Err(native_vp::Error::new_alloc(format!( + "Proposal with id {proposal_id} already had a grace epoch \ + written to storage in its slot.", + )) + .into()); } let start_epoch: Epoch = self.force_read(&start_epoch_key, ReadType::Post)?; let end_epoch: Epoch = self.force_read(&end_epoch_key, ReadType::Post)?; - let grace_epoch: Epoch = - self.force_read(&grace_epoch_key, ReadType::Post)?; - let min_grace_epoch: u64 = - self.force_read(&min_grace_epoch_key, ReadType::Pre)?; + let activation_epoch: Epoch = + self.force_read(&activation_epoch_key, ReadType::Post)?; + let min_grace_epochs: u64 = + self.force_read(&min_grace_epochs_key, ReadType::Pre)?; let max_proposal_period: u64 = self.force_read(&max_proposal_period, ReadType::Pre)?; let committing_epoch_key = gov_storage::get_committing_proposals_key( proposal_id, - grace_epoch.into(), + activation_epoch.into(), ); let 
has_post_committing_epoch = self.ctx.has_key_post(&committing_epoch_key)?; if !has_post_committing_epoch { - tracing::info!("Committing proposal key is missing present"); + let error = native_vp::Error::new_const( + "Committing proposal key is missing present", + ) + .into(); + tracing::info!("{error}"); + return Err(error); } - let is_valid_grace_epoch = end_epoch < grace_epoch - && (grace_epoch - end_epoch).0 >= min_grace_epoch; - if !is_valid_grace_epoch { - tracing::info!( + let is_valid_activation_epoch = end_epoch < activation_epoch + && (activation_epoch - end_epoch).0 >= min_grace_epochs; + if !is_valid_activation_epoch { + let error = native_vp::Error::new_alloc(format!( "Expected min duration between the end and grace epoch \ - {min_grace_epoch}, but got grace = {}, end = {}", - grace_epoch, - end_epoch - ); + {min_grace_epochs}, but got activation = {activation_epoch}, \ + end = {end_epoch}", + )) + .into(); + tracing::info!("{error}"); + return Err(error); } - let is_valid_max_proposal_period = start_epoch < grace_epoch - && grace_epoch.0 - start_epoch.0 <= max_proposal_period; + let is_valid_max_proposal_period = start_epoch < activation_epoch + && activation_epoch.0 - start_epoch.0 <= max_proposal_period; if !is_valid_max_proposal_period { - tracing::info!( + let error = native_vp::Error::new_alloc(format!( "Expected max duration between the start and grace epoch \ - {max_proposal_period}, but got grace ={}, start = {}", - grace_epoch, - start_epoch - ); + {max_proposal_period}, but got activation = \ + {activation_epoch}, start = {start_epoch}", + )) + .into(); + tracing::info!("{error}"); + return Err(error); } - Ok(has_post_committing_epoch - && is_valid_grace_epoch - && is_valid_max_proposal_period) + Ok(()) } /// Validate a start_epoch key - pub fn is_valid_start_epoch(&self, proposal_id: u64) -> Result { + pub fn is_valid_start_epoch(&self, proposal_id: u64) -> Result<()> { let start_epoch_key = 
gov_storage::get_voting_start_epoch_key(proposal_id); let end_epoch_key = gov_storage::get_voting_end_epoch_key(proposal_id); @@ -528,10 +723,27 @@ where let current_epoch = self.ctx.get_block_epoch()?; let has_pre_start_epoch = self.ctx.has_key_pre(&start_epoch_key)?; - let has_pre_end_epoch = self.ctx.has_key_pre(&end_epoch_key)?; + if has_pre_start_epoch { + let error = native_vp::Error::new_alloc(format!( + "Failed to validate start epoch. Proposal with id \ + {proposal_id} already had a pre_start epoch written to \ + storage in its slot.", + )) + .into(); + tracing::info!("{error}"); + return Err(error); + } - if has_pre_start_epoch || has_pre_end_epoch { - return Ok(false); + let has_pre_end_epoch = self.ctx.has_key_pre(&end_epoch_key)?; + if has_pre_end_epoch { + let error = native_vp::Error::new_alloc(format!( + "Failed to validate start epoch. Proposal with id \ + {proposal_id} already had a pre_end epoch written to storage \ + in its slot.", + )) + .into(); + tracing::info!("{error}"); + return Err(error); } let start_epoch: Epoch = @@ -541,22 +753,63 @@ where let min_period: u64 = self.force_read(&min_period_parameter_key, ReadType::Pre)?; - if end_epoch <= start_epoch || start_epoch <= current_epoch { - return Ok(false); + if end_epoch <= start_epoch { + return Err(native_vp::Error::new_alloc(format!( + "Ending epoch {end_epoch} cannot be lower than or equal to \ + the starting epoch {start_epoch} of the proposal with id \ + {proposal_id}.", + )) + .into()); + } + + if start_epoch <= current_epoch { + return Err(native_vp::Error::new_alloc(format!( + "Starting epoch {start_epoch} cannot be lower than or equal \ + to the current epoch {current_epoch} of the proposal with id \ + {proposal_id}.", + )) + .into()); } // TODO: HACK THAT NEEDS TO BE PROPERLY FIXED WITH PARAM let latency = 30u64; if start_epoch.0 - current_epoch.0 > latency { - return Ok(false); + return Err(native_vp::Error::new_alloc(format!( + "Starting epoch {start_epoch} of the proposal 
with id \ + {proposal_id} is too far in the future (more than {latency} \ + epochs away from the current epoch {current_epoch}).", + )) + .into()); + } + + let proposal_period_multiple_of_min_period = + (end_epoch - start_epoch) % min_period == 0; + if !proposal_period_multiple_of_min_period { + return Err(native_vp::Error::new_alloc(format!( + "Proposal with id {proposal_id} does not have a voting period \ + that is a multiple of the minimum voting period \ + {min_period}. Starting epoch is {start_epoch}, and ending \ + epoch is {end_epoch}.", + )) + .into()); } - Ok((end_epoch - start_epoch) % min_period == 0 - && (end_epoch - start_epoch).0 >= min_period) + let proposal_meets_min_period = + (end_epoch - start_epoch).0 >= min_period; + if !proposal_meets_min_period { + return Err(native_vp::Error::new_alloc(format!( + "Proposal with id {proposal_id} does not meet the required \ + minimum period of {min_period} epochs. Starting epoch is \ + {start_epoch}, and ending epoch is {end_epoch}.", + )) + .into()); + } + + Ok(()) } /// Validate a end_epoch key - fn is_valid_end_epoch(&self, proposal_id: u64) -> Result { + fn is_valid_end_epoch(&self, proposal_id: u64) -> Result<()> { let start_epoch_key = gov_storage::get_voting_start_epoch_key(proposal_id); let end_epoch_key = gov_storage::get_voting_end_epoch_key(proposal_id); @@ -568,10 +821,25 @@ where let current_epoch = self.ctx.get_block_epoch()?; let has_pre_start_epoch = self.ctx.has_key_pre(&start_epoch_key)?; - let has_pre_end_epoch = self.ctx.has_key_pre(&end_epoch_key)?; + if has_pre_start_epoch { + let error = native_vp::Error::new_alloc(format!( + "Failed to validate end epoch. 
Proposal with id {proposal_id} \ + already had a pre_start epoch written to storage in its slot.", + )) + .into(); + tracing::info!("{error}"); + return Err(error); + } - if has_pre_start_epoch || has_pre_end_epoch { - return Ok(false); + let has_pre_end_epoch = self.ctx.has_key_pre(&end_epoch_key)?; + if has_pre_end_epoch { + let error = native_vp::Error::new_alloc(format!( + "Failed to validate end epoch. Proposal with id {proposal_id} \ + already had a pre_end epoch written to storage in its slot.", + )) + .into(); + tracing::info!("{error}"); + return Err(error); } let start_epoch: Epoch = @@ -584,16 +852,40 @@ where self.force_read(&max_period_parameter_key, ReadType::Pre)?; if end_epoch <= start_epoch || start_epoch <= current_epoch { - tracing::info!( - "Proposal end epoch ({end_epoch}) must be after the start \ - epoch ({start_epoch}), and the start epoch must be after the \ - current epoch ({current_epoch})." - ); - return Ok(false); + let error = native_vp::Error::new_alloc(format!( + "Proposal with id {proposal_id}'s end epoch ({end_epoch}) \ + must be after the start epoch ({start_epoch}), and the start \ + epoch must be after the current epoch ({current_epoch})." + )) + .into(); + tracing::info!("{error}"); + return Err(error); } - Ok((end_epoch - start_epoch) % min_period == 0 - && (end_epoch - start_epoch).0 >= min_period - && (end_epoch - start_epoch).0 <= max_period) + + let proposal_period_multiple_of_min_period = + (end_epoch - start_epoch) % min_period == 0; + if !proposal_period_multiple_of_min_period { + return Err(native_vp::Error::new_alloc(format!( + "Proposal with id {proposal_id} does not have a voting period \ + that is a multiple of the minimum voting period \ + {min_period}. 
Starting epoch is {start_epoch}, and ending \ + epoch is {end_epoch}.", + )) + .into()); + } + + let valid_voting_period = (end_epoch - start_epoch).0 >= min_period + && (end_epoch - start_epoch).0 <= max_period; + + valid_voting_period.ok_or_else(|| { + native_vp::Error::new_alloc(format!( + "Proposal with id {proposal_id} must have a voting period \ + with a minimum of {min_period} epochs, and a maximum of \ + {max_period} epochs. The starting epoch is {start_epoch}, \ + and the ending epoch is {end_epoch}.", + )) + .into() + }) } /// Validate a funds key @@ -601,7 +893,7 @@ where &self, proposal_id: u64, native_token_address: &Address, - ) -> Result { + ) -> Result<()> { let funds_key = gov_storage::get_funds_key(proposal_id); let balance_key = token::storage_key::balance_key( native_token_address, @@ -618,19 +910,56 @@ where let post_funds: token::Amount = self.force_read(&funds_key, ReadType::Post)?; - if let Some(pre_balance) = pre_balance { - let is_post_funds_greater_than_minimum = - post_funds >= min_funds_parameter; - let is_valid_funds = post_balance >= pre_balance - && post_balance - pre_balance == post_funds; - Ok(is_post_funds_greater_than_minimum && is_valid_funds) - } else { - Ok(post_funds >= min_funds_parameter && post_balance == post_funds) - } + pre_balance.map_or_else( + // null pre balance + || { + let is_post_funds_greater_than_minimum = + post_funds >= min_funds_parameter; + is_post_funds_greater_than_minimum.ok_or_else(|| { + Error::NativeVpError(native_vp::Error::new_alloc(format!( + "Funds must be greater than the minimum funds of {}", + min_funds_parameter.native_denominated() + ))) + })?; + + let post_balance_is_same = post_balance == post_funds; + post_balance_is_same.ok_or_else(|| { + native_vp::Error::new_alloc(format!( + "Funds and the balance of the governance account have \ + diverged: funds {} != balance {}", + post_funds.native_denominated(), + post_balance.native_denominated() + )) + .into() + }) + }, + // there was some 
non-zero balance in the governance account + |pre_balance| { + let is_post_funds_greater_than_minimum = + post_funds >= min_funds_parameter; + is_post_funds_greater_than_minimum.ok_or_else(|| { + Error::NativeVpError(native_vp::Error::new_alloc(format!( + "Funds {} must be greater than the minimum funds of {}", + post_funds.native_denominated(), + min_funds_parameter.native_denominated() + ))) + })?; + + let is_valid_funds = post_balance >= pre_balance + && post_balance - pre_balance == post_funds; + is_valid_funds.ok_or_else(|| { + native_vp::Error::new_alloc(format!( + "Invalid funds {} have been written to storage", + post_funds.native_denominated() + )) + .into() + }) + }, + ) } /// Validate a balance key - fn is_valid_balance(&self, native_token_address: &Address) -> Result { + fn is_valid_balance(&self, native_token_address: &Address) -> Result<()> { let balance_key = token::storage_key::balance_key( native_token_address, self.ctx.address, @@ -645,12 +974,20 @@ where let post_balance: token::Amount = self.force_read(&balance_key, ReadType::Post)?; - if let Some(pre_balance) = pre_balance { - Ok(post_balance > pre_balance - && post_balance - pre_balance >= min_funds_parameter) + let balance_is_valid = if let Some(pre_balance) = pre_balance { + post_balance > pre_balance + && post_balance - pre_balance >= min_funds_parameter } else { - Ok(post_balance >= min_funds_parameter) - } + post_balance >= min_funds_parameter + }; + + balance_is_valid.ok_or_else(|| { + native_vp::Error::new_alloc(format!( + "Invalid balance {} has been written to storage", + post_balance.native_denominated() + )) + .into() + }) } /// Validate a author key @@ -658,33 +995,59 @@ where &self, proposal_id: u64, verifiers: &BTreeSet
, - ) -> Result { + ) -> Result<()> { let author_key = gov_storage::get_author_key(proposal_id); let has_pre_author = self.ctx.has_key_pre(&author_key)?; if has_pre_author { - return Ok(false); + return Err(native_vp::Error::new_alloc(format!( + "Proposal with id {proposal_id} already had an author written \ + to storage" + )) + .into()); } let author = self.force_read(&author_key, ReadType::Post)?; - let author_exists = - namada_account::exists(&self.ctx.pre(), &author).unwrap_or(false); - - Ok(author_exists && verifiers.contains(&author)) + namada_account::exists(&self.ctx.pre(), &author) + .map_err(Error::NativeVpError) + .true_or_else(|| { + native_vp::Error::new_alloc(format!( + "No author account {author} could be found for the \ + proposal with id {proposal_id}" + )) + .into() + })?; + + verifiers.contains(&author).ok_or_else(|| { + native_vp::Error::new_alloc(format!( + "The VP of the proposal with id {proposal_id}'s author \ + {author} should have been triggered" + )) + .into() + }) } /// Validate a counter key - pub fn is_valid_counter(&self, set_count: u64) -> Result { + pub fn is_valid_counter(&self, set_count: u64) -> Result<()> { let counter_key = gov_storage::get_counter_key(); let pre_counter: u64 = self.force_read(&counter_key, ReadType::Pre)?; let post_counter: u64 = self.force_read(&counter_key, ReadType::Post)?; - Ok(pre_counter + set_count == post_counter) + let expected_counter = pre_counter + set_count; + let valid_counter = expected_counter == post_counter; + + valid_counter.ok_or_else(|| { + native_vp::Error::new_alloc(format!( + "Invalid proposal counter. Expected {expected_counter}, but \ + got {post_counter} instead." 
+ )) + .into() + }) } /// Validate a commit key - pub fn is_valid_proposal_commit(&self) -> Result { + pub fn is_valid_proposal_commit(&self) -> Result<()> { let counter_key = gov_storage::get_counter_key(); let pre_counter: u64 = self.force_read(&counter_key, ReadType::Pre)?; let post_counter: u64 = @@ -693,22 +1056,45 @@ where // NOTE: can't do pre_counter + set_count == post_counter here // because someone may update an empty proposal that just // register a committing key causing a bug - Ok(pre_counter < post_counter) + let pre_counter_is_lower = pre_counter < post_counter; + + pre_counter_is_lower.ok_or_else(|| { + native_vp::Error::new_alloc(format!( + "The value of the previous counter {pre_counter} must be \ + lower than the value of the new counter {post_counter}." + )) + .into() + }) } /// Validate a governance parameter - pub fn is_valid_parameter(&self, tx: &Tx) -> Result { - match tx.data() { - Some(data) => is_proposal_accepted(&self.ctx.pre(), data.as_ref()) - .map_err(Error::NativeVpError), - None => Ok(false), - } + pub fn is_valid_parameter(&self, tx: &Tx) -> Result<()> { + tx.data().map_or_else( + || { + Err(native_vp::Error::new_const( + "Governance parameter changes require tx data to be \ + present", + ) + .into()) + }, + |data| { + is_proposal_accepted(&self.ctx.pre(), data.as_ref()) + .map_err(Error::NativeVpError)? + .ok_or_else(|| { + native_vp::Error::new_const( + "Governance parameter changes can only be \ + performed by a governance proposal that has been \ + accepted", + ) + .into() + }) + }, + ) } /// Check if a vote is from a validator pub fn is_validator( &self, - _epoch: Epoch, verifiers: &BTreeSet
, address: &Address, delegation_address: &Address, @@ -739,7 +1125,10 @@ where if let Some(data) = res { Ok(data) } else { - Err(Error::EmptyProposalField(key.to_string())) + Err(native_vp::Error::new_alloc(format!( + "Proposal field should not be empty: {key}" + )) + .into()) } } @@ -795,7 +1184,7 @@ enum KeyType { #[allow(non_camel_case_types)] PROPOSAL_COMMIT, #[allow(non_camel_case_types)] - GRACE_EPOCH, + ACTIVATION_EPOCH, #[allow(non_camel_case_types)] START_EPOCH, #[allow(non_camel_case_types)] @@ -824,8 +1213,8 @@ impl KeyType { Self::TYPE } else if gov_storage::is_proposal_code_key(key) { Self::PROPOSAL_CODE - } else if gov_storage::is_grace_epoch_key(key) { - KeyType::GRACE_EPOCH + } else if gov_storage::is_activation_epoch_key(key) { + KeyType::ACTIVATION_EPOCH } else if gov_storage::is_start_epoch_key(key) { KeyType::START_EPOCH } else if gov_storage::is_commit_proposal_key(key) { @@ -851,3 +1240,1797 @@ impl KeyType { } } } + +#[cfg(test)] +mod test { + use std::cell::RefCell; + use std::collections::BTreeSet; + + use borsh_ext::BorshSerializeExt; + use namada_gas::{TxGasMeter, VpGasMeter}; + use namada_governance::storage::keys::{ + get_activation_epoch_key, get_author_key, get_committing_proposals_key, + get_content_key, get_counter_key, get_funds_key, get_proposal_type_key, + get_vote_proposal_key, get_voting_end_epoch_key, + get_voting_start_epoch_key, + }; + use namada_governance::{ProposalType, ProposalVote, ADDRESS}; + use namada_proof_of_stake::bond_tokens; + use namada_sdk::address::testing::{ + established_address_1, established_address_3, nam, + }; + use namada_sdk::key::testing::keypair_1; + use namada_sdk::key::RefTo; + use namada_sdk::time::DateTimeUtc; + use namada_sdk::token; + use namada_state::mockdb::MockDB; + use namada_state::testing::TestState; + use namada_state::{ + BlockHash, BlockHeight, Epoch, FullAccessState, Key, Sha256Hasher, + State, StorageRead, TxIndex, + }; + use namada_token::storage_key::balance_key; + use 
namada_tx::action::{Action, GovAction, Write}; + use namada_tx::data::TxType; + use namada_tx::{Authorization, Code, Data, Section, Tx}; + + use crate::core::address::Address; + use crate::ledger::governance::GovernanceVp; + use crate::ledger::native_vp::ibc::{ + get_dummy_genesis_validator, get_dummy_header, + }; + use crate::ledger::native_vp::{Ctx, NativeVp}; + use crate::ledger::pos; + use crate::vm::wasm; + + fn init_storage() -> TestState { + let mut state = TestState::default(); + + pos::test_utils::test_init_genesis( + &mut state, + namada_proof_of_stake::OwnedPosParams::default(), + vec![get_dummy_genesis_validator()].into_iter(), + Epoch(1), + ) + .unwrap(); + + state + .in_mem_mut() + .set_header(get_dummy_header()) + .expect("Setting a dummy header shouldn't fail"); + state + .in_mem_mut() + .begin_block(BlockHash::default(), BlockHeight(1)) + .unwrap(); + + state + } + + #[test] + fn test_noop() { + let state = init_storage(); + let keys_changed = BTreeSet::new(); + + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(u64::MAX.into()), + )); + let (vp_wasm_cache, _vp_cache_dir) = + wasm::compilation_cache::common::testing::cache(); + + let tx_index = TxIndex::default(); + + let signer = keypair_1(); + let signer_address = Address::from(&signer.clone().ref_to()); + let verifiers = BTreeSet::from([signer_address]); + + let tx_code = vec![]; + let tx_data = vec![]; + + let mut tx = Tx::from_type(TxType::Raw); + tx.header.chain_id = state.in_mem().chain_id.clone(); + tx.set_code(Code::new(tx_code, None)); + tx.set_data(Data::new(tx_data)); + tx.add_section(Section::Authorization(Authorization::new( + vec![tx.header_hash()], + [(0, keypair_1())].into_iter().collect(), + None, + ))); + + let ctx = Ctx::new( + &ADDRESS, + &state, + &tx, + &tx_index, + &gas_meter, + &keys_changed, + &verifiers, + vp_wasm_cache, + ); + + let governance_vp = GovernanceVp { ctx }; + // this should return true because state has been 
stored + assert_matches!( + governance_vp.validate_tx(&tx, &keys_changed, &verifiers), + Ok(_) + ); + } + + fn initialize_account_balance( + state: &mut S, + address: &Address, + amount: token::Amount, + ) where + S: State, + { + let balance_key = balance_key(&nam(), address); + state + .write_log_mut() + .write(&balance_key, amount.serialize_to_vec()) + .expect("write failed"); + state.write_log_mut().commit_tx(); + } + + fn update_epoch_to( + state: &mut FullAccessState, + total_epochs: u64, + height: BlockHeight, + ) { + state.in_mem_mut().update_epoch_blocks_delay = Some(1); + for _ in 0..total_epochs { + state.in_mem_mut().update_epoch_blocks_delay = Some(1); + state + .update_epoch( + height, + #[allow(clippy::disallowed_methods)] + DateTimeUtc::now() + .next_second() + .next_second() + .next_second() + .next_second() + .next_second(), + ) + .unwrap(); + } + } + + fn get_proposal_keys( + proposal_id: u64, + activation_epoch: u64, + ) -> BTreeSet { + let counter_key = get_counter_key(); + let voting_end_epoch_key = get_voting_end_epoch_key(proposal_id); + let voting_start_epoch_key = get_voting_start_epoch_key(proposal_id); + let activation_epoch_key = get_activation_epoch_key(proposal_id); + let content_key = get_content_key(proposal_id); + let author_key = get_author_key(proposal_id); + let proposal_type_key = get_proposal_type_key(proposal_id); + let funds_key = get_funds_key(proposal_id); + let commiting_key = + get_committing_proposals_key(proposal_id, activation_epoch); + + BTreeSet::from([ + counter_key.clone(), + funds_key.clone(), + content_key.clone(), + author_key.clone(), + proposal_type_key.clone(), + voting_start_epoch_key.clone(), + voting_end_epoch_key.clone(), + activation_epoch_key.clone(), + commiting_key.clone(), + ]) + } + + fn transfer( + state: &mut S, + source: &Address, + target: &Address, + amount: u64, + ) where + S: State, + { + let source_balance_key = balance_key(&nam(), source); + let target_balance_key = balance_key(&nam(), 
target); + let amount = token::Amount::native_whole(amount); + + let mut current_source: token::Amount = + state.read(&source_balance_key).unwrap().unwrap(); + let mut current_target: token::Amount = + state.read(&target_balance_key).unwrap().unwrap(); + + current_source.spend(&amount).unwrap(); + current_target.receive(&amount).unwrap(); + + state + .write_log_mut() + .write(&source_balance_key, current_source.serialize_to_vec()) + .expect("write failed"); + + state + .write_log_mut() + .write(&target_balance_key, current_target.serialize_to_vec()) + .expect("write failed"); + } + + #[allow(clippy::too_many_arguments)] + fn init_proposal( + state: &mut S, + proposal_id: u64, + funds: u64, + start_epoch: u64, + end_epoch: u64, + grace_epoch: u64, + signer_address: &Address, + no_commiting_key: bool, + ) where + S: State + namada_tx::action::Write, + { + let counter_key = get_counter_key(); + let voting_end_epoch_key = get_voting_end_epoch_key(proposal_id); + let voting_start_epoch_key = get_voting_start_epoch_key(proposal_id); + let activation_epoch_key = get_activation_epoch_key(proposal_id); + let content_key = get_content_key(proposal_id); + let author_key = get_author_key(proposal_id); + let proposal_type_key = get_proposal_type_key(proposal_id); + let funds_key = get_funds_key(proposal_id); + let commiting_key = + get_committing_proposals_key(proposal_id, grace_epoch); + // let governance_balance_key = balance_key(&nam(), &ADDRESS); + // let author_balance_key = balance_key(&nam(), signer_address); + + transfer(state, signer_address, &ADDRESS, funds); + + state + .push_action(Action::Gov(GovAction::InitProposal { + author: signer_address.clone(), + })) + .unwrap(); + + state + .write_log_mut() + .write(&counter_key, (proposal_id + 1).serialize_to_vec()) + .unwrap(); + state + .write_log_mut() + .write(&voting_end_epoch_key, Epoch(end_epoch).serialize_to_vec()) + .unwrap(); + state + .write_log_mut() + .write( + &voting_start_epoch_key, + 
Epoch(start_epoch).serialize_to_vec(), + ) + .unwrap(); + state + .write_log_mut() + .write(&activation_epoch_key, Epoch(grace_epoch).serialize_to_vec()) + .unwrap(); + state + .write_log_mut() + .write(&content_key, vec![1, 2, 3, 4]) + .unwrap(); + state + .write_log_mut() + .write(&author_key, signer_address.serialize_to_vec()) + .unwrap(); + state + .write_log_mut() + .write(&proposal_type_key, ProposalType::Default.serialize_to_vec()) + .unwrap(); + state + .write_log_mut() + .write( + &funds_key, + token::Amount::native_whole(funds).serialize_to_vec(), + ) + .unwrap(); + if !no_commiting_key { + state + .write_log_mut() + .write(&commiting_key, ().serialize_to_vec()) + .unwrap(); + } + } + + #[test] + fn test_governance_proposal_accepted() { + let mut state = init_storage(); + + let proposal_id = 0; + let activation_epoch = 19; + + let keys_changed = get_proposal_keys(proposal_id, activation_epoch); + + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(u64::MAX.into()), + )); + let (vp_wasm_cache, _vp_cache_dir) = + wasm::compilation_cache::common::testing::cache(); + + let tx_index = TxIndex::default(); + + let signer = keypair_1(); + let signer_address = Address::from(&signer.clone().ref_to()); + let verifiers = BTreeSet::from([signer_address.clone()]); + + initialize_account_balance( + &mut state, + &signer_address.clone(), + token::Amount::native_whole(510), + ); + initialize_account_balance( + &mut state, + &ADDRESS, + token::Amount::native_whole(0), + ); + state.commit_block().unwrap(); + + let tx_code = vec![]; + let tx_data = vec![]; + + let mut tx = Tx::from_type(TxType::Raw); + tx.header.chain_id = state.in_mem().chain_id.clone(); + tx.set_code(Code::new(tx_code, None)); + tx.set_data(Data::new(tx_data)); + tx.add_section(Section::Authorization(Authorization::new( + vec![tx.header_hash()], + [(0, keypair_1())].into_iter().collect(), + None, + ))); + + init_proposal( + &mut state, + proposal_id, + 500, + 3, 
+ 9, + 19, + &signer_address, + false, + ); + + let ctx = Ctx::new( + &ADDRESS, + &state, + &tx, + &tx_index, + &gas_meter, + &keys_changed, + &verifiers, + vp_wasm_cache, + ); + + let governance_vp = GovernanceVp { ctx }; + // this should return true because state has been stored + assert_matches!( + governance_vp.validate_tx(&tx, &keys_changed, &verifiers), + Ok(_) + ); + + state.write_log_mut().commit_tx(); + state.commit_block().unwrap(); + + let governance_balance_key = balance_key(&nam(), &ADDRESS); + let amount: token::Amount = + state.read(&governance_balance_key).unwrap().unwrap(); + assert_eq!(amount, token::Amount::native_whole(500)); + + let author_balance_key = balance_key(&nam(), &signer_address); + let amount: token::Amount = + state.read(&author_balance_key).unwrap().unwrap(); + assert_eq!(amount, token::Amount::native_whole(10)); + + let governance_counter_key = get_counter_key(); + let counter: u64 = + state.read(&governance_counter_key).unwrap().unwrap(); + assert_eq!(counter, 1); + } + + #[test] + fn test_governance_proposal_not_enough_funds_failed() { + let mut state = init_storage(); + + let proposal_id = 0; + let activation_epoch = 19; + + let keys_changed = get_proposal_keys(proposal_id, activation_epoch); + + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(u64::MAX.into()), + )); + let (vp_wasm_cache, _vp_cache_dir) = + wasm::compilation_cache::common::testing::cache(); + + let tx_index = TxIndex::default(); + + let signer = keypair_1(); + let signer_address = Address::from(&signer.clone().ref_to()); + let verifiers = BTreeSet::from([signer_address.clone()]); + + initialize_account_balance( + &mut state, + &signer_address.clone(), + token::Amount::native_whole(500), + ); + initialize_account_balance( + &mut state, + &ADDRESS, + token::Amount::native_whole(0), + ); + state.commit_block().unwrap(); + + let tx_code = vec![]; + let tx_data = vec![]; + + let mut tx = Tx::from_type(TxType::Raw); + 
tx.header.chain_id = state.in_mem().chain_id.clone(); + tx.set_code(Code::new(tx_code, None)); + tx.set_data(Data::new(tx_data)); + tx.add_section(Section::Authorization(Authorization::new( + vec![tx.header_hash()], + [(0, keypair_1())].into_iter().collect(), + None, + ))); + + init_proposal( + &mut state, + proposal_id, + 499, + 3, + 9, + 19, + &signer_address, + false, + ); + + let ctx = Ctx::new( + &ADDRESS, + &state, + &tx, + &tx_index, + &gas_meter, + &keys_changed, + &verifiers, + vp_wasm_cache, + ); + + let governance_vp = GovernanceVp { ctx }; + let result = governance_vp.validate_tx(&tx, &keys_changed, &verifiers); + // this should fail + assert_matches!(&result, Err(_)); + + if result.is_err() { + state.write_log_mut().drop_tx(); + } else { + state.write_log_mut().commit_tx(); + } + state.commit_block().unwrap(); + + let governance_balance_key = balance_key(&nam(), &ADDRESS); + let amount: token::Amount = + state.read(&governance_balance_key).unwrap().unwrap(); + assert_eq!(amount, token::Amount::native_whole(0)); + + let author_balance_key = balance_key(&nam(), &signer_address); + let amount: token::Amount = + state.read(&author_balance_key).unwrap().unwrap(); + assert_eq!(amount, token::Amount::native_whole(500)); + + let governance_counter_key = get_counter_key(); + let counter: u64 = + state.read(&governance_counter_key).unwrap().unwrap(); + assert_eq!(counter, 0); + } + + #[test] + fn test_governance_proposal_more_funds_accepted() { + let mut state = init_storage(); + + let proposal_id = 0; + let activation_epoch = 19; + + let keys_changed = get_proposal_keys(proposal_id, activation_epoch); + + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(u64::MAX.into()), + )); + let (vp_wasm_cache, _vp_cache_dir) = + wasm::compilation_cache::common::testing::cache(); + + let tx_index = TxIndex::default(); + + let signer = keypair_1(); + let signer_address = Address::from(&signer.clone().ref_to()); + let verifiers = 
BTreeSet::from([signer_address.clone()]); + + initialize_account_balance( + &mut state, + &signer_address.clone(), + token::Amount::native_whole(510), + ); + initialize_account_balance( + &mut state, + &ADDRESS, + token::Amount::native_whole(0), + ); + state.commit_block().unwrap(); + + let tx_code = vec![]; + let tx_data = vec![]; + + let mut tx = Tx::from_type(TxType::Raw); + tx.header.chain_id = state.in_mem().chain_id.clone(); + tx.set_code(Code::new(tx_code, None)); + tx.set_data(Data::new(tx_data)); + tx.add_section(Section::Authorization(Authorization::new( + vec![tx.header_hash()], + [(0, keypair_1())].into_iter().collect(), + None, + ))); + + init_proposal( + &mut state, + proposal_id, + 509, + 3, + 9, + 19, + &signer_address, + false, + ); + + let ctx = Ctx::new( + &ADDRESS, + &state, + &tx, + &tx_index, + &gas_meter, + &keys_changed, + &verifiers, + vp_wasm_cache, + ); + + let governance_vp = GovernanceVp { ctx }; + let result = governance_vp.validate_tx(&tx, &keys_changed, &verifiers); + assert_matches!(&result, Ok(_)); + + if result.is_err() { + state.write_log_mut().drop_tx(); + } else { + state.write_log_mut().commit_tx(); + } + state.commit_block().unwrap(); + + let governance_balance_key = balance_key(&nam(), &ADDRESS); + let amount: token::Amount = + state.read(&governance_balance_key).unwrap().unwrap(); + assert_eq!(amount, token::Amount::native_whole(509)); + + let author_balance_key = balance_key(&nam(), &signer_address); + let amount: token::Amount = + state.read(&author_balance_key).unwrap().unwrap(); + assert_eq!(amount, token::Amount::native_whole(1)); + + let governance_counter_key = get_counter_key(); + let counter: u64 = + state.read(&governance_counter_key).unwrap().unwrap(); + assert_eq!(counter, 1); + } + + #[test] + fn test_governance_too_small_voting_period_failed() { + let mut state = init_storage(); + + let proposal_id = 0; + let grace_epoch = 19; + + let keys_changed = get_proposal_keys(proposal_id, grace_epoch); + + let 
gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(u64::MAX.into()), + )); + let (vp_wasm_cache, _vp_cache_dir) = + wasm::compilation_cache::common::testing::cache(); + + let tx_index = TxIndex::default(); + + let signer = keypair_1(); + let signer_address = Address::from(&signer.clone().ref_to()); + let verifiers = BTreeSet::from([signer_address.clone()]); + + initialize_account_balance( + &mut state, + &signer_address.clone(), + token::Amount::native_whole(510), + ); + initialize_account_balance( + &mut state, + &ADDRESS, + token::Amount::native_whole(0), + ); + state.commit_block().unwrap(); + + let tx_code = vec![]; + let tx_data = vec![]; + + let mut tx = Tx::from_type(TxType::Raw); + tx.header.chain_id = state.in_mem().chain_id.clone(); + tx.set_code(Code::new(tx_code, None)); + tx.set_data(Data::new(tx_data)); + tx.add_section(Section::Authorization(Authorization::new( + vec![tx.header_hash()], + [(0, keypair_1())].into_iter().collect(), + None, + ))); + + init_proposal( + &mut state, + proposal_id, + 509, + 3, + 8, + 19, + &signer_address, + false, + ); + + let ctx = Ctx::new( + &ADDRESS, + &state, + &tx, + &tx_index, + &gas_meter, + &keys_changed, + &verifiers, + vp_wasm_cache, + ); + + let governance_vp = GovernanceVp { ctx }; + // this should return true because state has been stored + assert_matches!( + governance_vp.validate_tx(&tx, &keys_changed, &verifiers), + Err(_) + ); + } + + #[test] + fn test_governance_too_small_grace_period_failed() { + let mut state = init_storage(); + + let proposal_id = 0; + let grace_epoch = 12; + + let keys_changed = get_proposal_keys(proposal_id, grace_epoch); + + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(u64::MAX.into()), + )); + let (vp_wasm_cache, _vp_cache_dir) = + wasm::compilation_cache::common::testing::cache(); + + let tx_index = TxIndex::default(); + + let signer = keypair_1(); + let signer_address = 
Address::from(&signer.clone().ref_to()); + let verifiers = BTreeSet::from([signer_address.clone()]); + + initialize_account_balance( + &mut state, + &signer_address.clone(), + token::Amount::native_whole(510), + ); + initialize_account_balance( + &mut state, + &ADDRESS, + token::Amount::native_whole(0), + ); + state.commit_block().unwrap(); + + let tx_code = vec![]; + let tx_data = vec![]; + + let mut tx = Tx::from_type(TxType::Raw); + tx.header.chain_id = state.in_mem().chain_id.clone(); + tx.set_code(Code::new(tx_code, None)); + tx.set_data(Data::new(tx_data)); + tx.add_section(Section::Authorization(Authorization::new( + vec![tx.header_hash()], + [(0, keypair_1())].into_iter().collect(), + None, + ))); + + init_proposal( + &mut state, + proposal_id, + 509, + 3, + 9, + 12, + &signer_address, + false, + ); + + let ctx = Ctx::new( + &ADDRESS, + &state, + &tx, + &tx_index, + &gas_meter, + &keys_changed, + &verifiers, + vp_wasm_cache, + ); + + let governance_vp = GovernanceVp { ctx }; + // this should return true because state has been stored + assert_matches!( + governance_vp.validate_tx(&tx, &keys_changed, &verifiers), + Err(_) + ); + } + + #[test] + fn test_governance_too_big_voting_window_failed() { + let mut state = init_storage(); + + let proposal_id = 0; + let grace_epoch = 40; + + let keys_changed = get_proposal_keys(proposal_id, grace_epoch); + + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(u64::MAX.into()), + )); + let (vp_wasm_cache, _vp_cache_dir) = + wasm::compilation_cache::common::testing::cache(); + + let tx_index = TxIndex::default(); + + let signer = keypair_1(); + let signer_address = Address::from(&signer.clone().ref_to()); + let verifiers = BTreeSet::from([signer_address.clone()]); + + initialize_account_balance( + &mut state, + &signer_address.clone(), + token::Amount::native_whole(510), + ); + initialize_account_balance( + &mut state, + &ADDRESS, + token::Amount::native_whole(0), + ); + 
state.commit_block().unwrap(); + + let tx_code = vec![]; + let tx_data = vec![]; + + let mut tx = Tx::from_type(TxType::Raw); + tx.header.chain_id = state.in_mem().chain_id.clone(); + tx.set_code(Code::new(tx_code, None)); + tx.set_data(Data::new(tx_data)); + tx.add_section(Section::Authorization(Authorization::new( + vec![tx.header_hash()], + [(0, keypair_1())].into_iter().collect(), + None, + ))); + + init_proposal( + &mut state, + proposal_id, + 509, + 3, + 9, + 40, + &signer_address, + false, + ); + + let ctx = Ctx::new( + &ADDRESS, + &state, + &tx, + &tx_index, + &gas_meter, + &keys_changed, + &verifiers, + vp_wasm_cache, + ); + + let governance_vp = GovernanceVp { ctx }; + // this should return true because state has been stored + assert_matches!( + governance_vp.validate_tx(&tx, &keys_changed, &verifiers), + Err(_) + ); + } + + #[test] + fn test_governance_no_committing_key_failed() { + let mut state = init_storage(); + + let proposal_id = 0; + let activation_epoch = 19; + + let counter_key = get_counter_key(); + let voting_end_epoch_key = get_voting_end_epoch_key(proposal_id); + let voting_start_epoch_key = get_voting_start_epoch_key(proposal_id); + let activation_epoch_key = get_activation_epoch_key(proposal_id); + let content_key = get_content_key(proposal_id); + let author_key = get_author_key(proposal_id); + let proposal_type_key = get_proposal_type_key(proposal_id); + let funds_key = get_funds_key(proposal_id); + + let keys_changed = BTreeSet::from([ + counter_key.clone(), + funds_key.clone(), + content_key.clone(), + author_key.clone(), + proposal_type_key.clone(), + voting_start_epoch_key.clone(), + voting_end_epoch_key.clone(), + activation_epoch_key.clone(), + ]); + + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(u64::MAX.into()), + )); + let (vp_wasm_cache, _vp_cache_dir) = + wasm::compilation_cache::common::testing::cache(); + + let tx_index = TxIndex::default(); + + let signer = keypair_1(); + let 
signer_address = Address::from(&signer.clone().ref_to()); + let verifiers = BTreeSet::from([signer_address.clone()]); + + initialize_account_balance( + &mut state, + &signer_address.clone(), + token::Amount::native_whole(510), + ); + initialize_account_balance( + &mut state, + &ADDRESS, + token::Amount::native_whole(0), + ); + state.commit_block().unwrap(); + + let tx_code = vec![]; + let tx_data = vec![]; + + let mut tx = Tx::from_type(TxType::Raw); + tx.header.chain_id = state.in_mem().chain_id.clone(); + tx.set_code(Code::new(tx_code, None)); + tx.set_data(Data::new(tx_data)); + tx.add_section(Section::Authorization(Authorization::new( + vec![tx.header_hash()], + [(0, keypair_1())].into_iter().collect(), + None, + ))); + + init_proposal( + &mut state, + proposal_id, + 509, + 3, + 9, + activation_epoch, + &signer_address, + true, + ); + + let ctx = Ctx::new( + &ADDRESS, + &state, + &tx, + &tx_index, + &gas_meter, + &keys_changed, + &verifiers, + vp_wasm_cache, + ); + + let governance_vp = GovernanceVp { ctx }; + // this should return true because state has been stored + assert_matches!( + governance_vp.validate_tx(&tx, &keys_changed, &verifiers), + Err(_) + ); + } + + #[test] + fn test_governance_invalid_start_epoch_failed() { + let mut state = init_storage(); + + let proposal_id = 0; + let activation_epoch = 19; + + let counter_key = get_counter_key(); + let voting_end_epoch_key = get_voting_end_epoch_key(proposal_id); + let voting_start_epoch_key = get_voting_start_epoch_key(proposal_id); + let activation_epoch_key = get_activation_epoch_key(proposal_id); + let content_key = get_content_key(proposal_id); + let author_key = get_author_key(proposal_id); + let proposal_type_key = get_proposal_type_key(proposal_id); + let funds_key = get_funds_key(proposal_id); + + let keys_changed = BTreeSet::from([ + counter_key.clone(), + funds_key.clone(), + content_key.clone(), + author_key.clone(), + proposal_type_key.clone(), + voting_start_epoch_key.clone(), + 
voting_end_epoch_key.clone(), + activation_epoch_key.clone(), + ]); + + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(u64::MAX.into()), + )); + let (vp_wasm_cache, _vp_cache_dir) = + wasm::compilation_cache::common::testing::cache(); + + let tx_index = TxIndex::default(); + + let signer = keypair_1(); + let signer_address = Address::from(&signer.clone().ref_to()); + let verifiers = BTreeSet::from([signer_address.clone()]); + + initialize_account_balance( + &mut state, + &signer_address.clone(), + token::Amount::native_whole(510), + ); + initialize_account_balance( + &mut state, + &ADDRESS, + token::Amount::native_whole(0), + ); + state.commit_block().unwrap(); + + let tx_code = vec![]; + let tx_data = vec![]; + + let mut tx = Tx::from_type(TxType::Raw); + tx.header.chain_id = state.in_mem().chain_id.clone(); + tx.set_code(Code::new(tx_code, None)); + tx.set_data(Data::new(tx_data)); + tx.add_section(Section::Authorization(Authorization::new( + vec![tx.header_hash()], + [(0, keypair_1())].into_iter().collect(), + None, + ))); + + init_proposal( + &mut state, + proposal_id, + 500, + 0, + 9, + activation_epoch, + &signer_address, + false, + ); + + let ctx = Ctx::new( + &ADDRESS, + &state, + &tx, + &tx_index, + &gas_meter, + &keys_changed, + &verifiers, + vp_wasm_cache, + ); + + let governance_vp = GovernanceVp { ctx }; + // this should return true because state has been stored + assert_matches!( + governance_vp.validate_tx(&tx, &keys_changed, &verifiers), + Err(_) + ); + } + + #[test] + fn test_governance_vote_validator_success() { + let mut state = init_storage(); + + let proposal_id = 0; + let activation_epoch = 19; + + let mut keys_changed = get_proposal_keys(proposal_id, activation_epoch); + + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(u64::MAX.into()), + )); + let (vp_wasm_cache, _vp_cache_dir) = + wasm::compilation_cache::common::testing::cache(); + + let tx_index 
= TxIndex::default(); + + let signer = keypair_1(); + let signer_address = Address::from(&signer.clone().ref_to()); + let mut verifiers = BTreeSet::from([signer_address.clone()]); + + initialize_account_balance( + &mut state, + &signer_address.clone(), + token::Amount::native_whole(510), + ); + initialize_account_balance( + &mut state, + &ADDRESS, + token::Amount::native_whole(0), + ); + state.commit_block().unwrap(); + + let tx_code = vec![]; + let tx_data = vec![]; + + let mut tx = Tx::from_type(TxType::Raw); + tx.header.chain_id = state.in_mem().chain_id.clone(); + tx.set_code(Code::new(tx_code, None)); + tx.set_data(Data::new(tx_data)); + tx.add_section(Section::Authorization(Authorization::new( + vec![tx.header_hash()], + [(0, keypair_1())].into_iter().collect(), + None, + ))); + + init_proposal( + &mut state, + proposal_id, + 500, + 3, + 9, + 19, + &signer_address, + false, + ); + + let ctx = Ctx::new( + &ADDRESS, + &state, + &tx, + &tx_index, + &gas_meter, + &keys_changed, + &verifiers, + vp_wasm_cache.clone(), + ); + + let governance_vp = GovernanceVp { ctx }; + // this should return true because state has been stored + assert_matches!( + governance_vp.validate_tx(&tx, &keys_changed, &verifiers), + Ok(_) + ); + + state.write_log_mut().commit_tx(); + state.commit_block().unwrap(); + + let height = state.in_mem().get_block_height().0 + (7 * 2); + + update_epoch_to(&mut state, 7, height); + + let validator_address = established_address_1(); + + let vote_key = get_vote_proposal_key( + 0, + validator_address.clone(), + validator_address.clone(), + ); + state + .push_action(Action::Gov(GovAction::VoteProposal { + id: 0, + voter: validator_address.clone(), + })) + .unwrap(); + state + .write_log_mut() + .write(&vote_key, ProposalVote::Yay.serialize_to_vec()) + .unwrap(); + + keys_changed.clear(); + keys_changed.insert(vote_key); + + verifiers.clear(); + verifiers.insert(validator_address); + + let ctx = Ctx::new( + &ADDRESS, + &state, + &tx, + &tx_index, + 
&gas_meter, + &keys_changed, + &verifiers, + vp_wasm_cache, + ); + + let governance_vp = GovernanceVp { ctx }; + + assert_matches!( + governance_vp.validate_tx(&tx, &keys_changed, &verifiers), + Ok(_) + ); + } + + #[test] + fn test_governance_vote_validator_out_of_voting_window_fail() { + let mut state = init_storage(); + + let proposal_id = 0; + let activation_epoch = 19; + + let mut keys_changed = get_proposal_keys(proposal_id, activation_epoch); + + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(u64::MAX.into()), + )); + let (vp_wasm_cache, _vp_cache_dir) = + wasm::compilation_cache::common::testing::cache(); + + let tx_index = TxIndex::default(); + + let signer = keypair_1(); + let signer_address = Address::from(&signer.clone().ref_to()); + let mut verifiers = BTreeSet::from([signer_address.clone()]); + + initialize_account_balance( + &mut state, + &signer_address.clone(), + token::Amount::native_whole(510), + ); + initialize_account_balance( + &mut state, + &ADDRESS, + token::Amount::native_whole(0), + ); + state.commit_block().unwrap(); + + let tx_code = vec![]; + let tx_data = vec![]; + + let mut tx = Tx::from_type(TxType::Raw); + tx.header.chain_id = state.in_mem().chain_id.clone(); + tx.set_code(Code::new(tx_code, None)); + tx.set_data(Data::new(tx_data)); + tx.add_section(Section::Authorization(Authorization::new( + vec![tx.header_hash()], + [(0, keypair_1())].into_iter().collect(), + None, + ))); + + init_proposal( + &mut state, + proposal_id, + 500, + 3, + 9, + 19, + &signer_address, + false, + ); + + let ctx = Ctx::new( + &ADDRESS, + &state, + &tx, + &tx_index, + &gas_meter, + &keys_changed, + &verifiers, + vp_wasm_cache.clone(), + ); + + let governance_vp = GovernanceVp { ctx }; + // this should return true because state has been stored + assert_matches!( + governance_vp.validate_tx(&tx, &keys_changed, &verifiers), + Ok(_) + ); + + state.write_log_mut().commit_tx(); + state.commit_block().unwrap(); + + 
let height = state.in_mem().get_block_height().0 + (7 * 2); + + update_epoch_to(&mut state, 10, height); + + let validator_address = established_address_1(); + + let vote_key = get_vote_proposal_key( + 0, + validator_address.clone(), + validator_address.clone(), + ); + state + .push_action(Action::Gov(GovAction::VoteProposal { + id: 0, + voter: validator_address.clone(), + })) + .unwrap(); + state + .write_log_mut() + .write(&vote_key, ProposalVote::Yay.serialize_to_vec()) + .unwrap(); + + keys_changed.clear(); + keys_changed.insert(vote_key); + + verifiers.clear(); + verifiers.insert(validator_address); + + let ctx = Ctx::new( + &ADDRESS, + &state, + &tx, + &tx_index, + &gas_meter, + &keys_changed, + &verifiers, + vp_wasm_cache, + ); + + let governance_vp = GovernanceVp { ctx }; + + assert_matches!( + governance_vp.validate_tx(&tx, &keys_changed, &verifiers), + Err(_) + ); + } + + #[test] + fn test_governance_vote_validator_fail() { + let mut state = init_storage(); + + let proposal_id = 0; + let activation_epoch = 19; + + let mut keys_changed = get_proposal_keys(proposal_id, activation_epoch); + + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(u64::MAX.into()), + )); + let (vp_wasm_cache, _vp_cache_dir) = + wasm::compilation_cache::common::testing::cache(); + + let tx_index = TxIndex::default(); + + let signer = keypair_1(); + let signer_address = Address::from(&signer.clone().ref_to()); + let mut verifiers = BTreeSet::from([signer_address.clone()]); + + initialize_account_balance( + &mut state, + &signer_address.clone(), + token::Amount::native_whole(510), + ); + initialize_account_balance( + &mut state, + &ADDRESS, + token::Amount::native_whole(0), + ); + state.commit_block().unwrap(); + + let tx_code = vec![]; + let tx_data = vec![]; + + let mut tx = Tx::from_type(TxType::Raw); + tx.header.chain_id = state.in_mem().chain_id.clone(); + tx.set_code(Code::new(tx_code, None)); + tx.set_data(Data::new(tx_data)); + 
tx.add_section(Section::Authorization(Authorization::new( + vec![tx.header_hash()], + [(0, keypair_1())].into_iter().collect(), + None, + ))); + + init_proposal( + &mut state, + proposal_id, + 500, + 3, + 9, + 19, + &signer_address, + false, + ); + + let ctx = Ctx::new( + &ADDRESS, + &state, + &tx, + &tx_index, + &gas_meter, + &keys_changed, + &verifiers, + vp_wasm_cache.clone(), + ); + + let governance_vp = GovernanceVp { ctx }; + // this should return true because state has been stored + assert_matches!( + governance_vp.validate_tx(&tx, &keys_changed, &verifiers), + Ok(_) + ); + + state.write_log_mut().commit_tx(); + state.commit_block().unwrap(); + + let height = state.in_mem().get_block_height().0 + (7 * 2); + + update_epoch_to(&mut state, 8, height); + + let validator_address = established_address_1(); + + let vote_key = get_vote_proposal_key( + 0, + validator_address.clone(), + validator_address.clone(), + ); + state + .push_action(Action::Gov(GovAction::VoteProposal { + id: 0, + voter: validator_address.clone(), + })) + .unwrap(); + state + .write_log_mut() + .write(&vote_key, ProposalVote::Yay.serialize_to_vec()) + .unwrap(); + + keys_changed.clear(); + keys_changed.insert(vote_key); + + verifiers.clear(); + verifiers.insert(validator_address); + + let ctx = Ctx::new( + &ADDRESS, + &state, + &tx, + &tx_index, + &gas_meter, + &keys_changed, + &verifiers, + vp_wasm_cache, + ); + + let governance_vp = GovernanceVp { ctx }; + + assert_matches!( + governance_vp.validate_tx(&tx, &keys_changed, &verifiers), + Err(_) + ); + } + + #[test] + fn test_governance_vote_delegator_success() { + let mut state = init_storage(); + + let proposal_id = 0; + let activation_epoch = 19; + + let mut keys_changed = get_proposal_keys(proposal_id, activation_epoch); + + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(u64::MAX.into()), + )); + let (vp_wasm_cache, _vp_cache_dir) = + wasm::compilation_cache::common::testing::cache(); + + let 
tx_index = TxIndex::default(); + + let signer = keypair_1(); + let signer_address = Address::from(&signer.clone().ref_to()); + let mut verifiers = BTreeSet::from([signer_address.clone()]); + + initialize_account_balance( + &mut state, + &signer_address.clone(), + token::Amount::native_whole(510), + ); + initialize_account_balance( + &mut state, + &ADDRESS, + token::Amount::native_whole(0), + ); + state.commit_block().unwrap(); + + let tx_code = vec![]; + let tx_data = vec![]; + + let mut tx = Tx::from_type(TxType::Raw); + tx.header.chain_id = state.in_mem().chain_id.clone(); + tx.set_code(Code::new(tx_code, None)); + tx.set_data(Data::new(tx_data)); + tx.add_section(Section::Authorization(Authorization::new( + vec![tx.header_hash()], + [(0, keypair_1())].into_iter().collect(), + None, + ))); + + init_proposal( + &mut state, + proposal_id, + 500, + 3, + 9, + 19, + &signer_address, + false, + ); + + let ctx = Ctx::new( + &ADDRESS, + &state, + &tx, + &tx_index, + &gas_meter, + &keys_changed, + &verifiers, + vp_wasm_cache.clone(), + ); + + let governance_vp = GovernanceVp { ctx }; + + assert_matches!( + governance_vp.validate_tx(&tx, &keys_changed, &verifiers), + Ok(_) + ); + + state.write_log_mut().commit_tx(); + state.commit_block().unwrap(); + + let height = state.in_mem().get_block_height().0 + (9 * 2); + + let validator_address = established_address_1(); + let delegator_address = established_address_3(); + + initialize_account_balance( + &mut state, + &delegator_address, + token::Amount::native_whole(1000000), + ); + + bond_tokens( + &mut state, + Some(&delegator_address), + &validator_address, + token::Amount::from_u64(10000), + Epoch(1), + None, + ) + .unwrap(); + + update_epoch_to(&mut state, 9, height); + + let vote_key = get_vote_proposal_key( + 0, + delegator_address.clone(), + validator_address.clone(), + ); + state + .push_action(Action::Gov(GovAction::VoteProposal { + id: 0, + voter: delegator_address.clone(), + })) + .unwrap(); + state + .write_log_mut() 
+ .write(&vote_key, ProposalVote::Yay.serialize_to_vec()) + .unwrap(); + + keys_changed.clear(); + keys_changed.insert(vote_key); + + verifiers.clear(); + verifiers.insert(delegator_address); + + let ctx = Ctx::new( + &ADDRESS, + &state, + &tx, + &tx_index, + &gas_meter, + &keys_changed, + &verifiers, + vp_wasm_cache, + ); + + let governance_vp = GovernanceVp { ctx }; + + assert_matches!( + governance_vp.validate_tx(&tx, &keys_changed, &verifiers), + Ok(_) + ); + } + + #[test] + fn test_governance_vote_delegator_fail() { + let mut state = init_storage(); + + let proposal_id = 0; + let activation_epoch = 19; + + let mut keys_changed = get_proposal_keys(proposal_id, activation_epoch); + + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(u64::MAX.into()), + )); + let (vp_wasm_cache, _vp_cache_dir) = + wasm::compilation_cache::common::testing::cache(); + + let tx_index = TxIndex::default(); + + let signer = keypair_1(); + let signer_address = Address::from(&signer.clone().ref_to()); + let mut verifiers = BTreeSet::from([signer_address.clone()]); + + initialize_account_balance( + &mut state, + &signer_address.clone(), + token::Amount::native_whole(510), + ); + initialize_account_balance( + &mut state, + &ADDRESS, + token::Amount::native_whole(0), + ); + state.commit_block().unwrap(); + + let tx_code = vec![]; + let tx_data = vec![]; + + let mut tx = Tx::from_type(TxType::Raw); + tx.header.chain_id = state.in_mem().chain_id.clone(); + tx.set_code(Code::new(tx_code, None)); + tx.set_data(Data::new(tx_data)); + tx.add_section(Section::Authorization(Authorization::new( + vec![tx.header_hash()], + [(0, keypair_1())].into_iter().collect(), + None, + ))); + + init_proposal( + &mut state, + proposal_id, + 500, + 3, + 9, + 19, + &signer_address, + false, + ); + + let ctx = Ctx::new( + &ADDRESS, + &state, + &tx, + &tx_index, + &gas_meter, + &keys_changed, + &verifiers, + vp_wasm_cache.clone(), + ); + + let governance_vp = GovernanceVp 
{ ctx }; + + assert_matches!( + governance_vp.validate_tx(&tx, &keys_changed, &verifiers), + Ok(_) + ); + + state.write_log_mut().commit_tx(); + state.commit_block().unwrap(); + + let height = state.in_mem().get_block_height().0 + (10 * 2); + + let validator_address = established_address_1(); + let delegator_address = established_address_3(); + + initialize_account_balance( + &mut state, + &delegator_address, + token::Amount::native_whole(1000000), + ); + + bond_tokens( + &mut state, + Some(&delegator_address), + &validator_address, + token::Amount::from_u64(10000), + Epoch(1), + None, + ) + .unwrap(); + + update_epoch_to(&mut state, 10, height); + + let vote_key = get_vote_proposal_key( + 0, + delegator_address.clone(), + validator_address.clone(), + ); + state + .push_action(Action::Gov(GovAction::VoteProposal { + id: 0, + voter: delegator_address.clone(), + })) + .unwrap(); + state + .write_log_mut() + .write(&vote_key, ProposalVote::Yay.serialize_to_vec()) + .unwrap(); + + keys_changed.clear(); + keys_changed.insert(vote_key); + + verifiers.clear(); + verifiers.insert(delegator_address); + + let ctx = Ctx::new( + &ADDRESS, + &state, + &tx, + &tx_index, + &gas_meter, + &keys_changed, + &verifiers, + vp_wasm_cache, + ); + + let governance_vp = GovernanceVp { ctx }; + + assert_matches!( + governance_vp.validate_tx(&tx, &keys_changed, &verifiers), + Err(_) + ); + } + + #[test] + fn test_governance_vote_invalid_verifier_fail() { + let mut state = init_storage(); + + let proposal_id = 0; + let activation_epoch = 19; + + let mut keys_changed = get_proposal_keys(proposal_id, activation_epoch); + + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(u64::MAX.into()), + )); + let (vp_wasm_cache, _vp_cache_dir) = + wasm::compilation_cache::common::testing::cache(); + + let tx_index = TxIndex::default(); + + let signer = keypair_1(); + let signer_address = Address::from(&signer.clone().ref_to()); + let mut verifiers = 
BTreeSet::from([signer_address.clone()]); + + initialize_account_balance( + &mut state, + &signer_address.clone(), + token::Amount::native_whole(510), + ); + initialize_account_balance( + &mut state, + &ADDRESS, + token::Amount::native_whole(0), + ); + state.commit_block().unwrap(); + + let tx_code = vec![]; + let tx_data = vec![]; + + let mut tx = Tx::from_type(TxType::Raw); + tx.header.chain_id = state.in_mem().chain_id.clone(); + tx.set_code(Code::new(tx_code, None)); + tx.set_data(Data::new(tx_data)); + tx.add_section(Section::Authorization(Authorization::new( + vec![tx.header_hash()], + [(0, keypair_1())].into_iter().collect(), + None, + ))); + + init_proposal( + &mut state, + proposal_id, + 500, + 3, + 9, + 19, + &signer_address, + false, + ); + + let ctx = Ctx::new( + &ADDRESS, + &state, + &tx, + &tx_index, + &gas_meter, + &keys_changed, + &verifiers, + vp_wasm_cache.clone(), + ); + + let governance_vp = GovernanceVp { ctx }; + + assert_matches!( + governance_vp.validate_tx(&tx, &keys_changed, &verifiers), + Ok(_) + ); + + state.write_log_mut().commit_tx(); + state.commit_block().unwrap(); + + let height = state.in_mem().get_block_height().0 + (10 * 2); + + let validator_address = established_address_1(); + let delegator_address = established_address_3(); + + initialize_account_balance( + &mut state, + &delegator_address, + token::Amount::native_whole(1000000), + ); + + bond_tokens( + &mut state, + Some(&delegator_address), + &validator_address, + token::Amount::from_u64(10000), + Epoch(1), + None, + ) + .unwrap(); + + update_epoch_to(&mut state, 10, height); + + let vote_key = get_vote_proposal_key( + 0, + delegator_address.clone(), + validator_address.clone(), + ); + state + .push_action(Action::Gov(GovAction::VoteProposal { + id: 0, + voter: delegator_address.clone(), + })) + .unwrap(); + state + .write_log_mut() + .write(&vote_key, ProposalVote::Yay.serialize_to_vec()) + .unwrap(); + + keys_changed.clear(); + keys_changed.insert(vote_key); + + 
verifiers.clear(); + verifiers.insert(validator_address); + + let ctx = Ctx::new( + &ADDRESS, + &state, + &tx, + &tx_index, + &gas_meter, + &keys_changed, + &verifiers, + vp_wasm_cache, + ); + + let governance_vp = GovernanceVp { ctx }; + + assert_matches!( + governance_vp.validate_tx(&tx, &keys_changed, &verifiers), + Err(_) + ); + } +} diff --git a/crates/namada/src/ledger/governance/utils.rs b/crates/namada/src/ledger/governance/utils.rs index 3b79168723..ec31e87d54 100644 --- a/crates/namada/src/ledger/governance/utils.rs +++ b/crates/namada/src/ledger/governance/utils.rs @@ -1,7 +1,6 @@ //! Governance utility functions -use std::collections::HashMap; - +use namada_core::collections::HashMap; use namada_governance::utils::TallyResult; use namada_sdk::events::{Event, EventLevel}; use thiserror::Error; diff --git a/crates/namada/src/ledger/ibc/mod.rs b/crates/namada/src/ledger/ibc/mod.rs index d8f89952fb..a3746e62e5 100644 --- a/crates/namada/src/ledger/ibc/mod.rs +++ b/crates/namada/src/ledger/ibc/mod.rs @@ -1,10 +1,16 @@ //! IBC integration -pub use namada_ibc::storage; +use namada_core::event::EmitEvents; +use namada_core::token::Amount; use namada_ibc::storage::{ channel_counter_key, client_counter_key, connection_counter_key, + deposit_prefix, withdraw_prefix, +}; +pub use namada_ibc::{parameters, storage}; +use namada_state::{ + DBIter, Key, State, StorageError, StorageHasher, StorageRead, StorageWrite, + WlState, DB, }; -use namada_state::State; /// Initialize storage in the genesis block. 
pub fn init_genesis_storage(storage: &mut S) @@ -34,3 +40,42 @@ where .write(&key, init_value) .expect("Unable to write the initial channel counter"); } + +/// Update IBC-related data when finalizing block +pub fn finalize_block( + state: &mut WlState, + _events: &mut impl EmitEvents, + is_new_epoch: bool, +) -> Result<(), StorageError> +where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, +{ + if is_new_epoch { + clear_throughputs(state)?; + } + Ok(()) +} + +/// Clear the per-epoch throughputs (deposit and withdraw) +fn clear_throughputs( + state: &mut WlState, +) -> Result<(), StorageError> +where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, +{ + for prefix in [deposit_prefix(), withdraw_prefix()] { + let keys: Vec = state + .iter_prefix(&prefix)? + .map(|(key, _, _)| { + Key::parse(key).expect("The key should be parsable") + }) + .collect(); + for key in keys { + state.write(&key, Amount::from(0))?; + } + } + + Ok(()) +} diff --git a/crates/namada/src/ledger/mod.rs b/crates/namada/src/ledger/mod.rs index ed59846e93..041a1af407 100644 --- a/crates/namada/src/ledger/mod.rs +++ b/crates/namada/src/ledger/mod.rs @@ -43,14 +43,14 @@ mod dry_run_tx { { use borsh_ext::BorshSerializeExt; use namada_gas::{Gas, GasMetering, TxGasMeter}; - use namada_tx::data::{DecryptedTx, TxType}; + use namada_tx::data::TxType; use namada_tx::Tx; use crate::ledger::protocol::ShellParams; use crate::storage::TxIndex; let mut temp_state = ctx.state.with_temp_write_log(); - let mut tx = Tx::try_from(&request.data[..]).into_storage_result()?; + let tx = Tx::try_from(&request.data[..]).into_storage_result()?; tx.validate_tx().into_storage_result()?; let mut cumulated_gas = Gas::default(); @@ -77,12 +77,10 @@ mod dry_run_tx { temp_state.write_log_mut().commit_tx(); cumulated_gas = tx_gas_meter.borrow_mut().get_tx_consumed_gas(); - - tx.update_header(TxType::Decrypted(DecryptedTx::Decrypted)); let 
available_gas = tx_gas_meter.borrow().get_available_gas(); TxGasMeter::new_from_sub_limit(available_gas) } - TxType::Protocol(_) | TxType::Decrypted(_) => { + TxType::Protocol(_) => { // If dry run only the inner tx, use the max block gas as // the gas limit TxGasMeter::new(GasLimit::from( @@ -91,7 +89,6 @@ mod dry_run_tx { } TxType::Raw => { // Cast tx to a decrypted for execution - tx.update_header(TxType::Decrypted(DecryptedTx::Decrypted)); // If dry run only the inner tx, use the max block gas as // the gas limit @@ -145,7 +142,6 @@ mod test { use namada_state::testing::TestState; use namada_state::StorageWrite; use namada_test_utils::TestWasms; - use namada_tx::data::decrypted::DecryptedTx; use namada_tx::data::TxType; use namada_tx::{Code, Data, Tx}; use tempfile::TempDir; @@ -286,8 +282,7 @@ mod test { assert_eq!(current_epoch, read_epoch); // Request dry run tx - let mut outer_tx = - Tx::from_type(TxType::Decrypted(DecryptedTx::Decrypted)); + let mut outer_tx = Tx::from_type(TxType::Raw); outer_tx.header.chain_id = client.state.in_mem().chain_id.clone(); outer_tx.set_code(Code::from_hash(tx_hash, None)); outer_tx.set_data(Data::new(vec![])); diff --git a/crates/namada/src/ledger/native_vp/ethereum_bridge/bridge_pool_vp.rs b/crates/namada/src/ledger/native_vp/ethereum_bridge/bridge_pool_vp.rs index 331252256e..1e7e7cf388 100644 --- a/crates/namada/src/ledger/native_vp/ethereum_bridge/bridge_pool_vp.rs +++ b/crates/namada/src/ledger/native_vp/ethereum_bridge/bridge_pool_vp.rs @@ -17,31 +17,32 @@ use std::fmt::Debug; use std::marker::PhantomData; use borsh::BorshDeserialize; -use eyre::eyre; +use namada_core::booleans::BoolResultUnitExt; use namada_core::eth_bridge_pool::erc20_token_address; use namada_core::hints; use namada_ethereum_bridge::storage::bridge_pool::{ get_pending_key, is_bridge_pool_key, BRIDGE_POOL_ADDRESS, }; +use namada_ethereum_bridge::storage::eth_bridge_queries::is_bridge_active_at; use 
namada_ethereum_bridge::storage::parameters::read_native_erc20_address; use namada_ethereum_bridge::storage::whitelist; use namada_ethereum_bridge::ADDRESS as BRIDGE_ADDRESS; -use namada_state::StateRead; +use namada_state::{ResultExt, StateRead}; use namada_tx::Tx; use crate::address::{Address, InternalAddress}; use crate::eth_bridge_pool::{PendingTransfer, TransferToEthereumKind}; use crate::ethereum_events::EthAddress; -use crate::ledger::native_vp::{Ctx, NativeVp, StorageReader}; +use crate::ledger::native_vp::{self, Ctx, NativeVp, StorageReader}; use crate::storage::Key; use crate::token::storage_key::balance_key; use crate::token::Amount; use crate::vm::WasmCacheAccess; #[derive(thiserror::Error, Debug)] -#[error(transparent)] +#[error("Bridge Pool VP error: {0}")] /// Generic error that may be returned by the validity predicate -pub struct Error(#[from] eyre::Error); +pub struct Error(#[from] native_vp::Error); /// A positive or negative amount #[derive(Copy, Clone)] @@ -195,10 +196,13 @@ where } // some other error occurred while calculating // balance deltas - (None, _) | (_, None) => Err(Error(eyre!( - "Could not calculate the balance delta for {}", - payer_account - ))), + (None, _) | (_, None) => { + Err(native_vp::Error::AllocMessage(format!( + "Could not calculate the balance delta for {}", + payer_account + )) + .into()) + } } } @@ -269,7 +273,10 @@ where suffix: whitelist::KeyType::Whitelisted, } .into(); - (&self.ctx).read_pre_value(&key)?.unwrap_or(false) + (&self.ctx) + .read_pre_value(&key) + .map_err(Error)? + .unwrap_or(false) }; if !wnam_whitelisted { tracing::debug!( @@ -294,7 +301,10 @@ where suffix: whitelist::KeyType::Cap, } .into(); - (&self.ctx).read_pre_value(&key)?.unwrap_or_default() + (&self.ctx) + .read_pre_value(&key) + .map_err(Error)? 
+ .unwrap_or_default() }; if escrowed_balance > wnam_cap { tracing::debug!( @@ -511,8 +521,8 @@ fn sum_gas_and_token_amounts( .amount .checked_add(transfer.transfer.amount) .ok_or_else(|| { - Error(eyre!( - "Addition overflowed adding gas fee + transfer amount." + Error(native_vp::Error::SimpleMessage( + "Addition overflowed adding gas fee + transfer amount.", )) }) } @@ -529,75 +539,95 @@ where tx: &Tx, keys_changed: &BTreeSet, _verifiers: &BTreeSet
, - ) -> Result { + ) -> Result<(), Error> { tracing::debug!( keys_changed_len = keys_changed.len(), verifiers_len = _verifiers.len(), "Ethereum Bridge Pool VP triggered", ); + if !is_bridge_active_at( + &self.ctx.pre(), + self.ctx.state.in_mem().get_current_epoch().0, + ) + .map_err(Error)? + { + tracing::debug!( + "Rejecting transaction, since the Ethereum bridge is disabled." + ); + return Err(native_vp::Error::SimpleMessage( + "Rejecting transaction, since the Ethereum bridge is disabled.", + ) + .into()); + } let Some(tx_data) = tx.data() else { - return Err(eyre!("No transaction data found").into()); + return Err(native_vp::Error::SimpleMessage( + "No transaction data found", + ) + .into()); }; let transfer: PendingTransfer = BorshDeserialize::try_from_slice(&tx_data[..]) - .map_err(|e| Error(e.into()))?; + .into_storage_result() + .map_err(Error)?; let pending_key = get_pending_key(&transfer); // check that transfer is not already in the pool match (&self.ctx).read_pre_value::(&pending_key) { Ok(Some(_)) => { - tracing::debug!( + let error = native_vp::Error::new_const( "Rejecting transaction as the transfer is already in the \ - Ethereum bridge pool." - ); - return Ok(false); - } - Err(e) => { - return Err(eyre!( - "Could not read the storage key associated with the \ - transfer: {:?}", - e + Ethereum bridge pool.", ) - .into()); + .into(); + tracing::debug!("{error}"); + return Err(error); } + // NOTE: make sure we don't erase storage errors returned by the + // ctx, as these may contain gas errors! 
+ Err(e) => return Err(e.into()), _ => {} } for key in keys_changed.iter().filter(|k| is_bridge_pool_key(k)) { if *key != pending_key { - tracing::debug!( + let error = native_vp::Error::new_alloc(format!( "Rejecting transaction as it is attempting to change an \ - incorrect key in the Ethereum bridge pool: {}.\n \ - Expected key: {}", - key, - pending_key - ); - return Ok(false); + incorrect key in the Ethereum bridge pool: {key}.\n \ + Expected key: {pending_key}", + )) + .into(); + tracing::debug!("{error}"); + return Err(error); } } let pending: PendingTransfer = - (&self.ctx).read_post_value(&pending_key)?.ok_or(eyre!( - "Rejecting transaction as the transfer wasn't added to the \ - pool of pending transfers" - ))?; + (&self.ctx).read_post_value(&pending_key)?.ok_or_else(|| { + Error(native_vp::Error::SimpleMessage( + "Rejecting transaction as the transfer wasn't added to \ + the pool of pending transfers", + )) + })?; if pending != transfer { - tracing::debug!( + let error = native_vp::Error::new_alloc(format!( "An incorrect transfer was added to the Ethereum bridge pool: \ - {:?}.\n Expected: {:?}", - transfer, - pending - ); - return Ok(false); + {transfer:?}.\n Expected: {pending:?}", + )) + .into(); + tracing::debug!("{error}"); + return Err(error); } // The deltas in the escrowed amounts we must check. - let wnam_address = read_native_erc20_address(&self.ctx.pre())?; + let wnam_address = + read_native_erc20_address(&self.ctx.pre()).map_err(Error)?; let escrow_checks = self.determine_escrow_checks(&wnam_address, &transfer)?; if !escrow_checks.validate(keys_changed) { - tracing::debug!( - ?transfer, - "Missing storage modifications in the Bridge pool" - ); - return Ok(false); + let error = native_vp::Error::new_const( + // TODO: specify which storage changes are missing + "Missing storage modifications in the Bridge pool", + ) + .into(); + tracing::debug!("{error}"); + return Err(error); } // check that gas was correctly escrowed. 
if !self.check_gas_escrow( @@ -605,7 +635,10 @@ where &transfer, escrow_checks.gas_check, )? { - return Ok(false); + return Err(native_vp::Error::new_const( + "Gas was not correctly escrowed into the Bridge pool storage", + ) + .into()); } // check the escrowed assets if transfer.transfer.asset == wnam_address { @@ -613,35 +646,46 @@ where &wnam_address, &transfer, escrow_checks.token_check, - ) + )? + .ok_or_else(|| { + native_vp::Error::new_const( + "The wrapped NAM tokens were not escrowed properly", + ) + .into() + }) } else { - self.check_escrowed_toks(escrow_checks.token_check) + self.check_escrowed_toks(escrow_checks.token_check)? + .ok_or_else(|| { + native_vp::Error::new_alloc(format!( + "The {} tokens were not escrowed properly", + transfer.transfer.asset + )) + .into() + }) } - .map(|ok| { - if ok { - tracing::info!( - "The Ethereum bridge pool VP accepted the transfer {:?}.", - transfer - ); - } else { - tracing::debug!( - ?transfer, - "The assets of the transfer were not properly escrowed \ - into the Ethereum bridge pool." - ); - } - ok + .inspect(|_| { + tracing::info!( + "The Ethereum bridge pool VP accepted the transfer {:?}.", + transfer + ); + }) + .inspect_err(|err| { + tracing::debug!( + ?transfer, + reason = ?err, + "The assets of the transfer were not properly escrowed \ + into the Ethereum bridge pool." + ); }) } } -#[cfg(test)] +#[cfg(all(test, feature = "namada-eth-bridge"))] mod test_bridge_pool_vp { use std::cell::RefCell; use std::env::temp_dir; use namada_core::borsh::BorshSerializeExt; - use namada_core::validity_predicate::VpSentinel; use namada_ethereum_bridge::storage::bridge_pool::get_signed_root_key; use namada_ethereum_bridge::storage::parameters::{ Contracts, EthereumBridgeParams, UpgradeableContract, @@ -921,7 +965,6 @@ mod test_bridge_pool_vp { tx: &'a Tx, state: &'a TestState, gas_meter: &'a RefCell, - sentinel: &'a RefCell, keys_changed: &'a BTreeSet, verifiers: &'a BTreeSet
, ) -> Ctx<'a, TestState, WasmCacheRwAccess> { @@ -931,7 +974,6 @@ mod test_bridge_pool_vp { tx, &TxIndex(0), gas_meter, - sentinel, keys_changed, verifiers, VpCache::new(temp_dir(), 100usize), @@ -939,9 +981,8 @@ mod test_bridge_pool_vp { } enum Expect { - True, - False, - Error, + Accepted, + Rejected, } /// Helper function that tests various ways gas can be escrowed, @@ -1013,26 +1054,23 @@ mod test_bridge_pool_vp { let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(u64::MAX.into()), )); - let sentinel = RefCell::new(VpSentinel::default()); let vp = BridgePoolVp { - ctx: setup_ctx( - &tx, - &state, - &gas_meter, - &sentinel, - &keys_changed, - &verifiers, - ), + ctx: setup_ctx(&tx, &state, &gas_meter, &keys_changed, &verifiers), }; let mut tx = Tx::new(state.in_mem().chain_id.clone(), None); tx.add_data(transfer); let res = vp.validate_tx(&tx, &keys_changed, &verifiers); - match expect { - Expect::True => assert!(res.expect("Test failed")), - Expect::False => assert!(!res.expect("Test failed")), - Expect::Error => assert!(res.is_err()), + match (expect, res) { + (Expect::Accepted, Ok(())) => (), + (Expect::Accepted, Err(err)) => { + panic!("Expected VP success, but got: {err}") + } + (Expect::Rejected, Err(_)) => (), + (Expect::Rejected, Ok(())) => { + panic!("Expected VP failure, but the tx was accepted") + } } } @@ -1052,7 +1090,7 @@ mod test_bridge_pool_vp { .unwrap(); BTreeSet::from([get_pending_key(transfer)]) }, - Expect::True, + Expect::Accepted, ); } @@ -1073,7 +1111,7 @@ mod test_bridge_pool_vp { .unwrap(); BTreeSet::from([get_pending_key(transfer)]) }, - Expect::False, + Expect::Rejected, ); } @@ -1094,7 +1132,7 @@ mod test_bridge_pool_vp { .unwrap(); BTreeSet::from([get_pending_key(transfer)]) }, - Expect::False, + Expect::Rejected, ); } @@ -1115,7 +1153,7 @@ mod test_bridge_pool_vp { .unwrap(); BTreeSet::from([get_pending_key(transfer)]) }, - Expect::False, + Expect::Rejected, ); } @@ -1137,7 +1175,7 @@ mod 
test_bridge_pool_vp { .unwrap(); BTreeSet::from([get_pending_key(transfer)]) }, - Expect::False, + Expect::Rejected, ); } @@ -1158,7 +1196,7 @@ mod test_bridge_pool_vp { .unwrap(); BTreeSet::from([get_pending_key(transfer)]) }, - Expect::False, + Expect::Rejected, ); } @@ -1179,7 +1217,7 @@ mod test_bridge_pool_vp { .unwrap(); BTreeSet::from([get_pending_key(transfer)]) }, - Expect::False, + Expect::Rejected, ); } @@ -1200,7 +1238,7 @@ mod test_bridge_pool_vp { .unwrap(); BTreeSet::from([get_pending_key(transfer)]) }, - Expect::False, + Expect::Rejected, ); } @@ -1214,7 +1252,7 @@ mod test_bridge_pool_vp { SignedAmount::Negative(TOKENS.into()), SignedAmount::Positive(TOKENS.into()), |transfer, _| BTreeSet::from([get_pending_key(transfer)]), - Expect::Error, + Expect::Rejected, ); } @@ -1246,7 +1284,7 @@ mod test_bridge_pool_vp { .unwrap(); BTreeSet::from([get_pending_key(transfer)]) }, - Expect::False, + Expect::Rejected, ); } @@ -1278,7 +1316,7 @@ mod test_bridge_pool_vp { .unwrap(); BTreeSet::from([get_pending_key(transfer)]) }, - Expect::Error, + Expect::Rejected, ); } @@ -1302,7 +1340,7 @@ mod test_bridge_pool_vp { get_signed_root_key(), ]) }, - Expect::False, + Expect::Rejected, ); } @@ -1361,23 +1399,15 @@ mod test_bridge_pool_vp { let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(u64::MAX.into()), )); - let sentinel = RefCell::new(VpSentinel::default()); let vp = BridgePoolVp { - ctx: setup_ctx( - &tx, - &state, - &gas_meter, - &sentinel, - &keys_changed, - &verifiers, - ), + ctx: setup_ctx(&tx, &state, &gas_meter, &keys_changed, &verifiers), }; let mut tx = Tx::new(state.in_mem().chain_id.clone(), None); tx.add_data(transfer); let res = vp.validate_tx(&tx, &keys_changed, &verifiers); - assert!(!res.expect("Test failed")); + assert!(res.is_err()); } /// Test that a transfer added to the pool with zero gas fees @@ -1427,25 +1457,15 @@ mod test_bridge_pool_vp { let gas_meter = 
RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(u64::MAX.into()), )); - let sentinel = RefCell::new(VpSentinel::default()); let vp = BridgePoolVp { - ctx: setup_ctx( - &tx, - &state, - &gas_meter, - &sentinel, - &keys_changed, - &verifiers, - ), + ctx: setup_ctx(&tx, &state, &gas_meter, &keys_changed, &verifiers), }; let mut tx = Tx::new(state.in_mem().chain_id.clone(), None); tx.add_data(transfer); - let res = vp - .validate_tx(&tx, &keys_changed, &verifiers) - .expect("Test failed"); - assert!(!res); + let res = vp.validate_tx(&tx, &keys_changed, &verifiers); + assert!(res.is_err()); } /// Test that we can escrow Nam if we @@ -1516,25 +1536,15 @@ mod test_bridge_pool_vp { let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(u64::MAX.into()), )); - let sentinel = RefCell::new(VpSentinel::default()); let vp = BridgePoolVp { - ctx: setup_ctx( - &tx, - &state, - &gas_meter, - &sentinel, - &keys_changed, - &verifiers, - ), + ctx: setup_ctx(&tx, &state, &gas_meter, &keys_changed, &verifiers), }; let mut tx = Tx::new(state.in_mem().chain_id.clone(), None); tx.add_data(transfer); - let res = vp - .validate_tx(&tx, &keys_changed, &verifiers) - .expect("Test failed"); - assert!(res); + let res = vp.validate_tx(&tx, &keys_changed, &verifiers); + assert!(res.is_ok()); } /// Test that we can reject a transfer that @@ -1600,25 +1610,15 @@ mod test_bridge_pool_vp { let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(u64::MAX.into()), )); - let sentinel = RefCell::new(VpSentinel::default()); let vp = BridgePoolVp { - ctx: setup_ctx( - &tx, - &state, - &gas_meter, - &sentinel, - &keys_changed, - &verifiers, - ), + ctx: setup_ctx(&tx, &state, &gas_meter, &keys_changed, &verifiers), }; let mut tx = Tx::new(state.in_mem().chain_id.clone(), None); tx.add_data(transfer); - let res = vp - .validate_tx(&tx, &keys_changed, &verifiers) - .expect("Test failed"); - assert!(!res); + let 
res = vp.validate_tx(&tx, &keys_changed, &verifiers); + assert!(res.is_err()); } /// Test that we check escrowing Nam correctly when minting wNam @@ -1701,25 +1701,15 @@ mod test_bridge_pool_vp { let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(u64::MAX.into()), )); - let sentinel = RefCell::new(VpSentinel::default()); let vp = BridgePoolVp { - ctx: setup_ctx( - &tx, - &state, - &gas_meter, - &sentinel, - &keys_changed, - &verifiers, - ), + ctx: setup_ctx(&tx, &state, &gas_meter, &keys_changed, &verifiers), }; let mut tx = Tx::new(state.in_mem().chain_id.clone(), None); tx.add_data(transfer); - let res = vp - .validate_tx(&tx, &keys_changed, &verifiers) - .expect("Test failed"); - assert!(!res); + let res = vp.validate_tx(&tx, &keys_changed, &verifiers); + assert!(res.is_err()); } /// Auxiliary function to test NUT functionality. @@ -1788,26 +1778,23 @@ mod test_bridge_pool_vp { let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(u64::MAX.into()), )); - let sentinel = RefCell::new(VpSentinel::default()); let vp = BridgePoolVp { - ctx: setup_ctx( - &tx, - &state, - &gas_meter, - &sentinel, - &keys_changed, - &verifiers, - ), + ctx: setup_ctx(&tx, &state, &gas_meter, &keys_changed, &verifiers), }; let mut tx = Tx::from_type(TxType::Raw); tx.add_data(transfer); let res = vp.validate_tx(&tx, &keys_changed, &verifiers); - match expect { - Expect::True => assert!(res.expect("Test failed")), - Expect::False => assert!(!res.expect("Test failed")), - Expect::Error => assert!(res.is_err()), + match (expect, res) { + (Expect::Accepted, Ok(())) => (), + (Expect::Accepted, Err(err)) => { + panic!("Expected VP success, but got: {err}") + } + (Expect::Rejected, Err(_)) => (), + (Expect::Rejected, Ok(())) => { + panic!("Expected VP failure, but the tx was accepted") + } } } @@ -1816,13 +1803,13 @@ mod test_bridge_pool_vp { /// asset, but not hold ERC20s. 
#[test] fn test_reject_no_erc20_balance_despite_nut_balance() { - test_nut_aux(TransferToEthereumKind::Erc20, Expect::False) + test_nut_aux(TransferToEthereumKind::Erc20, Expect::Rejected) } /// Test the happy flow of escrowing NUTs. #[test] fn test_escrowing_nuts_happy_flow() { - test_nut_aux(TransferToEthereumKind::Nut, Expect::True) + test_nut_aux(TransferToEthereumKind::Nut, Expect::Accepted) } /// Test that the Bridge pool VP rejects a wNAM NUT transfer. @@ -1843,7 +1830,7 @@ mod test_bridge_pool_vp { .unwrap(); BTreeSet::from([get_pending_key(transfer)]) }, - Expect::False, + Expect::Rejected, ); } @@ -1865,7 +1852,7 @@ mod test_bridge_pool_vp { .unwrap(); BTreeSet::from([get_pending_key(transfer)]) }, - Expect::True, + Expect::Accepted, ); } diff --git a/crates/namada/src/ledger/native_vp/ethereum_bridge/nut.rs b/crates/namada/src/ledger/native_vp/ethereum_bridge/nut.rs index acabe77b2d..a880f47eac 100644 --- a/crates/namada/src/ledger/native_vp/ethereum_bridge/nut.rs +++ b/crates/namada/src/ledger/native_vp/ethereum_bridge/nut.rs @@ -2,22 +2,22 @@ use std::collections::BTreeSet; -use eyre::WrapErr; use namada_core::address::{Address, InternalAddress}; +use namada_core::booleans::BoolResultUnitExt; use namada_core::storage::Key; use namada_state::StateRead; use namada_tx::Tx; use namada_vp_env::VpEnv; -use crate::ledger::native_vp::{Ctx, NativeVp}; +use crate::ledger::native_vp::{self, Ctx, NativeVp}; use crate::token::storage_key::is_any_token_balance_key; use crate::token::Amount; use crate::vm::WasmCacheAccess; /// Generic error that may be returned by the validity predicate #[derive(thiserror::Error, Debug)] -#[error(transparent)] -pub struct Error(#[from] eyre::Report); +#[error("Non-usable token VP error: {0}")] +pub struct Error(#[from] native_vp::Error); /// Validity predicate for non-usable tokens. /// @@ -44,19 +44,22 @@ where _: &Tx, keys_changed: &BTreeSet, verifiers: &BTreeSet
, - ) -> Result { + ) -> Result<(), Self::Error> { tracing::debug!( keys_changed_len = keys_changed.len(), verifiers_len = verifiers.len(), "Non usable tokens VP triggered", ); - let is_multitoken = - verifiers.contains(&Address::Internal(InternalAddress::Multitoken)); - if !is_multitoken { - tracing::debug!("Rejecting non-multitoken transfer tx"); - return Ok(false); - } + verifiers + .contains(&Address::Internal(InternalAddress::Multitoken)) + .ok_or_else(|| { + let error = Error(native_vp::Error::new_const( + "Rejecting non-multitoken transfer tx", + )); + tracing::debug!("{error}"); + error + })?; let nut_owners = keys_changed.iter().filter_map( @@ -72,13 +75,11 @@ where let pre: Amount = self .ctx .read_pre(changed_key) - .context("Reading pre amount failed") .map_err(Error)? .unwrap_or_default(); let post: Amount = self .ctx .read_post(changed_key) - .context("Reading post amount failed") .map_err(Error)? .unwrap_or_default(); @@ -92,7 +93,12 @@ where post_amount = ?post, "Bridge pool balance should have increased" ); - return Ok(false); + return Err(native_vp::Error::new_alloc(format!( + "Bridge pool balance should have increased. The \ + previous balance was {pre:?}, the post balance \ + is {post:?}.", + )) + .into()); } } // arbitrary addresses should have their balance decrease @@ -104,13 +110,18 @@ where post_amount = ?post, "Balance should have decreased" ); - return Ok(false); + return Err(native_vp::Error::new_alloc(format!( + "Balance should have decreased. The previous \ + balance was {pre:?}, the post balance is \ + {post:?}." 
+ )) + .into()); } } } } - Ok(true) + Ok(()) } } @@ -119,12 +130,10 @@ mod test_nuts { use std::cell::RefCell; use std::env::temp_dir; - use assert_matches::assert_matches; use namada_core::address::testing::arb_non_internal_address; use namada_core::borsh::BorshSerializeExt; use namada_core::ethereum_events::testing::DAI_ERC20_ETH_ADDRESS; use namada_core::storage::TxIndex; - use namada_core::validity_predicate::VpSentinel; use namada_ethereum_bridge::storage::wrapped_erc20s; use namada_state::testing::TestState; use namada_state::StorageWrite; @@ -138,7 +147,7 @@ mod test_nuts { use crate::vm::WasmCacheRwAccess; /// Run a VP check on a NUT transfer between the two provided addresses. - fn check_nut_transfer(src: Address, dst: Address) -> Option { + fn check_nut_transfer(src: Address, dst: Address) -> bool { let nut = wrapped_erc20s::nut(&DAI_ERC20_ETH_ADDRESS); let src_balance_key = balance_key(&nut, &src); let dst_balance_key = balance_key(&nut, &dst); @@ -190,14 +199,12 @@ mod test_nuts { let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(u64::MAX.into()), )); - let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::<_, WasmCacheRwAccess>::new( &Address::Internal(InternalAddress::Nut(DAI_ERC20_ETH_ADDRESS)), &state, &tx, &TxIndex(0), &gas_meter, - &sentinel, &keys_changed, &verifiers, VpCache::new(temp_dir(), 100usize), @@ -219,7 +226,8 @@ mod test_nuts { println!("{key}: PRE={pre:?} POST={post:?}"); } - vp.validate_tx(&tx, &keys_changed, &verifiers).ok() + vp.validate_tx(&tx, &keys_changed, &verifiers) + .map_or_else(|_| false, |()| true) } proptest! 
{ @@ -229,19 +237,17 @@ mod test_nuts { fn test_nut_transfer_rejected( (src, dst) in (arb_non_internal_address(), arb_non_internal_address()) ) { - let status = check_nut_transfer(src, dst); - assert_matches!(status, Some(false)); + assert!(!check_nut_transfer(src, dst)); } /// Test that transferring NUTs from an arbitrary address to the /// Bridge pool address passes. #[test] fn test_nut_transfer_passes(src in arb_non_internal_address()) { - let status = check_nut_transfer( + assert!(check_nut_transfer( src, Address::Internal(InternalAddress::EthBridgePool), - ); - assert_matches!(status, Some(true)); + )); } } } diff --git a/crates/namada/src/ledger/native_vp/ethereum_bridge/vp.rs b/crates/namada/src/ledger/native_vp/ethereum_bridge/vp.rs index 52f3012901..252f5b69df 100644 --- a/crates/namada/src/ledger/native_vp/ethereum_bridge/vp.rs +++ b/crates/namada/src/ledger/native_vp/ethereum_bridge/vp.rs @@ -1,14 +1,16 @@ //! Validity predicate for the Ethereum bridge -use std::collections::{BTreeSet, HashSet}; -use eyre::{eyre, Result}; +use std::collections::BTreeSet; + use namada_core::address::Address; +use namada_core::booleans::BoolResultUnitExt; +use namada_core::collections::HashSet; use namada_core::storage::Key; use namada_ethereum_bridge::storage; use namada_ethereum_bridge::storage::escrow_key; use namada_tx::Tx; -use crate::ledger::native_vp::{Ctx, NativeVp, StorageReader}; +use crate::ledger::native_vp::{self, Ctx, NativeVp, StorageReader}; use crate::state::StateRead; use crate::token::storage_key::{balance_key, is_balance_key}; use crate::token::Amount; @@ -16,8 +18,8 @@ use crate::vm::WasmCacheAccess; /// Generic error that may be returned by the validity predicate #[derive(thiserror::Error, Debug)] -#[error(transparent)] -pub struct Error(#[from] eyre::Error); +#[error("Ethereum Bridge VP error: {0}")] +pub struct Error(#[from] native_vp::Error); /// Validity predicate for the Ethereum bridge pub struct EthBridge<'ctx, S, CA> @@ -37,47 +39,37 @@ where 
/// If the Ethereum bridge's escrow key was written to, we check /// that the NAM balance increased and that the Bridge pool VP has /// been triggered. - fn check_escrow( - &self, - verifiers: &BTreeSet
, - ) -> Result { + fn check_escrow(&self, verifiers: &BTreeSet
) -> Result<(), Error> { let escrow_key = balance_key( &self.ctx.state.in_mem().native_token, &crate::ethereum_bridge::ADDRESS, ); let escrow_pre: Amount = - if let Ok(Some(value)) = (&self.ctx).read_pre_value(&escrow_key) { - value - } else { - tracing::debug!( - "Could not retrieve the Ethereum bridge VP's balance from \ - storage" - ); - return Ok(false); - }; + (&self.ctx).read_pre_value(&escrow_key)?.unwrap_or_default(); let escrow_post: Amount = - if let Ok(Some(value)) = (&self.ctx).read_post_value(&escrow_key) { - value - } else { - tracing::debug!( - "Could not retrieve the modified Ethereum bridge VP's \ - balance after applying tx" - ); - return Ok(false); - }; + (&self.ctx).must_read_post_value(&escrow_key)?; // The amount escrowed should increase. if escrow_pre < escrow_post { // NB: normally, we only escrow NAM under the Ethereum bridge // address in the context of a Bridge pool transfer - Ok(verifiers.contains(&storage::bridge_pool::BRIDGE_POOL_ADDRESS)) + let bridge_pool_is_verifier = + verifiers.contains(&storage::bridge_pool::BRIDGE_POOL_ADDRESS); + + bridge_pool_is_verifier.ok_or_else(|| { + native_vp::Error::new_const( + "Bridge pool VP was not marked as a verifier of the \ + transaction", + ) + .into() + }) } else { - tracing::info!( - "A normal tx cannot decrease the amount of Nam escrowed in \ - the Ethereum bridge" - ); - Ok(false) + Err(native_vp::Error::new_const( + "User tx attempted to decrease the amount of native tokens \ + escrowed in the Ethereum Bridge's account", + ) + .into()) } } } @@ -104,19 +96,17 @@ where _: &Tx, keys_changed: &BTreeSet, verifiers: &BTreeSet
, - ) -> Result { + ) -> Result<(), Self::Error> { tracing::debug!( keys_changed_len = keys_changed.len(), verifiers_len = verifiers.len(), "Ethereum Bridge VP triggered", ); - if !validate_changed_keys( + validate_changed_keys( &self.ctx.state.in_mem().native_token, keys_changed, - )? { - return Ok(false); - } + )?; self.check_escrow(verifiers) } @@ -134,7 +124,7 @@ where fn validate_changed_keys( nam_addr: &Address, keys_changed: &BTreeSet, -) -> Result { +) -> Result<(), Error> { // acquire all keys that either changed our account, or that touched // nam balances let keys_changed: HashSet<_> = keys_changed @@ -146,19 +136,37 @@ fn validate_changed_keys( }) .collect(); if keys_changed.is_empty() { - return Err(Error(eyre!( + return Err(native_vp::Error::SimpleMessage( "No keys changed under our account so this validity predicate \ - shouldn't have been triggered" - ))); + shouldn't have been triggered", + ) + .into()); } tracing::debug!( relevant_keys.len = keys_changed.len(), "Found keys changed under our account" ); - Ok(keys_changed.contains(&escrow_key(nam_addr)) - && keys_changed - .iter() - .all(|key| is_balance_key(nam_addr, key).is_some())) + let nam_escrow_addr_modified = keys_changed.contains(&escrow_key(nam_addr)); + if !nam_escrow_addr_modified { + let error = native_vp::Error::new_const( + "The native token's escrow balance should have been modified", + ) + .into(); + tracing::debug!("{error}"); + return Err(error); + } + let all_keys_are_nam_balance = keys_changed + .iter() + .all(|key| is_balance_key(nam_addr, key).is_some()); + if !all_keys_are_nam_balance { + let error = native_vp::Error::new_const( + "Some modified keys were not a native token's balance key", + ) + .into(); + tracing::debug!("{error}"); + return Err(error); + } + Ok(()) } #[cfg(test)] @@ -167,7 +175,6 @@ mod tests { use std::env::temp_dir; use namada_core::borsh::BorshSerializeExt; - use namada_core::validity_predicate::VpSentinel; use namada_gas::TxGasMeter; use 
namada_state::testing::TestState; use namada_state::StorageWrite; @@ -243,7 +250,6 @@ mod tests { tx: &'a Tx, state: &'a TestState, gas_meter: &'a RefCell, - sentinel: &'a RefCell, keys_changed: &'a BTreeSet, verifiers: &'a BTreeSet
, ) -> Ctx<'a, TestState, WasmCacheRwAccess> { @@ -253,7 +259,6 @@ mod tests { tx, &TxIndex(0), gas_meter, - sentinel, keys_changed, verifiers, VpCache::new(temp_dir(), 100usize), @@ -269,7 +274,7 @@ mod tests { let result = validate_changed_keys(&nam(), &keys_changed); - assert_matches!(result, Ok(true)); + assert!(result.is_ok()); } #[test] @@ -288,7 +293,7 @@ mod tests { let result = validate_changed_keys(&nam(), &keys_changed); - assert_matches!(result, Ok(false)); + assert!(result.is_err()); } { let keys_changed = BTreeSet::from_iter(vec![ @@ -299,7 +304,7 @@ mod tests { let result = validate_changed_keys(&nam(), &keys_changed); - assert_matches!(result, Ok(false)); + assert!(result.is_err()); } } @@ -311,7 +316,7 @@ mod tests { let result = validate_changed_keys(&nam(), &keys_changed); - assert_matches!(result, Ok(false)); + assert!(result.is_err()); } { @@ -324,7 +329,7 @@ mod tests { let result = validate_changed_keys(&nam(), &keys_changed); - assert_matches!(result, Ok(false)); + assert!(result.is_err()); } { @@ -341,7 +346,7 @@ mod tests { let result = validate_changed_keys(&nam(), &keys_changed); - assert_matches!(result, Ok(false)); + assert!(result.is_err()); } } @@ -384,20 +389,12 @@ mod tests { let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(u64::MAX.into()), )); - let sentinel = RefCell::new(VpSentinel::default()); let vp = EthBridge { - ctx: setup_ctx( - &tx, - &state, - &gas_meter, - &sentinel, - &keys_changed, - &verifiers, - ), + ctx: setup_ctx(&tx, &state, &gas_meter, &keys_changed, &verifiers), }; let res = vp.validate_tx(&tx, &keys_changed, &verifiers); - assert!(res.expect("Test failed")); + assert!(res.is_ok()); } /// Test that escrowing must increase the balance @@ -437,20 +434,12 @@ mod tests { let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(u64::MAX.into()), )); - let sentinel = RefCell::new(VpSentinel::default()); let vp = EthBridge { - ctx: 
setup_ctx( - &tx, - &state, - &gas_meter, - &sentinel, - &keys_changed, - &verifiers, - ), + ctx: setup_ctx(&tx, &state, &gas_meter, &keys_changed, &verifiers), }; let res = vp.validate_tx(&tx, &keys_changed, &verifiers); - assert!(!res.expect("Test failed")); + assert!(res.is_err()); } /// Test that the VP checks that the bridge pool vp will @@ -493,19 +482,11 @@ mod tests { let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(u64::MAX.into()), )); - let sentinel = RefCell::new(VpSentinel::default()); let vp = EthBridge { - ctx: setup_ctx( - &tx, - &state, - &gas_meter, - &sentinel, - &keys_changed, - &verifiers, - ), + ctx: setup_ctx(&tx, &state, &gas_meter, &keys_changed, &verifiers), }; let res = vp.validate_tx(&tx, &keys_changed, &verifiers); - assert!(!res.expect("Test failed")); + assert!(res.is_err()); } } diff --git a/crates/namada/src/ledger/native_vp/ibc/context.rs b/crates/namada/src/ledger/native_vp/ibc/context.rs index e4cfd76dd7..1c44b638ff 100644 --- a/crates/namada/src/ledger/native_vp/ibc/context.rs +++ b/crates/namada/src/ledger/native_vp/ibc/context.rs @@ -1,20 +1,25 @@ //! 
Contexts for IBC validity predicate -use std::collections::{BTreeSet, HashMap, HashSet}; +use std::collections::BTreeSet; use borsh_ext::BorshSerializeExt; +use namada_core::collections::{HashMap, HashSet}; use namada_core::storage::Epochs; +use namada_gas::MEMORY_ACCESS_GAS_PER_BYTE; use namada_ibc::{IbcCommonContext, IbcStorageContext}; use namada_state::{StateRead, StorageError, StorageRead, StorageWrite}; +use namada_vp_env::VpEnv; use crate::address::{Address, InternalAddress}; use crate::ibc::IbcEvent; use crate::ledger::ibc::storage::is_ibc_key; use crate::ledger::native_vp::CtxPreStorageRead; use crate::state::write_log::StorageModification; -use crate::state::{PrefixIter, ResultExt}; +use crate::state::PrefixIter; use crate::storage::{BlockHash, BlockHeight, Epoch, Header, Key, TxIndex}; -use crate::token::{self as token, Amount, DenominatedAmount}; +use crate::token::{ + self as token, burn_tokens, credit_tokens, transfer, Amount, +}; use crate::vm::WasmCacheAccess; /// Result of a storage API call. @@ -73,9 +78,18 @@ where fn read_bytes(&self, key: &Key) -> Result>> { match self.store.get(key) { Some(StorageModification::Write { ref value }) => { + let gas = key.len() + value.len(); + self.ctx + .ctx + .charge_gas(gas as u64 * MEMORY_ACCESS_GAS_PER_BYTE)?; Ok(Some(value.clone())) } - Some(StorageModification::Delete) => Ok(None), + Some(StorageModification::Delete) => { + self.ctx.ctx.charge_gas( + key.len() as u64 * MEMORY_ACCESS_GAS_PER_BYTE, + )?; + Ok(None) + } Some(StorageModification::Temp { .. }) => { Err(StorageError::new_const( "Temp shouldn't be inserted in an IBC transaction", @@ -84,7 +98,12 @@ where Some(StorageModification::InitAccount { .. 
}) => Err( StorageError::new_const("InitAccount shouldn't be inserted"), ), - None => self.ctx.read_bytes(key), + None => { + self.ctx.ctx.charge_gas( + key.len() as u64 * MEMORY_ACCESS_GAS_PER_BYTE, + )?; + self.ctx.read_bytes(key) + } } } @@ -151,18 +170,20 @@ where key: &Key, value: impl AsRef<[u8]>, ) -> Result<()> { - self.store.insert( - key.clone(), - StorageModification::Write { - value: value.as_ref().to_vec(), - }, - ); - Ok(()) + let value = value.as_ref().to_vec(); + let gas = key.len() + value.len(); + self.store + .insert(key.clone(), StorageModification::Write { value }); + self.ctx + .ctx + .charge_gas(gas as u64 * MEMORY_ACCESS_GAS_PER_BYTE) } fn delete(&mut self, key: &Key) -> Result<()> { self.store.insert(key.clone(), StorageModification::Delete); - Ok(()) + self.ctx + .ctx + .charge_gas(key.len() as u64 * MEMORY_ACCESS_GAS_PER_BYTE) } } @@ -194,22 +215,9 @@ where src: &Address, dest: &Address, token: &Address, - amount: DenominatedAmount, + amount: Amount, ) -> Result<()> { - let amount = crate::token::denom_to_amount(amount, token, self)?; - let src_key = token::storage_key::balance_key(token, src); - let dest_key = token::storage_key::balance_key(token, dest); - let src_bal: Option = self.ctx.read(&src_key)?; - let mut src_bal = src_bal.ok_or_else(|| { - StorageError::new_const("the source has no balance") - })?; - src_bal.spend(&amount).into_storage_result()?; - let mut dest_bal: Amount = - self.ctx.read(&dest_key)?.unwrap_or_default(); - dest_bal.receive(&amount).into_storage_result()?; - - self.write(&src_key, src_bal.serialize_to_vec())?; - self.write(&dest_key, dest_bal.serialize_to_vec()) + transfer(self, token, src, dest, amount) } fn handle_masp_tx( @@ -225,21 +233,9 @@ where &mut self, target: &Address, token: &Address, - amount: DenominatedAmount, + amount: Amount, ) -> Result<()> { - let amount = crate::token::denom_to_amount(amount, token, self)?; - let target_key = token::storage_key::balance_key(token, target); - let mut 
target_bal: Amount = - self.ctx.read(&target_key)?.unwrap_or_default(); - target_bal.receive(&amount).into_storage_result()?; - - let minted_key = token::storage_key::minted_balance_key(token); - let mut minted_bal: Amount = - self.ctx.read(&minted_key)?.unwrap_or_default(); - minted_bal.receive(&amount).into_storage_result()?; - - self.write(&target_key, target_bal.serialize_to_vec())?; - self.write(&minted_key, minted_bal.serialize_to_vec())?; + credit_tokens(self, token, target, amount)?; let minter_key = token::storage_key::minter_key(token); self.write( @@ -252,21 +248,9 @@ where &mut self, target: &Address, token: &Address, - amount: DenominatedAmount, + amount: Amount, ) -> Result<()> { - let amount = crate::token::denom_to_amount(amount, token, self)?; - let target_key = token::storage_key::balance_key(token, target); - let mut target_bal: Amount = - self.ctx.read(&target_key)?.unwrap_or_default(); - target_bal.spend(&amount).into_storage_result()?; - - let minted_key = token::storage_key::minted_balance_key(token); - let mut minted_bal: Amount = - self.ctx.read(&minted_key)?.unwrap_or_default(); - minted_bal.spend(&amount).into_storage_result()?; - - self.write(&target_key, target_bal.serialize_to_vec())?; - self.write(&minted_key, minted_bal.serialize_to_vec()) + burn_tokens(self, token, target, amount) } fn log_string(&self, message: String) { @@ -406,7 +390,7 @@ where _src: &Address, _dest: &Address, _token: &Address, - _amount: DenominatedAmount, + _amount: Amount, ) -> Result<()> { unimplemented!("Validation doesn't transfer") } @@ -423,7 +407,7 @@ where &mut self, _target: &Address, _token: &Address, - _amount: DenominatedAmount, + _amount: Amount, ) -> Result<()> { unimplemented!("Validation doesn't mint") } @@ -432,7 +416,7 @@ where &mut self, _target: &Address, _token: &Address, - _amount: DenominatedAmount, + _amount: Amount, ) -> Result<()> { unimplemented!("Validation doesn't burn") } diff --git a/crates/namada/src/ledger/native_vp/ibc/mod.rs 
b/crates/namada/src/ledger/native_vp/ibc/mod.rs index e16f7af60f..2e581b3b1c 100644 --- a/crates/namada/src/ledger/native_vp/ibc/mod.rs +++ b/crates/namada/src/ledger/native_vp/ibc/mod.rs @@ -3,16 +3,18 @@ pub mod context; use std::cell::RefCell; -use std::collections::{BTreeSet, HashSet}; +use std::collections::BTreeSet; use std::rc::Rc; use std::time::Duration; use context::{PseudoExecutionContext, VpValidationContext}; use namada_core::address::Address; +use namada_core::collections::HashSet; use namada_core::storage::Key; use namada_gas::{IBC_ACTION_EXECUTE_GAS, IBC_ACTION_VALIDATE_GAS}; use namada_ibc::{ - Error as ActionError, IbcActions, TransferModule, ValidationParams, + Error as ActionError, IbcActions, NftTransferModule, TransferModule, + ValidationParams, }; use namada_proof_of_stake::storage::read_pos_params; use namada_state::write_log::StorageModification; @@ -22,26 +24,33 @@ use namada_vp_env::VpEnv; use thiserror::Error; use crate::ibc::core::host::types::identifiers::ChainId as IbcChainId; -use crate::ledger::ibc::storage::{calc_hash, is_ibc_denom_key, is_ibc_key}; +use crate::ledger::ibc::storage::{ + calc_hash, deposit_key, get_limits, is_ibc_key, is_ibc_trace_key, + mint_amount_key, withdraw_key, +}; use crate::ledger::native_vp::{self, Ctx, NativeVp}; use crate::ledger::parameters::read_epoch_duration_parameter; +use crate::token::storage_key::is_any_token_balance_key; +use crate::token::Amount; use crate::vm::WasmCacheAccess; #[allow(missing_docs)] #[derive(Error, Debug)] pub enum Error { - #[error("Native VP error: {0}")] - NativeVpError(native_vp::Error), - #[error("Decoding error: {0}")] - Decoding(std::io::Error), - #[error("IBC message is required as transaction data")] + #[error("IBC VP error: Native VP error: {0}")] + NativeVpError(#[from] native_vp::Error), + #[error("IBC VP error: Decoding error: {0}")] + Decoding(#[from] std::io::Error), + #[error("IBC VP error: IBC message is required as transaction data")] NoTxData, - #[error("IBC 
action error: {0}")] - IbcAction(ActionError), - #[error("State change error: {0}")] + #[error("IBC VP error: IBC action error: {0}")] + IbcAction(#[from] ActionError), + #[error("IBC VP error: State change error: {0}")] StateChange(String), - #[error("IBC event error: {0}")] + #[error("IBC VP error: IBC event error: {0}")] IbcEvent(String), + #[error("IBC rate limit: {0}")] + RateLimit(String), } /// IBC functions result @@ -69,7 +78,7 @@ where tx_data: &Tx, keys_changed: &BTreeSet, _verifiers: &BTreeSet
, - ) -> VpResult { + ) -> VpResult<()> { let signed = tx_data; let tx_data = signed.data().ok_or(Error::NoTxData)?; @@ -80,9 +89,12 @@ where self.validate_with_msg(&tx_data)?; // Validate the denom store if a denom key has been changed - self.validate_denom(keys_changed)?; + self.validate_trace(keys_changed)?; - Ok(true) + // Check the limits + self.check_limits(keys_changed)?; + + Ok(()) } } @@ -98,10 +110,15 @@ where ) -> VpResult<()> { let exec_ctx = PseudoExecutionContext::new(self.ctx.pre()); let ctx = Rc::new(RefCell::new(exec_ctx)); - - let mut actions = IbcActions::new(ctx.clone()); - let module = TransferModule::new(ctx.clone()); - actions.add_transfer_module(module.module_id(), module); + // Use an empty verifiers set placeholder for validation, this is only + // needed in actual txs to addresses whose VPs should be triggered + let verifiers = Rc::new(RefCell::new(BTreeSet::
::new())); + + let mut actions = IbcActions::new(ctx.clone(), verifiers.clone()); + let module = TransferModule::new(ctx.clone(), verifiers); + actions.add_transfer_module(module); + let module = NftTransferModule::new(ctx.clone()); + actions.add_transfer_module(module); // Charge gas for the expensive execution self.ctx .charge_gas(IBC_ACTION_EXECUTE_GAS) @@ -142,12 +159,17 @@ where fn validate_with_msg(&self, tx_data: &[u8]) -> VpResult<()> { let validation_ctx = VpValidationContext::new(self.ctx.pre()); let ctx = Rc::new(RefCell::new(validation_ctx)); + // Use an empty verifiers set placeholder for validation, this is only + // needed in actual txs to addresses whose VPs should be triggered + let verifiers = Rc::new(RefCell::new(BTreeSet::
::new())); - let mut actions = IbcActions::new(ctx.clone()); + let mut actions = IbcActions::new(ctx.clone(), verifiers.clone()); actions.set_validation_params(self.validation_params()?); - let module = TransferModule::new(ctx); - actions.add_transfer_module(module.module_id(), module); + let module = TransferModule::new(ctx.clone(), verifiers); + actions.add_transfer_module(module); + let module = NftTransferModule::new(ctx); + actions.add_transfer_module(module); // Charge gas for the expensive validation self.ctx .charge_gas(IBC_ACTION_VALIDATE_GAS) @@ -177,27 +199,27 @@ where }) } - fn validate_denom(&self, keys_changed: &BTreeSet) -> VpResult<()> { + fn validate_trace(&self, keys_changed: &BTreeSet) -> VpResult<()> { for key in keys_changed { - if let Some((_, hash)) = is_ibc_denom_key(key) { + if let Some((_, hash)) = is_ibc_trace_key(key) { match self.ctx.read_post::(key).map_err(|e| { - ActionError::Denom(format!( - "Getting the denom failed: Key {}, Error {}", + ActionError::Trace(format!( + "Getting the trace failed: Key {}, Error {}", key, e )) })? 
{ - Some(denom) => { - if calc_hash(&denom) != hash { - return Err(ActionError::Denom(format!( - "The denom is invalid: Key {}, Denom {}", - key, denom + Some(trace) => { + if calc_hash(&trace) != hash { + return Err(ActionError::Trace(format!( + "The trace is invalid: Key {}, Trace {}", + key, trace )) .into()); } } None => { - return Err(ActionError::Denom(format!( - "The corresponding denom wasn't stored: Key {}", + return Err(ActionError::Trace(format!( + "The corresponding trace wasn't stored: Key {}", key )) .into()); @@ -207,6 +229,68 @@ where } Ok(()) } + + fn check_limits(&self, keys_changed: &BTreeSet) -> VpResult { + let tokens: BTreeSet<&Address> = keys_changed + .iter() + .filter_map(|k| is_any_token_balance_key(k).map(|[key, _]| key)) + .collect(); + for token in tokens { + let (mint_limit, throughput_limit) = + get_limits(&self.ctx.pre(), token) + .map_err(Error::NativeVpError)?; + + // Check the supply + let mint_amount_key = mint_amount_key(token); + let minted: Amount = self + .ctx + .read_post(&mint_amount_key) + .map_err(Error::NativeVpError)? + .unwrap_or_default(); + if mint_limit < minted { + return Err(Error::RateLimit(format!( + "Transfer exceeding the mint limit is not allowed: Mint \ + limit {mint_limit}, minted amount {minted}" + ))); + } + + // Check the rate limit + let throughput = self.calc_throughput(token)?; + if throughput_limit < throughput { + return Err(Error::RateLimit(format!( + "Transfer exceeding the per-epoch throughput limit is not \ + allowed: Per-epoch throughput limit {throughput_limit}, \ + actual throughput {throughput}" + ))); + } + } + Ok(true) + } + + fn calc_throughput(&self, token: &Address) -> VpResult { + let deposit_key = deposit_key(token); + let deposit: Amount = self + .ctx + .read_post(&deposit_key) + .map_err(Error::NativeVpError)? + .unwrap_or_default(); + let withdraw_key = withdraw_key(token); + let withdraw: Amount = self + .ctx + .read_post(&withdraw_key) + .map_err(Error::NativeVpError)? 
+ .unwrap_or_default(); + let throughput = if deposit < withdraw { + withdraw + .checked_sub(deposit) + .expect("withdraw should be bigger than deposit") + } else { + deposit + .checked_sub(withdraw) + .expect("deposit should be bigger than withdraw") + }; + Ok(throughput) + } } fn match_value( @@ -237,14 +321,8 @@ fn match_value( } } -impl From for Error { - fn from(err: ActionError) -> Self { - Self::IbcAction(err) - } -} - /// A dummy header used for testing -#[cfg(any(test, feature = "testing"))] +#[cfg(any(test, feature = "testing", feature = "benches"))] pub fn get_dummy_header() -> crate::storage::Header { use crate::tendermint::time::Time as TmTime; crate::storage::Header { @@ -262,7 +340,6 @@ pub fn get_dummy_genesis_validator() use crate::core::dec::Dec; use crate::core::key::testing::common_sk_from_simple_seed; use crate::key; - use crate::token::Amount; let address = established_address_1(); let tokens = Amount::native_whole(1); @@ -313,13 +390,14 @@ mod tests { }; use ibc_testkit::testapp::ibc::clients::mock::consensus_state::MockConsensusState; use ibc_testkit::testapp::ibc::clients::mock::header::MockHeader; - use namada_core::validity_predicate::VpSentinel; + use namada_core::address::InternalAddress; use namada_gas::TxGasMeter; use namada_governance::parameters::GovernanceParameters; use namada_state::testing::TestState; use namada_state::StorageRead; + use namada_token::NATIVE_MAX_DECIMAL_PLACES; use namada_tx::data::TxType; - use namada_tx::{Code, Data, Section, Signature}; + use namada_tx::{Authorization, Code, Data, Section}; use prost::Message; use sha2::Digest; @@ -327,12 +405,22 @@ mod tests { use crate::core::address::testing::{ established_address_1, established_address_2, nam, }; - use crate::core::address::InternalAddress; + use crate::core::ibc::{MsgNftTransfer, MsgTransfer}; use crate::core::storage::Epoch; + use crate::ibc::apps::nft_transfer::types::events::{ + RecvEvent as NftRecvEvent, TokenTraceEvent, + TransferEvent as 
NftTransferEvent, + }; + use crate::ibc::apps::nft_transfer::types::msgs::transfer::MsgTransfer as IbcMsgNftTransfer; + use crate::ibc::apps::nft_transfer::types::packet::PacketData as NftPacketData; + use crate::ibc::apps::nft_transfer::types::{ + self as nft_types, PrefixedClassId, TokenId, TokenIds, + VERSION as NFT_VERSION, + }; use crate::ibc::apps::transfer::types::events::{ AckEvent, DenomTraceEvent, RecvEvent, TimeoutEvent, TransferEvent, }; - use crate::ibc::apps::transfer::types::msgs::transfer::MsgTransfer; + use crate::ibc::apps::transfer::types::msgs::transfer::MsgTransfer as IbcMsgTransfer; use crate::ibc::apps::transfer::types::packet::PacketData; use crate::ibc::apps::transfer::types::{ ack_success_b64, PrefixedCoin, TracePrefix, VERSION, @@ -385,16 +473,19 @@ mod tests { ChannelId, ClientId, ConnectionId, PortId, Sequence, }; use crate::ibc::core::router::types::event::ModuleEvent; + use crate::ibc::parameters::IbcParameters; use crate::ibc::primitives::proto::{Any, Protobuf}; - use crate::ibc::primitives::{Msg, Timestamp}; + use crate::ibc::primitives::{Timestamp, ToProto}; use crate::ibc::storage::{ - ack_key, channel_counter_key, channel_key, client_connections_key, - client_counter_key, client_state_key, client_update_height_key, - client_update_timestamp_key, commitment_key, connection_counter_key, - connection_key, consensus_state_key, ibc_denom_key, - next_sequence_ack_key, next_sequence_recv_key, next_sequence_send_key, - receipt_key, + ack_key, calc_hash, channel_counter_key, channel_key, + client_connections_key, client_counter_key, client_state_key, + client_update_height_key, client_update_timestamp_key, commitment_key, + connection_counter_key, connection_key, consensus_state_key, ibc_token, + ibc_trace_key, mint_amount_key, next_sequence_ack_key, + next_sequence_recv_key, next_sequence_send_key, nft_class_key, + nft_metadata_key, receipt_key, }; + use crate::ibc::{NftClass, NftMetadata}; use crate::key::testing::keypair_1; use 
crate::ledger::gas::VpGasMeter; use crate::ledger::parameters::storage::{ @@ -406,12 +497,11 @@ mod tests { use crate::tendermint::time::Time as TmTime; use crate::time::DurationSecs; use crate::token::storage_key::balance_key; - use crate::token::Amount; use crate::vm::wasm; const ADDRESS: Address = Address::Internal(InternalAddress::Ibc); const COMMITMENT_PREFIX: &[u8] = b"ibc"; - const TX_GAS_LIMIT: u64 = 1_000_000; + const TX_GAS_LIMIT: u64 = 10_000_000_000; fn get_client_id() -> ClientId { let id = format!("{}-0", MOCK_CLIENT_TYPE); @@ -425,6 +515,11 @@ mod tests { ibc::init_genesis_storage(&mut state); let gov_params = GovernanceParameters::default(); gov_params.init_storage(&mut state).unwrap(); + let ibc_params = IbcParameters { + default_mint_limit: Amount::native_whole(100), + default_per_epoch_throughput_limit: Amount::native_whole(100), + }; + ibc_params.init_storage(&mut state).unwrap(); pos::test_utils::test_init_genesis( &mut state, namada_proof_of_stake::OwnedPosParams::default(), @@ -517,6 +612,11 @@ mod tests { PortId::transfer() } + fn get_nft_port_id() -> PortId { + PortId::from_str(crate::ibc::apps::nft_transfer::types::PORT_ID_STR) + .unwrap() + } + fn get_channel_id() -> ChannelId { ChannelId::new(0) } @@ -533,7 +633,8 @@ mod tests { } fn get_conn_counterparty() -> ConnCounterparty { - let counterpart_client_id = ClientId::new(client_type(), 22).unwrap(); + let counterpart_client_id = + ClientId::new(&client_type().to_string(), 22).unwrap(); let counterpart_conn_id = ConnectionId::new(32); let commitment_prefix = CommitmentPrefix::try_from(COMMITMENT_PREFIX.to_vec()) @@ -562,6 +663,26 @@ mod tests { ChanCounterparty::new(counterpart_port_id, Some(counterpart_channel_id)) } + fn get_channel_for_nft( + channel_state: ChanState, + order: Order, + ) -> ChannelEnd { + ChannelEnd::new( + channel_state, + order, + get_channel_counterparty_for_nft(), + vec![get_connection_id()], + ChanVersion::new(NFT_VERSION.to_string()), + ) + .unwrap() + } + + fn 
get_channel_counterparty_for_nft() -> ChanCounterparty { + let counterpart_port_id = get_nft_port_id(); + let counterpart_channel_id = ChannelId::new(0); + ChanCounterparty::new(counterpart_port_id, Some(counterpart_channel_id)) + } + fn get_next_seq(state: &TestState, key: &Key) -> Sequence { let (val, _) = state.db_read(key).expect("read failed"); match val { @@ -609,7 +730,7 @@ mod tests { } fn packet_from_message( - msg: &MsgTransfer, + msg: &IbcMsgTransfer, sequence: Sequence, counterparty: &ChanCounterparty, ) -> Packet { @@ -631,6 +752,35 @@ mod tests { } } + fn nft_packet_from_message( + msg: &IbcMsgNftTransfer, + sequence: Sequence, + counterparty: &ChanCounterparty, + ) -> Packet { + // the packet data should be updated + let mut packet_data = msg.packet_data.clone(); + packet_data.class_uri = Some(DUMMY_URI.parse().unwrap()); + packet_data.class_data = Some(DUMMY_DATA.parse().unwrap()); + packet_data.token_uris = Some(vec![DUMMY_URI.parse().unwrap()]); + packet_data.token_data = Some(vec![DUMMY_DATA.parse().unwrap()]); + let data = serde_json::to_vec(&packet_data) + .expect("Encoding NftPacketData failed"); + + Packet { + seq_on_a: sequence, + port_id_on_a: msg.port_id_on_a.clone(), + chan_id_on_a: msg.chan_id_on_a.clone(), + port_id_on_b: counterparty.port_id.clone(), + chan_id_on_b: counterparty + .channel_id() + .expect("the counterparty channel should exist") + .clone(), + data, + timeout_height_on_b: msg.timeout_height_on_b, + timeout_timestamp_on_b: msg.timeout_timestamp_on_b, + } + } + fn commitment(packet: &Packet) -> PacketCommitment { let timeout = packet.timeout_timestamp_on_b.nanoseconds().to_be_bytes(); let revision_number = packet @@ -652,6 +802,33 @@ mod tests { sha2::Sha256::digest(&input).to_vec().into() } + fn get_nft_class_id() -> PrefixedClassId { + "nft-transfer/channel-14/myclass".parse().unwrap() + } + + fn get_nft_id() -> TokenId { + "mytoken".parse().unwrap() + } + + const DUMMY_DATA: &str = r#"{"name":{"value":"Crypto 
Creatures"},"image":{"value":"binary","mime":"image/png"}}"#; + const DUMMY_URI: &str = "http://example.com"; + fn dummy_nft_class() -> NftClass { + NftClass { + class_id: get_nft_class_id(), + class_uri: Some(DUMMY_URI.parse().unwrap()), + class_data: Some(DUMMY_DATA.parse().unwrap()), + } + } + + fn dummy_nft_metadata() -> NftMetadata { + NftMetadata { + class_id: get_nft_class_id(), + token_id: get_nft_id(), + token_uri: Some(DUMMY_URI.parse().unwrap()), + token_data: Some(DUMMY_DATA.parse().unwrap()), + } + } + #[test] fn test_create_client() { let mut state = init_storage(); @@ -720,19 +897,17 @@ mod tests { outer_tx.header.chain_id = state.in_mem().chain_id.clone(); outer_tx.set_code(Code::new(tx_code, None)); outer_tx.set_data(Data::new(tx_data)); - outer_tx.add_section(Section::Signature(Signature::new( + outer_tx.add_section(Section::Authorization(Authorization::new( vec![outer_tx.header_hash()], [(0, keypair_1())].into_iter().collect(), None, ))); - let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, &state, &outer_tx, &tx_index, &gas_meter, - &sentinel, &keys_changed, &verifiers, vp_wasm_cache, @@ -740,9 +915,9 @@ mod tests { let ibc = Ibc { ctx }; // this should return true because state has been stored - assert!( - ibc.validate_tx(&outer_tx, &keys_changed, &verifiers) - .expect("validation failed") + assert_matches!( + ibc.validate_tx(&outer_tx, &keys_changed, &verifiers), + Ok(_) ); } @@ -804,14 +979,12 @@ mod tests { wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); - let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, &state, &tx, &tx_index, &gas_meter, - &sentinel, &keys_changed, &verifiers, vp_wasm_cache, @@ -931,24 +1104,19 @@ mod tests { wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); - let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, &state, &tx, &tx_index, &gas_meter, - &sentinel, 
&keys_changed, &verifiers, vp_wasm_cache, ); let ibc = Ibc { ctx }; // this should return true because state has been stored - assert!( - ibc.validate_tx(&tx, &keys_changed, &verifiers) - .expect("validation failed") - ); + assert_matches!(ibc.validate_tx(&tx, &keys_changed, &verifiers), Ok(_)); } #[test] @@ -1031,7 +1199,7 @@ mod tests { outer_tx.header.chain_id = state.in_mem().chain_id.clone(); outer_tx.set_code(Code::new(tx_code, None)); outer_tx.set_data(Data::new(tx_data)); - outer_tx.add_section(Section::Signature(Signature::new( + outer_tx.add_section(Section::Authorization(Authorization::new( vec![outer_tx.header_hash()], [(0, keypair_1())].into_iter().collect(), None, @@ -1043,14 +1211,12 @@ mod tests { wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); - let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, &state, &outer_tx, &tx_index, &gas_meter, - &sentinel, &keys_changed, &verifiers, vp_wasm_cache, @@ -1059,7 +1225,7 @@ mod tests { // this should return true because state has been stored assert!( ibc.validate_tx(&outer_tx, &keys_changed, &verifiers) - .expect("validation failed") + .is_ok() ); } @@ -1140,14 +1306,12 @@ mod tests { wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); - let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, &state, &tx, &tx_index, &gas_meter, - &sentinel, &keys_changed, &verifiers, vp_wasm_cache, @@ -1262,24 +1426,19 @@ mod tests { wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); - let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, &state, &tx, &tx_index, &gas_meter, - &sentinel, &keys_changed, &verifiers, vp_wasm_cache, ); let ibc = Ibc { ctx }; // this should return true because state has been stored - assert!( - ibc.validate_tx(&tx, &keys_changed, &verifiers) - .expect("validation failed") - ); + assert_matches!(ibc.validate_tx(&tx, 
&keys_changed, &verifiers), Ok(_)); } #[test] @@ -1363,7 +1522,7 @@ mod tests { outer_tx.header.chain_id = state.in_mem().chain_id.clone(); outer_tx.set_code(Code::new(tx_code, None)); outer_tx.set_data(Data::new(tx_data)); - outer_tx.add_section(Section::Signature(Signature::new( + outer_tx.add_section(Section::Authorization(Authorization::new( vec![outer_tx.header_hash()], [(0, keypair_1())].into_iter().collect(), None, @@ -1375,22 +1534,20 @@ mod tests { wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); - let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, &state, &outer_tx, &tx_index, &gas_meter, - &sentinel, &keys_changed, &verifiers, vp_wasm_cache, ); let ibc = Ibc { ctx }; - assert!( - ibc.validate_tx(&outer_tx, &keys_changed, &verifiers) - .expect("validation failed") + assert_matches!( + ibc.validate_tx(&outer_tx, &keys_changed, &verifiers), + Ok(_) ); } @@ -1461,7 +1618,7 @@ mod tests { outer_tx.header.chain_id = state.in_mem().chain_id.clone(); outer_tx.set_code(Code::new(tx_code, None)); outer_tx.set_data(Data::new(tx_data)); - outer_tx.add_section(Section::Signature(Signature::new( + outer_tx.add_section(Section::Authorization(Authorization::new( vec![outer_tx.header_hash()], [(0, keypair_1())].into_iter().collect(), None, @@ -1473,22 +1630,20 @@ mod tests { wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); - let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, &state, &outer_tx, &tx_index, &gas_meter, - &sentinel, &keys_changed, &verifiers, vp_wasm_cache, ); let ibc = Ibc { ctx }; - assert!( - ibc.validate_tx(&outer_tx, &keys_changed, &verifiers) - .expect("validation failed") + assert_matches!( + ibc.validate_tx(&outer_tx, &keys_changed, &verifiers), + Ok(_) ); } @@ -1587,7 +1742,7 @@ mod tests { outer_tx.header.chain_id = state.in_mem().chain_id.clone(); outer_tx.set_code(Code::new(tx_code, None)); 
outer_tx.set_data(Data::new(tx_data)); - outer_tx.add_section(Section::Signature(Signature::new( + outer_tx.add_section(Section::Authorization(Authorization::new( vec![outer_tx.header_hash()], [(0, keypair_1())].into_iter().collect(), None, @@ -1599,22 +1754,20 @@ mod tests { wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); - let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, &state, &outer_tx, &tx_index, &gas_meter, - &sentinel, &keys_changed, &verifiers, vp_wasm_cache, ); let ibc = Ibc { ctx }; - assert!( - ibc.validate_tx(&outer_tx, &keys_changed, &verifiers) - .expect("validation failed") + assert_matches!( + ibc.validate_tx(&outer_tx, &keys_changed, &verifiers), + Ok(_) ); } @@ -1712,7 +1865,7 @@ mod tests { outer_tx.header.chain_id = state.in_mem().chain_id.clone(); outer_tx.set_code(Code::new(tx_code, None)); outer_tx.set_data(Data::new(tx_data)); - outer_tx.add_section(Section::Signature(Signature::new( + outer_tx.add_section(Section::Authorization(Authorization::new( vec![outer_tx.header_hash()], [(0, keypair_1())].into_iter().collect(), None, @@ -1724,22 +1877,20 @@ mod tests { wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); - let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, &state, &outer_tx, &tx_index, &gas_meter, - &sentinel, &keys_changed, &verifiers, vp_wasm_cache, ); let ibc = Ibc { ctx }; - assert!( - ibc.validate_tx(&outer_tx, &keys_changed, &verifiers) - .expect("validation failed") + assert_matches!( + ibc.validate_tx(&outer_tx, &keys_changed, &verifiers), + Ok(_) ); } @@ -1822,7 +1973,7 @@ mod tests { outer_tx.header.chain_id = state.in_mem().chain_id.clone(); outer_tx.set_code(Code::new(tx_code, None)); outer_tx.set_data(Data::new(tx_data)); - outer_tx.add_section(Section::Signature(Signature::new( + outer_tx.add_section(Section::Authorization(Authorization::new( vec![outer_tx.header_hash()], [(0, 
keypair_1())].into_iter().collect(), None, @@ -1834,22 +1985,20 @@ mod tests { wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); - let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, &state, &outer_tx, &tx_index, &gas_meter, - &sentinel, &keys_changed, &verifiers, vp_wasm_cache, ); let ibc = Ibc { ctx }; - assert!( - ibc.validate_tx(&outer_tx, &keys_changed, &verifiers) - .expect("validation failed") + assert_matches!( + ibc.validate_tx(&outer_tx, &keys_changed, &verifiers), + Ok(_) ); } @@ -1939,23 +2088,18 @@ mod tests { wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); - let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, &state, &tx, &tx_index, &gas_meter, - &sentinel, &keys_changed, &verifiers, vp_wasm_cache, ); let ibc = Ibc { ctx }; - assert!( - ibc.validate_tx(&tx, &keys_changed, &verifiers) - .expect("validation failed") - ); + assert_matches!(ibc.validate_tx(&tx, &keys_changed, &verifiers), Ok(_)); } // skip test_close_init_channel() and test_close_confirm_channel() since it @@ -2004,13 +2148,13 @@ mod tests { .unwrap(); // prepare data - let msg = MsgTransfer { + let msg = IbcMsgTransfer { port_id_on_a: get_port_id(), chan_id_on_a: get_channel_id(), packet_data: PacketData { token: PrefixedCoin { denom: nam().to_string().parse().unwrap(), - amount: 100u64.into(), + amount: 100.into(), }, sender: sender.to_string().into(), receiver: "receiver".to_string().into(), @@ -2040,6 +2184,19 @@ mod tests { .write(&commitment_key, bytes) .expect("write failed"); keys_changed.insert(commitment_key); + // withdraw + let withdraw_key = withdraw_key(&nam()); + let bytes = Amount::from_str( + msg.packet_data.token.amount.to_string(), + NATIVE_MAX_DECIMAL_PLACES, + ) + .unwrap() + .serialize_to_vec(); + state + .write_log_mut() + .write(&withdraw_key, bytes) + .expect("write failed"); + keys_changed.insert(withdraw_key); // event let 
transfer_event = TransferEvent { sender: msg.packet_data.sender.clone(), @@ -2067,8 +2224,11 @@ mod tests { let tx_index = TxIndex::default(); let tx_code = vec![]; - let mut tx_data = vec![]; - msg.to_any().encode(&mut tx_data).expect("encoding failed"); + let tx_data = MsgTransfer { + message: msg, + transfer: None, + } + .serialize_to_vec(); let mut tx = Tx::new(state.in_mem().chain_id.clone(), None); tx.add_code(tx_code, None) @@ -2082,23 +2242,18 @@ mod tests { wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); - let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, &state, &tx, &tx_index, &gas_meter, - &sentinel, &keys_changed, &verifiers, vp_wasm_cache, ); let ibc = Ibc { ctx }; - assert!( - ibc.validate_tx(&tx, &keys_changed, &verifiers) - .expect("validation failed") - ); + assert_matches!(ibc.validate_tx(&tx, &keys_changed, &verifiers), Ok(_)); } #[test] @@ -2138,7 +2293,7 @@ mod tests { // prepare data let sender = established_address_1(); let receiver = established_address_2(); - let transfer_msg = MsgTransfer { + let transfer_msg = IbcMsgTransfer { port_id_on_a: get_port_id(), chan_id_on_a: get_channel_id(), packet_data: PacketData { @@ -2154,12 +2309,8 @@ mod tests { timeout_timestamp_on_b: Timestamp::none(), }; let counterparty = get_channel_counterparty(); - let mut packet = + let packet = packet_from_message(&transfer_msg, 1.into(), &counterparty); - packet.port_id_on_a = counterparty.port_id().clone(); - packet.chan_id_on_a = counterparty.channel_id().cloned().unwrap(); - packet.port_id_on_b = get_port_id(); - packet.chan_id_on_b = get_channel_id(); let msg = MsgRecvPacket { packet: packet.clone(), proof_commitment_on_a: dummy_proof(), @@ -2193,27 +2344,49 @@ mod tests { .write(&ack_key, bytes) .expect("write failed"); keys_changed.insert(ack_key); - // denom + let mut coin = transfer_msg.packet_data.token; coin.denom.add_trace_prefix(TracePrefix::new( packet.port_id_on_b.clone(), 
packet.chan_id_on_b.clone(), )); + // mint + let ibc_token = ibc_token(coin.denom.to_string()); + let mint_key = mint_amount_key(&ibc_token); + let bytes = Amount::from_str(coin.amount.to_string(), 0) + .unwrap() + .serialize_to_vec(); + state + .write_log_mut() + .write(&mint_key, bytes) + .expect("write failed"); + keys_changed.insert(mint_key); + // deposit + let deposit_key = deposit_key(&ibc_token); + let bytes = Amount::from_str(coin.amount.to_string(), 0) + .unwrap() + .serialize_to_vec(); + state + .write_log_mut() + .write(&deposit_key, bytes) + .expect("write failed"); + keys_changed.insert(deposit_key); + // denom let trace_hash = calc_hash(coin.denom.to_string()); - let denom_key = ibc_denom_key(receiver.to_string(), &trace_hash); + let trace_key = ibc_trace_key(receiver.to_string(), &trace_hash); let bytes = coin.denom.to_string().serialize_to_vec(); state .write_log_mut() - .write(&denom_key, bytes) + .write(&trace_key, bytes) .expect("write failed"); - keys_changed.insert(denom_key); - let denom_key = ibc_denom_key(nam().to_string(), &trace_hash); + keys_changed.insert(trace_key); + let trace_key = ibc_trace_key(nam().to_string(), &trace_hash); let bytes = coin.denom.to_string().serialize_to_vec(); state .write_log_mut() - .write(&denom_key, bytes) + .write(&trace_key, bytes) .expect("write failed"); - keys_changed.insert(denom_key); + keys_changed.insert(trace_key); // event let recv_event = RecvEvent { sender: sender.to_string().into(), @@ -2278,23 +2451,18 @@ mod tests { wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); - let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, &state, &tx, &tx_index, &gas_meter, - &sentinel, &keys_changed, &verifiers, vp_wasm_cache, ); let ibc = Ibc { ctx }; - assert!( - ibc.validate_tx(&tx, &keys_changed, &verifiers) - .expect("validation failed") - ); + assert_matches!(ibc.validate_tx(&tx, &keys_changed, &verifiers), Ok(_)); } #[test] @@ -2321,7 +2489,7 
@@ mod tests { .expect("write failed"); // commitment let sender = established_address_1(); - let transfer_msg = MsgTransfer { + let transfer_msg = IbcMsgTransfer { port_id_on_a: get_port_id(), chan_id_on_a: get_channel_id(), packet_data: PacketData { @@ -2426,23 +2594,18 @@ mod tests { wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); - let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, &state, &tx, &tx_index, &gas_meter, - &sentinel, &keys_changed, &verifiers, vp_wasm_cache, ); let ibc = Ibc { ctx }; - assert!( - ibc.validate_tx(&tx, &keys_changed, &verifiers) - .expect("validation failed") - ); + assert_matches!(ibc.validate_tx(&tx, &keys_changed, &verifiers), Ok(_)); } #[test] @@ -2476,7 +2639,7 @@ mod tests { .write(&balance_key, amount.serialize_to_vec()) .expect("write failed"); // commitment - let transfer_msg = MsgTransfer { + let transfer_msg = IbcMsgTransfer { port_id_on_a: get_port_id(), chan_id_on_a: get_channel_id(), packet_data: PacketData { @@ -2536,9 +2699,22 @@ mod tests { .delete(&commitment_key) .expect("delete failed"); keys_changed.insert(commitment_key); - // event + // deposit let data = serde_json::from_slice::(&packet.data) .expect("decoding packet data failed"); + let deposit_key = deposit_key(&nam()); + let bytes = Amount::from_str( + data.token.amount.to_string(), + NATIVE_MAX_DECIMAL_PLACES, + ) + .unwrap() + .serialize_to_vec(); + state + .write_log_mut() + .write(&deposit_key, bytes) + .expect("write failed"); + keys_changed.insert(deposit_key); + // event let timeout_event = TimeoutEvent { refund_receiver: data.sender, refund_denom: data.token.denom, @@ -2578,23 +2754,18 @@ mod tests { wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); - let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, &state, &tx, &tx_index, &gas_meter, - &sentinel, &keys_changed, &verifiers, vp_wasm_cache, ); let ibc = Ibc { ctx }; - 
assert!( - ibc.validate_tx(&tx, &keys_changed, &verifiers) - .expect("validation failed") - ); + assert_matches!(ibc.validate_tx(&tx, &keys_changed, &verifiers), Ok(_)); } #[test] @@ -2629,7 +2800,7 @@ mod tests { .expect("write failed"); // commitment let sender = established_address_1(); - let transfer_msg = MsgTransfer { + let transfer_msg = IbcMsgTransfer { port_id_on_a: get_port_id(), chan_id_on_a: get_channel_id(), packet_data: PacketData { @@ -2689,9 +2860,22 @@ mod tests { .delete(&commitment_key) .expect("delete failed"); keys_changed.insert(commitment_key); - // event + // deposit let data = serde_json::from_slice::(&packet.data) .expect("decoding packet data failed"); + let deposit_key = deposit_key(&nam()); + let bytes = Amount::from_str( + data.token.amount.to_string(), + NATIVE_MAX_DECIMAL_PLACES, + ) + .unwrap() + .serialize_to_vec(); + state + .write_log_mut() + .write(&deposit_key, bytes) + .expect("write failed"); + keys_changed.insert(deposit_key); + // event let timeout_event = TimeoutEvent { refund_receiver: data.sender, refund_denom: data.token.denom, @@ -2731,22 +2915,419 @@ mod tests { wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); - let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, &state, &tx, &tx_index, &gas_meter, - &sentinel, &keys_changed, &verifiers, vp_wasm_cache, ); let ibc = Ibc { ctx }; - assert!( - ibc.validate_tx(&tx, &keys_changed, &verifiers) - .expect("validation failed") + assert_matches!(ibc.validate_tx(&tx, &keys_changed, &verifiers), Ok(_)); + } + + #[test] + fn test_send_packet_for_nft() { + let mut keys_changed = BTreeSet::new(); + let mut state = init_storage(); + insert_init_client(&mut state); + + // insert an open connection + let conn_key = connection_key(&get_connection_id()); + let conn = get_connection(ConnState::Open); + let bytes = conn.encode_vec(); + state + .write_log_mut() + .write(&conn_key, bytes) + .expect("write failed"); + // insert 
an Open channel + let channel_key = channel_key(&get_nft_port_id(), &get_channel_id()); + let channel = get_channel_for_nft(ChanState::Open, Order::Unordered); + let bytes = channel.encode_vec(); + state + .write_log_mut() + .write(&channel_key, bytes) + .expect("write failed"); + // init nft + let class_id = get_nft_class_id(); + let token_id = get_nft_id(); + let sender = established_address_1(); + let ibc_token = ibc::storage::ibc_token_for_nft(&class_id, &token_id); + let balance_key = balance_key(&ibc_token, &sender); + let amount = Amount::from_u64(1); + state + .write_log_mut() + .write(&balance_key, amount.serialize_to_vec()) + .expect("write failed"); + // nft class + let class = dummy_nft_class(); + let class_key = ibc::storage::nft_class_key(&class_id); + state + .write_log_mut() + .write(&class_key, class.serialize_to_vec()) + .expect("write failed"); + // nft metadata + let metadata = dummy_nft_metadata(); + let metadata_key = ibc::storage::nft_metadata_key(&class_id, &token_id); + state + .write_log_mut() + .write(&metadata_key, metadata.serialize_to_vec()) + .expect("write failed"); + + state.write_log_mut().commit_tx(); + state.commit_block().expect("commit failed"); + // for next block + state + .in_mem_mut() + .set_header(get_dummy_header()) + .expect("Setting a dummy header shouldn't fail"); + state + .in_mem_mut() + .begin_block(BlockHash::default(), BlockHeight(2)) + .unwrap(); + + // prepare data + let msg = IbcMsgNftTransfer { + port_id_on_a: get_nft_port_id(), + chan_id_on_a: get_channel_id(), + packet_data: NftPacketData { + class_id, + class_uri: None, + class_data: None, + token_ids: TokenIds(vec![token_id]), + token_uris: None, + token_data: None, + sender: sender.to_string().into(), + receiver: "receiver".to_string().into(), + memo: Some("memo".to_string().into()), + }, + timeout_height_on_b: TimeoutHeight::At(Height::new(0, 10).unwrap()), + timeout_timestamp_on_b: Timestamp::none(), + }; + + // the sequence send + let seq_key = + 
next_sequence_send_key(&get_nft_port_id(), &get_channel_id()); + let sequence = get_next_seq(&state, &seq_key); + state + .write_log_mut() + .write(&seq_key, (u64::from(sequence) + 1).to_be_bytes().to_vec()) + .expect("write failed"); + keys_changed.insert(seq_key); + // packet commitment + let packet = nft_packet_from_message( + &msg, + sequence, + &get_channel_counterparty_for_nft(), ); + let commitment_key = + commitment_key(&msg.port_id_on_a, &msg.chan_id_on_a, sequence); + let commitment = commitment(&packet); + let bytes = commitment.into_vec(); + state + .write_log_mut() + .write(&commitment_key, bytes) + .expect("write failed"); + keys_changed.insert(commitment_key); + // withdraw + let withdraw_key = withdraw_key(&ibc_token); + let bytes = Amount::from_u64(1).serialize_to_vec(); + state + .write_log_mut() + .write(&withdraw_key, bytes) + .expect("write failed"); + keys_changed.insert(withdraw_key); + // event + let transfer_event = NftTransferEvent { + sender: msg.packet_data.sender.clone(), + receiver: msg.packet_data.receiver.clone(), + class: msg.packet_data.class_id.clone(), + tokens: msg.packet_data.token_ids.clone(), + memo: msg.packet_data.memo.clone().unwrap_or_default(), + }; + let event = RawIbcEvent::Module(ModuleEvent::from(transfer_event)); + state + .write_log_mut() + .emit_ibc_event(event.try_into().unwrap()); + let event = RawIbcEvent::SendPacket(SendPacket::new( + packet, + Order::Unordered, + get_connection_id(), + )); + let message_event = RawIbcEvent::Message(MessageEvent::Channel); + state + .write_log_mut() + .emit_ibc_event(message_event.try_into().unwrap()); + state + .write_log_mut() + .emit_ibc_event(event.try_into().unwrap()); + + let tx_index = TxIndex::default(); + let tx_code = vec![]; + let tx_data = MsgNftTransfer { + message: msg, + transfer: None, + } + .serialize_to_vec(); + + let mut tx = Tx::new(state.in_mem().chain_id.clone(), None); + tx.add_code(tx_code, None) + .add_serialized_data(tx_data) + 
.sign_wrapper(keypair_1()); + + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(TX_GAS_LIMIT.into()), + )); + let (vp_wasm_cache, _vp_cache_dir) = + wasm::compilation_cache::common::testing::cache(); + + let verifiers = BTreeSet::new(); + let ctx = Ctx::new( + &ADDRESS, + &state, + &tx, + &tx_index, + &gas_meter, + &keys_changed, + &verifiers, + vp_wasm_cache, + ); + let ibc = Ibc { ctx }; + assert_matches!(ibc.validate_tx(&tx, &keys_changed, &verifiers), Ok(_)); + } + + #[test] + fn test_recv_packet_for_nft() { + let mut keys_changed = BTreeSet::new(); + let mut state = init_storage(); + insert_init_client(&mut state); + + // insert an open connection + let conn_key = connection_key(&get_connection_id()); + let conn = get_connection(ConnState::Open); + let bytes = conn.encode_vec(); + state + .write_log_mut() + .write(&conn_key, bytes) + .expect("write failed"); + // insert an open channel + let channel_key = channel_key(&get_nft_port_id(), &get_channel_id()); + let channel = get_channel_for_nft(ChanState::Open, Order::Unordered); + let bytes = channel.encode_vec(); + state + .write_log_mut() + .write(&channel_key, bytes) + .expect("write failed"); + state.write_log_mut().commit_tx(); + state.commit_block().expect("commit failed"); + // for next block + state + .in_mem_mut() + .set_header(get_dummy_header()) + .expect("Setting a dummy header shouldn't fail"); + state + .in_mem_mut() + .begin_block(BlockHash::default(), BlockHeight(2)) + .unwrap(); + + // prepare data + let sender = established_address_1(); + let receiver = established_address_2(); + let class = dummy_nft_class(); + let metadata = dummy_nft_metadata(); + let transfer_msg = IbcMsgNftTransfer { + port_id_on_a: get_nft_port_id(), + chan_id_on_a: get_channel_id(), + packet_data: NftPacketData { + class_id: class.class_id.clone(), + class_uri: class.class_uri.clone(), + class_data: class.class_data, + token_ids: TokenIds(vec![metadata.token_id.clone()]), + 
token_uris: Some(vec![metadata.token_uri.unwrap()]), + token_data: Some(vec![metadata.token_data.unwrap()]), + sender: sender.to_string().into(), + receiver: receiver.to_string().into(), + memo: Some("memo".to_string().into()), + }, + timeout_height_on_b: TimeoutHeight::At(Height::new(0, 10).unwrap()), + timeout_timestamp_on_b: Timestamp::none(), + }; + let counterparty = get_channel_counterparty_for_nft(); + let packet = + nft_packet_from_message(&transfer_msg, 1.into(), &counterparty); + let msg = MsgRecvPacket { + packet: packet.clone(), + proof_commitment_on_a: dummy_proof(), + proof_height_on_a: Height::new(0, 1).unwrap(), + signer: "account0".to_string().into(), + }; + + // the sequence send + let receipt_key = receipt_key( + &msg.packet.port_id_on_b, + &msg.packet.chan_id_on_b, + msg.packet.seq_on_a, + ); + let bytes = [1_u8].to_vec(); + state + .write_log_mut() + .write(&receipt_key, bytes) + .expect("write failed"); + keys_changed.insert(receipt_key); + // packet commitment + let ack_key = ack_key( + &packet.port_id_on_b, + &packet.chan_id_on_b, + msg.packet.seq_on_a, + ); + let transfer_ack = + AcknowledgementStatus::success(nft_types::ack_success_b64()); + let acknowledgement: Acknowledgement = transfer_ack.into(); + let bytes = sha2::Sha256::digest(acknowledgement.as_bytes()).to_vec(); + state + .write_log_mut() + .write(&ack_key, bytes) + .expect("write failed"); + keys_changed.insert(ack_key); + // trace + let mut class_id = transfer_msg.packet_data.class_id.clone(); + class_id.add_trace_prefix(nft_types::TracePrefix::new( + packet.port_id_on_b.clone(), + packet.chan_id_on_b.clone(), + )); + let token_id = transfer_msg.packet_data.token_ids.0.first().unwrap(); + let ibc_trace = format!("{class_id}/{token_id}"); + let trace_hash = calc_hash(&ibc_trace); + let trace_key = ibc_trace_key(receiver.to_string(), &trace_hash); + let bytes = ibc_trace.serialize_to_vec(); + state + .write_log_mut() + .write(&trace_key, bytes) + .expect("write failed"); + 
keys_changed.insert(trace_key); + let trace_key = ibc_trace_key(token_id, &trace_hash); + let bytes = ibc_trace.serialize_to_vec(); + state + .write_log_mut() + .write(&trace_key, bytes) + .expect("write failed"); + keys_changed.insert(trace_key); + // NFT class + let class_key = nft_class_key(&class_id); + let mut class = dummy_nft_class(); + class.class_id = class_id.clone(); + let bytes = class.serialize_to_vec(); + state + .write_log_mut() + .write(&class_key, bytes) + .expect("write failed"); + keys_changed.insert(class_key); + // NFT metadata + let metadata_key = nft_metadata_key(&class_id, token_id); + let mut metadata = dummy_nft_metadata(); + metadata.class_id = class_id.clone(); + let bytes = metadata.serialize_to_vec(); + state + .write_log_mut() + .write(&metadata_key, bytes) + .expect("write failed"); + keys_changed.insert(metadata_key); + // mint + let ibc_token = ibc_token(&ibc_trace); + let mint_key = mint_amount_key(&ibc_token); + let bytes = Amount::from_u64(1).serialize_to_vec(); + state + .write_log_mut() + .write(&mint_key, bytes) + .expect("write failed"); + keys_changed.insert(mint_key); + // deposit + let deposit_key = deposit_key(&ibc_token); + let bytes = Amount::from_u64(1).serialize_to_vec(); + state + .write_log_mut() + .write(&deposit_key, bytes) + .expect("write failed"); + keys_changed.insert(deposit_key); + // event + let recv_event = NftRecvEvent { + sender: sender.to_string().into(), + receiver: receiver.to_string().into(), + class: transfer_msg.packet_data.class_id.clone(), + tokens: TokenIds(vec![token_id.clone()]), + memo: "memo".to_string().into(), + success: true, + }; + let event = RawIbcEvent::Module(ModuleEvent::from(recv_event)); + state + .write_log_mut() + .emit_ibc_event(event.try_into().unwrap()); + let trace_event = TokenTraceEvent { + trace_hash: Some(trace_hash), + class: class_id, + token: token_id.clone(), + }; + let event = RawIbcEvent::Module(ModuleEvent::from(trace_event)); + state + .write_log_mut() + 
.emit_ibc_event(event.try_into().unwrap()); + let event = RawIbcEvent::ReceivePacket(ReceivePacket::new( + msg.packet.clone(), + Order::Unordered, + get_connection_id(), + )); + let message_event = RawIbcEvent::Message(MessageEvent::Channel); + state + .write_log_mut() + .emit_ibc_event(message_event.try_into().unwrap()); + state + .write_log_mut() + .emit_ibc_event(event.try_into().unwrap()); + let event = + RawIbcEvent::WriteAcknowledgement(WriteAcknowledgement::new( + packet, + acknowledgement, + get_connection_id(), + )); + let message_event = RawIbcEvent::Message(MessageEvent::Channel); + state + .write_log_mut() + .emit_ibc_event(message_event.try_into().unwrap()); + state + .write_log_mut() + .emit_ibc_event(event.try_into().unwrap()); + + let tx_index = TxIndex::default(); + let tx_code = vec![]; + let mut tx_data = vec![]; + msg.to_any().encode(&mut tx_data).expect("encoding failed"); + + let mut tx = Tx::new(state.in_mem().chain_id.clone(), None); + tx.add_code(tx_code, None) + .add_serialized_data(tx_data) + .sign_wrapper(keypair_1()); + + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(TX_GAS_LIMIT.into()), + )); + let (vp_wasm_cache, _vp_cache_dir) = + wasm::compilation_cache::common::testing::cache(); + + let verifiers = BTreeSet::new(); + let ctx = Ctx::new( + &ADDRESS, + &state, + &tx, + &tx_index, + &gas_meter, + &keys_changed, + &verifiers, + vp_wasm_cache, + ); + let ibc = Ibc { ctx }; + assert_matches!(ibc.validate_tx(&tx, &keys_changed, &verifiers), Ok(_)); } } diff --git a/crates/namada/src/ledger/native_vp/masp.rs b/crates/namada/src/ledger/native_vp/masp.rs index 874bb6a464..ad8989a628 100644 --- a/crates/namada/src/ledger/native_vp/masp.rs +++ b/crates/namada/src/ledger/native_vp/masp.rs @@ -1,7 +1,7 @@ //! 
MASP native VP use std::cmp::Ordering; -use std::collections::{BTreeSet, HashMap, HashSet}; +use std::collections::BTreeSet; use borsh_ext::BorshSerializeExt; use masp_primitives::asset_type::AssetType; @@ -11,9 +11,10 @@ use masp_primitives::transaction::components::I128Sum; use masp_primitives::transaction::Transaction; use namada_core::address::Address; use namada_core::address::InternalAddress::Masp; +use namada_core::booleans::BoolResultUnitExt; +use namada_core::collections::{HashMap, HashSet}; use namada_core::masp::encode_asset_type; use namada_core::storage::{IndexedTx, Key}; -use namada_gas::MASP_VERIFY_SHIELDED_TX_GAS; use namada_sdk::masp::verify_shielded_tx; use namada_state::{OptionExt, ResultExt, StateRead}; use namada_token::read_denom; @@ -40,8 +41,8 @@ use crate::vm::WasmCacheAccess; #[allow(missing_docs)] #[derive(Error, Debug)] pub enum Error { - #[error("Native VP error: {0}")] - NativeVpError(native_vp::Error), + #[error("MASP VP error: Native VP error: {0}")] + NativeVpError(#[from] native_vp::Error), } /// MASP VP result @@ -74,18 +75,20 @@ where &self, keys_changed: &BTreeSet, transaction: &Transaction, - ) -> Result { + ) -> Result<()> { let mut revealed_nullifiers = HashSet::new(); let shielded_spends = match transaction.sapling_bundle() { Some(bundle) if !bundle.shielded_spends.is_empty() => { &bundle.shielded_spends } _ => { - tracing::debug!( + let error = native_vp::Error::new_const( "Missing expected spend descriptions in shielded \ - transaction" - ); - return Ok(false); + transaction", + ) + .into(); + tracing::debug!("{error}"); + return Err(error); } }; @@ -94,22 +97,29 @@ where if self.ctx.has_key_pre(&nullifier_key)? 
|| revealed_nullifiers.contains(&nullifier_key) { - tracing::debug!( - "MASP double spending attempt, the nullifier {:#?} has \ + let error = native_vp::Error::new_alloc(format!( + "MASP double spending attempt, the nullifier {:?} has \ already been revealed previously", - description.nullifier.0 - ); - return Ok(false); + description.nullifier.0, + )) + .into(); + tracing::debug!("{error}"); + return Err(error); } // Check that the nullifier is indeed committed (no temp write // and no delete) and carries no associated data (the latter not // strictly necessary for validation, but we don't expect any // value for this key anyway) - match self.ctx.read_bytes_post(&nullifier_key)? { - Some(value) if value.is_empty() => (), - _ => return Ok(false), - } + self.ctx + .read_bytes_post(&nullifier_key)? + .is_some_and(|value| value.is_empty()) + .ok_or_else(|| { + Error::NativeVpError(native_vp::Error::new_const( + "The nullifier should have been committed with no \ + associated data", + )) + })?; revealed_nullifiers.insert(nullifier_key); } @@ -118,15 +128,17 @@ where keys_changed.iter().filter(|key| is_masp_nullifier_key(key)) { if !revealed_nullifiers.contains(nullifier_key) { - tracing::debug!( + let error = native_vp::Error::new_alloc(format!( "An unexpected MASP nullifier key {nullifier_key} has \ been revealed by the transaction" - ); - return Ok(false); + )) + .into(); + tracing::debug!("{error}"); + return Err(error); } } - Ok(true) + Ok(()) } // Check that a transaction carrying output descriptions correctly updates @@ -134,7 +146,7 @@ where fn valid_note_commitment_update( &self, transaction: &Transaction, - ) -> Result { + ) -> Result<()> { // Check that the merkle tree in storage has been correctly updated with // the output descriptions cmu let tree_key = masp_commitment_tree_key(); @@ -165,28 +177,33 @@ where // This verifies that all and only the necessary notes have been // appended to the tree if previous_tree != post_tree { - tracing::debug!("The note 
commitment tree was incorrectly updated"); - return Ok(false); + let error = Error::NativeVpError(native_vp::Error::SimpleMessage( + "The note commitment tree was incorrectly updated", + )); + tracing::debug!("{error}"); + return Err(error); } - Ok(true) + Ok(()) } // Check that the spend descriptions anchors of a transaction are valid fn valid_spend_descriptions_anchor( &self, transaction: &Transaction, - ) -> Result { + ) -> Result<()> { let shielded_spends = match transaction.sapling_bundle() { Some(bundle) if !bundle.shielded_spends.is_empty() => { &bundle.shielded_spends } _ => { - tracing::debug!( - "Missing expected spend descriptions in shielded \ - transaction" - ); - return Ok(false); + let error = + Error::NativeVpError(native_vp::Error::SimpleMessage( + "Missing expected spend descriptions in shielded \ + transaction", + )); + tracing::debug!("{error}"); + return Err(error); } }; @@ -195,21 +212,23 @@ where // Check if the provided anchor was published before if !self.ctx.has_key_pre(&anchor_key)? 
{ - tracing::debug!( - "Spend description refers to an invalid anchor" - ); - return Ok(false); + let error = + Error::NativeVpError(native_vp::Error::SimpleMessage( + "Spend description refers to an invalid anchor", + )); + tracing::debug!("{error}"); + return Err(error); } } - Ok(true) + Ok(()) } // Check that the convert descriptions anchors of a transaction are valid fn valid_convert_descriptions_anchor( &self, transaction: &Transaction, - ) -> Result { + ) -> Result<()> { if let Some(bundle) = transaction.sapling_bundle() { if !bundle.shielded_converts.is_empty() { let anchor_key = masp_convert_anchor_key(); @@ -226,16 +245,20 @@ where if namada_core::hash::Hash(description.anchor.to_bytes()) != expected_anchor { - tracing::debug!( - "Convert description refers to an invalid anchor" + let error = Error::NativeVpError( + native_vp::Error::SimpleMessage( + "Convert description refers to an invalid \ + anchor", + ), ); - return Ok(false); + tracing::debug!("{error}"); + return Err(error); } } } } - Ok(true) + Ok(()) } fn validate_state_and_get_transfer_data( @@ -267,7 +290,7 @@ where .ctx .read_post::(pin_keys.first().unwrap())? { - Some(IndexedTx { height, index }) + Some(IndexedTx { height, index, .. }) if height == self.ctx.get_block_height()? && index == self.ctx.get_tx_index()? => {} Some(_) => { @@ -407,7 +430,7 @@ where tx_data: &Tx, keys_changed: &BTreeSet, _verifiers: &BTreeSet
, - ) -> Result { + ) -> Result<()> { let epoch = self.ctx.get_block_epoch()?; let conversion_state = self.ctx.state.in_mem().get_conversion_state(); let shielded_tx = self.ctx.get_shielded_action(tx_data)?; @@ -415,8 +438,11 @@ where if u64::from(self.ctx.get_block_height()?) > u64::from(shielded_tx.expiry_height()) { - tracing::debug!("MASP transaction is expired"); - return Ok(false); + let error = + native_vp::Error::new_const("MASP transaction is expired") + .into(); + tracing::debug!("{error}"); + return Err(error); } let mut transparent_tx_pool = I128Sum::zero(); @@ -433,12 +459,17 @@ where )?; if transfer.source != Address::Internal(Masp) { - // No shielded spends nor shielded converts are allowed + // No shielded spends nor shielded conversions are allowed if shielded_tx.sapling_bundle().is_some_and(|bundle| { !(bundle.shielded_spends.is_empty() && bundle.shielded_converts.is_empty()) }) { - return Ok(false); + let error = native_vp::Error::new_const( + "No shielded spends nor shielded conversions are allowed", + ) + .into(); + tracing::debug!("{error}"); + return Err(error); } let transp_bundle = @@ -453,9 +484,14 @@ where // To help recognize asset types not in the conversion tree let unepoched_tokens = unepoched_tokens(&transfer.token, denom)?; // Handle transparent input - // The following boundary conditions must be satisfied + // + // The following boundary conditions must be satisfied: + // // 1. Total of transparent input values equals containing transfer - // amount 2. Asset type must be properly derived + // amount + // + // 2. Asset type must be properly derived + // // 3. Public key must be the hash of the source for vin in &transp_bundle.vin { // Non-masp sources add to the transparent tx pool @@ -474,11 +510,13 @@ where // Satisfies 3. 
if <[u8; 20]>::from(hash) != vin.address.0 { - tracing::debug!( - "the public key of the output account does not match \ - the transfer target" - ); - return Ok(false); + let error = native_vp::Error::new_const( + "The public key of the output account does not match \ + the transfer target", + ) + .into(); + tracing::debug!("{error}"); + return Err(error); } match conversion_state.assets.get(&vin.asset_type) { // Satisfies 2. Note how the asset's epoch must be equal to @@ -527,8 +565,12 @@ where // If such an epoched asset type is available in the // conversion tree, then we must reject the // unepoched variant - tracing::debug!("epoch is missing from asset type"); - return Ok(false); + let error = native_vp::Error::new_const( + "Epoch is missing from asset type", + ) + .into(); + tracing::debug!("{error}"); + return Err(error); } else { // Otherwise note the contribution to this // trransparent input @@ -548,12 +590,22 @@ where } } // unrecognized asset - _ => return Ok(false), + _ => { + return Err(native_vp::Error::new_alloc(format!( + "Unrecognized asset {}", + vin.asset_type + )) + .into()); + } }; } // Satisfies 1. if total_in_values != transfer.amount { - return Ok(false); + return Err(native_vp::Error::new_const( + "Total amount of transparent input values was not the \ + same as the transferred amount", + ) + .into()); } } else { // Handle shielded input @@ -566,33 +618,36 @@ where // nullifier is being revealed by the tx if let Some(transp_bundle) = shielded_tx.transparent_bundle() { if !transp_bundle.vin.is_empty() { - tracing::debug!( + let error = native_vp::Error::new_alloc(format!( "Transparent input to a transaction from the masp \ must be 0 but is {}", transp_bundle.vin.len() - ); - return Ok(false); + )) + .into(); + tracing::debug!("{error}"); + return Err(error); } } - if !(self.valid_spend_descriptions_anchor(&shielded_tx)? - && self.valid_convert_descriptions_anchor(&shielded_tx)? - && self.valid_nullifiers_reveal(keys_changed, &shielded_tx)?) 
- { - return Ok(false); - } + + self.valid_spend_descriptions_anchor(&shielded_tx)?; + self.valid_convert_descriptions_anchor(&shielded_tx)?; + self.valid_nullifiers_reveal(keys_changed, &shielded_tx)?; } // The transaction must correctly update the note commitment tree // in storage with the new output descriptions - if !self.valid_note_commitment_update(&shielded_tx)? { - return Ok(false); - } + self.valid_note_commitment_update(&shielded_tx)?; if transfer.target != Address::Internal(Masp) { // Handle transparent output - // The following boundary conditions must be satisfied + // + // The following boundary conditions must be satisfied: + // // 1. Total of transparent output values equals containing transfer - // amount 2. Asset type must be properly derived + // amount + // + // 2. Asset type must be properly derived + // // 3. Public key must be the hash of the target let transp_bundle = @@ -625,11 +680,13 @@ where // Satisfies 3. if <[u8; 20]>::from(hash) != out.address.0 { - tracing::debug!( - "the public key of the output account does not match \ - the transfer target" - ); - return Ok(false); + let error = native_vp::Error::new_const( + "The public key of the output account does not match \ + the transfer target", + ) + .into(); + tracing::debug!("{error}"); + return Err(error); } match conversion_state.assets.get(&out.asset_type) { // Satisfies 2. @@ -673,12 +730,22 @@ where })?; } // unrecognized asset - _ => return Ok(false), + _ => { + return Err(native_vp::Error::new_alloc(format!( + "Unrecognized asset {}", + out.asset_type + )) + .into()); + } }; } // Satisfies 1. if total_out_values != transfer.amount { - return Ok(false); + return Err(native_vp::Error::new_const( + "Total amount of transparent output values was not the \ + same as the transferred amount", + ) + .into()); } } else { // Handle shielded output @@ -689,54 +756,62 @@ where // Satisfies 1. 
if let Some(transp_bundle) = shielded_tx.transparent_bundle() { if !transp_bundle.vout.is_empty() { - tracing::debug!( + let error = native_vp::Error::new_alloc(format!( "Transparent output to a transaction from the masp \ must be 0 but is {}", transp_bundle.vout.len() - ); - return Ok(false); + )) + .into(); + tracing::debug!("{error}"); + return Err(error); } } // Staisfies 2. if shielded_tx .sapling_bundle() + // NOTE: when resolving git merge conflicts (you will, trust + // me), do **NOT** take this branch, because it + // is buggy. if the sapling bundle is empty, + // this branch is not executed, and in that case + // there were no shielded outputs. .is_some_and(|bundle| bundle.shielded_outputs.is_empty()) { - return Ok(false); + let error = native_vp::Error::new_const( + "There were no shielded outputs in the sapling bundle", + ) + .into(); + tracing::debug!("{error}"); + return Err(error); } } match transparent_tx_pool.partial_cmp(&I128Sum::zero()) { None | Some(Ordering::Less) => { - tracing::debug!( + let error = native_vp::Error::new_const( "Transparent transaction value pool must be nonnegative. \ Violation may be caused by transaction being constructed \ - in previous epoch. Maybe try again." - ); + in previous epoch. Maybe try again.", + ) + .into(); + tracing::debug!("{error}"); // Section 3.4: The remaining value in the transparent // transaction value pool MUST be nonnegative. - return Ok(false); + return Err(error); } Some(Ordering::Greater) => { - tracing::debug!( - "Transaction fees cannot be paid inside MASP transaction." 
- ); - return Ok(false); + let error = native_vp::Error::new_const( + "Transaction fees cannot be paid inside MASP transaction.", + ) + .into(); + tracing::debug!("{error}"); + return Err(error); } _ => {} } - // Verify the proofs and charge the gas for the expensive execution - self.ctx - .charge_gas(MASP_VERIFY_SHIELDED_TX_GAS) - .map_err(Error::NativeVpError)?; - Ok(verify_shielded_tx(&shielded_tx)) - } -} - -impl From for Error { - fn from(err: native_vp::Error) -> Self { - Self::NativeVpError(err) + // Verify the proofs + verify_shielded_tx(&shielded_tx, |gas| self.ctx.charge_gas(gas)) + .map_err(Error::NativeVpError) } } diff --git a/crates/namada/src/ledger/native_vp/mod.rs b/crates/namada/src/ledger/native_vp/mod.rs index 1a6dc2e517..b5375d5702 100644 --- a/crates/namada/src/ledger/native_vp/mod.rs +++ b/crates/namada/src/ledger/native_vp/mod.rs @@ -12,10 +12,8 @@ use std::collections::BTreeSet; use std::fmt::Debug; use borsh::BorshDeserialize; -use eyre::WrapErr; use namada_core::storage; use namada_core::storage::Epochs; -use namada_core::validity_predicate::VpSentinel; use namada_gas::GasMetering; use namada_tx::Tx; pub use namada_vp_env::VpEnv; @@ -48,7 +46,7 @@ pub trait NativeVp { tx_data: &Tx, keys_changed: &BTreeSet, verifiers: &BTreeSet
, - ) -> std::result::Result; + ) -> std::result::Result<(), Self::Error>; } /// A validity predicate's host context. @@ -68,8 +66,6 @@ where pub iterators: RefCell::D>>, /// VP gas meter. pub gas_meter: &'a RefCell, - /// Errors sentinel - pub sentinel: &'a RefCell, /// Read-only state access. pub state: &'a S, /// The transaction code is used for signature verification @@ -98,7 +94,7 @@ where S: StateRead, CA: WasmCacheAccess, { - ctx: &'view Ctx<'a, S, CA>, + pub(crate) ctx: &'view Ctx<'a, S, CA>, } /// Read access to the posterior storage (state after tx execution) via @@ -125,7 +121,6 @@ where tx: &'a Tx, tx_index: &'a TxIndex, gas_meter: &'a RefCell, - sentinel: &'a RefCell, keys_changed: &'a BTreeSet, verifiers: &'a BTreeSet
, #[cfg(feature = "wasm-runtime")] @@ -136,7 +131,6 @@ where state, iterators: RefCell::new(PrefixIterators::default()), gas_meter, - sentinel, tx, tx_index, keys_changed, @@ -173,23 +167,13 @@ where &self, key: &storage::Key, ) -> Result>, state::StorageError> { - vp_host_fns::read_pre( - self.ctx.gas_meter, - self.ctx.state, - key, - self.ctx.sentinel, - ) - .into_storage_result() + vp_host_fns::read_pre(self.ctx.gas_meter, self.ctx.state, key) + .into_storage_result() } fn has_key(&self, key: &storage::Key) -> Result { - vp_host_fns::has_key_pre( - self.ctx.gas_meter, - self.ctx.state, - key, - self.ctx.sentinel, - ) - .into_storage_result() + vp_host_fns::has_key_pre(self.ctx.gas_meter, self.ctx.state, key) + .into_storage_result() } fn iter_prefix<'iter>( @@ -201,7 +185,6 @@ where self.ctx.state.write_log(), self.ctx.state.db(), prefix, - self.ctx.sentinel, ) .into_storage_result() } @@ -213,12 +196,8 @@ where &'iter self, iter: &mut Self::PrefixIter<'iter>, ) -> Result)>, state::StorageError> { - vp_host_fns::iter_next::<::D>( - self.ctx.gas_meter, - iter, - self.ctx.sentinel, - ) - .into_storage_result() + vp_host_fns::iter_next::<::D>(self.ctx.gas_meter, iter) + .into_storage_result() } fn get_chain_id(&self) -> Result { @@ -269,23 +248,13 @@ where &self, key: &storage::Key, ) -> Result>, state::StorageError> { - vp_host_fns::read_post( - self.ctx.gas_meter, - self.ctx.state, - key, - self.ctx.sentinel, - ) - .into_storage_result() + vp_host_fns::read_post(self.ctx.gas_meter, self.ctx.state, key) + .into_storage_result() } fn has_key(&self, key: &storage::Key) -> Result { - vp_host_fns::has_key_post( - self.ctx.gas_meter, - self.ctx.state, - key, - self.ctx.sentinel, - ) - .into_storage_result() + vp_host_fns::has_key_post(self.ctx.gas_meter, self.ctx.state, key) + .into_storage_result() } fn iter_prefix<'iter>( @@ -297,7 +266,6 @@ where self.ctx.state.write_log(), self.ctx.state.db(), prefix, - self.ctx.sentinel, ) .into_storage_result() } @@ -309,12 
+277,8 @@ where &'iter self, iter: &mut Self::PrefixIter<'iter>, ) -> Result)>, state::StorageError> { - vp_host_fns::iter_next::<::D>( - self.ctx.gas_meter, - iter, - self.ctx.sentinel, - ) - .into_storage_result() + vp_host_fns::iter_next::<::D>(self.ctx.gas_meter, iter) + .into_storage_result() } fn get_chain_id(&self) -> Result { @@ -374,7 +338,7 @@ where &self, key: &Key, ) -> Result, state::StorageError> { - vp_host_fns::read_temp(self.gas_meter, self.state, key, self.sentinel) + vp_host_fns::read_temp(self.gas_meter, self.state, key) .map(|data| data.and_then(|t| T::try_from_slice(&t[..]).ok())) .into_storage_result() } @@ -383,17 +347,17 @@ where &self, key: &Key, ) -> Result>, state::StorageError> { - vp_host_fns::read_temp(self.gas_meter, self.state, key, self.sentinel) + vp_host_fns::read_temp(self.gas_meter, self.state, key) .into_storage_result() } fn get_chain_id(&self) -> Result { - vp_host_fns::get_chain_id(self.gas_meter, self.state, self.sentinel) + vp_host_fns::get_chain_id(self.gas_meter, self.state) .into_storage_result() } fn get_block_height(&self) -> Result { - vp_host_fns::get_block_height(self.gas_meter, self.state, self.sentinel) + vp_host_fns::get_block_height(self.gas_meter, self.state) .into_storage_result() } @@ -401,37 +365,32 @@ where &self, height: BlockHeight, ) -> Result, state::StorageError> { - vp_host_fns::get_block_header( - self.gas_meter, - self.state, - height, - self.sentinel, - ) - .into_storage_result() + vp_host_fns::get_block_header(self.gas_meter, self.state, height) + .into_storage_result() } fn get_block_hash(&self) -> Result { - vp_host_fns::get_block_hash(self.gas_meter, self.state, self.sentinel) + vp_host_fns::get_block_hash(self.gas_meter, self.state) .into_storage_result() } fn get_block_epoch(&self) -> Result { - vp_host_fns::get_block_epoch(self.gas_meter, self.state, self.sentinel) + vp_host_fns::get_block_epoch(self.gas_meter, self.state) .into_storage_result() } fn get_tx_index(&self) -> Result { - 
vp_host_fns::get_tx_index(self.gas_meter, self.tx_index, self.sentinel) + vp_host_fns::get_tx_index(self.gas_meter, self.tx_index) .into_storage_result() } fn get_native_token(&self) -> Result { - vp_host_fns::get_native_token(self.gas_meter, self.state, self.sentinel) + vp_host_fns::get_native_token(self.gas_meter, self.state) .into_storage_result() } fn get_pred_epochs(&self) -> state::StorageResult { - vp_host_fns::get_pred_epochs(self.gas_meter, self.state, self.sentinel) + vp_host_fns::get_pred_epochs(self.gas_meter, self.state) .into_storage_result() } @@ -452,7 +411,6 @@ where self.state.write_log(), self.state.db(), prefix, - self.sentinel, ) .into_storage_result() } @@ -461,7 +419,7 @@ where &self, vp_code_hash: Hash, input_data: Tx, - ) -> Result { + ) -> Result<(), state::StorageError> { #[cfg(feature = "wasm-runtime")] { use std::marker::PhantomData; @@ -478,6 +436,7 @@ where let mut iterators: PrefixIterators<'_, ::D> = PrefixIterators::default(); let mut result_buffer: Option> = None; + let mut yielded_value: Option> = None; let mut vp_wasm_cache = self.vp_wasm_cache.clone(); let ctx = VpCtx::new( @@ -486,27 +445,24 @@ where self.state.in_mem(), self.state.db(), self.gas_meter, - self.sentinel, self.tx, self.tx_index, &mut iterators, self.verifiers, &mut result_buffer, + &mut yielded_value, self.keys_changed, &eval_runner, &mut vp_wasm_cache, ); - match eval_runner.eval_native_result(ctx, vp_code_hash, input_data) - { - Ok(result) => Ok(result), - Err(err) => { + eval_runner + .eval_native_result(ctx, vp_code_hash, input_data) + .inspect_err(|err| { tracing::warn!( - "VP eval from a native VP failed with: {}", - err + "VP eval from a native VP failed with: {err}", ); - Ok(false) - } - } + }) + .into_storage_result() } #[cfg(not(feature = "wasm-runtime"))] @@ -527,7 +483,7 @@ where } fn get_tx_code_hash(&self) -> Result, state::StorageError> { - vp_host_fns::get_tx_code_hash(self.gas_meter, self.tx, self.sentinel) + 
vp_host_fns::get_tx_code_hash(self.gas_meter, self.tx) .into_storage_result() } @@ -568,28 +524,30 @@ where } } +impl namada_tx::action::Read for Ctx<'_, S, CA> +where + S: StateRead, + CA: 'static + WasmCacheAccess, +{ + type Err = Error; + + fn read_temp( + &self, + key: &storage::Key, + ) -> Result, Self::Err> { + VpEnv::read_temp(self, key) + } +} + /// A convenience trait for reading and automatically deserializing a value from /// storage pub trait StorageReader { - /// If `maybe_bytes` is not empty, return an `Option` containing the - /// deserialization of the bytes inside `maybe_bytes`. - fn deserialize_if_present( - maybe_bytes: Option>, - ) -> eyre::Result> { - maybe_bytes - .map(|ref bytes| { - T::try_from_slice(bytes) - .wrap_err_with(|| "couldn't deserialize".to_string()) - }) - .transpose() - } - /// Storage read prior state (before tx execution). It will try to read from /// the storage. fn read_pre_value( &self, key: &Key, - ) -> eyre::Result>; + ) -> Result, state::StorageError>; /// Storage read posterior state (after tx execution). It will try to read /// from the write log first and if no entry found then from the @@ -597,7 +555,35 @@ pub trait StorageReader { fn read_post_value( &self, key: &Key, - ) -> eyre::Result>; + ) -> Result, state::StorageError>; + + /// Calls `read_pre_value`, and returns an error on `Ok(None)`. + fn must_read_pre_value( + &self, + key: &Key, + ) -> Result { + match self.read_pre_value(key) { + Ok(None) => Err(state::StorageError::AllocMessage(format!( + "Expected a value to be present in the key {key}" + ))), + Ok(Some(x)) => Ok(x), + Err(err) => Err(err), + } + } + + /// Calls `read_post_value`, and returns an error on `Ok(None)`. 
+ fn must_read_post_value( + &self, + key: &Key, + ) -> Result { + match self.read_post_value(key) { + Ok(None) => Err(state::StorageError::AllocMessage(format!( + "Expected a value to be present in the key {key}" + ))), + Ok(Some(x)) => Ok(x), + Err(err) => Err(err), + } + } } impl<'a, S, CA> StorageReader for &Ctx<'a, S, CA> @@ -607,30 +593,32 @@ where { /// Helper function. After reading posterior state, /// borsh deserialize to specified type - fn read_post_value(&self, key: &Key) -> eyre::Result> + fn read_post_value( + &self, + key: &Key, + ) -> Result, state::StorageError> where T: BorshDeserialize, { - let maybe_bytes = Ctx::read_bytes_post(self, key) - .wrap_err_with(|| format!("couldn't read_bytes_post {}", key))?; - Self::deserialize_if_present(maybe_bytes) + Ctx::read_post(self, key) } /// Helper function. After reading prior state, /// borsh deserialize to specified type - fn read_pre_value(&self, key: &Key) -> eyre::Result> + fn read_pre_value( + &self, + key: &Key, + ) -> Result, state::StorageError> where T: BorshDeserialize, { - let maybe_bytes = Ctx::read_bytes_pre(self, key) - .wrap_err_with(|| format!("couldn't read_bytes_pre {}", key))?; - Self::deserialize_if_present(maybe_bytes) + Ctx::read_pre(self, key) } } #[cfg(any(test, feature = "testing"))] pub(super) mod testing { - use std::collections::HashMap; + use namada_core::collections::HashMap; use super::*; @@ -644,23 +632,21 @@ pub(super) mod testing { fn read_pre_value( &self, key: &Key, - ) -> eyre::Result> { - let bytes = match self.pre.get(key) { - Some(bytes) => bytes.to_owned(), - None => return Ok(None), - }; - Self::deserialize_if_present(Some(bytes)) + ) -> Result, state::StorageError> { + self.pre + .get(key) + .map(|bytes| T::try_from_slice(bytes).into_storage_result()) + .transpose() } fn read_post_value( &self, key: &Key, - ) -> eyre::Result> { - let bytes = match self.post.get(key) { - Some(bytes) => bytes.to_owned(), - None => return Ok(None), - }; - 
Self::deserialize_if_present(Some(bytes)) + ) -> Result, state::StorageError> { + self.post + .get(key) + .map(|bytes| T::try_from_slice(bytes).into_storage_result()) + .transpose() } } } diff --git a/crates/namada/src/ledger/native_vp/multitoken.rs b/crates/namada/src/ledger/native_vp/multitoken.rs index 1286fa1a67..08676ca949 100644 --- a/crates/namada/src/ledger/native_vp/multitoken.rs +++ b/crates/namada/src/ledger/native_vp/multitoken.rs @@ -1,15 +1,18 @@ //! Native VP for multitokens -use std::collections::{BTreeSet, HashMap}; +use std::collections::BTreeSet; +use namada_core::booleans::BoolResultUnitExt; +use namada_core::collections::HashMap; use namada_governance::is_proposal_accepted; +use namada_parameters::storage::is_native_token_transferable; use namada_state::StateRead; use namada_token::storage_key::is_any_token_parameter_key; use namada_tx::Tx; use namada_vp_env::VpEnv; use thiserror::Error; -use crate::address::{Address, InternalAddress}; +use crate::address::{Address, InternalAddress, GOV, POS}; use crate::ledger::native_vp::{self, Ctx, NativeVp}; use crate::storage::{Key, KeySeg}; use crate::token::storage_key::{ @@ -22,7 +25,7 @@ use crate::vm::WasmCacheAccess; #[allow(missing_docs)] #[derive(Error, Debug)] pub enum Error { - #[error("Native VP error: {0}")] + #[error("Multitoken VP error: Native VP error: {0}")] NativeVpError(#[from] native_vp::Error), } @@ -51,17 +54,44 @@ where tx_data: &Tx, keys_changed: &BTreeSet, verifiers: &BTreeSet
, - ) -> Result { + ) -> Result<()> { + let native_token = self.ctx.pre().ctx.get_native_token()?; + let is_native_token_transferable = + is_native_token_transferable(&self.ctx.pre())?; + // Native token can be transferred to `PoS` or `Gov` even if + // `is_native_token_transferable` is false + let is_allowed_inc = |token: &Address, target: &Address| -> bool { + *token != native_token + || is_native_token_transferable + || *target == POS + || *target == GOV + }; + let is_allowed_dec = |token: &Address, target: &Address| -> bool { + *token != native_token + || is_native_token_transferable + || (*target != POS && *target != GOV) + }; + let mut inc_changes: HashMap = HashMap::new(); let mut dec_changes: HashMap = HashMap::new(); let mut inc_mints: HashMap = HashMap::new(); let mut dec_mints: HashMap = HashMap::new(); for key in keys_changed { - if let Some([token, _]) = is_any_token_balance_key(key) { + if let Some([token, owner]) = is_any_token_balance_key(key) { let pre: Amount = self.ctx.read_pre(key)?.unwrap_or_default(); let post: Amount = self.ctx.read_post(key)?.unwrap_or_default(); match post.checked_sub(pre) { Some(diff) => { + if !is_allowed_inc(token, owner) { + tracing::debug!( + "Native token deposit isn't allowed" + ); + return Err(Error::NativeVpError( + native_vp::Error::SimpleMessage( + "Native token deposit isn't allowed", + ), + )); + } let change = inc_changes.entry(token.clone()).or_default(); *change = @@ -74,6 +104,16 @@ where })?; } None => { + if !is_allowed_dec(token, owner) { + tracing::debug!( + "Native token withdraw isn't allowed" + ); + return Err(Error::NativeVpError( + native_vp::Error::SimpleMessage( + "Native token deposit isn't allowed", + ), + )); + } let diff = pre .checked_sub(post) .expect("Underflow shouldn't happen here"); @@ -90,6 +130,17 @@ where } } } else if let Some(token) = is_any_minted_balance_key(key) { + if *token == native_token && !is_native_token_transferable { + tracing::debug!( + "Minting/Burning native token 
isn't allowed" + ); + return Err(Error::NativeVpError( + native_vp::Error::SimpleMessage( + "Minting/Burning native token isn't allowed", + ), + )); + } + let pre: Amount = self.ctx.read_pre(key)?.unwrap_or_default(); let post: Amount = self.ctx.read_post(key)?.unwrap_or_default(); match post.checked_sub(pre) { @@ -118,13 +169,9 @@ where } } // Check if the minter is set - if !self.is_valid_minter(token, verifiers)? { - return Ok(false); - } + self.is_valid_minter(token, verifiers)?; } else if let Some(token) = is_any_minter_key(key) { - if !self.is_valid_minter(token, verifiers)? { - return Ok(false); - } + self.is_valid_minter(token, verifiers)?; } else if is_any_token_parameter_key(key).is_some() { return self.is_valid_parameter(tx_data); } else if key.segments.first() @@ -134,7 +181,10 @@ where { // Reject when trying to update an unexpected key under // `#Multitoken/...` - return Ok(false); + return Err(native_vp::Error::new_alloc(format!( + "Unexpected change to the multitoken account: {key}" + )) + .into()); } } @@ -144,7 +194,21 @@ where all_tokens.extend(inc_mints.keys().cloned()); all_tokens.extend(dec_mints.keys().cloned()); - Ok(all_tokens.iter().all(|token| { + all_tokens.iter().try_for_each(|token| { + if token.is_internal() + && matches!(token, Address::Internal(InternalAddress::Nut(_))) + && !verifiers.contains(token) + { + // Established address tokens, IbcToken and Erc20 do not have + // VPs themselves, their validation is handled + // by the `Multitoken` internal address, + // but internal token Nut addresses have to verify the transfer + return Err(native_vp::Error::new_alloc(format!( + "Token {token} must verify the tx" + )) + .into()); + } + let inc_change = inc_changes.get(token).cloned().unwrap_or_default(); let dec_change = @@ -152,18 +216,26 @@ where let inc_mint = inc_mints.get(token).cloned().unwrap_or_default(); let dec_mint = dec_mints.get(token).cloned().unwrap_or_default(); - if inc_change >= dec_change && inc_mint >= dec_mint { - 
inc_change.checked_sub(dec_change) - == inc_mint.checked_sub(dec_mint) - } else if (inc_change < dec_change && inc_mint >= dec_mint) - || (inc_change >= dec_change && inc_mint < dec_mint) - { - false - } else { - dec_change.checked_sub(inc_change) - == dec_mint.checked_sub(inc_mint) - } - })) + let token_changes_are_balanced = + if inc_change >= dec_change && inc_mint >= dec_mint { + inc_change.checked_sub(dec_change) + == inc_mint.checked_sub(dec_mint) + } else if (inc_change < dec_change && inc_mint >= dec_mint) + || (inc_change >= dec_change && inc_mint < dec_mint) + { + false + } else { + dec_change.checked_sub(inc_change) + == dec_mint.checked_sub(inc_mint) + }; + + token_changes_are_balanced.ok_or_else(|| { + native_vp::Error::new_const( + "The transaction's token changes are unbalanced", + ) + .into() + }) + }) } } @@ -177,7 +249,7 @@ where &self, token: &Address, verifiers: &BTreeSet
, - ) -> Result { + ) -> Result<()> { match token { Address::Internal(InternalAddress::IbcToken(_)) => { // Check if the minter is set @@ -187,26 +259,47 @@ where if minter == Address::Internal(InternalAddress::Ibc) => { - Ok(verifiers.contains(&minter)) + verifiers.contains(&minter).ok_or_else(|| { + native_vp::Error::new_const( + "The IBC VP was not triggered", + ) + .into() + }) } - _ => Ok(false), + _ => Err(native_vp::Error::new_const( + "Only the IBC account is able to mint IBC tokens", + ) + .into()), } } - _ => { - // ERC20 and other tokens should not be minted by a wasm - // transaction - Ok(false) - } + _ => Err(native_vp::Error::new_const( + "Only IBC tokens can be minted by a user transaction", + ) + .into()), } } /// Return if the parameter change was done via a governance proposal - pub fn is_valid_parameter(&self, tx: &Tx) -> Result { - match tx.data() { - Some(data) => is_proposal_accepted(&self.ctx.pre(), data.as_ref()) - .map_err(Error::NativeVpError), - None => Ok(false), - } + pub fn is_valid_parameter(&self, tx: &Tx) -> Result<()> { + tx.data().map_or_else( + || { + Err(native_vp::Error::new_const( + "Token parameter changes require tx data to be present", + ) + .into()) + }, + |data| { + is_proposal_accepted(&self.ctx.pre(), data.as_ref()) + .map_err(Error::NativeVpError)? 
+ .ok_or_else(|| { + native_vp::Error::new_const( + "Token parameter changes can only be performed by \ + a governance proposal that has been accepted", + ) + .into() + }) + }, + ) } } @@ -214,12 +307,14 @@ where mod tests { use std::cell::RefCell; + use assert_matches::assert_matches; use borsh_ext::BorshSerializeExt; - use namada_core::validity_predicate::VpSentinel; use namada_gas::TxGasMeter; + use namada_parameters::storage::get_native_token_transferable_key; use namada_state::testing::TestState; + use namada_state::StorageWrite; use namada_tx::data::TxType; - use namada_tx::{Code, Data, Section, Signature}; + use namada_tx::{Authorization, Code, Data, Section}; use super::*; use crate::core::address::testing::{ @@ -234,6 +329,12 @@ mod tests { const ADDRESS: Address = Address::Internal(InternalAddress::Multitoken); + fn init_state() -> TestState { + let mut state = TestState::default(); + namada_parameters::init_test_storage(&mut state).unwrap(); + state + } + fn dummy_tx(state: &TestState) -> Tx { let tx_code = vec![]; let tx_data = vec![]; @@ -241,7 +342,7 @@ mod tests { tx.header.chain_id = state.in_mem().chain_id.clone(); tx.set_code(Code::new(tx_code, None)); tx.set_data(Data::new(tx_data)); - tx.add_section(Section::Signature(Signature::new( + tx.add_section(Section::Authorization(Authorization::new( tx.sechashes(), [(0, keypair_1())].into_iter().collect(), None, @@ -249,33 +350,44 @@ mod tests { tx } - #[test] - fn test_valid_transfer() { - let mut state = TestState::default(); + fn transfer( + state: &mut TestState, + src: &Address, + dest: &Address, + ) -> BTreeSet { let mut keys_changed = BTreeSet::new(); - let sender = established_address_1(); - let sender_key = balance_key(&nam(), &sender); + let src_key = balance_key(&nam(), src); let amount = Amount::native_whole(100); state - .db_write(&sender_key, amount.serialize_to_vec()) + .db_write(&src_key, amount.serialize_to_vec()) .expect("write failed"); // transfer 10 let amount = 
Amount::native_whole(90); state .write_log_mut() - .write(&sender_key, amount.serialize_to_vec()) + .write(&src_key, amount.serialize_to_vec()) .expect("write failed"); - keys_changed.insert(sender_key); - let receiver = established_address_2(); - let receiver_key = balance_key(&nam(), &receiver); + keys_changed.insert(src_key); + + let dest_key = balance_key(&nam(), dest); let amount = Amount::native_whole(10); state .write_log_mut() - .write(&receiver_key, amount.serialize_to_vec()) + .write(&dest_key, amount.serialize_to_vec()) .expect("write failed"); - keys_changed.insert(receiver_key); + keys_changed.insert(dest_key); + + keys_changed + } + + #[test] + fn test_valid_transfer() { + let mut state = init_state(); + let src = established_address_1(); + let dest = established_address_2(); + let keys_changed = transfer(&mut state, &src, &dest); let tx_index = TxIndex::default(); let tx = dummy_tx(&state); @@ -284,55 +396,36 @@ mod tests { )); let (vp_wasm_cache, _vp_cache_dir) = wasm_cache(); let mut verifiers = BTreeSet::new(); - verifiers.insert(sender); - let sentinel = RefCell::new(VpSentinel::default()); + verifiers.insert(src); let ctx = Ctx::new( &ADDRESS, &state, &tx, &tx_index, &gas_meter, - &sentinel, &keys_changed, &verifiers, vp_wasm_cache, ); let vp = MultitokenVp { ctx }; - assert!( - vp.validate_tx(&tx, &keys_changed, &verifiers) - .expect("validation failed") - ); + assert!(vp.validate_tx(&tx, &keys_changed, &verifiers).is_ok()); } #[test] fn test_invalid_transfer() { - let mut state = TestState::default(); - let mut keys_changed = BTreeSet::new(); - - let sender = established_address_1(); - let sender_key = balance_key(&nam(), &sender); - let amount = Amount::native_whole(100); - state - .db_write(&sender_key, amount.serialize_to_vec()) - .expect("write failed"); + let mut state = init_state(); + let src = established_address_1(); + let dest = established_address_2(); + let keys_changed = transfer(&mut state, &src, &dest); - // transfer 10 - let 
amount = Amount::native_whole(90); - state - .write_log_mut() - .write(&sender_key, amount.serialize_to_vec()) - .expect("write failed"); - keys_changed.insert(sender_key); - let receiver = established_address_2(); - let receiver_key = balance_key(&nam(), &receiver); // receive more than 10 + let dest_key = balance_key(&nam(), &dest); let amount = Amount::native_whole(100); state .write_log_mut() - .write(&receiver_key, amount.serialize_to_vec()) + .write(&dest_key, amount.serialize_to_vec()) .expect("write failed"); - keys_changed.insert(receiver_key); let tx_index = TxIndex::default(); let tx = dummy_tx(&state); @@ -341,29 +434,24 @@ mod tests { )); let (vp_wasm_cache, _vp_cache_dir) = wasm_cache(); let verifiers = BTreeSet::new(); - let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, &state, &tx, &tx_index, &gas_meter, - &sentinel, &keys_changed, &verifiers, vp_wasm_cache, ); let vp = MultitokenVp { ctx }; - assert!( - !vp.validate_tx(&tx, &keys_changed, &verifiers) - .expect("validation failed") - ); + assert!(vp.validate_tx(&tx, &keys_changed, &verifiers).is_err()); } #[test] fn test_valid_mint() { - let mut state = TestState::default(); + let mut state = init_state(); let mut keys_changed = BTreeSet::new(); // IBC token @@ -404,29 +492,26 @@ mod tests { let mut verifiers = BTreeSet::new(); // for the minter verifiers.insert(minter); - let sentinel = RefCell::new(VpSentinel::default()); + // The token must be part of the verifier set (checked by MultitokenVp) + verifiers.insert(token); let ctx = Ctx::new( &ADDRESS, &state, &tx, &tx_index, &gas_meter, - &sentinel, &keys_changed, &verifiers, vp_wasm_cache, ); let vp = MultitokenVp { ctx }; - assert!( - vp.validate_tx(&tx, &keys_changed, &verifiers) - .expect("validation failed") - ); + assert!(vp.validate_tx(&tx, &keys_changed, &verifiers).is_ok()); } #[test] fn test_invalid_mint() { - let mut state = TestState::default(); + let mut state = init_state(); let mut keys_changed = 
BTreeSet::new(); // mint 100 @@ -465,29 +550,24 @@ mod tests { let mut verifiers = BTreeSet::new(); // for the minter verifiers.insert(minter); - let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, &state, &tx, &tx_index, &gas_meter, - &sentinel, &keys_changed, &verifiers, vp_wasm_cache, ); let vp = MultitokenVp { ctx }; - assert!( - !vp.validate_tx(&tx, &keys_changed, &verifiers) - .expect("validation failed") - ); + assert!(vp.validate_tx(&tx, &keys_changed, &verifiers).is_err()); } #[test] fn test_no_minter() { - let mut state = TestState::default(); + let mut state = init_state(); let mut keys_changed = BTreeSet::new(); // IBC token @@ -519,29 +599,24 @@ mod tests { )); let (vp_wasm_cache, _vp_cache_dir) = wasm_cache(); let verifiers = BTreeSet::new(); - let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, &state, &tx, &tx_index, &gas_meter, - &sentinel, &keys_changed, &verifiers, vp_wasm_cache, ); let vp = MultitokenVp { ctx }; - assert!( - !vp.validate_tx(&tx, &keys_changed, &verifiers) - .expect("validation failed") - ); + assert!(vp.validate_tx(&tx, &keys_changed, &verifiers).is_err()); } #[test] fn test_invalid_minter() { - let mut state = TestState::default(); + let mut state = init_state(); let mut keys_changed = BTreeSet::new(); // IBC token @@ -582,29 +657,24 @@ mod tests { let mut verifiers = BTreeSet::new(); // for the minter verifiers.insert(minter); - let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, &state, &tx, &tx_index, &gas_meter, - &sentinel, &keys_changed, &verifiers, vp_wasm_cache, ); let vp = MultitokenVp { ctx }; - assert!( - !vp.validate_tx(&tx, &keys_changed, &verifiers) - .expect("validation failed") - ); + assert!(vp.validate_tx(&tx, &keys_changed, &verifiers).is_err()); } #[test] fn test_invalid_minter_update() { - let mut state = TestState::default(); + let mut state = init_state(); let mut keys_changed = BTreeSet::new(); let minter_key = 
minter_key(&nam()); @@ -625,29 +695,24 @@ mod tests { let mut verifiers = BTreeSet::new(); // for the minter verifiers.insert(minter); - let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, &state, &tx, &tx_index, &gas_meter, - &sentinel, &keys_changed, &verifiers, vp_wasm_cache, ); let vp = MultitokenVp { ctx }; - assert!( - !vp.validate_tx(&tx, &keys_changed, &verifiers) - .expect("validation failed") - ); + assert!(vp.validate_tx(&tx, &keys_changed, &verifiers).is_err()); } #[test] fn test_invalid_key_update() { - let mut state = TestState::default(); + let mut state = init_state(); let mut keys_changed = BTreeSet::new(); let key = Key::from( @@ -669,23 +734,120 @@ mod tests { )); let (vp_wasm_cache, _vp_cache_dir) = wasm_cache(); let verifiers = BTreeSet::new(); - let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, &state, &tx, &tx_index, &gas_meter, - &sentinel, &keys_changed, &verifiers, vp_wasm_cache, ); let vp = MultitokenVp { ctx }; - assert!( - !vp.validate_tx(&tx, &keys_changed, &verifiers) - .expect("validation failed") + assert!(vp.validate_tx(&tx, &keys_changed, &verifiers).is_err()); + } + + #[test] + fn test_native_token_not_transferable() { + let mut state = init_state(); + let src = established_address_1(); + let dest = established_address_2(); + let keys_changed = transfer(&mut state, &src, &dest); + + // disable native token transfer + let key = get_native_token_transferable_key(); + state.write(&key, false).unwrap(); + + let tx_index = TxIndex::default(); + let tx = dummy_tx(&state); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(u64::MAX.into()), + )); + let (vp_wasm_cache, _vp_cache_dir) = wasm_cache(); + let mut verifiers = BTreeSet::new(); + verifiers.insert(src); + let ctx = Ctx::new( + &ADDRESS, + &state, + &tx, + &tx_index, + &gas_meter, + &keys_changed, + &verifiers, + vp_wasm_cache, + ); + + let vp = MultitokenVp { ctx }; + 
assert_matches!(vp.validate_tx(&tx, &keys_changed, &verifiers), Err(_)); + } + + #[test] + fn test_native_token_transferable_to_pos() { + let mut state = init_state(); + let src = established_address_1(); + let dest = POS; + let keys_changed = transfer(&mut state, &src, &dest); + + // disable native token transfer + let key = get_native_token_transferable_key(); + state.write(&key, false).unwrap(); + + let tx_index = TxIndex::default(); + let tx = dummy_tx(&state); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(u64::MAX.into()), + )); + let (vp_wasm_cache, _vp_cache_dir) = wasm_cache(); + let mut verifiers = BTreeSet::new(); + verifiers.insert(src); + let ctx = Ctx::new( + &ADDRESS, + &state, + &tx, + &tx_index, + &gas_meter, + &keys_changed, + &verifiers, + vp_wasm_cache, + ); + + let vp = MultitokenVp { ctx }; + assert_matches!(vp.validate_tx(&tx, &keys_changed, &verifiers), Ok(_)); + } + + #[test] + fn test_native_token_transferable_from_gov() { + let mut state = init_state(); + let src = GOV; + let dest = POS; + let keys_changed = transfer(&mut state, &src, &dest); + + // disable native token transfer + let key = get_native_token_transferable_key(); + state.write(&key, false).unwrap(); + + let tx_index = TxIndex::default(); + let tx = dummy_tx(&state); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(u64::MAX.into()), + )); + let (vp_wasm_cache, _vp_cache_dir) = wasm_cache(); + let mut verifiers = BTreeSet::new(); + verifiers.insert(src); + let ctx = Ctx::new( + &ADDRESS, + &state, + &tx, + &tx_index, + &gas_meter, + &keys_changed, + &verifiers, + vp_wasm_cache, ); + + let vp = MultitokenVp { ctx }; + assert_matches!(vp.validate_tx(&tx, &keys_changed, &verifiers), Err(_)); } } diff --git a/crates/namada/src/ledger/native_vp/parameters.rs b/crates/namada/src/ledger/native_vp/parameters.rs index 4fe0f11f76..7a039b4336 100644 --- 
a/crates/namada/src/ledger/native_vp/parameters.rs +++ b/crates/namada/src/ledger/native_vp/parameters.rs @@ -3,6 +3,7 @@ use std::collections::BTreeSet; use namada_core::address::Address; +use namada_core::booleans::BoolResultUnitExt; use namada_core::storage::Key; use namada_state::StateRead; use namada_tx::Tx; @@ -14,8 +15,8 @@ use crate::vm::WasmCacheAccess; #[allow(missing_docs)] #[derive(Error, Debug)] pub enum Error { - #[error("Native VP error: {0}")] - NativeVpError(native_vp::Error), + #[error("Parameters VP error: Native VP error: {0}")] + NativeVpError(#[from] native_vp::Error), } /// Parameters functions result @@ -43,13 +44,16 @@ where tx_data: &Tx, keys_changed: &BTreeSet, _verifiers: &BTreeSet
, - ) -> Result { - let result = keys_changed.iter().all(|key| { + ) -> Result<()> { + keys_changed.iter().try_for_each(|key| { let key_type: KeyType = key.into(); let data = if let Some(data) = tx_data.data() { data } else { - return false; + return Err(native_vp::Error::new_const( + "Token parameter changes require tx data to be present", + ) + .into()); }; match key_type { KeyType::PARAMETER => { @@ -57,19 +61,26 @@ where &self.ctx.pre(), &data, ) - .unwrap_or(false) + .map_err(Error::NativeVpError)? + .ok_or_else(|| { + native_vp::Error::new_alloc(format!( + "Attempted to change a protocol parameter from \ + outside of a governance proposal, or from a \ + non-accepted governance proposal: {key}", + )) + .into() + }) } - KeyType::UNKNOWN_PARAMETER => false, - KeyType::UNKNOWN => true, + KeyType::UNKNOWN_PARAMETER => { + Err(native_vp::Error::new_alloc(format!( + "Attempted to change an unknown protocol parameter: \ + {key}", + )) + .into()) + } + KeyType::UNKNOWN => Ok(()), } - }); - Ok(result) - } -} - -impl From for Error { - fn from(err: native_vp::Error) -> Self { - Self::NativeVpError(err) + }) } } diff --git a/crates/namada/src/ledger/pgf/mod.rs b/crates/namada/src/ledger/pgf/mod.rs index 264f96691b..43fb175f45 100644 --- a/crates/namada/src/ledger/pgf/mod.rs +++ b/crates/namada/src/ledger/pgf/mod.rs @@ -5,9 +5,11 @@ pub mod utils; use std::collections::BTreeSet; +use namada_core::booleans::BoolResultUnitExt; use namada_governance::pgf::storage::keys as pgf_storage; use namada_governance::{is_proposal_accepted, pgf}; use namada_state::StateRead; +use namada_tx::action::{Action, PgfAction, Read}; use namada_tx::Tx; use thiserror::Error; @@ -26,8 +28,12 @@ pub const ADDRESS: Address = Address::Internal(InternalAddress::Pgf); #[allow(missing_docs)] #[derive(Error, Debug)] pub enum Error { - #[error("Native VP error: {0}")] + #[error("PGF VP error: Native VP error: {0}")] NativeVpError(#[from] native_vp::Error), + #[error( + "Action {0} not authorized by {1} 
which is not part of verifier set" + )] + Unauthorized(&'static str, Address), } /// Pgf VP @@ -52,61 +58,142 @@ where tx_data: &Tx, keys_changed: &BTreeSet, verifiers: &BTreeSet
, - ) -> Result { - let result = keys_changed.iter().all(|key| { - let key_type = KeyType::from(key); + ) -> Result<()> { + // Find the actions applied in the tx + let actions = self.ctx.read_actions()?; - let result = match key_type { - KeyType::STEWARDS => { - let total_stewards_pre = pgf_storage::stewards_handle() - .len(&self.ctx.pre()) - .unwrap_or_default(); - let total_stewards_post = pgf_storage::stewards_handle() - .len(&self.ctx.post()) - .unwrap_or_default(); - - // stewards can only be added via governance proposals - let is_valid = if total_stewards_pre < total_stewards_post { - false - } else { - // if a steward resign, check the signature - // if a steward update the reward distribution (so - // total_stewards_pre == total_stewards_post) check - // signature and if commission are valid - let steward_address = pgf_storage::is_stewards_key(key); - if let Some(address) = steward_address { - let steward_post = pgf::storage::get_steward( - &self.ctx.post(), + // There must be at least one action if any of the keys belong to PGF + if actions.is_empty() + && keys_changed.iter().any(pgf_storage::is_pgf_key) + { + tracing::info!( + "Rejecting tx without any action written to temp storage" + ); + return Err(native_vp::Error::new_const( + "Rejecting tx without any action written to temp storage", + ) + .into()); + } + + // Check action authorization + for action in actions { + match action { + Action::Pgf(pgf_action) => match pgf_action { + PgfAction::UpdateStewardCommission(address) => { + if !verifiers.contains(&address) { + tracing::info!( + "Unauthorized \ + PgfAction::UpdateStewardCommission" + ); + return Err(Error::Unauthorized( + "UpdateStewardCommission", address, + )); + } + } + PgfAction::ResignSteward(address) => { + if !verifiers.contains(&address) { + tracing::info!( + "Unauthorized PgfAction::ResignSteward" ); - match steward_post { - Ok(Some(steward)) => { - steward.is_valid_reward_distribution() - && verifiers.contains(address) - } - Ok(None) 
=> verifiers.contains(address), - // if reading from storage returns an error, - // just return false - Err(_) => false, - } - } else { - false + return Err(Error::Unauthorized( + "ResignSteward", + address, + )); } + } + }, + _ => { + // Other actions are not relevant to PoS VP + continue; + } + } + } + + keys_changed.iter().try_for_each(|key| { + let key_type = KeyType::from(key); + + match key_type { + KeyType::Stewards(steward_address) => { + let stewards_have_increased = { + // TODO: maybe we should check errors here, which could + // be out-of-gas related? + let total_stewards_pre = pgf_storage::stewards_handle() + .len(&self.ctx.pre()) + .unwrap_or_default(); + let total_stewards_post = + pgf_storage::stewards_handle() + .len(&self.ctx.post()) + .unwrap_or_default(); + + total_stewards_pre < total_stewards_post }; - Ok(is_valid) + if stewards_have_increased { + return Err(native_vp::Error::new_const( + "Stewards can only be added via governance \ + proposals", + ) + .into()); + } + + pgf::storage::get_steward( + &self.ctx.post(), + steward_address, + )? 
+ .map_or_else( + // if a steward resigns, check their signature + || { + verifiers.contains(steward_address).ok_or_else( + || { + native_vp::Error::new_alloc(format!( + "The VP of the steward \ + {steward_address} should have been \ + triggered to check their signature" + )) + .into() + }, + ) + }, + // if a steward updates the reward distribution (so + // total_stewards_pre == total_stewards_post) check + // their signature and if commissions are valid + |steward| { + if !verifiers.contains(steward_address) { + return Err(native_vp::Error::new_alloc( + format!( + "The VP of the steward \ + {steward_address} should have been \ + triggered to check their signature" + ), + ) + .into()); + } + steward.is_valid_reward_distribution().ok_or_else( + || { + native_vp::Error::new_const( + "Steward commissions are invalid", + ) + .into() + }, + ) + }, + ) } - KeyType::FUNDINGS => Ok(false), - KeyType::PGF_INFLATION_RATE - | KeyType::STEWARD_INFLATION_RATE => { + KeyType::Fundings => Err(native_vp::Error::new_alloc(format!( + "Cannot update PGF fundings key: {key}" + )) + .into()), + KeyType::PgfInflationRate | KeyType::StewardInflationRate => { self.is_valid_parameter_change(tx_data) } - KeyType::UNKNOWN_PGF => Ok(false), - KeyType::UNKNOWN => Ok(true), - }; - result.unwrap_or(false) - }); - Ok(result) + KeyType::UnknownPgf => Err(native_vp::Error::new_alloc( + format!("Unknown PGF state update on key: {key}"), + ) + .into()), + KeyType::Unknown => Ok(()), + } + }) } } @@ -116,46 +203,54 @@ where CA: 'static + WasmCacheAccess, { /// Validate a governance parameter - pub fn is_valid_parameter_change(&self, tx: &Tx) -> Result { - match tx.data() { - Some(data) => is_proposal_accepted(&self.ctx.pre(), data.as_ref()) - .map_err(Error::NativeVpError), - None => Ok(false), - } + pub fn is_valid_parameter_change(&self, tx: &Tx) -> Result<()> { + tx.data().map_or_else( + || { + Err(native_vp::Error::new_const( + "PGF parameter changes require tx data to be present", + ) + .into()) 
+ }, + |data| { + is_proposal_accepted(&self.ctx.pre(), data.as_ref()) + .map_err(Error::NativeVpError)? + .ok_or_else(|| { + native_vp::Error::new_const( + "PGF parameter changes can only be performed by a \ + governance proposal that has been accepted", + ) + .into() + }) + }, + ) } } #[allow(clippy::upper_case_acronyms)] #[derive(Debug)] -enum KeyType { - #[allow(non_camel_case_types)] - STEWARDS, - #[allow(non_camel_case_types)] - FUNDINGS, - #[allow(non_camel_case_types)] - PGF_INFLATION_RATE, - #[allow(non_camel_case_types)] - STEWARD_INFLATION_RATE, - #[allow(non_camel_case_types)] - UNKNOWN_PGF, - #[allow(non_camel_case_types)] - UNKNOWN, +enum KeyType<'a> { + Stewards(&'a Address), + Fundings, + PgfInflationRate, + StewardInflationRate, + UnknownPgf, + Unknown, } -impl From<&Key> for KeyType { - fn from(key: &Key) -> Self { - if pgf_storage::is_stewards_key(key).is_some() { - Self::STEWARDS +impl<'k> From<&'k Key> for KeyType<'k> { + fn from(key: &'k Key) -> Self { + if let Some(addr) = pgf_storage::is_stewards_key(key) { + Self::Stewards(addr) } else if pgf_storage::is_fundings_key(key) { - KeyType::FUNDINGS + KeyType::Fundings } else if pgf_storage::is_pgf_inflation_rate_key(key) { - Self::PGF_INFLATION_RATE + Self::PgfInflationRate } else if pgf_storage::is_steward_inflation_rate_key(key) { - Self::STEWARD_INFLATION_RATE + Self::StewardInflationRate } else if pgf_storage::is_pgf_key(key) { - KeyType::UNKNOWN_PGF + KeyType::UnknownPgf } else { - KeyType::UNKNOWN + KeyType::Unknown } } } diff --git a/crates/namada/src/ledger/pgf/utils.rs b/crates/namada/src/ledger/pgf/utils.rs index 6f6153885e..88945f43f6 100644 --- a/crates/namada/src/ledger/pgf/utils.rs +++ b/crates/namada/src/ledger/pgf/utils.rs @@ -1,6 +1,5 @@ -use std::collections::HashMap; - use namada_core::address::Address; +use namada_core::collections::HashMap; use crate::ledger::events::EventType; use crate::token; diff --git a/crates/namada/src/ledger/pos/vp.rs 
b/crates/namada/src/ledger/pos/vp.rs index 28ac392a43..6863418980 100644 --- a/crates/namada/src/ledger/pos/vp.rs +++ b/crates/namada/src/ledger/pos/vp.rs @@ -1,27 +1,36 @@ //! Proof-of-Stake native validity predicate. -use std::collections::BTreeSet; +use std::collections::{BTreeMap, BTreeSet}; +use namada_core::booleans::BoolResultUnitExt; pub use namada_proof_of_stake; pub use namada_proof_of_stake::parameters::PosParams; -// use namada_proof_of_stake::validation::validate; use namada_proof_of_stake::storage::read_pos_params; use namada_proof_of_stake::storage_key::is_params_key; pub use namada_proof_of_stake::types; +use namada_proof_of_stake::types::BondId; +use namada_proof_of_stake::{storage_key, token}; use namada_state::StateRead; +use namada_tx::action::{ + Action, Bond, ClaimRewards, PosAction, Read, Redelegation, Unbond, Withdraw, +}; use namada_tx::Tx; use thiserror::Error; -use crate::address::{self, Address}; +use crate::address::Address; use crate::ledger::native_vp::{self, Ctx, NativeVp}; -use crate::storage::{Key, KeySeg}; +use crate::storage::Key; use crate::vm::WasmCacheAccess; #[allow(missing_docs)] #[derive(Error, Debug)] pub enum Error { - #[error("Native VP error: {0}")] - NativeVpError(native_vp::Error), + #[error("PoS VP error: Native VP error: {0}")] + NativeVpError(#[from] native_vp::Error), + #[error( + "Action {0} not authorized by {1} which is not part of verifier set" + )] + Unauthorized(&'static str, Address), } /// PoS functions result @@ -37,17 +46,6 @@ where pub ctx: Ctx<'a, S, CA>, } -impl<'a, S, CA> PosVP<'a, S, CA> -where - S: StateRead, - CA: 'static + WasmCacheAccess, -{ - /// Instantiate a `PosVP`. - pub fn new(ctx: Ctx<'a, S, CA>) -> Self { - Self { ctx } - } -} - impl<'a, S, CA> NativeVp for PosVP<'a, S, CA> where S: StateRead, @@ -57,68 +55,287 @@ where fn validate_tx( &self, - tx_data: &Tx, + tx: &Tx, keys_changed: &BTreeSet, - _verifiers: &BTreeSet
, - ) -> Result { - // use validation::Data; - // use validation::DataUpdate::{self, *}; - // use validation::ValidatorUpdate::*; - - // let mut changes: Vec = vec![]; - // let _current_epoch = self.ctx.pre().get_block_epoch()?; + verifiers: &BTreeSet
, + ) -> Result<()> { + tracing::debug!("\nValidating PoS Tx\n"); - tracing::debug!("\nValidating PoS storage changes\n"); - - for key in keys_changed { - if is_params_key(key) { - let data = if let Some(data) = tx_data.data() { - data - } else { - return Ok(false); - }; - if !namada_governance::is_proposal_accepted( + // Check if this is a governance proposal first + if tx + .data() + .map(|tx_data| { + namada_governance::is_proposal_accepted( &self.ctx.pre(), - &data, + &tx_data, ) - .map_err(Error::NativeVpError)? - { - return Ok(false); + }) + .transpose() + .map_err(Error::NativeVpError)? + .unwrap_or(false) + { + for key in keys_changed { + if is_params_key(key) { + // If governance changes PoS params, the params have to be + // valid + self.is_valid_parameter_change()?; } - let params = read_pos_params(&self.ctx.post())?.owned; - if !params.validate().is_empty() { - return Ok(false); + // Any other change from governance is allowed without further + // checks + } + return Ok(()); + } + + // Find the actions applied in the tx + let actions = self.ctx.read_actions()?; + + // There must be at least one action + if actions.is_empty() + && keys_changed.iter().any(storage_key::is_pos_key) + { + tracing::info!( + "Rejecting tx without any action written to temp storage" + ); + return Err(native_vp::Error::new_const( + "Rejecting tx without any action written to temp storage", + ) + .into()); + } + + let mut became_validator: BTreeSet
= Default::default(); + let mut deactivated: BTreeSet
= Default::default(); + let mut reactivated: BTreeSet
= Default::default(); + let mut unjailed: BTreeSet
= Default::default(); + let mut bonds: BTreeMap = Default::default(); + let mut unbonds: BTreeMap = Default::default(); + let mut withdrawals: BTreeSet = Default::default(); + // The key is src bond ID and value is pair of (dest_validator, amount) + let mut redelegations: BTreeMap = + Default::default(); + let mut claimed_rewards: BTreeSet = Default::default(); + let mut changed_commission: BTreeSet
= Default::default(); + let mut changed_metadata: BTreeSet
= Default::default(); + let mut changed_consensus_key: BTreeSet
= Default::default(); + + // Accumulate changes from the actions + for action in actions { + match action { + Action::Pos(pos_action) => match pos_action { + PosAction::BecomeValidator(address) => { + if !verifiers.contains(&address) { + tracing::info!( + "Unauthorized PosAction::BecomeValidator" + ); + return Err(Error::Unauthorized( + "BecomeValidator", + address, + )); + } + became_validator.insert(address); + } + PosAction::DeactivateValidator(validator) => { + if !verifiers.contains(&validator) { + tracing::info!( + "Unauthorized PosAction::DeactivateValidator" + ); + return Err(Error::Unauthorized( + "DeactivateValidator", + validator, + )); + } + deactivated.insert(validator); + } + PosAction::ReactivateValidator(validator) => { + if !verifiers.contains(&validator) { + tracing::info!( + "Unauthorized PosAction::ReactivateValidator" + ); + return Err(Error::Unauthorized( + "ReactivateValidator", + validator, + )); + } + reactivated.insert(validator); + } + PosAction::Unjail(validator) => { + if !verifiers.contains(&validator) { + tracing::info!("Unauthorized PosAction::Unjail"); + return Err(Error::Unauthorized( + "Unjail", validator, + )); + } + unjailed.insert(validator); + } + PosAction::Bond(Bond { + validator, + amount, + source, + }) => { + let bond_id = BondId { + source: source.unwrap_or_else(|| validator.clone()), + validator, + }; + if !verifiers.contains(&bond_id.source) { + tracing::info!("Unauthorized PosAction::Bond"); + return Err(Error::Unauthorized( + "Bond", + bond_id.source, + )); + } + bonds.insert(bond_id, amount); + } + PosAction::Unbond(Unbond { + validator, + amount, + source, + }) => { + let bond_id = BondId { + source: source.unwrap_or_else(|| validator.clone()), + validator, + }; + if !verifiers.contains(&bond_id.source) { + tracing::info!("Unauthorized PosAction::Unbond"); + return Err(Error::Unauthorized( + "Unbond", + bond_id.source, + )); + } + unbonds.insert(bond_id, amount); + } + PosAction::Withdraw(Withdraw { validator, 
source }) => { + let bond_id = BondId { + source: source.unwrap_or_else(|| validator.clone()), + validator, + }; + if !verifiers.contains(&bond_id.source) { + tracing::info!("Unauthorized PosAction::Withdraw"); + return Err(Error::Unauthorized( + "Withdraw", + bond_id.source, + )); + } + withdrawals.insert(bond_id); + } + PosAction::Redelegation(Redelegation { + src_validator, + dest_validator, + owner, + amount, + }) => { + if !verifiers.contains(&owner) { + tracing::info!( + "Unauthorized PosAction::Redelegation" + ); + return Err(Error::Unauthorized( + "Redelegation", + owner, + )); + } + let bond_id = BondId { + source: owner, + validator: src_validator, + }; + redelegations.insert(bond_id, (dest_validator, amount)); + } + PosAction::ClaimRewards(ClaimRewards { + validator, + source, + }) => { + let bond_id = BondId { + source: source.unwrap_or_else(|| validator.clone()), + validator, + }; + if !verifiers.contains(&bond_id.source) { + tracing::info!( + "Unauthorized PosAction::ClaimRewards" + ); + return Err(Error::Unauthorized( + "ClaimRewards", + bond_id.source, + )); + } + claimed_rewards.insert(bond_id); + } + PosAction::CommissionChange(validator) => { + if !verifiers.contains(&validator) { + tracing::info!( + "Unauthorized PosAction::CommissionChange" + ); + return Err(Error::Unauthorized( + "CommissionChange", + validator, + )); + } + changed_commission.insert(validator); + } + PosAction::MetadataChange(validator) => { + if !verifiers.contains(&validator) { + tracing::info!( + "Unauthorized PosAction::MetadataChange" + ); + return Err(Error::Unauthorized( + "MetadataChange", + validator, + )); + } + changed_metadata.insert(validator); + } + PosAction::ConsensusKeyChange(validator) => { + if !verifiers.contains(&validator) { + tracing::info!( + "Unauthorized PosAction::ConsensusKeyChange" + ); + return Err(Error::Unauthorized( + "ConsensusKeyChange", + validator, + )); + } + changed_consensus_key.insert(validator); + } + }, + _ => { + // Other actions are 
not relevant to PoS VP + continue; } - } else if key.segments.first() == Some(&address::POS.to_db_key()) { - // No VP logic applied to all other PoS keys for now, as PoS txs - // are all whitelisted - tracing::debug!( - "PoS key change {} - No action is taken currently.", - key - ); - } else { - // Unknown changes anywhere else are permitted - tracing::debug!("PoS unrecognized key change {}", key); } } - // let _params = read_pos_params(&self.ctx.pre())?; - // let errors = validate(¶ms, changes, current_epoch); - // Ok(if errors.is_empty() { - // true - // } else { - // tracing::info!( - // "PoS validation errors:\n - {}", - // errors.iter().format("\n - ") - // ); - // false - // }) - Ok(true) + for key in keys_changed { + if is_params_key(key) { + return Err(Error::NativeVpError(native_vp::Error::new_const( + "PoS parameter changes can only be performed by a \ + governance proposal that has been accepted", + ))); + } + // TODO: validate changes keys against the accumulated changes + } + Ok(()) } } -impl From for Error { - fn from(err: native_vp::Error) -> Self { - Self::NativeVpError(err) +impl<'a, S, CA> PosVP<'a, S, CA> +where + S: StateRead, + CA: 'static + WasmCacheAccess, +{ + /// Instantiate a `PosVP`. + pub fn new(ctx: Ctx<'a, S, CA>) -> Self { + Self { ctx } + } + + /// Return `Ok` if the changed parameters are valid + fn is_valid_parameter_change(&self) -> Result<()> { + let validation_errors = read_pos_params(&self.ctx.post()) + .map_err(Error::NativeVpError)? 
+ .owned + .validate(); + validation_errors.is_empty().ok_or_else(|| { + let validation_errors_str = + itertools::join(validation_errors, ", "); + native_vp::Error::new_alloc(format!( + "PoS parameter changes were invalid: {validation_errors_str}", + )) + .into() + }) } } diff --git a/crates/namada/src/ledger/protocol/mod.rs b/crates/namada/src/ledger/protocol/mod.rs index 6a7991ef65..b3c0b7c649 100644 --- a/crates/namada/src/ledger/protocol/mod.rs +++ b/crates/namada/src/ledger/protocol/mod.rs @@ -6,15 +6,15 @@ use std::fmt::Debug; use borsh_ext::BorshSerializeExt; use eyre::{eyre, WrapErr}; use masp_primitives::transaction::Transaction; +use namada_core::booleans::BoolResultUnitExt; use namada_core::hash::Hash; use namada_core::storage::Key; -use namada_core::validity_predicate::VpSentinel; use namada_gas::TxGasMeter; use namada_sdk::tx::TX_TRANSFER_WASM; use namada_state::StorageWrite; use namada_tx::data::protocol::ProtocolTxType; use namada_tx::data::{ - DecryptedTx, GasLimit, TxResult, TxType, VpsResult, WrapperTx, + GasLimit, TxResult, TxType, VpStatusFlags, VpsResult, WrapperTx, }; use namada_tx::{Section, Tx}; use namada_vote_ext::EthereumTxData; @@ -50,6 +50,8 @@ pub enum Error { StateError(namada_state::Error), #[error("Storage error: {0}")] StorageError(namada_state::StorageError), + #[error("Wrapper tx runner error: {0}")] + WrapperRunnerError(String), #[error("Transaction runner error: {0}")] TxRunnerError(vm::wasm::run::Error), #[error("{0:?}")] @@ -62,8 +64,8 @@ pub enum Error { GasError(String), #[error("Error while processing transaction's fees: {0}")] FeeError(String), - #[error("Invalid transaction signature")] - InvalidTxSignature, + #[error("Invalid transaction section signature: {0}")] + InvalidSectionSignature(String), #[error( "The decrypted transaction {0} has already been applied in this block" )] @@ -96,8 +98,18 @@ pub enum Error { MaspNativeVpError(native_vp::masp::Error), #[error("Access to an internal address {0:?} is forbidden")] 
AccessForbidden(InternalAddress), - #[error("Tx is not allowed in allowlist parameter.")] - DisallowedTx, +} + +impl Error { + /// Determine if the error originates from an invalid transaction + /// section signature. This is required for replay protection. + const fn invalid_section_signature_flag(&self) -> VpStatusFlags { + if matches!(self, Self::InvalidSectionSignature(_)) { + VpStatusFlags::INVALID_SIGNATURE + } else { + VpStatusFlags::empty() + } + } } /// Shell parameters for running wasm transactions. @@ -174,8 +186,8 @@ where CA: 'static + WasmCacheAccess + Sync, { match tx.header().tx_type { - TxType::Raw => Err(Error::TxTypeError), - TxType::Decrypted(DecryptedTx::Decrypted) => apply_wasm_tx( + // Raw trasaction type is allowed only for governance proposals + TxType::Raw => apply_wasm_tx( tx, &tx_index, ShellParams { @@ -192,7 +204,7 @@ where let fee_unshielding_transaction = get_fee_unshielding_transaction(&tx, wrapper); let changed_keys = apply_wrapper_tx( - tx, + tx.clone(), wrapper, fee_unshielding_transaction, tx_bytes, @@ -203,18 +215,21 @@ where tx_wasm_cache, }, wrapper_args, + ) + .map_err(|e| Error::WrapperRunnerError(e.to_string()))?; + let mut inner_res = apply_wasm_tx( + tx, + &tx_index, + ShellParams { + tx_gas_meter, + state, + vp_wasm_cache, + tx_wasm_cache, + }, )?; - Ok(TxResult { - gas_used: tx_gas_meter.borrow().get_tx_consumed_gas(), - changed_keys, - vps_result: VpsResult::default(), - initialized_accounts: vec![], - ibc_events: BTreeSet::default(), - eth_bridge_events: BTreeSet::default(), - }) - } - TxType::Decrypted(DecryptedTx::Undecryptable) => { - Ok(TxResult::default()) + + inner_res.wrapper_changed_keys = changed_keys; + Ok(inner_res) } } } @@ -618,6 +633,7 @@ where Ok(TxResult { gas_used, + wrapper_changed_keys: Default::default(), changed_keys, vps_result, initialized_accounts, @@ -626,29 +642,6 @@ where }) } -/// Returns [`Error::DisallowedTx`] when the given tx is inner (decrypted) tx -/// and its code `Hash` is not 
included in the `tx_allowlist` parameter. -pub fn check_tx_allowed(tx: &Tx, state: &WlState) -> Result<()> -where - D: 'static + DB + for<'iter> DBIter<'iter> + Sync, - H: 'static + StorageHasher + Sync, -{ - if let TxType::Decrypted(DecryptedTx::Decrypted) = tx.header().tx_type { - if let Some(code_sec) = tx - .get_section(tx.code_sechash()) - .and_then(|x| Section::code_sec(&x)) - { - if crate::parameters::is_tx_allowed(state, &code_sec.code.hash()) - .map_err(Error::StorageError)? - { - return Ok(()); - } - } - return Err(Error::DisallowedTx); - } - Ok(()) -} - /// Apply a derived transaction to storage based on some protocol transaction. /// The logic here must be completely deterministic and will be executed by all /// full nodes every time a protocol transaction is included in a block. Storage @@ -823,7 +816,7 @@ where .try_fold(VpsResult::default, |mut result, addr| { let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter(tx_gas_meter)); - let accept = match &addr { + let tx_accepted = match &addr { Address::Implicit(_) | Address::Established(_) => { let (vp_hash, gas) = state .validity_predicate(addr) @@ -836,11 +829,6 @@ where return Err(Error::MissingAddress(addr.clone())); }; - // NOTE: because of the whitelisted gas and the gas - // metering for the exposed vm - // env functions, the first - // signature verification (if any) is accounted - // twice wasm::run::vp( vp_code_hash, tx, @@ -854,27 +842,25 @@ where ) .map_err(|err| match err { wasm::run::Error::GasError(msg) => Error::GasError(msg), - wasm::run::Error::InvalidTxSignature => { - Error::InvalidTxSignature + wasm::run::Error::InvalidSectionSignature(msg) => { + Error::InvalidSectionSignature(msg) } _ => Error::VpRunnerError(err), }) } Address::Internal(internal_addr) => { - let sentinel = RefCell::new(VpSentinel::default()); let ctx = native_vp::Ctx::new( addr, state, tx, tx_index, &gas_meter, - &sentinel, &keys_changed, &verifiers, vp_wasm_cache.clone(), ); - let accepted: Result = match 
internal_addr { + match internal_addr { InternalAddress::PoS => { let pos = PosVP { ctx }; pos.validate_tx(tx, &keys_changed, &verifiers) @@ -930,64 +916,53 @@ where .validate_tx(tx, &keys_changed, &verifiers) .map_err(Error::NutNativeVpError) } - InternalAddress::IbcToken(_) - | InternalAddress::Erc20(_) => { + internal_addr @ (InternalAddress::IbcToken(_) + | InternalAddress::Erc20(_)) => { // The address should be a part of a multitoken // key - Ok(verifiers.contains(&Address::Internal( - InternalAddress::Multitoken, - ))) + verifiers + .contains(&Address::Internal( + InternalAddress::Multitoken, + )) + .ok_or_else(|| { + Error::AccessForbidden( + internal_addr.clone(), + ) + }) } InternalAddress::Masp => { let masp = MaspVp { ctx }; masp.validate_tx(tx, &keys_changed, &verifiers) .map_err(Error::MaspNativeVpError) } - }; - - accepted.map_err(|err| { - // No need to check invalid sig because internal vps - // don't check the signature - if sentinel.borrow().is_out_of_gas() { - Error::GasError(err.to_string()) - } else { - err - } - }) + InternalAddress::TempStorage => Err( + // Temp storage changes must never be committed + Error::AccessForbidden((*internal_addr).clone()), + ), + } } }; - match accept { - Ok(accepted) => { - if accepted { - result.accepted_vps.insert(addr.clone()); - } else { - result.rejected_vps.insert(addr.clone()); - } - } - Err(err) => match err { - // Execution of VPs can (and must) be short-circuited - // only in case of a gas overflow to prevent the - // transaction from consuming resources that have not - // been acquired in the corresponding wrapper tx. For - // all the other errors we keep evaluating the vps. 
This - // allows to display a consistent VpsResult across all - // nodes and find any invalid signatures - Error::GasError(_) => { - return Err(err); - } - Error::InvalidTxSignature => { - result.invalid_sig = true; - result.rejected_vps.insert(addr.clone()); - // Don't push the error since this is just a flag error - } - _ => { - result.rejected_vps.insert(addr.clone()); - result.errors.push((addr.clone(), err.to_string())); - } + tx_accepted.map_or_else( + |err| { + result + .status_flags + .insert(err.invalid_section_signature_flag()); + result.rejected_vps.insert(addr.clone()); + result.errors.push((addr.clone(), err.to_string())); }, - } + |()| { + result.accepted_vps.insert(addr.clone()); + }, + ); + // Execution of VPs can (and must) be short-circuited + // only in case of a gas overflow to prevent the + // transaction from consuming resources that have not + // been acquired in the corresponding wrapper tx. For + // all the other errors we keep evaluating the vps. This + // allows to display a consistent VpsResult across all + // nodes and find any invalid signatures result .gas_used .set(gas_meter.into_inner()) @@ -1014,7 +989,7 @@ fn merge_vp_results( rejected_vps.extend(b.rejected_vps); let mut errors = a.errors; errors.append(&mut b.errors); - let invalid_sig = a.invalid_sig || b.invalid_sig; + let status_flags = a.status_flags | b.status_flags; let mut gas_used = a.gas_used; gas_used @@ -1026,17 +1001,15 @@ fn merge_vp_results( rejected_vps, gas_used, errors, - invalid_sig, + status_flags, }) } #[cfg(test)] mod tests { - use std::collections::HashMap; - use borsh::BorshDeserialize; use eyre::Result; - use namada_core::chain::ChainId; + use namada_core::collections::HashMap; use namada_core::ethereum_events::testing::DAI_ERC20_ETH_ADDRESS; use namada_core::ethereum_events::{EthereumEvent, TransferToNamada}; use namada_core::keccak::keccak_hash; @@ -1189,42 +1162,83 @@ mod tests { } #[test] - fn test_apply_wasm_tx_allowlist() { + fn 
test_native_vp_out_of_gas() { let (mut state, _validators) = test_utils::setup_default_storage(); - let mut tx = Tx::new(ChainId::default(), None); - tx.update_header(TxType::Decrypted(DecryptedTx::Decrypted)); - // pseudo-random code hash - let code = vec![1_u8, 2, 3]; - let tx_hash = Hash::sha256(&code); - tx.set_code(namada_tx::Code::new(code, None)); - - // Check that using a disallowed tx leads to an error - { - let allowlist = vec![format!("{}-bad", tx_hash)]; - crate::parameters::update_tx_allowlist_parameter( - &mut state, allowlist, - ) - .unwrap(); - state.commit_tx(); + // some random token address + let token_address = Address::Established([0xff; 20].into()); - let result = check_tx_allowed(&tx, &state); - assert_matches!(result.unwrap_err(), Error::DisallowedTx); - } + let src_address = Address::Established([0xab; 20].into()); + let dst_address = Address::Established([0xba; 20].into()); - // Check that using an allowed tx doesn't lead to `Error::DisallowedTx` - { - let allowlist = vec![tx_hash.to_string()]; - crate::parameters::update_tx_allowlist_parameter( - &mut state, allowlist, + // supply an address with 1000 of said token + namada_token::credit_tokens( + &mut state, + &token_address, + &src_address, + 1000.into(), + ) + .unwrap(); + + // commit storage changes. 
this will act as the + // initial state of the chain + state.commit_tx(); + state.commit_block().unwrap(); + + // "execute" a dummy tx, by manually performing its state changes + let (dummy_tx, changed_keys, verifiers) = { + let mut tx = Tx::from_type(TxType::Raw); + tx.set_code(namada_tx::Code::new(vec![], None)); + tx.set_data(namada_tx::Data::new(vec![])); + + // transfer half of the supply of src to dst + namada_token::transfer( + &mut state, + &token_address, + &src_address, + &dst_address, + 500.into(), ) .unwrap(); - state.commit_tx(); - let result = check_tx_allowed(&tx, &state); - if let Err(result) = result { - assert!(!matches!(result, Error::DisallowedTx)); - } - } + let changed_keys = { + let mut set = BTreeSet::new(); + set.insert(namada_token::storage_key::balance_key( + &token_address, + &src_address, + )); + set.insert(namada_token::storage_key::balance_key( + &token_address, + &dst_address, + )); + set + }; + + let verifiers = { + let mut set = BTreeSet::new(); + set.insert(Address::Internal(InternalAddress::Multitoken)); + set + }; + + (tx, changed_keys, verifiers) + }; + + // temp vp cache + let (mut vp_cache, _) = + wasm::compilation_cache::common::testing::cache(); + + // gas meter with no gas left + let gas_meter = TxGasMeter::new(0); + + let result = execute_vps( + verifiers, + changed_keys, + &dummy_tx, + &TxIndex::default(), + &state, + &gas_meter, + &mut vp_cache, + ); + assert!(matches!(result.unwrap_err(), Error::GasError(_))); } } diff --git a/crates/namada/src/ledger/vp_host_fns.rs b/crates/namada/src/ledger/vp_host_fns.rs index 738a25deae..393e18d01f 100644 --- a/crates/namada/src/ledger/vp_host_fns.rs +++ b/crates/namada/src/ledger/vp_host_fns.rs @@ -10,7 +10,6 @@ use namada_core::storage::{ BlockHash, BlockHeight, Epoch, Epochs, Header, Key, TxIndex, TX_INDEX_LENGTH, }; -use namada_core::validity_predicate::VpSentinel; use namada_gas::MEMORY_ACCESS_GAS_PER_BYTE; use namada_state::write_log::WriteLog; use namada_state::{write_log, 
DBIter, StateRead, DB}; @@ -37,14 +36,14 @@ pub enum RuntimeError { NumConversionError(TryFromIntError), #[error("Memory error: {0}")] MemoryError(Box), - #[error("Trying to read a temporary value with read_post")] - ReadTemporaryValueError, - #[error("Trying to read a permanent value with read_temp")] - ReadPermanentValueError, #[error("Invalid transaction code hash")] InvalidCodeHash, #[error("No value found in result buffer")] NoValueInResultBuffer, + #[error("The section signature is invalid: {0}")] + InvalidSectionSignature(String), + #[error("{0}")] + Erased(String), // type erased error } /// VP environment function result @@ -54,10 +53,8 @@ pub type EnvResult = std::result::Result; pub fn add_gas( gas_meter: &RefCell, used_gas: u64, - sentinel: &RefCell, ) -> EnvResult<()> { gas_meter.borrow_mut().consume(used_gas).map_err(|err| { - sentinel.borrow_mut().set_out_of_gas(); tracing::info!("Stopping VP execution because of gas error: {}", err); RuntimeError::OutOfGas(err) }) @@ -69,13 +66,12 @@ pub fn read_pre( gas_meter: &RefCell, state: &S, key: &Key, - sentinel: &RefCell, ) -> EnvResult>> where S: StateRead + Debug, { let (log_val, gas) = state.write_log().read_pre(key); - add_gas(gas_meter, gas, sentinel)?; + add_gas(gas_meter, gas)?; match log_val { Some(write_log::StorageModification::Write { ref value }) => { Ok(Some(value.clone())) @@ -90,14 +86,12 @@ where // Read the VP of a new account Ok(Some(vp_code_hash.to_vec())) } - Some(&write_log::StorageModification::Temp { .. }) => { - Err(RuntimeError::ReadTemporaryValueError) - } - None => { - // When not found in write log, try to read from the storage + Some(&write_log::StorageModification::Temp { .. 
}) | None => { + // When not found in write log or only found a temporary value, try + // to read from the storage let (value, gas) = state.db_read(key).map_err(RuntimeError::StorageError)?; - add_gas(gas_meter, gas, sentinel)?; + add_gas(gas_meter, gas)?; Ok(value) } } @@ -109,36 +103,33 @@ pub fn read_post( gas_meter: &RefCell, state: &S, key: &Key, - sentinel: &RefCell, ) -> EnvResult>> where S: StateRead + Debug, { // Try to read from the write log first - let (log_val, gas) = state.write_log().read(key); - add_gas(gas_meter, gas, sentinel)?; + let (log_val, gas) = state.write_log().read_persistent(key); + add_gas(gas_meter, gas)?; match log_val { - Some(write_log::StorageModification::Write { ref value }) => { + Some(write_log::PersistentStorageModification::Write { value }) => { Ok(Some(value.clone())) } - Some(&write_log::StorageModification::Delete) => { + Some(write_log::PersistentStorageModification::Delete) => { // Given key has been deleted Ok(None) } - Some(write_log::StorageModification::InitAccount { - ref vp_code_hash, + Some(write_log::PersistentStorageModification::InitAccount { + vp_code_hash, }) => { // Read the VP code hash of a new account Ok(Some(vp_code_hash.to_vec())) } - Some(&write_log::StorageModification::Temp { .. 
}) => { - Err(RuntimeError::ReadTemporaryValueError) - } None => { - // When not found in write log, try to read from the storage + // When not found in write log, try + // to read from the storage let (value, gas) = state.db_read(key).map_err(RuntimeError::StorageError)?; - add_gas(gas_meter, gas, sentinel)?; + add_gas(gas_meter, gas)?; Ok(value) } } @@ -150,20 +141,17 @@ pub fn read_temp( gas_meter: &RefCell, state: &S, key: &Key, - sentinel: &RefCell, ) -> EnvResult>> where S: StateRead + Debug, { - // Try to read from the write log first let (log_val, gas) = state.write_log().read(key); - add_gas(gas_meter, gas, sentinel)?; + add_gas(gas_meter, gas)?; match log_val { Some(write_log::StorageModification::Temp { ref value }) => { Ok(Some(value.clone())) } - None => Ok(None), - _ => Err(RuntimeError::ReadPermanentValueError), + _ => Ok(None), } } @@ -173,14 +161,13 @@ pub fn has_key_pre( gas_meter: &RefCell, state: &S, key: &Key, - sentinel: &RefCell, ) -> EnvResult where S: StateRead + Debug, { // Try to read from the write log first let (log_val, gas) = state.write_log().read_pre(key); - add_gas(gas_meter, gas, sentinel)?; + add_gas(gas_meter, gas)?; match log_val { Some(&write_log::StorageModification::Write { .. }) => Ok(true), Some(&write_log::StorageModification::Delete) => { @@ -188,12 +175,12 @@ where Ok(false) } Some(&write_log::StorageModification::InitAccount { .. }) => Ok(true), - Some(&write_log::StorageModification::Temp { .. }) => Ok(true), - None => { - // When not found in write log, try to check the storage + Some(&write_log::StorageModification::Temp { .. 
}) | None => { + // When not found in write log or only found a temporary value, try + // to check the storage let (present, gas) = state.db_has_key(key).map_err(RuntimeError::StorageError)?; - add_gas(gas_meter, gas, sentinel)?; + add_gas(gas_meter, gas)?; Ok(present) } } @@ -205,27 +192,30 @@ pub fn has_key_post( gas_meter: &RefCell, state: &S, key: &Key, - sentinel: &RefCell, ) -> EnvResult where S: StateRead + Debug, { // Try to read from the write log first - let (log_val, gas) = state.write_log().read(key); - add_gas(gas_meter, gas, sentinel)?; + let (log_val, gas) = state.write_log().read_persistent(key); + add_gas(gas_meter, gas)?; match log_val { - Some(&write_log::StorageModification::Write { .. }) => Ok(true), - Some(&write_log::StorageModification::Delete) => { + Some(write_log::PersistentStorageModification::Write { .. }) => { + Ok(true) + } + Some(write_log::PersistentStorageModification::Delete) => { // The given key has been deleted Ok(false) } - Some(&write_log::StorageModification::InitAccount { .. }) => Ok(true), - Some(&write_log::StorageModification::Temp { .. }) => Ok(true), + Some(write_log::PersistentStorageModification::InitAccount { + .. 
+ }) => Ok(true), None => { - // When not found in write log, try to check the storage + // When not found in write log, try + // to check the storage let (present, gas) = state.db_has_key(key).map_err(RuntimeError::StorageError)?; - add_gas(gas_meter, gas, sentinel)?; + add_gas(gas_meter, gas)?; Ok(present) } } @@ -235,13 +225,12 @@ where pub fn get_chain_id( gas_meter: &RefCell, state: &S, - sentinel: &RefCell, ) -> EnvResult where S: StateRead + Debug, { let (chain_id, gas) = state.in_mem().get_chain_id(); - add_gas(gas_meter, gas, sentinel)?; + add_gas(gas_meter, gas)?; Ok(chain_id) } @@ -250,13 +239,12 @@ where pub fn get_block_height( gas_meter: &RefCell, state: &S, - sentinel: &RefCell, ) -> EnvResult where S: StateRead + Debug, { let (height, gas) = state.in_mem().get_block_height(); - add_gas(gas_meter, gas, sentinel)?; + add_gas(gas_meter, gas)?; Ok(height) } @@ -265,14 +253,13 @@ pub fn get_block_header( gas_meter: &RefCell, state: &S, height: BlockHeight, - sentinel: &RefCell, ) -> EnvResult> where S: StateRead + Debug, { let (header, gas) = StateRead::get_block_header(state, Some(height)) .map_err(RuntimeError::StorageError)?; - add_gas(gas_meter, gas, sentinel)?; + add_gas(gas_meter, gas)?; Ok(header) } @@ -281,13 +268,12 @@ where pub fn get_block_hash( gas_meter: &RefCell, state: &S, - sentinel: &RefCell, ) -> EnvResult where S: StateRead + Debug, { let (hash, gas) = state.in_mem().get_block_hash(); - add_gas(gas_meter, gas, sentinel)?; + add_gas(gas_meter, gas)?; Ok(hash) } @@ -296,13 +282,8 @@ where pub fn get_tx_code_hash( gas_meter: &RefCell, tx: &Tx, - sentinel: &RefCell, ) -> EnvResult> { - add_gas( - gas_meter, - HASH_LENGTH as u64 * MEMORY_ACCESS_GAS_PER_BYTE, - sentinel, - )?; + add_gas(gas_meter, HASH_LENGTH as u64 * MEMORY_ACCESS_GAS_PER_BYTE)?; let hash = tx .get_section(tx.code_sechash()) .and_then(|x| Section::code_sec(x.as_ref())) @@ -315,13 +296,12 @@ pub fn get_tx_code_hash( pub fn get_block_epoch( gas_meter: &RefCell, state: &S, - 
sentinel: &RefCell, ) -> EnvResult where S: StateRead + Debug, { let (epoch, gas) = state.in_mem().get_current_epoch(); - add_gas(gas_meter, gas, sentinel)?; + add_gas(gas_meter, gas)?; Ok(epoch) } @@ -330,12 +310,10 @@ where pub fn get_tx_index( gas_meter: &RefCell, tx_index: &TxIndex, - sentinel: &RefCell, ) -> EnvResult { add_gas( gas_meter, TX_INDEX_LENGTH as u64 * MEMORY_ACCESS_GAS_PER_BYTE, - sentinel, )?; Ok(*tx_index) } @@ -344,7 +322,6 @@ pub fn get_tx_index( pub fn get_native_token( gas_meter: &RefCell, state: &S, - sentinel: &RefCell, ) -> EnvResult
where S: StateRead + Debug, @@ -352,7 +329,6 @@ where add_gas( gas_meter, ESTABLISHED_ADDRESS_BYTES_LEN as u64 * MEMORY_ACCESS_GAS_PER_BYTE, - sentinel, )?; Ok(state.in_mem().native_token.clone()) } @@ -361,7 +337,6 @@ where pub fn get_pred_epochs( gas_meter: &RefCell, state: &S, - sentinel: &RefCell, ) -> EnvResult where S: StateRead + Debug, @@ -371,7 +346,6 @@ where state.in_mem().block.pred_epochs.first_block_heights.len() as u64 * 8 * MEMORY_ACCESS_GAS_PER_BYTE, - sentinel, )?; Ok(state.in_mem().block.pred_epochs.clone()) } @@ -404,13 +378,12 @@ pub fn iter_prefix_pre<'a, D>( write_log: &'a WriteLog, db: &'a D, prefix: &Key, - sentinel: &RefCell, ) -> EnvResult> where D: DB + for<'iter> DBIter<'iter>, { let (iter, gas) = namada_state::iter_prefix_pre(write_log, db, prefix); - add_gas(gas_meter, gas, sentinel)?; + add_gas(gas_meter, gas)?; Ok(iter) } @@ -424,13 +397,12 @@ pub fn iter_prefix_post<'a, D>( write_log: &'a WriteLog, db: &'a D, prefix: &Key, - sentinel: &RefCell, ) -> EnvResult> where D: DB + for<'iter> DBIter<'iter>, { let (iter, gas) = namada_state::iter_prefix_post(write_log, db, prefix); - add_gas(gas_meter, gas, sentinel)?; + add_gas(gas_meter, gas)?; Ok(iter) } @@ -438,13 +410,12 @@ where pub fn iter_next( gas_meter: &RefCell, iter: &mut namada_state::PrefixIter, - sentinel: &RefCell, ) -> EnvResult)>> where DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, { if let Some((key, val, gas)) = iter.next() { - add_gas(gas_meter, gas, sentinel)?; + add_gas(gas_meter, gas)?; return Ok(Some((key, val))); } Ok(None) diff --git a/crates/namada/src/vm/host_env.rs b/crates/namada/src/vm/host_env.rs index 5139421d0a..9327a8838e 100644 --- a/crates/namada/src/vm/host_env.rs +++ b/crates/namada/src/vm/host_env.rs @@ -7,11 +7,11 @@ use std::num::TryFromIntError; use borsh::BorshDeserialize; use borsh_ext::BorshSerializeExt; +use gas::IBC_TX_GAS; use masp_primitives::transaction::Transaction; use namada_core::address::ESTABLISHED_ADDRESS_BYTES_LEN; 
use namada_core::internal::KeyVal; use namada_core::storage::TX_INDEX_LENGTH; -use namada_core::validity_predicate::VpSentinel; use namada_gas::{ self as gas, GasMetering, TxGasMeter, VpGasMeter, MEMORY_ACCESS_GAS_PER_BYTE, @@ -67,6 +67,8 @@ pub enum TxRuntimeError { StorageError(#[from] StorageError), #[error("Storage data error: {0}")] StorageDataError(crate::storage::Error), + #[error("Trying to read a permanent value with read_temp")] + ReadPermanentValueError, #[error("Encoding error: {0}")] EncodingError(std::io::Error), #[error("Address error: {0}")] @@ -131,6 +133,8 @@ where pub verifiers: MutHostRef<'a, &'a BTreeSet
>, /// Cache for 2-step reads from host environment. pub result_buffer: MutHostRef<'a, &'a Option>>, + /// Storage for byte buffer values yielded from the guest. + pub yielded_value: MutHostRef<'a, &'a Option>>, /// VP WASM compilation cache (this is available in tx context, because /// we're pre-compiling VPs from [`tx_init_account`]) #[cfg(feature = "wasm-runtime")] @@ -170,6 +174,7 @@ where tx_index: &TxIndex, verifiers: &mut BTreeSet
, result_buffer: &mut Option>, + yielded_value: &mut Option>, #[cfg(feature = "wasm-runtime")] vp_wasm_cache: &mut VpCache, #[cfg(feature = "wasm-runtime")] tx_wasm_cache: &mut TxCache, ) -> Self { @@ -183,6 +188,7 @@ where let tx_index = unsafe { HostRef::new(tx_index) }; let verifiers = unsafe { MutHostRef::new(verifiers) }; let result_buffer = unsafe { MutHostRef::new(result_buffer) }; + let yielded_value = unsafe { MutHostRef::new(yielded_value) }; #[cfg(feature = "wasm-runtime")] let vp_wasm_cache = unsafe { MutHostRef::new(vp_wasm_cache) }; #[cfg(feature = "wasm-runtime")] @@ -198,6 +204,7 @@ where tx_index, verifiers, result_buffer, + yielded_value, #[cfg(feature = "wasm-runtime")] vp_wasm_cache, #[cfg(feature = "wasm-runtime")] @@ -280,6 +287,7 @@ where tx_index: self.tx_index.clone(), verifiers: self.verifiers.clone(), result_buffer: self.result_buffer.clone(), + yielded_value: self.yielded_value.clone(), #[cfg(feature = "wasm-runtime")] vp_wasm_cache: self.vp_wasm_cache.clone(), #[cfg(feature = "wasm-runtime")] @@ -325,8 +333,6 @@ where pub iterators: MutHostRef<'a, &'a PrefixIterators<'a, D>>, /// VP gas meter. In `RefCell` to charge gas in read-only fns. pub gas_meter: HostRef<'a, &'a RefCell>, - /// Errors sentinel. In `RefCell` to charge gas in read-only fns. - pub sentinel: HostRef<'a, &'a RefCell>, /// The transaction code is used for signature verification pub tx: HostRef<'a, &'a Tx>, /// The transaction index is used to identify a shielded transaction's @@ -336,6 +342,8 @@ where pub eval_runner: HostRef<'a, &'a EVAL>, /// Cache for 2-step reads from host environment. pub result_buffer: MutHostRef<'a, &'a Option>>, + /// Storage for byte buffer values yielded from the guest. + pub yielded_value: MutHostRef<'a, &'a Option>>, /// The storage keys that have been changed. Used for calls to `eval`. pub keys_changed: HostRef<'a, &'a BTreeSet>, /// The verifiers whose validity predicates should be triggered. 
Used for @@ -396,12 +404,12 @@ where in_mem: &InMemory, db: &D, gas_meter: &RefCell, - sentinel: &RefCell, tx: &Tx, tx_index: &TxIndex, iterators: &mut PrefixIterators<'a, D>, verifiers: &BTreeSet
, result_buffer: &mut Option>, + yielded_value: &mut Option>, keys_changed: &BTreeSet, eval_runner: &EVAL, #[cfg(feature = "wasm-runtime")] vp_wasm_cache: &mut VpCache, @@ -412,12 +420,12 @@ where in_mem, db, gas_meter, - sentinel, tx, tx_index, iterators, verifiers, result_buffer, + yielded_value, keys_changed, eval_runner, #[cfg(feature = "wasm-runtime")] @@ -470,12 +478,12 @@ where in_mem: &InMemory, db: &D, gas_meter: &RefCell, - sentinel: &RefCell, tx: &Tx, tx_index: &TxIndex, iterators: &mut PrefixIterators<'a, D>, verifiers: &BTreeSet
, result_buffer: &mut Option>, + yielded_value: &mut Option>, keys_changed: &BTreeSet, eval_runner: &EVAL, #[cfg(feature = "wasm-runtime")] vp_wasm_cache: &mut VpCache, @@ -488,9 +496,9 @@ where let tx_index = unsafe { HostRef::new(tx_index) }; let iterators = unsafe { MutHostRef::new(iterators) }; let gas_meter = unsafe { HostRef::new(gas_meter) }; - let sentinel = unsafe { HostRef::new(sentinel) }; let verifiers = unsafe { HostRef::new(verifiers) }; let result_buffer = unsafe { MutHostRef::new(result_buffer) }; + let yielded_value = unsafe { MutHostRef::new(yielded_value) }; let keys_changed = unsafe { HostRef::new(keys_changed) }; let eval_runner = unsafe { HostRef::new(eval_runner) }; #[cfg(feature = "wasm-runtime")] @@ -502,11 +510,11 @@ where in_mem, iterators, gas_meter, - sentinel, tx, tx_index, eval_runner, result_buffer, + yielded_value, keys_changed, verifiers, #[cfg(feature = "wasm-runtime")] @@ -522,23 +530,18 @@ where let db = unsafe { self.db.get() }; let in_mem = unsafe { self.in_mem.get() }; let gas_meter = unsafe { self.gas_meter.get() }; - let sentinel = unsafe { self.sentinel.get() }; VpHostEnvState { write_log, db, in_mem, gas_meter, - sentinel, } } - /// Use gas meter and sentinel - pub fn gas_meter_and_sentinel( - &self, - ) -> (&RefCell, &RefCell) { + /// Use gas meter + pub fn gas_meter(&self) -> &RefCell { let gas_meter = unsafe { self.gas_meter.get() }; - let sentinel = unsafe { self.sentinel.get() }; - (gas_meter, sentinel) + gas_meter } } @@ -557,11 +560,11 @@ where in_mem: self.in_mem.clone(), iterators: self.iterators.clone(), gas_meter: self.gas_meter.clone(), - sentinel: self.sentinel.clone(), tx: self.tx.clone(), tx_index: self.tx_index.clone(), eval_runner: self.eval_runner.clone(), result_buffer: self.result_buffer.clone(), + yielded_value: self.yielded_value.clone(), keys_changed: self.keys_changed.clone(), verifiers: self.verifiers.clone(), #[cfg(feature = "wasm-runtime")] @@ -608,8 +611,8 @@ where EVAL: VpEvaluator, CA: 
WasmCacheAccess, { - let (gas_meter, sentinel) = env.ctx.gas_meter_and_sentinel(); - vp_host_fns::add_gas(gas_meter, used_gas, sentinel) + let gas_meter = env.ctx.gas_meter(); + vp_host_fns::add_gas(gas_meter, used_gas) } /// Storage `has_key` function exposed to the wasm VM Tx environment. It will @@ -683,6 +686,57 @@ where } } +/// Read temporary value (not committed to storage) from the given key function +/// exposed to the wasm VM Tx environment. It will try to read from the write +/// log only. +/// +/// Returns `-1` when the key is not present, or the length of the data when +/// the key is present (the length may be `0`). +pub fn tx_read_temp( + env: &TxVmEnv, + key_ptr: u64, + key_len: u64, +) -> TxResult +where + MEM: VmMemory, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, + CA: WasmCacheAccess, +{ + let (key, gas) = env + .memory + .read_string(key_ptr, key_len as _) + .map_err(|e| TxRuntimeError::MemoryError(Box::new(e)))?; + tx_charge_gas::(env, gas)?; + + tracing::debug!("tx_read_temp {}, key {}", key, key_ptr,); + + let key = Key::parse(key).map_err(TxRuntimeError::StorageDataError)?; + + let write_log = unsafe { env.ctx.write_log.get() }; + let (log_val, gas) = write_log.read(&key); + tx_charge_gas::(env, gas)?; + let value = match log_val { + Some(write_log::StorageModification::Temp { ref value }) => { + Ok(Some(value.clone())) + } + None => Ok(None), + _ => Err(TxRuntimeError::ReadPermanentValueError), + }?; + match value { + Some(value) => { + let len: i64 = value + .len() + .try_into() + .map_err(TxRuntimeError::NumConversionError)?; + let result_buffer = unsafe { env.ctx.result_buffer.get() }; + result_buffer.replace(value); + Ok(len) + } + None => Ok(HostEnvResult::Fail.to_i64()), + } +} + /// This function is a helper to handle the first step of reading var-len /// values from the host.
/// @@ -796,19 +850,9 @@ where // a VP of a new account doesn't need to be iterated continue; } - Some(write_log::StorageModification::Temp { ref value }) => { - let key_val = borsh::to_vec(&KeyVal { - key, - val: value.clone(), - }) - .map_err(TxRuntimeError::EncodingError)?; - let len: i64 = key_val - .len() - .try_into() - .map_err(TxRuntimeError::NumConversionError)?; - let result_buffer = unsafe { env.ctx.result_buffer.get() }; - result_buffer.replace(key_val); - return Ok(len); + Some(write_log::StorageModification::Temp { .. }) => { + // temporary values are not returned by the iterator + continue; } None => { let key_val = borsh::to_vec(&KeyVal { key, val }) @@ -938,25 +982,16 @@ where continue; } let vp_key = Key::validity_predicate(&addr); - let (vp, gas) = state.write_log().read(&vp_key); - tx_charge_gas::(env, gas)?; - // just check the existence because the write log should not have the - // delete log of the VP - if vp.is_none() { - let (is_present, gas) = state - .db_has_key(&vp_key) - .map_err(TxRuntimeError::StateError)?; - tx_charge_gas::(env, gas)?; - if !is_present { - tracing::info!( - "Trying to write into storage with a key containing an \ - address that doesn't exist: {}", - addr - ); - return Err(TxRuntimeError::UnknownAddressStorageModification( - addr, - )); - } + let is_present = state.has_key(&vp_key)?; + if !is_present { + tracing::info!( + "Trying to write into storage with a key containing an \ + address that doesn't exist: {}", + addr + ); + return Err(TxRuntimeError::UnknownAddressStorageModification( + addr, + )); } } Ok(()) @@ -1073,14 +1108,14 @@ where .memory .read_string(key_ptr, key_len as _) .map_err(|e| vp_host_fns::RuntimeError::MemoryError(Box::new(e)))?; - let (gas_meter, sentinel) = env.ctx.gas_meter_and_sentinel(); - vp_host_fns::add_gas(gas_meter, gas, sentinel)?; + let gas_meter = env.ctx.gas_meter(); + vp_host_fns::add_gas(gas_meter, gas)?; // try to read from the storage let key = 
Key::parse(key).map_err(vp_host_fns::RuntimeError::StorageDataError)?; let state = env.state(); - let value = vp_host_fns::read_pre(gas_meter, &state, &key, sentinel)?; + let value = vp_host_fns::read_pre(gas_meter, &state, &key)?; tracing::debug!( "vp_read_pre addr {}, key {}, value {:?}", unsafe { env.ctx.address.get() }, @@ -1123,8 +1158,8 @@ where .memory .read_string(key_ptr, key_len as _) .map_err(|e| vp_host_fns::RuntimeError::MemoryError(Box::new(e)))?; - let (gas_meter, sentinel) = env.ctx.gas_meter_and_sentinel(); - vp_host_fns::add_gas(gas_meter, gas, sentinel)?; + let gas_meter = env.ctx.gas_meter(); + vp_host_fns::add_gas(gas_meter, gas)?; tracing::debug!("vp_read_post {}, key {}", key, key_ptr,); @@ -1132,7 +1167,7 @@ where let key = Key::parse(key).map_err(vp_host_fns::RuntimeError::StorageDataError)?; let state = env.state(); - let value = vp_host_fns::read_post(gas_meter, &state, &key, sentinel)?; + let value = vp_host_fns::read_post(gas_meter, &state, &key)?; Ok(match value { Some(value) => { let len: i64 = value @@ -1168,8 +1203,8 @@ where .memory .read_string(key_ptr, key_len as _) .map_err(|e| vp_host_fns::RuntimeError::MemoryError(Box::new(e)))?; - let (gas_meter, sentinel) = env.ctx.gas_meter_and_sentinel(); - vp_host_fns::add_gas(gas_meter, gas, sentinel)?; + let gas_meter = env.ctx.gas_meter(); + vp_host_fns::add_gas(gas_meter, gas)?; tracing::debug!("vp_read_temp {}, key {}", key, key_ptr); @@ -1177,7 +1212,7 @@ where let key = Key::parse(key).map_err(vp_host_fns::RuntimeError::StorageDataError)?; let state = env.state(); - let value = vp_host_fns::read_temp(gas_meter, &state, &key, sentinel)?; + let value = vp_host_fns::read_temp(gas_meter, &state, &key)?; Ok(match value { Some(value) => { let len: i64 = value @@ -1219,8 +1254,8 @@ where .memory .write_bytes(result_ptr, value) .map_err(|e| vp_host_fns::RuntimeError::MemoryError(Box::new(e)))?; - let (gas_meter, sentinel) = env.ctx.gas_meter_and_sentinel(); - 
vp_host_fns::add_gas(gas_meter, gas, sentinel) + let gas_meter = env.ctx.gas_meter(); + vp_host_fns::add_gas(gas_meter, gas) } /// Storage `has_key` in prior state (before tx execution) function exposed to @@ -1241,15 +1276,15 @@ where .memory .read_string(key_ptr, key_len as _) .map_err(|e| vp_host_fns::RuntimeError::MemoryError(Box::new(e)))?; - let (gas_meter, sentinel) = env.ctx.gas_meter_and_sentinel(); - vp_host_fns::add_gas(gas_meter, gas, sentinel)?; + let gas_meter = env.ctx.gas_meter(); + vp_host_fns::add_gas(gas_meter, gas)?; tracing::debug!("vp_has_key_pre {}, key {}", key, key_ptr,); let key = Key::parse(key).map_err(vp_host_fns::RuntimeError::StorageDataError)?; let state = env.state(); - let present = vp_host_fns::has_key_pre(gas_meter, &state, &key, sentinel)?; + let present = vp_host_fns::has_key_pre(gas_meter, &state, &key)?; Ok(HostEnvResult::from(present).to_i64()) } @@ -1272,15 +1307,15 @@ where .memory .read_string(key_ptr, key_len as _) .map_err(|e| vp_host_fns::RuntimeError::MemoryError(Box::new(e)))?; - let (gas_meter, sentinel) = env.ctx.gas_meter_and_sentinel(); - vp_host_fns::add_gas(gas_meter, gas, sentinel)?; + let gas_meter = env.ctx.gas_meter(); + vp_host_fns::add_gas(gas_meter, gas)?; tracing::debug!("vp_has_key_post {}, key {}", key, key_ptr,); let key = Key::parse(key).map_err(vp_host_fns::RuntimeError::StorageDataError)?; let state = env.state(); - let present = vp_host_fns::has_key_post(gas_meter, &state, &key, sentinel)?; + let present = vp_host_fns::has_key_post(gas_meter, &state, &key)?; Ok(HostEnvResult::from(present).to_i64()) } @@ -1304,8 +1339,8 @@ where .memory .read_string(prefix_ptr, prefix_len as _) .map_err(|e| vp_host_fns::RuntimeError::MemoryError(Box::new(e)))?; - let (gas_meter, sentinel) = env.ctx.gas_meter_and_sentinel(); - vp_host_fns::add_gas(gas_meter, gas, sentinel)?; + let gas_meter = env.ctx.gas_meter(); + vp_host_fns::add_gas(gas_meter, gas)?; tracing::debug!("vp_iter_prefix_pre {}", prefix); @@ -1314,9 
+1349,7 @@ where let write_log = unsafe { env.ctx.write_log.get() }; let db = unsafe { env.ctx.db.get() }; - let iter = vp_host_fns::iter_prefix_pre( - gas_meter, write_log, db, &prefix, sentinel, - )?; + let iter = vp_host_fns::iter_prefix_pre(gas_meter, write_log, db, &prefix)?; let iterators = unsafe { env.ctx.iterators.get() }; Ok(iterators.insert(iter).id()) @@ -1342,8 +1375,8 @@ where .memory .read_string(prefix_ptr, prefix_len as _) .map_err(|e| vp_host_fns::RuntimeError::MemoryError(Box::new(e)))?; - let (gas_meter, sentinel) = env.ctx.gas_meter_and_sentinel(); - vp_host_fns::add_gas(gas_meter, gas, sentinel)?; + let gas_meter = env.ctx.gas_meter(); + vp_host_fns::add_gas(gas_meter, gas)?; tracing::debug!("vp_iter_prefix_post {}", prefix); @@ -1352,9 +1385,8 @@ where let write_log = unsafe { env.ctx.write_log.get() }; let db = unsafe { env.ctx.db.get() }; - let iter = vp_host_fns::iter_prefix_post( - gas_meter, write_log, db, &prefix, sentinel, - )?; + let iter = + vp_host_fns::iter_prefix_post(gas_meter, write_log, db, &prefix)?; let iterators = unsafe { env.ctx.iterators.get() }; Ok(iterators.insert(iter).id()) @@ -1381,10 +1413,8 @@ where let iterators = unsafe { env.ctx.iterators.get() }; let iter_id = PrefixIteratorId::new(iter_id); if let Some(iter) = iterators.get_mut(iter_id) { - let (gas_meter, sentinel) = env.ctx.gas_meter_and_sentinel(); - if let Some((key, val)) = - vp_host_fns::iter_next(gas_meter, iter, sentinel)? - { + let gas_meter = env.ctx.gas_meter(); + if let Some((key, val)) = vp_host_fns::iter_next(gas_meter, iter)? { let key_val = borsh::to_vec(&KeyVal { key, val }) .map_err(vp_host_fns::RuntimeError::EncodingError)?; let len: i64 = key_val @@ -1481,12 +1511,15 @@ where } /// Initialize a new account established address. 
+#[allow(clippy::too_many_arguments)] pub fn tx_init_account( env: &TxVmEnv, code_hash_ptr: u64, code_hash_len: u64, code_tag_ptr: u64, code_tag_len: u64, + entropy_source_ptr: u64, + entropy_source_len: u64, result_ptr: u64, ) -> TxResult<()> where @@ -1511,6 +1544,12 @@ where tx_validate_vp_code_hash::(env, &code_hash, &code_tag)?; + let (entropy_source, gas) = env + .memory + .read_bytes(entropy_source_ptr, entropy_source_len as _) + .map_err(|e| TxRuntimeError::MemoryError(Box::new(e)))?; + tx_charge_gas::(env, gas)?; + tracing::debug!("tx_init_account"); let code_hash = Hash::try_from(&code_hash[..]) @@ -1518,7 +1557,7 @@ where let mut state = env.state(); let (write_log, in_mem, _db) = state.split_borrow(); let gen = &in_mem.address_gen; - let (addr, gas) = write_log.init_account(gen, code_hash); + let (addr, gas) = write_log.init_account(gen, code_hash, &entropy_source); let addr_bytes = addr.serialize_to_vec(); tx_charge_gas::(env, gas)?; let gas = env @@ -1600,9 +1639,9 @@ where EVAL: VpEvaluator, CA: WasmCacheAccess, { - let (gas_meter, sentinel) = env.ctx.gas_meter_and_sentinel(); + let gas_meter = env.ctx.gas_meter(); let tx_index = unsafe { env.ctx.tx_index.get() }; - let tx_idx = vp_host_fns::get_tx_index(gas_meter, tx_index, sentinel)?; + let tx_idx = vp_host_fns::get_tx_index(gas_meter, tx_index)?; Ok(tx_idx.0) } @@ -1742,14 +1781,14 @@ where EVAL: VpEvaluator, CA: WasmCacheAccess, { - let (gas_meter, sentinel) = env.ctx.gas_meter_and_sentinel(); + let gas_meter = env.ctx.gas_meter(); let state = env.state(); - let chain_id = vp_host_fns::get_chain_id(gas_meter, &state, sentinel)?; + let chain_id = vp_host_fns::get_chain_id(gas_meter, &state)?; let gas = env .memory .write_string(result_ptr, chain_id) .map_err(|e| vp_host_fns::RuntimeError::MemoryError(Box::new(e)))?; - vp_host_fns::add_gas(gas_meter, gas, sentinel) + vp_host_fns::add_gas(gas_meter, gas) } /// Getting the block height function exposed to the wasm VM VP @@ -1765,9 +1804,9 @@ where 
EVAL: VpEvaluator, CA: WasmCacheAccess, { - let (gas_meter, sentinel) = env.ctx.gas_meter_and_sentinel(); + let gas_meter = env.ctx.gas_meter(); let state = env.state(); - let height = vp_host_fns::get_block_height(gas_meter, &state, sentinel)?; + let height = vp_host_fns::get_block_height(gas_meter, &state)?; Ok(height.0) } @@ -1783,12 +1822,12 @@ where EVAL: VpEvaluator, CA: WasmCacheAccess, { - let (gas_meter, sentinel) = env.ctx.gas_meter_and_sentinel(); + let gas_meter = env.ctx.gas_meter(); let state = env.state(); let (header, gas) = StateRead::get_block_header(&state, Some(BlockHeight(height))) .map_err(vp_host_fns::RuntimeError::StorageError)?; - vp_host_fns::add_gas(gas_meter, gas, sentinel)?; + vp_host_fns::add_gas(gas_meter, gas)?; Ok(match header { Some(h) => { let value = h.serialize_to_vec(); @@ -1817,14 +1856,14 @@ where EVAL: VpEvaluator, CA: WasmCacheAccess, { - let (gas_meter, sentinel) = env.ctx.gas_meter_and_sentinel(); + let gas_meter = env.ctx.gas_meter(); let state = env.state(); - let hash = vp_host_fns::get_block_hash(gas_meter, &state, sentinel)?; + let hash = vp_host_fns::get_block_hash(gas_meter, &state)?; let gas = env .memory .write_bytes(result_ptr, hash.0) .map_err(|e| vp_host_fns::RuntimeError::MemoryError(Box::new(e)))?; - vp_host_fns::add_gas(gas_meter, gas, sentinel) + vp_host_fns::add_gas(gas_meter, gas) } /// Getting the transaction hash function exposed to the wasm VM VP environment. 
@@ -1839,9 +1878,9 @@ where EVAL: VpEvaluator, CA: WasmCacheAccess, { - let (gas_meter, sentinel) = env.ctx.gas_meter_and_sentinel(); + let gas_meter = env.ctx.gas_meter(); let tx = unsafe { env.ctx.tx.get() }; - let hash = vp_host_fns::get_tx_code_hash(gas_meter, tx, sentinel)?; + let hash = vp_host_fns::get_tx_code_hash(gas_meter, tx)?; let mut result_bytes = vec![]; if let Some(hash) = hash { result_bytes.push(1); @@ -1853,7 +1892,7 @@ where .memory .write_bytes(result_ptr, result_bytes) .map_err(|e| vp_host_fns::RuntimeError::MemoryError(Box::new(e)))?; - vp_host_fns::add_gas(gas_meter, gas, sentinel) + vp_host_fns::add_gas(gas_meter, gas) } /// Getting the block epoch function exposed to the wasm VM VP @@ -1869,9 +1908,9 @@ where EVAL: VpEvaluator, CA: WasmCacheAccess, { - let (gas_meter, sentinel) = env.ctx.gas_meter_and_sentinel(); + let gas_meter = env.ctx.gas_meter(); let state = env.state(); - let epoch = vp_host_fns::get_block_epoch(gas_meter, &state, sentinel)?; + let epoch = vp_host_fns::get_block_epoch(gas_meter, &state)?; Ok(epoch.0) } @@ -1886,10 +1925,9 @@ where EVAL: VpEvaluator, CA: WasmCacheAccess, { - let (gas_meter, sentinel) = env.ctx.gas_meter_and_sentinel(); + let gas_meter = env.ctx.gas_meter(); let state = env.state(); - let pred_epochs = - vp_host_fns::get_pred_epochs(gas_meter, &state, sentinel)?; + let pred_epochs = vp_host_fns::get_pred_epochs(gas_meter, &state)?; let bytes = pred_epochs.serialize_to_vec(); let len: i64 = bytes .len() @@ -1917,8 +1955,8 @@ where .memory .read_string(event_type_ptr, event_type_len as _) .map_err(|e| vp_host_fns::RuntimeError::MemoryError(Box::new(e)))?; - let (gas_meter, sentinel) = env.ctx.gas_meter_and_sentinel(); - vp_host_fns::add_gas(gas_meter, gas, sentinel)?; + let gas_meter = env.ctx.gas_meter(); + vp_host_fns::add_gas(gas_meter, gas)?; let state = env.state(); let events = vp_host_fns::get_ibc_events(gas_meter, &state, event_type)?; @@ -1932,10 +1970,8 @@ where Ok(len) } -/// Verify a 
transaction signature -/// TODO: this is just a warkaround to track gas for multiple signature -/// verifications. When the runtime gas meter is implemented, this function can -/// be removed +/// Verify a transaction signature in the host environment for better +/// performance #[allow(clippy::too_many_arguments)] pub fn vp_verify_tx_section_signature( env: &VpVmEnv, @@ -1948,7 +1984,7 @@ pub fn vp_verify_tx_section_signature( threshold: u8, max_signatures_ptr: u64, max_signatures_len: u64, -) -> vp_host_fns::EnvResult +) -> vp_host_fns::EnvResult<()> where MEM: VmMemory, D: 'static + DB + for<'iter> DBIter<'iter>, @@ -1961,8 +1997,8 @@ where .read_bytes(hash_list_ptr, hash_list_len as _) .map_err(|e| vp_host_fns::RuntimeError::MemoryError(Box::new(e)))?; - let (gas_meter, sentinel) = env.ctx.gas_meter_and_sentinel(); - vp_host_fns::add_gas(gas_meter, gas, sentinel)?; + let gas_meter = env.ctx.gas_meter(); + vp_host_fns::add_gas(gas_meter, gas)?; let hashes = <[Hash; 1]>::try_from_slice(&hash_list) .map_err(vp_host_fns::RuntimeError::EncodingError)?; @@ -1970,7 +2006,7 @@ where .memory .read_bytes(public_keys_map_ptr, public_keys_map_len as _) .map_err(|e| vp_host_fns::RuntimeError::MemoryError(Box::new(e)))?; - vp_host_fns::add_gas(gas_meter, gas, sentinel)?; + vp_host_fns::add_gas(gas_meter, gas)?; let public_keys_map = namada_core::account::AccountPublicKeysMap::try_from_slice( &public_keys_map, @@ -1981,7 +2017,7 @@ where .memory .read_bytes(signer_ptr, signer_len as _) .map_err(|e| vp_host_fns::RuntimeError::MemoryError(Box::new(e)))?; - vp_host_fns::add_gas(gas_meter, gas, sentinel)?; + vp_host_fns::add_gas(gas_meter, gas)?; let signer = Address::try_from_slice(&signer) .map_err(vp_host_fns::RuntimeError::EncodingError)?; @@ -1989,7 +2025,7 @@ where .memory .read_bytes(max_signatures_ptr, max_signatures_len as _) .map_err(|e| vp_host_fns::RuntimeError::MemoryError(Box::new(e)))?; - vp_host_fns::add_gas(gas_meter, gas, sentinel)?; + 
vp_host_fns::add_gas(gas_meter, gas)?; let max_signatures = Option::::try_from_slice(&max_signatures) .map_err(vp_host_fns::RuntimeError::EncodingError)?; @@ -2003,17 +2039,15 @@ where max_signatures, || gas_meter.borrow_mut().consume(gas::VERIFY_TX_SIG_GAS), ) { - Ok(_) => Ok(HostEnvResult::Success.to_i64()), + Ok(_) => Ok(()), Err(err) => match err { namada_tx::VerifySigError::Gas(inner) => { - sentinel.borrow_mut().set_out_of_gas(); Err(vp_host_fns::RuntimeError::OutOfGas(inner)) } - namada_tx::VerifySigError::InvalidSectionSignature(_) => { - sentinel.borrow_mut().set_invalid_signature(); - Ok(HostEnvResult::Fail.to_i64()) + namada_tx::VerifySigError::InvalidSectionSignature(inner) => { + Err(vp_host_fns::RuntimeError::InvalidSectionSignature(inner)) } - _ => Ok(HostEnvResult::Fail.to_i64()), + err => Err(vp_host_fns::RuntimeError::Erased(err.to_string())), }, } } @@ -2045,7 +2079,7 @@ where // workaround wasm issue. pub fn tx_ibc_execute( env: &TxVmEnv, -) -> TxResult<()> +) -> TxResult where MEM: VmMemory, D: 'static + DB + for<'iter> DBIter<'iter>, @@ -2054,20 +2088,51 @@ where { use std::rc::Rc; - use namada_ibc::{CompatibleIbcTxHostEnvState, IbcActions, TransferModule}; + use namada_ibc::{IbcActions, NftTransferModule, TransferModule}; - let tx_data = unsafe { env.ctx.tx.get().data() }.ok_or_else(|| { + tx_charge_gas::(env, IBC_TX_GAS)?; + + let tx = unsafe { env.ctx.tx.get() }; + let tx_data = tx.data().ok_or_else(|| { let sentinel = unsafe { env.ctx.sentinel.get() }; sentinel.borrow_mut().set_invalid_commitment(); TxRuntimeError::MissingTxData })?; - let state = Rc::new(RefCell::new(CompatibleIbcTxHostEnvState(env.state()))); - let mut actions = IbcActions::new(state.clone()); - let module = TransferModule::new(state); - actions.add_transfer_module(module.module_id(), module); - actions.execute(&tx_data)?; + let state = Rc::new(RefCell::new(env.state())); + // Verifier set populated in tx execution + let verifiers = Rc::new(RefCell::new(BTreeSet::
::new())); + // Scoped to drop `verifiers.clone`s after `actions.execute` + let transfer = { + let mut actions = IbcActions::new(state.clone(), verifiers.clone()); + let module = TransferModule::new(state.clone(), verifiers.clone()); + actions.add_transfer_module(module); + let module = NftTransferModule::new(state); + actions.add_transfer_module(module); + actions.execute(&tx_data)? + }; + // NB: There must be no other strong references to this Rc + let verifiers = Rc::into_inner(verifiers) + .expect("There must be only one strong ref to verifiers set") + .into_inner(); + + // Insert all the verifiers from the tx into the verifier set in env + let verifiers_in_env = unsafe { env.ctx.verifiers.get() }; + for addr in verifiers.into_iter() { + tx_charge_gas::( + env, + ESTABLISHED_ADDRESS_BYTES_LEN as u64 * MEMORY_ACCESS_GAS_PER_BYTE, + )?; + verifiers_in_env.insert(addr); + } - Ok(()) + let value = transfer.serialize_to_vec(); + let len: i64 = value + .len() + .try_into() + .map_err(TxRuntimeError::NumConversionError)?; + let result_buffer = unsafe { env.ctx.result_buffer.get() }; + result_buffer.replace(value); + Ok(len) } /// Validate a VP WASM code hash in a tx environment. @@ -2120,18 +2185,11 @@ where // Then check that the corresponding VP code does indeed exist let code_key = Key::wasm_code(&code_hash); - let (result, gas) = state.write_log().read(&code_key); - tx_charge_gas::(env, gas)?; - if result.is_none() { - let (is_present, gas) = state - .db_has_key(&code_key) - .map_err(TxRuntimeError::StateError)?; - tx_charge_gas::(env, gas)?; - if !is_present { - return Err(TxRuntimeError::InvalidVpCodeHash( - "The corresponding VP code doesn't exist".to_string(), - )); - } + let is_present = state.has_key(&code_key)?; + if !is_present { + return Err(TxRuntimeError::InvalidVpCodeHash( + "The corresponding VP code doesn't exist".to_string(), + )); } Ok(()) } @@ -2258,6 +2316,28 @@ where } } +/// Yield a byte array value from the guest. 
+pub fn tx_yield_value( + env: &TxVmEnv, + buf_ptr: u64, + buf_len: u64, +) -> TxResult<()> +where + MEM: VmMemory, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, + CA: WasmCacheAccess, +{ + let (value_to_yield, gas) = env + .memory + .read_bytes(buf_ptr, buf_len as _) + .map_err(|e| TxRuntimeError::MemoryError(Box::new(e)))?; + tx_charge_gas::(env, gas)?; + let host_buf = unsafe { env.ctx.yielded_value.get() }; + host_buf.replace(value_to_yield); + Ok(()) +} + /// Evaluate a validity predicate with the given input data. pub fn vp_eval( env: &VpVmEnv<'static, MEM, D, H, EVAL, CA>, @@ -2278,17 +2358,17 @@ where .read_bytes(vp_code_hash_ptr, vp_code_hash_len as _) .map_err(|e| vp_host_fns::RuntimeError::MemoryError(Box::new(e)))?; - // The borrowed `gas_meter` and `sentinel` must be dropped before eval, - // which has to borrow these too. + // The borrowed `gas_meter` must be dropped before eval, + // which has to borrow it too. let tx = { - let (gas_meter, sentinel) = env.ctx.gas_meter_and_sentinel(); - vp_host_fns::add_gas(gas_meter, gas, sentinel)?; + let gas_meter = env.ctx.gas_meter(); + vp_host_fns::add_gas(gas_meter, gas)?; let (input_data, gas) = env .memory .read_bytes(input_data_ptr, input_data_len as _) .map_err(|e| vp_host_fns::RuntimeError::MemoryError(Box::new(e)))?; - vp_host_fns::add_gas(gas_meter, gas, sentinel)?; + vp_host_fns::add_gas(gas_meter, gas)?; let tx: Tx = BorshDeserialize::try_from_slice(&input_data) .map_err(vp_host_fns::RuntimeError::EncodingError)?; tx @@ -2316,16 +2396,15 @@ where EVAL: VpEvaluator, CA: WasmCacheAccess, { - let (gas_meter, sentinel) = env.ctx.gas_meter_and_sentinel(); + let gas_meter = env.ctx.gas_meter(); let state = env.state(); - let native_token = - vp_host_fns::get_native_token(gas_meter, &state, sentinel)?; + let native_token = vp_host_fns::get_native_token(gas_meter, &state)?; let native_token_string = native_token.encode(); let gas = env .memory .write_string(result_ptr, 
native_token_string) .map_err(|e| vp_host_fns::RuntimeError::MemoryError(Box::new(e)))?; - vp_host_fns::add_gas(gas_meter, gas, sentinel) + vp_host_fns::add_gas(gas_meter, gas) } /// Log a string from exposed to the wasm VM VP environment. The message will be @@ -2351,6 +2430,29 @@ where Ok(()) } +/// Yield a byte array value from the guest. +pub fn vp_yield_value( + env: &VpVmEnv, + buf_ptr: u64, + buf_len: u64, +) -> vp_host_fns::EnvResult<()> +where + MEM: VmMemory, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, + EVAL: VpEvaluator, + CA: WasmCacheAccess, +{ + let (value_to_yield, gas) = env + .memory + .read_bytes(buf_ptr, buf_len as _) + .map_err(|e| vp_host_fns::RuntimeError::MemoryError(Box::new(e)))?; + vp_host_fns::add_gas(env.ctx.gas_meter(), gas)?; + let host_buf = unsafe { env.ctx.yielded_value.get() }; + host_buf.replace(value_to_yield); + Ok(()) +} + /// A helper module for testing #[cfg(feature = "testing")] pub mod testing { @@ -2369,6 +2471,7 @@ pub mod testing { tx: &Tx, tx_index: &TxIndex, result_buffer: &mut Option>, + yielded_value: &mut Option>, #[cfg(feature = "wasm-runtime")] vp_wasm_cache: &mut VpCache, #[cfg(feature = "wasm-runtime")] tx_wasm_cache: &mut TxCache, ) -> TxVmEnv< @@ -2395,6 +2498,7 @@ pub mod testing { tx_index, verifiers, result_buffer, + yielded_value, #[cfg(feature = "wasm-runtime")] vp_wasm_cache, #[cfg(feature = "wasm-runtime")] @@ -2413,6 +2517,7 @@ pub mod testing { tx: &Tx, tx_index: &TxIndex, result_buffer: &mut Option>, + yielded_value: &mut Option>, #[cfg(feature = "wasm-runtime")] vp_wasm_cache: &mut VpCache, #[cfg(feature = "wasm-runtime")] tx_wasm_cache: &mut TxCache, ) -> TxVmEnv< @@ -2445,6 +2550,7 @@ pub mod testing { tx_index, verifiers, result_buffer, + yielded_value, #[cfg(feature = "wasm-runtime")] vp_wasm_cache, #[cfg(feature = "wasm-runtime")] @@ -2459,11 +2565,11 @@ pub mod testing { state: &S, iterators: &mut PrefixIterators<'static, ::D>, gas_meter: &RefCell, - sentinel: 
&RefCell, tx: &Tx, tx_index: &TxIndex, verifiers: &BTreeSet
, result_buffer: &mut Option>, + yielded_value: &mut Option>, keys_changed: &BTreeSet, eval_runner: &EVAL, #[cfg(feature = "wasm-runtime")] vp_wasm_cache: &mut VpCache, @@ -2487,12 +2593,12 @@ pub mod testing { state.in_mem(), state.db(), gas_meter, - sentinel, tx, tx_index, iterators, verifiers, result_buffer, + yielded_value, keys_changed, eval_runner, #[cfg(feature = "wasm-runtime")] diff --git a/crates/namada/src/vm/prefix_iter.rs b/crates/namada/src/vm/prefix_iter.rs index 0044e2d17a..2d9ceac4b7 100644 --- a/crates/namada/src/vm/prefix_iter.rs +++ b/crates/namada/src/vm/prefix_iter.rs @@ -1,8 +1,7 @@ //! The storage prefix iterators can be used to iterate over a common prefix of //! storage keys. -use std::collections::HashMap; - +use namada_core::collections::HashMap; use namada_state::PrefixIter; /// A temporary iterators storage, used during a wasm run after which it's diff --git a/crates/namada/src/vm/wasm/compilation_cache/common.rs b/crates/namada/src/vm/wasm/compilation_cache/common.rs index 08f03e0d04..56b31607ca 100644 --- a/crates/namada/src/vm/wasm/compilation_cache/common.rs +++ b/crates/namada/src/vm/wasm/compilation_cache/common.rs @@ -5,7 +5,6 @@ //! `universal` module). 
use std::collections::hash_map::RandomState; -use std::collections::HashMap; use std::fs; use std::marker::PhantomData; use std::num::NonZeroUsize; @@ -15,6 +14,7 @@ use std::thread::sleep; use std::time::Duration; use clru::{CLruCache, CLruCacheConfig, WeightScale}; +use namada_core::collections::HashMap; use wasmer::{Module, Store}; use wasmer_cache::{FileSystemCache, Hash as CacheHash}; @@ -366,7 +366,7 @@ impl Cache { err ); let mut progress = self.progress.write().unwrap(); - progress.remove(&hash); + progress.swap_remove(&hash); Err(err) } }, @@ -377,7 +377,7 @@ impl Cache { err ); let mut progress = self.progress.write().unwrap(); - progress.remove(&hash); + progress.swap_remove(&hash); Err(err) } } @@ -441,7 +441,7 @@ impl Cache { hash.to_string(), err ); - progress.remove(&hash); + progress.swap_remove(&hash); return Err(err); } }, @@ -453,7 +453,7 @@ impl Cache { hash.to_string(), err ); - progress.remove(&hash); + progress.swap_remove(&hash); return Err(err); } }; diff --git a/crates/namada/src/vm/wasm/host_env.rs b/crates/namada/src/vm/wasm/host_env.rs index 58655037f6..87600662b7 100644 --- a/crates/namada/src/vm/wasm/host_env.rs +++ b/crates/namada/src/vm/wasm/host_env.rs @@ -61,9 +61,8 @@ where "memory" => initial_memory, // Wasm middleware gas injection hook "gas" => Function::new_native_with_env(wasm_store, env.clone(), host_env::tx_charge_gas), - // Whitelisted gas exposed function, we need two different functions just because of colliding names in the vm_host_env macro to generate implementations - "namada_tx_charge_gas" => Function::new_native_with_env(wasm_store, env.clone(), host_env::tx_charge_gas), "namada_tx_read" => Function::new_native_with_env(wasm_store, env.clone(), host_env::tx_read), + "namada_tx_read_temp" => Function::new_native_with_env(wasm_store, env.clone(), host_env::tx_read_temp), "namada_tx_result_buffer" => Function::new_native_with_env(wasm_store, env.clone(), host_env::tx_result_buffer), "namada_tx_has_key" => 
Function::new_native_with_env(wasm_store, env.clone(), host_env::tx_has_key), "namada_tx_write" => Function::new_native_with_env(wasm_store, env.clone(), host_env::tx_write), @@ -89,6 +88,7 @@ where "namada_tx_set_commitment_sentinel" => Function::new_native_with_env(wasm_store, env.clone(), host_env::tx_set_commitment_sentinel), "namada_tx_verify_tx_section_signature" => Function::new_native_with_env(wasm_store, env.clone(), host_env::tx_verify_tx_section_signature), "namada_tx_update_masp_note_commitment_tree" => Function::new_native_with_env(wasm_store, env.clone(), host_env::tx_update_masp_note_commitment_tree), + "namada_tx_yield_value" => Function::new_native_with_env(wasm_store, env.clone(), host_env::tx_yield_value), }, } } @@ -112,8 +112,6 @@ where "memory" => initial_memory, // Wasm middleware gas injection hook "gas" => Function::new_native_with_env(wasm_store, env.clone(), host_env::vp_charge_gas), - // Whitelisted gas exposed function, we need two different functions just because of colliding names in the vm_host_env macro to generate implementations - "namada_vp_charge_gas" => Function::new_native_with_env(wasm_store, env.clone(), host_env::vp_charge_gas), "namada_vp_read_pre" => Function::new_native_with_env(wasm_store, env.clone(), host_env::vp_read_pre), "namada_vp_read_post" => Function::new_native_with_env(wasm_store, env.clone(), host_env::vp_read_post), "namada_vp_read_temp" => Function::new_native_with_env(wasm_store, env.clone(), host_env::vp_read_temp), @@ -132,6 +130,7 @@ where "namada_vp_get_block_epoch" => Function::new_native_with_env(wasm_store, env.clone(), host_env::vp_get_block_epoch), "namada_vp_get_pred_epochs" => Function::new_native_with_env(wasm_store, env.clone(), host_env::vp_get_pred_epochs), "namada_vp_get_ibc_events" => Function::new_native_with_env(wasm_store, env.clone(), host_env::vp_get_ibc_events), + "namada_vp_yield_value" => Function::new_native_with_env(wasm_store, env.clone(), host_env::vp_yield_value), 
"namada_vp_verify_tx_section_signature" => Function::new_native_with_env(wasm_store, env.clone(), host_env::vp_verify_tx_section_signature), "namada_vp_eval" => Function::new_native_with_env(wasm_store, env.clone(), host_env::vp_eval), "namada_vp_get_native_token" => Function::new_native_with_env(wasm_store, env.clone(), host_env::vp_get_native_token), diff --git a/crates/namada/src/vm/wasm/run.rs b/crates/namada/src/vm/wasm/run.rs index 817e5c6432..1b3d946a8b 100644 --- a/crates/namada/src/vm/wasm/run.rs +++ b/crates/namada/src/vm/wasm/run.rs @@ -2,17 +2,19 @@ use std::cell::RefCell; use std::collections::BTreeSet; +use std::error::Error as _; use std::fmt::Debug; use std::marker::PhantomData; +use std::num::NonZeroU32; use borsh::BorshDeserialize; -use namada_core::validity_predicate::VpSentinel; +use namada_core::validity_predicate::VpError; use namada_gas::{GasMetering, TxGasMeter, WASM_MEMORY_PAGE_GAS}; -use namada_state::write_log::StorageModification; -use namada_state::{DBIter, State, StateRead, StorageHasher, DB}; -use namada_tx::data::TxSentinel; +use namada_state::{DBIter, State, StateRead, StorageHasher, StorageRead, DB}; +use namada_tx::data::{TxSentinel, TxType}; use namada_tx::{Commitment, Section, Tx}; -use parity_wasm::elements; +use parity_wasm::elements::Instruction::*; +use parity_wasm::elements::{self, SignExtInstruction}; use thiserror::Error; use wasmer::{BaseTunables, Module, Store}; @@ -22,6 +24,7 @@ use crate::address::Address; use crate::hash::{Error as TxHashError, Hash}; use crate::internal::HostEnvResult; use crate::ledger::gas::VpGasMeter; +use crate::ledger::vp_host_fns; use crate::storage::{Key, TxIndex}; use crate::vm::host_env::{TxVmEnv, VpCtx, VpEvaluator, VpVmEnv}; use crate::vm::prefix_iter::PrefixIterators; @@ -29,16 +32,25 @@ use crate::vm::types::VpInput; use crate::vm::wasm::host_env::{tx_imports, vp_imports}; use crate::vm::wasm::{memory, Cache, CacheName, VpCache}; use crate::vm::{ - validate_untrusted_wasm, 
WasmCacheAccess, WasmValidationError, + validate_untrusted_wasm, MutHostRef, WasmCacheAccess, WasmValidationError, }; const TX_ENTRYPOINT: &str = "_apply_tx"; const VP_ENTRYPOINT: &str = "_validate_tx"; const WASM_STACK_LIMIT: u32 = u16::MAX as u32; +/// The error type returned by transactions. +// TODO: move this to `core`, to be shared with the wasm vm, +// and make it an `enum` of different variants +type TxError = String; + #[allow(missing_docs)] #[derive(Error, Debug)] pub enum Error { + #[error("VP error: {0}")] + VpError(VpError), + #[error("Transaction error: {0}")] + TxError(TxError), #[error("Missing tx section: {0}")] MissingSection(String), #[error("Memory error: {0}")] @@ -82,13 +94,39 @@ pub enum Error { GasError(String), #[error("Failed type conversion: {0}")] ConversionError(String), - #[error("Invalid transaction signature")] - InvalidTxSignature, + #[error("Storage error: {0}")] + StorageError(String), + #[error("Tx is not allowed in allowlist parameter")] + DisallowedTx, + #[error("Invalid transaction section signature: {0}")] + InvalidSectionSignature(String), } /// Result for functions that may fail pub type Result = std::result::Result; +/// Returns [`Error::DisallowedTx`] when the given tx is a user tx and its code +/// `Hash` is not included in the `tx_allowlist` parameter. +pub fn check_tx_allowed(tx: &Tx, storage: &S) -> Result<()> +where + S: StorageRead, +{ + if let TxType::Wrapper(_) = tx.header().tx_type { + if let Some(code_sec) = tx + .get_section(tx.code_sechash()) + .and_then(|x| Section::code_sec(&x)) + { + if crate::parameters::is_tx_allowed(storage, &code_sec.code.hash()) + .map_err(|e| Error::StorageError(e.to_string()))? + { + return Ok(()); + } + } + return Err(Error::DisallowedTx); + } + Ok(()) +} + /// Execute a transaction code. Returns the set verifiers addresses requested by /// the transaction. 
#[allow(clippy::too_many_arguments)] @@ -101,7 +139,7 @@ pub fn tx( tx_wasm_cache: &mut TxCache, ) -> Result> where - S: StateRead + State, + S: StateRead + State + StorageRead, CA: 'static + WasmCacheAccess, { let tx_code = tx @@ -109,6 +147,11 @@ where .and_then(|x| Section::code_sec(x.as_ref())) .ok_or(Error::MissingSection(tx.code_sechash().to_string()))?; + // Check if the tx code is allowed (to be done after the check on the code + // section commitment to let the replay protection mechanism run some + // optimizations) + check_tx_allowed(tx, state)?; + // If the transaction code has a tag, ensure that the tag hash equals the // transaction code's hash. if let Some(tag) = &tx_code.tag { @@ -147,6 +190,7 @@ where PrefixIterators::default(); let mut verifiers = BTreeSet::new(); let mut result_buffer: Option> = None; + let mut yielded_value: Option> = None; let sentinel = RefCell::new(TxSentinel::default()); let (write_log, in_mem, db) = state.split_borrow(); @@ -162,6 +206,7 @@ where tx_index, &mut verifiers, &mut result_buffer, + &mut yielded_value, vp_wasm_cache, tx_wasm_cache, ); @@ -189,12 +234,12 @@ where .exports .get_function(TX_ENTRYPOINT) .map_err(Error::MissingModuleEntrypoint)? 
- .native::<(u64, u64), ()>() + .native::<(u64, u64), u64>() .map_err(|error| Error::UnexpectedModuleEntrypointInterface { entrypoint: TX_ENTRYPOINT, error, })?; - apply_tx.call(tx_data_ptr, tx_data_len).map_err(|err| { + let ok = apply_tx.call(tx_data_ptr, tx_data_len).map_err(|err| { tracing::debug!("Tx WASM failed with {}", err); match *sentinel.borrow() { TxSentinel::None => Error::RuntimeError(err), @@ -205,7 +250,28 @@ where } })?; - Ok(verifiers) + if ok == 1 { + Ok(verifiers) + } else { + // NB: drop imports so we can safely access the + // `&mut` ptrs we shared with the guest + _ = (instance, imports); + + let err = yielded_value.take().map_or_else( + || Ok("Execution ended abruptly with an unknown error".to_owned()), + |borsh_encoded_err| { + let tx_err = TxError::try_from_slice(&borsh_encoded_err) + .map_err(|e| Error::ConversionError(e.to_string()))?; + Ok(tx_err) + }, + )?; + + Err(match *sentinel.borrow() { + TxSentinel::None => Error::TxError(err), + TxSentinel::OutOfGas => Error::GasError(err), + TxSentinel::InvalidCommitment => Error::MissingSection(err), + }) + } } /// Execute a validity predicate code. Returns whether the validity @@ -222,7 +288,7 @@ pub fn vp( keys_changed: &BTreeSet, verifiers: &BTreeSet
, mut vp_wasm_cache: VpCache, -) -> Result +) -> Result<()> where S: StateRead, CA: 'static + WasmCacheAccess, @@ -238,13 +304,13 @@ where let mut iterators: PrefixIterators<'_, ::D> = PrefixIterators::default(); let mut result_buffer: Option> = None; + let mut yielded_value: Option> = None; let eval_runner = VpEvalWasm::<::D, ::H, CA> { db: PhantomData, hasher: PhantomData, cache_access: PhantomData, }; - let sentinel = RefCell::new(VpSentinel::default()); let env = VpVmEnv::new( WasmMemory::default(), address, @@ -252,12 +318,12 @@ where state.in_mem(), state.db(), gas_meter, - &sentinel, tx, tx_index, &mut iterators, verifiers, &mut result_buffer, + &mut yielded_value, keys_changed, &eval_runner, &mut vp_wasm_cache, @@ -265,9 +331,10 @@ where let initial_memory = memory::prepare_vp_memory(&store).map_err(Error::MemoryError)?; + let yielded_value_borrow = env.ctx.yielded_value.clone(); let imports = vp_imports(&store, initial_memory, env); - match run_vp( + run_vp( module, imports, &vp_code_hash, @@ -275,46 +342,21 @@ where address, keys_changed, verifiers, - ) { - Ok(accept) => { - if sentinel.borrow().is_invalid_signature() { - if accept { - // This is unexpected, if the signature is invalid the vp - // should have rejected the tx. Something must be wrong with - // the VP logic and we take the signature verification - // result as the reference. In this case we override the vp - // result and log the issue - tracing::warn!( - "VP of {address} accepted the transaction but \ - signaled that the signature was invalid. Overriding \ - the vp result to reject the transaction..." 
- ); - } - Err(Error::InvalidTxSignature) - } else { - Ok(accept) - } - } - Err(err) => { - if sentinel.borrow().is_out_of_gas() { - Err(Error::GasError(err.to_string())) - } else { - Err(err) - } - } - } + yielded_value_borrow, + ) } #[allow(clippy::too_many_arguments)] fn run_vp( module: wasmer::Module, vp_imports: wasmer::ImportObject, - _vp_code_hash: &Hash, + vp_code_hash: &Hash, input_data: &Tx, address: &Address, keys_changed: &BTreeSet, verifiers: &BTreeSet
, -) -> Result { + yielded_value: MutHostRef<'_, &'_ Option>>, +) -> Result<()> { let input: VpInput = VpInput { addr: address, data: input_data, @@ -364,9 +406,48 @@ fn run_vp( verifiers_ptr, verifiers_len, ) - .map_err(Error::RuntimeError)?; - tracing::debug!("is_valid {}", is_valid); - Ok(is_valid == 1) + .map_err(|rt_error| { + let downcasted_err = || { + let source_err = rt_error.source()?; + let downcasted_vp_rt_err: &vp_host_fns::RuntimeError = + source_err.downcast_ref()?; + + match downcasted_vp_rt_err { + vp_host_fns::RuntimeError::OutOfGas(_) => { + Some(Error::GasError(rt_error.to_string())) + } + vp_host_fns::RuntimeError::InvalidSectionSignature(_) => { + Some(Error::InvalidSectionSignature( + rt_error.to_string(), + )) + } + _ => None, + } + }; + downcasted_err().unwrap_or(Error::RuntimeError(rt_error)) + })?; + tracing::debug!( + is_valid, + %vp_code_hash, + "wasm vp" + ); + + if is_valid == 1 { + Ok(()) + } else { + // NB: drop imports so we can safely access the + // `&mut` ptrs we shared with the guest + _ = (instance, vp_imports); + + unsafe { yielded_value.get() }.take().map_or_else( + || Err(Error::VpError(VpError::Unspecified)), + |borsh_encoded_err| { + let vp_err = VpError::try_from_slice(&borsh_encoded_err) + .map_err(|e| Error::ConversionError(e.to_string()))?; + Err(Error::VpError(vp_err)) + }, + ) + } } /// Validity predicate wasm evaluator for `eval` host function calls. 
@@ -402,13 +483,14 @@ where vp_code_hash: Hash, input_data: Tx, ) -> HostEnvResult { - match self.eval_native_result(ctx, vp_code_hash, input_data) { - Ok(ok) => HostEnvResult::from(ok), - Err(err) => { - tracing::warn!("VP eval error {}", err); - HostEnvResult::Fail - } - } + self.eval_native_result(ctx, vp_code_hash, input_data) + .map_or_else( + |err| { + tracing::warn!("VP eval error {err}"); + HostEnvResult::Fail + }, + |()| HostEnvResult::Success, + ) } } @@ -424,7 +506,7 @@ where ctx: VpCtx<'static, D, H, Self, CA>, vp_code_hash: Hash, input_data: Tx, - ) -> Result { + ) -> Result<()> { let address = unsafe { ctx.address.get() }; let keys_changed = unsafe { ctx.keys_changed.get() }; let verifiers = unsafe { ctx.verifiers.get() }; @@ -446,6 +528,7 @@ where memory: WasmMemory::default(), ctx, }; + let yielded_value_borrow = env.ctx.yielded_value.clone(); let imports = vp_imports(&store, initial_memory, env); run_vp( @@ -456,6 +539,7 @@ where address, keys_changed, verifiers, + yielded_value_borrow, ) } } @@ -479,7 +563,7 @@ pub fn prepare_wasm_code>(code: T) -> Result> { wasm_instrument::gas_metering::host_function::Injector::new( "env", "gas", ), - &get_gas_rules(), + &GasRules, ) .map_err(|_original_module| Error::GasMeterInjection)?; let module = @@ -503,83 +587,56 @@ where { match code_or_hash { Commitment::Hash(code_hash) => { - let (module, store, tx_len) = match wasm_cache.fetch(code_hash)? { - Some((module, store)) => { - // Gas accounting even if the compiled module is in cache - let key = Key::wasm_code_len(code_hash); - let tx_len = match state.write_log().read(&key).0 { - Some(StorageModification::Write { value }) => { - u64::try_from_slice(value).map_err(|e| { - Error::ConversionError(e.to_string()) - }) - } - _ => match state - .db_read(&key) - .map_err(|e| { - Error::LoadWasmCode(format!( - "Read wasm code length failed from \ - storage: key {}, error {}", - key, e - )) - })? 
- .0 - { - Some(v) => u64::try_from_slice(&v).map_err(|e| { - Error::ConversionError(e.to_string()) - }), - None => Err(Error::LoadWasmCode(format!( - "No wasm code length in storage: key {}", - key - ))), - }, - }?; - - (module, store, tx_len) - } + let code_len_key = Key::wasm_code_len(code_hash); + let tx_len = state + .read::(&code_len_key) + .map_err(|e| { + Error::LoadWasmCode(format!( + "Read wasm code length failed: key {code_len_key}, \ + error {e}" + )) + })? + .ok_or_else(|| { + Error::LoadWasmCode(format!( + "No wasm code length in storage: key {code_len_key}" + )) + })?; + + // Gas accounting in any case, even if the compiled module is in + // cache + gas_meter + .borrow_mut() + .add_wasm_load_from_storage_gas(tx_len) + .map_err(|e| Error::GasError(e.to_string()))?; + gas_meter + .borrow_mut() + .add_compiling_gas(tx_len) + .map_err(|e| Error::GasError(e.to_string()))?; + + let (module, store) = match wasm_cache.fetch(code_hash)? { + Some((module, store)) => (module, store), None => { let key = Key::wasm_code(code_hash); - let code = match state.write_log().read(&key).0 { - Some(StorageModification::Write { value }) => { - value.clone() - } - _ => match state - .db_read(&key) - .map_err(|e| { - Error::LoadWasmCode(format!( - "Read wasm code failed from storage: key \ - {}, error {}", - key, e - )) - })? - .0 - { - Some(v) => v, - None => { - return Err(Error::LoadWasmCode(format!( - "No wasm code in storage: key {}", - key - ))); - } - }, - }; - let tx_len = u64::try_from(code.len()) - .map_err(|e| Error::ConversionError(e.to_string()))?; + let code = state + .read_bytes(&key) + .map_err(|e| { + Error::LoadWasmCode(format!( + "Read wasm code failed: key {key}, error {e}" + )) + })? + .ok_or_else(|| { + Error::LoadWasmCode(format!( + "No wasm code in storage: key {key}" + )) + })?; match wasm_cache.compile_or_fetch(code)? 
{ - Some((module, store)) => (module, store, tx_len), + Some((module, store)) => (module, store), None => return Err(Error::NoCompiledWasmCode), } } }; - gas_meter - .borrow_mut() - .add_wasm_load_from_storage_gas(tx_len) - .map_err(|e| Error::GasError(e.to_string()))?; - gas_meter - .borrow_mut() - .add_compiling_gas(tx_len) - .map_err(|e| Error::GasError(e.to_string()))?; Ok((module, store)) } Commitment::Id(code) => { @@ -602,18 +659,217 @@ where } } -/// Get the gas rules used to meter wasm operations -fn get_gas_rules() -> wasm_instrument::gas_metering::ConstantCostRules { - // NOTE: costs set to 0 don't actually trigger the injection of a call to - // the gas host function (no useless instructions are injected) - let instruction_cost = 0; - let memory_grow_cost = WASM_MEMORY_PAGE_GAS; - let call_per_local_cost = 0; - wasm_instrument::gas_metering::ConstantCostRules::new( - instruction_cost, - memory_grow_cost, - call_per_local_cost, - ) +struct GasRules; + +impl wasm_instrument::gas_metering::Rules for GasRules { + fn instruction_cost( + &self, + instruction: &wasm_instrument::parity_wasm::elements::Instruction, + ) -> Option { + // NOTE: costs set to 0 don't actually trigger the injection of a call + // to the gas host function (no useless instructions are + // injected) + // NOTE: these costs are taken from the benchmarks crate. 
None of them + // should be zero + let gas = match instruction { + Unreachable => 129_358, + // Just a flag, aribitrary cost of 1 + End => 1, + // Just a flag, aribitrary cost of 1 + Else => 1, + Nop => 1, + Block(_) => 1, + Loop(_) => 1, + If(_) => 4, + Br(_) => 27, + BrIf(_) => 36, + BrTable(_) => 70, + Return => 7, + Call(_) => 43, + CallIndirect(_, _) => 140, + Drop => 1, + Select => 37, + GetLocal(_) => 2, + SetLocal(_) => 2, + TeeLocal(_) => 2, + GetGlobal(_) => 3, + SetGlobal(_) => 4, + I32Load(_, _) => 5, + I64Load(_, _) => 5, + F32Load(_, _) => 6, + F64Load(_, _) => 6, + I32Load8S(_, _) => 5, + I32Load8U(_, _) => 5, + I32Load16S(_, _) => 5, + I32Load16U(_, _) => 5, + I64Load8S(_, _) => 5, + I64Load8U(_, _) => 5, + I64Load16S(_, _) => 5, + I64Load16U(_, _) => 5, + I64Load32S(_, _) => 5, + I64Load32U(_, _) => 5, + I32Store(_, _) => 5, + I64Store(_, _) => 7, + F32Store(_, _) => 5, + F64Store(_, _) => 6, + I32Store8(_, _) => 5, + I32Store16(_, _) => 15, + I64Store8(_, _) => 5, + I64Store16(_, _) => 15, + I64Store32(_, _) => 6, + CurrentMemory(_) => 108, + GrowMemory(_) => 394, + I32Const(_) => 1, + I64Const(_) => 1, + F32Const(_) => 1, + F64Const(_) => 1, + I32Eqz => 6, + I32Eq => 6, + I32Ne => 6, + I32LtS => 6, + I32LtU => 6, + I32GtS => 6, + I32GtU => 6, + I32LeS => 6, + I32LeU => 6, + I32GeS => 6, + I32GeU => 6, + I64Eqz => 7, + I64Eq => 7, + I64Ne => 7, + I64LtS => 7, + I64LtU => 7, + I64GtS => 7, + I64GtU => 7, + I64LeS => 7, + I64LeU => 7, + I64GeS => 7, + I64GeU => 7, + F32Eq => 8, + F32Ne => 8, + F32Lt => 8, + F32Gt => 8, + F32Le => 8, + F32Ge => 8, + F64Eq => 10, + F64Ne => 10, + F64Lt => 9, + F64Gt => 9, + F64Le => 9, + F64Ge => 9, + I32Clz => 35, + I32Ctz => 34, + I32Popcnt => 3, + I32Add => 3, + I32Sub => 3, + I32Mul => 5, + I32DivS => 17, + I32DivU => 17, + I32RemS => 41, + I32RemU => 17, + I32And => 3, + I32Or => 3, + I32Xor => 3, + I32Shl => 3, + I32ShrS => 3, + I32ShrU => 3, + I32Rotl => 3, + I32Rotr => 3, + I64Clz => 35, + I64Ctz => 34, + 
I64Popcnt => 3, + I64Add => 5, + I64Sub => 5, + I64Mul => 6, + I64DivS => 28, + I64DivU => 28, + I64RemS => 46, + I64RemU => 28, + I64And => 5, + I64Or => 5, + I64Xor => 5, + I64Shl => 4, + I64ShrS => 4, + I64ShrU => 4, + I64Rotl => 4, + I64Rotr => 4, + F32Abs => 4, + F32Neg => 3, + F32Ceil => 6, + F32Floor => 6, + F32Trunc => 6, + F32Nearest => 6, + F32Sqrt => 9, + F32Add => 6, + F32Sub => 6, + F32Mul => 6, + F32Div => 9, + F32Min => 50, + F32Max => 47, + F32Copysign => 6, + F64Abs => 6, + F64Neg => 4, + F64Ceil => 7, + F64Floor => 7, + F64Trunc => 7, + F64Nearest => 7, + F64Sqrt => 17, + F64Add => 7, + F64Sub => 7, + F64Mul => 7, + F64Div => 12, + F64Min => 52, + F64Max => 49, + F64Copysign => 11, + I32WrapI64 => 2, + I32TruncSF32 => 54, + I32TruncUF32 => 54, + I32TruncSF64 => 57, + I32TruncUF64 => 57, + I64ExtendSI32 => 2, + I64ExtendUI32 => 2, + I64TruncSF32 => 73, + I64TruncUF32 => 70, + I64TruncSF64 => 89, + I64TruncUF64 => 70, + F32ConvertSI32 => 12, + F32ConvertUI32 => 6, + F32ConvertSI64 => 6, + F32ConvertUI64 => 39, + F32DemoteF64 => 9, + F64ConvertSI32 => 12, + F64ConvertUI32 => 12, + F64ConvertSI64 => 12, + F64ConvertUI64 => 39, + F64PromoteF32 => 9, + I32ReinterpretF32 => 2, + I64ReinterpretF64 => 2, + F32ReinterpretI32 => 3, + F64ReinterpretI64 => 3, + SignExt(SignExtInstruction::I32Extend8S) => 1, + SignExt(SignExtInstruction::I32Extend16S) => 1, + SignExt(SignExtInstruction::I64Extend8S) => 1, + SignExt(SignExtInstruction::I64Extend16S) => 1, + SignExt(SignExtInstruction::I64Extend32S) => 1, + }; + + // We always return a cost, forbidden instructions should be rejected at + // validation time not here + Some(gas) + } + + fn memory_grow_cost( + &self, + ) -> wasm_instrument::gas_metering::MemoryGrowCost { + wasm_instrument::gas_metering::MemoryGrowCost::Linear( + NonZeroU32::new(WASM_MEMORY_PAGE_GAS) + .expect("Memory grow gas cost should be non-zero"), + ) + } + + fn call_per_local_cost(&self) -> u32 { + 1 + } } #[cfg(test)] @@ -624,7 +880,8 @@ mod 
tests { use itertools::Either; use namada_state::StorageWrite; use namada_test_utils::TestWasms; - use namada_tx::data::TxType; + use namada_token::DenominatedAmount; + use namada_tx::data::{Fee, TxType}; use namada_tx::{Code, Data}; use test_log::test; use wasmer_vm::TrapCode; @@ -635,7 +892,8 @@ mod tests { use crate::vm::host_env::TxRuntimeError; use crate::vm::wasm; - const TX_GAS_LIMIT: u64 = 10_000_000_000; + const TX_GAS_LIMIT: u64 = 10_000_000_000_000; + const OUT_OF_GAS_LIMIT: u64 = 10_000; /// Test that we sanitize accesses to invalid addresses in wasm memory. #[test] @@ -644,11 +902,10 @@ mod tests { r#" (module (import "env" "namada_tx_read" (func (param i64 i64) (result i64))) - (func (param i64 i64) + (func (param i64 i64) (result i64) i64.const 18446744073709551615 i64.const 1 (call 0) - drop ) (memory 16) (export "memory" (memory 0)) @@ -858,19 +1115,20 @@ mod tests { let (vp_cache, _) = wasm::compilation_cache::common::testing::cache(); // When the `eval`ed VP doesn't run out of memory, it should return // `true` - let passed = vp( - code_hash, - &outer_tx, - &tx_index, - &addr, - &state, - &gas_meter, - &keys_changed, - &verifiers, - vp_cache.clone(), - ) - .unwrap(); - assert!(passed); + assert!( + vp( + code_hash, + &outer_tx, + &tx_index, + &addr, + &state, + &gas_meter, + &keys_changed, + &verifiers, + vp_cache.clone(), + ) + .is_ok() + ); // Allocating `2^24` (16 MiB) should be above the memory limit and // should fail @@ -889,20 +1147,20 @@ mod tests { // When the `eval`ed VP runs out of memory, its result should be // `false`, hence we should also get back `false` from the VP that // called `eval`. 
- let passed = vp( - code_hash, - &outer_tx, - &tx_index, - &addr, - &state, - &gas_meter, - &keys_changed, - &verifiers, - vp_cache, - ) - .unwrap(); - - assert!(!passed); + assert!( + vp( + code_hash, + &outer_tx, + &tx_index, + &addr, + &state, + &gas_meter, + &keys_changed, + &verifiers, + vp_cache, + ) + .is_err() + ); } /// Test that when a validity predicate wasm goes over the memory limit @@ -1268,7 +1526,195 @@ mod tests { outer_tx.add_code(vec![], None).add_data(eval_vp); let (vp_cache, _) = wasm::compilation_cache::common::testing::cache(); - let passed = vp( + assert!( + vp( + code_hash, + &outer_tx, + &tx_index, + &addr, + &state, + &gas_meter, + &keys_changed, + &verifiers, + vp_cache, + ) + .is_err() + ); + } + + #[test] + fn test_apply_wasm_tx_allowlist() { + let mut state = TestState::default(); + + let tx_read_key = TestWasms::TxReadStorageKey.read_bytes(); + // store the wasm code + let read_code_hash = Hash::sha256(&tx_read_key); + let code_len = (tx_read_key.len() as u64).serialize_to_vec(); + let key = Key::wasm_code(&read_code_hash); + let len_key = Key::wasm_code_len(&read_code_hash); + state.write_bytes(&key, tx_read_key).unwrap(); + state.write_bytes(&len_key, code_len).unwrap(); + + let mut tx = Tx::new(state.in_mem().chain_id.clone(), None); + let mut wrapper_tx = Tx::from_type(TxType::Wrapper(Box::new( + namada_tx::data::WrapperTx::new( + Fee { + amount_per_gas_unit: DenominatedAmount::native(1.into()), + token: state.in_mem().native_token.clone(), + }, + namada_core::key::testing::common_sk_from_simple_seed(0) + .to_public(), + namada_state::Epoch(0), + 0.into(), + None, + ), + ))); + tx.add_code_from_hash(read_code_hash, None); + wrapper_tx.add_code_from_hash(read_code_hash, None); + tx.add_serialized_data(vec![]); + wrapper_tx.add_serialized_data(vec![]); + + // Check that using a disallowed wrapper tx leads to an error, but a raw + // tx is ok even if not allowlisted + { + let allowlist = vec![format!("{}-bad", read_code_hash)]; + 
crate::parameters::update_tx_allowlist_parameter( + &mut state, allowlist, + ) + .unwrap(); + state.commit_tx(); + + let result = check_tx_allowed(&wrapper_tx, &state); + assert_matches!(result.unwrap_err(), Error::DisallowedTx); + let result = check_tx_allowed(&tx, &state); + if let Err(result) = result { + assert!(!matches!(result, Error::DisallowedTx)); + } + } + + // Check that using an allowed wrapper tx doesn't lead to + // `Error::DisallowedTx` + { + let allowlist = vec![read_code_hash.to_string()]; + crate::parameters::update_tx_allowlist_parameter( + &mut state, allowlist, + ) + .unwrap(); + state.commit_tx(); + + let result = check_tx_allowed(&wrapper_tx, &state); + if let Err(result) = result { + assert!(!matches!(result, Error::DisallowedTx)); + } + } + } + + /// Test that when a function runs out of gas in guest, the execution is + /// aborted + #[test] + fn test_tx_out_of_gas_in_guest() { + let mut state = TestState::default(); + let gas_meter = RefCell::new(TxGasMeter::new_from_sub_limit( + OUT_OF_GAS_LIMIT.into(), + )); + let tx_index = TxIndex::default(); + + // This code will charge gas in a host function indefinetely + let tx_code = TestWasms::TxInfiniteGuestGas.read_bytes(); + // store the wasm code + let code_hash = Hash::sha256(&tx_code); + let key = Key::wasm_code(&code_hash); + let len_key = Key::wasm_code_len(&code_hash); + let code_len = (tx_code.len() as u64).serialize_to_vec(); + state.write_log_mut().write(&key, tx_code.clone()).unwrap(); + state.write_log_mut().write(&len_key, code_len).unwrap(); + + let (mut vp_cache, _) = + wasm::compilation_cache::common::testing::cache(); + let (mut tx_cache, _) = + wasm::compilation_cache::common::testing::cache(); + let mut outer_tx = Tx::from_type(TxType::Raw); + outer_tx.set_code(Code::new(tx_code.clone(), None)); + outer_tx.set_data(Data::new(vec![])); + let result = tx( + &mut state, + &gas_meter, + &tx_index, + &outer_tx, + &mut vp_cache, + &mut tx_cache, + ); + + 
assert!(matches!(result.unwrap_err(), Error::GasError(_))); + } + + /// Test that when a function runs out of gas in host, the execution is + /// aborted from the host env (no cooperation required by the guest). + #[test] + fn test_tx_out_of_gas_in_host() { + let mut state = TestState::default(); + let gas_meter = RefCell::new(TxGasMeter::new_from_sub_limit( + OUT_OF_GAS_LIMIT.into(), + )); + let tx_index = TxIndex::default(); + + // This code will charge gas in a host function indefinetely + let tx_code = TestWasms::TxInfiniteHostGas.read_bytes(); + // store the wasm code + let code_hash = Hash::sha256(&tx_code); + let key = Key::wasm_code(&code_hash); + let len_key = Key::wasm_code_len(&code_hash); + let code_len = (tx_code.len() as u64).serialize_to_vec(); + state.write_log_mut().write(&key, tx_code.clone()).unwrap(); + state.write_log_mut().write(&len_key, code_len).unwrap(); + + let (mut vp_cache, _) = + wasm::compilation_cache::common::testing::cache(); + let (mut tx_cache, _) = + wasm::compilation_cache::common::testing::cache(); + let mut outer_tx = Tx::from_type(TxType::Raw); + outer_tx.set_code(Code::new(tx_code.clone(), None)); + outer_tx.set_data(Data::new(vec![])); + let result = tx( + &mut state, + &gas_meter, + &tx_index, + &outer_tx, + &mut vp_cache, + &mut tx_cache, + ); + + assert!(matches!(result.unwrap_err(), Error::GasError(_))); + } + + /// Test that when a vp runs out of gas in guest, the execution is aborted + #[test] + fn test_vp_out_of_gas_in_guest() { + let mut state = TestState::default(); + let tx_index = TxIndex::default(); + + let addr = state.in_mem_mut().address_gen.generate_address("rng seed"); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(OUT_OF_GAS_LIMIT.into()), + )); + let keys_changed = BTreeSet::new(); + let verifiers = BTreeSet::new(); + + // This code will charge gas in a host function indefinetely + let tx_code = TestWasms::VpInfiniteGuestGas.read_bytes(); + // store the 
wasm code + let code_hash = Hash::sha256(&tx_code); + let key = Key::wasm_code(&code_hash); + let len_key = Key::wasm_code_len(&code_hash); + let code_len = (tx_code.len() as u64).serialize_to_vec(); + state.write_log_mut().write(&key, tx_code.clone()).unwrap(); + state.write_log_mut().write(&len_key, code_len).unwrap(); + + let (vp_cache, _) = wasm::compilation_cache::common::testing::cache(); + let mut outer_tx = Tx::from_type(TxType::Raw); + outer_tx.set_code(Code::new(tx_code.clone(), None)); + outer_tx.set_data(Data::new(vec![])); + let result = vp( code_hash, &outer_tx, &tx_index, @@ -1277,10 +1723,53 @@ mod tests { &gas_meter, &keys_changed, &verifiers, - vp_cache, - ) - .unwrap(); - assert!(!passed); + vp_cache.clone(), + ); + + assert!(matches!(result.unwrap_err(), Error::GasError(_))); + } + + /// Test that when a vp runs out of gas in host, the execution is aborted + /// from the host env (no cooperation required by the guest). + #[test] + fn test_vp_out_of_gas_in_host() { + let mut state = TestState::default(); + let tx_index = TxIndex::default(); + + let addr = state.in_mem_mut().address_gen.generate_address("rng seed"); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(OUT_OF_GAS_LIMIT.into()), + )); + let keys_changed = BTreeSet::new(); + let verifiers = BTreeSet::new(); + + // This code will charge gas in a host function indefinetely + let tx_code = TestWasms::VpInfiniteHostGas.read_bytes(); + // store the wasm code + let code_hash = Hash::sha256(&tx_code); + let key = Key::wasm_code(&code_hash); + let len_key = Key::wasm_code_len(&code_hash); + let code_len = (tx_code.len() as u64).serialize_to_vec(); + state.write_log_mut().write(&key, tx_code.clone()).unwrap(); + state.write_log_mut().write(&len_key, code_len).unwrap(); + + let (vp_cache, _) = wasm::compilation_cache::common::testing::cache(); + let mut outer_tx = Tx::from_type(TxType::Raw); + outer_tx.set_code(Code::new(tx_code.clone(), None)); + 
outer_tx.set_data(Data::new(vec![])); + let result = vp( + code_hash, + &outer_tx, + &tx_index, + &addr, + &state, + &gas_meter, + &keys_changed, + &verifiers, + vp_cache.clone(), + ); + + assert!(matches!(result.unwrap_err(), Error::GasError(_))); } fn execute_tx_with_code(tx_code: Vec) -> Result> { @@ -1324,27 +1813,25 @@ mod tests { format!( r#" (module - (type (;0;) (func (param i64 i64))) + (type (;0;) (func (param i64 i64) (result i64))) ;; recursive loop, the param is the number of loops (func $loop (param i64) (result i64) (if (result i64) (i64.eqz (get_local 0)) - (then (get_local 0)) + (then (i64.const 1)) (else (call $loop (i64.sub (get_local 0) (i64.const 1)))))) - (func $_apply_tx (type 0) (param i64 i64) - (call $loop (i64.const {})) - drop) + (func $_apply_tx (type 0) (param i64 i64) (result i64) + (call $loop (i64.const {loops}))) (table (;0;) 1 1 funcref) (memory (;0;) 16) (global (;0;) (mut i32) (i32.const 1048576)) (export "memory" (memory 0)) (export "_apply_tx" (func $_apply_tx))) - "#, - loops + "# ) .as_bytes(), ) @@ -1354,7 +1841,7 @@ mod tests { execute_tx_with_code(tx_code) } - fn loop_in_vp_wasm(loops: u32) -> Result { + fn loop_in_vp_wasm(loops: u32) -> Result<()> { // A validity predicate with a recursive loop. // The boilerplate code is generated from vp_template.wasm using // `wasm2wat` and the loop code is hand-written. 
@@ -1368,7 +1855,7 @@ mod tests { (if (result i64) (i64.eqz (get_local 0)) - (then (get_local 0)) + (then (i64.const 1)) (else (call $loop (i64.sub (get_local 0) (i64.const 1)))))) (func $_validate_tx (type 0) (param i64 i64 i64 i64 i64 i64 i64 i64) (result i64) diff --git a/crates/parameters/Cargo.toml b/crates/parameters/Cargo.toml index fc212110af..6be22eda9c 100644 --- a/crates/parameters/Cargo.toml +++ b/crates/parameters/Cargo.toml @@ -12,6 +12,13 @@ readme.workspace = true repository.workspace = true version.workspace = true +[features] +default = [] +testing = [ + "namada_core/testing", + "namada_storage/testing", +] + [dependencies] namada_core = { path = "../core" } namada_macros = { path = "../macros" } diff --git a/crates/parameters/src/lib.rs b/crates/parameters/src/lib.rs index b0c0a93410..b2ad19e167 100644 --- a/crates/parameters/src/lib.rs +++ b/crates/parameters/src/lib.rs @@ -5,7 +5,6 @@ use std::collections::BTreeMap; use namada_core::address::{Address, InternalAddress}; use namada_core::chain::ProposalBytes; -use namada_core::dec::Dec; use namada_core::hash::Hash; pub use namada_core::parameters::*; use namada_core::storage::Key; @@ -59,11 +58,10 @@ where implicit_vp_code_hash, epochs_per_year, max_signatures_per_transaction, - staked_ratio, - pos_inflation_amount, minimum_gas_price, fee_unshielding_gas_limit, fee_unshielding_descriptions_limit, + is_native_token_transferable, } = parameters; // write max tx bytes parameter @@ -138,15 +136,14 @@ where max_signatures_per_transaction, )?; - let staked_ratio_key = storage::get_staked_ratio_key(); - storage.write(&staked_ratio_key, staked_ratio)?; - - let pos_inflation_key = storage::get_pos_inflation_amount_key(); - storage.write(&pos_inflation_key, pos_inflation_amount)?; - let gas_cost_key = storage::get_gas_cost_key(); storage.write(&gas_cost_key, minimum_gas_price)?; + let native_token_transferable_key = + storage::get_native_token_transferable_key(); + storage + 
.write(&native_token_transferable_key, is_native_token_transferable)?; + Ok(()) } @@ -238,32 +235,6 @@ where storage.write(&key, value) } -/// Update the PoS staked ratio parameter in storage. Returns the parameters and -/// gas cost. -pub fn update_staked_ratio_parameter( - storage: &mut S, - value: &Dec, -) -> namada_storage::Result<()> -where - S: StorageRead + StorageWrite, -{ - let key = storage::get_staked_ratio_key(); - storage.write(&key, value) -} - -/// Update the PoS inflation rate parameter in storage. Returns the parameters -/// and gas cost. -pub fn update_pos_inflation_amount_parameter( - storage: &mut S, - value: &u64, -) -> namada_storage::Result<()> -where - S: StorageRead + StorageWrite, -{ - let key = storage::get_pos_inflation_amount_key(); - storage.write(&key, value) -} - /// Update the implicit VP parameter in storage. Return the gas cost. pub fn update_implicit_vp( storage: &mut S, @@ -409,20 +380,6 @@ where .ok_or(ReadError::ParametersMissing) .into_storage_result()?; - // read staked ratio - let staked_ratio_key = storage::get_staked_ratio_key(); - let value = storage.read(&staked_ratio_key)?; - let staked_ratio = value - .ok_or(ReadError::ParametersMissing) - .into_storage_result()?; - - // read PoS inflation rate - let pos_inflation_key = storage::get_pos_inflation_amount_key(); - let value = storage.read(&pos_inflation_key)?; - let pos_inflation_amount = value - .ok_or(ReadError::ParametersMissing) - .into_storage_result()?; - // read gas cost let gas_cost_key = storage::get_gas_cost_key(); let value = storage.read(&gas_cost_key)?; @@ -437,6 +394,13 @@ where .ok_or(ReadError::ParametersMissing) .into_storage_result()?; + let native_token_transferable_key = + storage::get_native_token_transferable_key(); + let value = storage.read(&native_token_transferable_key)?; + let is_native_token_transferable = value + .ok_or(ReadError::ParametersMissing) + .into_storage_result()?; + Ok(Parameters { max_tx_bytes, epoch_duration, @@ -448,11 +412,10 
@@ where implicit_vp_code_hash: Some(implicit_vp_code_hash), epochs_per_year, max_signatures_per_transaction, - staked_ratio, - pos_inflation_amount, minimum_gas_price, fee_unshielding_gas_limit, fee_unshielding_descriptions_limit, + is_native_token_transferable, }) } @@ -474,3 +437,31 @@ where pub fn native_erc20_key() -> Key { storage::get_native_erc20_key_at_addr(ADDRESS) } + +/// Initialize parameters to the storage for testing +#[cfg(any(test, feature = "testing"))] +pub fn init_test_storage(storage: &mut S) -> namada_storage::Result<()> +where + S: StorageRead + StorageWrite, +{ + let params = Parameters { + max_tx_bytes: 1024 * 1024, + epoch_duration: EpochDuration { + min_num_of_blocks: 1, + min_duration: DurationSecs(3600), + }, + max_expected_time_per_block: DurationSecs(3600), + max_proposal_bytes: Default::default(), + max_block_gas: 100, + vp_allowlist: vec![], + tx_allowlist: vec![], + implicit_vp_code_hash: Default::default(), + epochs_per_year: 365, + max_signatures_per_transaction: 10, + fee_unshielding_gas_limit: 0, + fee_unshielding_descriptions_limit: 0, + minimum_gas_price: Default::default(), + is_native_token_transferable: true, + }; + init_storage(¶ms, storage) +} diff --git a/crates/parameters/src/storage.rs b/crates/parameters/src/storage.rs index abf3fa743f..b29a5f7594 100644 --- a/crates/parameters/src/storage.rs +++ b/crates/parameters/src/storage.rs @@ -25,11 +25,6 @@ struct Keys { /// Sub-lkey for storing the Ethereum address of the bridge contract. 
bridge_contract_address: &'static str, // ======================================== - // PoS parameters - // ======================================== - pos_inflation_amount: &'static str, - staked_ratio: &'static str, - // ======================================== // Core parameters // ======================================== epoch_duration: &'static str, @@ -45,6 +40,7 @@ struct Keys { fee_unshielding_gas_limit: &'static str, fee_unshielding_descriptions_limit: &'static str, max_signatures_per_transaction: &'static str, + native_token_transferable: &'static str, } /// Returns if the key is a parameter key. @@ -95,16 +91,6 @@ pub fn is_epochs_per_year_key(key: &Key) -> bool { is_epochs_per_year_key_at_addr(key, &ADDRESS) } -/// Returns if the key is the staked ratio key. -pub fn is_staked_ratio_key(key: &Key) -> bool { - is_staked_ratio_key_at_addr(key, &ADDRESS) -} - -/// Returns if the key is the PoS reward rate key. -pub fn is_pos_inflation_amount_key(key: &Key) -> bool { - is_pos_inflation_amount_key_at_addr(key, &ADDRESS) -} - /// Returns if the key is the max proposal bytes key. pub fn is_max_proposal_bytes_key(key: &Key) -> bool { is_max_proposal_bytes_key_at_addr(key, &ADDRESS) @@ -155,16 +141,6 @@ pub fn get_epochs_per_year_key() -> Key { get_epochs_per_year_key_at_addr(ADDRESS) } -/// Storage key used for staked ratio parameter. -pub fn get_staked_ratio_key() -> Key { - get_staked_ratio_key_at_addr(ADDRESS) -} - -/// Storage key used for the inflation amount parameter. -pub fn get_pos_inflation_amount_key() -> Key { - get_pos_inflation_amount_key_at_addr(ADDRESS) -} - /// Storage key used for the max proposal bytes. 
pub fn get_max_proposal_bytes_key() -> Key { get_max_proposal_bytes_key_at_addr(ADDRESS) @@ -201,3 +177,20 @@ pub fn get_max_block_gas( ), ) } + +/// Storage key used for the flag to enable the native token transfer +pub fn get_native_token_transferable_key() -> Key { + get_native_token_transferable_key_at_addr(ADDRESS) +} + +/// Helper function to retrieve the `is_native_token_transferable` protocol +/// parameter from storage +pub fn is_native_token_transferable( + storage: &impl StorageRead, +) -> std::result::Result { + storage.read(&get_native_token_transferable_key())?.ok_or( + namada_storage::Error::SimpleMessage( + "Missing is_native_token_transferable parameter from storage", + ), + ) +} diff --git a/crates/proof_of_stake/Cargo.toml b/crates/proof_of_stake/Cargo.toml index ae04753520..f14c3ad49b 100644 --- a/crates/proof_of_stake/Cargo.toml +++ b/crates/proof_of_stake/Cargo.toml @@ -23,6 +23,7 @@ migrations = [ [dependencies] namada_account = { path = "../account" } +namada_controller = { path = "../controller" } namada_core = { path = "../core" } namada_governance = { path = "../governance" } namada_macros = { path = "../macros" } diff --git a/crates/proof_of_stake/src/epoched.rs b/crates/proof_of_stake/src/epoched.rs index 44bf869f49..033f6d6f13 100644 --- a/crates/proof_of_stake/src/epoched.rs +++ b/crates/proof_of_stake/src/epoched.rs @@ -1,12 +1,12 @@ //! [`Epoched`] and [`EpochedDelta`] are structures for data that is set for //! future (and possibly past) epochs. 
-use std::collections::HashMap; use std::fmt::Debug; use std::marker::PhantomData; use std::{cmp, ops}; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; +use namada_core::collections::HashMap; use namada_core::storage::{self, Epoch}; use namada_macros::BorshDeserializer; #[cfg(feature = "migrations")] @@ -447,13 +447,11 @@ where // panic!("WARNING: no data existing in // {new_oldest_epoch}"); } self.set_oldest_epoch(storage, new_oldest_epoch)?; - - // Update the epoch of the last update to the current epoch - let key = self.get_last_update_storage_key(); - storage.write(&key, current_epoch)?; - return Ok(()); } } + // Update the epoch of the last update to the current epoch + let key = self.get_last_update_storage_key(); + storage.write(&key, current_epoch)?; Ok(()) } diff --git a/crates/proof_of_stake/src/lib.rs b/crates/proof_of_stake/src/lib.rs index 02b9ada912..aa28e63e35 100644 --- a/crates/proof_of_stake/src/lib.rs +++ b/crates/proof_of_stake/src/lib.rs @@ -24,10 +24,11 @@ mod tests; use core::fmt::Debug; use std::cmp::{self}; -use std::collections::{BTreeMap, BTreeSet, HashSet}; +use std::collections::{BTreeMap, BTreeSet}; pub use error::*; use namada_core::address::{Address, InternalAddress}; +use namada_core::collections::HashSet; use namada_core::dec::Dec; use namada_core::event::EmitEvents; use namada_core::key::common; @@ -67,8 +68,10 @@ use crate::storage::{ validator_rewards_products_handle, validator_set_positions_handle, validator_slashes_handle, validator_state_handle, validator_total_redelegated_bonded_handle, - validator_total_redelegated_unbonded_handle, write_last_reward_claim_epoch, - write_pos_params, write_validator_address_raw_hash, write_validator_avatar, + validator_total_redelegated_unbonded_handle, + write_last_pos_inflation_amount, write_last_reward_claim_epoch, + write_last_staked_ratio, write_pos_params, + write_validator_address_raw_hash, write_validator_avatar, write_validator_description, write_validator_discord_handle, 
write_validator_email, write_validator_max_commission_rate_change, write_validator_metadata, write_validator_website, @@ -113,6 +116,11 @@ where tracing::debug!("Initializing PoS genesis"); write_pos_params(storage, params)?; + // Initialize values for PoS inflation + write_last_staked_ratio(storage, Dec::zero())?; + write_last_pos_inflation_amount(storage, token::Amount::zero())?; + + // Initialize validator set data consensus_validator_set_handle().init(storage, current_epoch)?; below_capacity_validator_set_handle().init(storage, current_epoch)?; validator_set_positions_handle().init(storage, current_epoch)?; @@ -267,11 +275,11 @@ where // Update the validator set // Allow bonding even if the validator is jailed. However, if jailed, there // must be no changes to the validator set. Check at the pipeline epoch. - let is_jailed_or_inactive_at_pipeline = matches!( + let is_jailed_or_inactive_at_offset = matches!( validator_state_handle.get(storage, offset_epoch, ¶ms)?, Some(ValidatorState::Jailed) | Some(ValidatorState::Inactive) ); - if !is_jailed_or_inactive_at_pipeline { + if !is_jailed_or_inactive_at_offset { update_validator_set( storage, ¶ms, @@ -298,6 +306,7 @@ where amount.change(), current_epoch, offset_opt, + !is_jailed_or_inactive_at_offset, )?; Ok(()) @@ -680,6 +689,7 @@ where change_after_slashing, current_epoch, None, + !is_jailed_or_inactive_at_pipeline, )?; if tracing::level_enabled!(tracing::Level::DEBUG) { @@ -2159,6 +2169,7 @@ where amount_after_slashing.change(), current_epoch, None, + !is_jailed_or_inactive_at_pipeline, )?; Ok(()) @@ -2205,7 +2216,8 @@ where // Promote the next below-capacity validator to consensus promote_next_below_capacity_validator_to_consensus( storage, - pipeline_epoch, + current_epoch, + params.pipeline_len, )?; } @@ -2290,8 +2302,8 @@ where validator_state_handle(validator).set( storage, ValidatorState::Jailed, - pipeline_epoch, - 0, + current_epoch, + params.pipeline_len, )?; return Ok(()); } @@ -2485,6 +2497,10 @@ 
where #[cfg(any(test, feature = "testing"))] /// PoS related utility functions to help set up tests. pub mod test_utils { + use namada_core::chain::ProposalBytes; + use namada_core::hash::Hash; + use namada_core::time::DurationSecs; + use namada_parameters::{init_storage, EpochDuration}; use namada_trans_token::credit_tokens; use super::*; @@ -2568,6 +2584,26 @@ pub mod test_utils { namada_governance::parameters::GovernanceParameters::default(); gov_params.init_storage(storage)?; let params = read_non_pos_owned_params(storage, owned)?; + let chain_parameters = namada_parameters::Parameters { + max_tx_bytes: 123456789, + epoch_duration: EpochDuration { + min_num_of_blocks: 2, + min_duration: DurationSecs(4), + }, + max_expected_time_per_block: DurationSecs(2), + max_proposal_bytes: ProposalBytes::default(), + max_block_gas: 10000000, + vp_allowlist: vec![], + tx_allowlist: vec![], + implicit_vp_code_hash: Some(Hash::default()), + epochs_per_year: 10000000, + max_signatures_per_transaction: 15, + fee_unshielding_gas_limit: 10000, + fee_unshielding_descriptions_limit: 15, + minimum_gas_price: BTreeMap::new(), + is_native_token_transferable: true, + }; + init_storage(&chain_parameters, storage).unwrap(); init_genesis_helper(storage, ¶ms, validators, current_epoch)?; Ok(params) } @@ -2698,10 +2734,14 @@ where // Remove the validator from the set starting at the update epoch and up // thru the pipeline epoch. - let pipeline_epoch = current_epoch + params.pipeline_len; - for epoch in - Epoch::iter_bounds_inclusive(validator_set_update_epoch, pipeline_epoch) - { + let start = validator_set_update_epoch + .0 + .checked_sub(current_epoch.0) + .unwrap(); // Safe unwrap + let end = params.pipeline_len; + + for offset in start..=end { + let epoch = current_epoch + offset; let prev_state = validator_state_handle(validator) .get(storage, epoch, params)? 
.expect("Expected to find a valid validator."); @@ -2716,9 +2756,11 @@ where // For the pipeline epoch only: // promote the next max inactive validator to the active // validator set at the pipeline offset - if epoch == pipeline_epoch { + if offset == params.pipeline_len { promote_next_below_capacity_validator_to_consensus( - storage, epoch, + storage, + current_epoch, + offset, )?; } } diff --git a/crates/proof_of_stake/src/parameters.rs b/crates/proof_of_stake/src/parameters.rs index df0be06c71..c35f8fd59b 100644 --- a/crates/proof_of_stake/src/parameters.rs +++ b/crates/proof_of_stake/src/parameters.rs @@ -135,6 +135,9 @@ pub enum ValidationError { UnbondingLenTooShort(u64, u64), } +/// The maximum string length of any validator metadata +pub const MAX_VALIDATOR_METADATA_LEN: u64 = 500; + /// The number of fundamental units per whole token of the native staking token pub const TOKENS_PER_NAM: u64 = 1_000_000; diff --git a/crates/proof_of_stake/src/queries.rs b/crates/proof_of_stake/src/queries.rs index 361f82d258..fb3d2e0c7e 100644 --- a/crates/proof_of_stake/src/queries.rs +++ b/crates/proof_of_stake/src/queries.rs @@ -1,10 +1,11 @@ //! Queriezzz use std::cmp; -use std::collections::{BTreeMap, HashMap, HashSet}; +use std::collections::BTreeMap; use borsh::BorshDeserialize; use namada_core::address::Address; +use namada_core::collections::{HashMap, HashSet}; use namada_core::dec::Dec; use namada_core::storage::Epoch; use namada_core::token; diff --git a/crates/proof_of_stake/src/rewards.rs b/crates/proof_of_stake/src/rewards.rs index 05bc89613a..6258d1c94d 100644 --- a/crates/proof_of_stake/src/rewards.rs +++ b/crates/proof_of_stake/src/rewards.rs @@ -1,8 +1,8 @@ //! PoS rewards distribution. 
-use std::collections::{HashMap, HashSet}; - +use namada_controller::PDController; use namada_core::address::{self, Address}; +use namada_core::collections::{HashMap, HashSet}; use namada_core::dec::Dec; use namada_core::storage::{BlockHeight, Epoch}; use namada_core::token::{self, Amount}; @@ -10,16 +10,18 @@ use namada_core::uint::{Uint, I256}; use namada_parameters::storage as params_storage; use namada_storage::collections::lazy_map::NestedSubKey; use namada_storage::{ResultExt, StorageRead, StorageWrite}; +use namada_trans_token::get_effective_total_native_supply; use thiserror::Error; use crate::storage::{ consensus_validator_set_handle, get_last_reward_claim_epoch, - read_pos_params, read_total_stake, read_validator_stake, - rewards_accumulator_handle, validator_commission_rate_handle, - validator_rewards_products_handle, validator_state_handle, + read_last_pos_inflation_amount, read_last_staked_ratio, read_pos_params, + read_total_stake, read_validator_stake, rewards_accumulator_handle, + validator_commission_rate_handle, validator_rewards_products_handle, + validator_state_handle, write_last_pos_inflation_amount, + write_last_staked_ratio, }; -use crate::token::storage_key::minted_balance_key; -use crate::token::{credit_tokens, inflation}; +use crate::token::credit_tokens; use crate::types::{into_tm_voting_power, BondId, ValidatorState, VoteInfo}; use crate::{ bond_amounts_for_rewards, get_total_consensus_stake, staking_token_address, @@ -48,6 +50,36 @@ pub enum RewardsError { CoeffsNotSet, } +/// Compute PoS inflation amount +#[allow(clippy::too_many_arguments)] +pub fn compute_inflation( + locked_amount: token::Amount, + total_native_amount: token::Amount, + max_reward_rate: Dec, + last_inflation_amount: token::Amount, + p_gain_nom: Dec, + d_gain_nom: Dec, + epochs_per_year: u64, + target_ratio: Dec, + last_ratio: Dec, +) -> namada_storage::Result { + let controller = PDController::new( + total_native_amount.into(), + max_reward_rate, + 
last_inflation_amount.into(), + p_gain_nom, + d_gain_nom, + epochs_per_year, + target_ratio, + last_ratio, + ); + let metric = Dec::from(locked_amount) / Dec::from(total_native_amount); + let control_coeff = controller.get_total_native_dec() * max_reward_rate + / controller.get_epochs_per_year(); + let amount_uint = controller.compute_inflation(control_coeff, metric); + token::Amount::from_uint(amount_uint, 0).into_storage_result() +} + /// Holds coefficients for the three different ways to get PoS rewards #[derive(Debug, Copy, Clone)] #[allow(missing_docs)] @@ -309,47 +341,37 @@ where // Read from Parameters storage let epochs_per_year: u64 = storage .read(¶ms_storage::get_epochs_per_year_key())? - .expect("Epochs per year should exist in storage"); - let pos_last_staked_ratio: Dec = storage - .read(¶ms_storage::get_staked_ratio_key())? - .expect("PoS staked ratio should exist in storage"); - let pos_last_inflation_amount: token::Amount = storage - .read(¶ms_storage::get_pos_inflation_amount_key())? - .expect("PoS inflation amount should exist in storage"); + .expect("Epochs per year should exist in parameters storage"); + + let staking_token = staking_token_address(storage); + let total_tokens = get_effective_total_native_supply(storage)?; // Read from PoS storage let params = read_pos_params(storage)?; - let staking_token = staking_token_address(storage); - let pos_p_gain_nom = params.rewards_gain_p; - let pos_d_gain_nom = params.rewards_gain_d; - - let total_tokens: token::Amount = storage - .read(&minted_balance_key(&staking_token))? 
- .expect("Total NAM balance should exist in storage"); - let pos_locked_supply = read_total_stake(storage, ¶ms, last_epoch)?; - let pos_locked_ratio_target = params.target_staked_ratio; - let pos_max_inflation_rate = params.max_inflation_rate; - - // Run rewards PD controller - let pos_controller = inflation::PosRewardsController { - locked_tokens: pos_locked_supply.raw_amount(), - total_native_tokens: total_tokens.raw_amount(), - locked_ratio_target: pos_locked_ratio_target, - locked_ratio_last: pos_last_staked_ratio, - max_reward_rate: pos_max_inflation_rate, - last_inflation_amount: pos_last_inflation_amount.raw_amount(), - p_gain_nom: pos_p_gain_nom, - d_gain_nom: pos_d_gain_nom, - epochs_per_year, - }; - // Run the rewards controllers - let inflation::PosValsToUpdate { - locked_ratio, - inflation, - } = pos_controller.run(); + let locked_amount = read_total_stake(storage, ¶ms, last_epoch)?; + + let last_staked_ratio = read_last_staked_ratio(storage)? + .expect("Last staked ratio should exist in PoS storage"); + let last_inflation_amount = read_last_pos_inflation_amount(storage)? 
+ .expect("Last inflation amount should exist in PoS storage"); - let inflation = - token::Amount::from_uint(inflation, 0).into_storage_result()?; + let locked_ratio_target = params.target_staked_ratio; + let max_inflation_rate = params.max_inflation_rate; + let p_gain_nom = params.rewards_gain_p; + let d_gain_nom = params.rewards_gain_d; + + // Compute the new inflation + let inflation = compute_inflation( + locked_amount, + total_tokens, + max_inflation_rate, + last_inflation_amount, + p_gain_nom, + d_gain_nom, + epochs_per_year, + locked_ratio_target, + last_staked_ratio, + )?; // Mint inflation and partition rewards among all accounts that earn a // portion of it @@ -365,9 +387,12 @@ where // Write new rewards parameters that will be used for the inflation of // the current new epoch - storage - .write(¶ms_storage::get_pos_inflation_amount_key(), inflation)?; - storage.write(¶ms_storage::get_staked_ratio_key(), locked_ratio)?; + let locked_amount = Dec::from(locked_amount); + let total_amount = Dec::from(total_tokens); + let locked_ratio = locked_amount / total_amount; + + write_last_staked_ratio(storage, locked_ratio)?; + write_last_pos_inflation_amount(storage, inflation)?; Ok(()) } @@ -475,7 +500,7 @@ where credit_tokens( storage, staking_token, - &address::GOV, + &address::PGF, reward_tokens_remaining, )?; } @@ -590,3 +615,278 @@ where let key = storage_key::rewards_counter_key(source, validator); Ok(storage.read::(&key)?.unwrap_or_default()) } + +#[cfg(test)] +mod tests { + use std::str::FromStr; + + use super::*; + + #[test] + fn test_inflation_calc_up() { + let locked_amount = token::Amount::native_whole(2_000_000_000); + let total_native_amount = + token::Amount::native_whole(4_000_000_000_u64); + let max_reward_rate = Dec::from_str("0.1").unwrap(); + let p_gain_nom = Dec::from_str("0.1").unwrap(); + let d_gain_nom = Dec::from_str("0.1").unwrap(); + let epochs_per_year = 365; + let target_ratio = Dec::from_str("0.66666666").unwrap(); + + let inflation_0 
= compute_inflation( + locked_amount, + total_native_amount, + max_reward_rate, + token::Amount::zero(), + p_gain_nom, + d_gain_nom, + epochs_per_year, + target_ratio, + Dec::from_str("0.5").unwrap(), + ) + .unwrap(); + let locked_ratio_0 = + Dec::from(locked_amount) / Dec::from(total_native_amount); + + println!( + "Round 0: Locked ratio: {locked_ratio_0}, inflation: {inflation_0}" + ); + assert_eq!(locked_ratio_0, Dec::from_str("0.5").unwrap()); + assert_eq!(inflation_0, token::Amount::from_u64(18264839452)); + + let locked_amount = locked_amount + inflation_0; + let last_inflation_amount = inflation_0; + let last_locked_ratio = locked_ratio_0; + + let inflation_1 = compute_inflation( + locked_amount, + total_native_amount, + max_reward_rate, + last_inflation_amount, + p_gain_nom, + d_gain_nom, + epochs_per_year, + target_ratio, + last_locked_ratio, + ) + .unwrap(); + + // BUG: DIDN'T ADD TO TOTAL AMOUNT + + let locked_ratio_1 = + Dec::from(locked_amount) / Dec::from(total_native_amount); + + println!( + "Round 1: Locked ratio: {locked_ratio_1}, inflation: {inflation_1}" + ); + assert!(locked_ratio_1 > locked_ratio_0); + assert!(locked_ratio_1 > Dec::from_str("0.5").unwrap()); + assert!(locked_ratio_1 < Dec::from_str("0.51").unwrap()); + assert_eq!(inflation_1, token::Amount::from_u64(36529678904)); + + let locked_amount = locked_amount + inflation_1; + let last_inflation_amount = inflation_1; + let last_locked_ratio = locked_ratio_1; + + let inflation_2 = compute_inflation( + locked_amount, + total_native_amount, + max_reward_rate, + last_inflation_amount, + p_gain_nom, + d_gain_nom, + epochs_per_year, + target_ratio, + last_locked_ratio, + ) + .unwrap(); + + let locked_ratio_2 = + Dec::from(locked_amount) / Dec::from(total_native_amount); + println!( + "Round 2: Locked ratio: {locked_ratio_2}, inflation: {inflation_2}", + ); + assert!(locked_ratio_2 > locked_ratio_1); + assert!(locked_ratio_2 > Dec::from_str("0.5").unwrap()); + assert!(locked_ratio_2 < 
Dec::from_str("0.51").unwrap()); + assert_eq!(inflation_2, token::Amount::from_u64(54794017950)); + } + + #[test] + fn test_inflation_calc_down() { + let locked_amount = token::Amount::native_whole(900_000_000); + let total_native_amount = + token::Amount::native_whole(1_000_000_000_u64); + let max_reward_rate = Dec::from_str("0.1").unwrap(); + let p_gain_nom = Dec::from_str("0.1").unwrap(); + let d_gain_nom = Dec::from_str("0.1").unwrap(); + let epochs_per_year = 365; + let target_ratio = Dec::from_str("0.66666666").unwrap(); + + let inflation_0 = compute_inflation( + locked_amount, + total_native_amount, + max_reward_rate, + token::Amount::native_whole(10_000), + p_gain_nom, + d_gain_nom, + epochs_per_year, + target_ratio, + Dec::from_str("0.9").unwrap(), + ) + .unwrap(); + let locked_ratio_0 = + Dec::from(locked_amount) / Dec::from(total_native_amount); + + println!( + "Round 0: Locked ratio: {locked_ratio_0}, inflation: {inflation_0}" + ); + assert_eq!(locked_ratio_0, Dec::from_str("0.9").unwrap()); + assert_eq!(inflation_0, token::Amount::from_u64(3607305753)); + + let locked_amount = locked_amount + inflation_0; + let last_inflation_amount = inflation_0; + let last_locked_ratio = locked_ratio_0; + + let inflation_1 = compute_inflation( + locked_amount, + total_native_amount, + max_reward_rate, + last_inflation_amount, + p_gain_nom, + d_gain_nom, + epochs_per_year, + target_ratio, + last_locked_ratio, + ) + .unwrap(); + + // BUG: DIDN'T ADD TO TOTAL AMOUNT + + let locked_ratio_1 = + Dec::from(locked_amount) / Dec::from(total_native_amount); + + println!( + "Round 1: Locked ratio: {locked_ratio_1}, inflation: {inflation_1}" + ); + assert!(locked_ratio_1 > locked_ratio_0); + assert!(locked_ratio_1 > Dec::from_str("0.9").unwrap()); + assert!(locked_ratio_1 < Dec::from_str("0.91").unwrap()); + assert_eq!(inflation_1, token::Amount::zero()); + + let locked_amount = locked_amount + inflation_1; + let last_inflation_amount = inflation_1; + let last_locked_ratio = 
locked_ratio_1; + + let inflation_2 = compute_inflation( + locked_amount, + total_native_amount, + max_reward_rate, + last_inflation_amount, + p_gain_nom, + d_gain_nom, + epochs_per_year, + target_ratio, + last_locked_ratio, + ) + .unwrap(); + + let locked_ratio_2 = + Dec::from(locked_amount) / Dec::from(total_native_amount); + println!( + "Round 2: Locked ratio: {locked_ratio_2}, inflation: {inflation_2}", + ); + assert_eq!(locked_ratio_2, locked_ratio_1); + assert_eq!(inflation_2, token::Amount::zero()); + } + + #[test] + fn test_pos_inflation_playground() { + let epochs_per_year = 365_u64; + + let init_locked_ratio = Dec::from_str("0.1").unwrap(); + let mut last_locked_ratio = init_locked_ratio; + let total_native_tokens = 1_000_000_000_u64; + let locked_amount = u64::try_from( + (init_locked_ratio * total_native_tokens).to_uint().unwrap(), + ) + .unwrap(); + let mut locked_amount = token::Amount::native_whole(locked_amount); + let mut last_inflation_amount = token::Amount::zero(); + let mut total_native_tokens = + token::Amount::native_whole(total_native_tokens); + + let max_reward_rate = Dec::from_str("0.1").unwrap(); + let target_ratio = Dec::from_str("0.66666666").unwrap(); + let p_gain_nom = Dec::from_str("0.25").unwrap(); + let d_gain_nom = Dec::from_str("0.25").unwrap(); + + let staking_growth = Dec::from_str("0.04").unwrap(); + // let mut do_add = true; + + let num_rounds = 50; + + for round in 0..num_rounds { + let inflation = compute_inflation( + locked_amount, + total_native_tokens, + max_reward_rate, + last_inflation_amount, + p_gain_nom, + d_gain_nom, + epochs_per_year, + target_ratio, + last_locked_ratio, + ) + .unwrap(); + let locked_ratio = + Dec::from(locked_amount) / Dec::from(total_native_tokens); + + let rate = Dec::from(inflation) * Dec::from(epochs_per_year) + / Dec::from(total_native_tokens); + println!( + "Round {round}: Locked ratio: {locked_ratio}, inflation rate: \ + {rate}", + ); + + last_inflation_amount = inflation; + 
total_native_tokens += inflation; + last_locked_ratio = locked_ratio; + + // if rate.abs_diff(&controller.max_reward_rate) + // < Dec::from_str("0.01").unwrap() + // { + // controller.locked_tokens = controller.total_tokens; + // } + + let tot_tokens = + Dec::try_from(total_native_tokens.raw_amount()).unwrap(); + let change_staked_tokens = + token::Amount::from(staking_growth * tot_tokens); + + locked_amount = std::cmp::min( + total_native_tokens, + locked_amount + change_staked_tokens, + ); + + // if locked_ratio > Dec::from_str("0.8").unwrap() + // && locked_ratio - controller.locked_ratio_last >= Dec::zero() + // { + // do_add = false; + // } else if locked_ratio < Dec::from_str("0.4").unwrap() + // && locked_ratio - controller.locked_ratio_last < Dec::zero() + // { + // do_add = true; + // } + + // controller.locked_tokens = std::cmp::min( + // if do_add { + // controller.locked_tokens + change_staked_tokens + // } else { + // controller.locked_tokens - change_staked_tokens + // }, + // controller.total_tokens, + // ); + } + } +} diff --git a/crates/proof_of_stake/src/slashing.rs b/crates/proof_of_stake/src/slashing.rs index 513027fe49..3965ff6014 100644 --- a/crates/proof_of_stake/src/slashing.rs +++ b/crates/proof_of_stake/src/slashing.rs @@ -1,10 +1,11 @@ //! Slashing tingzzzz use std::cmp::{self, Reverse}; -use std::collections::{BTreeMap, BTreeSet, HashMap}; +use std::collections::{BTreeMap, BTreeSet}; use borsh::BorshDeserialize; use namada_core::address::Address; +use namada_core::collections::HashMap; use namada_core::dec::Dec; use namada_core::key::tm_raw_hash_to_string; use namada_core::storage::{BlockHeight, Epoch}; @@ -159,12 +160,16 @@ where evidence_epoch + params.slash_processing_epoch_offset(); // Add the slash to the list of enqueued slashes to be processed at a later - // epoch - enqueued_slashes_handle() + // epoch. If a slash at the same block height already exists, return early. 
+ let enqueued = enqueued_slashes_handle() .get_data_handler() .at(&processing_epoch) - .at(validator) - .push(storage, slash)?; + .at(validator); + if enqueued.contains(storage, &evidence_block_height)? { + return Ok(()); + } else { + enqueued.insert(storage, evidence_block_height, slash)?; + } // Update the most recent slash (infraction) epoch for the validator let last_slash_epoch = read_validator_last_slash_epoch(storage, validator)?; @@ -298,10 +303,13 @@ where // Update validator sets first because it needs to be able to read // validator stake before we make any changes to it for (&epoch, &slash_amount) in &slash_amounts { - let state = validator_state_handle(&validator) - .get(storage, epoch, ¶ms)? - .unwrap(); - if state != ValidatorState::Jailed { + let is_jailed_or_inactive = matches!( + validator_state_handle(&validator) + .get(storage, epoch, ¶ms)? + .unwrap(), + ValidatorState::Jailed | ValidatorState::Inactive + ); + if !is_jailed_or_inactive { update_validator_set( storage, ¶ms, @@ -325,12 +333,20 @@ where epoch, Some(0), )?; + + let is_jailed_or_inactive = matches!( + validator_state_handle(&validator) + .get(storage, epoch, ¶ms)? + .unwrap(), + ValidatorState::Jailed | ValidatorState::Inactive + ); update_total_deltas( storage, ¶ms, -slash_delta.change(), epoch, Some(0), + !is_jailed_or_inactive, )?; } diff --git a/crates/proof_of_stake/src/storage.rs b/crates/proof_of_stake/src/storage.rs index 9cae1168a9..6bd8448d5f 100644 --- a/crates/proof_of_stake/src/storage.rs +++ b/crates/proof_of_stake/src/storage.rs @@ -1,10 +1,11 @@ //! PoS functions for reading and writing to storage and lazy collection handles //! associated with given `storage_key`s. 
-use std::collections::{BTreeSet, HashSet}; +use std::collections::BTreeSet; use namada_account::protocol_pk_key; use namada_core::address::Address; +use namada_core::collections::HashSet; use namada_core::dec::Dec; use namada_core::key::{common, tm_consensus_key_raw_hash}; use namada_core::storage::Epoch; @@ -244,6 +245,12 @@ pub fn liveness_sum_missed_votes_handle() -> LivenessSumMissedVotes { LivenessSumMissedVotes::open(key) } +/// Get the storage handle to the total active deltas +pub fn total_active_deltas_handle() -> TotalDeltas { + let key = storage_key::total_active_deltas_key(); + TotalDeltas::open(key) +} + // ---- Storage read + write ---- /// Read PoS parameters @@ -387,6 +394,52 @@ where storage.write(&key, address) } +/// Read last epoch's staked ratio. +pub fn read_last_staked_ratio( + storage: &S, +) -> namada_storage::Result> +where + S: StorageRead, +{ + let key = storage_key::last_staked_ratio_key(); + storage.read(&key) +} + +/// Write last epoch's staked ratio. +pub fn write_last_staked_ratio( + storage: &mut S, + ratio: Dec, +) -> namada_storage::Result<()> +where + S: StorageRead + StorageWrite, +{ + let key = storage_key::last_staked_ratio_key(); + storage.write(&key, ratio) +} + +/// Read last epoch's PoS inflation amount. +pub fn read_last_pos_inflation_amount( + storage: &S, +) -> namada_storage::Result> +where + S: StorageRead, +{ + let key = storage_key::last_pos_inflation_amount_key(); + storage.read(&key) +} + +/// Write last epoch's pos inflation amount. +pub fn write_last_pos_inflation_amount( + storage: &mut S, + inflation: token::Amount, +) -> namada_storage::Result<()> +where + S: StorageRead + StorageWrite, +{ + let key = storage_key::last_pos_inflation_amount_key(); + storage.write(&key, inflation) +} + /// Read PoS validator's delta value. pub fn read_validator_deltas_value( storage: &S, @@ -469,6 +522,26 @@ where Ok(amnt) } +/// Read PoS total stake (sum of deltas). 
+pub fn read_total_active_stake( + storage: &S, + params: &PosParams, + epoch: namada_core::storage::Epoch, +) -> namada_storage::Result +where + S: StorageRead, +{ + let handle = total_active_deltas_handle(); + let amnt = handle + .get_sum(storage, epoch, params)? + .map(|change| { + debug_assert!(change.non_negative()); + token::Amount::from_change(change) + }) + .unwrap_or_default(); + Ok(amnt) +} + /// Read all addresses from consensus validator set. pub fn read_consensus_validator_set_addresses( storage: &S, @@ -617,22 +690,44 @@ pub fn update_total_deltas( delta: token::Change, current_epoch: namada_core::storage::Epoch, offset_opt: Option, + update_active_voting_power: bool, ) -> namada_storage::Result<()> where S: StorageRead + StorageWrite, { - let handle = total_deltas_handle(); let offset = offset_opt.unwrap_or(params.pipeline_len); - let val = handle + let total_deltas = total_deltas_handle(); + let total_active_deltas = total_active_deltas_handle(); + + // Update total deltas + let total_deltas_val = total_deltas .get_delta_val(storage, current_epoch + offset)? .unwrap_or_default(); - handle.set( + total_deltas.set( storage, - val.checked_add(&delta) + total_deltas_val + .checked_add(&delta) .expect("Total deltas updated amount should not overflow"), current_epoch, offset, - ) + )?; + + // Update total active voting power + if update_active_voting_power { + let active_delta = total_active_deltas + .get_delta_val(storage, current_epoch + offset)? + .unwrap_or_default(); + total_active_deltas.set( + storage, + active_delta.checked_add(&delta).expect( + "Total active voting power updated amount should not overflow", + ), + current_epoch, + offset, + )?; + } + + Ok(()) } /// Read PoS validator's email. 
diff --git a/crates/proof_of_stake/src/storage_key.rs b/crates/proof_of_stake/src/storage_key.rs index 4efb24a06b..cab2c2b8ac 100644 --- a/crates/proof_of_stake/src/storage_key.rs +++ b/crates/proof_of_stake/src/storage_key.rs @@ -58,6 +58,9 @@ const VALIDATOR_AVATAR_KEY: &str = "avatar"; const LIVENESS_PREFIX: &str = "liveness"; const LIVENESS_MISSED_VOTES: &str = "missed_votes"; const LIVENESS_MISSED_VOTES_SUM: &str = "sum_missed_votes"; +const LAST_STAKED_RATIO_KEY: &str = "last_staked_ratio"; +const LAST_POS_INFLATION_AMOUNT_KEY: &str = "last_inflation_amount"; +const TOTAL_ACTIVE_DELTAS_KEY: &str = "total_active_deltas"; /// Is the given key a PoS storage key? pub fn is_pos_key(key: &Key) -> bool { @@ -1044,3 +1047,39 @@ pub fn liveness_sum_missed_votes_key() -> Key { .push(&LIVENESS_MISSED_VOTES_SUM.to_owned()) .expect("Cannot obtain a storage key") } + +/// Storage key for the last epoch's staked ratio. +pub fn last_staked_ratio_key() -> Key { + Key::from(ADDRESS.to_db_key()) + .push(&LAST_STAKED_RATIO_KEY.to_owned()) + .expect("Cannot obtain a storage key") +} + +/// Storage key for the last epoch's PoS inflation amount. +pub fn last_pos_inflation_amount_key() -> Key { + Key::from(ADDRESS.to_db_key()) + .push(&LAST_POS_INFLATION_AMOUNT_KEY.to_owned()) + .expect("Cannot obtain a storage key") +} + +/// Storage key for total active deltas (Consensus, Below-Capacity, and +/// Below-threshold validators). +pub fn total_active_deltas_key() -> Key { + Key::from(ADDRESS.to_db_key()) + .push(&TOTAL_ACTIVE_DELTAS_KEY.to_owned()) + .expect("Cannot obtain a storage key") +} + +/// Is storage key for total active deltas? 
+pub fn is_total_active_deltas_key(key: &Key) -> bool { + if key.segments.len() >= 2 { + match &key.segments[..2] { + [DbKeySeg::AddressSeg(addr), DbKeySeg::StringSeg(prefix)] => { + addr == &ADDRESS && prefix == TOTAL_ACTIVE_DELTAS_KEY + } + _ => false, + } + } else { + false + } +} diff --git a/crates/proof_of_stake/src/tests/state_machine.rs b/crates/proof_of_stake/src/tests/state_machine.rs index 7bf14ce6d5..16172f8458 100644 --- a/crates/proof_of_stake/src/tests/state_machine.rs +++ b/crates/proof_of_stake/src/tests/state_machine.rs @@ -1,12 +1,13 @@ //! Test PoS transitions with a state machine use std::cmp; -use std::collections::{BTreeMap, BTreeSet, HashSet, VecDeque}; +use std::collections::{BTreeMap, BTreeSet, VecDeque}; use std::ops::Deref; use assert_matches::assert_matches; use itertools::Itertools; use namada_core::address::{self, Address}; +use namada_core::collections::HashSet; use namada_core::dec::Dec; use namada_core::key; use namada_core::key::common::PublicKey; @@ -737,6 +738,7 @@ impl StateMachineTest for ConcretePosState { ¶ms, current_epoch, infraction_epoch, + height, slash_type, &address, ); @@ -1355,6 +1357,7 @@ impl ConcretePosState { params: &PosParams, current_epoch: Epoch, infraction_epoch: Epoch, + infraction_height: u64, slash_type: SlashType, validator: &Address, ) { @@ -1391,7 +1394,7 @@ impl ConcretePosState { let slash = enqueued_slashes_handle() .at(&processing_epoch) .at(validator) - .back(&self.s) + .get(&self.s, &infraction_height) .unwrap(); if let Some(slash) = slash { assert_eq!(slash.epoch, infraction_epoch); @@ -2602,7 +2605,7 @@ impl ReferenceStateMachine for AbstractPosState { .below_threshold_set .entry(current_epoch + offset) .or_default() - .remove(address); + .swap_remove(address); debug_assert!(removed); } else { // Just make sure the validator is already jailed @@ -3753,7 +3756,7 @@ impl AbstractPosState { ValidatorState::BelowThreshold => { // We know that this validator will be promoted into one of the // 
higher sets, so first remove from the below-threshold set. - below_thresh_set.remove(validator); + below_thresh_set.swap_remove(validator); let num_consensus = consensus_set.iter().fold(0, |sum, (_, validators)| { @@ -5211,12 +5214,16 @@ fn arb_slash(state: &AbstractPosState) -> impl Strategy { .checked_sub(state.params.unbonding_len) .unwrap_or_default()..=current_epoch) .prop_map(Epoch::from); - (arb_validator, arb_type, arb_epoch).prop_map( - |(validator, slash_type, infraction_epoch)| Transition::Misbehavior { - address: validator, - slash_type, - infraction_epoch, - height: 0, + let arb_height = 0_u64..10_000_u64; + + (arb_validator, arb_type, arb_epoch, arb_height).prop_map( + |(validator, slash_type, infraction_epoch, height)| { + Transition::Misbehavior { + address: validator, + slash_type, + infraction_epoch, + height, + } }, ) } diff --git a/crates/proof_of_stake/src/tests/state_machine_v2.rs b/crates/proof_of_stake/src/tests/state_machine_v2.rs index 9a2d1234d7..5c89bc6d98 100644 --- a/crates/proof_of_stake/src/tests/state_machine_v2.rs +++ b/crates/proof_of_stake/src/tests/state_machine_v2.rs @@ -1,6 +1,6 @@ //! Test PoS transitions with a state machine -use std::collections::{BTreeMap, BTreeSet, HashSet, VecDeque}; +use std::collections::{BTreeMap, BTreeSet, VecDeque}; use std::ops::{AddAssign, Deref}; use std::{cmp, mem}; @@ -8,6 +8,7 @@ use assert_matches::assert_matches; use derivative::Derivative; use itertools::Itertools; use namada_core::address::{self, Address}; +use namada_core::collections::HashSet; use namada_core::dec::Dec; use namada_core::key; use namada_core::key::common::PublicKey; @@ -768,7 +769,7 @@ impl AbstractPosState { ValidatorState::BelowThreshold => { // We know that this validator will be promoted into one of the // higher sets, so first remove from the below-threshold set. 
- below_thresh_set.remove(validator); + below_thresh_set.swap_remove(validator); let num_consensus = consensus_set.iter().fold(0, |sum, (_, validators)| { @@ -2690,6 +2691,7 @@ impl StateMachineTest for ConcretePosState { ¶ms, current_epoch, infraction_epoch, + height, slash_type, &address, ); @@ -3057,6 +3059,7 @@ impl ConcretePosState { params: &PosParams, current_epoch: Epoch, infraction_epoch: Epoch, + infraction_height: u64, slash_type: SlashType, validator: &Address, ) { @@ -3101,7 +3104,7 @@ impl ConcretePosState { let slash = enqueued_slashes_handle() .at(&processing_epoch) .at(validator) - .back(&self.s) + .get(&self.s, &infraction_height) .unwrap(); if let Some(slash) = slash { assert_eq!(slash.epoch, infraction_epoch); @@ -4072,7 +4075,7 @@ impl ReferenceStateMachine for AbstractPosState { .below_threshold_set .entry(current_epoch + offset) .or_default() - .remove(address); + .swap_remove(address); debug_assert!(removed); } else { // Just make sure the validator is already jailed @@ -4588,12 +4591,16 @@ fn arb_slash(state: &AbstractPosState) -> impl Strategy { .checked_sub(state.params.unbonding_len) .unwrap_or_default()..=current_epoch) .prop_map(Epoch::from); - (arb_validator, arb_type, arb_epoch).prop_map( - |(validator, slash_type, infraction_epoch)| Transition::Misbehavior { - address: validator, - slash_type, - infraction_epoch, - height: 0, + let arb_height = 0_u64..10_000_u64; + + (arb_validator, arb_type, arb_epoch, arb_height).prop_map( + |(validator, slash_type, infraction_epoch, height)| { + Transition::Misbehavior { + address: validator, + slash_type, + infraction_epoch, + height, + } }, ) } diff --git a/crates/proof_of_stake/src/tests/test_pos.rs b/crates/proof_of_stake/src/tests/test_pos.rs index 02e716bb54..0f24d0c71f 100644 --- a/crates/proof_of_stake/src/tests/test_pos.rs +++ b/crates/proof_of_stake/src/tests/test_pos.rs @@ -1,9 +1,10 @@ //! 
PoS system tests -use std::collections::{BTreeMap, HashSet}; +use std::collections::BTreeMap; use assert_matches::assert_matches; use namada_core::address::Address; +use namada_core::collections::HashSet; use namada_core::dec::Dec; use namada_core::key::testing::{common_sk_from_simple_seed, gen_keypair}; use namada_core::key::RefTo; @@ -17,7 +18,7 @@ use proptest::test_runner::Config; // Use `RUST_LOG=info` (or another tracing level) and `--nocapture` to see // `tracing` logs from tests use test_log::test; -use token::storage_key::minted_balance_key; +use token::get_effective_total_native_supply; use crate::parameters::testing::arb_pos_params; use crate::parameters::OwnedPosParams; @@ -224,7 +225,7 @@ fn test_test_init_genesis_aux( for (i, validator) in validators.into_iter().enumerate() { let addr = &validator.address; let self_bonds = bond_details - .remove(&BondId { + .swap_remove(&BondId { source: addr.clone(), validator: addr.clone(), }) @@ -1397,8 +1398,8 @@ fn test_update_rewards_products_aux(validators: Vec) { // Read some data before applying rewards let pos_balance_pre = read_balance(&s, &staking_token, &address::POS).unwrap(); - let gov_balance_pre = - read_balance(&s, &staking_token, &address::GOV).unwrap(); + let pgf_balance_pre = + read_balance(&s, &staking_token, &address::PGF).unwrap(); let num_consensus_validators = consensus_set.len() as u64; let accum_val = Dec::one() / num_consensus_validators; @@ -1415,10 +1416,7 @@ fn test_update_rewards_products_aux(validators: Vec) { .unwrap(); } - let total_native_tokens: token::Amount = s - .read(&minted_balance_key(&staking_token)) - .unwrap() - .expect("Total NAM balance should exist in storage"); + let total_native_tokens = get_effective_total_native_supply(&s).unwrap(); // Distribute inflation into rewards let last_epoch = current_epoch.prev(); @@ -1436,17 +1434,17 @@ fn test_update_rewards_products_aux(validators: Vec) { let pos_balance_post = read_balance(&s, &staking_token, &address::POS).unwrap(); 
- let gov_balance_post = - read_balance(&s, &staking_token, &address::GOV).unwrap(); + let pgf_balance_post = + read_balance(&s, &staking_token, &address::PGF).unwrap(); assert_eq!( - pos_balance_pre + gov_balance_pre + inflation, - pos_balance_post + gov_balance_post, - "Expected inflation to be minted to PoS and left-over amount to Gov" + pos_balance_pre + pgf_balance_pre + inflation, + pos_balance_post + pgf_balance_post, + "Expected inflation to be minted to PoS and left-over amount to PGF" ); let pos_credit = pos_balance_post - pos_balance_pre; - let gov_credit = gov_balance_post - gov_balance_pre; + let gov_credit = pgf_balance_post - pgf_balance_pre; assert!( pos_credit > gov_credit, "PoS must receive more tokens than Gov, but got {} in PoS and {} in \ diff --git a/crates/proof_of_stake/src/tests/test_slash_and_redel.rs b/crates/proof_of_stake/src/tests/test_slash_and_redel.rs index 34a9c574d8..9df89eeef4 100644 --- a/crates/proof_of_stake/src/tests/test_slash_and_redel.rs +++ b/crates/proof_of_stake/src/tests/test_slash_and_redel.rs @@ -1,9 +1,15 @@ +use std::collections::BTreeMap; use std::ops::Deref; use std::str::FromStr; use assert_matches::assert_matches; -use namada_core::address; +use namada_core::address::testing::{ + established_address_1, established_address_2, +}; +use namada_core::address::{self, Address}; use namada_core::dec::Dec; +use namada_core::key::testing::{keypair_1, keypair_2, keypair_3}; +use namada_core::key::RefTo; use namada_core::storage::{BlockHeight, Epoch}; use namada_core::token::NATIVE_MAX_DECIMAL_PLACES; use namada_state::testing::TestState; @@ -19,9 +25,10 @@ use crate::queries::bonds_and_unbonds; use crate::slashing::{process_slashes, slash}; use crate::storage::{ bond_handle, delegator_redelegated_bonds_handle, - delegator_redelegated_unbonds_handle, read_total_stake, - read_validator_stake, total_bonded_handle, total_unbonded_handle, - unbond_handle, validator_incoming_redelegations_handle, + 
delegator_redelegated_unbonds_handle, enqueued_slashes_handle, + read_total_stake, read_validator_stake, total_bonded_handle, + total_unbonded_handle, unbond_handle, + validator_incoming_redelegations_handle, validator_outgoing_redelegations_handle, validator_slashes_handle, validator_total_redelegated_bonded_handle, validator_total_redelegated_unbonded_handle, @@ -32,7 +39,7 @@ use crate::tests::helpers::{ test_slashes_with_unbonding_params, }; use crate::token::{credit_tokens, read_balance}; -use crate::types::{BondId, GenesisValidator, SlashType}; +use crate::types::{BondId, GenesisValidator, Slash, SlashType}; use crate::{ bond_tokens, redelegate_tokens, staking_token_address, token, unbond_tokens, withdraw_tokens, OwnedPosParams, RedelegationError, @@ -1500,3 +1507,144 @@ fn test_slashed_bond_amount_aux(validators: Vec) { let diff = val_stake - self_bond_amount - del_bond_amount; assert!(diff <= 2.into()); } + +#[test] +fn test_one_slash_per_block_height() { + let mut storage = TestState::default(); + let params = OwnedPosParams { + unbonding_len: 4, + validator_stake_threshold: token::Amount::zero(), + ..Default::default() + }; + + let validator1 = established_address_1(); + let validator2 = established_address_2(); + + let gen_validators = [ + GenesisValidator { + address: validator1.clone(), + tokens: 100.into(), + consensus_key: keypair_1().ref_to(), + protocol_key: keypair_3().ref_to(), + eth_cold_key: keypair_3().ref_to(), + eth_hot_key: keypair_3().ref_to(), + commission_rate: Default::default(), + max_commission_rate_change: Default::default(), + metadata: Default::default(), + }, + GenesisValidator { + address: validator2.clone(), + tokens: 100.into(), + consensus_key: keypair_2().ref_to(), + protocol_key: keypair_3().ref_to(), + eth_cold_key: keypair_3().ref_to(), + eth_hot_key: keypair_3().ref_to(), + commission_rate: Default::default(), + max_commission_rate_change: Default::default(), + metadata: Default::default(), + }, + ]; + + // Genesis + let 
current_epoch = storage.in_mem().block.epoch; + let params = test_init_genesis( + &mut storage, + params, + gen_validators.clone().into_iter(), + current_epoch, + ) + .unwrap(); + storage.commit_block().unwrap(); + + let enqueued_slashes = enqueued_slashes_handle(); + + let slash11 = Slash { + block_height: 0, + epoch: 0.into(), + r#type: SlashType::DuplicateVote, + rate: Dec::zero(), + }; + let slash12 = Slash { + block_height: 0, + epoch: 0.into(), + r#type: SlashType::LightClientAttack, + rate: Dec::zero(), + }; + let slash13 = Slash { + block_height: 1, + epoch: 0.into(), + r#type: SlashType::DuplicateVote, + rate: Dec::zero(), + }; + let slash21 = Slash { + block_height: 0, + epoch: 0.into(), + r#type: SlashType::LightClientAttack, + rate: Dec::zero(), + }; + let slash22 = Slash { + block_height: 0, + epoch: 0.into(), + r#type: SlashType::DuplicateVote, + rate: Dec::zero(), + }; + let slash23 = Slash { + block_height: 1, + epoch: 0.into(), + r#type: SlashType::DuplicateVote, + rate: Dec::zero(), + }; + + let processing_epoch = + current_epoch + params.slash_processing_epoch_offset(); + let enqueue = |stg: &mut TestState, slash: &Slash, validator: &Address| { + crate::slashing::slash( + stg, + ¶ms, + current_epoch, + slash.epoch, + slash.block_height, + slash.r#type, + validator, + current_epoch.next(), + ) + .unwrap(); + }; + + // Enqueue some of the slashes + enqueue(&mut storage, &slash11, &validator1); + enqueue(&mut storage, &slash21, &validator2); + enqueue(&mut storage, &slash13, &validator1); + enqueue(&mut storage, &slash23, &validator2); + + // Check + let res = enqueued_slashes + .get_data_handler() + .collect_map(&storage) + .unwrap(); + let exp = BTreeMap::from_iter([( + processing_epoch, + BTreeMap::from_iter([ + ( + validator1.clone(), + BTreeMap::from_iter([(0, slash11), (1, slash13)]), + ), + ( + validator2.clone(), + BTreeMap::from_iter([(0, slash21), (1, slash23)]), + ), + ]), + )]); + assert_eq!(res, exp); + + // Enqueue new slashes + 
enqueue(&mut storage, &slash12, &validator1); + enqueue(&mut storage, &slash22, &validator2); + + // Check that the slashes are still the same now + let res = enqueued_slashes + .get_data_handler() + .collect_map(&storage) + .unwrap(); + assert_eq!(res, exp); +} diff --git a/crates/proof_of_stake/src/tests/test_validator.rs b/crates/proof_of_stake/src/tests/test_validator.rs index da7b44a86d..3b8de8bb31 100644 --- a/crates/proof_of_stake/src/tests/test_validator.rs +++ b/crates/proof_of_stake/src/tests/test_validator.rs @@ -1320,8 +1320,20 @@ fn test_purge_validator_information_aux(validators: Vec) { // Check that there is validator data for epochs 0 - pipeline_len check_is_data(&s, current_epoch, Epoch(params.owned.pipeline_len)); + assert_eq!( + consensus_val_set.get_last_update(&s).unwrap().unwrap(), + Epoch(0) + ); + assert_eq!( + validator_positions.get_last_update(&s).unwrap().unwrap(), + Epoch(0) + ); + assert_eq!( + validator_positions.get_last_update(&s).unwrap().unwrap(), + Epoch(0) + ); - // Advance to epoch 1 + // Advance to epoch `default_past_epochs` for _ in 0..default_past_epochs { current_epoch = advance_epoch(&mut s, ¶ms); } @@ -1333,6 +1345,18 @@ fn test_purge_validator_information_aux(validators: Vec) { Epoch(0), Epoch(params.owned.pipeline_len + default_past_epochs), ); + assert_eq!( + consensus_val_set.get_last_update(&s).unwrap().unwrap(), + Epoch(default_past_epochs) + ); + assert_eq!( + validator_positions.get_last_update(&s).unwrap().unwrap(), + Epoch(default_past_epochs) + ); + assert_eq!( + validator_positions.get_last_update(&s).unwrap().unwrap(), + Epoch(default_past_epochs) + ); current_epoch = advance_epoch(&mut s, ¶ms); assert_eq!(current_epoch.0, default_past_epochs + 1); diff --git a/crates/proof_of_stake/src/types/mod.rs b/crates/proof_of_stake/src/types/mod.rs index 8b07dcba6e..8c6259e414 100644 --- a/crates/proof_of_stake/src/types/mod.rs +++ b/crates/proof_of_stake/src/types/mod.rs @@ -3,13 +3,14 @@ mod rev_order; use 
core::fmt::Debug; -use std::collections::{BTreeMap, HashMap}; +use std::collections::BTreeMap; use std::fmt::Display; use std::hash::Hash; use std::ops::Sub; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; use namada_core::address::Address; +use namada_core::collections::HashMap; use namada_core::dec::Dec; use namada_core::key::common; use namada_core::storage::{Epoch, KeySeg}; @@ -136,7 +137,7 @@ pub type ValidatorAddresses = crate::epoched::NestedEpoched< /// Slashes indexed by validator address and then block height (for easier /// retrieval and iteration when processing) -pub type ValidatorSlashes = NestedMap; +pub type ValidatorSlashes = NestedMap>; /// Epoched slashes, where the outer epoch key is the epoch in which the slash /// is processed diff --git a/crates/proof_of_stake/src/validator_set_update.rs b/crates/proof_of_stake/src/validator_set_update.rs index 8d15820b88..46cad686c8 100644 --- a/crates/proof_of_stake/src/validator_set_update.rs +++ b/crates/proof_of_stake/src/validator_set_update.rs @@ -1,8 +1,7 @@ //! Validator set updates -use std::collections::{HashMap, HashSet}; - use namada_core::address::Address; +use namada_core::collections::{HashMap, HashSet}; use namada_core::key::PublicKeyTmRawHash; use namada_core::storage::Epoch; use namada_core::token; @@ -521,11 +520,13 @@ where /// consensus set already. 
pub fn promote_next_below_capacity_validator_to_consensus( storage: &mut S, - epoch: Epoch, + current_epoch: Epoch, + offset: u64, ) -> namada_storage::Result<()> where S: StorageRead + StorageWrite, { + let epoch = current_epoch + offset; let below_cap_set = below_capacity_validator_set_handle().at(&epoch); let max_below_capacity_amount = get_max_below_capacity_validator_amount(&below_cap_set, storage)?; @@ -550,8 +551,8 @@ where validator_state_handle(&promoted_validator).set( storage, ValidatorState::Consensus, - epoch, - 0, + current_epoch, + offset, )?; } @@ -828,6 +829,7 @@ where .at(&val_stake) .insert(storage, val_position, val_address)?; } + // Purge consensus and below-capacity validator sets consensus_validator_set.update_data(storage, params, current_epoch)?; below_capacity_validator_set.update_data(storage, params, current_epoch)?; @@ -847,7 +849,6 @@ where let prev = new_positions_handle.insert(storage, validator, position)?; debug_assert!(prev.is_none()); } - validator_set_positions_handle.set_last_update(storage, current_epoch)?; // Purge old epochs of validator positions validator_set_positions_handle.update_data( diff --git a/crates/sdk/Cargo.toml b/crates/sdk/Cargo.toml index f4d05e74aa..77b6920fb7 100644 --- a/crates/sdk/Cargo.toml +++ b/crates/sdk/Cargo.toml @@ -34,6 +34,8 @@ async-client = ["async-trait"] async-send = [] +namada-eth-bridge = ["namada_ethereum_bridge/namada-eth-bridge"] + # for integration tests and test utilities testing = [ "masp_primitives/test-dependencies", @@ -71,6 +73,7 @@ migrations = [ namada_account = { path = "../account" } namada_core = { path = "../core" } namada_ethereum_bridge = { path = "../ethereum_bridge", default-features = false } +namada_gas = { path = "../gas" } namada_governance = { path = "../governance" } namada_ibc = { path = "../ibc" } namada_macros = { path = "../macros" } diff --git a/crates/sdk/src/args.rs b/crates/sdk/src/args.rs index b14d0240bf..3246ffb1ef 100644 --- a/crates/sdk/src/args.rs +++ 
b/crates/sdk/src/args.rs @@ -1,11 +1,11 @@ //! Structures encapsulating SDK arguments -use std::collections::HashMap; use std::path::PathBuf; use std::time::Duration as StdDuration; use namada_core::address::Address; use namada_core::chain::ChainId; +use namada_core::collections::HashMap; use namada_core::dec::Dec; use namada_core::ethereum_events::EthAddress; use namada_core::keccak::KeccakHash; @@ -317,6 +317,8 @@ pub struct TxIbcTransfer { pub timeout_height: Option, /// Timeout timestamp offset pub timeout_sec_offset: Option, + /// Refund target address when the shielded transfer failure + pub refund_target: Option, /// Memo pub memo: Option, /// Path to the TX WASM code file @@ -382,6 +384,14 @@ impl TxIbcTransfer { } } + /// Refund target address + pub fn refund_target(self, refund_target: C::TransferTarget) -> Self { + Self { + refund_target: Some(refund_target), + ..self + } + } + /// Memo pub fn memo(self, memo: String) -> Self { Self { @@ -417,8 +427,6 @@ pub struct InitProposal { pub tx: Tx, /// The proposal data pub proposal_data: C::Data, - /// Flag if proposal should be run offline - pub is_offline: bool, /// Flag if proposal is of type Pgf stewards pub is_pgf_stewards: bool, /// Flag if proposal is of type Pgf funding @@ -448,11 +456,6 @@ impl InitProposal { } } - /// Flag if proposal should be run offline - pub fn is_offline(self, is_offline: bool) -> Self { - Self { is_offline, ..self } - } - /// Flag if proposal is of type Pgf stewards pub fn is_pgf_stewards(self, is_pgf_stewards: bool) -> Self { Self { @@ -568,15 +571,11 @@ pub struct VoteProposal { /// Common tx arguments pub tx: Tx, /// Proposal id - pub proposal_id: Option, + pub proposal_id: u64, /// The vote pub vote: String, /// The address of the voter - pub voter: C::Address, - /// Flag if proposal vote should be run offline - pub is_offline: bool, - /// The proposal file path - pub proposal_data: Option, + pub voter_address: C::Address, /// Path to the TX WASM code file pub tx_code_path: 
PathBuf, } @@ -597,7 +596,7 @@ impl VoteProposal { /// Proposal id pub fn proposal_id(self, proposal_id: u64) -> Self { Self { - proposal_id: Some(proposal_id), + proposal_id, ..self } } @@ -608,19 +607,9 @@ impl VoteProposal { } /// The address of the voter - pub fn voter(self, voter: C::Address) -> Self { - Self { voter, ..self } - } - - /// Flag if proposal vote should be run offline - pub fn is_offline(self, is_offline: bool) -> Self { - Self { is_offline, ..self } - } - - /// The proposal file path - pub fn proposal_data(self, proposal_data: C::Data) -> Self { + pub fn voter(self, voter_address: C::Address) -> Self { Self { - proposal_data: Some(proposal_data), + voter_address, ..self } } @@ -753,6 +742,62 @@ pub struct TxBecomeValidator { pub unsafe_dont_encrypt: bool, } +impl TxBuilder for TxBecomeValidator { + fn tx(self, func: F) -> Self + where + F: FnOnce(Tx) -> Tx, + { + TxBecomeValidator { + tx: func(self.tx), + ..self + } + } +} + +impl TxBecomeValidator { + pub fn address(self, address: C::Address) -> Self { + Self { address, ..self } + } + + pub fn commission_rate(self, commission_rate: Dec) -> Self { + Self { + commission_rate, + ..self + } + } + + pub fn max_commission_rate_change( + self, + max_commission_rate_change: Dec, + ) -> Self { + Self { + max_commission_rate_change, + ..self + } + } + + pub fn email(self, email: String) -> Self { + Self { email, ..self } + } + + /// Path to the TX WASM code file + pub fn tx_code_path(self, tx_code_path: PathBuf) -> Self { + Self { + tx_code_path, + ..self + } + } +} + +impl TxBecomeValidator { + pub async fn build( + &self, + context: &impl Namada, + ) -> crate::error::Result<(namada_tx::Tx, SigningTxData)> { + tx::build_become_validator(context, self).await + } +} + /// Transaction to initialize a new account #[derive(Clone, Debug)] pub struct TxInitValidator { @@ -1281,6 +1326,19 @@ pub struct QueryBalance { pub token: Option, /// Whether not to convert balances pub no_conversions: bool, + /// Show IBC 
tokens + pub show_ibc_tokens: bool, +} + +/// Query IBC token(s) +#[derive(Clone, Debug)] +pub struct QueryIbcToken { + /// Common query args + pub query: Query, + /// The token address which could be a non-namada address + pub token: Option, + /// Address of an owner + pub owner: Option, } /// Query historical transfer(s) @@ -1397,51 +1455,50 @@ pub struct ConsensusKeyChange { pub tx_code_path: PathBuf, } -// impl TxBuilder for ConsensusKeyChange { -// fn tx(self, func: F) -> Self -// where -// F: FnOnce(Tx) -> Tx, -// { -// ConsensusKeyChange { -// tx: func(self.tx), -// ..self -// } -// } -// } - -// impl ConsensusKeyChange { -// /// Validator address (should be self) -// pub fn validator(self, validator: C::Address) -> Self { -// Self { validator, ..self } -// } - -// /// Value to which the tx changes the commission rate -// pub fn consensus_key(self, consensus_key: C::Keypair) -> Self { -// Self { -// consensus_key: Some(consensus_key), -// ..self -// } -// } - -// /// Path to the TX WASM code file -// pub fn tx_code_path(self, tx_code_path: PathBuf) -> Self { -// Self { -// tx_code_path, -// ..self -// } -// } -// } - -// impl ConsensusKeyChange { -// /// Build a transaction from this builder -// pub async fn build( -// &self, -// context: &impl Namada, -// ) -> crate::error::Result<(namada_tx::Tx, SigningTxData, -// Option)> { -// tx::build_change_consensus_key(context, self).await -// } -// } +impl TxBuilder for ConsensusKeyChange { + fn tx(self, func: F) -> Self + where + F: FnOnce(Tx) -> Tx, + { + ConsensusKeyChange { + tx: func(self.tx), + ..self + } + } +} + +impl ConsensusKeyChange { + /// Validator address (should be self) + pub fn validator(self, validator: C::Address) -> Self { + Self { validator, ..self } + } + + /// Value to which the tx changes the commission rate + pub fn consensus_key(self, consensus_key: C::PublicKey) -> Self { + Self { + consensus_key: Some(consensus_key), + ..self + } + } + + /// Path to the TX WASM code file + pub fn 
tx_code_path(self, tx_code_path: PathBuf) -> Self { + Self { + tx_code_path, + ..self + } + } +} + +impl ConsensusKeyChange { + /// Build a transaction from this builder + pub async fn build( + &self, + context: &impl Namada, + ) -> crate::error::Result<(namada_tx::Tx, SigningTxData)> { + tx::build_change_consensus_key(context, self).await + } +} #[derive(Clone, Debug)] /// Commission rate change args @@ -2502,4 +2559,6 @@ pub struct GenIbcShieldedTransfer { pub port_id: PortId, /// Channel ID via which the token is received pub channel_id: ChannelId, + /// Generate the shielded transfer for refunding + pub refund: bool, } diff --git a/crates/sdk/src/error.rs b/crates/sdk/src/error.rs index 23cea67723..c6efa2b5cb 100644 --- a/crates/sdk/src/error.rs +++ b/crates/sdk/src/error.rs @@ -186,6 +186,15 @@ pub enum TxSubmitError { /// No bonds found #[error("No bonds found")] NoBondFound, + /// No delegations found at epoch + #[error("The account {0} has no active delegations found at epoch {1}")] + NoDelegationsFound(Address, Epoch), + /// Cannot vote in governance + #[error( + "Validator {0} cannot vote in governance because the validator is \ + either jailed or inactive at the current epoch {1}" + )] + CannotVoteInGovernance(Address, Epoch), /// Lower bond amount than the unbond #[error( "The total bonds of the source {0} is lower than the amount to be \ @@ -309,6 +318,9 @@ pub enum TxSubmitError { /// An empty string was provided as a new email #[error("An empty string cannot be provided as a new email")] InvalidEmail, + /// The metadata string is too long + #[error("The provided metadata string is too long")] + MetadataTooLong, /// The consensus key is not Ed25519 #[error("The consensus key must be an ed25519 key")] ConsensusKeyNotEd25519, diff --git a/crates/sdk/src/eth_bridge/bridge_pool.rs b/crates/sdk/src/eth_bridge/bridge_pool.rs index 81fe057eb0..6d0dc6e53d 100644 --- a/crates/sdk/src/eth_bridge/bridge_pool.rs +++ b/crates/sdk/src/eth_bridge/bridge_pool.rs @@ 
-2,7 +2,6 @@ use std::borrow::Cow; use std::cmp::Ordering; -use std::collections::{HashMap, HashSet}; use std::sync::Arc; use borsh_ext::BorshSerializeExt; @@ -10,6 +9,7 @@ use ethbridge_bridge_contract::Bridge; use ethers::providers::Middleware; use futures::future::FutureExt; use namada_core::address::{Address, InternalAddress}; +use namada_core::collections::{HashMap, HashSet}; use namada_core::eth_abi::Encode; use namada_core::eth_bridge_pool::{ erc20_token_address, GasFee, PendingTransfer, TransferToEthereum, @@ -589,7 +589,10 @@ where &*eth_client, io, BlockOnEthSync { - deadline: Instant::now() + Duration::from_secs(60), + deadline: { + #[allow(clippy::disallowed_methods)] + Instant::now() + } + Duration::from_secs(60), delta_sleep: Duration::from_secs(1), }, ) @@ -1013,7 +1016,7 @@ mod recommendations { Uint::from_u64( voting_powers .iter() - .filter_map(|(a, &p)| sigs.get(a).map(|_| p)) + .filter_map(|(a, &p)| sigs.get(*a).map(|_| p)) .take_while(|p| { if power <= FractionalVotingPower::TWO_THIRDS { power += FractionalVotingPower::new( diff --git a/crates/sdk/src/eth_bridge/mod.rs b/crates/sdk/src/eth_bridge/mod.rs index 481c7fb8e5..bcbad77a87 100644 --- a/crates/sdk/src/eth_bridge/mod.rs +++ b/crates/sdk/src/eth_bridge/mod.rs @@ -49,7 +49,10 @@ where eth_syncing_status_timeout( client, DEFAULT_BACKOFF, - Instant::now() + DEFAULT_CEILING, + { + #[allow(clippy::disallowed_methods)] + Instant::now() + } + DEFAULT_CEILING, ) .await } diff --git a/crates/sdk/src/eth_bridge/validator_set.rs b/crates/sdk/src/eth_bridge/validator_set.rs index 59248885c4..59883ebc45 100644 --- a/crates/sdk/src/eth_bridge/validator_set.rs +++ b/crates/sdk/src/eth_bridge/validator_set.rs @@ -406,6 +406,7 @@ where &*eth_client, io, BlockOnEthSync { + #[allow(clippy::disallowed_methods)] deadline: Instant::now() + Duration::from_secs(60), delta_sleep: Duration::from_secs(1), }, diff --git a/crates/sdk/src/events/log.rs b/crates/sdk/src/events/log.rs index 3eb19f89e7..95e96574cd 100644 
--- a/crates/sdk/src/events/log.rs +++ b/crates/sdk/src/events/log.rs @@ -3,9 +3,10 @@ //! The log can only hold `N` events at a time, where `N` is a configurable //! parameter. If the log is holding `N` events, and a new event is logged, //! old events are pruned. + use circular_queue::CircularQueue; -use crate::events::Event; +use super::{EmitEvents, Event}; pub mod dumb_queries; @@ -41,6 +42,26 @@ impl Default for EventLog { } } +impl EmitEvents for EventLog { + #[inline] + fn emit(&mut self, event: E) + where + E: Into, + { + self.log_events(core::iter::once(event.into())); + } + + /// Emit a batch of [events](Event). + #[inline] + fn emit_many(&mut self, event_batch: B) + where + B: IntoIterator, + E: Into, + { + self.log_events(event_batch.into_iter().map(Into::into)); + } +} + impl EventLog { /// Return a new event log. pub fn new(params: Params) -> Self { @@ -91,28 +112,28 @@ mod tests { "DEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEF"; /// An accepted tx hash query. - macro_rules! accepted { + macro_rules! applied { ($hash:expr) => { - dumb_queries::QueryMatcher::accepted(Hash::try_from($hash).unwrap()) + dumb_queries::QueryMatcher::applied(Hash::try_from($hash).unwrap()) }; } /// Return a vector of mock `FinalizeBlock` events. 
fn mock_tx_events(hash: &str) -> Vec { let event_1 = Event { - event_type: EventType::Accepted, + event_type: EventType::Applied, level: EventLevel::Block, attributes: { - let mut attrs = std::collections::HashMap::new(); + let mut attrs = namada_core::collections::HashMap::new(); attrs.insert("hash".to_string(), hash.to_string()); attrs }, }; let event_2 = Event { - event_type: EventType::Applied, + event_type: EventType::Proposal, level: EventLevel::Block, attributes: { - let mut attrs = std::collections::HashMap::new(); + let mut attrs = namada_core::collections::HashMap::new(); attrs.insert("hash".to_string(), hash.to_string()); attrs }, @@ -137,7 +158,7 @@ mod tests { // inspect log let events_in_log: Vec<_> = - log.iter_with_matcher(accepted!(HASH)).cloned().collect(); + log.iter_with_matcher(applied!(HASH)).cloned().collect(); assert_eq!(events_in_log.len(), NUM_HEIGHTS); @@ -176,7 +197,7 @@ mod tests { // inspect log - it should be full let events_in_log: Vec<_> = - log.iter_with_matcher(accepted!(HASH)).cloned().collect(); + log.iter_with_matcher(applied!(HASH)).cloned().collect(); assert_eq!(events_in_log.len(), MATCHED_EVENTS); @@ -184,12 +205,12 @@ mod tests { assert_eq!(events[0], event); } - // add a new APPLIED event to the log, - // pruning the first ACCEPTED event we added + // add a new PROPOSAL event to the log, + // pruning the first APPLIED event we added log.log_events(Some(events[1].clone())); let events_in_log: Vec<_> = - log.iter_with_matcher(accepted!(HASH)).cloned().collect(); + log.iter_with_matcher(applied!(HASH)).cloned().collect(); const ACCEPTED_EVENTS: usize = MATCHED_EVENTS - 1; assert_eq!(events_in_log.len(), ACCEPTED_EVENTS); diff --git a/crates/sdk/src/events/log/dumb_queries.rs b/crates/sdk/src/events/log/dumb_queries.rs index 1d2b0527a2..adf645ab10 100644 --- a/crates/sdk/src/events/log/dumb_queries.rs +++ b/crates/sdk/src/events/log/dumb_queries.rs @@ -6,8 +6,7 @@ //! tm.event='NewBlock' AND .<$attr>='<$value>' //! 
``` -use std::collections::HashMap; - +use namada_core::collections::HashMap; use namada_core::hash::Hash; use namada_core::storage::BlockHeight; @@ -41,16 +40,6 @@ impl QueryMatcher { }) } - /// Returns a query matching the given accepted transaction hash. - pub fn accepted(tx_hash: Hash) -> Self { - let mut attributes = HashMap::new(); - attributes.insert("hash".to_string(), tx_hash.to_string()); - Self { - event_type: EventType::Accepted, - attributes, - } - } - /// Returns a query matching the given applied transaction hash. pub fn applied(tx_hash: Hash) -> Self { let mut attributes = HashMap::new(); @@ -132,16 +121,16 @@ mod tests { let mut attributes = HashMap::new(); attributes.insert("hash".to_string(), HASH.to_string()); let matcher = QueryMatcher { - event_type: EventType::Accepted, + event_type: EventType::Proposal, attributes, }; let tests = { let event_1 = Event { - event_type: EventType::Accepted, + event_type: EventType::Proposal, level: EventLevel::Block, attributes: { - let mut attrs = std::collections::HashMap::new(); + let mut attrs = namada_core::collections::HashMap::new(); attrs.insert("hash".to_string(), HASH.to_string()); attrs }, @@ -152,7 +141,7 @@ mod tests { event_type: EventType::Applied, level: EventLevel::Block, attributes: { - let mut attrs = std::collections::HashMap::new(); + let mut attrs = namada_core::collections::HashMap::new(); attrs.insert("hash".to_string(), HASH.to_string()); attrs }, diff --git a/crates/sdk/src/events/mod.rs b/crates/sdk/src/events/mod.rs index c160e08dc8..0d33f8dc89 100644 --- a/crates/sdk/src/events/mod.rs +++ b/crates/sdk/src/events/mod.rs @@ -1,9 +1,10 @@ //! Logic to do with events emitted by the ledger. 
pub mod log; -use std::collections::HashMap; - -pub use namada_core::event::{Event, EventError, EventLevel, EventType}; +use namada_core::collections::HashMap; +pub use namada_core::event::{ + extend, EmitEvents, Event, EventError, EventLevel, EventType, +}; use serde_json::Value; // use crate::ledger::governance::utils::ProposalEvent; @@ -22,7 +23,7 @@ impl Attributes { /// Get ownership of the value associated to the input key pub fn take(&mut self, key: &str) -> Option { - self.0.remove(key) + self.0.swap_remove(key) } } diff --git a/crates/sdk/src/lib.rs b/crates/sdk/src/lib.rs index 081865d26e..2407603010 100644 --- a/crates/sdk/src/lib.rs +++ b/crates/sdk/src/lib.rs @@ -29,7 +29,6 @@ pub mod migrations; pub mod queries; pub mod wallet; -use std::collections::HashSet; #[cfg(feature = "async-send")] pub use std::marker::Send as MaybeSend; #[cfg(feature = "async-send")] @@ -39,6 +38,7 @@ use std::str::FromStr; use args::{InputAmount, SdkTypes}; use namada_core::address::Address; +use namada_core::collections::HashSet; use namada_core::dec::Dec; use namada_core::ethereum_events::EthAddress; use namada_core::ibc::core::host::types::identifiers::{ChannelId, PortId}; @@ -263,6 +263,7 @@ pub trait Namada: Sized + MaybeSync + MaybeSend { port_id: PortId::from_str("transfer").unwrap(), timeout_height: None, timeout_sec_offset: None, + refund_target: None, memo: None, tx: self.tx_builder(), tx_code_path: PathBuf::from(TX_IBC_WASM), @@ -273,7 +274,6 @@ pub trait Namada: Sized + MaybeSync + MaybeSend { fn new_init_proposal(&self, proposal_data: Vec) -> args::InitProposal { args::InitProposal { proposal_data, - is_offline: false, is_pgf_stewards: false, is_pgf_funding: false, tx_code_path: PathBuf::from(TX_INIT_PROPOSAL), @@ -282,29 +282,33 @@ pub trait Namada: Sized + MaybeSync + MaybeSend { } /// Make a TxUpdateAccount builder from the given minimum set of arguments - fn new_update_account(&self, addr: Address) -> args::TxUpdateAccount { + fn new_update_account( + &self, 
+ addr: Address, + public_keys: Vec, + threshold: u8, + ) -> args::TxUpdateAccount { args::TxUpdateAccount { addr, vp_code_path: None, - public_keys: vec![], - threshold: None, + public_keys, + threshold: Some(threshold), tx_code_path: PathBuf::from(TX_UPDATE_ACCOUNT_WASM), tx: self.tx_builder(), } } /// Make a VoteProposal builder from the given minimum set of arguments - fn new_vote_prposal( + fn new_proposal_vote( &self, + proposal_id: u64, vote: String, - voter: Address, + voter_address: Address, ) -> args::VoteProposal { args::VoteProposal { vote, - voter, - proposal_id: None, - is_offline: false, - proposal_data: None, + voter_address, + proposal_id, tx_code_path: PathBuf::from(TX_VOTE_PROPOSAL), tx: self.tx_builder(), } @@ -329,10 +333,11 @@ pub trait Namada: Sized + MaybeSync + MaybeSend { fn new_change_consensus_key( &self, validator: Address, + consensus_key: common::PublicKey, ) -> args::ConsensusKeyChange { args::ConsensusKeyChange { validator, - consensus_key: None, + consensus_key: Some(consensus_key), tx_code_path: PathBuf::from(TX_CHANGE_CONSENSUS_KEY_WASM), unsafe_dont_encrypt: false, tx: self.tx_builder(), @@ -356,11 +361,16 @@ pub trait Namada: Sized + MaybeSync + MaybeSend { } /// Make a TxBecomeValidator builder from the given minimum set of arguments + #[allow(clippy::too_many_arguments)] fn new_become_validator( &self, address: Address, commission_rate: Dec, max_commission_rate_change: Dec, + consesus_key: common::PublicKey, + eth_cold_key: common::PublicKey, + eth_hot_key: common::PublicKey, + protocol_key: common::PublicKey, email: String, ) -> args::TxBecomeValidator { args::TxBecomeValidator { @@ -368,10 +378,10 @@ pub trait Namada: Sized + MaybeSync + MaybeSend { commission_rate, max_commission_rate_change, scheme: SchemeType::Ed25519, - consensus_key: None, - eth_cold_key: None, - eth_hot_key: None, - protocol_key: None, + consensus_key: Some(consesus_key), + eth_cold_key: Some(eth_cold_key), + eth_hot_key: Some(eth_hot_key), + 
protocol_key: Some(protocol_key), unsafe_dont_encrypt: false, tx_code_path: PathBuf::from(TX_BECOME_VALIDATOR_WASM), tx: self.tx_builder(), @@ -811,7 +821,7 @@ pub mod testing { arb_withdraw, }; use crate::tx::{ - Code, Commitment, Header, MaspBuilder, Section, Signature, + Authorization, Code, Commitment, Header, MaspBuilder, Section, }; #[derive(Debug, Clone)] @@ -967,7 +977,6 @@ pub mod testing { // Generate an arbitrary transaction type pub fn arb_tx_type()(tx_type in prop_oneof![ Just(TxType::Raw), - arb_decrypted_tx().prop_map(TxType::Decrypted), arb_wrapper_tx().prop_map(|x| TxType::Wrapper(Box::new(x))), ]) -> TxType { tx_type @@ -1175,7 +1184,7 @@ pub mod testing { let mut tx = Tx { header, sections: vec![] }; let content_hash = tx.add_section(Section::ExtraData(content_extra_data)).get_hash(); init_proposal.content = content_hash; - if let ProposalType::Default(Some(hash)) = &mut init_proposal.r#type { + if let ProposalType::DefaultWithWasm(hash) = &mut init_proposal.r#type { let type_hash = tx.add_section(Section::ExtraData(type_extra_data)).get_hash(); *hash = type_hash; } @@ -1505,16 +1514,16 @@ pub mod testing { 1..3, ), signer in option::of(arb_non_internal_address()), - ) -> Signature { + ) -> Authorization { if signer.is_some() { - Signature::new(targets, secret_keys, signer) + Authorization::new(targets, secret_keys, signer) } else { let secret_keys = secret_keys .into_values() .enumerate() .map(|(k, v)| (k as u8, v)) .collect(); - Signature::new(targets, secret_keys, signer) + Authorization::new(targets, secret_keys, signer) } } } @@ -1527,7 +1536,7 @@ pub mod testing { ) -> (Tx, TxData) { for sig in sigs { // Add all the generated signature sections - tx.0.add_section(Section::Signature(sig)); + tx.0.add_section(Section::Authorization(sig)); } (tx.0, tx.1) } diff --git a/crates/sdk/src/masp.rs b/crates/sdk/src/masp.rs index 5337c5830d..73d47363f2 100644 --- a/crates/sdk/src/masp.rs +++ b/crates/sdk/src/masp.rs @@ -1,7 +1,7 @@ //! 
MASP verification wrappers. use std::cmp::Ordering; -use std::collections::{btree_map, BTreeMap, BTreeSet, HashMap, HashSet}; +use std::collections::{btree_map, BTreeMap, BTreeSet}; use std::env; use std::fmt::Debug; use std::ops::Deref; @@ -51,6 +51,7 @@ use masp_proofs::prover::LocalTxProver; #[cfg(not(feature = "testing"))] use masp_proofs::sapling::SaplingVerificationContext; use namada_core::address::{Address, MASP}; +use namada_core::collections::{HashMap, HashSet}; use namada_core::dec::Dec; pub use namada_core::masp::{ encode_asset_type, AssetData, BalanceOwner, ExtendedViewingKey, @@ -63,6 +64,7 @@ use namada_ibc::IbcMessage; use namada_macros::BorshDeserializer; #[cfg(feature = "migrations")] use namada_migrations::*; +use namada_state::StorageError; use namada_token::{self as token, Denomination, MaspDigitPos, Transfer}; use namada_tx::data::{TxResult, WrapperTx}; use namada_tx::Tx; @@ -153,14 +155,20 @@ pub enum TransferErr { General(#[from] Error), } +#[derive(Debug, Clone)] +struct ExtractedMaspTx { + fee_unshielding: Option<(BTreeSet, Transaction)>, + inner_tx: Option<(BTreeSet, Transaction)>, +} + /// MASP verifying keys pub struct PVKs { /// spend verifying key - spend_vk: PreparedVerifyingKey, + pub spend_vk: PreparedVerifyingKey, /// convert verifying key - convert_vk: PreparedVerifyingKey, + pub convert_vk: PreparedVerifyingKey, /// output verifying key - output_vk: PreparedVerifyingKey, + pub output_vk: PreparedVerifyingKey, } lazy_static! { @@ -290,7 +298,7 @@ impl Authorization for PartialAuthorized { } /// Partially deauthorize the transparent bundle -fn partial_deauthorize( +pub fn partial_deauthorize( tx_data: &TransactionData, ) -> Option> { let transp = tx_data.transparent_bundle().and_then(|x| { @@ -323,20 +331,30 @@ fn partial_deauthorize( } /// Verify a shielded transaction. 
-pub fn verify_shielded_tx(transaction: &Transaction) -> bool { +pub fn verify_shielded_tx( + transaction: &Transaction, + mut consume_verify_gas: F, +) -> Result<(), StorageError> +where + F: FnMut(u64) -> std::result::Result<(), StorageError>, +{ tracing::info!("entered verify_shielded_tx()"); let sapling_bundle = if let Some(bundle) = transaction.sapling_bundle() { bundle } else { - return false; + return Err(StorageError::SimpleMessage("no sapling bundle")); }; let tx_data = transaction.deref(); // Partially deauthorize the transparent bundle let unauth_tx_data = match partial_deauthorize(tx_data) { Some(tx_data) => tx_data, - None => return false, + None => { + return Err(StorageError::SimpleMessage( + "Failed to partially de-authorize", + )); + } }; let txid_parts = unauth_tx_data.digest(TxIdDigester); @@ -358,21 +376,25 @@ pub fn verify_shielded_tx(transaction: &Transaction) -> bool { let mut ctx = SaplingVerificationContext::new(true); #[cfg(feature = "testing")] let mut ctx = testing::MockSaplingVerificationContext::new(true); - let spends_valid = sapling_bundle - .shielded_spends - .iter() - .all(|spend| check_spend(spend, sighash.as_ref(), &mut ctx, spend_vk)); - let converts_valid = sapling_bundle - .shielded_converts - .iter() - .all(|convert| check_convert(convert, &mut ctx, convert_vk)); - let outputs_valid = sapling_bundle - .shielded_outputs - .iter() - .all(|output| check_output(output, &mut ctx, output_vk)); - - if !(spends_valid && outputs_valid && converts_valid) { - return false; + for spend in &sapling_bundle.shielded_spends { + consume_verify_gas(namada_gas::MASP_VERIFY_SPEND_GAS)?; + if !check_spend(spend, sighash.as_ref(), &mut ctx, spend_vk) { + return Err(StorageError::SimpleMessage("Invalid shielded spend")); + } + } + for convert in &sapling_bundle.shielded_converts { + consume_verify_gas(namada_gas::MASP_VERIFY_CONVERT_GAS)?; + if !check_convert(convert, &mut ctx, convert_vk) { + return Err(StorageError::SimpleMessage( + "Invalid 
shielded conversion", + )); + } + } + for output in &sapling_bundle.shielded_outputs { + consume_verify_gas(namada_gas::MASP_VERIFY_OUTPUT_GAS)?; + if !check_output(output, &mut ctx, output_vk) { + return Err(StorageError::SimpleMessage("Invalid shielded output")); + } } tracing::info!("passed spend/output verification"); @@ -384,13 +406,17 @@ pub fn verify_shielded_tx(transaction: &Transaction) -> bool { assets_and_values.components().len() ); + consume_verify_gas(namada_gas::MASP_VERIFY_FINAL_GAS)?; let result = ctx.final_check( assets_and_values, sighash.as_ref(), sapling_bundle.authorization.binding_sig, ); tracing::info!("final check result {result}"); - result + if !result { + return Err(StorageError::SimpleMessage("MASP final check failed")); + } + Ok(()) } /// Get the path to MASP parameters from [`ENV_VAR_MASP_PARAMS_DIR`] env var or @@ -852,21 +878,36 @@ impl ShieldedContext { for (idx, tx_event) in txs_results { let tx = Tx::try_from(block[idx.0 as usize].as_ref()) .map_err(|e| Error::Other(e.to_string()))?; - let (changed_keys, masp_transaction) = Self::extract_masp_tx( + let ExtractedMaspTx { + fee_unshielding, + inner_tx, + } = Self::extract_masp_tx( &tx, ExtractShieldedActionArg::Event::(&tx_event), true, ) .await?; - - // Collect the current transaction - shielded_txs.insert( - IndexedTx { - height: height.into(), - index: idx, - }, - (epoch, changed_keys, masp_transaction), - ); + // Collect the current transaction(s) + fee_unshielding.and_then(|(changed_keys, masp_transaction)| { + shielded_txs.insert( + IndexedTx { + height: height.into(), + index: idx, + is_wrapper: true, + }, + (epoch, changed_keys, masp_transaction), + ) + }); + inner_tx.and_then(|(changed_keys, masp_transaction)| { + shielded_txs.insert( + IndexedTx { + height: height.into(), + index: idx, + is_wrapper: false, + }, + (epoch, changed_keys, masp_transaction), + ) + }); } } @@ -878,29 +919,83 @@ impl ShieldedContext { tx: &Tx, action_arg: ExtractShieldedActionArg<'args, C>, 
check_header: bool, - ) -> Result<(BTreeSet, Transaction), Error> { - let maybe_transaction = if check_header { - let tx_header = tx.header(); - // NOTE: simply looking for masp sections attached to the tx - // is not safe. We don't validate the sections attached to a - // transaction se we could end up with transactions carrying - // an unnecessary masp section. We must instead look for the - // required masp sections in the signed commitments (hashes) - // of the transactions' headers/data sections - if let Some(wrapper_header) = tx_header.wrapper() { - let hash = - wrapper_header.unshield_section_hash.ok_or_else(|| { + ) -> Result { + // We use the changed keys instead of the Transfer object + // because those are what the masp validity predicate works on + let (wrapper_changed_keys, changed_keys) = + if let ExtractShieldedActionArg::Event(tx_event) = action_arg { + let tx_result_str = tx_event + .attributes + .iter() + .find_map(|attr| { + if attr.key == "inner_tx" { + Some(&attr.value) + } else { + None + } + }) + .ok_or_else(|| { Error::Other( - "Missing expected fee unshielding section hash" - .to_string(), + "Missing required tx result in event".to_string(), ) })?; + let result = TxResult::from_str(tx_result_str) + .map_err(|e| Error::Other(e.to_string()))?; + (result.wrapper_changed_keys, result.changed_keys) + } else { + (Default::default(), Default::default()) + }; - let masp_transaction = tx + let tx_header = tx.header(); + // NOTE: simply looking for masp sections attached to the tx + // is not safe. We don't validate the sections attached to a + // transaction se we could end up with transactions carrying + // an unnecessary masp section. 
We must instead look for the + // required masp sections in the signed commitments (hashes) + // of the transactions' headers/data sections + let wrapper_header = tx_header + .wrapper() + .expect("All transactions must have a wrapper"); + let maybe_fee_unshield = if let (Some(hash), true) = + (wrapper_header.unshield_section_hash, check_header) + { + let masp_transaction = tx + .get_section(&hash) + .ok_or_else(|| { + Error::Other("Missing expected masp section".to_string()) + })? + .masp_tx() + .ok_or_else(|| { + Error::Other("Missing masp transaction".to_string()) + })?; + + Some((wrapper_changed_keys, masp_transaction)) + } else { + None + }; + + // Expect transaction + let tx_data = tx + .data() + .ok_or_else(|| Error::Other("Missing data section".to_string()))?; + let maybe_masp_tx = match Transfer::try_from_slice(&tx_data) { + Ok(transfer) => Some((changed_keys, transfer)), + Err(_) => { + // This should be a MASP over IBC transaction, it + // could be a ShieldedTransfer or an Envelope + // message, need to try both + extract_payload_from_shielded_action::(&tx_data, action_arg) + .await + .ok() + } + } + .map(|(changed_keys, transfer)| { + if let Some(hash) = transfer.shielded { + let masp_tx = tx .get_section(&hash) .ok_or_else(|| { Error::Other( - "Missing expected masp section".to_string(), + "Missing masp section in transaction".to_string(), ) })? 
.masp_tx() @@ -908,113 +1003,18 @@ impl ShieldedContext { Error::Other("Missing masp transaction".to_string()) })?; - // We use the changed keys instead of the Transfer object - // because those are what the masp validity predicate works on - let changed_keys = - if let ExtractShieldedActionArg::Event(tx_event) = - action_arg - { - let tx_result_str = tx_event - .attributes - .iter() - .find_map(|attr| { - if attr.key == "inner_tx" { - Some(&attr.value) - } else { - None - } - }) - .ok_or_else(|| { - Error::Other( - "Missing required tx result in event" - .to_string(), - ) - })?; - TxResult::from_str(tx_result_str) - .map_err(|e| Error::Other(e.to_string()))? - .changed_keys - } else { - BTreeSet::default() - }; - - Some((changed_keys, masp_transaction)) + Ok::<_, Error>(Some((changed_keys, masp_tx))) } else { - None + Ok(None) } - } else { - None - }; - - let result = if let Some(tx) = maybe_transaction { - tx - } else { - // Expect decrypted transaction - let tx_data = tx.data().ok_or_else(|| { - Error::Other("Missing data section".to_string()) - })?; - match Transfer::try_from_slice(&tx_data) { - Ok(transfer) => { - let masp_transaction = tx - .get_section(&transfer.shielded.ok_or_else(|| { - Error::Other( - "Missing masp section hash".to_string(), - ) - })?) - .ok_or_else(|| { - Error::Other( - "Missing masp section in transaction" - .to_string(), - ) - })? - .masp_tx() - .ok_or_else(|| { - Error::Other("Missing masp transaction".to_string()) - })?; + }) + .transpose()? 
+ .flatten(); - // We use the changed keys instead of the Transfer object - // because those are what the masp validity predicate works - // on - let changed_keys = - if let ExtractShieldedActionArg::Event(tx_event) = - action_arg - { - let tx_result_str = tx_event - .attributes - .iter() - .find_map(|attr| { - if attr.key == "inner_tx" { - Some(&attr.value) - } else { - None - } - }) - .ok_or_else(|| { - Error::Other( - "Missing required tx result in event" - .to_string(), - ) - })?; - TxResult::from_str(tx_result_str) - .map_err(|e| Error::Other(e.to_string()))? - .changed_keys - } else { - BTreeSet::default() - }; - (changed_keys, masp_transaction) - } - Err(_) => { - // This should be a MASP over IBC transaction, it - // could be a ShieldedTransfer or an Envelope - // message, need to try both - - extract_payload_from_shielded_action::( - &tx_data, action_arg, - ) - .await? - } - } - }; - Ok(result) + Ok(ExtractedMaspTx { + fee_unshielding: maybe_fee_unshield, + inner_tx: maybe_masp_tx, + }) } /// Applies the given transaction to the supplied context. More precisely, @@ -1776,7 +1776,11 @@ impl ShieldedContext { )), false, ) - .await?; + .await? + .inner_tx + .ok_or_else(|| { + Error::Other("Missing shielded inner portion of pinned tx".into()) + })?; // Accumulate the combined output note value into this Amount let mut val_acc = I128Sum::zero(); @@ -2011,6 +2015,7 @@ impl ShieldedContext { crate::rpc::query_block(context.client()) .await? 
.map_or_else(|| 1, |block| u64::from(block.height)); + #[allow(clippy::disallowed_methods)] let current_time = DateTimeUtc::now(); let delta_time = expiration.0.signed_duration_since(current_time.0); @@ -2357,10 +2362,12 @@ impl ShieldedContext { || IndexedTx { height: BlockHeight::first(), index: TxIndex(0), + is_wrapper: false, }, |indexed| IndexedTx { height: indexed.height, index: indexed.index + 1, + is_wrapper: false, }, ); self.sync_status = ContextSyncStatus::Speculative; @@ -2453,9 +2460,14 @@ impl ShieldedContext { let idx = TxIndex(response_tx.index); // Only process yet unprocessed transactions which have // been accepted by node VPs - let should_process = !transfers - .contains_key(&IndexedTx { height, index: idx }) - && block_results[u64::from(height) as usize] + // TODO: Check that wrappers shouldn't be considered + // here + let should_process = + !transfers.contains_key(&IndexedTx { + height, + index: idx, + is_wrapper: false, + }) && block_results[u64::from(height) as usize] .is_accepted(idx.0 as usize); if !should_process { continue; @@ -2490,7 +2502,11 @@ impl ShieldedContext { // No shielded accounts are affected by this // Transfer transfers.insert( - IndexedTx { height, index: idx }, + IndexedTx { + height, + index: idx, + is_wrapper: false, + }, (epoch, delta, TransactionDelta::new()), ); } @@ -2607,7 +2623,6 @@ async fn get_indexed_masp_events_at_height( None } }); - match tx_index { Some(idx) => { if idx >= first_idx_to_query { @@ -2628,115 +2643,65 @@ enum ExtractShieldedActionArg<'args, C: Client + Sync> { Request((&'args C, BlockHeight, Option)), } -// Extract the changed keys and Transaction objects from a masp over ibc message +// Extract the changed keys and Transaction hash from a masp over ibc message async fn extract_payload_from_shielded_action<'args, C: Client + Sync>( tx_data: &[u8], args: ExtractShieldedActionArg<'args, C>, -) -> Result<(BTreeSet, Transaction), Error> { +) -> Result<(BTreeSet, Transfer), Error> { let message = 
namada_ibc::decode_message(tx_data) .map_err(|e| Error::Other(e.to_string()))?; let result = match message { - IbcMessage::ShieldedTransfer(msg) => { - let tx_event = match args { - ExtractShieldedActionArg::Event(event) => event, - ExtractShieldedActionArg::Request(_) => { - return Err(Error::Other( - "Unexpected event request for ShieldedTransfer" - .to_string(), - )); - } - }; + IbcMessage::Transfer(msg) => { + let tx_result = get_sending_result(args)?; - let changed_keys = tx_event - .attributes - .iter() - .find_map(|attribute| { - if attribute.key == "inner_tx" { - let tx_result = - TxResult::from_str(&attribute.value).unwrap(); - Some(tx_result.changed_keys) - } else { - None - } - }) - .ok_or_else(|| { - Error::Other( - "Couldn't find changed keys in the event for the \ - provided transaction" - .to_string(), - ) - })?; + let transfer = msg.transfer.ok_or_else(|| { + Error::Other("Missing masp tx in the ibc message".to_string()) + })?; - (changed_keys, msg.shielded_transfer.masp_tx) + (tx_result.changed_keys, transfer) } - IbcMessage::Envelope(_) => { - let tx_event = match args { - ExtractShieldedActionArg::Event(event) => { - std::borrow::Cow::Borrowed(event) - } - ExtractShieldedActionArg::Request((client, height, index)) => { - std::borrow::Cow::Owned( - get_indexed_masp_events_at_height( - client, height, index, - ) - .await? - .ok_or_else(|| { - Error::Other(format!( - "Missing required ibc event at block height {}", - height - )) - })? - .first() - .ok_or_else(|| { - Error::Other(format!( - "Missing required ibc event at block height {}", - height - )) - })? 
- .1 - .to_owned(), - ) - } - }; + IbcMessage::NftTransfer(msg) => { + let tx_result = get_sending_result(args)?; - tx_event - .attributes - .iter() - .find_map(|attribute| { - if attribute.key == "inner_tx" { - let tx_result = - TxResult::from_str(&attribute.value).unwrap(); - for ibc_event in &tx_result.ibc_events { - let event = - namada_core::ibc::get_shielded_transfer( - ibc_event, - ) - .ok() - .flatten(); - if let Some(transfer) = event { - return Some(( - tx_result.changed_keys, - transfer.masp_tx, - )); - } - } - None - } else { - None - } - }) - .ok_or_else(|| { - Error::Other( - "Couldn't deserialize masp tx to ibc message envelope" - .to_string(), - ) - })? + let transfer = msg.transfer.ok_or_else(|| { + Error::Other("Missing masp tx in the ibc message".to_string()) + })?; + + (tx_result.changed_keys, transfer) + } + IbcMessage::RecvPacket(msg) => { + let tx_result = get_receiving_result(args).await?; + + let transfer = msg.transfer.ok_or_else(|| { + Error::Other("Missing masp tx in the ibc message".to_string()) + })?; + + (tx_result.changed_keys, transfer) } - _ => { + IbcMessage::AckPacket(msg) => { + // Refund tokens by the ack message + let tx_result = get_receiving_result(args).await?; + + let transfer = msg.transfer.ok_or_else(|| { + Error::Other("Missing masp tx in the ibc message".to_string()) + })?; + + (tx_result.changed_keys, transfer) + } + IbcMessage::Timeout(msg) => { + // Refund tokens by the timeout message + let tx_result = get_receiving_result(args).await?; + + let transfer = msg.transfer.ok_or_else(|| { + Error::Other("Missing masp tx in the ibc message".to_string()) + })?; + + (tx_result.changed_keys, transfer) + } + IbcMessage::Envelope(_) => { return Err(Error::Other( - "Couldn't deserialize masp tx to a valid ibc message" - .to_string(), + "Unexpected ibc message for masp".to_string(), )); } }; @@ -2744,6 +2709,78 @@ async fn extract_payload_from_shielded_action<'args, C: Client + Sync>( Ok(result) } +fn get_sending_result( + 
args: ExtractShieldedActionArg<'_, C>, +) -> Result { + let tx_event = match args { + ExtractShieldedActionArg::Event(event) => event, + ExtractShieldedActionArg::Request(_) => { + return Err(Error::Other( + "Unexpected event request for ShieldedTransfer".to_string(), + )); + } + }; + + get_tx_result(tx_event) +} + +async fn get_receiving_result( + args: ExtractShieldedActionArg<'_, C>, +) -> Result { + let tx_event = match args { + ExtractShieldedActionArg::Event(event) => { + std::borrow::Cow::Borrowed(event) + } + ExtractShieldedActionArg::Request((client, height, index)) => { + std::borrow::Cow::Owned( + get_indexed_masp_events_at_height(client, height, index) + .await? + .ok_or_else(|| { + Error::Other(format!( + "Missing required ibc event at block height {}", + height + )) + })? + .first() + .ok_or_else(|| { + Error::Other(format!( + "Missing required ibc event at block height {}", + height + )) + })? + .1 + .to_owned(), + ) + } + }; + + get_tx_result(&tx_event) +} + +fn get_tx_result( + tx_event: &crate::tendermint::abci::Event, +) -> Result { + tx_event + .attributes + .iter() + .find_map(|attribute| { + if attribute.key == "inner_tx" { + let tx_result = TxResult::from_str(&attribute.value) + .expect("The event value should be parsable"); + Some(tx_result) + } else { + None + } + }) + .ok_or_else(|| { + Error::Other( + "Couldn't find changed keys in the event for the provided \ + transaction" + .to_string(), + ) + }) +} + mod tests { /// quick and dirty test. will fail on size check #[test] diff --git a/crates/sdk/src/queries/shell.rs b/crates/sdk/src/queries/shell.rs index 5f4295d08f..d6cb69ad88 100644 --- a/crates/sdk/src/queries/shell.rs +++ b/crates/sdk/src/queries/shell.rs @@ -100,9 +100,6 @@ router! {SHELL, // Block results access - read bit-vec ( "results" ) -> Vec = read_results, - // was the transaction accepted? - ( "accepted" / [tx_hash: Hash] ) -> Option = accepted, - // was the transaction applied? 
( "applied" / [tx_hash: Hash] ) -> Option = applied, @@ -511,23 +508,6 @@ where Ok(data) } -fn accepted( - ctx: RequestCtx<'_, D, H, V, T>, - tx_hash: Hash, -) -> namada_storage::Result> -where - D: 'static + DB + for<'iter> DBIter<'iter> + Sync, - H: 'static + StorageHasher + Sync, -{ - let matcher = dumb_queries::QueryMatcher::accepted(tx_hash); - Ok(ctx - .event_log - .iter_with_matcher(matcher) - .by_ref() - .next() - .cloned()) -} - fn applied( ctx: RequestCtx<'_, D, H, V, T>, tx_hash: Hash, diff --git a/crates/sdk/src/queries/shell/eth_bridge.rs b/crates/sdk/src/queries/shell/eth_bridge.rs index 9df529df89..28f112c37e 100644 --- a/crates/sdk/src/queries/shell/eth_bridge.rs +++ b/crates/sdk/src/queries/shell/eth_bridge.rs @@ -1,12 +1,12 @@ //! Ethereum bridge related shell queries. use std::borrow::Cow; -use std::collections::{HashMap, HashSet}; use std::str::FromStr; use borsh::{BorshDeserialize, BorshSerialize}; use borsh_ext::BorshSerializeExt; use namada_core::address::Address; +use namada_core::collections::{HashMap, HashSet}; use namada_core::eth_abi::{Encode, EncodeCell}; use namada_core::eth_bridge_pool::{PendingTransfer, PendingTransferAppendix}; use namada_core::ethereum_events::{ @@ -276,7 +276,7 @@ where }); } else { for hash in store.keys() { - if transfer_hashes.remove(hash) { + if transfer_hashes.swap_remove(hash) { status.pending.insert(hash.clone()); } if transfer_hashes.is_empty() { @@ -315,7 +315,7 @@ where .as_str() .try_into() .expect("We must have a valid KeccakHash"); - if !transfer_hashes.remove(&tx_hash) { + if !transfer_hashes.swap_remove(&tx_hash) { return None; } Some((tx_hash, is_relayed, transfer_hashes.is_empty())) diff --git a/crates/sdk/src/queries/vp/pos.rs b/crates/sdk/src/queries/vp/pos.rs index f4805ee3bc..f7f79b40af 100644 --- a/crates/sdk/src/queries/vp/pos.rs +++ b/crates/sdk/src/queries/vp/pos.rs @@ -1,9 +1,10 @@ //! 
Queries router and handlers for PoS validity predicate -use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet}; +use std::collections::{BTreeMap, BTreeSet}; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; use namada_core::address::Address; +use namada_core::collections::{HashMap, HashSet}; use namada_core::key::common; use namada_core::storage::Epoch; use namada_core::token; diff --git a/crates/sdk/src/queries/vp/token.rs b/crates/sdk/src/queries/vp/token.rs index cf6e1b9c42..18e994ebc9 100644 --- a/crates/sdk/src/queries/vp/token.rs +++ b/crates/sdk/src/queries/vp/token.rs @@ -3,7 +3,9 @@ use namada_core::address::Address; use namada_core::token; use namada_state::{DBIter, StorageHasher, DB}; -use namada_token::{read_denom, read_total_supply}; +use namada_token::{ + get_effective_total_native_supply, read_denom, read_total_supply, +}; use crate::queries::RequestCtx; @@ -34,7 +36,12 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - read_total_supply(ctx.state, &addr) + let native_token = ctx.state.in_mem().native_token.clone(); + if addr == native_token { + get_effective_total_native_supply(ctx.state) + } else { + read_total_supply(ctx.state, &addr) + } } #[cfg(any(test, feature = "async-client"))] diff --git a/crates/sdk/src/rpc.rs b/crates/sdk/src/rpc.rs index 9b7de172fb..f5ff2658a5 100644 --- a/crates/sdk/src/rpc.rs +++ b/crates/sdk/src/rpc.rs @@ -1,7 +1,7 @@ //! 
SDK RPC queries use std::cell::Cell; -use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet}; +use std::collections::{BTreeMap, BTreeSet}; use std::ops::ControlFlow; use std::str::FromStr; @@ -11,7 +11,9 @@ use masp_primitives::merkle_tree::MerklePath; use masp_primitives::sapling::Node; use namada_account::Account; use namada_core::address::{Address, InternalAddress}; +use namada_core::collections::{HashMap, HashSet}; use namada_core::hash::Hash; +use namada_core::ibc::IbcTokenHash; use namada_core::key::common; use namada_core::storage::{ BlockHeight, BlockResults, Epoch, Key, PrefixValue, @@ -28,7 +30,7 @@ use namada_governance::utils::{ compute_proposal_result, ProposalResult, ProposalVotes, Vote, }; use namada_ibc::storage::{ - ibc_denom_key, ibc_denom_key_prefix, is_ibc_denom_key, + ibc_trace_key, ibc_trace_key_prefix, is_ibc_trace_key, }; use namada_parameters::{storage as params_storage, EpochDuration}; use namada_proof_of_stake::parameters::PosParams; @@ -103,9 +105,6 @@ pub async fn query_tx_status( "Transaction status query deadline of {deadline:?} exceeded" ); match status { - TxEventQuery::Accepted(_) => { - Error::Tx(TxSubmitError::AcceptTimeout) - } TxEventQuery::Applied(_) => { Error::Tx(TxSubmitError::AppliedTimeout) } @@ -384,9 +383,7 @@ where .await, )?; if response.data.is_empty() { - return Err(Error::from(QueryError::General(format!( - "No data found in {key}" - )))); + return Err(Error::from(QueryError::NoSuchKey(key.to_string()))); } T::try_from_slice(&response.data[..]) .map_err(|err| Error::from(EncodingError::Decoding(err.to_string()))) @@ -460,8 +457,6 @@ pub async fn query_has_storage_key( /// Represents a query for an event pertaining to the specified transaction #[derive(Debug, Copy, Clone)] pub enum TxEventQuery<'a> { - /// Queries whether transaction with given hash was accepted - Accepted(&'a str), /// Queries whether transaction with given hash was applied Applied(&'a str), } @@ -470,7 +465,6 @@ impl<'a> TxEventQuery<'a> { 
/// The event type to which this event query pertains pub fn event_type(self) -> &'static str { match self { - TxEventQuery::Accepted(_) => "accepted", TxEventQuery::Applied(_) => "applied", } } @@ -478,7 +472,6 @@ impl<'a> TxEventQuery<'a> { /// The transaction to which this event query pertains pub fn tx_hash(self) -> &'a str { match self { - TxEventQuery::Accepted(tx_hash) => tx_hash, TxEventQuery::Applied(tx_hash) => tx_hash, } } @@ -488,9 +481,6 @@ impl<'a> TxEventQuery<'a> { impl<'a> From> for Query { fn from(tx_query: TxEventQuery<'a>) -> Self { match tx_query { - TxEventQuery::Accepted(tx_hash) => { - Query::default().and_eq("accepted.hash", tx_hash) - } TxEventQuery::Applied(tx_hash) => { Query::default().and_eq("applied.hash", tx_hash) } @@ -506,15 +496,9 @@ pub async fn query_tx_events( ) -> std::result::Result, ::Error> { let tx_hash: Hash = tx_event_query.tx_hash().try_into().unwrap(); match tx_event_query { - TxEventQuery::Accepted(_) => { - RPC.shell().accepted(client, &tx_hash).await - } - /*.wrap_err_with(|| { - eyre!("Failed querying whether a transaction was accepted") - })*/, - TxEventQuery::Applied(_) => RPC.shell().applied(client, &tx_hash).await, /*.wrap_err_with(|| { - eyre!("Error querying whether a transaction was applied") - })*/ + TxEventQuery::Applied(_) => RPC.shell().applied(client, &tx_hash).await, /* .wrap_err_with(|| { + * eyre!("Error querying whether a transaction was applied") + * }) */ } } @@ -537,9 +521,10 @@ pub async fn dry_run_tx( ) } else { format!( - "Transaction was rejected by VPs: {}.\nChanged key: {}", + "Transaction was rejected by VPs: {}\nErrors: {}\nChanged keys: {}", serde_json::to_string_pretty(&result.vps_result.rejected_vps) .unwrap(), + serde_json::to_string_pretty(&result.vps_result.errors).unwrap(), serde_json::to_string_pretty(&result.changed_keys).unwrap(), ) }; @@ -561,10 +546,8 @@ pub enum TxBroadcastData { Live { /// Transaction to broadcast tx: Tx, - /// Hash of the wrapper transaction - wrapper_hash: 
String, - /// Hash of decrypted transaction - decrypted_hash: String, + /// Hash of the transaction + tx_hash: String, }, } @@ -720,7 +703,7 @@ pub async fn query_tx_response( ) })?; // Reformat the event attributes so as to ease value extraction - let event_map: std::collections::HashMap<&str, &str> = query_event + let event_map: namada_core::collections::HashMap<&str, &str> = query_event .attributes .iter() .map(|tag| (tag.key.as_ref(), tag.value.as_ref())) @@ -997,7 +980,7 @@ pub async fn query_proposal_result( proposal_votes.add_validator( &vote.validator, voting_power, - vote.data.into(), + vote.data, ); } false => { @@ -1014,7 +997,7 @@ pub async fn query_proposal_result( &vote.delegator, &vote.validator, voting_power, - vote.data.into(), + vote.data, ); } } @@ -1360,6 +1343,48 @@ pub async fn format_denominated_amount( .to_string() } +/// Look up IBC tokens. The given base token can be non-Namada token. +pub async fn query_ibc_tokens( + context: &N, + base_token: Option, + owner: Option<&Address>, +) -> Result, Error> { + // Check the base token + let prefixes = match (base_token, owner) { + (Some(base_token), Some(owner)) => vec![ + ibc_trace_key_prefix(Some(base_token)), + ibc_trace_key_prefix(Some(owner.to_string())), + ], + (Some(base_token), None) => { + vec![ibc_trace_key_prefix(Some(base_token))] + } + _ => { + // Check all IBC denoms because the owner might not know IBC token + // transfers in the same chain + vec![ibc_trace_key_prefix(None)] + } + }; + + let mut tokens = BTreeMap::new(); + for prefix in prefixes { + let ibc_traces = + query_storage_prefix::<_, String>(context, &prefix).await?; + if let Some(ibc_traces) = ibc_traces { + for (key, ibc_trace) in ibc_traces { + if let Some((_, hash)) = is_ibc_trace_key(&key) { + let hash: IbcTokenHash = hash.parse().expect( + "Parsing an IBC token hash from storage shouldn't fail", + ); + let ibc_token = + Address::Internal(InternalAddress::IbcToken(hash)); + tokens.insert(ibc_trace, ibc_token); + } + } 
+ } + } + Ok(tokens) +} + /// Look up the IBC denomination from a IbcToken. pub async fn query_ibc_denom( context: &N, @@ -1374,9 +1399,9 @@ pub async fn query_ibc_denom( }; if let Some(owner) = owner { - let ibc_denom_key = ibc_denom_key(owner.to_string(), &hash); + let ibc_trace_key = ibc_trace_key(owner.to_string(), &hash); if let Ok(ibc_denom) = - query_storage_value::<_, String>(context.client(), &ibc_denom_key) + query_storage_value::<_, String>(context.client(), &ibc_trace_key) .await { return ibc_denom; @@ -1384,12 +1409,12 @@ pub async fn query_ibc_denom( } // No owner is specified or the owner doesn't have the token - let ibc_denom_prefix = ibc_denom_key_prefix(None); + let ibc_denom_prefix = ibc_trace_key_prefix(None); if let Ok(Some(ibc_denoms)) = query_storage_prefix::<_, String>(context, &ibc_denom_prefix).await { for (key, ibc_denom) in ibc_denoms { - if let Some((_, token_hash)) = is_ibc_denom_key(&key) { + if let Some((_, token_hash)) = is_ibc_trace_key(&key) { if token_hash == hash { return ibc_denom; } diff --git a/crates/sdk/src/signing.rs b/crates/sdk/src/signing.rs index 4d05007734..cd01dca1c5 100644 --- a/crates/sdk/src/signing.rs +++ b/crates/sdk/src/signing.rs @@ -1,5 +1,5 @@ //! 
Functions to sign transactions -use std::collections::{BTreeMap, HashMap, HashSet}; +use std::collections::BTreeMap; use std::fmt::Display; use borsh::BorshDeserialize; @@ -13,6 +13,7 @@ use masp_primitives::transaction::components::sapling::fees::{ use masp_primitives::transaction::Transaction; use namada_account::{AccountPublicKeysMap, InitAccount, UpdateAccount}; use namada_core::address::{Address, ImplicitAddress, InternalAddress, MASP}; +use namada_core::collections::{HashMap, HashSet}; use namada_core::key::*; use namada_core::masp::{AssetData, ExtendedViewingKey, PaymentAddress}; use namada_core::sign::SignatureIndex; @@ -887,11 +888,11 @@ fn to_ledger_decimal(amount: &str) -> String { if amount.contains('.') { let mut amount = amount.trim_end_matches('0').to_string(); if amount.ends_with('.') { - amount.push('0') + amount.pop(); } amount } else { - amount.to_string() + ".0" + amount.to_string() } } @@ -916,8 +917,8 @@ struct LedgerProposalType<'a>(&'a ProposalType, &'a Tx); impl<'a> Display for LedgerProposalType<'a> { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { match self.0 { - ProposalType::Default(None) => write!(f, "Default"), - ProposalType::Default(Some(hash)) => { + ProposalType::Default => write!(f, "Default"), + ProposalType::DefaultWithWasm(hash) => { let extra = self .1 .get_section(hash) @@ -939,10 +940,10 @@ fn proposal_type_to_ledger_vector( output: &mut Vec, ) { match proposal_type { - ProposalType::Default(None) => { + ProposalType::Default => { output.push("Proposal type : Default".to_string()) } - ProposalType::Default(Some(hash)) => { + ProposalType::DefaultWithWasm(hash) => { output.push("Proposal type : Default".to_string()); let extra = tx .get_section(hash) @@ -1223,7 +1224,6 @@ pub async fn to_ledger_vector( .hash(); tv.output.push("Type : Init proposal".to_string()); - tv.output.push(format!("ID : {}", init_proposal_data.id)); proposal_type_to_ledger_vector( &init_proposal_data.r#type, tx, @@ -1239,12 +1239,13 
@@ pub async fn to_ledger_vector( "Voting end epoch : {}", init_proposal_data.voting_end_epoch ), - format!("Grace epoch : {}", init_proposal_data.grace_epoch), + format!( + "Activation epoch : {}", + init_proposal_data.activation_epoch + ), format!("Content : {}", HEXLOWER.encode(&extra.0)), ]); - tv.output_expert - .push(format!("ID : {}", init_proposal_data.id)); proposal_type_to_ledger_vector( &init_proposal_data.r#type, tx, @@ -1260,7 +1261,10 @@ pub async fn to_ledger_vector( "Voting end epoch : {}", init_proposal_data.voting_end_epoch ), - format!("Grace epoch : {}", init_proposal_data.grace_epoch), + format!( + "Activation epoch : {}", + init_proposal_data.activation_epoch + ), format!("Content : {}", HEXLOWER.encode(&extra.0)), ]); } else if code_sec.tag == Some(TX_VOTE_PROPOSAL.to_string()) { diff --git a/crates/sdk/src/tx.rs b/crates/sdk/src/tx.rs index 694936e98b..2faef0c99a 100644 --- a/crates/sdk/src/tx.rs +++ b/crates/sdk/src/tx.rs @@ -1,7 +1,7 @@ //! SDK functions to construct different types of transactions use std::borrow::Cow; -use std::collections::{BTreeMap, HashSet}; +use std::collections::BTreeMap; use std::fs::File; use std::path::{Path, PathBuf}; use std::time::Duration; @@ -9,7 +9,6 @@ use std::time::Duration; use borsh::BorshSerialize; use borsh_ext::BorshSerializeExt; use masp_primitives::asset_type::AssetType; -use masp_primitives::transaction::builder; use masp_primitives::transaction::builder::Builder; use masp_primitives::transaction::components::sapling::fees::{ ConvertView, InputView as SaplingInputView, OutputView as SaplingOutputView, @@ -18,20 +17,28 @@ use masp_primitives::transaction::components::transparent::fees::{ InputView as TransparentInputView, OutputView as TransparentOutputView, }; use masp_primitives::transaction::components::I128Sum; +use masp_primitives::transaction::{builder, Transaction as MaspTransaction}; +use masp_primitives::zip32::ExtendedFullViewingKey; use namada_account::{InitAccount, UpdateAccount}; use 
namada_core::address::{Address, InternalAddress, MASP}; +use namada_core::collections::HashSet; use namada_core::dec::Dec; use namada_core::hash::Hash; -use namada_core::ibc::apps::transfer::types::msgs::transfer::MsgTransfer; +use namada_core::ibc::apps::nft_transfer::types::msgs::transfer::MsgTransfer as IbcMsgNftTransfer; +use namada_core::ibc::apps::nft_transfer::types::packet::PacketData as NftPacketData; +use namada_core::ibc::apps::nft_transfer::types::PrefixedClassId; +use namada_core::ibc::apps::transfer::types::msgs::transfer::MsgTransfer as IbcMsgTransfer; use namada_core::ibc::apps::transfer::types::packet::PacketData; use namada_core::ibc::apps::transfer::types::PrefixedCoin; use namada_core::ibc::core::channel::types::timeout::TimeoutHeight; use namada_core::ibc::core::client::types::Height as IbcHeight; use namada_core::ibc::core::host::types::identifiers::{ChannelId, PortId}; -use namada_core::ibc::primitives::{Msg, Timestamp as IbcTimestamp}; -use namada_core::ibc::{IbcShieldedTransfer, MsgShieldedTransfer}; -use namada_core::key::*; -use namada_core::masp::{AssetData, TransferSource, TransferTarget}; +use namada_core::ibc::primitives::Timestamp as IbcTimestamp; +use namada_core::ibc::{is_nft_trace, MsgNftTransfer, MsgTransfer}; +use namada_core::key::{self, *}; +use namada_core::masp::{ + AssetData, PaymentAddress, TransferSource, TransferTarget, +}; use namada_core::storage::Epoch; use namada_core::time::DateTimeUtc; use namada_core::{storage, token}; @@ -43,14 +50,18 @@ use namada_governance::storage::proposal::{ InitProposalData, ProposalType, VoteProposalData, }; use namada_governance::storage::vote::ProposalVote; -use namada_ibc::storage::channel_key; -use namada_proof_of_stake::parameters::PosParams; +use namada_ibc::storage::{channel_key, ibc_token}; +use namada_proof_of_stake::parameters::{ + PosParams, MAX_VALIDATOR_METADATA_LEN, +}; use namada_proof_of_stake::types::{CommissionPair, ValidatorState}; use 
namada_token::storage_key::balance_key; use namada_token::DenominatedAmount; use namada_tx::data::pgf::UpdateStewardCommission; +use namada_tx::data::pos::{BecomeValidator, ConsensusKeyChange}; use namada_tx::data::{pos, ResultCode, TxResult}; -pub use namada_tx::{Signature, *}; +pub use namada_tx::{Authorization, *}; +use rand_core::{OsRng, RngCore}; use crate::args::{self, InputAmount}; use crate::control_flow::time; @@ -60,8 +71,8 @@ use crate::masp::TransferErr::Build; use crate::masp::{ShieldedContext, ShieldedTransfer}; use crate::queries::Client; use crate::rpc::{ - self, query_wasm_code_hash, validate_amount, InnerTxResult, - TxBroadcastData, TxResponse, + self, get_validator_stake, query_wasm_code_hash, validate_amount, + InnerTxResult, TxBroadcastData, TxResponse, }; use crate::signing::{self, validate_fee_and_gen_unshield, SigningTxData}; use crate::tendermint_rpc::endpoint::broadcast::tx_sync::Response; @@ -220,15 +231,10 @@ pub async fn process_tx( expect_dry_broadcast(TxBroadcastData::DryRun(tx), context).await } else { // We use this to determine when the wrapper tx makes it on-chain - let wrapper_hash = tx.header_hash().to_string(); + let tx_hash = tx.header_hash().to_string(); // We use this to determine when the decrypted inner tx makes it // on-chain - let decrypted_hash = tx.raw_header_hash().to_string(); - let to_broadcast = TxBroadcastData::Live { - tx, - wrapper_hash, - decrypted_hash, - }; + let to_broadcast = TxBroadcastData::Live { tx, tx_hash }; // TODO: implement the code to resubmit the wrapper if it fails because // of masp epoch Either broadcast or submit transaction and // collect result into sum type @@ -313,12 +319,8 @@ pub async fn broadcast_tx( context: &impl Namada, to_broadcast: &TxBroadcastData, ) -> Result { - let (tx, wrapper_tx_hash, decrypted_tx_hash) = match to_broadcast { - TxBroadcastData::Live { - tx, - wrapper_hash, - decrypted_hash, - } => Ok((tx, wrapper_hash, decrypted_hash)), + let (tx, tx_hash) = match 
to_broadcast { + TxBroadcastData::Live { tx, tx_hash } => Ok((tx, tx_hash)), TxBroadcastData::DryRun(tx) => { Err(TxSubmitError::ExpectLiveRun(tx.clone())) } @@ -342,14 +344,7 @@ pub async fn broadcast_tx( // Print the transaction identifiers to enable the extraction of // acceptance/application results later { - display_line!( - context.io(), - "Wrapper transaction hash: {wrapper_tx_hash}", - ); - display_line!( - context.io(), - "Inner transaction hash: {decrypted_tx_hash}", - ); + display_line!(context.io(), "Transaction hash: {tx_hash}",); } Ok(response) } else { @@ -373,12 +368,8 @@ pub async fn submit_tx( context: &impl Namada, to_broadcast: TxBroadcastData, ) -> Result { - let (_, wrapper_hash, decrypted_hash) = match &to_broadcast { - TxBroadcastData::Live { - tx, - wrapper_hash, - decrypted_hash, - } => Ok((tx, wrapper_hash, decrypted_hash)), + let (_, tx_hash) = match &to_broadcast { + TxBroadcastData::Live { tx, tx_hash } => Ok((tx, tx_hash)), TxBroadcastData::DryRun(tx) => { Err(TxSubmitError::ExpectLiveRun(tx.clone())) } @@ -387,6 +378,7 @@ pub async fn submit_tx( // Broadcast the supplied transaction broadcast_tx(context, &to_broadcast).await?; + #[allow(clippy::disallowed_methods)] let deadline = time::Instant::now() + time::Duration::from_secs( DEFAULT_NAMADA_EVENTS_MAX_WAIT_TIME_SECONDS, @@ -398,36 +390,12 @@ pub async fn submit_tx( "Awaiting transaction approval", ); - let response = { - let wrapper_query = rpc::TxEventQuery::Accepted(wrapper_hash.as_str()); - let event = - rpc::query_tx_status(context, wrapper_query, deadline).await?; - let wrapper_resp = TxResponse::from_event(event); - - if display_wrapper_resp_and_get_result(context, &wrapper_resp) { - display_line!( - context.io(), - "Waiting for inner transaction result..." - ); - // The transaction is now on chain. 
We wait for it to be decrypted - // and applied - // We also listen to the event emitted when the encrypted - // payload makes its way onto the blockchain - let decrypted_query = - rpc::TxEventQuery::Applied(decrypted_hash.as_str()); - let event = - rpc::query_tx_status(context, decrypted_query, deadline) - .await?; - let inner_resp = TxResponse::from_event(event); - - display_inner_resp(context, &inner_resp); - Ok(inner_resp) - } else { - Ok(wrapper_resp) - } - }; - - response + // The transaction is now on chain. We wait for it to be applied + let tx_query = rpc::TxEventQuery::Applied(tx_hash.as_str()); + let event = rpc::query_tx_status(context, tx_query, deadline).await?; + let response = TxResponse::from_event(event); + display_inner_resp(context, &response); + Ok(response) } /// Display a result of a wrapper tx. @@ -481,9 +449,11 @@ pub fn display_inner_resp(context: &impl Namada, resp: &TxResponse) { .collect(); edisplay_line!( context.io(), - "Transaction was rejected by VPs: {}.\nChanged keys: {}", + "Transaction was rejected by VPs: {}\nErrors: {}\nChanged \ + keys: {}", serde_json::to_string_pretty(&inner.vps_result.rejected_vps) .unwrap(), + serde_json::to_string_pretty(&inner.vps_result.errors).unwrap(), serde_json::to_string_pretty(&changed_keys).unwrap(), ); } @@ -562,6 +532,71 @@ pub async fn save_initialized_accounts( } } +/// Submit validator commission rate change +pub async fn build_change_consensus_key( + context: &impl Namada, + args::ConsensusKeyChange { + tx: tx_args, + validator, + consensus_key, + tx_code_path, + unsafe_dont_encrypt: _, + }: &args::ConsensusKeyChange, +) -> Result<(Tx, SigningTxData)> { + let consensus_key = if let Some(consensus_key) = consensus_key { + consensus_key + } else { + edisplay_line!(context.io(), "Consensus key must must be present."); + return Err(Error::from(TxSubmitError::Other( + "Consensus key must must be present.".to_string(), + ))); + }; + + // Check that the new consensus key is unique + let 
consensus_keys = rpc::get_consensus_keys(context.client()).await?; + + if consensus_keys.contains(consensus_key) { + edisplay_line!( + context.io(), + "The consensus key is already being used." + ); + return Err(Error::from(TxSubmitError::ConsensusKeyNotUnique)); + } + + let data = ConsensusKeyChange { + validator: validator.clone(), + consensus_key: consensus_key.clone(), + }; + + let signing_data = signing::init_validator_signing_data( + context, + tx_args, + vec![consensus_key.clone()], + ) + .await?; + + let (fee_amount, _updated_balance, unshield) = + validate_fee_and_gen_unshield( + context, + tx_args, + &signing_data.fee_payer, + ) + .await?; + + build( + context, + tx_args, + tx_code_path.clone(), + data, + do_nothing, + unshield, + fee_amount, + &signing_data.fee_payer, + ) + .await + .map(|tx| (tx, signing_data)) +} + /// Submit validator commission rate change pub async fn build_validator_commission_change( context: &impl Namada, @@ -733,6 +768,68 @@ pub async fn build_validator_metadata_change( ); return Err(Error::from(TxSubmitError::InvalidEmail)); } + // Check that the email is within MAX_VALIDATOR_METADATA_LEN characters + if email.len() as u64 > MAX_VALIDATOR_METADATA_LEN { + edisplay_line!( + context.io(), + "Email provided is too long, must be within \ + {MAX_VALIDATOR_METADATA_LEN} characters" + ); + if !tx_args.force { + return Err(Error::from(TxSubmitError::MetadataTooLong)); + } + } + } + + // Check that any new metadata provided is within MAX_VALIDATOR_METADATA_LEN + // characters + if let Some(description) = description.as_ref() { + if description.len() as u64 > MAX_VALIDATOR_METADATA_LEN { + edisplay_line!( + context.io(), + "Description provided is too long, must be within \ + {MAX_VALIDATOR_METADATA_LEN} characters" + ); + if !tx_args.force { + return Err(Error::from(TxSubmitError::MetadataTooLong)); + } + } + } + if let Some(website) = website.as_ref() { + if website.len() as u64 > MAX_VALIDATOR_METADATA_LEN { + edisplay_line!( + 
context.io(), + "Website provided is too long, must be within \ + {MAX_VALIDATOR_METADATA_LEN} characters" + ); + if !tx_args.force { + return Err(Error::from(TxSubmitError::MetadataTooLong)); + } + } + } + if let Some(discord_handle) = discord_handle.as_ref() { + if discord_handle.len() as u64 > MAX_VALIDATOR_METADATA_LEN { + edisplay_line!( + context.io(), + "Discord handle provided is too long, must be within \ + {MAX_VALIDATOR_METADATA_LEN} characters" + ); + if !tx_args.force { + return Err(Error::from(TxSubmitError::MetadataTooLong)); + } + } + } + if let Some(avatar) = avatar.as_ref() { + if avatar.len() as u64 > MAX_VALIDATOR_METADATA_LEN { + edisplay_line!( + context.io(), + "Avatar provided is too long, must be within \ + {MAX_VALIDATOR_METADATA_LEN} characters" + ); + if !tx_args.force { + return Err(Error::from(TxSubmitError::MetadataTooLong)); + } + } } // If there's a new commission rate, it must be valid @@ -1643,6 +1740,13 @@ pub async fn build_unbond( amount.to_string_native(), bond_amount.to_string_native(), ); + if !tx_args.force { + return Err(Error::from(TxSubmitError::LowerBondThanUnbond( + bond_source, + amount.to_string_native(), + bond_amount.to_string_native(), + ))); + } } // Query the unbonds before submitting the tx @@ -1898,7 +2002,6 @@ pub async fn build_default_proposal( args::InitProposal { tx, proposal_data: _, - is_offline: _, is_pgf_stewards: _, is_pgf_funding: _, tx_code_path, @@ -1930,7 +2033,7 @@ pub async fn build_default_proposal( let (_, extra_section_hash) = tx_builder.add_extra_section(init_proposal_code, None); init_proposal_data.r#type = - ProposalType::Default(Some(extra_section_hash)); + ProposalType::DefaultWithWasm(extra_section_hash); }; Ok(()) }; @@ -1956,18 +2059,16 @@ pub async fn build_vote_proposal( tx, proposal_id, vote, - voter, - is_offline: _, - proposal_data: _, + voter_address, tx_code_path, }: &args::VoteProposal, - epoch: Epoch, + current_epoch: Epoch, ) -> Result<(Tx, SigningTxData)> { - let 
default_signer = Some(voter.clone()); + let default_signer = Some(voter_address.clone()); let signing_data = signing::aux_signing_data( context, tx, - Some(voter.clone()), + default_signer.clone(), default_signer.clone(), ) .await?; @@ -1978,57 +2079,150 @@ pub async fn build_vote_proposal( let proposal_vote = ProposalVote::try_from(vote.clone()) .map_err(|_| TxSubmitError::InvalidProposalVote)?; - let proposal_id = proposal_id.ok_or_else(|| { - Error::Other("Proposal id must be defined.".to_string()) - })?; let proposal = if let Some(proposal) = - rpc::query_proposal_by_id(context.client(), proposal_id).await? + rpc::query_proposal_by_id(context.client(), *proposal_id).await? { proposal } else { return Err(Error::from(TxSubmitError::ProposalDoesNotExist( - proposal_id, + *proposal_id, ))); }; - let is_validator = rpc::is_validator(context.client(), voter).await?; + let is_validator = + rpc::is_validator(context.client(), voter_address).await?; - if !proposal.can_be_voted(epoch, is_validator) { + // Prevent jailed or inactive validators from voting + if is_validator && !tx.force { + let state = rpc::get_validator_state( + context.client(), + voter_address, + Some(current_epoch), + ) + .await? 
+ .expect("Expected to find the state of the validator"); + + if matches!(state, ValidatorState::Jailed | ValidatorState::Inactive) { + return Err(Error::from(TxSubmitError::CannotVoteInGovernance( + voter_address.clone(), + current_epoch, + ))); + } + } + + // Check if the voting period is still valid for the voter + if !proposal.can_be_voted(current_epoch, is_validator) { eprintln!("Proposal {} cannot be voted on anymore.", proposal_id); if is_validator { eprintln!( "NB: voter address {} is a validator, and validators can only \ vote on proposals within the first 2/3 of the voting period.", - voter + voter_address ); } if !tx.force { return Err(Error::from( - TxSubmitError::InvalidProposalVotingPeriod(proposal_id), + TxSubmitError::InvalidProposalVotingPeriod(*proposal_id), )); } } - let delegations = rpc::get_delegators_delegation_at( - context.client(), - voter, - proposal.voting_start_epoch, - ) - .await? - .keys() - .cloned() - .collect::>(); - - if delegations.is_empty() { - return Err(Error::Other( - "Voter address must have delegations".to_string(), - )); - } + let delegations = if is_validator { + let stake = + get_validator_stake(context.client(), current_epoch, voter_address) + .await?; + + if stake.is_zero() { + eprintln!( + "Voter address {} is a validator but has no stake, so it has \ + no votes.", + voter_address + ); + if !tx.force { + return Err(Error::Other( + "Voter address must have delegations".to_string(), + )); + } + } + let val_state = rpc::get_validator_state( + context.client(), + voter_address, + Some(current_epoch), + ) + .await? 
+ .expect("Expected to find the state of the validator"); + + if !matches!( + val_state, + ValidatorState::Jailed | ValidatorState::Inactive + ) { + vec![voter_address.clone()] + } else { + eprintln!( + "Voter address {} is a validator that is either jailed or \ + inactive, and so it may not vote in governance at this \ + moment.", + voter_address + ); + if !tx.force { + return Err(Error::from( + TxSubmitError::CannotVoteInGovernance( + voter_address.clone(), + current_epoch, + ), + )); + } + vec![] + } + } else { + // Get active valid validators with whom the voter has delegations + // (bonds) + let delegation_vals = rpc::get_delegators_delegation_at( + context.client(), + voter_address, + proposal.voting_start_epoch, + ) + .await?; + + let mut delegation_validators = Vec::
::new(); + for validator in delegation_vals.keys() { + let val_state = rpc::get_validator_state( + context.client(), + validator, + Some(current_epoch), + ) + .await? + .expect("Expected to find the state of the validator"); + + if matches!( + val_state, + ValidatorState::Jailed | ValidatorState::Inactive + ) { + continue; + } + delegation_validators.push(validator.clone()); + } + + // Check that there are delegations to vote with + if delegation_validators.is_empty() { + eprintln!( + "Voter address {} does not have any delegations.", + voter_address + ); + if !tx.force { + return Err(Error::from(TxSubmitError::NoDelegationsFound( + voter_address.clone(), + current_epoch, + ))); + } + } + delegation_validators + }; let data = VoteProposalData { - id: proposal_id, + id: *proposal_id, vote: proposal_vote, - voter: voter.clone(), + voter: voter_address.clone(), delegations, }; @@ -2046,13 +2240,202 @@ pub async fn build_vote_proposal( .map(|tx| (tx, signing_data)) } +/// Build a pgf funding proposal governance +pub async fn build_become_validator( + context: &impl Namada, + args::TxBecomeValidator { + tx: tx_args, + address, + scheme: _, + consensus_key, + eth_cold_key, + eth_hot_key, + protocol_key, + commission_rate, + max_commission_rate_change, + email, + website, + description, + discord_handle, + avatar, + unsafe_dont_encrypt: _, + tx_code_path, + }: &args::TxBecomeValidator, +) -> Result<(Tx, SigningTxData)> { + // Check that the address is established + if !address.is_established() { + edisplay_line!( + context.io(), + "The given address {address} is not established. Only an \ + established address can become a validator.", + ); + if !tx_args.force { + return Err(Error::Other( + "The given address must be enstablished".to_string(), + )); + } + }; + + // Check that the address is not already a validator + if rpc::is_validator(context.client(), address).await? 
{ + edisplay_line!( + context.io(), + "The given address {address} is already a validator", + ); + if !tx_args.force { + return Err(Error::Other( + "The given address must not be a validator already".to_string(), + )); + } + }; + + // If the address is not yet a validator, it cannot have self-bonds, but it + // may have delegations. It has to unbond those before it can become a + // validator. + if rpc::has_bonds(context.client(), address).await? { + edisplay_line!( + context.io(), + "The given address {address} has delegations and therefore cannot \ + become a validator. To become a validator, you have to unbond \ + your delegations first.", + ); + if !tx_args.force { + return Err(Error::Other( + "The given address must not have delegations".to_string(), + )); + } + } + + // Validate the commission rate data + if *commission_rate > Dec::one() || *commission_rate < Dec::zero() { + edisplay_line!( + context.io(), + "The validator commission rate must not exceed 1.0 or 100%, and \ + it must be 0 or positive." + ); + if !tx_args.force { + return Err(Error::Other( + "Invalid validator commission rate".to_string(), + )); + } + } + + if *max_commission_rate_change > Dec::one() + || *max_commission_rate_change < Dec::zero() + { + edisplay_line!( + context.io(), + "The validator maximum change in commission rate per epoch must \ + not exceed 1.0 or 100%, and it must be 0 or positive." + ); + if !tx_args.force { + return Err(Error::Other( + "Invalid validator maximum change".to_string(), + )); + } + } + + // Validate the email + if email.is_empty() { + edisplay_line!( + context.io(), + "The validator email must not be an empty string." 
+ ); + if !tx_args.force { + return Err(Error::Other( + "Validator email must not be empty".to_string(), + )); + } + } + + // check that all keys have been supplied correctly + if [ + consensus_key.clone(), + eth_cold_key.clone(), + eth_hot_key.clone(), + protocol_key.clone(), + ] + .iter() + .any(|key| key.is_none()) + { + edisplay_line!( + context.io(), + "All validator keys must be supplied to create a validator." + ); + return Err(Error::Other("Validator key must be present".to_string())); + } + + let data = BecomeValidator { + address: address.clone(), + consensus_key: consensus_key.clone().unwrap(), + eth_cold_key: key::secp256k1::PublicKey::try_from_pk( + ð_cold_key.clone().unwrap(), + ) + .unwrap(), + eth_hot_key: key::secp256k1::PublicKey::try_from_pk( + ð_hot_key.clone().unwrap(), + ) + .unwrap(), + protocol_key: protocol_key.clone().unwrap(), + commission_rate: *commission_rate, + max_commission_rate_change: *max_commission_rate_change, + email: email.to_owned(), + description: description.clone(), + website: website.clone(), + discord_handle: discord_handle.clone(), + avatar: avatar.clone(), + }; + + // Put together all the PKs that we have to sign with to verify ownership + let account = if let Some(account) = + rpc::get_account_info(context.client(), address).await? + { + account + } else { + edisplay_line!( + context.io(), + "Unable to query account keys for address {address}." 
+ ); + return Err(Error::Other("Invalid address".to_string())); + }; + + let mut all_pks = account.get_all_public_keys(); + all_pks.push(consensus_key.clone().unwrap().clone()); + all_pks.push(eth_cold_key.clone().unwrap()); + all_pks.push(eth_hot_key.clone().unwrap()); + all_pks.push(protocol_key.clone().unwrap().clone()); + + let signing_data = + signing::init_validator_signing_data(context, tx_args, all_pks).await?; + + let (fee_amount, _updated_balance, unshield) = + validate_fee_and_gen_unshield( + context, + tx_args, + &signing_data.fee_payer, + ) + .await?; + + build( + context, + tx_args, + tx_code_path.clone(), + data, + do_nothing, + unshield, + fee_amount, + &signing_data.fee_payer, + ) + .await + .map(|tx| (tx, signing_data)) +} + /// Build a pgf funding proposal governance pub async fn build_pgf_funding_proposal( context: &impl Namada, args::InitProposal { tx, proposal_data: _, - is_offline: _, is_pgf_stewards: _, is_pgf_funding: _, tx_code_path, @@ -2101,7 +2484,6 @@ pub async fn build_pgf_stewards_proposal( args::InitProposal { tx, proposal_data: _, - is_offline: _, is_pgf_stewards: _, is_pgf_funding: _, tx_code_path, @@ -2150,6 +2532,9 @@ pub async fn build_ibc_transfer( context: &impl Namada, args: &args::TxIbcTransfer, ) -> Result<(Tx, SigningTxData, Option)> { + let refund_target = + get_refund_target(context, &args.source, &args.refund_target).await?; + let source = args.source.effective_address(); let signing_data = signing::aux_signing_data( context, @@ -2219,21 +2604,6 @@ pub async fn build_ibc_transfer( .await?; let shielded_tx_epoch = shielded_parts.as_ref().map(|trans| trans.0.epoch); - let ibc_denom = - rpc::query_ibc_denom(context, &args.token.to_string(), Some(&source)) - .await; - let token = PrefixedCoin { - denom: ibc_denom.parse().expect("Invalid IBC denom"), - // Set the IBC amount as an integer - amount: validated_amount.into(), - }; - let packet_data = PacketData { - token, - sender: source.to_string().into(), - receiver: 
args.receiver.clone().into(), - memo: args.memo.clone().unwrap_or_default().into(), - }; - // this height should be that of the destination chain, not this chain let timeout_height = match args.timeout_height { Some(h) => { @@ -2247,7 +2617,11 @@ pub async fn build_ibc_transfer( let now: std::result::Result< crate::tendermint::Time, namada_core::tendermint::Error, - > = DateTimeUtc::now().try_into(); + > = { + #[allow(clippy::disallowed_methods)] + DateTimeUtc::now() + } + .try_into(); let now = now.map_err(|e| Error::Other(e.to_string()))?; let now: IbcTimestamp = now.into(); let timeout_timestamp = if let Some(offset) = args.timeout_sec_offset { @@ -2261,59 +2635,109 @@ pub async fn build_ibc_transfer( IbcTimestamp::none() }; - let message = MsgTransfer { - port_id_on_a: args.port_id.clone(), - chan_id_on_a: args.channel_id.clone(), - packet_data, - timeout_height_on_b: timeout_height, - timeout_timestamp_on_b: timeout_timestamp, - }; - let chain_id = args.tx.chain_id.clone().unwrap(); let mut tx = Tx::new(chain_id, args.tx.expiration); if let Some(memo) = &args.tx.memo { tx.add_memo(memo); } - let data = match shielded_parts { - Some((shielded_transfer, asset_types)) => { - let masp_tx_hash = - tx.add_masp_tx_section(shielded_transfer.masp_tx.clone()).1; - let transfer = token::Transfer { - source: source.clone(), - // The token will be escrowed to IBC address - target: Address::Internal(InternalAddress::Ibc), - token: args.token.clone(), - amount: validated_amount, - // The address could be a payment address, but the address isn't - // that of this chain. 
- key: None, - // Link the Transfer to the MASP Transaction by hash code - shielded: Some(masp_tx_hash), - }; - tx.add_masp_builder(MaspBuilder { - asset_types, - metadata: shielded_transfer.metadata, - builder: shielded_transfer.builder, - target: masp_tx_hash, - }); - let shielded_transfer = IbcShieldedTransfer { - transfer, - masp_tx: shielded_transfer.masp_tx, - }; - MsgShieldedTransfer { - message, - shielded_transfer, - } - .serialize_to_vec() - } - None => { - let any_msg = message.to_any(); - let mut data = vec![]; - prost::Message::encode(&any_msg, &mut data) - .map_err(TxSubmitError::EncodeFailure)?; - data - } + let transfer = shielded_parts.map(|(shielded_transfer, asset_types)| { + let masp_tx_hash = + tx.add_masp_tx_section(shielded_transfer.masp_tx.clone()).1; + let transfer = token::Transfer { + source: source.clone(), + // The token will be escrowed to IBC address + target: Address::Internal(InternalAddress::Ibc), + token: args.token.clone(), + amount: validated_amount, + // The address could be a payment address, but the address isn't + // that of this chain. + key: None, + // Link the Transfer to the MASP Transaction by hash code + shielded: Some(masp_tx_hash), + }; + tx.add_masp_builder(MaspBuilder { + asset_types, + metadata: shielded_transfer.metadata, + builder: shielded_transfer.builder, + target: masp_tx_hash, + }); + transfer + }); + + // Check the token and make the tx data + let ibc_denom = + rpc::query_ibc_denom(context, &args.token.to_string(), Some(&source)) + .await; + // The refund target should be given or created if the source is shielded. + // Otherwise, the refund target should be None. + assert!( + (args.source.spending_key().is_some() && refund_target.is_some()) + || (args.source.address().is_some() && refund_target.is_none()) + ); + // If the refund address is given, set the refund address. 
It is used only + // when refunding and won't affect the actual transfer because the actual + // source will be the MASP address and the MASP transaction is generated by + // the shielded source address. + let sender = refund_target + .map(|t| t.to_string()) + .unwrap_or(source.to_string()) + .into(); + let data = if args.port_id == PortId::transfer() { + let token = PrefixedCoin { + denom: ibc_denom + .parse() + .map_err(|e| Error::Other(format!("Invalid IBC denom: {e}")))?, + // Set the IBC amount as an integer + amount: validated_amount.into(), + }; + let packet_data = PacketData { + token, + sender, + receiver: args.receiver.clone().into(), + memo: args.memo.clone().unwrap_or_default().into(), + }; + let message = IbcMsgTransfer { + port_id_on_a: args.port_id.clone(), + chan_id_on_a: args.channel_id.clone(), + packet_data, + timeout_height_on_b: timeout_height, + timeout_timestamp_on_b: timeout_timestamp, + }; + MsgTransfer { message, transfer }.serialize_to_vec() + } else if let Some((trace_path, base_class_id, token_id)) = + is_nft_trace(&ibc_denom) + { + let class_id = PrefixedClassId { + trace_path, + base_class_id: base_class_id.parse().map_err(|_| { + Error::Other(format!("Invalid class ID: {base_class_id}")) + })?, + }; + let token_ids = vec![token_id.clone()].try_into().map_err(|_| { + Error::Other(format!("Invalid token ID: {token_id}")) + })?; + let packet_data = NftPacketData { + class_id, + class_uri: None, + class_data: None, + token_ids, + token_uris: None, + token_data: None, + sender, + receiver: args.receiver.clone().into(), + memo: args.memo.clone().map(|m| m.into()), + }; + let message = IbcMsgNftTransfer { + port_id_on_a: args.port_id.clone(), + chan_id_on_a: args.channel_id.clone(), + packet_data, + timeout_height_on_b: timeout_height, + timeout_timestamp_on_b: timeout_timestamp, + }; + MsgNftTransfer { message, transfer }.serialize_to_vec() + } else { + return Err(Error::Other(format!("Invalid IBC denom: {ibc_denom}"))); }; 
tx.add_code_from_hash( @@ -2869,7 +3293,7 @@ pub async fn build_custom( pub async fn gen_ibc_shielded_transfer( context: &N, args: args::GenIbcShieldedTransfer, -) -> Result> { +) -> Result> { let key = match args.target.payment_address() { Some(pa) if pa.is_pinned() => Some(pa.hash()), Some(_) => None, @@ -2881,19 +3305,27 @@ pub async fn gen_ibc_shielded_transfer( .await?; let ibc_denom = rpc::query_ibc_denom(context, &args.token, Some(&source)).await; - let prefixed_denom = ibc_denom - .parse() - .map_err(|_| Error::Other(format!("Invalid IBC denom: {ibc_denom}")))?; - let token = namada_ibc::received_ibc_token( - &prefixed_denom, - &src_port_id, - &src_channel_id, - &args.port_id, - &args.channel_id, - ) - .map_err(|e| { - Error::Other(format!("Getting IBC Token failed: error {e}")) - })?; + let token = if args.refund { + if ibc_denom.contains('/') { + ibc_token(ibc_denom) + } else { + // the token is a base token + Address::decode(&ibc_denom) + .map_err(|e| Error::Other(format!("Invalid token: {e}")))? + } + } else { + // Need to check the prefix + namada_ibc::received_ibc_token( + &ibc_denom, + &src_port_id, + &src_channel_id, + &args.port_id, + &args.channel_id, + ) + .map_err(|e| { + Error::Other(format!("Getting IBC Token failed: error {e}")) + })? 
+ }; let validated_amount = validate_amount(context, args.amount, &token, false).await?; @@ -2919,20 +3351,17 @@ pub async fn gen_ibc_shielded_transfer( .map_err(|err| TxSubmitError::MaspError(err.to_string()))?; if let Some(shielded_transfer) = shielded_transfer { + let masp_tx_hash = + Section::MaspTx(shielded_transfer.masp_tx.clone()).get_hash(); let transfer = token::Transfer { source: source.clone(), target: MASP, token: token.clone(), amount: validated_amount, key, - shielded: Some( - Section::MaspTx(shielded_transfer.masp_tx.clone()).get_hash(), - ), + shielded: Some(masp_tx_hash), }; - Ok(Some(IbcShieldedTransfer { - transfer, - masp_tx: shielded_transfer.masp_tx, - })) + Ok(Some((transfer, shielded_transfer.masp_tx))) } else { Ok(None) } @@ -2989,11 +3418,9 @@ async fn expect_dry_broadcast( let result = rpc::dry_run_tx(context, tx.to_bytes()).await?; Ok(ProcessTxResponse::DryRun(result)) } - TxBroadcastData::Live { - tx, - wrapper_hash: _, - decrypted_hash: _, - } => Err(Error::from(TxSubmitError::ExpectDryRun(tx))), + TxBroadcastData::Live { tx, tx_hash: _ } => { + Err(Error::from(TxSubmitError::ExpectDryRun(tx))) + } } } @@ -3087,6 +3514,62 @@ async fn target_exists_or_err( .await } +/// Returns the given refund target address if the given address is valid for +/// the IBC shielded transfer. Returns an error if the address is transparent +/// or the address is given for non-shielded transfer. 
+async fn get_refund_target( + context: &impl Namada, + source: &TransferSource, + refund_target: &Option, +) -> Result> { + match (source, refund_target) { + ( + TransferSource::ExtendedSpendingKey(_), + Some(TransferTarget::PaymentAddress(pa)), + ) => Ok(Some(*pa)), + ( + TransferSource::ExtendedSpendingKey(_), + Some(TransferTarget::Address(addr)), + ) => Err(Error::Other(format!( + "Transparent address can't be specified as a refund target: {}", + addr, + ))), + (TransferSource::ExtendedSpendingKey(spending_key), None) => { + // Generate a new payment address + let viewing_key = + ExtendedFullViewingKey::from(&(*spending_key).into()).fvk.vk; + let mut rng = OsRng; + let (div, _g_d) = crate::masp::find_valid_diversifier(&mut rng); + let payment_addr: PaymentAddress = viewing_key + .to_payment_address(div) + .ok_or_else(|| { + Error::Other( + "Converting to a payment address failed".to_string(), + ) + })? + .into(); + let alias = format!("ibc-refund-target-{}", rng.next_u64()); + let mut wallet = context.wallet_mut().await; + wallet + .insert_payment_addr(alias, payment_addr, false) + .ok_or_else(|| { + Error::Other( + "Adding a new payment address failed".to_string(), + ) + })?; + wallet.save().map_err(|e| { + Error::Other(format!("Saving wallet error: {e}")) + })?; + Ok(Some(payment_addr)) + } + (_, Some(_)) => Err(Error::Other( + "Refund target can't be specified for non-shielded transfer" + .to_string(), + )), + (_, None) => Ok(None), + } +} + enum CheckBalance { Balance(token::Amount), Query(storage::Key), diff --git a/crates/sdk/src/wallet/mod.rs b/crates/sdk/src/wallet/mod.rs index 4d90279c42..b18025b178 100644 --- a/crates/sdk/src/wallet/mod.rs +++ b/crates/sdk/src/wallet/mod.rs @@ -5,7 +5,7 @@ mod keys; pub mod pre_genesis; pub mod store; -use std::collections::{BTreeMap, HashMap, HashSet}; +use std::collections::BTreeMap; use std::fmt::Display; use std::str::FromStr; @@ -13,6 +13,8 @@ use alias::Alias; use bip39::{Language, Mnemonic, MnemonicType, 
Seed}; use borsh::{BorshDeserialize, BorshSerialize}; use namada_core::address::Address; +use namada_core::collections::{HashMap, HashSet}; +use namada_core::ibc::is_ibc_denom; use namada_core::key::*; use namada_core::masp::{ ExtendedSpendingKey, ExtendedViewingKey, PaymentAddress, @@ -379,6 +381,34 @@ impl Wallet { } } + /// Try to find an alias of the base token in the given IBC denomination + /// from the wallet. If not found, formats the IBC denomination into a + /// string. + pub fn lookup_ibc_token_alias(&self, ibc_denom: impl AsRef) -> String { + // Convert only an IBC denom or a Namada address since an NFT trace + // doesn't have the alias + is_ibc_denom(&ibc_denom) + .map(|(trace_path, base_token)| { + let base_token_alias = match Address::decode(&base_token) { + Ok(base_token) => self.lookup_alias(&base_token), + Err(_) => base_token, + }; + if trace_path.is_empty() { + base_token_alias + } else { + format!("{}/{}", trace_path, base_token_alias) + } + }) + .or_else(|| { + // It's not an IBC denom, but could be a raw Namada address + match Address::decode(&ibc_denom) { + Ok(addr) => Some(self.lookup_alias(&addr)), + Err(_) => None, + } + }) + .unwrap_or(ibc_denom.as_ref().to_string()) + } + /// Find the viewing key with the given alias in the wallet and return it pub fn find_viewing_key( &self, @@ -769,7 +799,7 @@ impl Wallet { // Try cache first if let Some(cached_key) = self .decrypted_key_cache - .get(&alias_pkh_or_pk.as_ref().into()) + .get(&Alias::from(alias_pkh_or_pk.as_ref())) { return Ok(cached_key.clone()); } @@ -810,8 +840,9 @@ impl Wallet { password: Option>, ) -> Result { // Try cache first - if let Some(cached_key) = - self.decrypted_spendkey_cache.get(&alias.as_ref().into()) + if let Some(cached_key) = self + .decrypted_spendkey_cache + .get(&Alias::from(alias.as_ref())) { return Ok(*cached_key); } diff --git a/crates/sdk/src/wallet/store.rs b/crates/sdk/src/wallet/store.rs index 068b81a9fd..82b32a887d 100644 --- 
a/crates/sdk/src/wallet/store.rs +++ b/crates/sdk/src/wallet/store.rs @@ -1,6 +1,6 @@ //! Wallet Store information -use std::collections::{BTreeMap, HashSet}; +use std::collections::BTreeMap; use std::fmt::Display; use std::str::FromStr; @@ -8,6 +8,7 @@ use bimap::BiBTreeMap; use itertools::Itertools; use masp_primitives::zip32; use namada_core::address::{Address, ImplicitAddress}; +use namada_core::collections::HashSet; use namada_core::key::*; use namada_core::masp::{ ExtendedSpendingKey, ExtendedViewingKey, PaymentAddress, diff --git a/crates/shielded_token/Cargo.toml b/crates/shielded_token/Cargo.toml index 6b73901484..8f4ef5a1e2 100644 --- a/crates/shielded_token/Cargo.toml +++ b/crates/shielded_token/Cargo.toml @@ -18,6 +18,7 @@ multicore = ["dep:rayon"] testing = ["multicore", "namada_core/testing"] [dependencies] +namada_controller = { path = "../controller" } namada_core = { path = "../core" } namada_parameters = { path = "../parameters" } namada_storage = { path = "../storage" } @@ -31,6 +32,7 @@ tracing.workspace = true [dev-dependencies] namada_core = { path = "../core", features = ["testing"] } +namada_parameters = { path = "../parameters", features = ["testing"] } namada_storage = { path = "../storage", features = ["testing"] } proptest.workspace = true diff --git a/crates/shielded_token/src/conversion.rs b/crates/shielded_token/src/conversion.rs index b0fa016a53..5dbb210110 100644 --- a/crates/shielded_token/src/conversion.rs +++ b/crates/shielded_token/src/conversion.rs @@ -1,5 +1,6 @@ //! 
MASP rewards conversions +use namada_controller::PDController; use namada_core::address::{Address, MASP}; #[cfg(any(feature = "multicore", test))] use namada_core::borsh::BorshSerializeExt; @@ -9,11 +10,10 @@ use namada_core::hash::Hash; use namada_core::uint::Uint; use namada_parameters as parameters; use namada_storage::{StorageRead, StorageWrite}; -use namada_trans_token::inflation::{ - ShieldedRewardsController, ShieldedValsToUpdate, +use namada_trans_token::{ + get_effective_total_native_supply, read_balance, read_denom, Amount, + DenominatedAmount, Denomination, }; -use namada_trans_token::storage_key::{balance_key, minted_balance_key}; -use namada_trans_token::{read_denom, Amount, DenominatedAmount, Denomination}; #[cfg(any(feature = "multicore", test))] use crate::storage_key::{masp_assets_hash_key, masp_token_map_key}; @@ -24,6 +24,36 @@ use crate::storage_key::{ }; use crate::WithConversionState; +/// Compute shielded token inflation amount +#[allow(clippy::too_many_arguments)] +pub fn compute_inflation( + locked_amount: Uint, + total_native_amount: Uint, + max_reward_rate: Dec, + last_inflation_amount: Uint, + p_gain_nom: Dec, + d_gain_nom: Dec, + epochs_per_year: u64, + target_amount: Dec, + last_amount: Dec, +) -> Uint { + let controller = PDController::new( + total_native_amount, + max_reward_rate, + last_inflation_amount, + p_gain_nom, + d_gain_nom, + epochs_per_year, + target_amount, + last_amount, + ); + + let metric = Dec::try_from(locked_amount) + .expect("Should not fail to convert Uint to Dec"); + let control_coeff = max_reward_rate / controller.get_epochs_per_year(); + controller.compute_inflation(control_coeff, metric) +} + /// Compute the precision of MASP rewards for the given token. This function /// must be a non-zero constant for a given token. 
pub fn calculate_masp_rewards_precision( @@ -64,16 +94,11 @@ where // Query the storage for information ------------------------- - let native_token = storage.get_native_token()?; //// information about the amount of native tokens on the chain - let total_native_tokens: Amount = storage - .read(&minted_balance_key(&native_token))? - .expect("the total supply key should be here"); + let total_native_tokens = get_effective_total_native_supply(storage)?; // total locked amount in the Shielded pool - let total_tokens_in_masp: Amount = storage - .read(&balance_key(token, &masp_addr))? - .unwrap_or_default(); + let total_tokens_in_masp = read_balance(storage, token, &masp_addr)?; let epochs_per_year: u64 = storage .read(¶meters::storage::get_epochs_per_year_key())? @@ -105,21 +130,23 @@ where .read(&masp_locked_amount_target_key(token))? .expect("locked ratio target should properly decode"); - // Creating the PD controller for handing out tokens - let controller = ShieldedRewardsController { - locked_tokens: total_tokens_in_masp.raw_amount(), - total_native_tokens: total_native_tokens.raw_amount(), - locked_tokens_target: target_locked_amount.raw_amount(), - locked_tokens_last: last_locked_amount.raw_amount(), + let target_locked_dec = Dec::try_from(target_locked_amount.raw_amount()) + .expect("Should not fail to convert Uint to Dec"); + let last_locked_dec = Dec::try_from(last_locked_amount.raw_amount()) + .expect("Should not fail to convert Uint to Dec"); + + // Initial computation of the new shielded inflation + let inflation = compute_inflation( + total_tokens_in_masp.raw_amount(), + total_native_tokens.raw_amount(), max_reward_rate, - last_inflation_amount: last_inflation.raw_amount(), - p_gain_nom: kp_gain_nom, - d_gain_nom: kd_gain_nom, + last_inflation.raw_amount(), + kp_gain_nom, + kd_gain_nom, epochs_per_year, - }; - - let ShieldedValsToUpdate { inflation } = - ShieldedRewardsController::run(controller); + target_locked_dec, + last_locked_dec, + ); // 
inflation-per-token = inflation / locked tokens = n/PRECISION // ∴ n = (inflation * PRECISION) / locked tokens @@ -217,6 +244,7 @@ where use namada_core::masp::encode_asset_type; use namada_core::storage::Epoch; use namada_storage::ResultExt; + use namada_trans_token::storage_key::balance_key; use namada_trans_token::{MaspDigitPos, NATIVE_MAX_DECIMAL_PLACES}; use rayon::iter::{ IndexedParallelIterator, IntoParallelIterator, ParallelIterator, @@ -299,9 +327,8 @@ where let (reward, denom) = calculate_masp_rewards(storage, token)?; masp_reward_denoms.insert(token.clone(), denom); // Dispense a transparent reward in parallel to the shielded rewards - let addr_bal: Amount = storage - .read(&balance_key(token, &masp_addr))? - .unwrap_or_default(); + let addr_bal = read_balance(storage, token, &masp_addr)?; + // Get the last rewarded amount of the native token let normed_inflation = *storage .conversion_state_mut() @@ -531,15 +558,14 @@ where #[cfg(test)] mod tests { - use std::collections::HashMap; use std::str::FromStr; use namada_core::address; + use namada_core::collections::HashMap; use namada_core::dec::testing::arb_non_negative_dec; - use namada_core::time::DurationSecs; use namada_core::token::testing::arb_amount; - use namada_parameters::{EpochDuration, Parameters}; use namada_storage::testing::TestStorage; + use namada_trans_token::storage_key::{balance_key, minted_balance_key}; use namada_trans_token::write_denom; use proptest::prelude::*; use proptest::test_runner::Config; @@ -569,31 +595,10 @@ mod tests { const ROUNDS: usize = 10; let mut s = TestStorage::default(); - let params = Parameters { - max_tx_bytes: 1024 * 1024, - epoch_duration: EpochDuration { - min_num_of_blocks: 1, - min_duration: DurationSecs(3600), - }, - max_expected_time_per_block: DurationSecs(3600), - max_proposal_bytes: Default::default(), - max_block_gas: 100, - vp_allowlist: vec![], - tx_allowlist: vec![], - implicit_vp_code_hash: Default::default(), - epochs_per_year: 365, - 
max_signatures_per_transaction: 10, - staked_ratio: Default::default(), - pos_inflation_amount: Default::default(), - fee_unshielding_gas_limit: 0, - fee_unshielding_descriptions_limit: 0, - minimum_gas_price: Default::default(), - }; - // Initialize the state { // Parameters - namada_parameters::init_storage(¶ms, &mut s).unwrap(); + namada_parameters::init_test_storage(&mut s).unwrap(); // Tokens let token_params = ShieldedParams { @@ -652,4 +657,59 @@ mod tests { .into_iter() .collect() } + + #[test] + fn test_masp_inflation_playground() { + let denom = Uint::from(1_000_000); // token denomination (usually 6) + let total_tokens = 10_000_000_000_u64; // 10B naan + let mut total_tokens = Uint::from(total_tokens) * denom; + let locked_tokens_target = Uint::from(500_000) * denom; // Dependent on the token type + let init_locked_ratio = Dec::from_str("0.1").unwrap(); // Arbitrary amount to play around with + let init_locked_tokens = (init_locked_ratio + * Dec::try_from(locked_tokens_target).unwrap()) + .to_uint() + .unwrap(); + let epochs_per_year = 730_u64; // SE configuration + let max_reward_rate = Dec::from_str("0.01").unwrap(); // Pre-determined based on token type + let mut last_inflation_amount = Uint::zero(); + let p_gain_nom = Dec::from_str("25000").unwrap(); // To be configured + let d_gain_nom = Dec::from_str("25000").unwrap(); // To be configured + + let mut locked_amount = init_locked_tokens; + let mut locked_tokens_last = init_locked_tokens; + + let num_rounds = 10; + println!(); + + for round in 0..num_rounds { + let inflation = compute_inflation( + locked_amount, + total_tokens, + max_reward_rate, + last_inflation_amount, + p_gain_nom, + d_gain_nom, + epochs_per_year, + Dec::try_from(locked_tokens_target).unwrap(), + Dec::try_from(locked_tokens_last).unwrap(), + ); + + let rate = Dec::try_from(inflation).unwrap() + * Dec::from(epochs_per_year) + / Dec::try_from(total_tokens).unwrap(); + + println!( + "Round {round}: Locked amount: {locked_amount}, 
inflation \ + rate: {rate} -- (raw infl: {inflation})", + ); + // dbg!(&controller); + + last_inflation_amount = inflation; + total_tokens += inflation; + locked_tokens_last = locked_amount; + + let change_staked_tokens = Uint::from(2) * locked_tokens_target; + locked_amount += change_staked_tokens; + } + } } diff --git a/crates/shielded_token/src/utils.rs b/crates/shielded_token/src/utils.rs index 42fc6413dd..b4253e76e2 100644 --- a/crates/shielded_token/src/utils.rs +++ b/crates/shielded_token/src/utils.rs @@ -76,6 +76,7 @@ pub fn handle_masp_tx( IndexedTx { height: ctx.get_block_height()?, index: ctx.get_tx_index()?, + is_wrapper: false, }, )?; } diff --git a/crates/state/Cargo.toml b/crates/state/Cargo.toml index c0e4e7e55d..174407218f 100644 --- a/crates/state/Cargo.toml +++ b/crates/state/Cargo.toml @@ -21,6 +21,7 @@ migrations = [ "namada_migrations", "linkme", ] +benches = [] [dependencies] namada_core = { path = "../core", default-features = false } diff --git a/crates/state/src/host_env.rs b/crates/state/src/host_env.rs index 2b2a1ef818..5a6804b5ba 100644 --- a/crates/state/src/host_env.rs +++ b/crates/state/src/host_env.rs @@ -1,6 +1,5 @@ use std::cell::RefCell; -use namada_core::validity_predicate::VpSentinel; use namada_gas::{GasMetering, TxGasMeter, VpGasMeter}; use namada_tx::data::TxSentinel; @@ -42,8 +41,6 @@ where pub in_mem: &'a InMemory, /// VP gas meter pub gas_meter: &'a RefCell, - /// Errors sentinel - pub sentinel: &'a RefCell, } impl StateRead for TxHostEnvState<'_, D, H> @@ -115,13 +112,6 @@ where } fn charge_gas(&self, gas: u64) -> Result<()> { - self.gas_meter.borrow_mut().consume(gas).map_err(|err| { - self.sentinel.borrow_mut().set_out_of_gas(); - tracing::info!( - "Stopping VP execution because of gas error: {}", - err - ); - Error::Gas(err) - }) + self.gas_meter.borrow_mut().consume(gas).map_err(Error::Gas) } } diff --git a/crates/state/src/in_memory.rs b/crates/state/src/in_memory.rs index ad814bcd12..7bff934f4c 100644 --- 
a/crates/state/src/in_memory.rs +++ b/crates/state/src/in_memory.rs @@ -1,6 +1,7 @@ use namada_core::address::{Address, EstablishedAddressGen, InternalAddress}; use namada_core::borsh::{BorshDeserialize, BorshSerialize}; use namada_core::chain::{ChainId, CHAIN_ID_LENGTH}; +use namada_core::hash::Hash; use namada_core::time::DateTimeUtc; use namada_core::{encode, ethereum_structs}; use namada_gas::MEMORY_ACCESS_GAS_PER_BYTE; @@ -10,7 +11,8 @@ use namada_merkle_tree::{MerkleRoot, MerkleTree}; use namada_migrations::*; use namada_parameters::{EpochDuration, Parameters}; use namada_storage::conversion_state::ConversionState; -use namada_storage::tx_queue::{ExpiredTxsQueue, TxQueue}; +use namada_storage::tx_queue::ExpiredTxsQueue; +use namada_storage::types::CommitOnlyData; use namada_storage::{ BlockHash, BlockHeight, BlockResults, Epoch, Epochs, EthEventsQueue, Header, Key, KeySeg, StorageHasher, TxIndex, BLOCK_HASH_LENGTH, @@ -57,8 +59,6 @@ where pub tx_index: TxIndex, /// The currently saved conversion state pub conversion_state: ConversionState, - /// Wrapper txs to be decrypted in the next block proposal - pub tx_queue: TxQueue, /// Queue of expired transactions that need to be retransmitted. 
/// /// These transactions do not need to be persisted, as they are @@ -72,6 +72,8 @@ where pub eth_events_queue: EthEventsQueue, /// How many block heights in the past can the storage be queried pub storage_read_past_height_limit: Option, + /// Data that needs to be committed to the merkle tree + pub commit_only_data: CommitOnlyData, } /// Last committed block @@ -135,6 +137,7 @@ where last_block: None, last_epoch: Epoch::default(), next_epoch_min_start_height: BlockHeight::default(), + #[allow(clippy::disallowed_methods)] next_epoch_min_start_time: DateTimeUtc::now(), address_gen: EstablishedAddressGen::new( "Privacy is a function of liberty.", @@ -142,12 +145,12 @@ where update_epoch_blocks_delay: None, tx_index: TxIndex::default(), conversion_state: ConversionState::default(), - tx_queue: TxQueue::default(), expired_txs_queue: ExpiredTxsQueue::default(), native_token, ethereum_height: None, eth_events_queue: EthEventsQueue::default(), storage_read_past_height_limit, + commit_only_data: CommitOnlyData::default(), } } @@ -186,6 +189,10 @@ where Ok(()) } + pub fn add_tx_gas(&mut self, tx_hash: Hash, gas: u64) { + self.commit_only_data.tx_gas.insert(tx_hash, gas); + } + /// Get the chain ID as a raw string pub fn get_chain_id(&self) -> (String, u64) { ( diff --git a/crates/state/src/lib.rs b/crates/state/src/lib.rs index c7f112c500..79e34b972a 100644 --- a/crates/state/src/lib.rs +++ b/crates/state/src/lib.rs @@ -20,10 +20,7 @@ pub use namada_core::storage::{ EPOCH_TYPE_LENGTH, }; use namada_core::tendermint::merkle::proof::ProofOps; -use namada_gas::{ - MEMORY_ACCESS_GAS_PER_BYTE, STORAGE_ACCESS_GAS_PER_BYTE, - STORAGE_WRITE_GAS_PER_BYTE, -}; +use namada_gas::{MEMORY_ACCESS_GAS_PER_BYTE, STORAGE_ACCESS_GAS_PER_BYTE}; use namada_merkle_tree::Error as MerkleTreeError; pub use namada_merkle_tree::{ self as merkle_tree, ics23_specs, MembershipProof, MerkleTree, @@ -197,21 +194,18 @@ macro_rules! 
impl_storage_read { key: &storage::Key, ) -> namada_storage::Result>> { // try to read from the write log first - let (log_val, gas) = self.write_log().read(key); + let (log_val, gas) = self.write_log().read_persistent(key); self.charge_gas(gas).into_storage_result()?; match log_val { - Some(write_log::StorageModification::Write { ref value }) => { + Some(write_log::PersistentStorageModification::Write { value }) => { Ok(Some(value.clone())) } - Some(write_log::StorageModification::Delete) => Ok(None), - Some(write_log::StorageModification::InitAccount { + Some(write_log::PersistentStorageModification::Delete) => Ok(None), + Some(write_log::PersistentStorageModification::InitAccount { ref vp_code_hash, }) => Ok(Some(vp_code_hash.to_vec())), - Some(write_log::StorageModification::Temp { ref value }) => { - Ok(Some(value.clone())) - } None => { - // when not found in write log, try to read from the storage + // when not found in write log try to read from the storage let (value, gas) = self.db_read(key).into_storage_result()?; self.charge_gas(gas).into_storage_result()?; Ok(value) @@ -225,14 +219,13 @@ macro_rules! impl_storage_read { self.charge_gas(gas).into_storage_result()?; match log_val { Some(&write_log::StorageModification::Write { .. }) - | Some(&write_log::StorageModification::InitAccount { .. }) - | Some(&write_log::StorageModification::Temp { .. }) => Ok(true), + | Some(&write_log::StorageModification::InitAccount { .. }) => Ok(true), Some(&write_log::StorageModification::Delete) => { // the given key has been deleted Ok(false) } - None => { - // when not found in write log, try to check the storage + Some(&write_log::StorageModification::Temp { .. 
}) | None => { + // when not found in write log or only found a temporary value, try to check the storage let (present, gas) = self.db_has_key(key).into_storage_result()?; self.charge_gas(gas).into_storage_result()?; Ok(present) @@ -551,8 +544,7 @@ where self.write_log_iter.next() { match modification { - write_log::StorageModification::Write { value } - | write_log::StorageModification::Temp { value } => { + write_log::StorageModification::Write { value } => { let gas = value.len() as u64; return Some((key, value, gas)); } @@ -562,7 +554,8 @@ where let gas = vp_code_hash.len() as u64; return Some((key, vp_code_hash.to_vec(), gas)); } - write_log::StorageModification::Delete => { + write_log::StorageModification::Delete + | write_log::StorageModification::Temp { .. } => { continue; } } @@ -581,11 +574,13 @@ where /// Helpers for testing components that depend on storage #[cfg(any(test, feature = "testing"))] pub mod testing { + use namada_core::address; use namada_core::address::EstablishedAddressGen; use namada_core::chain::ChainId; use namada_core::time::DateTimeUtc; - use namada_storage::tx_queue::{ExpiredTxsQueue, TxQueue}; + use namada_storage::tx_queue::ExpiredTxsQueue; + use storage::types::CommitOnlyData; use super::mockdb::MockDB; use super::*; @@ -625,6 +620,7 @@ pub mod testing { last_block: None, last_epoch: Epoch::default(), next_epoch_min_start_height: BlockHeight::default(), + #[allow(clippy::disallowed_methods)] next_epoch_min_start_time: DateTimeUtc::now(), address_gen: EstablishedAddressGen::new( "Test address generator seed", @@ -632,12 +628,12 @@ pub mod testing { update_epoch_blocks_delay: None, tx_index: TxIndex::default(), conversion_state: ConversionState::default(), - tx_queue: TxQueue::default(), expired_txs_queue: ExpiredTxsQueue::default(), native_token: address::testing::nam(), ethereum_height: None, eth_events_queue: EthEventsQueue::default(), storage_read_past_height_limit: Some(1000), + commit_only_data: CommitOnlyData::default(), } 
} } @@ -650,10 +646,8 @@ mod tests { use chrono::{TimeZone, Utc}; use namada_core::address::InternalAddress; use namada_core::borsh::{BorshDeserialize, BorshSerializeExt}; - use namada_core::dec::Dec; use namada_core::storage::DbKeySeg; use namada_core::time::{self, DateTimeUtc, Duration}; - use namada_core::token; use namada_parameters::{EpochDuration, Parameters}; use proptest::prelude::*; use proptest::test_runner::Config; @@ -737,11 +731,10 @@ mod tests { implicit_vp_code_hash: Some(Hash::zero()), epochs_per_year: 100, max_signatures_per_transaction: 15, - staked_ratio: Dec::new(1,1).expect("Cannot fail"), - pos_inflation_amount: token::Amount::zero(), fee_unshielding_gas_limit: 20_000, fee_unshielding_descriptions_limit: 15, minimum_gas_price: BTreeMap::default(), + is_native_token_transferable: true, }; namada_parameters::init_storage(¶meters, &mut state).unwrap(); // Initialize pred_epochs to the current height diff --git a/crates/state/src/wl_state.rs b/crates/state/src/wl_state.rs index ec20360bfb..4934647624 100644 --- a/crates/state/src/wl_state.rs +++ b/crates/state/src/wl_state.rs @@ -13,14 +13,13 @@ use namada_storage::{BlockHeight, BlockStateRead, BlockStateWrite, ResultExt}; use crate::in_memory::InMemory; use crate::write_log::{ - self, ReProtStorageModification, StorageModification, WriteLog, + ReProtStorageModification, StorageModification, WriteLog, }; use crate::{ is_pending_transfer_key, DBIter, Epoch, Error, Hash, Key, LastBlock, MembershipProof, MerkleTree, MerkleTreeError, ProofOps, Result, State, StateRead, StorageHasher, StorageResult, StoreType, DB, EPOCH_SWITCH_BLOCKS_DELAY, STORAGE_ACCESS_GAS_PER_BYTE, - STORAGE_WRITE_GAS_PER_BYTE, }; /// Owned state with full R/W access. 
@@ -490,9 +489,9 @@ where results, address_gen, conversion_state, - tx_queue, ethereum_height, eth_events_queue, + commit_only_data, }) = self .0 .db @@ -513,6 +512,7 @@ where in_mem.next_epoch_min_start_time = next_epoch_min_start_time; in_mem.update_epoch_blocks_delay = update_epoch_blocks_delay; in_mem.address_gen = address_gen; + in_mem.commit_only_data = commit_only_data; } // Rebuild Merkle tree - requires the values above to be set first @@ -523,7 +523,6 @@ where let in_mem = &mut self.0.in_mem; in_mem.block.tree = tree; in_mem.conversion_state = conversion_state; - in_mem.tx_queue = tx_queue; in_mem.ethereum_height = ethereum_height; in_mem.eth_events_queue = eth_events_queue; tracing::debug!("Loaded storage from DB"); @@ -532,6 +531,15 @@ where } } + pub fn commit_only_data(&mut self) -> Result<()> { + let data = self.in_mem().commit_only_data.serialize(); + self.in_mem_mut() + .block + .tree + .update_commit_data(data) + .map_err(Error::MerkleTreeError) + } + /// Persist the block's state from batch writes to the database. /// Note that unlike `commit_block` this method doesn't commit the write /// log. @@ -545,17 +553,20 @@ where // For convenience in tests, fill-in a header if it's missing. // Normally, the header is added in `FinalizeBlock`. 
- #[cfg(any(test, feature = "testing"))] + #[cfg(any(test, feature = "testing", feature = "benches"))] { if self.in_mem.header.is_none() { self.in_mem.header = Some(storage::Header { hash: Hash::default(), + #[allow(clippy::disallowed_methods)] time: DateTimeUtc::now(), next_validators_hash: Hash::default(), }); } } + self.commit_only_data()?; + let state = BlockStateWrite { merkle_tree_stores: self.in_mem.block.tree.stores(), header: self.in_mem.header.as_ref(), @@ -577,9 +588,9 @@ where update_epoch_blocks_delay: self.in_mem.update_epoch_blocks_delay, address_gen: &self.in_mem.address_gen, conversion_state: &self.in_mem.conversion_state, - tx_queue: &self.in_mem.tx_queue, ethereum_height: self.in_mem.ethereum_height.as_ref(), eth_events_queue: &self.in_mem.eth_events_queue, + commit_only_data: &self.in_mem.commit_only_data, }; self.db .add_block_to_batch(state, &mut batch, is_full_commit)?; @@ -598,6 +609,10 @@ where // prune old merkle tree stores self.prune_merkle_tree_stores(&mut batch)?; } + // If there's a previous block, prune non-persisted diffs from it + if let Some(height) = self.in_mem.block.height.checked_prev() { + self.db.prune_non_persisted_diffs(&mut batch, height)?; + } self.db.exec_batch(batch)?; Ok(()) } @@ -651,8 +666,8 @@ where } /// Delete the provided transaction's hash from storage. 
- pub fn delete_tx_hash(&mut self, hash: Hash) -> write_log::Result<()> { - self.write_log.delete_tx_hash(hash) + pub fn delete_tx_hash(&mut self, hash: Hash) { + self.write_log.delete_tx_hash(hash); } #[inline] @@ -713,6 +728,7 @@ where /// Write a value to the specified subspace and returns the gas cost and the /// size difference + #[cfg(any(test, feature = "testing", feature = "benches"))] pub fn db_write( &mut self, key: &Key, @@ -737,7 +753,8 @@ where } let len = value.len(); - let gas = (key.len() + len) as u64 * STORAGE_WRITE_GAS_PER_BYTE; + let gas = + (key.len() + len) as u64 * namada_gas::STORAGE_WRITE_GAS_PER_BYTE; let size_diff = self.db.write_subspace_val( self.in_mem.block.height, key, @@ -749,6 +766,7 @@ where /// Delete the specified subspace and returns the gas cost and the size /// difference + #[cfg(any(test, feature = "testing", feature = "benches"))] pub fn db_delete(&mut self, key: &Key) -> Result<(u64, i64)> { // Note that this method is the same as `StorageWrite::delete`, // but with gas and storage bytes len diff accounting @@ -765,7 +783,7 @@ where )?; } let gas = (key.len() + deleted_bytes_len as usize) as u64 - * STORAGE_WRITE_GAS_PER_BYTE; + * namada_gas::STORAGE_WRITE_GAS_PER_BYTE; Ok((gas, deleted_bytes_len)) } @@ -873,19 +891,25 @@ where .pred_epochs .get_epoch(height) .unwrap_or_default(); - let epoch_start_height = match self - .in_mem - .block - .pred_epochs - .get_start_height_of_epoch(epoch) - { - Some(BlockHeight(0)) => BlockHeight(1), - Some(height) => height, - None => BlockHeight(1), + let start_height = if store_type == Some(StoreType::CommitData) { + // CommitData is stored every height + height + } else { + // others are stored at the first height of each epoch + match self + .in_mem + .block + .pred_epochs + .get_start_height_of_epoch(epoch) + { + Some(BlockHeight(0)) => BlockHeight(1), + Some(height) => height, + None => BlockHeight(1), + } }; let stores = self .db - .read_merkle_tree_stores(epoch, epoch_start_height, 
store_type)? + .read_merkle_tree_stores(epoch, start_height, store_type)? .ok_or(Error::NoMerkleTree { height })?; let prefix = store_type.and_then(|st| st.provable_prefix()); let mut tree = match store_type { @@ -893,7 +917,7 @@ where None => MerkleTree::::new(stores).expect("invalid stores"), }; // Restore the tree state with diffs - let mut target_height = epoch_start_height; + let mut target_height = start_height; while target_height < height { target_height = target_height.next_height(); let mut old_diff_iter = @@ -984,17 +1008,40 @@ where } } } - if let Some(st) = store_type { - // Add the base tree with the given height - let mut stores = self - .db - .read_merkle_tree_stores(epoch, height, Some(StoreType::Base))? - .ok_or(Error::NoMerkleTree { height })?; - let restored_stores = tree.stores(); - // Set the root and store of the rebuilt subtree - stores.set_root(&st, *restored_stores.root(&st)); - stores.set_store(restored_stores.store(&st).to_owned()); - tree = MerkleTree::::new_partial(stores); + + // Restore the base tree and CommitData tree + match store_type { + Some(st) => { + // It is enough to get the base tree + let mut stores = self + .db + .read_merkle_tree_stores( + epoch, + height, + Some(StoreType::Base), + )? + .ok_or(Error::NoMerkleTree { height })?; + let restored_stores = tree.stores(); + stores.set_root(&st, *restored_stores.root(&st)); + stores.set_store(restored_stores.store(&st).to_owned()); + tree = MerkleTree::::new_partial(stores); + } + None => { + // Get the base and CommitData trees + let mut stores = self + .db + .read_merkle_tree_stores(epoch, height, None)? 
+ .ok_or(Error::NoMerkleTree { height })?; + let restored_stores = tree.stores(); + // Set all rebuilt subtrees except for CommitData tree + for st in StoreType::iter_subtrees() { + if *st != StoreType::CommitData { + stores.set_root(st, *restored_stores.root(st)); + stores.set_store(restored_stores.store(st).to_owned()); + } + } + tree = MerkleTree::::new(stores)?; + } } Ok(tree) } @@ -1004,10 +1051,11 @@ where pub fn get_last_block_timestamp(&self) -> Result { let last_block_height = self.in_mem.get_block_height().0; - Ok(self - .db - .read_block_header(last_block_height)? - .map_or_else(DateTimeUtc::now, |header| header.time)) + Ok(self.db.read_block_header(last_block_height)?.map_or_else( + #[allow(clippy::disallowed_methods)] + DateTimeUtc::now, + |header| header.time, + )) } } @@ -1213,3 +1261,52 @@ where &mut self.0 } } + +#[cfg(any(test, feature = "testing"))] +impl namada_tx::action::Read for FullAccessState +where + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, +{ + type Err = Error; + + fn read_temp( + &self, + key: &storage::Key, + ) -> Result> { + let (log_val, _) = self.write_log().read(key); + match log_val { + Some(crate::write_log::StorageModification::Temp { value }) => { + let value = + namada_core::borsh::BorshDeserialize::try_from_slice(value) + .map_err(Error::BorshCodingError)?; + Ok(Some(value)) + } + None => Ok(None), + _ => Err(Error::UnknownKey { + key: key.to_string(), + }), + } + } +} + +#[cfg(any(test, feature = "testing"))] +impl namada_tx::action::Write for FullAccessState +where + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, +{ + fn write_temp( + &mut self, + key: &storage::Key, + val: T, + ) -> Result<()> { + let _ = self + .write_log_mut() + .write_temp(key, val.serialize_to_vec()) + .map_err(|err| Error::Temporary { + error: err.to_string(), + })?; + Ok(()) + } +} diff --git a/crates/state/src/wl_storage.rs b/crates/state/src/wl_storage.rs new file mode 100644 index 
0000000000..e69de29bb2 diff --git a/crates/state/src/write_log.rs b/crates/state/src/write_log.rs index a3f25797eb..44f28c9619 100644 --- a/crates/state/src/write_log.rs +++ b/crates/state/src/write_log.rs @@ -1,18 +1,15 @@ //! Write log is temporary storage for modifications performed by a transaction. //! before they are committed to the ledger's storage. -use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet}; +use std::collections::{BTreeMap, BTreeSet}; use itertools::Itertools; -use namada_core::address::{Address, EstablishedAddressGen, InternalAddress}; +use namada_core::address::{Address, EstablishedAddressGen}; +use namada_core::collections::{HashMap, HashSet}; use namada_core::hash::Hash; use namada_core::ibc::IbcEvent; use namada_core::storage; use namada_gas::{MEMORY_ACCESS_GAS_PER_BYTE, STORAGE_WRITE_GAS_PER_BYTE}; -use namada_trans_token::storage_key::{ - is_any_minted_balance_key, is_any_minter_key, is_any_token_balance_key, - is_any_token_parameter_key, -}; use thiserror::Error; #[allow(missing_docs)] @@ -31,8 +28,14 @@ pub enum Error { DeleteVp, #[error("Trying to write a temporary value after deleting")] WriteTempAfterDelete, + #[error("Trying to write a temporary value after writing")] + WriteTempAfterWrite, #[error("Replay protection key: {0}")] ReplayProtection(String), + #[error( + "Trying to cast a temporary write to a persistent storage modification" + )] + TempToPersistentModificationCast, } /// Result for functions that may fail @@ -63,6 +66,51 @@ pub enum StorageModification { }, } +/// A persistent storage modification. 
Associated data is present as a reference +/// to the corresponding [`StorageModification`] present in the write log +#[derive(Clone, Debug, PartialEq, Eq)] +pub enum PersistentStorageModification<'wl> { + /// Write a new value + Write { + /// Value bytes + value: &'wl Vec, + }, + /// Delete an existing key-value + Delete, + /// Initialize a new account with established address and a given validity + /// predicate hash. The key for `InitAccount` inside the [`WriteLog`] must + /// point to its validity predicate. + InitAccount { + /// Validity predicate hash bytes + vp_code_hash: &'wl Hash, + }, +} + +impl<'wl> TryFrom<&'wl StorageModification> + for PersistentStorageModification<'wl> +{ + type Error = Error; + + fn try_from( + value: &'wl StorageModification, + ) -> std::prelude::v1::Result { + match value { + StorageModification::Write { value } => { + Ok(PersistentStorageModification::Write { value }) + } + StorageModification::Delete => { + Ok(PersistentStorageModification::Delete) + } + StorageModification::InitAccount { vp_code_hash } => { + Ok(PersistentStorageModification::InitAccount { vp_code_hash }) + } + StorageModification::Temp { value: _ } => { + Err(Error::TempToPersistentModificationCast) + } + } + } +} + #[derive(Debug, Clone, PartialEq, Eq)] /// A replay protection storage modification pub(crate) enum ReProtStorageModification { @@ -168,6 +216,41 @@ impl WriteLog { } } + /// Read a non-temporary value at the given key and return the value and the + /// gas cost, returns [`None`] if the key is not present in the write + /// log + pub fn read_persistent( + &self, + key: &storage::Key, + ) -> (Option, u64) { + for bucket in [ + &self.tx_write_log, + &self.tx_precommit_write_log, + &self.block_write_log, + ] { + if let Some(modification) = bucket.get(key) { + let gas = match modification { + StorageModification::Write { ref value } => { + key.len() + value.len() + } + StorageModification::Delete => key.len(), + StorageModification::InitAccount { ref 
vp_code_hash } => { + key.len() + vp_code_hash.len() + } + StorageModification::Temp { .. } => continue, + }; + return ( + Some(modification.try_into().expect( + "Temporary value should have been filtered out", + )), + gas as u64 * MEMORY_ACCESS_GAS_PER_BYTE, + ); + } + } + + (None, key.len() as u64 * MEMORY_ACCESS_GAS_PER_BYTE) + } + /// Read a value before the latest tx execution at the given key and return /// the value and the gas cost, returns [`None`] if the key is not present /// in the write log @@ -209,7 +292,8 @@ impl WriteLog { let gas = key.len() + len; let size_diff = match self .tx_write_log - .insert(key.clone(), StorageModification::Write { value }) + .get(key) + .or_else(|| self.tx_precommit_write_log.get(key)) { Some(prev) => match prev { StorageModification::Write { ref value } => { @@ -217,6 +301,11 @@ impl WriteLog { } StorageModification::Delete => len as i64, StorageModification::InitAccount { .. } => { + // NOTE: errors from host functions force a shudown of the + // wasm environment without the need for cooperation from + // the wasm code (tx or vp), so there's no need to return + // gas in case of an error because execution will terminate + // anyway and this cannot be exploited to run the vm forever return Err(Error::UpdateVpOfNewAccount); } StorageModification::Temp { .. } => { @@ -227,6 +316,10 @@ impl WriteLog { // the previous value exists on the storage None => len as i64, }; + + self.tx_write_log + .insert(key.clone(), StorageModification::Write { value }); + Ok((gas as u64 * STORAGE_WRITE_GAS_PER_BYTE, size_diff)) } @@ -259,6 +352,8 @@ impl WriteLog { } /// Write a key and a value and return the gas cost and the size difference + /// Fails with [`Error::WriteTempAfterWrite`] when attempting to update a + /// temporary value after writing. /// Fails with [`Error::UpdateVpOfNewAccount`] when attempting to update a /// validity predicate of a new account that's not yet committed to storage. 
/// Fails with [`Error::WriteTempAfterDelete`] when attempting to update a @@ -272,11 +367,13 @@ impl WriteLog { let gas = key.len() + len; let size_diff = match self .tx_write_log - .insert(key.clone(), StorageModification::Temp { value }) + .get(key) + .or_else(|| self.tx_precommit_write_log.get(key)) { Some(prev) => match prev { - StorageModification::Write { ref value } => { - len as i64 - value.len() as i64 + StorageModification::Write { .. } => { + // Cannot overwrite a write request with a temporary one + return Err(Error::WriteTempAfterWrite); } StorageModification::Delete => { return Err(Error::WriteTempAfterDelete); @@ -292,6 +389,10 @@ impl WriteLog { // the previous value exists on the storage None => len as i64, }; + + self.tx_write_log + .insert(key.clone(), StorageModification::Temp { value }); + // Temp writes are not propagated to db so just charge the cost of // accessing storage Ok((gas as u64 * MEMORY_ACCESS_GAS_PER_BYTE, size_diff)) @@ -307,7 +408,8 @@ impl WriteLog { } let size_diff = match self .tx_write_log - .insert(key.clone(), StorageModification::Delete) + .get(key) + .or_else(|| self.tx_precommit_write_log.get(key)) { Some(prev) => match prev { StorageModification::Write { ref value } => value.len() as i64, @@ -321,6 +423,9 @@ impl WriteLog { // storage None => 0, }; + + self.tx_write_log + .insert(key.clone(), StorageModification::Delete); let gas = key.len() + size_diff as usize; Ok((gas as u64 * STORAGE_WRITE_GAS_PER_BYTE, -size_diff)) } @@ -353,13 +458,14 @@ impl WriteLog { &mut self, storage_address_gen: &EstablishedAddressGen, vp_code_hash: Hash, + entropy_source: &[u8], ) -> (Address, u64) { // If we've previously generated a new account, we use the local copy of // the generator. 
Otherwise, we create a new copy from the storage - let address_gen = - self.address_gen.get_or_insert(storage_address_gen.clone()); - let addr = - address_gen.generate_address("TODO more randomness".as_bytes()); + let address_gen = self + .address_gen + .get_or_insert_with(|| storage_address_gen.clone()); + let addr = address_gen.generate_address(entropy_source); let key = storage::Key::validity_predicate(&addr); let gas = (key.len() + vp_code_hash.len()) as u64 * STORAGE_WRITE_GAS_PER_BYTE; @@ -378,12 +484,21 @@ impl WriteLog { len as u64 * MEMORY_ACCESS_GAS_PER_BYTE } - /// Get the storage keys changed and accounts keys initialized in the - /// current transaction. The account keys point to the validity predicates - /// of the newly created accounts. The keys in the precommit are not - /// included in the result of this function. + /// Get the non-temporary storage keys changed and accounts keys initialized + /// in the current transaction. The account keys point to the validity + /// predicates of the newly created accounts. The keys in the precommit are + /// not included in the result of this function. pub fn get_keys(&self) -> BTreeSet { - self.tx_write_log.keys().cloned().collect() + self.tx_write_log + .iter() + .filter_map(|(key, modification)| match modification { + StorageModification::Write { .. } => Some(key.clone()), + StorageModification::Delete => Some(key.clone()), + StorageModification::InitAccount { .. } => Some(key.clone()), + // Skip temporary storage changes - they are never committed + StorageModification::Temp { .. } => None, + }) + .collect() } /// Get the storage keys changed and accounts keys initialized in the @@ -506,39 +621,15 @@ impl WriteLog { // get changed keys grouped by the address for key in changed_keys.iter() { - // for token keys, trigger Multitoken VP and the owner's VP - // - // TODO: this should not be a special case, as it is error prone. 
- // any internal addresses corresponding to tokens which have - // native vp equivalents should be automatically added as verifiers - if let Some([token, owner]) = is_any_token_balance_key(key) { - if matches!(&token, Address::Internal(InternalAddress::Nut(_))) + if let Some(addr) = key.fst_address() { + // We can skip insert when the address has been added from the + // Tx above. Also skip if it's an address of a newly initialized + // account, because anything can be written into an account's + // storage in the same tx in which it's initialized (there is no + // VP in the state prior to tx execution). + if !verifiers_from_tx.contains(addr) + && !initialized_accounts.contains(addr) { - verifiers.insert(token.clone()); - } - verifiers - .insert(Address::Internal(InternalAddress::Multitoken)); - verifiers.insert(owner.clone()); - } else if is_any_minted_balance_key(key).is_some() - || is_any_minter_key(key).is_some() - || is_any_token_parameter_key(key).is_some() - { - verifiers - .insert(Address::Internal(InternalAddress::Multitoken)); - } else { - for addr in key.iter_addresses() { - if verifiers_from_tx.contains(addr) - || initialized_accounts.contains(addr) - { - // We can skip this when the address has been added from - // the Tx above. - // Also skip if it's an address of a newly initialized - // account, because anything can be written into an - // account's storage in the same tx in which it's - // initialized (there is no VP in the state prior to tx - // execution). 
- continue; - } // Add the address as a verifier verifiers.insert(addr.clone()); } @@ -567,14 +658,15 @@ impl WriteLog { pub fn iter_prefix_post(&self, prefix: &storage::Key) -> PrefixIter { let mut matches = BTreeMap::new(); - for (key, modification) in &self.block_write_log { - if key.split_prefix(prefix).is_some() { - matches.insert(key.to_string(), modification.clone()); - } - } - for (key, modification) in &self.tx_write_log { - if key.split_prefix(prefix).is_some() { - matches.insert(key.to_string(), modification.clone()); + for bucket in [ + &self.block_write_log, + &self.tx_precommit_write_log, + &self.tx_write_log, + ] { + for (key, modification) in bucket { + if key.split_prefix(prefix).is_some() { + matches.insert(key.to_string(), modification.clone()); + } } } @@ -608,24 +700,9 @@ impl WriteLog { } /// Remove the transaction hash - pub fn delete_tx_hash(&mut self, hash: Hash) -> Result<()> { - match self - .replay_protection - .insert(hash, ReProtStorageModification::Delete) - { - None => Ok(()), - // Allow overwriting a previous finalize request - Some(ReProtStorageModification::Finalize) => Ok(()), - Some(_) => - // Cannot delete an hash that still has to be written to - // storage or has already been deleted - { - Err(Error::ReplayProtection(format!( - "Requested a delete on hash {hash} not yet committed to \ - storage" - ))) - } - } + pub(crate) fn delete_tx_hash(&mut self, hash: Hash) { + self.replay_protection + .insert(hash, ReProtStorageModification::Delete); } /// Move the transaction hash of the previous block to the list of all @@ -744,7 +821,7 @@ mod tests { // init let init_vp = "initialized".as_bytes().to_vec(); let vp_hash = Hash::sha256(init_vp); - let (addr, gas) = write_log.init_account(&address_gen, vp_hash); + let (addr, gas) = write_log.init_account(&address_gen, vp_hash, &[]); let vp_key = storage::Key::validity_predicate(&addr); assert_eq!( gas, @@ -777,7 +854,7 @@ mod tests { let init_vp = "initialized".as_bytes().to_vec(); let 
vp_hash = Hash::sha256(init_vp); - let (addr, _) = write_log.init_account(&address_gen, vp_hash); + let (addr, _) = write_log.init_account(&address_gen, vp_hash, &[]); let vp_key = storage::Key::validity_predicate(&addr); // update should fail @@ -796,7 +873,7 @@ mod tests { let init_vp = "initialized".as_bytes().to_vec(); let vp_hash = Hash::sha256(init_vp); - let (addr, _) = write_log.init_account(&address_gen, vp_hash); + let (addr, _) = write_log.init_account(&address_gen, vp_hash, &[]); let vp_key = storage::Key::validity_predicate(&addr); // delete should fail @@ -831,7 +908,7 @@ mod tests { // initialize an account let vp1 = Hash::sha256("vp1".as_bytes()); - let (addr1, _) = state.write_log.init_account(&address_gen, vp1); + let (addr1, _) = state.write_log.init_account(&address_gen, vp1, &[]); state.write_log.commit_tx(); // write values @@ -916,9 +993,7 @@ mod tests { .unwrap(); // delete previous hash - write_log - .delete_tx_hash(Hash::sha256("tx1".as_bytes())) - .unwrap(); + write_log.delete_tx_hash(Hash::sha256("tx1".as_bytes())); // finalize previous hashes for tx in ["tx2", "tx3"] { @@ -948,8 +1023,7 @@ mod tests { // try to delete finalized hash which shouldn't work state .write_log - .delete_tx_hash(Hash::sha256("tx2".as_bytes())) - .unwrap(); + .delete_tx_hash(Hash::sha256("tx2".as_bytes())); // commit a block state.commit_block().expect("commit failed"); @@ -962,6 +1036,78 @@ mod tests { ); } + // Test that writing a value on top of a temporary write is not allowed + #[test] + fn test_write_after_temp_disallowed() { + let mut state = crate::testing::TestState::default(); + + let key1 = + storage::Key::parse("key1").expect("cannot parse the key string"); + let val1 = "val1".as_bytes().to_vec(); + // Test from tx_write_log + state.write_log.write_temp(&key1, val1.clone()).unwrap(); + assert!(matches!( + state.write_log.write(&key1, val1.clone()), + Err(Error::UpdateTemporaryValue) + )); + + // Test with a temporary write precommitted + 
state.write_log.write_temp(&key1, val1.clone()).unwrap(); + state.write_log.precommit_tx(); + assert!(matches!( + state.write_log.write(&key1, val1), + Err(Error::UpdateTemporaryValue) + )); + } + + // Test that a temporary write on top of a write is not allowed + #[test] + fn test_write_temp_after_write_disallowed() { + let mut state = crate::testing::TestState::default(); + + let key1 = + storage::Key::parse("key1").expect("cannot parse the key string"); + let val1 = "val1".as_bytes().to_vec(); + // Test from tx_write_log + state.write_log.write(&key1, val1.clone()).unwrap(); + assert!(matches!( + state.write_log.write_temp(&key1, val1.clone()), + Err(Error::WriteTempAfterWrite) + )); + + // Test with a temporary write precommitted + state.write_log.write(&key1, val1.clone()).unwrap(); + state.write_log.precommit_tx(); + assert!(matches!( + state.write_log.write_temp(&key1, val1), + Err(Error::WriteTempAfterWrite) + )); + } + + // Test that a temporary write on top of a delete is not allowed + #[test] + fn test_write_temp_after_delete_disallowed() { + let mut state = crate::testing::TestState::default(); + + let key1 = + storage::Key::parse("key1").expect("cannot parse the key string"); + let val1 = "val1".as_bytes().to_vec(); + // Test from tx_write_log + state.write_log.delete(&key1).unwrap(); + assert!(matches!( + state.write_log.write_temp(&key1, val1.clone()), + Err(Error::WriteTempAfterDelete) + )); + + // Test with a temporary write precommitted + state.write_log.delete(&key1).unwrap(); + state.write_log.precommit_tx(); + assert!(matches!( + state.write_log.write_temp(&key1, val1), + Err(Error::WriteTempAfterDelete) + )); + } + prop_compose! { fn arb_verifiers_changed_key_tx_all_key() (verifiers_from_tx in testing::arb_verifiers_from_tx()) @@ -976,8 +1122,8 @@ mod tests { /// Test [`WriteLog::verifiers_changed_keys`] that: /// 1. Every address from `verifiers_from_tx` is included in the /// verifiers set. - /// 2. 
Every address included in the changed storage keys is included in - /// the verifiers set. + /// 2. Every address included in the first segment of changed storage + /// keys is included in the verifiers set. /// 3. Addresses of newly initialized accounts are not verifiers, so /// that anything can be written into an account's storage in the /// same tx in which it's initialized. @@ -999,12 +1145,12 @@ mod tests { let (_changed_keys, initialized_accounts) = write_log.get_partitioned_keys(); for key in changed_keys.iter() { - for addr_from_key in &key.find_addresses() { - if !initialized_accounts.contains(addr_from_key) { - // Test for 2. - assert!(verifiers.contains(addr_from_key)); - } + if let Some(addr_from_key) = key.fst_address() { + if !initialized_accounts.contains(addr_from_key) { + // Test for 2. + assert!(verifiers.contains(addr_from_key)); } + } } println!("verifiers {:#?}", verifiers); @@ -1050,6 +1196,7 @@ pub mod testing { arb_storage_modification(can_init_account), 0..100, ) + .prop_map(|map| map.into_iter().collect()) }) } diff --git a/crates/storage/src/collections/lazy_map.rs b/crates/storage/src/collections/lazy_map.rs index 7c7ea1c2b9..c1153910e0 100644 --- a/crates/storage/src/collections/lazy_map.rs +++ b/crates/storage/src/collections/lazy_map.rs @@ -18,14 +18,16 @@ pub const DATA_SUBKEY: &str = "data"; /// Lazy map. /// -/// This can be used as an alternative to `std::collections::HashMap` and -/// `BTreeMap`. In the lazy map, the elements do not reside in memory but are +/// This can be used as an alternative to [`HashMap`] and +/// [`BTreeMap`]. In the lazy map, the elements do not reside in memory but are /// instead read and written to storage sub-keys of the storage `key` used to /// construct the map. /// /// In the [`LazyMap`], the type of key `K` can be anything that implements /// [`storage::KeySeg`] and this trait is used to turn the keys into key /// segments. 
+/// +/// [`HashMap`]: `namada_core::collections::HashMap` #[derive(Debug)] pub struct LazyMap { key: storage::Key, diff --git a/crates/storage/src/collections/lazy_set.rs b/crates/storage/src/collections/lazy_set.rs index bee96d41a5..850fa1fcae 100644 --- a/crates/storage/src/collections/lazy_set.rs +++ b/crates/storage/src/collections/lazy_set.rs @@ -12,14 +12,17 @@ use crate::{ResultExt, StorageRead, StorageWrite}; /// A lazy set. /// -/// This can be used as an alternative to `std::collections::HashSet` and -/// `BTreeSet`. In the lazy set, the elements do not reside in memory but are +/// This can be used as an alternative to [`HashSet`] and +/// [`BTreeSet`]. In the lazy set, the elements do not reside in memory but are /// instead read and written to storage sub-keys of the storage `key` used to /// construct the set. /// /// In the [`LazySet`], the type of key `K` can be anything that implements /// [`storage::KeySeg`], and this trait is used to turn the keys into key /// segments. +/// +/// [`HashSet`]: `namada_core::collections::HashSet` +/// [`BTreeSet`]: `std::collections::BTreeSet` #[derive(Debug)] pub struct LazySet { key: storage::Key, diff --git a/crates/storage/src/db.rs b/crates/storage/src/db.rs index ac0d0a32a9..dde0a6dc34 100644 --- a/crates/storage/src/db.rs +++ b/crates/storage/src/db.rs @@ -16,7 +16,7 @@ use regex::Regex; use thiserror::Error; use crate::conversion_state::ConversionState; -use crate::tx_queue::TxQueue; +use crate::types::CommitOnlyData; #[allow(missing_docs)] #[derive(Error, Debug)] @@ -70,13 +70,13 @@ pub struct BlockStateRead { pub results: BlockResults, /// The conversion state pub conversion_state: ConversionState, - /// Wrapper txs to be decrypted in the next block proposal - pub tx_queue: TxQueue, /// The latest block height on Ethereum processed, if /// the bridge is enabled. pub ethereum_height: Option, /// The queue of Ethereum events to be processed in order. 
pub eth_events_queue: EthEventsQueue, + /// Structure holding data that needs to be added to the merkle tree + pub commit_only_data: CommitOnlyData, } /// The block's state to write into the database. @@ -107,13 +107,13 @@ pub struct BlockStateWrite<'a> { pub results: &'a BlockResults, /// The conversion state pub conversion_state: &'a ConversionState, - /// Wrapper txs to be decrypted in the next block proposal - pub tx_queue: &'a TxQueue, /// The latest block height on Ethereum processed, if /// the bridge is enabled. pub ethereum_height: Option<&'a ethereum_structs::BlockHeight>, /// The queue of Ethereum events to be processed in order. pub eth_events_queue: &'a EthEventsQueue, + /// Structure holding data that needs to be added to the merkle tree + pub commit_only_data: &'a CommitOnlyData, } /// A database backend. @@ -268,6 +268,13 @@ pub trait DB: Debug { batch: &mut Self::WriteBatch, ) -> Result<()>; + /// Prune non-persisted diffs that are only kept for one block for rollback + fn prune_non_persisted_diffs( + &mut self, + batch: &mut Self::WriteBatch, + height: BlockHeight, + ) -> Result<()>; + /// Overwrite a new value in storage, taking into /// account values stored at a previous height fn overwrite_entry( diff --git a/crates/storage/src/error.rs b/crates/storage/src/error.rs index 77138b66fd..d0c81122ed 100644 --- a/crates/storage/src/error.rs +++ b/crates/storage/src/error.rs @@ -9,6 +9,8 @@ pub enum Error { #[error("{0}")] SimpleMessage(&'static str), #[error("{0}")] + AllocMessage(String), + #[error("{0}")] Custom(CustomError), #[error("{0}: {1}")] CustomWithMessage(&'static str, CustomError), @@ -56,6 +58,12 @@ impl Error { Self::SimpleMessage(msg) } + /// Create an [`enum@Error`] from a heap allocated message. + #[inline] + pub const fn new_alloc(msg: String) -> Self { + Self::AllocMessage(msg) + } + /// Wrap another [`std::error::Error`] with a static message. 
pub fn wrap(msg: &'static str, error: E) -> Self where diff --git a/crates/storage/src/mockdb.rs b/crates/storage/src/mockdb.rs index 90cf1bccac..ca60eae55c 100644 --- a/crates/storage/src/mockdb.rs +++ b/crates/storage/src/mockdb.rs @@ -16,7 +16,8 @@ use namada_core::storage::{ use namada_core::time::DateTimeUtc; use namada_core::{decode, encode, ethereum_events, ethereum_structs}; use namada_merkle_tree::{ - base_tree_key_prefix, subtree_key_prefix, MerkleTreeStoresRead, StoreType, + tree_key_prefix_with_epoch, tree_key_prefix_with_height, + MerkleTreeStoresRead, StoreType, }; use namada_replay_protection as replay_protection; use regex::Regex; @@ -25,8 +26,7 @@ use crate::conversion_state::ConversionState; use crate::db::{ BlockStateRead, BlockStateWrite, DBIter, DBWriteBatch, Error, Result, DB, }; -use crate::tx_queue::TxQueue; -use crate::types::{KVBytes, PatternIterator, PrefixIterator}; +use crate::types::{CommitOnlyData, KVBytes, PatternIterator, PrefixIterator}; const SUBSPACE_CF: &str = "subspace"; @@ -94,15 +94,16 @@ impl DB for MockDB { Some(bytes) => decode(bytes).map_err(Error::CodingError)?, None => return Ok(None), }; + let commit_only_data: CommitOnlyData = + match self.0.borrow().get("commit_only_data") { + Some(bytes) => decode(bytes).map_err(Error::CodingError)?, + None => return Ok(None), + }; let conversion_state: ConversionState = match self.0.borrow().get("conversion_state") { Some(bytes) => decode(bytes).map_err(Error::CodingError)?, None => return Ok(None), }; - let tx_queue: TxQueue = match self.0.borrow().get("tx_queue") { - Some(bytes) => decode(bytes).map_err(Error::CodingError)?, - None => return Ok(None), - }; let ethereum_height: Option = match self.0.borrow().get("ethereum_height") { @@ -178,7 +179,7 @@ impl DB for MockDB { // Restore subtrees of Merkle tree if let Some(epoch) = epoch { for st in StoreType::iter_subtrees() { - let prefix_key = subtree_key_prefix(st, epoch); + let prefix_key = tree_key_prefix_with_epoch(st, epoch); 
let root_key = prefix_key.clone().with_segment("root".to_owned()); if let Some(bytes) = self.0.borrow().get(&root_key.to_string()) @@ -215,9 +216,9 @@ impl DB for MockDB { address_gen, results, conversion_state, - tx_queue, ethereum_height, eth_events_queue, + commit_only_data, })), _ => Err(Error::Temporary { error: "Essential data couldn't be read from the DB" @@ -248,7 +249,7 @@ impl DB for MockDB { conversion_state, ethereum_height, eth_events_queue, - tx_queue, + commit_only_data, }: BlockStateWrite = state; // Epoch start height and time @@ -270,22 +271,27 @@ impl DB for MockDB { self.0 .borrow_mut() .insert("eth_events_queue".into(), encode(ð_events_queue)); - self.0 - .borrow_mut() - .insert("tx_queue".into(), encode(&tx_queue)); self.0 .borrow_mut() .insert("conversion_state".into(), encode(conversion_state)); + self.0.borrow_mut().insert( + "commit_only_data_commitment".into(), + commit_only_data.serialize(), + ); let prefix_key = Key::from(height.to_db_key()); // Merkle tree { for st in StoreType::iter() { - if *st == StoreType::Base || is_full_commit { - let key_prefix = if *st == StoreType::Base { - base_tree_key_prefix(height) - } else { - subtree_key_prefix(st, epoch) + if *st == StoreType::Base + || *st == StoreType::CommitData + || is_full_commit + { + let key_prefix = match st { + StoreType::Base | StoreType::CommitData => { + tree_key_prefix_with_height(st, height) + } + _ => tree_key_prefix_with_epoch(st, epoch), }; let root_key = key_prefix.clone().with_segment("root".to_owned()); @@ -388,10 +394,11 @@ impl DB for MockDB { .map(|st| Either::Left(std::iter::once(st))) .unwrap_or_else(|| Either::Right(StoreType::iter())); for st in store_types { - let key_prefix = if *st == StoreType::Base { - base_tree_key_prefix(base_height) - } else { - subtree_key_prefix(st, epoch) + let key_prefix = match st { + StoreType::Base | StoreType::CommitData => { + tree_key_prefix_with_height(st, base_height) + } + _ => tree_key_prefix_with_epoch(st, epoch), }; let 
root_key = key_prefix.clone().with_segment("root".to_owned()); let bytes = self.0.borrow().get(&root_key.to_string()).cloned(); @@ -526,7 +533,8 @@ impl DB for MockDB { let diff_prefix = Key::from(height.to_db_key()); let mut db = self.0.borrow_mut(); - // Diffs + // Diffs - Note that this is different from RocksDB that has a separate + // CF for non-persisted diffs (ROLLBACK_CF) let size_diff = match db.insert(subspace_key.to_string(), value.to_owned()) { Some(prev_value) => { @@ -585,6 +593,8 @@ impl DB for MockDB { let diff_prefix = Key::from(height.to_db_key()); let mut db = self.0.borrow_mut(); + // Diffs - Note that this is different from RocksDB that has a separate + // CF for non-persisted diffs (ROLLBACK_CF) let size_diff = match db.remove(&subspace_key.to_string()) { Some(value) => { let old_key = diff_prefix @@ -625,7 +635,7 @@ impl DB for MockDB { store_type: &StoreType, epoch: Epoch, ) -> Result<()> { - let prefix_key = subtree_key_prefix(store_type, epoch); + let prefix_key = tree_key_prefix_with_epoch(store_type, epoch); let root_key = prefix_key .push(&"root".to_owned()) .map_err(Error::KeyError)?; @@ -691,6 +701,16 @@ impl DB for MockDB { Ok(()) } + fn prune_non_persisted_diffs( + &mut self, + _batch: &mut Self::WriteBatch, + _height: BlockHeight, + ) -> Result<()> { + // No-op - Note that this is different from RocksDB that has a separate + // CF for non-persisted diffs (ROLLBACK_CF) + Ok(()) + } + fn overwrite_entry( &self, _batch: &mut Self::WriteBatch, diff --git a/crates/storage/src/tx_queue.rs b/crates/storage/src/tx_queue.rs index 7a02a31ebb..2b11cf4edb 100644 --- a/crates/storage/src/tx_queue.rs +++ b/crates/storage/src/tx_queue.rs @@ -1,57 +1,8 @@ use namada_core::borsh::{BorshDeserialize, BorshSerialize}; use namada_core::ethereum_events::EthereumEvent; -use namada_gas::Gas; use namada_macros::BorshDeserializer; #[cfg(feature = "migrations")] use namada_migrations::*; -use namada_tx::Tx; - -/// A wrapper for 
`crate::types::transaction::WrapperTx` to conditionally -/// add `has_valid_pow` flag for only used in testnets. -#[derive(Debug, Clone, BorshDeserialize, BorshSerialize, BorshDeserializer)] -pub struct TxInQueue { - /// Wrapper tx - pub tx: Tx, - /// The available gas remaining for the inner tx (for gas accounting). - /// This allows for a more detailed logging about the gas used by the - /// wrapper and that used by the inner - pub gas: Gas, -} - -#[derive( - Default, Debug, Clone, BorshDeserialize, BorshSerialize, BorshDeserializer, -)] -/// Wrapper txs to be decrypted in the next block proposal -pub struct TxQueue(std::collections::VecDeque); - -impl TxQueue { - /// Add a new wrapper at the back of the queue - pub fn push(&mut self, wrapper: TxInQueue) { - self.0.push_back(wrapper); - } - - /// Remove the wrapper at the head of the queue - pub fn pop(&mut self) -> Option { - self.0.pop_front() - } - - /// Get an iterator over the queue - pub fn iter(&self) -> impl std::iter::Iterator { - self.0.iter() - } - - /// Check if there are any txs in the queue - #[allow(dead_code)] - pub fn is_empty(&self) -> bool { - self.0.is_empty() - } - - /// Get reference to the element at the given index. - /// Returns [`None`] if index exceeds the queue lenght. - pub fn get(&self, index: usize) -> Option<&TxInQueue> { - self.0.get(index) - } -} /// Expired transaction kinds. #[derive(Clone, Debug, BorshSerialize, BorshDeserialize, BorshDeserializer)] diff --git a/crates/storage/src/types.rs b/crates/storage/src/types.rs index 7f66bdb782..eaf7fac6de 100644 --- a/crates/storage/src/types.rs +++ b/crates/storage/src/types.rs @@ -1,5 +1,10 @@ //! The key and values that may be persisted in a DB. 
+use std::collections::BTreeMap; + +use borsh::{BorshDeserialize, BorshSerialize}; +use namada_core::borsh::BorshSerializeExt; +use namada_core::hash::Hash; use regex::Regex; /// A key-value pair as raw bytes @@ -58,3 +63,15 @@ impl std::fmt::Debug for PatternIterator { f.write_str("PatternIterator") } } + +/// Structure holding data that will be committed to the merkle tree +#[derive(Debug, BorshSerialize, BorshDeserialize, Default)] +pub struct CommitOnlyData { + pub tx_gas: BTreeMap, +} + +impl CommitOnlyData { + pub fn serialize(&self) -> Vec { + self.serialize_to_vec() + } +} diff --git a/crates/test_utils/src/lib.rs b/crates/test_utils/src/lib.rs index d2b27cf009..29ce624c23 100644 --- a/crates/test_utils/src/lib.rs +++ b/crates/test_utils/src/lib.rs @@ -19,16 +19,21 @@ pub const WASM_FOR_TESTS_DIR: &str = "wasm_for_tests"; pub enum TestWasms { TxMemoryLimit, TxNoOp, + TxInvalidData, + TxInfiniteGuestGas, + TxInfiniteHostGas, TxProposalCode, + TxProposalMaspRewards, + TxProposalIbcTokenInflation, TxReadStorageKey, TxWriteStorageKey, VpAlwaysFalse, VpAlwaysTrue, VpEval, + VpInfiniteGuestGas, + VpInfiniteHostGas, VpMemoryLimit, VpReadStorageKey, - TxProposalMaspRewards, - TxProposalIbcTokenInflation, } impl TestWasms { @@ -38,18 +43,23 @@ impl TestWasms { let filename = match self { TestWasms::TxMemoryLimit => "tx_memory_limit.wasm", TestWasms::TxNoOp => "tx_no_op.wasm", + TestWasms::TxInvalidData => "tx_invalid_data.wasm", + TestWasms::TxInfiniteGuestGas => "tx_infinite_guest_gas.wasm", + TestWasms::TxInfiniteHostGas => "tx_infinite_host_gas.wasm", TestWasms::TxProposalCode => "tx_proposal_code.wasm", + TestWasms::TxProposalMaspRewards => "tx_proposal_masp_reward.wasm", + TestWasms::TxProposalIbcTokenInflation => { + "tx_proposal_ibc_token_inflation.wasm" + } TestWasms::TxReadStorageKey => "tx_read_storage_key.wasm", TestWasms::TxWriteStorageKey => "tx_write.wasm", TestWasms::VpAlwaysFalse => "vp_always_false.wasm", TestWasms::VpAlwaysTrue => 
"vp_always_true.wasm", TestWasms::VpEval => "vp_eval.wasm", + TestWasms::VpInfiniteGuestGas => "vp_infinite_guest_gas.wasm", + TestWasms::VpInfiniteHostGas => "vp_infinite_host_gas.wasm", TestWasms::VpMemoryLimit => "vp_memory_limit.wasm", TestWasms::VpReadStorageKey => "vp_read_storage_key.wasm", - TestWasms::TxProposalMaspRewards => "tx_proposal_masp_reward.wasm", - TestWasms::TxProposalIbcTokenInflation => { - "tx_proposal_ibc_token_inflation.wasm" - } }; let cwd = env::current_dir().expect("Couldn't get current working directory"); diff --git a/crates/tests/Cargo.toml b/crates/tests/Cargo.toml index 2aec37b1ed..a729be296b 100644 --- a/crates/tests/Cargo.toml +++ b/crates/tests/Cargo.toml @@ -24,6 +24,11 @@ migrations = [ "namada_sdk/migrations", "namada_core/migrations", ] +namada-eth-bridge = [ + "namada/namada-eth-bridge", + "namada_sdk/namada-eth-bridge", + "namada_apps/namada-eth-bridge", +] [dependencies] namada = {path = "../namada", features = ["testing"]} @@ -59,6 +64,7 @@ tracing.workspace = true namada_apps = {path = "../apps", features = ["testing"]} namada_vm_env = {path = "../vm_env"} assert_cmd.workspace = true +assert_matches.workspace = true borsh.workspace = true borsh-ext.workspace = true color-eyre.workspace = true @@ -74,6 +80,7 @@ pretty_assertions.workspace = true proptest.workspace = true proptest-state-machine.workspace = true rand.workspace = true +test-log.workspace = true toml.workspace = true # This is used to enable logging from tests diff --git a/crates/tests/src/e2e/eth_bridge_tests.rs b/crates/tests/src/e2e/eth_bridge_tests.rs index 9d7eee8ed8..9c77ab85b3 100644 --- a/crates/tests/src/e2e/eth_bridge_tests.rs +++ b/crates/tests/src/e2e/eth_bridge_tests.rs @@ -826,7 +826,6 @@ async fn test_wdai_transfer_established_unauthorized() -> Result<()> { &bertha_addr.to_string(), &token::DenominatedAmount::new(token::Amount::from(10_000), 0u8.into()), )?; - cmd.exp_string(TX_ACCEPTED)?; cmd.exp_string(TX_REJECTED)?; cmd.assert_success(); 
diff --git a/crates/tests/src/e2e/helpers.rs b/crates/tests/src/e2e/helpers.rs index 03e2401362..1118a38ba2 100644 --- a/crates/tests/src/e2e/helpers.rs +++ b/crates/tests/src/e2e/helpers.rs @@ -36,7 +36,7 @@ use super::setup::{ ENV_VAR_USE_PREBUILT_BINARIES, }; use crate::e2e::setup::{Bin, Who, APPS_PACKAGE}; -use crate::strings::{LEDGER_STARTED, TX_ACCEPTED, TX_APPLIED_SUCCESS}; +use crate::strings::{LEDGER_STARTED, TX_APPLIED_SUCCESS}; use crate::{run, run_as}; /// Instantiate a new [`HttpClient`] to perform RPC requests with. @@ -100,7 +100,6 @@ pub fn init_established_account( rpc_addr, ]; let mut cmd = run!(test, Bin::Client, init_account_args, Some(40))?; - cmd.exp_string(TX_ACCEPTED)?; cmd.exp_string(TX_APPLIED_SUCCESS)?; cmd.assert_success(); Ok(()) @@ -379,6 +378,7 @@ pub fn wait_for_block_height( height: u64, timeout_secs: u64, ) -> Result<()> { + #[allow(clippy::disallowed_methods)] let start = Instant::now(); let loop_timeout = Duration::new(timeout_secs, 0); loop { @@ -386,6 +386,7 @@ pub fn wait_for_block_height( if current >= height { break Ok(()); } + #[allow(clippy::disallowed_methods)] if Instant::now().duration_since(start) > loop_timeout { return Err(eyre!( "Timed out waiting for height {height}, current {current}" @@ -425,6 +426,7 @@ pub fn generate_bin_command(bin_name: &str, manifest_path: &Path) -> Command { build_cmd.release() }; + #[allow(clippy::disallowed_methods)] let now = time::Instant::now(); // ideally we would print the compile command here, but escargot doesn't // implement Display or Debug for CargoBuild diff --git a/crates/tests/src/e2e/ibc_tests.rs b/crates/tests/src/e2e/ibc_tests.rs index 9074d4f183..52f20742a7 100644 --- a/crates/tests/src/e2e/ibc_tests.rs +++ b/crates/tests/src/e2e/ibc_tests.rs @@ -11,7 +11,7 @@ use core::str::FromStr; use core::time::Duration; -use std::collections::{BTreeSet, HashMap}; +use std::collections::BTreeSet; use std::path::{Path, PathBuf}; use color_eyre::eyre::Result; @@ -32,7 +32,8 @@ use 
namada::ibc::clients::tendermint::types::{ use namada::ibc::core::channel::types::channel::Order as ChanOrder; use namada::ibc::core::channel::types::msgs::{ MsgAcknowledgement, MsgChannelOpenAck, MsgChannelOpenConfirm, - MsgChannelOpenInit, MsgChannelOpenTry, MsgRecvPacket, MsgTimeout, + MsgChannelOpenInit, MsgChannelOpenTry, MsgRecvPacket as IbcMsgRecvPacket, + MsgTimeout as IbcMsgTimeout, }; use namada::ibc::core::channel::types::packet::Packet; use namada::ibc::core::channel::types::timeout::TimeoutHeight; @@ -56,7 +57,7 @@ use namada::ibc::core::host::types::identifiers::{ ChainId, ChannelId, ClientId, ConnectionId, PortId, }; use namada::ibc::primitives::proto::Any; -use namada::ibc::primitives::{Msg, Signer, Timestamp}; +use namada::ibc::primitives::{Signer, Timestamp, ToProto}; use namada::ledger::events::EventType; use namada::ledger::ibc::storage::*; use namada::ledger::parameters::{storage as param_storage, EpochDuration}; @@ -77,6 +78,7 @@ use namada_apps::config::{ethereum_bridge, TendermintMode}; use namada_apps::facade::tendermint::block::Header as TmHeader; use namada_apps::facade::tendermint::merkle::proof::ProofOps as TmProof; use namada_apps::facade::tendermint_rpc::{Client, HttpClient, Url}; +use namada_core::collections::HashMap; use namada_core::string_encoding::StringEncoded; use namada_sdk::masp::fs::FsShieldedUtils; use namada_test_utils::TestWasms; @@ -95,7 +97,7 @@ use crate::e2e::setup::{ self, run_hermes_cmd, setup_hermes, sleep, Bin, NamadaCmd, Test, Who, }; use crate::strings::{ - LEDGER_STARTED, TX_ACCEPTED, TX_APPLIED_SUCCESS, TX_FAILED, VALIDATOR_NODE, + LEDGER_STARTED, TX_APPLIED_SUCCESS, TX_FAILED, VALIDATOR_NODE, }; use crate::{run, run_as}; @@ -103,7 +105,14 @@ use crate::{run, run_as}; fn run_ledger_ibc() -> Result<()> { let update_genesis = |mut genesis: templates::All, base_dir: &_| { - genesis.parameters.parameters.epochs_per_year = 31536; + genesis.parameters.parameters.epochs_per_year = + 
epochs_per_year_from_min_duration(1800); + genesis.parameters.ibc_params.default_mint_limit = + Amount::max_signed(); + genesis + .parameters + .ibc_params + .default_per_epoch_throughput_limit = Amount::max_signed(); setup::set_validators(1, genesis, base_dir, |_| 0) }; let (ledger_a, ledger_b, test_a, test_b) = run_two_nets(update_genesis)?; @@ -167,37 +176,7 @@ fn run_ledger_ibc() -> Result<()> { // The balance should not be changed check_balances_after_back(&port_id_b, &channel_id_b, &test_a, &test_b)?; - // Shielded transfer 10 BTC from Chain A to Chain B - shielded_transfer( - &test_a, - &test_b, - &client_id_a, - &client_id_b, - &port_id_a, - &channel_id_a, - &port_id_b, - &channel_id_b, - )?; - check_shielded_balances(&port_id_b, &channel_id_b, &test_a, &test_b)?; - - // Shielded transfer 5 BTC back from Chain B to the origin-specific account - // on Chain A - shielded_transfer_back( - &test_a, - &test_b, - &client_id_a, - &client_id_b, - &port_id_a, - &channel_id_a, - &port_id_b, - &channel_id_b, - )?; - check_shielded_balances_after_back( - &port_id_b, - &channel_id_b, - &test_a, - &test_b, - )?; + // Shielded transfers are tested with Hermes // Skip tests for closing a channel and timeout_on_close since the transfer // channel cannot be closed @@ -209,7 +188,14 @@ fn run_ledger_ibc() -> Result<()> { fn run_ledger_ibc_with_hermes() -> Result<()> { let update_genesis = |mut genesis: templates::All, base_dir: &_| { - genesis.parameters.parameters.epochs_per_year = 31536; + genesis.parameters.parameters.epochs_per_year = + epochs_per_year_from_min_duration(1800); + genesis.parameters.ibc_params.default_mint_limit = + Amount::max_signed(); + genesis + .parameters + .ibc_params + .default_per_epoch_throughput_limit = Amount::max_signed(); setup::set_validators(1, genesis, base_dir, |_| 0) }; let (ledger_a, ledger_b, test_a, test_b) = run_two_nets(update_genesis)?; @@ -224,7 +210,7 @@ fn run_ledger_ibc_with_hermes() -> Result<()> { // Start relaying let hermes 
= run_hermes(&test_a)?; - let _bg_hermes = hermes.background(); + let bg_hermes = hermes.background(); // Transfer 100000 from the normal account on Chain A to Chain B std::env::set_var(ENV_VAR_CHAIN_ID, test_b.net.chain_id.to_string()); @@ -240,7 +226,6 @@ fn run_ledger_ibc_with_hermes() -> Result<()> { &channel_id_a, None, None, - None, false, )?; wait_for_packet_relay(&port_id_a, &channel_id_a, &test_a)?; @@ -269,7 +254,6 @@ fn run_ledger_ibc_with_hermes() -> Result<()> { &channel_id_b, None, None, - None, false, )?; wait_for_packet_relay(&port_id_a, &channel_id_a, &test_a)?; @@ -288,7 +272,6 @@ fn run_ledger_ibc_with_hermes() -> Result<()> { ALBERT_KEY, &port_id_a, &channel_id_a, - None, Some(Duration::new(0, 0)), None, false, @@ -298,26 +281,106 @@ fn run_ledger_ibc_with_hermes() -> Result<()> { // The balance should not be changed check_balances_after_back(&port_id_b, &channel_id_b, &test_a, &test_b)?; + // Send a token to the shielded address on Chain A + transfer_on_chain( + &test_a, + ALBERT, + AA_PAYMENT_ADDRESS, + BTC, + 100, + ALBERT_KEY, + )?; + shielded_sync(&test_a, AA_VIEWING_KEY)?; + // Shieded transfer from Chain A to Chain B + transfer( + &test_a, + A_SPENDING_KEY, + AB_PAYMENT_ADDRESS, + BTC, + "10", + ALBERT_KEY, + &port_id_a, + &channel_id_a, + None, + None, + false, + )?; + wait_for_packet_relay(&port_id_a, &channel_id_a, &test_a)?; + check_shielded_balances(&port_id_b, &channel_id_b, &test_a, &test_b)?; + + // Shielded transfer to an invalid receiver address (refund) + transfer( + &test_a, + A_SPENDING_KEY, + "invalid_receiver", + BTC, + "10", + ALBERT_KEY, + &port_id_a, + &channel_id_a, + None, + None, + false, + )?; + wait_for_packet_relay(&port_id_a, &channel_id_a, &test_a)?; + // The balance should not be changed + check_shielded_balances(&port_id_b, &channel_id_b, &test_a, &test_b)?; + + // Stop Hermes for timeout test + let mut hermes = bg_hermes.foreground(); + hermes.interrupt()?; + + // Send transfer will be timed out (refund) + 
transfer( + &test_a, + A_SPENDING_KEY, + AB_PAYMENT_ADDRESS, + BTC, + "10", + ALBERT_KEY, + &port_id_a, + &channel_id_a, + Some(Duration::new(10, 0)), + None, + false, + )?; + // wait for the timeout + sleep(10); + + // Restart relaying + let hermes = run_hermes(&test_a)?; + let _bg_hermes = hermes.background(); + + wait_for_packet_relay(&port_id_a, &channel_id_a, &test_a)?; + // The balance should not be changed + check_shielded_balances(&port_id_b, &channel_id_b, &test_a, &test_b)?; + Ok(()) } #[test] fn pgf_over_ibc_with_hermes() -> Result<()> { - let update_genesis = - |mut genesis: templates::All, base_dir: &_| { - genesis.parameters.parameters.epochs_per_year = - epochs_per_year_from_min_duration(20); - // for the trusting period of IBC client - genesis.parameters.pos_params.pipeline_len = 5; - genesis.parameters.parameters.max_proposal_bytes = - Default::default(); - genesis.parameters.pgf_params.stewards = - BTreeSet::from_iter([get_established_addr_from_pregenesis( - ALBERT_KEY, base_dir, &genesis, - ) - .unwrap()]); - setup::set_validators(1, genesis, base_dir, |_| 0) - }; + let update_genesis = |mut genesis: templates::All< + templates::Unvalidated, + >, + base_dir: &_| { + genesis.parameters.parameters.epochs_per_year = + epochs_per_year_from_min_duration(20); + // for the trusting period of IBC client + genesis.parameters.pos_params.pipeline_len = 5; + genesis.parameters.parameters.max_proposal_bytes = Default::default(); + genesis.parameters.pgf_params.stewards = + BTreeSet::from_iter([get_established_addr_from_pregenesis( + ALBERT_KEY, base_dir, &genesis, + ) + .unwrap()]); + genesis.parameters.ibc_params.default_mint_limit = Amount::max_signed(); + genesis + .parameters + .ibc_params + .default_per_epoch_throughput_limit = Amount::max_signed(); + setup::set_validators(1, genesis, base_dir, |_| 0) + }; let (ledger_a, ledger_b, test_a, test_b) = run_two_nets(update_genesis)?; let _bg_ledger_a = ledger_a.background(); let _bg_ledger_b = 
ledger_b.background(); @@ -364,8 +427,8 @@ fn pgf_over_ibc_with_hermes() -> Result<()> { submit_votes(&test_a)?; // wait for the grace - let grace_epoch = start_epoch + 12u64 + 6u64 + 1u64; - while epoch <= grace_epoch { + let activation_epoch = start_epoch + 12u64 + 6u64 + 1u64; + while epoch <= activation_epoch { sleep(5); epoch = get_epoch(&test_a, &rpc_a).unwrap(); } @@ -383,6 +446,12 @@ fn proposal_ibc_token_inflation() -> Result<()> { genesis.parameters.parameters.epochs_per_year = epochs_per_year_from_min_duration(60); genesis.parameters.gov_params.min_proposal_grace_epochs = 3; + genesis.parameters.ibc_params.default_mint_limit = + Amount::max_signed(); + genesis + .parameters + .ibc_params + .default_per_epoch_throughput_limit = Amount::max_signed(); setup::set_validators(1, genesis, base_dir, |_| 0) }; let (ledger_a, ledger_b, test_a, test_b) = run_two_nets(update_genesis)?; @@ -414,29 +483,15 @@ fn proposal_ibc_token_inflation() -> Result<()> { setup_hermes(&test_a, &test_b)?; let port_id_a = "transfer".parse().unwrap(); - let port_id_b: PortId = "transfer".parse().unwrap(); - let (channel_id_a, channel_id_b) = + let (channel_id_a, _channel_id_b) = create_channel_with_hermes(&test_a, &test_b)?; // Start relaying let hermes = run_hermes(&test_a)?; let _bg_hermes = hermes.background(); - // Get masp proof for the following IBC transfer from the destination chain - // It will send 1 APFEL to PA(B) on Chain B - // PA(B) on Chain B will receive APFEL on chain A - std::env::set_var(ENV_VAR_CHAIN_ID, test_a.net.chain_id.to_string()); - let token_addr = find_address(&test_a, APFEL)?; // wait the next epoch not to update the epoch during the IBC transfer wait_epochs(&test_b, 1)?; - let file_path = gen_ibc_shielded_transfer( - &test_b, - AB_PAYMENT_ADDRESS, - token_addr.to_string(), - 1, - &port_id_b, - &channel_id_b, - )?; // Transfer 1 from Chain A to a z-address on Chain B transfer( @@ -448,7 +503,6 @@ fn proposal_ibc_token_inflation() -> Result<()> { 
ALBERT_KEY, &port_id_a, &channel_id_a, - Some(&file_path.to_string_lossy()), None, None, false, @@ -464,6 +518,142 @@ fn proposal_ibc_token_inflation() -> Result<()> { Ok(()) } +#[test] +fn ibc_rate_limit() -> Result<()> { + // Mint limit 2 transfer/channel-0/nam, per-epoch throughput limit 1 NAM + let update_genesis = |mut genesis: templates::All< + templates::Unvalidated, + >, + base_dir: &_| { + genesis.parameters.parameters.epochs_per_year = + epochs_per_year_from_min_duration(50); + genesis.parameters.ibc_params.default_mint_limit = Amount::from_u64(2); + genesis + .parameters + .ibc_params + .default_per_epoch_throughput_limit = Amount::from_u64(1_000_000); + setup::set_validators(1, genesis, base_dir, |_| 0) + }; + let (ledger_a, ledger_b, test_a, test_b) = run_two_nets(update_genesis)?; + let _bg_ledger_a = ledger_a.background(); + let _bg_ledger_b = ledger_b.background(); + + setup_hermes(&test_a, &test_b)?; + let port_id_a = "transfer".parse().unwrap(); + let port_id_b: PortId = "transfer".parse().unwrap(); + let (channel_id_a, channel_id_b) = + create_channel_with_hermes(&test_a, &test_b)?; + + // Start relaying + let hermes = run_hermes(&test_a)?; + let _bg_hermes = hermes.background(); + + // wait for the next epoch + std::env::set_var(ENV_VAR_CHAIN_ID, test_a.net.chain_id.to_string()); + let rpc_a = get_actor_rpc(&test_a, Who::Validator(0)); + let mut epoch = get_epoch(&test_a, &rpc_a).unwrap(); + let next_epoch = epoch.next(); + while epoch <= next_epoch { + sleep(5); + epoch = get_epoch(&test_a, &rpc_a).unwrap(); + } + + // Transfer 1 NAM from Chain A to Chain B + std::env::set_var(ENV_VAR_CHAIN_ID, test_b.net.chain_id.to_string()); + let receiver = find_address(&test_b, BERTHA)?; + transfer( + &test_a, + ALBERT, + receiver.to_string(), + NAM, + "1", + ALBERT_KEY, + &port_id_a, + &channel_id_a, + None, + None, + false, + )?; + + // Transfer 1 NAM from Chain A to Chain B again will fail + transfer( + &test_a, + ALBERT, + receiver.to_string(), + NAM, 
+ "1", + ALBERT_KEY, + &port_id_a, + &channel_id_a, + None, + // expect an error of the throughput limit + Some("Transaction was rejected by VPs"), + false, + )?; + + // wait for the next epoch + let mut epoch = get_epoch(&test_a, &rpc_a).unwrap(); + let next_epoch = epoch.next(); + while epoch <= next_epoch { + sleep(5); + epoch = get_epoch(&test_a, &rpc_a).unwrap(); + } + + // Transfer 1 NAM from Chain A to Chain B will succeed in the new epoch + transfer( + &test_a, + ALBERT, + receiver.to_string(), + NAM, + "1", + ALBERT_KEY, + &port_id_a, + &channel_id_a, + None, + None, + false, + )?; + + // wait for the next epoch + let mut epoch = get_epoch(&test_a, &rpc_a).unwrap(); + let next_epoch = epoch.next(); + while epoch <= next_epoch { + sleep(5); + epoch = get_epoch(&test_a, &rpc_a).unwrap(); + } + + // Transfer 1 NAM from Chain A to Chain B will succeed, but Chain B can't + // receive due to the mint limit and the packet will be timed out + transfer( + &test_a, + ALBERT, + receiver.to_string(), + NAM, + "1", + ALBERT_KEY, + &port_id_a, + &channel_id_a, + Some(Duration::new(20, 0)), + None, + false, + )?; + wait_for_packet_relay(&port_id_a, &channel_id_a, &test_a)?; + + // Check the balance on Chain B + let ibc_denom = format!("{port_id_b}/{channel_id_b}/nam"); + std::env::set_var(ENV_VAR_CHAIN_ID, test_b.net.chain_id.to_string()); + let rpc_b = get_actor_rpc(&test_b, Who::Validator(0)); + let query_args = vec![ + "balance", "--owner", BERTHA, "--token", &ibc_denom, "--node", &rpc_b, + ]; + let expected = format!("{ibc_denom}: 2"); + let mut client = run!(test_b, Bin::Client, query_args, Some(40))?; + client.exp_string(&expected)?; + client.assert_success(); + + Ok(()) +} + fn run_two_nets( update_genesis: impl FnMut( templates::All, @@ -699,7 +889,13 @@ fn create_client(test_a: &Test, test_b: &Test) -> Result<(ClientId, ClientId)> { consensus_state: make_consensus_state(test_b, height)?.into(), signer: signer(), }; - let height_a = submit_ibc_tx(test_a, message, 
ALBERT, ALBERT_KEY, false)?; + let height_a = submit_ibc_tx( + test_a, + make_ibc_data(message.to_any()), + ALBERT, + ALBERT_KEY, + false, + )?; let height = query_height(test_a)?; let client_state = make_client_state(test_a, height); @@ -709,7 +905,13 @@ fn create_client(test_a: &Test, test_b: &Test) -> Result<(ClientId, ClientId)> { consensus_state: make_consensus_state(test_a, height)?.into(), signer: signer(), }; - let height_b = submit_ibc_tx(test_b, message, ALBERT, ALBERT_KEY, false)?; + let height_b = submit_ibc_tx( + test_b, + make_ibc_data(message.to_any()), + ALBERT, + ALBERT_KEY, + false, + )?; let events = get_events(test_a, height_a)?; let client_id_a = @@ -836,7 +1038,13 @@ fn update_client( client_message: header.into(), signer: signer(), }; - submit_ibc_tx(target_test, message, ALBERT, ALBERT_KEY, false)?; + submit_ibc_tx( + target_test, + make_ibc_data(message.to_any()), + ALBERT, + ALBERT_KEY, + false, + )?; check_ibc_update_query( target_test, @@ -876,7 +1084,13 @@ fn connection_handshake( signer: signer(), }; // OpenInitConnection on Chain A - let height = submit_ibc_tx(test_a, msg, ALBERT, ALBERT_KEY, false)?; + let height = submit_ibc_tx( + test_a, + make_ibc_data(msg.to_any()), + ALBERT, + ALBERT_KEY, + false, + )?; let events = get_events(test_a, height)?; let conn_id_a = get_connection_id_from_events(&events) .ok_or(eyre!("No connection ID is set"))?; @@ -910,7 +1124,13 @@ fn connection_handshake( // Update the client state of Chain A on Chain B update_client_with_height(test_a, test_b, client_id_b, height_a)?; // OpenTryConnection on Chain B - let height = submit_ibc_tx(test_b, msg, ALBERT, ALBERT_KEY, false)?; + let height = submit_ibc_tx( + test_b, + make_ibc_data(msg.to_any()), + ALBERT, + ALBERT_KEY, + false, + )?; let events = get_events(test_b, height)?; let conn_id_b = get_connection_id_from_events(&events) .ok_or(eyre!("No connection ID is set"))?; @@ -937,7 +1157,13 @@ fn connection_handshake( // Update the client state of Chain 
B on Chain A update_client_with_height(test_b, test_a, client_id_a, height_b)?; // OpenAckConnection on Chain A - submit_ibc_tx(test_a, msg, ALBERT, ALBERT_KEY, false)?; + submit_ibc_tx( + test_a, + make_ibc_data(msg.to_any()), + ALBERT, + ALBERT_KEY, + false, + )?; // get the proofs on Chain A let height_a = query_height(test_a)?; @@ -951,7 +1177,13 @@ fn connection_handshake( // Update the client state of Chain A on Chain B update_client_with_height(test_a, test_b, client_id_b, height_a)?; // OpenConfirmConnection on Chain B - submit_ibc_tx(test_b, msg, ALBERT, ALBERT_KEY, false)?; + submit_ibc_tx( + test_b, + make_ibc_data(msg.to_any()), + ALBERT, + ALBERT_KEY, + false, + )?; Ok((conn_id_a, conn_id_b)) } @@ -989,7 +1221,13 @@ fn channel_handshake( signer: signer(), version_proposal: channel_version.clone(), }; - let height = submit_ibc_tx(test_a, msg, ALBERT, ALBERT_KEY, false)?; + let height = submit_ibc_tx( + test_a, + make_ibc_data(msg.to_any()), + ALBERT, + ALBERT_KEY, + false, + )?; let events = get_events(test_a, height)?; let channel_id_a = get_channel_id_from_events(&events).ok_or(eyre!(TX_FAILED))?; @@ -1016,10 +1254,16 @@ fn channel_handshake( // Update the client state of Chain A on Chain B update_client_with_height(test_a, test_b, client_id_b, height_a)?; // OpenTryChannel on Chain B - let height = submit_ibc_tx(test_b, msg, ALBERT, ALBERT_KEY, false)?; - let events = get_events(test_b, height)?; - let channel_id_b = - get_channel_id_from_events(&events).ok_or(eyre!(TX_FAILED))?; + let height = submit_ibc_tx( + test_b, + make_ibc_data(msg.to_any()), + ALBERT, + ALBERT_KEY, + false, + )?; + let events = get_events(test_b, height)?; + let channel_id_b = + get_channel_id_from_events(&events).ok_or(eyre!(TX_FAILED))?; // get the A's proofs on Chain B let height_b = query_height(test_b)?; @@ -1037,7 +1281,13 @@ fn channel_handshake( // Update the client state of Chain B on Chain A update_client_with_height(test_b, test_a, client_id_a, height_b)?; // 
OpenAckChannel on Chain A - submit_ibc_tx(test_a, msg, ALBERT, ALBERT_KEY, false)?; + submit_ibc_tx( + test_a, + make_ibc_data(msg.to_any()), + ALBERT, + ALBERT_KEY, + false, + )?; // get the proofs on Chain A let height_a = query_height(test_a)?; @@ -1053,7 +1303,13 @@ fn channel_handshake( // Update the client state of Chain A on Chain B update_client_with_height(test_a, test_b, client_id_b, height_a)?; // OpenConfirmChannel on Chain B - submit_ibc_tx(test_b, msg, ALBERT, ALBERT_KEY, false)?; + submit_ibc_tx( + test_b, + make_ibc_data(msg.to_any()), + ALBERT, + ALBERT_KEY, + false, + )?; Ok(((port_id.clone(), channel_id_a), (port_id, channel_id_b))) } @@ -1128,7 +1384,6 @@ fn transfer_token( channel_id_a, None, None, - None, false, )?; let events = get_events(test_a, height)?; @@ -1138,7 +1393,7 @@ fn transfer_token( let height_a = query_height(test_a)?; let proof_commitment_on_a = get_commitment_proof(test_a, &packet, height_a)?; - let msg = MsgRecvPacket { + let msg = IbcMsgRecvPacket { packet, proof_commitment_on_a, proof_height_on_a: height_a, @@ -1147,7 +1402,13 @@ fn transfer_token( // Update the client state of Chain A on Chain B update_client_with_height(test_a, test_b, client_id_b, height_a)?; // Receive the token on Chain B - let height = submit_ibc_tx(test_b, msg, ALBERT, ALBERT_KEY, false)?; + let height = submit_ibc_tx( + test_b, + make_ibc_data(msg.to_any()), + ALBERT, + ALBERT_KEY, + false, + )?; let events = get_events(test_b, height)?; let packet = get_packet_from_events(&events).ok_or(eyre!(TX_FAILED))?; let ack = get_ack_from_events(&events).ok_or(eyre!(TX_FAILED))?; @@ -1170,7 +1431,13 @@ fn transfer_token( // Update the client state of Chain B on Chain A update_client_with_height(test_b, test_a, client_id_a, height_b)?; // Acknowledge on Chain A - submit_ibc_tx(test_a, msg, ALBERT, ALBERT_KEY, false)?; + submit_ibc_tx( + test_a, + make_ibc_data(msg.to_any()), + ALBERT, + ALBERT_KEY, + false, + )?; Ok(()) } @@ -1195,12 +1462,12 @@ fn 
try_invalid_transfers( port_id_a, channel_id_a, None, - None, Some("The amount for the IBC transfer should be an integer"), false, )?; // invalid port + let nam_addr = find_address(test_a, NAM)?; transfer( test_a, ALBERT, @@ -1211,8 +1478,8 @@ fn try_invalid_transfers( &"port".parse().unwrap(), channel_id_a, None, - None, - Some("Error trying to apply a transaction"), + // the IBC denom can't be parsed when using an invalid port + Some(&format!("Invalid IBC denom: {nam_addr}")), false, )?; @@ -1227,7 +1494,6 @@ fn try_invalid_transfers( port_id_a, &"channel-42".parse().unwrap(), None, - None, Some("Error trying to apply a transaction"), false, )?; @@ -1261,7 +1527,6 @@ fn transfer_on_chain( &rpc, ]; let mut client = run!(test, Bin::Client, tx_args, Some(120))?; - client.exp_string(TX_ACCEPTED)?; client.exp_string(TX_APPLIED_SUCCESS)?; client.assert_success(); @@ -1294,7 +1559,6 @@ fn transfer_back( channel_id_b, None, None, - None, false, )?; let events = get_events(test_b, height)?; @@ -1302,7 +1566,7 @@ fn transfer_back( let height_b = query_height(test_b)?; let proof = get_commitment_proof(test_b, &packet, height_b)?; - let msg = MsgRecvPacket { + let msg = IbcMsgRecvPacket { packet, proof_commitment_on_a: proof, proof_height_on_a: height_b, @@ -1311,7 +1575,13 @@ fn transfer_back( // Update the client state of Chain B on Chain A update_client_with_height(test_b, test_a, client_id_a, height_b)?; // Receive the token on Chain A - let height = submit_ibc_tx(test_a, msg, ALBERT, ALBERT_KEY, false)?; + let height = submit_ibc_tx( + test_a, + make_ibc_data(msg.to_any()), + ALBERT, + ALBERT_KEY, + false, + )?; let events = get_events(test_a, height)?; let packet = get_packet_from_events(&events).ok_or(eyre!(TX_FAILED))?; let ack = get_ack_from_events(&events).ok_or(eyre!(TX_FAILED))?; @@ -1329,7 +1599,13 @@ fn transfer_back( // Update the client state of Chain A on Chain B update_client_with_height(test_a, test_b, client_id_b, height_a)?; // Acknowledge on Chain B - 
submit_ibc_tx(test_b, msg, ALBERT, ALBERT_KEY, false)?; + submit_ibc_tx( + test_b, + make_ibc_data(msg.to_any()), + ALBERT, + ALBERT_KEY, + false, + )?; Ok(()) } @@ -1354,7 +1630,6 @@ fn transfer_timeout( ALBERT_KEY, port_id_a, channel_id_a, - None, Some(Duration::new(5, 0)), None, false, @@ -1368,7 +1643,7 @@ fn transfer_timeout( let height_b = query_height(test_b)?; let proof_unreceived_on_b = get_receipt_absence_proof(test_b, &packet, height_b)?; - let msg = MsgTimeout { + let msg = IbcMsgTimeout { packet, next_seq_recv_on_b: 1.into(), // not used proof_unreceived_on_b, @@ -1378,245 +1653,17 @@ fn transfer_timeout( // Update the client state of Chain B on Chain A update_client_with_height(test_b, test_a, client_id_a, height_b)?; // Timeout on Chain A - submit_ibc_tx(test_a, msg, ALBERT, ALBERT_KEY, false)?; - - Ok(()) -} - -fn gen_ibc_shielded_transfer( - test: &Test, - target: impl AsRef, - token: impl AsRef, - amount: u64, - port_id: &PortId, - channel_id: &ChannelId, -) -> Result { - std::env::set_var(ENV_VAR_CHAIN_ID, test.net.chain_id.to_string()); - let rpc = get_actor_rpc(test, Who::Validator(0)); - let output_folder = test.test_dir.path().to_string_lossy(); - let args = [ - "ibc-gen-shielded", - "--output-folder-path", - &output_folder, - "--target", - target.as_ref(), - "--token", - token.as_ref(), - "--amount", - &amount.to_string(), - "--port-id", - port_id.as_ref(), - "--channel-id", - channel_id.as_ref(), - "--node", - &rpc, - ]; - let mut client = run!(test, Bin::Client, args, Some(120))?; - let file_path = get_shielded_transfer_path(&mut client)?; - Ok(file_path) -} - -#[allow(clippy::too_many_arguments)] -fn shielded_transfer( - test_a: &Test, - test_b: &Test, - client_id_a: &ClientId, - client_id_b: &ClientId, - port_id_a: &PortId, - channel_id_a: &ChannelId, - port_id_b: &PortId, - channel_id_b: &ChannelId, -) -> Result<()> { - // Get masp proof for the following IBC transfer from the destination chain - // It will send 10 BTC from Chain A to 
PA(B) on Chain B - // PA(B) on Chain B will receive BTC on chain A - std::env::set_var(ENV_VAR_CHAIN_ID, test_a.net.chain_id.to_string()); - let token_addr = find_address(test_a, BTC)?; - let file_path = gen_ibc_shielded_transfer( - test_b, - AB_PAYMENT_ADDRESS, - token_addr.to_string(), - 10, - port_id_b, - channel_id_b, - )?; - - // Send a token to the shielded address on Chain A - transfer_on_chain(test_a, ALBERT, AA_PAYMENT_ADDRESS, BTC, 10, ALBERT_KEY)?; - let rpc = get_actor_rpc(test_a, Who::Validator(0)); - let tx_args = vec![ - "shielded-sync", - "--viewing-keys", - AA_VIEWING_KEY, - "--node", - &rpc, - ]; - let mut client = run!(test_a, Bin::Client, tx_args, Some(120))?; - client.assert_success(); - - // Send a token from SP(A) on Chain A to PA(B) on Chain B - let amount = Amount::native_whole(10).to_string_native(); - let height = transfer( + submit_ibc_tx( test_a, - A_SPENDING_KEY, - AB_PAYMENT_ADDRESS, - BTC, - amount, - ALBERT_KEY, - port_id_a, - channel_id_a, - Some(&file_path.to_string_lossy()), - None, - None, - false, - )?; - let events = get_events(test_a, height)?; - let packet = get_packet_from_events(&events).ok_or(eyre!(TX_FAILED))?; - check_ibc_packet_query(test_a, &"send_packet".parse().unwrap(), &packet)?; - - let height_a = query_height(test_a)?; - let proof_commitment_on_a = - get_commitment_proof(test_a, &packet, height_a)?; - let msg = MsgRecvPacket { - packet, - proof_commitment_on_a, - proof_height_on_a: height_a, - signer: signer(), - }; - // Update the client state of Chain A on Chain B - update_client_with_height(test_a, test_b, client_id_b, height_a)?; - // Receive the token on Chain B - let height = submit_ibc_tx(test_b, msg, ALBERT, ALBERT_KEY, false)?; - let events = get_events(test_b, height)?; - let packet = get_packet_from_events(&events).ok_or(eyre!(TX_FAILED))?; - let ack = get_ack_from_events(&events).ok_or(eyre!(TX_FAILED))?; - check_ibc_packet_query( - test_b, - &"write_acknowledgement".parse().unwrap(), - &packet, - 
)?; - - // get the proof on Chain B - let height_b = query_height(test_b)?; - let proof_acked_on_b = get_ack_proof(test_b, &packet, height_b)?; - let msg = MsgAcknowledgement { - packet, - acknowledgement: ack.try_into().expect("invalid ack"), - proof_acked_on_b, - proof_height_on_b: height_b, - signer: signer(), - }; - // Update the client state of Chain B on Chain A - update_client_with_height(test_b, test_a, client_id_a, height_b)?; - // Acknowledge on Chain A - submit_ibc_tx(test_a, msg, ALBERT, ALBERT_KEY, false)?; - - Ok(()) -} - -#[allow(clippy::too_many_arguments)] -fn shielded_transfer_back( - test_a: &Test, - test_b: &Test, - client_id_a: &ClientId, - client_id_b: &ClientId, - port_id_a: &PortId, - channel_id_a: &ChannelId, - port_id_b: &PortId, - channel_id_b: &ChannelId, -) -> Result<()> { - // Get masp proof for the following IBC transfer from the destination chain - let rpc_a = get_actor_rpc(test_a, Who::Validator(0)); - // It will send 5 BTC from Chain B to PA(A) on Chain A - // Chain A will receive Chain A's BTC - std::env::set_var(ENV_VAR_CHAIN_ID, test_a.net.chain_id.to_string()); - let output_folder = test_b.test_dir.path().to_string_lossy(); - // PA(A) on Chain A will receive BTC on chain A - let token_addr = find_address(test_a, BTC)?; - let ibc_token = format!("{port_id_b}/{channel_id_b}/{token_addr}"); - let args = [ - "ibc-gen-shielded", - "--output-folder-path", - &output_folder, - "--target", - AA_PAYMENT_ADDRESS, - "--token", - &ibc_token, - "--amount", - "5", - "--port-id", - port_id_a.as_ref(), - "--channel-id", - channel_id_a.as_ref(), - "--node", - &rpc_a, - ]; - let mut client = run!(test_a, Bin::Client, args, Some(120))?; - let file_path = get_shielded_transfer_path(&mut client)?; - client.assert_success(); - - // Send a token from SP(B) on Chain B to PA(A) on Chain A - let height = transfer( - test_b, - B_SPENDING_KEY, - AA_PAYMENT_ADDRESS, - &ibc_token, - "5", + make_ibc_data(msg.to_any()), + ALBERT, ALBERT_KEY, - port_id_b, - 
channel_id_b, - Some(&file_path.to_string_lossy()), - None, - None, false, )?; - let events = get_events(test_b, height)?; - let packet = get_packet_from_events(&events).ok_or(eyre!(TX_FAILED))?; - - let height_b = query_height(test_b)?; - let proof_commitment_on_b = - get_commitment_proof(test_b, &packet, height_b)?; - // the message member names are confusing, "_a" means the source - let msg = MsgRecvPacket { - packet, - proof_commitment_on_a: proof_commitment_on_b, - proof_height_on_a: height_b, - signer: signer(), - }; - // Update the client state of Chain B on Chain A - update_client_with_height(test_b, test_a, client_id_a, height_b)?; - // Receive the token on Chain A - let height = submit_ibc_tx(test_a, msg, ALBERT, ALBERT_KEY, false)?; - let events = get_events(test_a, height)?; - let packet = get_packet_from_events(&events).ok_or(eyre!(TX_FAILED))?; - let ack = get_ack_from_events(&events).ok_or(eyre!(TX_FAILED))?; - - // get the proof on Chain A - let height_a = query_height(test_a)?; - let proof_acked_on_a = get_ack_proof(test_a, &packet, height_a)?; - // the message member names are confusing, "_b" means the destination - let msg = MsgAcknowledgement { - packet, - acknowledgement: ack.try_into().expect("invalid ack"), - proof_acked_on_b: proof_acked_on_a, - proof_height_on_b: height_a, - signer: signer(), - }; - // Update the client state of Chain A on Chain B - update_client_with_height(test_a, test_b, client_id_b, height_a)?; - // Acknowledge on Chain B - submit_ibc_tx(test_b, msg, ALBERT, ALBERT_KEY, false)?; Ok(()) } -fn get_shielded_transfer_path(client: &mut NamadaCmd) -> Result { - let (_unread, matched) = - client.exp_regex("Output IBC shielded transfer .*")?; - let file_path = matched.trim().split(' ').last().expect("invalid output"); - Ok(PathBuf::from_str(file_path).expect("invalid file path")) -} - fn get_commitment_proof( test: &Test, packet: &Packet, @@ -1669,14 +1716,13 @@ fn commitment_prefix() -> CommitmentPrefix { fn submit_ibc_tx( 
test: &Test, - message: impl Msg + std::fmt::Debug, + data: Vec, owner: &str, signer: &str, wait_reveal_pk: bool, ) -> Result { std::env::set_var(ENV_VAR_CHAIN_ID, test.net.chain_id.to_string()); let data_path = test.test_dir.path().join("tx.data"); - let data = make_ibc_data(message); std::fs::write(&data_path, data).expect("writing data failed"); let data_path = data_path.to_string_lossy(); @@ -1718,7 +1764,6 @@ fn transfer( signer: impl AsRef, port_id: &PortId, channel_id: &ChannelId, - memo: Option<&str>, timeout_sec: Option, expected_err: Option<&str>, wait_reveal_pk: bool, @@ -1748,12 +1793,6 @@ fn transfer( &rpc, ]; - let memo_path = memo.unwrap_or_default(); - if memo.is_some() { - tx_args.push("--memo-path"); - tx_args.push(memo_path); - } - let timeout = timeout_sec.unwrap_or_default().as_secs().to_string(); if timeout_sec.is_some() { tx_args.push("--timeout-sec-offset"); @@ -1791,7 +1830,6 @@ fn delegate_token(test: &Test) -> Result<()> { &rpc, ]; let mut client = run!(test, Bin::Client, tx_args, Some(40))?; - client.exp_string(TX_ACCEPTED)?; client.exp_string(TX_APPLIED_SUCCESS)?; client.assert_success(); Ok(()) @@ -1826,9 +1864,14 @@ fn propose_funding( let albert = find_address(test_a, ALBERT)?; let rpc_a = get_actor_rpc(test_a, Who::Validator(0)); let epoch = get_epoch(test_a, &rpc_a)?; - let start_epoch = (epoch.0 + 3) / 3 * 3; - let proposal_json_path = - prepare_proposal_data(test_a, 0, albert, pgf_funding, start_epoch); + let start_epoch = (epoch.0 + 6) / 3 * 3; + let proposal_json_path = prepare_proposal_data( + test_a.test_dir.path(), + 0, + albert, + pgf_funding, + start_epoch, + ); let submit_proposal_args = vec![ "init-proposal", @@ -1839,7 +1882,6 @@ fn propose_funding( &rpc_a, ]; let mut client = run!(test_a, Bin::Client, submit_proposal_args, Some(40))?; - client.exp_string(TX_ACCEPTED)?; client.exp_string(TX_APPLIED_SUCCESS)?; client.assert_success(); Ok(start_epoch.into()) @@ -1868,7 +1910,7 @@ fn propose_inflation(test: &Test) -> 
Result { "author": albert, "voting_start_epoch": start_epoch, "voting_end_epoch": start_epoch + 3_u64, - "grace_epoch": start_epoch + 6_u64, + "activation_epoch": start_epoch + 6_u64, }, "data": TestWasms::TxProposalIbcTokenInflation.read_bytes() }); @@ -1886,7 +1928,6 @@ fn propose_inflation(test: &Test) -> Result { &rpc, ]; let mut client = run!(test, Bin::Client, submit_proposal_args, Some(100))?; - client.exp_string(TX_ACCEPTED)?; client.exp_string(TX_APPLIED_SUCCESS)?; client.assert_success(); Ok(start_epoch.into()) @@ -1914,7 +1955,6 @@ fn submit_votes(test: &Test) -> Result<()> { submit_proposal_vote, Some(40) )?; - client.exp_string(TX_ACCEPTED)?; client.exp_string(TX_APPLIED_SUCCESS)?; client.assert_success(); @@ -1932,7 +1972,6 @@ fn submit_votes(test: &Test) -> Result<()> { ]; let mut client = run!(test, Bin::Client, submit_proposal_vote_delagator, Some(40))?; - client.exp_string(TX_ACCEPTED)?; client.exp_string(TX_APPLIED_SUCCESS)?; client.assert_success(); Ok(()) @@ -1959,10 +1998,9 @@ fn check_tx_height(test: &Test, client: &mut NamadaCmd) -> Result { Ok(height) } -fn make_ibc_data(message: impl Msg) -> Vec { - let msg = message.to_any(); +fn make_ibc_data(message: Any) -> Vec { let mut tx_data = vec![]; - prost::Message::encode(&msg, &mut tx_data) + prost::Message::encode(&message, &mut tx_data) .expect("encoding IBC message shouldn't fail"); tx_data } @@ -2192,102 +2230,42 @@ fn check_shielded_balances( test_a: &Test, test_b: &Test, ) -> Result<()> { - // Check the balance on Chain B - std::env::set_var(ENV_VAR_CHAIN_ID, test_a.net.chain_id.to_string()); - // PA(B) on Chain B has received BTC on chain A - let token_addr = find_address(test_a, BTC)?.to_string(); - std::env::set_var(ENV_VAR_CHAIN_ID, test_b.net.chain_id.to_string()); - let rpc_b = get_actor_rpc(test_b, Who::Validator(0)); - let tx_args = vec![ - "shielded-sync", - "--viewing-keys", - AB_VIEWING_KEY, - "--node", - &rpc_b, - ]; - let mut client = run!(test_b, Bin::Client, tx_args, 
Some(120))?; - client.assert_success(); - let ibc_denom = format!("{dest_port_id}/{dest_channel_id}/btc"); + // Check the balance on Chain A + let rpc_a = get_actor_rpc(test_a, Who::Validator(0)); + shielded_sync(test_a, AA_VIEWING_KEY)?; let query_args = vec![ "balance", "--owner", - AB_VIEWING_KEY, + AA_VIEWING_KEY, "--token", - &token_addr, + BTC, "--no-conversions", "--node", - &rpc_b, + &rpc_a, ]; - let expected = format!("{ibc_denom}: 10"); - let mut client = run!(test_b, Bin::Client, query_args, Some(40))?; - client.exp_string(&expected)?; + let mut client = run!(test_a, Bin::Client, query_args, Some(40))?; + client.exp_string("btc: 90")?; client.assert_success(); - Ok(()) -} -/// Check balances after IBC shielded transfer after transfer back -fn check_shielded_balances_after_back( - src_port_id: &PortId, - src_channel_id: &ChannelId, - test_a: &Test, - test_b: &Test, -) -> Result<()> { - std::env::set_var(ENV_VAR_CHAIN_ID, test_a.net.chain_id.to_string()); - let token_addr = find_address(test_a, BTC)?.to_string(); // Check the balance on Chain B - std::env::set_var(ENV_VAR_CHAIN_ID, test_b.net.chain_id.to_string()); + // PA(B) on Chain B has received BTC on chain A + shielded_sync(test_b, AB_VIEWING_KEY)?; let rpc_b = get_actor_rpc(test_b, Who::Validator(0)); - let tx_args = vec![ - "shielded-sync", - "--viewing-keys", - AB_VIEWING_KEY, - "--node", - &rpc_b, - ]; - let mut client = run!(test_b, Bin::Client, tx_args, Some(120))?; - client.assert_success(); - let ibc_denom = format!("{src_port_id}/{src_channel_id}/btc"); + let ibc_denom = format!("{dest_port_id}/{dest_channel_id}/btc"); let query_args = vec![ "balance", "--owner", AB_VIEWING_KEY, "--token", - &token_addr, + &ibc_denom, "--no-conversions", "--node", &rpc_b, ]; - let expected = format!("{ibc_denom}: 5"); + let expected = format!("{ibc_denom}: 10"); let mut client = run!(test_b, Bin::Client, query_args, Some(40))?; client.exp_string(&expected)?; client.assert_success(); - - // Check the balance 
on Chain A - std::env::set_var(ENV_VAR_CHAIN_ID, test_a.net.chain_id.to_string()); - let rpc_a = get_actor_rpc(test_a, Who::Validator(0)); - let tx_args = vec![ - "shielded-sync", - "--viewing-keys", - AA_VIEWING_KEY, - "--node", - &rpc_a, - ]; - let mut client = run!(test_a, Bin::Client, tx_args, Some(120))?; - client.assert_success(); - let query_args = vec![ - "balance", - "--owner", - AA_VIEWING_KEY, - "--token", - &token_addr, - "--no-conversions", - "--node", - &rpc_a, - ]; - let mut client = run!(test_a, Bin::Client, query_args, Some(40))?; - client.exp_string("btc: 5")?; - client.assert_success(); - Ok(()) } @@ -2303,17 +2281,25 @@ fn check_funded_balances( let query_args = vec![ "balance", "--owner", BERTHA, "--token", &ibc_denom, "--node", &rpc_b, ]; - let expected = format!("{ibc_denom}: 10"); let mut client = run!(test_b, Bin::Client, query_args, Some(40))?; - client.exp_string(&expected)?; + let regex = format!("{ibc_denom}: .*"); + let (_, matched) = client.exp_regex(®ex)?; + let regex = regex::Regex::new(r"[0-9]+").unwrap(); + let iter = regex.find_iter(&matched); + let balance: u64 = iter.last().unwrap().as_str().parse().unwrap(); + assert!(balance >= 10); client.assert_success(); let query_args = vec![ "balance", "--owner", CHRISTEL, "--token", &ibc_denom, "--node", &rpc_b, ]; - let expected = format!("{ibc_denom}: 5"); let mut client = run!(test_b, Bin::Client, query_args, Some(40))?; - client.exp_string(&expected)?; + let regex = format!("{ibc_denom}: .*"); + let (_, matched) = client.exp_regex(®ex)?; + let regex = regex::Regex::new(r"[0-9]+").unwrap(); + let iter = regex.find_iter(&matched); + let balance: u64 = iter.last().unwrap().as_str().parse().unwrap(); + assert!(balance >= 5); client.assert_success(); Ok(()) @@ -2470,3 +2456,18 @@ fn get_events(test: &Test, height: u32) -> Result> { .end_block_events .ok_or_else(|| eyre!("IBC event was not found: height {}", height)) } + +fn shielded_sync(test: &Test, viewing_key: impl AsRef) -> 
Result<()> { + std::env::set_var(ENV_VAR_CHAIN_ID, test.net.chain_id.to_string()); + let rpc = get_actor_rpc(test, Who::Validator(0)); + let tx_args = vec![ + "shielded-sync", + "--viewing-keys", + viewing_key.as_ref(), + "--node", + &rpc, + ]; + let mut client = run!(test, Bin::Client, tx_args, Some(120))?; + client.assert_success(); + Ok(()) +} diff --git a/crates/tests/src/e2e/ledger_tests.rs b/crates/tests/src/e2e/ledger_tests.rs index 08b2f1e752..454c3a678b 100644 --- a/crates/tests/src/e2e/ledger_tests.rs +++ b/crates/tests/src/e2e/ledger_tests.rs @@ -10,7 +10,6 @@ //! `NAMADA_E2E_KEEP_TEMP=true`. #![allow(clippy::type_complexity)] -use std::collections::HashMap; use std::fmt::Display; use std::path::PathBuf; use std::process::Command; @@ -18,14 +17,10 @@ use std::str::FromStr; use std::sync::Arc; use std::time::{Duration, Instant}; -use borsh_ext::BorshSerializeExt; use color_eyre::eyre::Result; use color_eyre::owo_colors::OwoColorize; -use data_encoding::HEXLOWER; use namada::core::address::Address; use namada::core::storage::Epoch; -use namada::governance::cli::onchain::{PgfFunding, StewardsUpdate}; -use namada::governance::storage::proposal::{PGFInternalTarget, PGFTarget}; use namada::token; use namada_apps::cli::context::ENV_VAR_CHAIN_ID; use namada_apps::config::ethereum_bridge; @@ -33,24 +28,18 @@ use namada_apps::config::utils::convert_tm_addr_to_socket_addr; use namada_apps::facade::tendermint_config::net::Address as TendermintAddress; use namada_core::chain::ChainId; use namada_core::token::NATIVE_MAX_DECIMAL_PLACES; -use namada_sdk::governance::pgf::cli::steward::Commission; use namada_sdk::masp::fs::FsShieldedUtils; use namada_test_utils::TestWasms; -use namada_tx_prelude::dec::Dec; -use namada_vp_prelude::BTreeSet; use serde::Serialize; use serde_json::json; use setup::constants::*; use setup::Test; use super::helpers::{ - epochs_per_year_from_min_duration, get_established_addr_from_pregenesis, - get_height, get_pregenesis_wallet, 
wait_for_block_height, - wait_for_wasm_pre_compile, -}; -use super::setup::{ - get_all_wasms_hashes, set_ethereum_bridge_mode, working_dir, NamadaCmd, + epochs_per_year_from_min_duration, get_height, get_pregenesis_wallet, + wait_for_block_height, wait_for_wasm_pre_compile, }; +use super::setup::{set_ethereum_bridge_mode, working_dir, NamadaCmd}; use crate::e2e::helpers::{ epoch_sleep, find_address, find_bonded_stake, get_actor_rpc, get_epoch, is_debug_mode, parse_reached_epoch, @@ -60,8 +49,8 @@ use crate::e2e::setup::{ Who, }; use crate::strings::{ - LEDGER_SHUTDOWN, LEDGER_STARTED, NON_VALIDATOR_NODE, TX_ACCEPTED, - TX_APPLIED_SUCCESS, TX_FAILED, TX_REJECTED, VALIDATOR_NODE, + LEDGER_SHUTDOWN, LEDGER_STARTED, NON_VALIDATOR_NODE, TX_APPLIED_SUCCESS, + TX_REJECTED, VALIDATOR_NODE, }; use crate::{run, run_as}; @@ -485,284 +474,6 @@ fn stop_ledger_at_height() -> Result<()> { Ok(()) } -/// In this test we: -/// 1. Run the ledger node -/// 2. Submit a token transfer tx -/// 3. Submit a transaction to update an account's validity predicate -/// 4. Submit a custom tx -/// 5. Submit a tx to initialize a new account -/// 6. Submit a tx to withdraw from faucet account (requires PoW challenge -/// solution) -/// 7. Query token balance -/// 8. Query the raw bytes of a storage key -#[test] -fn ledger_txs_and_queries() -> Result<()> { - let test = setup::single_node_net()?; - set_ethereum_bridge_mode( - &test, - &test.net.chain_id, - Who::Validator(0), - ethereum_bridge::ledger::Mode::Off, - None, - ); - - // 1. Run the ledger node - let _bg_ledger = - start_namada_ledger_node_wait_wasm(&test, Some(0), Some(40))? 
- .background(); - - // for a custom tx - let transfer = token::Transfer { - source: find_address(&test, BERTHA).unwrap(), - target: find_address(&test, ALBERT).unwrap(), - token: find_address(&test, NAM).unwrap(), - amount: token::DenominatedAmount::new( - token::Amount::native_whole(10), - token::NATIVE_MAX_DECIMAL_PLACES.into(), - ), - key: None, - shielded: None, - } - .serialize_to_vec(); - let tx_data_path = test.test_dir.path().join("tx.data"); - std::fs::write(&tx_data_path, transfer).unwrap(); - let tx_data_path = tx_data_path.to_string_lossy(); - - let validator_one_rpc = get_actor_rpc(&test, Who::Validator(0)); - - let multisig_account = - format!("{},{},{}", BERTHA_KEY, ALBERT_KEY, CHRISTEL_KEY); - - let txs_args = vec![ - // 2. Submit a token transfer tx (from an established account) - vec![ - "transfer", - "--source", - BERTHA, - "--target", - ALBERT, - "--token", - NAM, - "--amount", - "10.1", - "--signing-keys", - BERTHA_KEY, - "--node", - &validator_one_rpc, - ], - // Submit a token transfer tx (from an ed25519 implicit account) - vec![ - "transfer", - "--source", - DAEWON, - "--target", - ALBERT, - "--token", - NAM, - "--amount", - "10.1", - "--signing-keys", - DAEWON, - "--node", - &validator_one_rpc, - ], - // Submit a token transfer tx (from a secp256k1 implicit account) - vec![ - "transfer", - "--source", - ESTER, - "--target", - ALBERT, - "--token", - NAM, - "--amount", - "10.1", - "--node", - &validator_one_rpc, - ], - // 3. Submit a transaction to update an account's validity - // predicate - vec![ - "update-account", - "--address", - BERTHA, - "--code-path", - VP_USER_WASM, - "--signing-keys", - BERTHA_KEY, - "--node", - &validator_one_rpc, - ], - // 4. Submit a custom tx - vec![ - "tx", - "--code-path", - TX_TRANSFER_WASM, - "--data-path", - &tx_data_path, - "--owner", - BERTHA, - "--signing-keys", - BERTHA_KEY, - "--node", - &validator_one_rpc, - ], - // 5. 
Submit a tx to initialize a new account - vec![ - "init-account", - "--public-keys", - // Value obtained from `namada::core::key::ed25519::tests::gen_keypair` - "tpknam1qpqfzxu3gt05jx2mvg82f4anf90psqerkwqhjey4zlqv0qfgwuvkzt5jhkp", - "--threshold", - "1", - "--code-path", - VP_USER_WASM, - "--alias", - "Test-Account", - "--signing-keys", - BERTHA_KEY, - "--node", - &validator_one_rpc, - ], - // 5. Submit a tx to initialize a new multisig account - vec![ - "init-account", - "--public-keys", - &multisig_account, - "--threshold", - "2", - "--code-path", - VP_USER_WASM, - "--alias", - "Test-Account-2", - "--signing-keys", - BERTHA_KEY, - "--node", - &validator_one_rpc, - ], - ]; - - for tx_args in &txs_args { - for &dry_run in &[true, false] { - let tx_args = if dry_run && tx_args[0] == "tx" { - continue; - } else if dry_run { - [tx_args.clone(), vec!["--dry-run"]].concat() - } else { - tx_args.clone() - }; - let mut client = run!(test, Bin::Client, tx_args, Some(40))?; - - if !dry_run { - client.exp_string(TX_ACCEPTED)?; - } - client.exp_string(TX_APPLIED_SUCCESS)?; - client.assert_success(); - } - } - - let query_args_and_expected_response = vec![ - // 7. 
Query token balance - ( - vec![ - "balance", - "--owner", - BERTHA, - "--token", - NAM, - "--node", - &validator_one_rpc, - ], - // expect a decimal - vec![r"nam: \d+(\.\d+)?"], - // check also as validator node - true, - ), - // Unspecified token expect all tokens from wallet derived from genesis - ( - vec!["balance", "--owner", ALBERT, "--node", &validator_one_rpc], - // expect all genesis tokens, sorted by alias - vec![ - r"apfel: \d+(\.\d+)?", - r"btc: \d+(\.\d+)?", - r"dot: \d+(\.\d+)?", - r"eth: \d+(\.\d+)?", - r"kartoffel: \d+(\.\d+)?", - r"schnitzel: \d+(\.\d+)?", - ], - // check also as validator node - true, - ), - ( - vec![ - "query-account", - "--owner", - "Test-Account-2", - "--node", - &validator_one_rpc, - ], - vec!["Threshold: 2"], - // check also as validator node - false, - ), - ]; - for (query_args, expected, check_as_validator) in - &query_args_and_expected_response - { - // Run as a non-validator - let mut client = run!(test, Bin::Client, query_args, Some(40))?; - for pattern in expected { - client.exp_regex(pattern)?; - } - client.assert_success(); - - if !check_as_validator { - continue; - } - - // Run as a validator - let mut client = run_as!( - test, - Who::Validator(0), - Bin::Client, - query_args, - Some(40) - )?; - for pattern in expected { - client.exp_regex(pattern)?; - } - client.assert_success(); - } - let christel = find_address(&test, CHRISTEL)?; - // as setup in `genesis/e2e-tests-single-node.toml` - let christel_balance = token::Amount::native_whole(2000000); - let nam = find_address(&test, NAM)?; - let storage_key = - token::storage_key::balance_key(&nam, &christel).to_string(); - let query_args_and_expected_response = vec![ - // 8. 
Query storage key and get hex-encoded raw bytes - ( - vec![ - "query-bytes", - "--storage-key", - &storage_key, - "--node", - &validator_one_rpc, - ], - // expect hex encoded of borsh encoded bytes - HEXLOWER.encode(&christel_balance.serialize_to_vec()), - ), - ]; - for (query_args, expected) in &query_args_and_expected_response { - let mut client = run!(test, Bin::Client, query_args, Some(40))?; - client.exp_string(expected)?; - - client.assert_success(); - } - - Ok(()) -} - /// Test the optional disposable keypair for wrapper signing /// /// 1. Test that a tx requesting a disposable signer with a correct unshielding @@ -826,7 +537,6 @@ fn wrapper_disposable_signer() -> Result<()> { &validator_one_rpc, ]; let mut client = run!(test, Bin::Client, tx_args, Some(720))?; - client.exp_string(TX_ACCEPTED)?; client.exp_string(TX_APPLIED_SUCCESS)?; } @@ -865,7 +575,6 @@ fn wrapper_disposable_signer() -> Result<()> { ]; let mut client = run!(test, Bin::Client, tx_args, Some(720))?; - client.exp_string(TX_ACCEPTED)?; client.exp_string(TX_APPLIED_SUCCESS)?; let _ep1 = epoch_sleep(&test, &validator_one_rpc, 720)?; let tx_args = vec!["shielded-sync", "--node", &validator_one_rpc]; @@ -922,114 +631,10 @@ fn wrapper_disposable_signer() -> Result<()> { ]; let mut client = run!(test, Bin::Client, tx_args, Some(720))?; - client.exp_string(TX_ACCEPTED)?; client.exp_string(TX_APPLIED_SUCCESS)?; Ok(()) } -/// In this test we: -/// 1. Run the ledger node -/// 2. Submit an invalid transaction (disallowed by state machine) -/// 3. Shut down the ledger -/// 4. Restart the ledger -/// 5. Submit and invalid transactions (malformed) -#[test] -fn invalid_transactions() -> Result<()> { - let test = setup::single_node_net()?; - - set_ethereum_bridge_mode( - &test, - &test.net.chain_id, - Who::Validator(0), - ethereum_bridge::ledger::Mode::Off, - None, - ); - - // 1. Run the ledger node - let bg_ledger = - start_namada_ledger_node_wait_wasm(&test, Some(0), Some(40))? - .background(); - - // 2. 
Submit a an invalid transaction (trying to transfer tokens should fail - // in the user's VP due to the wrong signer) - let validator_one_rpc = get_actor_rpc(&test, Who::Validator(0)); - - let tx_args = vec![ - "transfer", - "--source", - BERTHA, - "--target", - ALBERT, - "--token", - NAM, - "--amount", - "1", - "--signing-keys", - ALBERT_KEY, - "--node", - &validator_one_rpc, - "--force", - ]; - - let mut client = run!(test, Bin::Client, tx_args, Some(40))?; - client.exp_string(TX_ACCEPTED)?; - client.exp_string(TX_REJECTED)?; - - client.assert_success(); - let mut ledger = bg_ledger.foreground(); - ledger.exp_string("rejected inner txs: 1")?; - - // Wait to commit a block - ledger.exp_regex(r"Committed block hash.*, height: [0-9]+")?; - - // 3. Shut it down - ledger.interrupt()?; - // Wait for the node to stop running to finish writing the state and tx - // queue - ledger.exp_string(LEDGER_SHUTDOWN)?; - ledger.exp_eof()?; - drop(ledger); - - // 4. Restart the ledger - let mut ledger = start_namada_ledger_node(&test, Some(0), Some(40))?; - - // There should be previous state now - ledger.exp_string("Last state root hash:")?; - // Wait for a block by which time the RPC should be ready - ledger.exp_string("Committed block hash")?; - let _bg_ledger = ledger.background(); - - // we need to wait for the rpc endpoint to start - sleep(10); - - // 5. 
Submit an invalid transactions (invalid token address) - let daewon_lower = DAEWON.to_lowercase(); - let tx_args = vec![ - "transfer", - "--source", - DAEWON, - "--signing-keys", - &daewon_lower, - "--target", - ALBERT, - "--token", - BERTHA, - "--amount", - "1000000.1", - // Force to ignore client check that fails on the balance check of the - // source address - "--force", - "--node", - &validator_one_rpc, - ]; - - let mut client = run!(test, Bin::Client, tx_args, Some(40))?; - client.exp_string(TX_ACCEPTED)?; - client.exp_string(TX_FAILED)?; - client.assert_success(); - Ok(()) -} - /// PoS bonding, unbonding and withdrawal tests. In this test we: /// /// 1. Run the ledger node with shorter epochs for faster progression @@ -1215,10 +820,17 @@ fn pos_bonds() -> Result<()> { "Current epoch: {}, earliest epoch for withdrawal: {}", epoch, delegation_withdrawable_epoch ); + #[allow(clippy::disallowed_methods)] let start = Instant::now(); let loop_timeout = Duration::new(120, 0); loop { - if Instant::now().duration_since(start) > loop_timeout { + if { + #[allow(clippy::disallowed_methods)] + Instant::now() + } + .duration_since(start) + > loop_timeout + { panic!( "Timed out waiting for epoch: {}", delegation_withdrawable_epoch @@ -1280,529 +892,222 @@ fn pos_bonds() -> Result<()> { Ok(()) } -/// Test for claiming PoS inflationary rewards +/// PoS validator creation test. In this test we: /// -/// 1. Run the ledger node -/// 2. Wait some epochs while inflationary rewards accumulate in the PoS system -/// 3. Submit a claim-rewards tx -/// 4. Query the validator's balance before and after the claim tx to ensure -/// that reward tokens were actually transferred +/// 1. Run the ledger node with shorter epochs for faster progression +/// 2. Initialize a new validator account +/// 3. Submit a delegation to the new validator +/// 4. Transfer some NAM to the new validator +/// 5. Submit a self-bond for the new validator +/// 6. Wait for the pipeline epoch +/// 7. 
Check the new validator's bonded stake #[test] -fn pos_rewards() -> Result<()> { +fn pos_init_validator() -> Result<()> { + let pipeline_len = 1; + let validator_stake = token::Amount::native_whole(100000_u64); let test = setup::network( - |mut genesis, base_dir| { - genesis.parameters.parameters.max_expected_time_per_block = 4; + |mut genesis, base_dir: &_| { + genesis.parameters.parameters.min_num_of_blocks = 4; genesis.parameters.parameters.epochs_per_year = 31_536_000; genesis.parameters.parameters.max_expected_time_per_block = 1; - genesis.parameters.pos_params.pipeline_len = 2; - genesis.parameters.pos_params.unbonding_len = 4; - setup::set_validators(1, genesis, base_dir, default_port_offset) + genesis.parameters.pos_params.pipeline_len = pipeline_len; + genesis.parameters.pos_params.unbonding_len = 2; + let genesis = setup::set_validators( + 1, + genesis, + base_dir, + default_port_offset, + ); + println!("{:?}", genesis.transactions.bond); + let stake = genesis + .transactions + .bond + .as_ref() + .unwrap() + .iter() + .map(|bond| { + bond.data + .amount + .increase_precision(NATIVE_MAX_DECIMAL_PLACES.into()) + .unwrap() + .amount() + }) + .sum::(); + assert_eq!( + stake, validator_stake, + "Assuming this stake, we give the same amount to the new \ + validator to have half of voting power", + ); + genesis }, None, )?; - for i in 0..1 { - set_ethereum_bridge_mode( - &test, - &test.net.chain_id, - Who::Validator(i), - ethereum_bridge::ledger::Mode::Off, - None, - ); - } + // 1. Run a validator and non-validator ledger node + let mut validator_0 = + start_namada_ledger_node_wait_wasm(&test, Some(0), Some(60))?; + let mut non_validator = + start_namada_ledger_node_wait_wasm(&test, None, Some(60))?; - // 1. Run 3 genesis validator ledger nodes - let _bg_validator_0 = - start_namada_ledger_node_wait_wasm(&test, Some(0), Some(40))? 
- .background(); + // Wait for a first block + validator_0.exp_string("Committed block hash")?; + let _bg_validator_0 = validator_0.background(); + non_validator.exp_string("Committed block hash")?; + let bg_non_validator = non_validator.background(); - let validator_0_rpc = get_actor_rpc(&test, Who::Validator(0)); + let non_validator_rpc = get_actor_rpc(&test, Who::NonValidator); - // Query the current rewards for the validator self-bond + // 2. Initialize a new validator account with the non-validator node + let new_validator = "new-validator"; + let _new_validator_key = format!("{}-key", new_validator); let tx_args = vec![ - "rewards", - "--validator", - "validator-0", - "--node", - &validator_0_rpc, - ]; - let mut client = - run_as!(test, Who::Validator(0), Bin::Client, tx_args, Some(40))?; - let (_, res) = client - .exp_regex(r"Current rewards available for claim: [0-9\.]+ NAM") - .unwrap(); - let words = res.split(' ').collect::>(); - let res = words[words.len() - 2]; - let mut last_amount = token::Amount::from_str( - res.split(' ').last().unwrap(), - NATIVE_MAX_DECIMAL_PLACES, - ) - .unwrap(); - client.assert_success(); - - // Wait some epochs - let mut last_epoch = get_epoch(&test, &validator_0_rpc)?; - let wait_epoch = last_epoch + 4_u64; - - let start = Instant::now(); - let loop_timeout = Duration::new(40, 0); - loop { - if Instant::now().duration_since(start) > loop_timeout { - panic!("Timed out waiting for epoch: {}", wait_epoch); - } - - let epoch = epoch_sleep(&test, &validator_0_rpc, 40)?; - if dbg!(epoch) >= wait_epoch { - break; - } - - // Query the current rewards for the validator self-bond and see that it - // grows - let tx_args = vec![ - "rewards", - "--validator", - "validator-0", - "--node", - &validator_0_rpc, - ]; - let mut client = - run_as!(test, Who::Validator(0), Bin::Client, tx_args, Some(40))?; - let (_, res) = client - .exp_regex(r"Current rewards available for claim: [0-9\.]+ NAM") - .unwrap(); - let words = res.split(' 
').collect::>(); - let res = words[words.len() - 2]; - let amount = token::Amount::from_str( - res.split(' ').last().unwrap(), - NATIVE_MAX_DECIMAL_PLACES, - ) - .unwrap(); - client.assert_success(); - - if epoch > last_epoch { - assert!(amount > last_amount); - } else { - assert_eq!(amount, last_amount); - } - - last_amount = amount; - last_epoch = epoch; - } - - // Query the balance of the validator account - let query_balance_args = vec![ - "balance", - "--owner", - "validator-0", - "--token", - NAM, - "--node", - &validator_0_rpc, - ]; - let mut client = run!(test, Bin::Client, query_balance_args, Some(40))?; - let (_, res) = client.exp_regex(r"nam: [0-9\.]+").unwrap(); - let amount_pre = token::Amount::from_str( - res.split(' ').last().unwrap(), - NATIVE_MAX_DECIMAL_PLACES, - ) - .unwrap(); - client.assert_success(); - - // Claim rewards - let tx_args = vec![ - "claim-rewards", - "--validator", - "validator-0", + "init-validator", + "--alias", + new_validator, + "--account-keys", + "bertha-key", + "--commission-rate", + "0.05", + "--max-commission-rate-change", + "0.01", + "--email", + "null@null.net", "--signing-keys", - "validator-0-balance-key", + "bertha-key", "--node", - &validator_0_rpc, + &non_validator_rpc, + "--unsafe-dont-encrypt", ]; - let mut client = - run_as!(test, Who::Validator(0), Bin::Client, tx_args, Some(40))?; + let mut client = run!(test, Bin::Client, tx_args, Some(40))?; client.exp_string(TX_APPLIED_SUCCESS)?; client.assert_success(); - // Query the validator balance again and check that the balance has grown - // after claiming - let query_balance_args = vec![ - "balance", - "--owner", - "validator-0", - "--token", - NAM, - "--node", - &validator_0_rpc, - ]; - let mut client = run!(test, Bin::Client, query_balance_args, Some(40))?; - let (_, res) = client.exp_regex(r"nam: [0-9\.]+").unwrap(); - let amount_post = token::Amount::from_str( - res.split(' ').last().unwrap(), - NATIVE_MAX_DECIMAL_PLACES, - ) - .unwrap(); - 
client.assert_success(); - - assert!(amount_post > amount_pre); + // Stop the non-validator node and run it as the new validator + let mut non_validator = bg_non_validator.foreground(); + non_validator.interrupt()?; + non_validator.exp_eof()?; - Ok(()) -} + // it takes a bit before the node is shutdown. We dont want flasky test. + if is_debug_mode() { + sleep(10); + } else { + sleep(5); + } -/// Test for PoS bonds and unbonds queries. -/// -/// 1. Run the ledger node -/// 2. Submit a delegation to the genesis validator -/// 3. Wait for epoch 4 -/// 4. Submit another delegation to the genesis validator -/// 5. Submit an unbond of the delegation -/// 6. Wait for epoch 7 -/// 7. Check the output of the bonds query -#[test] -fn test_bond_queries() -> Result<()> { - let pipeline_len = 2; - let unbonding_len = 4; - let test = setup::network( - |mut genesis, base_dir: &_| { - genesis.parameters.parameters.min_num_of_blocks = 2; - genesis.parameters.parameters.max_expected_time_per_block = 1; - genesis.parameters.parameters.epochs_per_year = 31_536_000; - genesis.parameters.pos_params.pipeline_len = pipeline_len; - genesis.parameters.pos_params.unbonding_len = unbonding_len; - setup::set_validators(1, genesis, base_dir, default_port_offset) - }, - None, + let loc = format!("{}:{}", std::file!(), std::line!()); + let validator_1_base_dir = test.get_base_dir(Who::NonValidator); + let mut validator_1 = setup::run_cmd( + Bin::Node, + ["ledger"], + Some(60), + &test.working_dir, + validator_1_base_dir, + loc, )?; - // 1. Run the ledger node - let _bg_ledger = - start_namada_ledger_node_wait_wasm(&test, Some(0), Some(40))? - .background(); - - let validator_one_rpc = get_actor_rpc(&test, Who::Validator(0)); - let validator_alias = "validator-0"; + validator_1.exp_string(LEDGER_STARTED)?; + validator_1.exp_string(VALIDATOR_NODE)?; + validator_1.exp_string("Committed block hash")?; + let _bg_validator_1 = validator_1.background(); - // 2. 
Submit a delegation to the genesis validator + // 3. Submit a delegation to the new validator First, transfer some tokens + // to the validator's key for fees: let tx_args = vec![ - "bond", - "--validator", - validator_alias, + "transfer", + "--source", + BERTHA, + "--target", + new_validator, + "--token", + NAM, "--amount", - "100", - "--ledger-address", - &validator_one_rpc, + "10000.5", + "--signing-keys", + BERTHA_KEY, + "--node", + &non_validator_rpc, ]; - let mut client = - run_as!(test, Who::Validator(0), Bin::Client, tx_args, Some(40))?; + let mut client = run!(test, Bin::Client, tx_args, Some(40))?; client.exp_string(TX_APPLIED_SUCCESS)?; client.assert_success(); - - // 3. Submit a delegation to the genesis validator + // Then self-bond the tokens: + let delegation = 5_u64; + let delegation_str = &delegation.to_string(); let tx_args = vec![ "bond", "--validator", - "validator-0", + new_validator, "--source", BERTHA, "--amount", - "200", + delegation_str, "--signing-keys", BERTHA_KEY, - "--ledger-address", - &validator_one_rpc, + "--node", + &non_validator_rpc, ]; let mut client = run!(test, Bin::Client, tx_args, Some(40))?; client.exp_string(TX_APPLIED_SUCCESS)?; client.assert_success(); - // 3. Wait for epoch 4 - let start = Instant::now(); - let loop_timeout = Duration::new(20, 0); - loop { - if Instant::now().duration_since(start) > loop_timeout { - panic!("Timed out waiting for epoch: {}", 1); - } - let epoch = epoch_sleep(&test, &validator_one_rpc, 40)?; - if epoch >= Epoch(4) { - break; - } - } - - // 4. Submit another delegation to the genesis validator + // 4. 
Transfer some NAM to the new validator + let validator_stake_str = &validator_stake.to_string_native(); let tx_args = vec![ - "bond", - "--validator", - validator_alias, + "transfer", "--source", BERTHA, + "--target", + new_validator, + "--token", + NAM, "--amount", - "300", + validator_stake_str, "--signing-keys", BERTHA_KEY, - "--ledger-address", - &validator_one_rpc, + "--node", + &non_validator_rpc, ]; let mut client = run!(test, Bin::Client, tx_args, Some(40))?; client.exp_string(TX_APPLIED_SUCCESS)?; client.assert_success(); - // 5. Submit an unbond of the delegation + // 5. Submit a self-bond for the new validator let tx_args = vec![ - "unbond", + "bond", "--validator", - validator_alias, - "--source", - BERTHA, + new_validator, "--amount", - "412", - "--signing-keys", - BERTHA_KEY, - "--ledger-address", - &validator_one_rpc, + validator_stake_str, + "--node", + &non_validator_rpc, ]; let mut client = run!(test, Bin::Client, tx_args, Some(40))?; client.exp_string(TX_APPLIED_SUCCESS)?; - let (_, res) = client - .exp_regex(r"withdrawable starting from epoch [0-9]+") - .unwrap(); - let withdraw_epoch = - Epoch::from_str(res.split(' ').last().unwrap()).unwrap(); client.assert_success(); - // 6. Wait for withdraw_epoch + // 6. Wait for the pipeline epoch when the validator's bonded stake should + // be non-zero + let epoch = get_epoch(&test, &non_validator_rpc)?; + let earliest_update_epoch = epoch + pipeline_len; + println!( + "Current epoch: {}, earliest epoch with updated bonded stake: {}", + epoch, earliest_update_epoch + ); + #[allow(clippy::disallowed_methods)] + let start = Instant::now(); + let loop_timeout = Duration::new(20, 0); loop { - let epoch = epoch_sleep(&test, &validator_one_rpc, 120)?; - // NOTE: test passes from epoch ~13 onwards - if epoch >= withdraw_epoch { - break; - } - } - - // 7. 
Check the output of the bonds query - let tx_args = vec!["bonds", "--ledger-address", &validator_one_rpc]; - let mut client = run!(test, Bin::Client, tx_args, Some(40))?; - client.exp_string( - "All bonds total active: 100188.000000\r -All bonds total: 100188.000000\r -All bonds total slashed: 0.000000\r -All unbonds total active: 412.000000\r -All unbonds total: 412.000000\r -All unbonds total withdrawable: 412.000000\r -All unbonds total slashed: 0.000000\r", - )?; - client.assert_success(); - - Ok(()) -} - -/// PoS validator creation test. In this test we: -/// -/// 1. Run the ledger node with shorter epochs for faster progression -/// 2. Initialize a new validator account -/// 3. Submit a delegation to the new validator -/// 4. Transfer some NAM to the new validator -/// 5. Submit a self-bond for the new validator -/// 6. Wait for the pipeline epoch -/// 7. Check the new validator's bonded stake -#[test] -fn pos_init_validator() -> Result<()> { - let pipeline_len = 1; - let validator_stake = token::Amount::native_whole(100000_u64); - let test = setup::network( - |mut genesis, base_dir: &_| { - genesis.parameters.parameters.min_num_of_blocks = 4; - genesis.parameters.parameters.epochs_per_year = 31_536_000; - genesis.parameters.parameters.max_expected_time_per_block = 1; - genesis.parameters.pos_params.pipeline_len = pipeline_len; - genesis.parameters.pos_params.unbonding_len = 2; - let genesis = setup::set_validators( - 1, - genesis, - base_dir, - default_port_offset, - ); - println!("{:?}", genesis.transactions.bond); - let stake = genesis - .transactions - .bond - .as_ref() - .unwrap() - .iter() - .map(|bond| { - bond.data - .amount - .increase_precision(NATIVE_MAX_DECIMAL_PLACES.into()) - .unwrap() - .amount() - }) - .sum::(); - assert_eq!( - stake, validator_stake, - "Assuming this stake, we give the same amount to the new \ - validator to have half of voting power", - ); - genesis - }, - None, - )?; - - // 1. 
Run a validator and non-validator ledger node - let mut validator_0 = - start_namada_ledger_node_wait_wasm(&test, Some(0), Some(60))?; - let mut non_validator = - start_namada_ledger_node_wait_wasm(&test, None, Some(60))?; - - // Wait for a first block - validator_0.exp_string("Committed block hash")?; - let _bg_validator_0 = validator_0.background(); - non_validator.exp_string("Committed block hash")?; - let bg_non_validator = non_validator.background(); - - let non_validator_rpc = get_actor_rpc(&test, Who::NonValidator); - - // 2. Initialize a new validator account with the non-validator node - let new_validator = "new-validator"; - let _new_validator_key = format!("{}-key", new_validator); - let tx_args = vec![ - "init-validator", - "--alias", - new_validator, - "--account-keys", - "bertha-key", - "--commission-rate", - "0.05", - "--max-commission-rate-change", - "0.01", - "--email", - "null@null.net", - "--signing-keys", - "bertha-key", - "--node", - &non_validator_rpc, - "--unsafe-dont-encrypt", - ]; - let mut client = run!(test, Bin::Client, tx_args, Some(40))?; - client.exp_string(TX_APPLIED_SUCCESS)?; - client.assert_success(); - - // 3. 
Submit a delegation to the new validator First, transfer some tokens - // to the validator's key for fees: - let tx_args = vec![ - "transfer", - "--source", - BERTHA, - "--target", - new_validator, - "--token", - NAM, - "--amount", - "10000.5", - "--signing-keys", - BERTHA_KEY, - "--node", - &non_validator_rpc, - ]; - let mut client = run!(test, Bin::Client, tx_args, Some(40))?; - client.exp_string(TX_APPLIED_SUCCESS)?; - client.assert_success(); - // Then self-bond the tokens: - let delegation = 5_u64; - let delegation_str = &delegation.to_string(); - let tx_args = vec![ - "bond", - "--validator", - new_validator, - "--source", - BERTHA, - "--amount", - delegation_str, - "--signing-keys", - BERTHA_KEY, - "--node", - &non_validator_rpc, - ]; - let mut client = run!(test, Bin::Client, tx_args, Some(40))?; - client.exp_string(TX_APPLIED_SUCCESS)?; - client.assert_success(); - - // 4. Transfer some NAM to the new validator - let validator_stake_str = &validator_stake.to_string_native(); - let tx_args = vec![ - "transfer", - "--source", - BERTHA, - "--target", - new_validator, - "--token", - NAM, - "--amount", - validator_stake_str, - "--signing-keys", - BERTHA_KEY, - "--node", - &non_validator_rpc, - ]; - let mut client = run!(test, Bin::Client, tx_args, Some(40))?; - client.exp_string(TX_APPLIED_SUCCESS)?; - client.assert_success(); - - // 5. Submit a self-bond for the new validator - let tx_args = vec![ - "bond", - "--validator", - new_validator, - "--amount", - validator_stake_str, - "--node", - &non_validator_rpc, - ]; - let mut client = run!(test, Bin::Client, tx_args, Some(40))?; - client.exp_string(TX_APPLIED_SUCCESS)?; - client.assert_success(); - - // Stop the non-validator node and run it as the new validator - let mut non_validator = bg_non_validator.foreground(); - non_validator.interrupt()?; - non_validator.exp_eof()?; - - // it takes a bit before the node is shutdown. We dont want flasky test. 
- if is_debug_mode() { - sleep(10); - } else { - sleep(5); - } - - let loc = format!("{}:{}", std::file!(), std::line!()); - let validator_1_base_dir = test.get_base_dir(Who::NonValidator); - let mut validator_1 = setup::run_cmd( - Bin::Node, - ["ledger"], - Some(60), - &test.working_dir, - validator_1_base_dir, - loc, - )?; - - validator_1.exp_string(LEDGER_STARTED)?; - validator_1.exp_string(VALIDATOR_NODE)?; - validator_1.exp_string("Committed block hash")?; - let _bg_validator_1 = validator_1.background(); - - // 6. Wait for the pipeline epoch when the validator's bonded stake should - // be non-zero - let epoch = get_epoch(&test, &non_validator_rpc)?; - let earliest_update_epoch = epoch + pipeline_len; - println!( - "Current epoch: {}, earliest epoch with updated bonded stake: {}", - epoch, earliest_update_epoch - ); - let start = Instant::now(); - let loop_timeout = Duration::new(20, 0); - loop { - if Instant::now().duration_since(start) > loop_timeout { - panic!("Timed out waiting for epoch: {}", earliest_update_epoch); - } - let epoch = epoch_sleep(&test, &non_validator_rpc, 40)?; - if epoch >= earliest_update_epoch { + if { + #[allow(clippy::disallowed_methods)] + Instant::now() + } + .duration_since(start) + > loop_timeout + { + panic!("Timed out waiting for epoch: {}", earliest_update_epoch); + } + let epoch = epoch_sleep(&test, &non_validator_rpc, 40)?; + if epoch >= earliest_update_epoch { break; } } @@ -1813,843 +1118,25 @@ fn pos_init_validator() -> Result<()> { assert_eq!( bonded_stake, token::Amount::native_whole(delegation) + validator_stake - ); - - Ok(()) -} - -/// Test that multiple txs submitted in the same block all get the tx result. -/// -/// In this test we: -/// 1. Run the ledger node with 10s consensus timeout -/// 2. 
Spawn threads each submitting token transfer tx -#[test] -fn ledger_many_txs_in_a_block() -> Result<()> { - let test = Arc::new(setup::network( - |genesis, base_dir: &_| { - setup::set_validators(1, genesis, base_dir, |_| 0) - }, - // Set 10s consensus timeout to have more time to submit txs - Some("10s"), - )?); - - set_ethereum_bridge_mode( - &test, - &test.net.chain_id, - Who::Validator(0), - ethereum_bridge::ledger::Mode::Off, - None, - ); - - // 1. Run the ledger node - let bg_ledger = - start_namada_ledger_node_wait_wasm(&test, Some(0), Some(40))? - .background(); - - let validator_one_rpc = Arc::new(get_actor_rpc(&test, Who::Validator(0))); - - // A token transfer tx args - let tx_args = Arc::new(vec![ - "transfer", - "--source", - BERTHA, - "--target", - ALBERT, - "--token", - NAM, - "--amount", - "1.01", - "--signing-keys", - BERTHA_KEY, - "--node", - ]); - - // 2. Spawn threads each submitting token transfer tx - // We collect to run the threads in parallel. - #[allow(clippy::needless_collect)] - let tasks: Vec> = (0..4) - .map(|_| { - let test = Arc::clone(&test); - let validator_one_rpc = Arc::clone(&validator_one_rpc); - let tx_args = Arc::clone(&tx_args); - std::thread::spawn(move || { - let mut args = (*tx_args).clone(); - args.push(&*validator_one_rpc); - let mut client = run!(*test, Bin::Client, args, Some(80))?; - client.exp_string(TX_ACCEPTED)?; - client.exp_string(TX_APPLIED_SUCCESS)?; - client.assert_success(); - let res: Result<()> = Ok(()); - res - }) - }) - .collect(); - for task in tasks.into_iter() { - task.join().unwrap()?; - } - // Wait to commit a block - let mut ledger = bg_ledger.foreground(); - ledger.exp_regex(r"Committed block hash.*, height: [0-9]+")?; - - Ok(()) -} - -/// In this test we: -/// 1. Run the ledger node -/// 2. Submit a valid proposal -/// 3. Query the proposal -/// 4. Query token balance (submitted funds) -/// 5. Query governance address balance -/// 6. Submit an invalid proposal -/// 7. 
Check invalid proposal was not accepted -/// 8. Query token balance (funds shall not be submitted) -/// 9. Send a yay vote from a validator -/// 10. Send a yay vote from a normal user -/// 11. Query the proposal and check the result -/// 12. Wait proposal grace and check proposal author funds -/// 13. Check governance address funds are 0 -#[test] -fn proposal_submission() -> Result<()> { - let test = setup::network( - |mut genesis, base_dir: &_| { - genesis.parameters.gov_params.max_proposal_code_size = 600000; - genesis.parameters.parameters.max_expected_time_per_block = 1; - setup::set_validators(1, genesis, base_dir, |_| 0u16) - }, - None, - )?; - set_ethereum_bridge_mode( - &test, - &test.net.chain_id, - Who::Validator(0), - ethereum_bridge::ledger::Mode::Off, - None, - ); - - let namadac_help = vec!["--help"]; - - let mut client = run!(test, Bin::Client, namadac_help, Some(40))?; - client.exp_string("Namada client command line interface.")?; - client.assert_success(); - - // 1. Run the ledger node - let bg_ledger = - start_namada_ledger_node_wait_wasm(&test, Some(0), Some(40))? - .background(); - - let validator_0_rpc = get_actor_rpc(&test, Who::Validator(0)); - - // 1.1 Delegate some token - let tx_args = vec![ - "bond", - "--validator", - "validator-0", - "--source", - BERTHA, - "--amount", - "900", - "--node", - &validator_0_rpc, - ]; - let mut client = run!(test, Bin::Client, tx_args, Some(40))?; - client.exp_string(TX_APPLIED_SUCCESS)?; - client.assert_success(); - - // 2. 
Submit valid proposal - let albert = find_address(&test, ALBERT)?; - let valid_proposal_json_path = prepare_proposal_data( - &test, - 0, - albert, - TestWasms::TxProposalCode.read_bytes(), - 12, - ); - let validator_one_rpc = get_actor_rpc(&test, Who::Validator(0)); - - let submit_proposal_args = vec![ - "init-proposal", - "--data-path", - valid_proposal_json_path.to_str().unwrap(), - "--gas-limit", - "2000000", - "--node", - &validator_one_rpc, - ]; - let mut client = run!(test, Bin::Client, submit_proposal_args, Some(40))?; - client.exp_string(TX_APPLIED_SUCCESS)?; - client.assert_success(); - - // Wait for the proposal to be committed - let mut ledger = bg_ledger.foreground(); - ledger.exp_string("Committed block hash")?; - let _bg_ledger = ledger.background(); - - // 3. Query the proposal - let proposal_query_args = vec![ - "query-proposal", - "--proposal-id", - "0", - "--node", - &validator_one_rpc, - ]; - - let mut client = run!(test, Bin::Client, proposal_query_args, Some(40))?; - client.exp_string("Proposal Id: 0")?; - client.assert_success(); - - // 4. Query token balance proposal author (submitted funds) - let query_balance_args = vec![ - "balance", - "--owner", - ALBERT, - "--token", - NAM, - "--node", - &validator_one_rpc, - ]; - - let mut client = run!(test, Bin::Client, query_balance_args, Some(40))?; - client.exp_string("nam: 1999500")?; - client.assert_success(); - - // 5. Query token balance governance - let query_balance_args = vec![ - "balance", - "--owner", - GOVERNANCE_ADDRESS, - "--token", - NAM, - "--node", - &validator_one_rpc, - ]; - - let mut client = run!(test, Bin::Client, query_balance_args, Some(40))?; - client.exp_string("nam: 500")?; - client.assert_success(); - - // 6. 
Submit an invalid proposal - // proposal is invalid due to voting_end_epoch - voting_start_epoch < 3 - let albert = find_address(&test, ALBERT)?; - let invalid_proposal_json = prepare_proposal_data( - &test, - 1, - albert, - TestWasms::TxProposalCode.read_bytes(), - 1, - ); - - let submit_proposal_args = vec![ - "init-proposal", - "--data-path", - invalid_proposal_json.to_str().unwrap(), - "--node", - &validator_one_rpc, - ]; - let mut client = run!(test, Bin::Client, submit_proposal_args, Some(40))?; - client.exp_regex( - "Proposal data are invalid: Invalid proposal start epoch: 1 must be \ - greater than current epoch .* and a multiple of 3", - )?; - client.assert_failure(); - - // 7. Check invalid proposal was not submitted - let proposal_query_args = vec![ - "query-proposal", - "--proposal-id", - "1", - "--node", - &validator_one_rpc, - ]; - - let mut client = run!(test, Bin::Client, proposal_query_args, Some(40))?; - client.exp_string("No proposal found with id: 1")?; - client.assert_success(); - - // 8. Query token balance (funds shall not be submitted) - let query_balance_args = vec![ - "balance", - "--owner", - ALBERT, - "--token", - NAM, - "--node", - &validator_one_rpc, - ]; - - let mut client = run!(test, Bin::Client, query_balance_args, Some(40))?; - client.exp_string("nam: 1999500")?; - client.assert_success(); - - // 9. 
Send a yay vote from a validator - let mut epoch = get_epoch(&test, &validator_one_rpc).unwrap(); - while epoch.0 <= 13 { - sleep(10); - epoch = get_epoch(&test, &validator_one_rpc).unwrap(); - } - - let submit_proposal_vote = vec![ - "vote-proposal", - "--proposal-id", - "0", - "--vote", - "yay", - "--address", - "validator-0", - "--node", - &validator_one_rpc, - ]; - - let mut client = run_as!( - test, - Who::Validator(0), - Bin::Client, - submit_proposal_vote, - Some(15) - )?; - client.exp_string(TX_APPLIED_SUCCESS)?; - client.assert_success(); - - let submit_proposal_vote_delagator = vec![ - "vote-proposal", - "--proposal-id", - "0", - "--vote", - "nay", - "--address", - BERTHA, - "--node", - &validator_one_rpc, - ]; - - let mut client = - run!(test, Bin::Client, submit_proposal_vote_delagator, Some(40))?; - client.exp_string(TX_APPLIED_SUCCESS)?; - client.assert_success(); - - // 10. Send a yay vote from a non-validator/non-delegator user - let submit_proposal_vote = vec![ - "vote-proposal", - "--proposal-id", - "0", - "--vote", - "yay", - "--address", - ALBERT, - "--node", - &validator_one_rpc, - ]; - - // this is valid because the client filter ALBERT delegation and there are - // none - let mut client = run!(test, Bin::Client, submit_proposal_vote, Some(15))?; - client.exp_string("Voter address must have delegations")?; - client.assert_failure(); - - // 11. 
Query the proposal and check the result - let mut epoch = get_epoch(&test, &validator_one_rpc).unwrap(); - while epoch.0 <= 25 { - sleep(10); - epoch = get_epoch(&test, &validator_one_rpc).unwrap(); - } - - let query_proposal = vec![ - "query-proposal-result", - "--proposal-id", - "0", - "--node", - &validator_one_rpc, - ]; - - let mut client = run!(test, Bin::Client, query_proposal, Some(15))?; - client.exp_string("Proposal Id: 0")?; - client.exp_string( - "passed with 100000.000000 yay votes, 900.000000 nay votes and \ - 0.000000 abstain votes, total voting power: 100900.000000, threshold \ - (fraction) of total voting power needed to tally: 67266.666667 \ - (0.666666666669)", - )?; - client.assert_success(); - - // 12. Wait proposal grace and check proposal author funds - let mut epoch = get_epoch(&test, &validator_one_rpc).unwrap(); - while epoch.0 < 31 { - sleep(10); - epoch = get_epoch(&test, &validator_one_rpc).unwrap(); - } - - let query_balance_args = vec![ - "balance", - "--owner", - ALBERT, - "--token", - NAM, - "--node", - &validator_one_rpc, - ]; - - let mut client = run!(test, Bin::Client, query_balance_args, Some(30))?; - client.exp_string("nam: 200000")?; - client.assert_success(); - - // 13. Check if governance funds are 0 - let query_balance_args = vec![ - "balance", - "--owner", - GOVERNANCE_ADDRESS, - "--token", - NAM, - "--node", - &validator_one_rpc, - ]; - - let mut client = run!(test, Bin::Client, query_balance_args, Some(30))?; - client.exp_string("nam: 0")?; - client.assert_success(); - - // // 14. Query parameters - let query_protocol_parameters = - vec!["query-protocol-parameters", "--node", &validator_one_rpc]; - - let mut client = - run!(test, Bin::Client, query_protocol_parameters, Some(30))?; - client.exp_regex(".*Min. 
proposal grace epochs: 9.*")?; - client.assert_success(); - - Ok(()) -} - -/// Test submission and vote of a PGF proposal -/// -/// 1 - Submit two proposals -/// 2 - Check balance -/// 3 - Vote for the accepted proposals -/// 4 - Check one proposal passed and the other one didn't -/// 5 - Check funds -#[test] -fn pgf_governance_proposal() -> Result<()> { - let test = setup::network( - |mut genesis, base_dir: &_| { - genesis.parameters.parameters.epochs_per_year = - epochs_per_year_from_min_duration(1); - genesis.parameters.parameters.max_proposal_bytes = - Default::default(); - genesis.parameters.parameters.min_num_of_blocks = 4; - genesis.parameters.parameters.max_expected_time_per_block = 1; - setup::set_validators(1, genesis, base_dir, |_| 0) - }, - None, - )?; - - set_ethereum_bridge_mode( - &test, - &test.net.chain_id, - Who::Validator(0), - ethereum_bridge::ledger::Mode::Off, - None, - ); - - let namadac_help = vec!["--help"]; - - let mut client = run!(test, Bin::Client, namadac_help, Some(40))?; - client.exp_string("Namada client command line interface.")?; - client.assert_success(); - - // Run the ledger node - let _bg_ledger = - start_namada_ledger_node_wait_wasm(&test, Some(0), Some(40))? 
- .background(); - - let validator_one_rpc = get_actor_rpc(&test, Who::Validator(0)); - - // Delegate some token - let tx_args = vec![ - "bond", - "--validator", - "validator-0", - "--source", - BERTHA, - "--amount", - "900", - "--ledger-address", - &validator_one_rpc, - ]; - let mut client = run!(test, Bin::Client, tx_args, Some(40))?; - client.exp_string(TX_APPLIED_SUCCESS)?; - client.assert_success(); - - // 1 - Submit proposal - let albert = find_address(&test, ALBERT)?; - let pgf_stewards = StewardsUpdate { - add: Some(albert.clone()), - remove: vec![], - }; - - let valid_proposal_json_path = - prepare_proposal_data(&test, 0, albert, pgf_stewards, 12); - let validator_one_rpc = get_actor_rpc(&test, Who::Validator(0)); - - let submit_proposal_args = vec![ - "init-proposal", - "--pgf-stewards", - "--data-path", - valid_proposal_json_path.to_str().unwrap(), - "--ledger-address", - &validator_one_rpc, - ]; - let mut client = run!(test, Bin::Client, submit_proposal_args, Some(40))?; - client.exp_string(TX_APPLIED_SUCCESS)?; - client.assert_success(); - - // 2 - Query the proposal - let proposal_query_args = vec![ - "query-proposal", - "--proposal-id", - "0", - "--ledger-address", - &validator_one_rpc, - ]; - - client = run!(test, Bin::Client, proposal_query_args, Some(40))?; - client.exp_string("Proposal Id: 0")?; - client.assert_success(); - - // Query token balance proposal author (submitted funds) - let query_balance_args = vec![ - "balance", - "--owner", - ALBERT, - "--token", - NAM, - "--ledger-address", - &validator_one_rpc, - ]; - - client = run!(test, Bin::Client, query_balance_args, Some(40))?; - client.exp_string("nam: 1999500")?; - client.assert_success(); - - // Query token balance governance - let query_balance_args = vec![ - "balance", - "--owner", - GOVERNANCE_ADDRESS, - "--token", - NAM, - "--ledger-address", - &validator_one_rpc, - ]; - - client = run!(test, Bin::Client, query_balance_args, Some(40))?; - client.exp_string("nam: 500")?; - 
client.assert_success(); - - // 3 - Send a yay vote from a validator - let mut epoch = get_epoch(&test, &validator_one_rpc).unwrap(); - while epoch.0 <= 13 { - sleep(1); - epoch = get_epoch(&test, &validator_one_rpc).unwrap(); - } - - let albert_address = find_address(&test, ALBERT)?; - let submit_proposal_vote = vec![ - "vote-proposal", - "--proposal-id", - "0", - "--vote", - "yay", - "--address", - "validator-0", - "--ledger-address", - &validator_one_rpc, - ]; - - client = run_as!( - test, - Who::Validator(0), - Bin::Client, - submit_proposal_vote, - Some(15) - )?; - client.exp_string(TX_APPLIED_SUCCESS)?; - client.assert_success(); - - // Send different yay vote from delegator to check majority on 1/3 - let submit_proposal_vote_delagator = vec![ - "vote-proposal", - "--proposal-id", - "0", - "--vote", - "yay", - "--address", - BERTHA, - "--ledger-address", - &validator_one_rpc, - ]; - - let mut client = - run!(test, Bin::Client, submit_proposal_vote_delagator, Some(40))?; - client.exp_string(TX_APPLIED_SUCCESS)?; - client.assert_success(); - - // 4 - Query the proposal and check the result is the one voted by the - // validator (majority) - epoch = get_epoch(&test, &validator_one_rpc).unwrap(); - while epoch.0 <= 25 { - sleep(1); - epoch = get_epoch(&test, &validator_one_rpc).unwrap(); - } - - let query_proposal = vec![ - "query-proposal-result", - "--proposal-id", - "0", - "--ledger-address", - &validator_one_rpc, - ]; - - client = run!(test, Bin::Client, query_proposal, Some(15))?; - client.exp_string("passed")?; - client.assert_success(); - - // 12. 
Wait proposals grace and check proposal author funds - while epoch.0 < 31 { - sleep(2); - epoch = get_epoch(&test, &validator_one_rpc).unwrap(); - } - - let query_balance_args = vec![ - "balance", - "--owner", - ALBERT, - "--token", - NAM, - "--ledger-address", - &validator_one_rpc, - ]; - - client = run!(test, Bin::Client, query_balance_args, Some(30))?; - client.exp_string("nam: 2000000.")?; - client.assert_success(); - - // Check if governance funds are 0 - let query_balance_args = vec![ - "balance", - "--owner", - GOVERNANCE_ADDRESS, - "--token", - NAM, - "--ledger-address", - &validator_one_rpc, - ]; - - client = run!(test, Bin::Client, query_balance_args, Some(30))?; - client.exp_string("nam: 0")?; - client.assert_success(); - - // 14. Query pgf stewards - let query_pgf = vec!["query-pgf", "--node", &validator_one_rpc]; - - let mut client = run!(test, Bin::Client, query_pgf, Some(30))?; - client.exp_string("Pgf stewards:")?; - client.exp_string(&format!("- {}", albert_address))?; - client.exp_string("Reward distribution:")?; - client.exp_string(&format!("- 1 to {}", albert_address))?; - client.exp_string("Pgf fundings: no fundings are currently set.")?; - client.assert_success(); - - // 15 - Submit proposal funding - let albert = find_address(&test, ALBERT)?; - let bertha = find_address(&test, BERTHA)?; - let christel = find_address(&test, CHRISTEL)?; - - let pgf_funding = PgfFunding { - continuous: vec![PGFTarget::Internal(PGFInternalTarget { - amount: token::Amount::from_u64(10), - target: bertha.clone(), - })], - retro: vec![PGFTarget::Internal(PGFInternalTarget { - amount: token::Amount::from_u64(5), - target: christel, - })], - }; - - let valid_proposal_json_path = - prepare_proposal_data(&test, 1, albert, pgf_funding, 36); - let validator_one_rpc = get_actor_rpc(&test, Who::Validator(0)); - - let submit_proposal_args = vec![ - "init-proposal", - "--pgf-funding", - "--data-path", - valid_proposal_json_path.to_str().unwrap(), - "--ledger-address", - 
&validator_one_rpc, - ]; - let mut client = run!(test, Bin::Client, submit_proposal_args, Some(40))?; - client.exp_string(TX_APPLIED_SUCCESS)?; - client.assert_success(); - - // 2 - Query the funding proposal - let proposal_query_args = vec![ - "query-proposal", - "--proposal-id", - "1", - "--ledger-address", - &validator_one_rpc, - ]; - - client = run!(test, Bin::Client, proposal_query_args, Some(40))?; - client.exp_string("Proposal Id: 1")?; - client.assert_success(); - - // 13. Wait proposals grace and check proposal author funds - while epoch.0 < 55 { - sleep(2); - epoch = get_epoch(&test, &validator_one_rpc).unwrap(); - } - - // 14. Query pgf fundings - let query_pgf = vec!["query-pgf", "--node", &validator_one_rpc]; - let mut client = run!(test, Bin::Client, query_pgf, Some(30))?; - client.exp_string("Pgf fundings")?; - client.exp_string(&format!( - "{} for {}", - bertha, - token::Amount::from_u64(10).to_string_native() - ))?; - client.assert_success(); - - Ok(()) -} - -/// Test if a steward can correctly change his distribution reward -#[test] -fn pgf_steward_change_commissions() -> Result<()> { - let test = setup::network( - |mut genesis, base_dir: &_| { - genesis.parameters.parameters.epochs_per_year = - epochs_per_year_from_min_duration(1); - genesis.parameters.parameters.max_proposal_bytes = - Default::default(); - genesis.parameters.parameters.min_num_of_blocks = 4; - genesis.parameters.parameters.max_expected_time_per_block = 1; - genesis.parameters.pgf_params.stewards_inflation_rate = - Dec::from_str("0.1").unwrap(); - genesis.parameters.pgf_params.stewards = - BTreeSet::from_iter([get_established_addr_from_pregenesis( - "albert-key", - base_dir, - &genesis, - ) - .unwrap()]); - setup::set_validators(1, genesis, base_dir, |_| 0) - }, - None, - )?; - - set_ethereum_bridge_mode( - &test, - &test.net.chain_id, - Who::Validator(0), - ethereum_bridge::ledger::Mode::Off, - None, - ); - - let namadac_help = vec!["--help"]; - - let mut client = run!(test, 
Bin::Client, namadac_help, Some(40))?; - client.exp_string("Namada client command line interface.")?; - client.assert_success(); - - // Run the ledger node - let _bg_ledger = - start_namada_ledger_node_wait_wasm(&test, Some(0), Some(40))? - .background(); - - let validator_one_rpc = get_actor_rpc(&test, Who::Validator(0)); - - let albert = find_address(&test, ALBERT)?; - let bertha = find_address(&test, BERTHA)?; - let christel = find_address(&test, CHRISTEL)?; - - // Query pgf stewards - let query_pgf = vec!["query-pgf", "--node", &validator_one_rpc]; - - let mut client = run!(test, Bin::Client, query_pgf, Some(30))?; - client.exp_string("Pgf stewards:")?; - client.exp_string(&format!("- {}", albert))?; - client.exp_string("Reward distribution:")?; - client.exp_string(&format!("- 1 to {}", albert))?; - client.exp_string("Pgf fundings: no fundings are currently set.")?; - client.assert_success(); - - let commission = Commission { - reward_distribution: HashMap::from_iter([ - (albert.clone(), Dec::from_str("0.25").unwrap()), - (bertha.clone(), Dec::from_str("0.70").unwrap()), - (christel.clone(), Dec::from_str("0.05").unwrap()), - ]), - }; - - let commission_path = - prepare_steward_commission_update_data(&test, commission); - - // Update steward commissions - let tx_args = vec![ - "update-steward-rewards", - "--steward", - ALBERT, - "--data-path", - commission_path.to_str().unwrap(), - "--node", - &validator_one_rpc, - ]; - let mut client = run!(test, Bin::Client, tx_args, Some(40))?; - client.exp_string(TX_APPLIED_SUCCESS)?; - client.assert_success(); - - // 14. 
Query pgf stewards - let query_pgf = vec!["query-pgf", "--node", &validator_one_rpc]; - - let mut client = run!(test, Bin::Client, query_pgf, Some(30))?; - client.exp_string("Pgf stewards:")?; - client.exp_string(&format!("- {}", albert))?; - client.exp_string("Reward distribution:")?; - client.exp_string(&format!("- 0.25 to {}", albert))?; - client.exp_string(&format!("- 0.7 to {}", bertha))?; - client.exp_string(&format!("- 0.05 to {}", christel))?; - client.exp_string("Pgf fundings: no fundings are currently set.")?; - client.assert_success(); + ); Ok(()) } +/// Test that multiple txs submitted in the same block all get the tx result. +/// /// In this test we: -/// 1. Run the ledger node -/// 2. Create an offline proposal -/// 3. Create an offline vote -/// 4. Tally offline +/// 1. Run the ledger node with 10s consensus timeout +/// 2. Spawn threads each submitting token transfer tx #[test] -fn proposal_offline() -> Result<()> { - let working_dir = setup::working_dir(); - let test = setup::network( - |mut genesis, base_dir: &_| { - genesis.parameters.parameters.epochs_per_year = - epochs_per_year_from_min_duration(1); - genesis.parameters.parameters.max_proposal_bytes = - Default::default(); - genesis.parameters.parameters.min_num_of_blocks = 4; - genesis.parameters.parameters.max_expected_time_per_block = 1; - genesis.parameters.parameters.vp_allowlist = - Some(get_all_wasms_hashes(&working_dir, Some("vp_"))); - // Enable tx allowlist to test the execution of a - // non-allowed tx by governance - genesis.parameters.parameters.tx_allowlist = - Some(get_all_wasms_hashes(&working_dir, Some("tx_"))); +fn ledger_many_txs_in_a_block() -> Result<()> { + let test = Arc::new(setup::network( + |genesis, base_dir: &_| { setup::set_validators(1, genesis, base_dir, |_| 0) }, - None, - )?; + // Set 10s consensus timeout to have more time to submit txs + Some("10s"), + )?); set_ethereum_bridge_mode( &test, @@ -2660,120 +1147,53 @@ fn proposal_offline() -> Result<()> { ); // 
1. Run the ledger node - let _bg_ledger = + let bg_ledger = start_namada_ledger_node_wait_wasm(&test, Some(0), Some(40))? .background(); - let validator_one_rpc = get_actor_rpc(&test, Who::Validator(0)); + let validator_one_rpc = Arc::new(get_actor_rpc(&test, Who::Validator(0))); - // 1.1 Delegate some token - let tx_args = vec![ - "bond", - "--validator", - "validator-0", + // A token transfer tx args + let tx_args = Arc::new(vec![ + "transfer", "--source", + BERTHA, + "--target", ALBERT, + "--token", + NAM, "--amount", - "900", - "--node", - &validator_one_rpc, - ]; - let mut client = run!(test, Bin::Client, tx_args, Some(40))?; - client.exp_string(TX_APPLIED_SUCCESS)?; - client.assert_success(); - - // 2. Create an offline proposal - let albert = find_address(&test, ALBERT)?; - let valid_proposal_json = json!( - { - "content": { - "title": "TheTitle", - "authors": "test@test.com", - "discussions-to": "www.github.com/anoma/aip/1", - "created": "2022-03-10T08:54:37Z", - "license": "MIT", - "abstract": "Ut convallis eleifend orci vel venenatis. Duis vulputate metus in lacus sollicitudin vestibulum. Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices. Quisque viverra varius cursus. Praesent sed mauris gravida, pharetra turpis non, gravida eros. Nullam sed ex justo. Ut at placerat ipsum, sit amet rhoncus libero. Sed blandit non purus non suscipit. Phasellus sed quam nec augue bibendum bibendum ut vitae urna. Sed odio diam, ornare nec sapien eget, congue viverra enim.", - "motivation": "Ut convallis eleifend orci vel venenatis. Duis vulputate metus in lacus sollicitudin vestibulum. Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices.", - "details": "Ut convallis eleifend orci vel venenatis. Duis vulputate metus in lacus sollicitudin vestibulum. 
Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices. Quisque viverra varius cursus. Praesent sed mauris gravida, pharetra turpis non, gravida eros.", - "requires": "2" - }, - "author": albert, - "tally_epoch": 3_u64, - } - ); - let valid_proposal_json_path = - test.test_dir.path().join("valid_proposal.json"); - write_json_file(valid_proposal_json_path.as_path(), valid_proposal_json); - - let mut epoch = get_epoch(&test, &validator_one_rpc).unwrap(); - while epoch.0 <= 3 { - sleep(1); - epoch = get_epoch(&test, &validator_one_rpc).unwrap(); - } - - let validator_one_rpc = get_actor_rpc(&test, Who::Validator(0)); - - let offline_proposal_args = vec![ - "init-proposal", - "--data-path", - valid_proposal_json_path.to_str().unwrap(), - "--offline", - "--signing-keys", - ALBERT_KEY, - "--output-folder-path", - test.test_dir.path().to_str().unwrap(), - "--node", - &validator_one_rpc, - ]; - - let mut client = run!(test, Bin::Client, offline_proposal_args, Some(15))?; - let (_, matched) = client.exp_regex("Proposal serialized to: .*")?; - client.assert_success(); - - let proposal_path = matched - .split(':') - .collect::>() - .get(1) - .unwrap() - .trim() - .to_string(); - - // 3. Generate an offline yay vote - let submit_proposal_vote = vec![ - "vote-proposal", - "--data-path", - &proposal_path, - "--vote", - "yay", - "--address", - ALBERT, - "--offline", + "1.01", "--signing-keys", - ALBERT_KEY, - "--output-folder-path", - test.test_dir.path().to_str().unwrap(), - "--node", - &validator_one_rpc, - ]; - - let mut client = run!(test, Bin::Client, submit_proposal_vote, Some(15))?; - client.exp_string("Proposal vote serialized to: ")?; - client.assert_success(); - - // 4. 
Compute offline tally - let tally_offline = vec![ - "query-proposal-result", - "--data-path", - test.test_dir.path().to_str().unwrap(), - "--offline", + BERTHA_KEY, "--node", - &validator_one_rpc, - ]; + ]); - let mut client = run!(test, Bin::Client, tally_offline, Some(15))?; - client.exp_string("Parsed 1 votes")?; - client.exp_string("rejected with 900.000000 yay votes")?; - client.assert_success(); + // 2. Spawn threads each submitting token transfer tx + // We collect to run the threads in parallel. + #[allow(clippy::needless_collect)] + let tasks: Vec> = (0..4) + .map(|_| { + let test = Arc::clone(&test); + let validator_one_rpc = Arc::clone(&validator_one_rpc); + let tx_args = Arc::clone(&tx_args); + std::thread::spawn(move || { + let mut args = (*tx_args).clone(); + args.push(&*validator_one_rpc); + let mut client = run!(*test, Bin::Client, args, Some(80))?; + client.exp_string(TX_APPLIED_SUCCESS)?; + client.assert_success(); + let res: Result<()> = Ok(()); + res + }) + }) + .collect(); + for task in tasks.into_iter() { + task.join().unwrap()?; + } + // Wait to commit a block + let mut ledger = bg_ledger.foreground(); + ledger.exp_regex(r"Committed block hash.*, height: [0-9]+")?; Ok(()) } @@ -3102,150 +1522,6 @@ fn double_signing_gets_slashed() -> Result<()> { Ok(()) } -/// In this test we: -/// 1. Run the ledger node -/// 2. For some transactions that need signature authorization: 2a. Generate a -/// new key for an implicit account. 2b. Send some funds to the implicit -/// account. 2c. Submit the tx with the implicit account as the source, that -/// requires that the account has revealed its PK. This should be done by the -/// client automatically. 2d. Submit same tx again, this time the client -/// shouldn't reveal again. -#[test] -fn implicit_account_reveal_pk() -> Result<()> { - let test = setup::single_node_net()?; - - // 1. Run the ledger node - let _bg_ledger = - start_namada_ledger_node_wait_wasm(&test, Some(0), Some(40))? - .background(); - - // 2. 
Some transactions that need signature authorization: - let validator_0_rpc = get_actor_rpc(&test, Who::Validator(0)); - let txs_args: Vec Vec>> = vec![ - // A token transfer tx - Box::new(|source| { - [ - "transfer", - "--source", - source, - "--target", - ALBERT, - "--token", - NAM, - "--amount", - "10.1", - "--signing-keys", - source, - "--node", - &validator_0_rpc, - ] - .into_iter() - .map(|x| x.to_owned()) - .collect() - }), - // A bond - Box::new(|source| { - vec![ - "bond", - "--validator", - "validator-0", - "--source", - source, - "--amount", - "10.1", - "--signing-keys", - source, - "--node", - &validator_0_rpc, - ] - .into_iter() - .map(|x| x.to_owned()) - .collect() - }), - // Submit proposal - Box::new(|source| { - // Gen data for proposal tx - let author = find_address(&test, source).unwrap(); - let valid_proposal_json_path = prepare_proposal_data( - &test, - 0, - author, - TestWasms::TxProposalCode.read_bytes(), - 12, - ); - vec![ - "init-proposal", - "--data-path", - valid_proposal_json_path.to_str().unwrap(), - "--signing-keys", - source, - "--gas-limit", - "2000000", - "--node", - &validator_0_rpc, - ] - .into_iter() - .map(|x| x.to_owned()) - .collect() - }), - ]; - - for (ix, tx_args) in txs_args.into_iter().enumerate() { - let key_alias = format!("key-{ix}"); - - // 2a. Generate a new key for an implicit account. - let mut cmd = run!( - test, - Bin::Wallet, - &[ - "gen", - "--alias", - &key_alias, - "--unsafe-dont-encrypt", - "--raw" - ], - Some(20), - )?; - cmd.assert_success(); - - // Apply the key_alias once the key is generated to obtain tx args - let tx_args = tx_args(&key_alias); - - // 2b. Send some funds to the implicit account. - let credit_args = [ - "transfer", - "--source", - BERTHA, - "--target", - &key_alias, - "--token", - NAM, - "--amount", - "1000", - "--signing-keys", - BERTHA_KEY, - "--node", - &validator_0_rpc, - ]; - let mut client = run!(test, Bin::Client, credit_args, Some(40))?; - client.assert_success(); - - // 2c. 
Submit the tx with the implicit account as the source. - let expected_reveal = "Submitting a tx to reveal the public key"; - let mut client = run!(test, Bin::Client, &tx_args, Some(40))?; - client.exp_string(expected_reveal)?; - client.assert_success(); - - // 2d. Submit same tx again, this time the client shouldn't reveal - // again. - let mut client = run!(test, Bin::Client, tx_args, Some(40))?; - let unread = client.exp_eof()?; - assert!(!unread.contains(expected_reveal)) - } - - Ok(()) -} - #[test] fn test_epoch_sleep() -> Result<()> { // Use slightly longer epochs to give us time to sleep @@ -3290,7 +1566,7 @@ fn test_epoch_sleep() -> Result<()> { /// Prepare proposal data in the test's temp dir from the given source address. /// This can be submitted with "init-proposal" command. pub fn prepare_proposal_data( - test: &setup::Test, + test_dir: impl AsRef, id: u64, source: Address, data: impl serde::Serialize, @@ -3313,29 +1589,17 @@ pub fn prepare_proposal_data( "author": source, "voting_start_epoch": start_epoch, "voting_end_epoch": start_epoch + 12_u64, - "grace_epoch": start_epoch + 12u64 + 6_u64, + "activation_epoch": start_epoch + 12u64 + 6_u64, }, "data": data }); let valid_proposal_json_path = - test.test_dir.path().join("valid_proposal.json"); + test_dir.as_ref().join("valid_proposal.json"); write_json_file(valid_proposal_json_path.as_path(), valid_proposal_json); valid_proposal_json_path } -/// Prepare steward commission reward in the test temp directory. -/// This can be submitted with "update-steward-commission" command. 
-pub fn prepare_steward_commission_update_data( - test: &setup::Test, - data: impl serde::Serialize, -) -> PathBuf { - let valid_commission_json_path = - test.test_dir.path().join("commission.json"); - write_json_file(valid_commission_json_path.as_path(), &data); - valid_commission_json_path -} - #[test] fn deactivate_and_reactivate_validator() -> Result<()> { let pipeline_len = 2; @@ -3376,6 +1640,13 @@ fn deactivate_and_reactivate_validator() -> Result<()> { ethereum_bridge::ledger::Mode::Off, None, ); + set_ethereum_bridge_mode( + &test, + &test.net.chain_id, + Who::Validator(1), + ethereum_bridge::ledger::Mode::Off, + None, + ); // 1. Run the ledger node let _bg_validator_0 = @@ -3416,10 +1687,17 @@ fn deactivate_and_reactivate_validator() -> Result<()> { client.assert_success(); let deactivate_epoch = get_epoch(&test, &validator_1_rpc)?; + #[allow(clippy::disallowed_methods)] let start = Instant::now(); let loop_timeout = Duration::new(120, 0); loop { - if Instant::now().duration_since(start) > loop_timeout { + if { + #[allow(clippy::disallowed_methods)] + Instant::now() + } + .duration_since(start) + > loop_timeout + { panic!( "Timed out waiting for epoch: {}", deactivate_epoch + pipeline_len @@ -3459,10 +1737,17 @@ fn deactivate_and_reactivate_validator() -> Result<()> { client.assert_success(); let reactivate_epoch = get_epoch(&test, &validator_1_rpc)?; + #[allow(clippy::disallowed_methods)] let start = Instant::now(); let loop_timeout = Duration::new(120, 0); loop { - if Instant::now().duration_since(start) > loop_timeout { + if { + #[allow(clippy::disallowed_methods)] + Instant::now() + } + .duration_since(start) + > loop_timeout + { panic!( "Timed out waiting for epoch: {}", reactivate_epoch + pipeline_len @@ -3489,117 +1774,6 @@ fn deactivate_and_reactivate_validator() -> Result<()> { Ok(()) } -/// Change validator metadata -#[test] -fn change_validator_metadata() -> Result<()> { - let test = setup::single_node_net()?; - - set_ethereum_bridge_mode( - 
&test, - &test.net.chain_id, - Who::Validator(0), - ethereum_bridge::ledger::Mode::Off, - None, - ); - - // 1. Run the ledger node - let _bg_ledger = - start_namada_ledger_node_wait_wasm(&test, Some(0), Some(40))? - .background(); - - let validator_0_rpc = get_actor_rpc(&test, Who::Validator(0)); - - // 2. Query the validator metadata loaded from genesis - let metadata_query_args = vec![ - "validator-metadata", - "--validator", - "validator-0", - "--node", - &validator_0_rpc, - ]; - let mut client = - run!(test, Bin::Client, metadata_query_args.clone(), Some(40))?; - client.exp_string("Email:")?; - client.exp_string("No description")?; - client.exp_string("No website")?; - client.exp_string("No discord handle")?; - client.exp_string("commission rate:")?; - client.exp_string("max change per epoch:")?; - client.assert_success(); - - // 3. Add some metadata to the validator - let metadata_change_args = vec![ - "change-metadata", - "--validator", - "validator-0", - "--email", - "theokayestvalidator@namada.net", - "--description", - "We are just an okay validator node trying to get by", - "--website", - "theokayestvalidator.com", - "--node", - &validator_0_rpc, - ]; - let mut client = run_as!( - test, - Who::Validator(0), - Bin::Client, - metadata_change_args, - Some(40) - )?; - client.exp_string(TX_APPLIED_SUCCESS)?; - client.assert_success(); - - // 4. Query the metadata after the change - let mut client = - run!(test, Bin::Client, metadata_query_args.clone(), Some(40))?; - client.exp_string("Email: theokayestvalidator@namada.net")?; - client.exp_string( - "Description: We are just an okay validator node trying to get by", - )?; - client.exp_string("Website: theokayestvalidator.com")?; - client.exp_string("No discord handle")?; - client.exp_string("commission rate:")?; - client.exp_string("max change per epoch:")?; - client.assert_success(); - - // 5. 
Remove the validator website - let metadata_change_args = vec![ - "change-metadata", - "--validator", - "validator-0", - "--website", - "", - "--node", - &validator_0_rpc, - ]; - let mut client = run_as!( - test, - Who::Validator(0), - Bin::Client, - metadata_change_args, - Some(40) - )?; - client.exp_string(TX_APPLIED_SUCCESS)?; - client.assert_success(); - - // 6. Query the metadata to see that the validator website is removed - let mut client = - run!(test, Bin::Client, metadata_query_args.clone(), Some(40))?; - client.exp_string("Email: theokayestvalidator@namada.net")?; - client.exp_string( - "Description: We are just an okay validator node trying to get by", - )?; - client.exp_string("No website")?; - client.exp_string("No discord handle")?; - client.exp_string("commission rate:")?; - client.exp_string("max change per epoch:")?; - client.assert_success(); - - Ok(()) -} - #[test] fn test_invalid_validator_txs() -> Result<()> { let pipeline_len = 2; @@ -3717,10 +1891,17 @@ fn test_invalid_validator_txs() -> Result<()> { client.assert_success(); let deactivate_epoch = get_epoch(&test, &validator_1_rpc)?; + #[allow(clippy::disallowed_methods)] let start = Instant::now(); let loop_timeout = Duration::new(120, 0); loop { - if Instant::now().duration_since(start) > loop_timeout { + if { + #[allow(clippy::disallowed_methods)] + Instant::now() + } + .duration_since(start) + > loop_timeout + { panic!( "Timed out waiting for epoch: {}", deactivate_epoch + pipeline_len @@ -3888,6 +2069,7 @@ fn change_consensus_key() -> Result<()> { Ok(()) } + #[test] fn proposal_change_shielded_reward() -> Result<()> { let test = setup::network( @@ -3907,9 +2089,10 @@ fn proposal_change_shielded_reward() -> Result<()> { ); // 1. Run the ledger node - let bg_ledger = - start_namada_ledger_node_wait_wasm(&test, Some(0), Some(40))? 
- .background(); + let mut ledger = + start_namada_ledger_node_wait_wasm(&test, Some(0), Some(40))?; + ledger.exp_string("Committed block hash")?; + let bg_ledger = ledger.background(); let validator_0_rpc = get_actor_rpc(&test, Who::Validator(0)); @@ -3932,7 +2115,7 @@ fn proposal_change_shielded_reward() -> Result<()> { // 2. Submit valid proposal let albert = find_address(&test, ALBERT)?; let valid_proposal_json_path = prepare_proposal_data( - &test, + test.test_dir.path(), 0, albert, TestWasms::TxProposalMaspRewards.read_bytes(), diff --git a/crates/tests/src/e2e/setup.rs b/crates/tests/src/e2e/setup.rs index 4d996052d2..267ea15d1d 100644 --- a/crates/tests/src/e2e/setup.rs +++ b/crates/tests/src/e2e/setup.rs @@ -1,4 +1,3 @@ -use std::collections::HashMap; use std::ffi::OsStr; use std::fmt::Display; use std::fs::{create_dir_all, File, OpenOptions}; @@ -28,6 +27,7 @@ use namada_apps::config::genesis::{templates, transactions, GenesisAddress}; use namada_apps::config::{ethereum_bridge, genesis, Config}; use namada_apps::{config, wallet}; use namada_core::address::Address; +use namada_core::collections::HashMap; use namada_core::key::{RefTo, SchemeType}; use namada_core::string_encoding::StringEncoded; use namada_core::token::NATIVE_MAX_DECIMAL_PLACES; @@ -869,6 +869,7 @@ impl NamadaCmd { } /// Assert that the process exited with failure + #[allow(dead_code)] pub fn assert_failure(&mut self) { // Make sure that there is no unread output first let _ = self.exp_eof().unwrap(); @@ -1230,7 +1231,7 @@ where #[allow(dead_code)] pub mod constants { // Paths to the WASMs used for tests - pub use namada_sdk::tx::{TX_IBC_WASM, TX_TRANSFER_WASM, VP_USER_WASM}; + pub use namada_sdk::tx::TX_IBC_WASM; // User addresses aliases pub const ALBERT: &str = "Albert"; diff --git a/crates/tests/src/integration.rs b/crates/tests/src/integration.rs index 1a7c84dbfb..00423e8dc0 100644 --- a/crates/tests/src/integration.rs +++ b/crates/tests/src/integration.rs @@ -1,2 +1,4 @@ +mod helpers; 
+mod ledger_tests; mod masp; mod setup; diff --git a/crates/tests/src/integration/helpers.rs b/crates/tests/src/integration/helpers.rs new file mode 100644 index 0000000000..b8268858e7 --- /dev/null +++ b/crates/tests/src/integration/helpers.rs @@ -0,0 +1,68 @@ +use std::path::PathBuf; +use std::str::FromStr; + +use eyre::eyre; +use namada_apps::node::ledger::shell::testing::client::run; +use namada_apps::node::ledger::shell::testing::node::MockNode; +use namada_apps::node::ledger::shell::testing::utils::{Bin, CapturedOutput}; +use namada_core::address::Address; + +/// Query the wallet to get an address from a given alias. +pub fn find_address( + node: &MockNode, + alias: impl AsRef, +) -> eyre::Result
{ + let captured = CapturedOutput::of(|| { + run( + node, + Bin::Wallet, + vec!["find", "--addr", "--alias", alias.as_ref()], + ) + }); + assert!(captured.result.is_ok()); + assert!(captured.contains("Found transparent address:")); + let matched = captured.matches("\".*\": .*").unwrap(); + let address_str = strip_trailing_newline(matched) + .trim() + .rsplit_once(' ') + .unwrap() + .1; + let address = Address::from_str(address_str).map_err(|e| { + eyre!(format!( + "Address: {} parsed from {}, Error: {}", + address_str, matched, e, + )) + })?; + println!("Found {}", address); + Ok(address) +} + +fn strip_trailing_newline(input: &str) -> &str { + input + .strip_suffix("\r\n") + .or_else(|| input.strip_suffix('\n')) + .unwrap_or(input) +} + +pub fn prepare_steward_commission_update_data( + test_dir: &std::path::Path, + data: impl serde::Serialize, +) -> PathBuf { + let valid_commission_json_path = test_dir.join("commission.json"); + write_json_file(valid_commission_json_path.as_path(), &data); + valid_commission_json_path +} + +fn write_json_file(proposal_path: &std::path::Path, proposal_content: T) +where + T: serde::Serialize, +{ + let intent_writer = std::fs::OpenOptions::new() + .create(true) + .write(true) + .truncate(true) + .open(proposal_path) + .unwrap(); + + serde_json::to_writer(intent_writer, &proposal_content).unwrap(); +} diff --git a/crates/tests/src/integration/ledger_tests.rs b/crates/tests/src/integration/ledger_tests.rs new file mode 100644 index 0000000000..4b6e2c2b7b --- /dev/null +++ b/crates/tests/src/integration/ledger_tests.rs @@ -0,0 +1,1513 @@ +use std::collections::BTreeSet; +use std::str::FromStr; + +use assert_matches::assert_matches; +use borsh_ext::BorshSerializeExt; +use color_eyre::eyre::Result; +use data_encoding::HEXLOWER; +use namada::core::collections::HashMap; +use namada::token; +use namada_apps::node::ledger::shell::testing::client::run; +use namada_apps::node::ledger::shell::testing::utils::{Bin, CapturedOutput}; +use 
namada_apps::wallet::defaults; +use namada_core::dec::Dec; +use namada_core::storage::Epoch; +use namada_core::token::NATIVE_MAX_DECIMAL_PLACES; +use namada_sdk::tx::{TX_TRANSFER_WASM, VP_USER_WASM}; +use namada_test_utils::TestWasms; +use test_log::test; + +use crate::e2e::ledger_tests::prepare_proposal_data; +use crate::e2e::setup::constants::{ + ALBERT, ALBERT_KEY, BERTHA, BERTHA_KEY, CHRISTEL, CHRISTEL_KEY, DAEWON, + ESTER, GOVERNANCE_ADDRESS, NAM, +}; +use crate::integration::helpers::{ + find_address, prepare_steward_commission_update_data, +}; +use crate::integration::setup; +use crate::strings::{TX_APPLIED_SUCCESS, TX_FAILED, TX_REJECTED}; +use crate::tx::tx_host_env::gov_storage::proposal::{ + PGFInternalTarget, PGFTarget, +}; +use crate::tx::tx_host_env::governance::cli::onchain::{ + PgfFunding, StewardsUpdate, +}; +use crate::tx::tx_host_env::governance::pgf::cli::steward::Commission; + +/// In this test we: +/// 1. Run the ledger node +/// 2. Submit a token transfer tx +/// 3. Submit a transaction to update an account's validity predicate +/// 4. Submit a custom tx +/// 5. Submit a tx to initialize a new account +/// 6. Submit a tx to withdraw from faucet account (requires PoW challenge +/// solution) +/// 7. Query token balance +/// 8. Query the raw bytes of a storage key +#[test] +fn ledger_txs_and_queries() -> Result<()> { + // This address doesn't matter for tests. But an argument is required. 
+ let validator_one_rpc = "http://127.0.0.1:26567"; + + let (node, _services) = setup::setup()?; + let transfer = token::Transfer { + source: defaults::bertha_address(), + target: defaults::albert_address(), + token: node.native_token(), + amount: token::DenominatedAmount::new( + token::Amount::native_whole(10), + token::NATIVE_MAX_DECIMAL_PLACES.into(), + ), + key: None, + shielded: None, + } + .serialize_to_vec(); + let tx_data_path = node.test_dir.path().join("tx.data"); + std::fs::write(&tx_data_path, transfer).unwrap(); + let tx_data_path = tx_data_path.to_string_lossy(); + + let multisig_account = + format!("{},{},{}", BERTHA_KEY, ALBERT_KEY, CHRISTEL_KEY); + + let txs_args = vec![ + // 2. Submit a token transfer tx (from an established account) + vec![ + "transfer", + "--source", + BERTHA, + "--target", + ALBERT, + "--token", + NAM, + "--amount", + "10.1", + "--signing-keys", + BERTHA_KEY, + "--node", + &validator_one_rpc, + ], + // Submit a token transfer tx (from an ed25519 implicit account) + vec![ + "transfer", + "--source", + DAEWON, + "--target", + ALBERT, + "--token", + NAM, + "--amount", + "10.1", + "--signing-keys", + DAEWON, + "--node", + &validator_one_rpc, + ], + // Submit a token transfer tx (from a secp256k1 implicit account) + vec![ + "transfer", + "--source", + ESTER, + "--target", + ALBERT, + "--token", + NAM, + "--amount", + "10.1", + "--node", + &validator_one_rpc, + ], + // 3. Submit a transaction to update an account's validity + // predicate + vec![ + "update-account", + "--address", + BERTHA, + "--code-path", + VP_USER_WASM, + "--signing-keys", + BERTHA_KEY, + "--node", + &validator_one_rpc, + ], + // 4. Submit a custom tx + vec![ + "tx", + "--code-path", + TX_TRANSFER_WASM, + "--data-path", + &tx_data_path, + "--owner", + BERTHA, + "--signing-keys", + BERTHA_KEY, + "--node", + &validator_one_rpc, + ], + // 5. 
Submit a tx to initialize a new account + vec![ + "init-account", + "--public-keys", + // Value obtained from `namada::core::key::ed25519::tests::gen_keypair` + "tpknam1qpqfzxu3gt05jx2mvg82f4anf90psqerkwqhjey4zlqv0qfgwuvkzt5jhkp", + "--threshold", + "1", + "--code-path", + VP_USER_WASM, + "--alias", + "Test-Account", + "--signing-keys", + BERTHA_KEY, + "--node", + &validator_one_rpc, + ], + // 5. Submit a tx to initialize a new multisig account + vec![ + "init-account", + "--public-keys", + &multisig_account, + "--threshold", + "2", + "--code-path", + VP_USER_WASM, + "--alias", + "Test-Account-2", + "--signing-keys", + BERTHA_KEY, + "--node", + &validator_one_rpc, + ], + ]; + + for tx_args in &txs_args { + for &dry_run in &[true, false] { + let tx_args = if dry_run && tx_args[0] == "tx" { + continue; + } else if dry_run { + [tx_args.clone(), vec!["--dry-run"]].concat() + } else { + tx_args.clone() + }; + let captured = + CapturedOutput::of(|| run(&node, Bin::Client, tx_args)); + assert_matches!(captured.result, Ok(_)); + assert!(captured.contains(TX_APPLIED_SUCCESS)); + } + } + + let query_args_and_expected_response = vec![ + // 7. 
Query token balance + ( + vec![ + "balance", + "--owner", + BERTHA, + "--token", + NAM, + "--node", + &validator_one_rpc, + ], + // expect a decimal + vec![r"nam: \d+(\.\d+)?"], + ), + // Unspecified token expect all tokens from wallet derived from genesis + ( + vec!["balance", "--owner", ALBERT, "--node", &validator_one_rpc], + // expect all genesis tokens, sorted by alias + vec![ + r"apfel: \d+(\.\d+)?", + r"btc: \d+(\.\d+)?", + r"dot: \d+(\.\d+)?", + r"eth: \d+(\.\d+)?", + r"kartoffel: \d+(\.\d+)?", + r"schnitzel: \d+(\.\d+)?", + ], + ), + ( + vec![ + "query-account", + "--owner", + "Test-Account-2", + "--node", + &validator_one_rpc, + ], + vec!["Threshold: 2"], + ), + ]; + + for (query_args, expected) in query_args_and_expected_response { + // Run as a non-validator + let captured = + CapturedOutput::of(|| run(&node, Bin::Client, query_args)); + assert_matches!(captured.result, Ok(_)); + for pattern in expected { + assert!(captured.contains(pattern)); + } + } + + let christel = defaults::christel_address(); + let nam = node.native_token(); + // as setup in `genesis/e2e-tests-single-node.toml` + let christel_balance = token::Amount::native_whole(2000000); + let storage_key = + token::storage_key::balance_key(&nam, &christel).to_string(); + let query_args_and_expected_response = vec![ + // 8. Query storage key and get hex-encoded raw bytes + ( + vec![ + "query-bytes", + "--storage-key", + &storage_key, + "--node", + &validator_one_rpc, + ], + // expect hex encoded of borsh encoded bytes + HEXLOWER.encode(&christel_balance.serialize_to_vec()), + ), + ]; + for (query_args, expected) in query_args_and_expected_response { + let captured = + CapturedOutput::of(|| run(&node, Bin::Client, query_args)); + assert_matches!(captured.result, Ok(_)); + assert!(captured.contains(&expected)); + } + + Ok(()) +} + +/// In this test we: +/// 1. Run the ledger node +/// 2. Submit an invalid transaction (disallowed by state machine) +/// 3. Check that the state was changed +/// 5. 
Submit and invalid transactions (malformed) +#[test] +fn invalid_transactions() -> Result<()> { + // This address doesn't matter for tests. But an argument is required. + let validator_one_rpc = "http://127.0.0.1:26567"; + + let (node, _services) = setup::setup()?; + + // 2. Submit an invalid transaction (trying to transfer tokens should fail + // in the user's VP due to the wrong signer) + let tx_args = vec![ + "transfer", + "--source", + BERTHA, + "--target", + ALBERT, + "--token", + NAM, + "--amount", + "1", + "--signing-keys", + ALBERT_KEY, + "--node", + &validator_one_rpc, + "--force", + ]; + + let captured = CapturedOutput::of(|| run(&node, Bin::Client, tx_args)); + assert_matches!(captured.result, Ok(_)); + assert!(captured.contains(TX_REJECTED)); + + node.finalize_and_commit(); + // There should be state now + { + let locked = node.shell.lock().unwrap(); + assert_ne!(locked.last_state().last_block_app_hash, Default::default()); + } + + let daewon_lower = DAEWON.to_lowercase(); + let tx_args = vec![ + "transfer", + "--source", + DAEWON, + "--signing-keys", + &daewon_lower, + "--target", + ALBERT, + "--token", + BERTHA, + "--amount", + "1000000.1", + // Force to ignore client check that fails on the balance check of the + // source address + "--force", + "--node", + &validator_one_rpc, + ]; + let captured = CapturedOutput::of(|| run(&node, Bin::Client, tx_args)); + assert!(captured.contains(TX_FAILED)); + + Ok(()) +} + +/// Test for claiming PoS inflationary rewards +/// +/// 1. Run the ledger node +/// 2. Wait some epochs while inflationary rewards accumulate in the PoS system +/// 3. Submit a claim-rewards tx +/// 4. Query the validator's balance before and after the claim tx to ensure +/// that reward tokens were actually transferred +#[test] +fn pos_rewards() -> Result<()> { + // This address doesn't matter for tests. But an argument is required. 
+ let validator_one_rpc = "http://127.0.0.1:26567"; + + let (mut node, _services) = setup::setup()?; + // Query the current rewards for the validator self-bond + let tx_args = vec![ + "rewards", + "--validator", + "validator-0", + "--node", + &validator_one_rpc, + ]; + let captured = CapturedOutput::of(|| run(&node, Bin::Client, tx_args)); + assert_matches!(captured.result, Ok(_)); + let res = captured + .matches(r"Current rewards available for claim: [0-9\.]+ NAM") + .expect("Test failed"); + + let words = res.split(' ').collect::>(); + let res = words[words.len() - 2]; + let mut last_amount = token::Amount::from_str( + res.split(' ').last().unwrap(), + NATIVE_MAX_DECIMAL_PLACES, + ) + .unwrap(); + + for _ in 0..4 { + node.next_epoch(); + // Query the current rewards for the validator self-bond and see that it + // grows + let tx_args = vec![ + "rewards", + "--validator", + "validator-0", + "--node", + &validator_one_rpc, + ]; + let captured = CapturedOutput::of(|| run(&node, Bin::Client, tx_args)); + assert_matches!(captured.result, Ok(_)); + let res = captured + .matches(r"Current rewards available for claim: [0-9\.]+ NAM") + .expect("Test failed"); + + let words = res.split(' ').collect::>(); + let res = words[words.len() - 2]; + let amount = token::Amount::from_str( + res.split(' ').last().unwrap(), + NATIVE_MAX_DECIMAL_PLACES, + ) + .unwrap(); + + assert!(amount > last_amount); + last_amount = amount; + } + + // Query the balance of the validator account + let query_balance_args = vec![ + "balance", + "--owner", + "validator-0", + "--token", + NAM, + "--node", + &validator_one_rpc, + ]; + let captured = + CapturedOutput::of(|| run(&node, Bin::Client, query_balance_args)); + assert_matches!(captured.result, Ok(_)); + let res = captured.matches(r"nam: [0-9\.]+").expect("Test failed"); + let amount_pre = token::Amount::from_str( + res.split(' ').last().unwrap(), + NATIVE_MAX_DECIMAL_PLACES, + ) + .unwrap(); + + // Claim rewards + let tx_args = vec![ + 
"claim-rewards", + "--validator", + "validator-0", + "--signing-keys", + "validator-0-account-key", + "--node", + &validator_one_rpc, + ]; + let captured = CapturedOutput::of(|| run(&node, Bin::Client, tx_args)); + println!("{:?}", captured.result); + assert_matches!(captured.result, Ok(_)); + assert!(captured.contains(TX_APPLIED_SUCCESS)); + + // Query the validator balance again and check that the balance has grown + // after claiming + let query_balance_args = vec![ + "balance", + "--owner", + "validator-0", + "--token", + NAM, + "--node", + &validator_one_rpc, + ]; + let captured = + CapturedOutput::of(|| run(&node, Bin::Client, query_balance_args)); + assert_matches!(captured.result, Ok(_)); + let res = captured.matches(r"nam: [0-9\.]+").expect("Test failed"); + let amount_post = token::Amount::from_str( + res.split(' ').last().unwrap(), + NATIVE_MAX_DECIMAL_PLACES, + ) + .unwrap(); + assert!(amount_post > amount_pre); + + Ok(()) +} + +/// Test for PoS bonds and unbonds queries. +/// +/// 1. Run the ledger node +/// 2. Submit a delegation to the genesis validator +/// 3. Wait for epoch 4 +/// 4. Submit another delegation to the genesis validator +/// 5. Submit an unbond of the delegation +/// 6. Wait for epoch 7 +/// 7. Check the output of the bonds query +#[test] +fn test_bond_queries() -> Result<()> { + // This address doesn't matter for tests. But an argument is required. + let validator_one_rpc = "http://127.0.0.1:26567"; + // 1. start the ledger node + let (mut node, _services) = setup::setup()?; + + let validator_alias = "validator-0"; + // 2. Submit a delegation to the genesis validator + let tx_args = vec![ + "bond", + "--validator", + validator_alias, + "--amount", + "100", + "--ledger-address", + &validator_one_rpc, + ]; + let captured = CapturedOutput::of(|| run(&node, Bin::Client, tx_args)); + assert_matches!(captured.result, Ok(_)); + assert!(captured.contains(TX_APPLIED_SUCCESS)); + + // 3. 
Submit a delegation to the genesis validator + let tx_args = vec![ + "bond", + "--validator", + "validator-0", + "--source", + BERTHA, + "--amount", + "200", + "--signing-keys", + BERTHA_KEY, + "--ledger-address", + &validator_one_rpc, + ]; + + let captured = CapturedOutput::of(|| run(&node, Bin::Client, tx_args)); + assert_matches!(captured.result, Ok(_)); + assert!(captured.contains(TX_APPLIED_SUCCESS)); + + // 3. Wait for epoch 4 + for _ in 0..4 { + node.next_epoch(); + } + + // 4. Submit another delegation to the genesis validator + let tx_args = vec![ + "bond", + "--validator", + validator_alias, + "--source", + BERTHA, + "--amount", + "300", + "--signing-keys", + BERTHA_KEY, + "--ledger-address", + &validator_one_rpc, + ]; + let captured = CapturedOutput::of(|| run(&node, Bin::Client, tx_args)); + assert_matches!(captured.result, Ok(_)); + assert!(captured.contains(TX_APPLIED_SUCCESS)); + + // 5. Submit an unbond of the delegation + let tx_args = vec![ + "unbond", + "--validator", + validator_alias, + "--source", + BERTHA, + "--amount", + "412", + "--signing-keys", + BERTHA_KEY, + "--ledger-address", + &validator_one_rpc, + ]; + + let captured = CapturedOutput::of(|| run(&node, Bin::Client, tx_args)); + assert_matches!(captured.result, Ok(_)); + assert!(captured.contains(TX_APPLIED_SUCCESS)); + + let res = captured + .matches(r"withdrawable starting from epoch [0-9]+") + .expect("Test failed"); + let withdraw_epoch = + Epoch::from_str(res.split(' ').last().unwrap()).unwrap(); + + // 6. Wait for withdraw_epoch + loop { + if node.current_epoch() >= withdraw_epoch { + break; + } else { + node.next_epoch(); + } + } + + // 7. 
Check the output of the bonds query + let tx_args = vec!["bonds", "--ledger-address", &validator_one_rpc]; + let captured = CapturedOutput::of(|| run(&node, Bin::Client, tx_args)); + assert_matches!(captured.result, Ok(_)); + assert!(captured.contains( + "All bonds total active: 120188.000000 +All bonds total: 120188.000000 +All bonds total slashed: 0.000000 +All unbonds total active: 412.000000 +All unbonds total: 412.000000 +All unbonds total withdrawable: 412.000000 +All unbonds total slashed: 0.000000", + )); + + Ok(()) +} + +/// In this test we: +/// 1. Run the ledger node +/// 2. Submit a valid proposal +/// 3. Query the proposal +/// 4. Query token balance (submitted funds) +/// 5. Query governance address balance +/// 6. Submit an invalid proposal +/// 7. Check invalid proposal was not accepted +/// 8. Query token balance (funds shall not be submitted) +/// 9. Send a yay vote from a validator +/// 10. Send a yay vote from a normal user +/// 11. Query the proposal and check the result +/// 12. Wait proposal grace and check proposal author funds +/// 13. Check governance address funds are 0 +/// 14. Query the new parameters +/// 15. Try to initialize a new account which should fail +/// 16. Submit a tx that triggers an already existing account which should +/// succeed +#[test] +fn proposal_submission() -> Result<()> { + // This address doesn't matter for tests. But an argument is required. + let validator_one_rpc = "http://127.0.0.1:26567"; + // 1. start the ledger node + let (mut node, _services) = setup::setup()?; + + // 1.1 Delegate some token + let tx_args = vec![ + "bond", + "--validator", + "validator-0", + "--source", + BERTHA, + "--amount", + "900", + "--node", + &validator_one_rpc, + ]; + let captured = CapturedOutput::of(|| run(&node, Bin::Client, tx_args)); + assert_matches!(captured.result, Ok(_)); + assert!(captured.contains(TX_APPLIED_SUCCESS)); + + // 2. 
Submit valid proposal + let albert = defaults::albert_address(); + let valid_proposal_json_path = prepare_proposal_data( + node.test_dir.path(), + 0, + albert.clone(), + TestWasms::TxProposalCode.read_bytes(), + 12, + ); + + let submit_proposal_args = vec![ + "init-proposal", + "--data-path", + valid_proposal_json_path.to_str().unwrap(), + "--gas-limit", + "2000000", + "--node", + &validator_one_rpc, + ]; + let captured = + CapturedOutput::of(|| run(&node, Bin::Client, submit_proposal_args)); + assert_matches!(captured.result, Ok(_)); + assert!(captured.contains(TX_APPLIED_SUCCESS)); + + // 3. Query the proposal + let proposal_query_args = vec![ + "query-proposal", + "--proposal-id", + "0", + "--node", + &validator_one_rpc, + ]; + + let captured = + CapturedOutput::of(|| run(&node, Bin::Client, proposal_query_args)); + assert_matches!(captured.result, Ok(_)); + assert!(captured.contains("Proposal Id: 0")); + + // 4. Query token balance proposal author (submitted funds) + let query_balance_args = vec![ + "balance", + "--owner", + ALBERT, + "--token", + NAM, + "--node", + &validator_one_rpc, + ]; + let captured = + CapturedOutput::of(|| run(&node, Bin::Client, query_balance_args)); + assert_matches!(captured.result, Ok(_)); + assert!(captured.contains("nam: 1979500")); + + // 5. Query token balance governance + let query_balance_args = vec![ + "balance", + "--owner", + GOVERNANCE_ADDRESS, + "--token", + NAM, + "--node", + &validator_one_rpc, + ]; + let captured = + CapturedOutput::of(|| run(&node, Bin::Client, query_balance_args)); + assert_matches!(captured.result, Ok(_)); + assert!(captured.contains("nam: 500")); + + // 6. 
Submit an invalid proposal + // proposal is invalid due to voting_end_epoch - voting_start_epoch < 3 + let invalid_proposal_json = prepare_proposal_data( + node.test_dir.path(), + 1, + albert, + TestWasms::TxProposalCode.read_bytes(), + 1, + ); + + let submit_proposal_args = vec![ + "init-proposal", + "--data-path", + invalid_proposal_json.to_str().unwrap(), + "--node", + &validator_one_rpc, + ]; + + let captured = + CapturedOutput::of(|| run(&node, Bin::Client, submit_proposal_args)); + assert!(captured.result.is_err()); + println!("{:?}", captured.result); + assert!(captured.err_contains( + "Proposal data are invalid: Invalid proposal start epoch: 1 must be \ + greater than current epoch .* and a multiple of 3" + )); + + // 7. Check invalid proposal was not submitted + let proposal_query_args = vec![ + "query-proposal", + "--proposal-id", + "1", + "--node", + &validator_one_rpc, + ]; + let captured = + CapturedOutput::of(|| run(&node, Bin::Client, proposal_query_args)); + assert_matches!(captured.result, Ok(_)); + assert!(captured.contains("No proposal found with id: 1")); + + // 8. Query token balance (funds shall not be submitted) + let query_balance_args = vec![ + "balance", + "--owner", + ALBERT, + "--token", + NAM, + "--node", + &validator_one_rpc, + ]; + let captured = + CapturedOutput::of(|| run(&node, Bin::Client, query_balance_args)); + assert_matches!(captured.result, Ok(_)); + assert!(captured.contains("nam: 1979500")); + + // 9.1. Send a yay vote from a validator + while node.current_epoch().0 <= 13 { + node.next_epoch(); + } + + let submit_proposal_vote = vec![ + "vote-proposal", + "--proposal-id", + "0", + "--vote", + "yay", + "--address", + "validator-0", + "--node", + &validator_one_rpc, + ]; + + let captured = + CapturedOutput::of(|| run(&node, Bin::Client, submit_proposal_vote)); + assert_matches!(captured.result, Ok(_)); + assert!(captured.contains(TX_APPLIED_SUCCESS)); + + // 9.2. 
Send a valid yay vote from a delegator with bonds + let submit_proposal_vote_delegator = vec![ + "vote-proposal", + "--proposal-id", + "0", + "--vote", + "nay", + "--address", + BERTHA, + "--node", + &validator_one_rpc, + ]; + + let captured = CapturedOutput::of(|| { + run(&node, Bin::Client, submit_proposal_vote_delegator) + }); + assert_matches!(captured.result, Ok(_)); + assert!(captured.contains(TX_APPLIED_SUCCESS)); + + // 10. Send a yay vote from a non-validator/non-delegator user + let submit_proposal_vote = vec![ + "vote-proposal", + "--proposal-id", + "0", + "--vote", + "yay", + "--address", + CHRISTEL, + "--node", + &validator_one_rpc, + ]; + + // Expect a client failure here + let captured = + CapturedOutput::of(|| run(&node, Bin::Client, submit_proposal_vote)); + assert!(captured.result.is_err()); + assert!(captured.err_contains(r"The account .* has no active delegations")); + + // 11. Query the proposal and check the result + while node.current_epoch().0 <= 25 { + node.next_epoch(); + } + + let query_proposal = vec![ + "query-proposal-result", + "--proposal-id", + "0", + "--node", + &validator_one_rpc, + ]; + let captured = + CapturedOutput::of(|| run(&node, Bin::Client, query_proposal)); + assert_matches!(captured.result, Ok(_)); + assert!(captured.contains("Proposal Id: 0")); + let expected = regex::escape( + "passed with 120000.000000 yay votes, 900.000000 nay votes and \ + 0.000000 abstain votes, total voting power: 120900.000000, threshold \ + (fraction) of total voting power needed to tally: 80600.000000 \ + (0.666666666666)", + ); + assert!(captured.contains(&expected)); + + // 12. 
Wait proposal grace and check proposal author funds + while node.current_epoch().0 < 31 { + node.next_epoch(); + } + + let query_balance_args = vec![ + "balance", + "--owner", + ALBERT, + "--token", + NAM, + "--node", + &validator_one_rpc, + ]; + let captured = + CapturedOutput::of(|| run(&node, Bin::Client, query_balance_args)); + assert_matches!(captured.result, Ok(_)); + assert!(captured.contains("nam: 1980000")); + + // 13. Check if governance funds are 0 + let query_balance_args = vec![ + "balance", + "--owner", + GOVERNANCE_ADDRESS, + "--token", + NAM, + "--node", + &validator_one_rpc, + ]; + let captured = + CapturedOutput::of(|| run(&node, Bin::Client, query_balance_args)); + assert_matches!(captured.result, Ok(_)); + assert!(captured.contains("nam: 0")); + + // 14. Query parameters + let query_protocol_parameters = + vec!["query-protocol-parameters", "--node", &validator_one_rpc]; + let captured = CapturedOutput::of(|| { + run(&node, Bin::Client, query_protocol_parameters) + }); + assert_matches!(captured.result, Ok(_)); + assert!(captured.contains(".*Min. proposal grace epochs: 9.*")); + + // 15. Try to initialize a new account with the no more allowlisted vp + let init_account = vec![ + "init-account", + "--public-keys", + // Value obtained from `namada::core::key::ed25519::tests::gen_keypair` + "tpknam1qpqfzxu3gt05jx2mvg82f4anf90psqerkwqhjey4zlqv0qfgwuvkzt5jhkp", + "--threshold", + "1", + "--code-path", + VP_USER_WASM, + "--alias", + "Test-Account", + "--signing-keys", + BERTHA_KEY, + "--node", + &validator_one_rpc, + ]; + let captured = CapturedOutput::of(|| run(&node, Bin::Client, init_account)); + assert_matches!(captured.result, Ok(_)); + assert!( + captured.contains(".*VP code is not allowed in allowlist parameter.*") + ); + + // 16. Submit a tx touching a previous account with the no more allowlisted + // vp and verify that the transaction succeeds, i.e. 
the non allowlisted + // vp can still run + let transfer = vec![ + "transfer", + "--source", + BERTHA, + "--target", + ALBERT, + "--token", + NAM, + "--amount", + "10", + "--signing-keys", + BERTHA_KEY, + "--node", + &validator_one_rpc, + ]; + let captured = CapturedOutput::of(|| run(&node, Bin::Client, transfer)); + assert_matches!(captured.result, Ok(_)); + assert!(captured.contains(TX_APPLIED_SUCCESS)); + + Ok(()) +} + +/// Test submission and vote of a PGF proposal +/// +/// 1. Submit proposal +/// 2. Query the proposal +/// 3. Vote for the accepted proposals and query balances +/// 4. Query the proposal and check the result is the one voted by the validator +/// (majority) +/// 5. Wait proposals grace and check proposal author funds +/// 6. Check if governance funds are 0 +/// 7. Query pgf stewards +/// 8. Submit proposal funding +/// 9. Query the funding proposal +/// 10. Wait proposals grace and check proposal author funds +/// 11. Query pgf fundings +#[test] +fn pgf_governance_proposal() -> Result<()> { + // This address doesn't matter for tests. But an argument is required. + let validator_one_rpc = "http://127.0.0.1:26567"; + // 1. start the ledger node + let (mut node, _services) = setup::setup()?; + + let tx_args = vec![ + "bond", + "--validator", + "validator-0", + "--source", + BERTHA, + "--amount", + "900", + "--ledger-address", + &validator_one_rpc, + ]; + let captured = CapturedOutput::of(|| run(&node, Bin::Client, tx_args)); + assert_matches!(captured.result, Ok(_)); + assert!(captured.contains(TX_APPLIED_SUCCESS)); + + // 1. 
Submit proposal + let albert = defaults::albert_address(); + let pgf_stewards = StewardsUpdate { + add: Some(albert.clone()), + remove: vec![], + }; + + let valid_proposal_json_path = prepare_proposal_data( + node.test_dir.path(), + 0, + albert, + pgf_stewards, + 12, + ); + let submit_proposal_args = vec![ + "init-proposal", + "--pgf-stewards", + "--data-path", + valid_proposal_json_path.to_str().unwrap(), + "--ledger-address", + &validator_one_rpc, + ]; + let captured = + CapturedOutput::of(|| run(&node, Bin::Client, submit_proposal_args)); + assert_matches!(captured.result, Ok(_)); + assert!(captured.contains(TX_APPLIED_SUCCESS)); + + // 2. Query the proposal + let proposal_query_args = vec![ + "query-proposal", + "--proposal-id", + "0", + "--ledger-address", + &validator_one_rpc, + ]; + let captured = + CapturedOutput::of(|| run(&node, Bin::Client, proposal_query_args)); + assert_matches!(captured.result, Ok(_)); + assert!(captured.contains("Proposal Id: 0")); + + // Query token balance proposal author (submitted funds) + let query_balance_args = vec![ + "balance", + "--owner", + ALBERT, + "--token", + NAM, + "--ledger-address", + &validator_one_rpc, + ]; + let captured = + CapturedOutput::of(|| run(&node, Bin::Client, query_balance_args)); + assert_matches!(captured.result, Ok(_)); + assert!(captured.contains("nam: 1979500")); + + // Query token balance governance + let query_balance_args = vec![ + "balance", + "--owner", + GOVERNANCE_ADDRESS, + "--token", + NAM, + "--ledger-address", + &validator_one_rpc, + ]; + let captured = + CapturedOutput::of(|| run(&node, Bin::Client, query_balance_args)); + assert_matches!(captured.result, Ok(_)); + assert!(captured.contains("nam: 500")); + + // 3. 
Send a yay vote from a validator + while node.current_epoch().0 <= 13 { + node.next_epoch(); + } + + let submit_proposal_vote = vec![ + "vote-proposal", + "--proposal-id", + "0", + "--vote", + "yay", + "--address", + "validator-0", + "--ledger-address", + &validator_one_rpc, + ]; + let captured = + CapturedOutput::of(|| run(&node, Bin::Client, submit_proposal_vote)); + assert_matches!(captured.result, Ok(_)); + assert!(captured.contains(TX_APPLIED_SUCCESS)); + + // Send different yay vote from delegator to check majority on 1/3 + let submit_proposal_vote_delegator = vec![ + "vote-proposal", + "--proposal-id", + "0", + "--vote", + "yay", + "--address", + BERTHA, + "--ledger-address", + &validator_one_rpc, + ]; + let captured = CapturedOutput::of(|| { + run(&node, Bin::Client, submit_proposal_vote_delegator) + }); + assert_matches!(captured.result, Ok(_)); + assert!(captured.contains(TX_APPLIED_SUCCESS)); + + // 4. Query the proposal and check the result is the one voted by the + // validator (majority) + while node.current_epoch().0 <= 25 { + node.next_epoch(); + } + + let query_proposal = vec![ + "query-proposal-result", + "--proposal-id", + "0", + "--ledger-address", + &validator_one_rpc, + ]; + let captured = + CapturedOutput::of(|| run(&node, Bin::Client, query_proposal)); + assert_matches!(captured.result, Ok(_)); + assert!(captured.contains("passed")); + + // 5. Wait proposals grace and check proposal author funds + while node.current_epoch().0 < 31 { + node.next_epoch(); + } + let query_balance_args = vec![ + "balance", + "--owner", + ALBERT, + "--token", + NAM, + "--ledger-address", + &validator_one_rpc, + ]; + let captured = + CapturedOutput::of(|| run(&node, Bin::Client, query_balance_args)); + assert_matches!(captured.result, Ok(_)); + assert!(captured.contains("nam: 1980000")); + + // 6. 
Check if governance funds are 0 + let query_balance_args = vec![ + "balance", + "--owner", + GOVERNANCE_ADDRESS, + "--token", + NAM, + "--ledger-address", + &validator_one_rpc, + ]; + let captured = + CapturedOutput::of(|| run(&node, Bin::Client, query_balance_args)); + assert_matches!(captured.result, Ok(_)); + assert!(captured.contains("nam: 0")); + + // 7. Query pgf stewards + let query_pgf = vec!["query-pgf", "--node", &validator_one_rpc]; + let captured = CapturedOutput::of(|| run(&node, Bin::Client, query_pgf)); + assert_matches!(captured.result, Ok(_)); + assert!(captured.contains("Pgf stewards:")); + assert!(captured.contains(&format!("- {}", defaults::albert_address()))); + assert!(captured.contains("Reward distribution:")); + assert!( + captured.contains(&format!("- 1 to {}", defaults::albert_address())) + ); + assert!(captured.contains("Pgf fundings: no fundings are currently set.")); + + // 8. Submit proposal funding + let albert = defaults::albert_address(); + let bertha = defaults::bertha_address(); + let christel = defaults::christel_address(); + + let pgf_funding = PgfFunding { + continuous: vec![PGFTarget::Internal(PGFInternalTarget { + amount: token::Amount::from_u64(10), + target: bertha.clone(), + })], + retro: vec![PGFTarget::Internal(PGFInternalTarget { + amount: token::Amount::from_u64(5), + target: christel, + })], + }; + let valid_proposal_json_path = + prepare_proposal_data(node.test_dir.path(), 1, albert, pgf_funding, 36); + + let submit_proposal_args = vec![ + "init-proposal", + "--pgf-funding", + "--data-path", + valid_proposal_json_path.to_str().unwrap(), + "--ledger-address", + &validator_one_rpc, + ]; + let captured = + CapturedOutput::of(|| run(&node, Bin::Client, submit_proposal_args)); + assert_matches!(captured.result, Ok(_)); + assert!(captured.contains(TX_APPLIED_SUCCESS)); + + // 9. 
Query the funding proposal + let proposal_query_args = vec![ + "query-proposal", + "--proposal-id", + "1", + "--ledger-address", + &validator_one_rpc, + ]; + let captured = + CapturedOutput::of(|| run(&node, Bin::Client, proposal_query_args)); + assert_matches!(captured.result, Ok(_)); + assert!(captured.contains("Proposal Id: 1")); + + // 10. Wait proposals grace and check proposal author funds + while node.current_epoch().0 < 55 { + node.next_epoch(); + } + + // 11. Query pgf fundings + let query_pgf = vec!["query-pgf", "--node", &validator_one_rpc]; + let captured = CapturedOutput::of(|| run(&node, Bin::Client, query_pgf)); + assert_matches!(captured.result, Ok(_)); + assert!(captured.contains("Pgf fundings")); + assert!(captured.contains(&format!( + "{} for {}", + bertha, + token::Amount::from_u64(10).to_string_native() + ))); + + Ok(()) +} + +/// Test if a steward can correctly change his distribution reward +#[test] +fn pgf_steward_change_commission() -> Result<()> { + // This address doesn't matter for tests. But an argument is required. + let validator_one_rpc = "http://127.0.0.1:26567"; + // 1. 
start the ledger node + let (node, _services) = setup::initialize_genesis(|mut genesis| { + genesis.parameters.pgf_params.stewards_inflation_rate = + Dec::from_str("0.1").unwrap(); + genesis.parameters.pgf_params.stewards = + BTreeSet::from_iter([defaults::albert_address()]); + genesis + })?; + + // Query pgf stewards + let query_pgf = vec!["query-pgf", "--node", &validator_one_rpc]; + let captured = CapturedOutput::of(|| run(&node, Bin::Client, query_pgf)); + assert_matches!(captured.result, Ok(_)); + assert!(captured.contains("Pgf stewards:")); + assert!(captured.contains(&format!("- {}", defaults::albert_address()))); + assert!(captured.contains("Reward distribution:")); + assert!( + captured.contains(&format!("- 1 to {}", defaults::albert_address())) + ); + assert!(captured.contains("Pgf fundings: no fundings are currently set.")); + + let commission = Commission { + reward_distribution: HashMap::from_iter([ + (defaults::albert_address(), Dec::from_str("0.25").unwrap()), + (defaults::bertha_address(), Dec::from_str("0.70").unwrap()), + (defaults::christel_address(), Dec::from_str("0.05").unwrap()), + ]), + }; + let commission_path = prepare_steward_commission_update_data( + node.test_dir.path(), + commission, + ); + // Update steward commissions + let tx_args = vec![ + "update-steward-rewards", + "--steward", + ALBERT, + "--data-path", + commission_path.to_str().unwrap(), + "--node", + &validator_one_rpc, + ]; + let captured = CapturedOutput::of(|| run(&node, Bin::Client, tx_args)); + assert_matches!(captured.result, Ok(_)); + assert!(captured.contains(TX_APPLIED_SUCCESS)); + + // 14. 
Query pgf stewards + let query_pgf = vec!["query-pgf", "--node", &validator_one_rpc]; + let captured = CapturedOutput::of(|| run(&node, Bin::Client, query_pgf)); + assert_matches!(captured.result, Ok(_)); + assert!(captured.contains("Pgf stewards:")); + assert!(captured.contains(&format!("- {}", defaults::albert_address()))); + assert!(captured.contains("Reward distribution:")); + assert!( + captured.contains(&format!("- 0.25 to {}", defaults::albert_address())) + ); + assert!( + captured.contains(&format!("- 0.7 to {}", defaults::bertha_address())) + ); + assert!( + captured + .contains(&format!("- 0.05 to {}", defaults::christel_address())) + ); + assert!(captured.contains("Pgf fundings: no fundings are currently set.")); + + Ok(()) +} + +/// In this test we: +/// 1. Run the ledger node +/// 2. For some transactions that need signature authorization: 2a. Generate a +/// new key for an implicit account. 2b. Send some funds to the implicit +/// account. 2c. Submit the tx with the implicit account as the source, that +/// requires that the account has revealed its PK. This should be done by the +/// client automatically. 2d. Submit same tx again, this time the client +/// shouldn't reveal again. +#[test] +fn implicit_account_reveal_pk() -> Result<()> { + // This address doesn't matter for tests. But an argument is required. + let validator_one_rpc = "http://127.0.0.1:26567"; + // 1. start the ledger node + let (node, _services) = setup::setup()?; + // 2. 
Some transactions that need signature authorization: + #[allow(clippy::type_complexity)] + let txs_args: Vec Vec>> = vec![ + // Submit proposal + Box::new(|source| { + // Gen data for proposal tx + let author = find_address(&node, source).unwrap(); + let valid_proposal_json_path = prepare_proposal_data( + node.test_dir.path(), + 0, + author, + TestWasms::TxProposalCode.read_bytes(), + 12, + ); + vec![ + "init-proposal", + "--data-path", + valid_proposal_json_path.to_str().unwrap(), + "--signing-keys", + source, + "--gas-limit", + "2000000", + "--node", + &validator_one_rpc, + ] + .into_iter() + .map(|x| x.to_owned()) + .collect() + }), + // A token transfer tx + Box::new(|source| { + [ + "transfer", + "--source", + source, + "--target", + ALBERT, + "--token", + NAM, + "--amount", + "10.1", + "--signing-keys", + source, + "--node", + validator_one_rpc, + ] + .into_iter() + .map(|x| x.to_owned()) + .collect() + }), + // A bond + Box::new(|source| { + vec![ + "bond", + "--validator", + "validator-0", + "--source", + source, + "--amount", + "10.1", + "--signing-keys", + source, + "--node", + &validator_one_rpc, + ] + .into_iter() + .map(|x| x.to_owned()) + .collect() + }), + ]; + + for (ix, tx_args) in txs_args.into_iter().enumerate() { + let key_alias = format!("key-{ix}"); + // 2a. Generate a new key for an implicit account. + run( + &node, + Bin::Wallet, + vec![ + "gen", + "--alias", + &key_alias, + "--unsafe-dont-encrypt", + "--raw", + ], + )?; + // Apply the key_alias once the key is generated to obtain tx args + let tx_args = tx_args(&key_alias); + // 2b. Send some funds to the implicit account. + let credit_args = vec![ + "transfer", + "--source", + BERTHA, + "--target", + &key_alias, + "--token", + NAM, + "--amount", + "2000", + "--signing-keys", + BERTHA_KEY, + "--node", + &validator_one_rpc, + ]; + run(&node, Bin::Client, credit_args)?; + node.assert_success(); + + // 2c. Submit the tx with the implicit account as the source. 
+ let captured = CapturedOutput::of(|| { + run( + &node, + Bin::Client, + tx_args.iter().map(|arg| arg.as_ref()).collect(), + ) + }); + assert_matches!(captured.result, Ok(_)); + assert!(captured.contains("Submitting a tx to reveal the public key")); + + // 2d. Submit same tx again, this time the client shouldn't reveal + // again. + let captured = CapturedOutput::of(|| { + run( + &node, + Bin::Client, + tx_args.iter().map(|arg| arg.as_ref()).collect(), + ) + }); + assert!(!captured.contains("Submitting a tx to reveal the public key")); + node.assert_success(); + } + + Ok(()) +} + +/// Change validator metadata +#[test] +fn change_validator_metadata() -> Result<()> { + // This address doesn't matter for tests. But an argument is required. + let validator_one_rpc = "http://127.0.0.1:26567"; + // 1. start the ledger node + let (node, _services) = setup::setup()?; + + // 2. Query the validator metadata loaded from genesis + let metadata_query_args = vec![ + "validator-metadata", + "--validator", + "validator-0", + "--node", + &validator_one_rpc, + ]; + let captured = CapturedOutput::of(|| { + run(&node, Bin::Client, metadata_query_args.clone()) + }); + assert_matches!(captured.result, Ok(_)); + assert!(captured.contains("Email:")); + assert!(captured.contains("No description")); + assert!(captured.contains("No website")); + assert!(captured.contains("No discord handle")); + assert!(captured.contains("commission rate:")); + assert!(captured.contains("max change per epoch:")); + + // 3. 
Add some metadata to the validator + let metadata_change_args = vec![ + "change-metadata", + "--validator", + "validator-0", + "--email", + "theokayestvalidator@namada.net", + "--description", + "We are just an okay validator node trying to get by", + "--website", + "theokayestvalidator.com", + "--node", + &validator_one_rpc, + ]; + + let captured = + CapturedOutput::of(|| run(&node, Bin::Client, metadata_change_args)); + assert_matches!(captured.result, Ok(_)); + assert!(captured.contains(TX_APPLIED_SUCCESS)); + + // 4. Query the metadata after the change + let captured = CapturedOutput::of(|| { + run(&node, Bin::Client, metadata_query_args.clone()) + }); + assert_matches!(captured.result, Ok(_)); + assert!(captured.contains("Email: theokayestvalidator@namada.net")); + assert!(captured.contains( + "Description: We are just an okay validator node trying to get by" + )); + assert!(captured.contains("Website: theokayestvalidator.com")); + assert!(captured.contains("No discord handle")); + assert!(captured.contains("commission rate:")); + assert!(captured.contains("max change per epoch:")); + + // 5. Remove the validator website + let metadata_change_args = vec![ + "change-metadata", + "--validator", + "validator-0", + "--website", + "", + "--node", + &validator_one_rpc, + ]; + let captured = + CapturedOutput::of(|| run(&node, Bin::Client, metadata_change_args)); + assert_matches!(captured.result, Ok(_)); + assert!(captured.contains(TX_APPLIED_SUCCESS)); + + // 6. 
Query the metadata to see that the validator website is removed + let captured = + CapturedOutput::of(|| run(&node, Bin::Client, metadata_query_args)); + assert_matches!(captured.result, Ok(_)); + assert!(captured.contains("Email: theokayestvalidator@namada.net")); + assert!(captured.contains( + "Description: We are just an okay validator node trying to get by" + )); + assert!(captured.contains("No website")); + assert!(captured.contains("No discord handle")); + assert!(captured.contains("commission rate:")); + assert!(captured.contains("max change per epoch:")); + + Ok(()) +} diff --git a/crates/tests/src/integration/masp.rs b/crates/tests/src/integration/masp.rs index 3e0bc5dd64..ba05f5af36 100644 --- a/crates/tests/src/integration/masp.rs +++ b/crates/tests/src/integration/masp.rs @@ -368,7 +368,7 @@ fn masp_incentives() -> Result<()> { ) }); assert!(captured.result.is_ok()); - assert!(captured.contains("nam: 0.359578")); + assert!(captured.contains("nam: 0.362747")); // Assert NAM balance at MASP pool is an accumulation of // rewards from both the shielded BTC and shielded ETH @@ -388,7 +388,7 @@ fn masp_incentives() -> Result<()> { ) }); assert!(captured.result.is_ok()); - assert!(captured.contains("nam: 0.671183")); + assert!(captured.contains("nam: 0.674354")); // Wait till epoch boundary node.next_epoch(); @@ -468,7 +468,7 @@ fn masp_incentives() -> Result<()> { ) }); assert!(captured.result.is_ok()); - assert!(captured.contains("nam: 0.719514")); + assert!(captured.contains("nam: 0.725855")); node.next_epoch(); // sync the shielded context @@ -497,7 +497,7 @@ fn masp_incentives() -> Result<()> { ) }); assert!(captured.result.is_ok()); - assert!(captured.contains("nam: 1.58943")); + assert!(captured.contains("nam: 1.595775")); // Wait till epoch boundary node.next_epoch(); @@ -588,7 +588,7 @@ fn masp_incentives() -> Result<()> { ) }); assert!(captured.result.is_ok()); - assert!(captured.contains("nam: 1.83743")); + assert!(captured.contains("nam: 
1.843775")); // Wait till epoch boundary node.next_epoch(); @@ -640,7 +640,7 @@ fn masp_incentives() -> Result<()> { ) }); assert!(captured.result.is_ok()); - assert!(captured.contains("nam: 0.719514")); + assert!(captured.contains("nam: 0.725855")); // Assert NAM balance at MASP pool is // the accumulation of rewards from the shielded assets (BTC and ETH) @@ -660,7 +660,7 @@ fn masp_incentives() -> Result<()> { ) }); assert!(captured.result.is_ok()); - assert!(captured.contains("nam: 1.83743")); + assert!(captured.contains("nam: 1.843775")); // Wait till epoch boundary to prevent conversion expiry during transaction // construction @@ -685,7 +685,7 @@ fn masp_incentives() -> Result<()> { "--token", NAM, "--amount", - "0.719514", + "0.725855", "--signing-keys", BERTHA_KEY, "--node", @@ -795,7 +795,7 @@ fn masp_incentives() -> Result<()> { ) }); assert!(captured.result.is_ok()); - assert!(captured.contains("nam: 0.004005")); + assert!(captured.contains("nam: 0.004009")); Ok(()) } diff --git a/crates/tests/src/integration/setup.rs b/crates/tests/src/integration/setup.rs index e8aa6b4ee7..798df70665 100644 --- a/crates/tests/src/integration/setup.rs +++ b/crates/tests/src/integration/setup.rs @@ -1,4 +1,3 @@ -use std::collections::HashMap; use std::mem::ManuallyDrop; use std::path::Path; use std::str::FromStr; @@ -24,6 +23,7 @@ use namada_apps::node::ledger::shell::testing::utils::TestDir; use namada_apps::node::ledger::shell::Shell; use namada_apps::wallet::pre_genesis; use namada_core::chain::ChainIdPrefix; +use namada_core::collections::HashMap; use namada_sdk::wallet::alias::Alias; use crate::e2e::setup::{copy_wasm_to_chain_dir, SINGLE_NODE_NET_GENESIS}; @@ -33,11 +33,15 @@ const ENV_VAR_KEEP_TEMP: &str = "NAMADA_INT_KEEP_TEMP"; /// Setup a network with a single genesis validator node. pub fn setup() -> Result<(MockNode, MockServicesController)> { - initialize_genesis() + initialize_genesis(|genesis| genesis) } /// Setup folders with genesis, configs, wasm, etc. 
-pub fn initialize_genesis() -> Result<(MockNode, MockServicesController)> { +pub fn initialize_genesis( + mut update_genesis: impl FnMut( + templates::All, + ) -> templates::All, +) -> Result<(MockNode, MockServicesController)> { let working_dir = std::fs::canonicalize("../..").unwrap(); let keep_temp = match std::env::var(ENV_VAR_KEEP_TEMP) { Ok(val) => val.to_ascii_lowercase() != "false", @@ -57,6 +61,7 @@ pub fn initialize_genesis() -> Result<(MockNode, MockServicesController)> { locked_amount_target: 1_000_000u64, }); } + let templates = update_genesis(templates); let genesis_path = test_dir.path().join("int-test-genesis-src"); std::fs::create_dir(&genesis_path) .expect("Could not create test chain directory."); diff --git a/crates/tests/src/native_vp/eth_bridge_pool.rs b/crates/tests/src/native_vp/eth_bridge_pool.rs index 6a2505b171..3f047b3eca 100644 --- a/crates/tests/src/native_vp/eth_bridge_pool.rs +++ b/crates/tests/src/native_vp/eth_bridge_pool.rs @@ -19,7 +19,6 @@ mod test_bridge_pool_vp { use namada::tx::Tx; use namada_apps::wallet::defaults::{albert_address, bertha_address}; use namada_apps::wasm_loader; - use namada_core::validity_predicate::VpSentinel; use namada_sdk::eth_bridge::{ wrapped_erc20s, Contracts, Erc20WhitelistEntry, EthereumBridgeParams, UpgradeableContract, @@ -117,15 +116,23 @@ mod test_bridge_pool_vp { let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &tx_env.gas_meter.borrow(), )); - let sentinel = RefCell::new(VpSentinel::default()); let vp_env = TestNativeVpEnv::from_tx_env(tx_env, BRIDGE_POOL_ADDRESS); vp_env - .validate_tx(&gas_meter, &sentinel, |ctx| BridgePoolVp { ctx }) - .expect("Test failed") + .validate_tx(&gas_meter, |ctx| BridgePoolVp { ctx }) + .is_ok() } fn validate_tx(tx: Tx) { - assert!(run_vp(tx)); + #[cfg(feature = "namada-eth-bridge")] + { + assert!(run_vp(tx)); + } + #[cfg(not(feature = "namada-eth-bridge"))] + { + // NB: small hack to always check we reject txs + // if the bridge is disabled at 
compile time + invalidate_tx(tx) + } } fn invalidate_tx(tx: Tx) { diff --git a/crates/tests/src/native_vp/mod.rs b/crates/tests/src/native_vp/mod.rs index 375e7af7fa..e059705bb0 100644 --- a/crates/tests/src/native_vp/mod.rs +++ b/crates/tests/src/native_vp/mod.rs @@ -10,7 +10,6 @@ use namada::ledger::gas::VpGasMeter; use namada::ledger::native_vp::{Ctx, NativeVp}; use namada::state::testing::TestState; use namada::vm::WasmCacheRwAccess; -use namada_core::validity_predicate::VpSentinel; use crate::tx::TestTxEnv; @@ -45,9 +44,8 @@ impl TestNativeVpEnv { pub fn validate_tx<'a, T>( &'a self, gas_meter: &'a RefCell, - sentinel: &'a RefCell, init_native_vp: impl Fn(NativeVpCtx<'a>) -> T, - ) -> Result::Error> + ) -> Result<(), ::Error> where T: NativeVp, { @@ -57,7 +55,6 @@ impl TestNativeVpEnv { &self.tx_env.tx, &self.tx_env.tx_index, gas_meter, - sentinel, &self.keys_changed, &self.verifiers, self.tx_env.vp_wasm_cache.clone(), diff --git a/crates/tests/src/native_vp/pos.rs b/crates/tests/src/native_vp/pos.rs index a79c5c3d5c..57f04c706d 100644 --- a/crates/tests/src/native_vp/pos.rs +++ b/crates/tests/src/native_vp/pos.rs @@ -154,7 +154,6 @@ mod tests { use namada::gas::VpGasMeter; use namada::ledger::pos::PosVP; use namada::token; - use namada_core::validity_predicate::VpSentinel; use namada_tx_prelude::proof_of_stake::parameters::testing::arb_pos_params; use namada_tx_prelude::Address; use proptest::prelude::*; @@ -440,23 +439,27 @@ mod tests { let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &tx_env.gas_meter.borrow(), )); - let sentinel = RefCell::new(VpSentinel::default()); let vp_env = TestNativeVpEnv::from_tx_env(tx_env, address::POS); - let result = vp_env.validate_tx(&gas_meter, &sentinel, PosVP::new); + let result = vp_env.validate_tx(&gas_meter, PosVP::new); // Put the tx_env back before checking the result tx_host_env::set(vp_env.tx_env); - let result = - result.expect("Validation of valid changes must not fail!"); - // The expected result 
depends on the current state - if self.is_current_tx_valid { - // Changes must be accepted - assert!(result, "Validation of valid changes must pass!"); - } else { - // Invalid changes must be rejected - assert!(!result, "Validation of invalid changes must fail!"); + match (self.is_current_tx_valid, result) { + (true, Ok(())) => {} + (true, Err(err)) => { + // Changes must be accepted + panic!( + "Validation of valid changes must pass! Got error: \ + {err}" + ); + } + (false, Err(_)) => {} + (false, Ok(())) => { + // Invalid changes must be rejected + panic!("Validation of invalid changes must fail!"); + } } } } diff --git a/crates/tests/src/storage_api/collections/lazy_map.rs b/crates/tests/src/storage_api/collections/lazy_map.rs index 5533fccfc0..48d5f64511 100644 --- a/crates/tests/src/storage_api/collections/lazy_map.rs +++ b/crates/tests/src/storage_api/collections/lazy_map.rs @@ -58,7 +58,8 @@ mod tests { /// `Transition`s, which are also being accumulated into /// `current_transitions`. It then: /// - /// - checks its state against an in-memory `std::collections::HashMap` + /// - checks its state against an in-memory + /// `namada_core::collections::HashMap` /// - runs validation and checks that the `LazyMap::Action`s reported from /// validation match with transitions that were applied /// diff --git a/crates/tests/src/storage_api/collections/nested_lazy_map.rs b/crates/tests/src/storage_api/collections/nested_lazy_map.rs index 2df9ae95bf..7658a66223 100644 --- a/crates/tests/src/storage_api/collections/nested_lazy_map.rs +++ b/crates/tests/src/storage_api/collections/nested_lazy_map.rs @@ -68,7 +68,8 @@ mod tests { /// `Transition`s, which are also being accumulated into /// `current_transitions`. 
It then: /// - /// - checks its state against an in-memory `std::collections::HashMap` + /// - checks its state against an in-memory + /// `namada_core::collections::HashMap` /// - runs validation and checks that the `LazyMap::Action`s reported from /// validation match with transitions that were applied /// diff --git a/crates/tests/src/strings.rs b/crates/tests/src/strings.rs index f0b1f8ea82..0a9223dcc5 100644 --- a/crates/tests/src/strings.rs +++ b/crates/tests/src/strings.rs @@ -21,9 +21,6 @@ pub const TX_REJECTED: &str = "Transaction was rejected by VPs"; /// Inner transaction failed in execution (no VPs ran). pub const TX_FAILED: &str = "Transaction failed"; -/// Wrapper transaction accepted. -pub const TX_ACCEPTED: &str = "Wrapper transaction accepted"; - pub const WALLET_HD_PASSPHRASE_PROMPT: &str = "Enter BIP39 passphrase (empty for none): "; diff --git a/crates/tests/src/vm_host_env/ibc.rs b/crates/tests/src/vm_host_env/ibc.rs index bf5ed4dabe..c69cd43d08 100644 --- a/crates/tests/src/vm_host_env/ibc.rs +++ b/crates/tests/src/vm_host_env/ibc.rs @@ -1,6 +1,5 @@ use core::time::Duration; use std::cell::RefCell; -use std::collections::HashMap; use ibc_testkit::testapp::ibc::clients::mock::client_state::{ client_type, MockClientState, @@ -16,7 +15,7 @@ use namada::core::time::DurationSecs; use namada::gas::TxGasMeter; use namada::governance::parameters::GovernanceParameters; use namada::ibc::apps::transfer::types::error::TokenTransferError; -use namada::ibc::apps::transfer::types::msgs::transfer::MsgTransfer; +use namada::ibc::apps::transfer::types::msgs::transfer::MsgTransfer as IbcMsgTransfer; use namada::ibc::apps::transfer::types::packet::PacketData; use namada::ibc::apps::transfer::types::{ ack_success_b64, PrefixedCoin, VERSION, @@ -55,7 +54,9 @@ pub use namada::ibc::core::host::types::identifiers::{ }; use namada::ibc::primitives::proto::{Any, Protobuf}; use namada::ibc::primitives::Timestamp; +use namada::ibc::MsgTransfer; use 
namada::ledger::gas::VpGasMeter; +use namada::ledger::ibc::parameters::IbcParameters; pub use namada::ledger::ibc::storage::{ ack_key, channel_counter_key, channel_key, client_counter_key, client_state_key, client_update_height_key, client_update_timestamp_key, @@ -82,7 +83,7 @@ use namada::tendermint::time::Time as TmTime; use namada::token::{self, Amount, DenominatedAmount}; use namada::tx::Tx; use namada::vm::{wasm, WasmCacheRwAccess}; -use namada_core::validity_predicate::VpSentinel; +use namada_core::collections::HashMap; use namada_sdk::state::StateRead; use namada_test_utils::TestWasms; use namada_tx_prelude::BorshSerializeExt; @@ -101,7 +102,7 @@ impl<'a> TestIbcVp<'a> { pub fn validate( &self, tx_data: &Tx, - ) -> std::result::Result { + ) -> std::result::Result<(), namada::ledger::native_vp::ibc::Error> { self.ibc.validate_tx( tx_data, self.ibc.ctx.keys_changed, @@ -118,7 +119,7 @@ impl<'a> TestMultitokenVp<'a> { pub fn validate( &self, tx: &Tx, - ) -> std::result::Result { + ) -> std::result::Result<(), MultitokenVpError> { self.multitoken_vp.validate_tx( tx, self.multitoken_vp.ctx.keys_changed, @@ -131,7 +132,7 @@ impl<'a> TestMultitokenVp<'a> { pub fn validate_ibc_vp_from_tx<'a>( tx_env: &'a TestTxEnv, tx: &'a Tx, -) -> std::result::Result { +) -> std::result::Result<(), namada::ledger::native_vp::ibc::Error> { let (verifiers, keys_changed) = tx_env .state .write_log() @@ -147,16 +148,14 @@ pub fn validate_ibc_vp_from_tx<'a>( wasm::compilation_cache::common::testing::cache(); let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( - &TxGasMeter::new_from_sub_limit(1_000_000.into()), + &TxGasMeter::new_from_sub_limit(10_000_000_000.into()), )); - let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, &tx_env.state, tx, &TxIndex(0), &gas_meter, - &sentinel, &keys_changed, &verifiers, vp_wasm_cache, @@ -171,7 +170,7 @@ pub fn validate_multitoken_vp_from_tx<'a>( tx_env: &'a TestTxEnv, tx: &'a Tx, target: &Key, -) -> 
std::result::Result { +) -> std::result::Result<(), MultitokenVpError> { let (verifiers, keys_changed) = tx_env .state .write_log() @@ -187,16 +186,14 @@ pub fn validate_multitoken_vp_from_tx<'a>( wasm::compilation_cache::common::testing::cache(); let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( - &TxGasMeter::new_from_sub_limit(1_000_000.into()), + &TxGasMeter::new_from_sub_limit(10_000_000_000.into()), )); - let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, &tx_env.state, tx, &TxIndex(0), &gas_meter, - &sentinel, &keys_changed, &verifiers, vp_wasm_cache, @@ -213,9 +210,15 @@ pub fn init_storage() -> (Address, Address) { let code_hash = Hash::sha256(&code); tx_host_env::with(|env| { + namada::parameters::init_test_storage(&mut env.state).unwrap(); ibc::init_genesis_storage(&mut env.state); let gov_params = GovernanceParameters::default(); gov_params.init_storage(&mut env.state).unwrap(); + let ibc_params = IbcParameters { + default_mint_limit: Amount::native_whole(100), + default_per_epoch_throughput_limit: Amount::native_whole(100), + }; + ibc_params.init_storage(&mut env.state).unwrap(); pos::test_utils::test_init_genesis( &mut env.state, OwnedPosParams::default(), @@ -239,11 +242,15 @@ pub fn init_storage() -> (Address, Address) { }); // initialize a token - let token = tx_host_env::ctx().init_account(code_hash, &None).unwrap(); + let token = tx_host_env::ctx() + .init_account(code_hash, &None, &[]) + .unwrap(); let denom_key = token::storage_key::denom_key(&token); let token_denom = token::Denomination(ANY_DENOMINATION); // initialize an account - let account = tx_host_env::ctx().init_account(code_hash, &None).unwrap(); + let account = tx_host_env::ctx() + .init_account(code_hash, &None, &[]) + .unwrap(); let key = token::storage_key::balance_key(&token, &account); let init_bal = Amount::from_uint(100, token_denom).unwrap(); tx_host_env::with(|env| { @@ -294,7 +301,7 @@ pub fn init_storage() -> (Address, Address) { } 
pub fn client_id() -> ClientId { - ClientId::new(client_type(), 0).expect("invalid client ID") + ClientId::new(&client_type().to_string(), 0).expect("invalid client ID") } pub fn prepare_client() -> (ClientId, Any, HashMap>) { @@ -442,8 +449,8 @@ pub fn msg_upgrade_client(client_id: ClientId) -> MsgUpgradeClient { } pub fn msg_connection_open_init(client_id: ClientId) -> MsgConnectionOpenInit { - let client_type = client_type(); - let counterparty_client_id = ClientId::new(client_type, 42).unwrap(); + let counterparty_client_id = + ClientId::new(&client_type().to_string(), 42).unwrap(); let commitment_prefix = CommitmentPrefix::try_from(COMMITMENT_PREFIX.to_vec()).unwrap(); let counterparty = @@ -522,8 +529,8 @@ fn dummy_proof_height() -> Height { } fn dummy_connection_counterparty() -> ConnCounterparty { - let client_type = client_type(); - let client_id = ClientId::new(client_type, 42).expect("invalid client ID"); + let client_id = ClientId::new(&client_type().to_string(), 42) + .expect("invalid client ID"); let conn_id = ConnectionId::new(12); let commitment_prefix = CommitmentPrefix::try_from(COMMITMENT_PREFIX.to_vec()) @@ -636,7 +643,7 @@ pub fn msg_transfer( ) -> MsgTransfer { let amount = DenominatedAmount::native(Amount::native_whole(100)); let timestamp = (Timestamp::now() + Duration::from_secs(100)).unwrap(); - MsgTransfer { + let message = IbcMsgTransfer { port_id_on_a: port_id, chan_id_on_a: channel_id, packet_data: PacketData { @@ -652,10 +659,14 @@ pub fn msg_transfer( }, timeout_height_on_b: TimeoutHeight::Never, timeout_timestamp_on_b: timestamp, + }; + MsgTransfer { + message, + transfer: None, } } -pub fn set_timeout_timestamp(msg: &mut MsgTransfer) { +pub fn set_timeout_timestamp(msg: &mut IbcMsgTransfer) { msg.timeout_timestamp_on_b = (msg.timeout_timestamp_on_b - Duration::from_secs(201)).unwrap(); } @@ -738,7 +749,7 @@ pub fn msg_timeout_on_close( } pub fn packet_from_message( - msg: &MsgTransfer, + msg: &IbcMsgTransfer, sequence: Sequence, 
counterparty: &ChanCounterparty, ) -> Packet { diff --git a/crates/tests/src/vm_host_env/mod.rs b/crates/tests/src/vm_host_env/mod.rs index 06dca03ef7..424330a39c 100644 --- a/crates/tests/src/vm_host_env/mod.rs +++ b/crates/tests/src/vm_host_env/mod.rs @@ -18,8 +18,10 @@ pub mod vp; #[cfg(test)] mod tests { + use std::cell::RefCell; use std::collections::BTreeSet; use std::panic; + use std::rc::Rc; use borsh_ext::BorshSerializeExt; use itertools::Itertools; @@ -29,8 +31,9 @@ mod tests { use namada::core::storage::{self, BlockHash, BlockHeight, Key, KeySeg}; use namada::core::time::DateTimeUtc; use namada::core::{address, key}; + use namada::ibc::context::nft_transfer_mod::testing::DummyNftTransferModule; use namada::ibc::context::transfer_mod::testing::DummyTransferModule; - use namada::ibc::primitives::Msg; + use namada::ibc::primitives::ToProto; use namada::ibc::Error as IbcActionError; use namada::ledger::ibc::storage as ibc_storage; use namada::ledger::native_vp::ibc::{ @@ -214,7 +217,7 @@ mod tests { tx_host_env::init(); let code = vec![]; - tx::ctx().init_account(code, &None).unwrap(); + tx::ctx().init_account(code, &None, &[]).unwrap(); } #[test] @@ -229,7 +232,7 @@ mod tests { let key = Key::wasm_code(&code_hash); env.state.write_bytes(&key, &code).unwrap(); }); - tx::ctx().init_account(code_hash, &None).unwrap(); + tx::ctx().init_account(code_hash, &None, &[]).unwrap(); } /// Test that a tx updating validity predicate that is not in the allowlist @@ -329,7 +332,7 @@ mod tests { }); // Initializing a new account with the VP should fail - tx::ctx().init_account(vp_hash, &None).unwrap(); + tx::ctx().init_account(vp_hash, &None, &[]).unwrap(); } #[test] @@ -547,6 +550,7 @@ mod tests { // Use some arbitrary bytes for tx code let code = vec![4, 3, 2, 1, 0]; + #[allow(clippy::disallowed_methods)] let expiration = Some(DateTimeUtc::now()); for data in &[ // Tx with some arbitrary data @@ -644,8 +648,8 @@ mod tests { .add_serialized_data(input_data.clone()) 
.sign_raw(keypairs.clone(), pks_map.clone(), None) .sign_wrapper(keypair.clone()); - let result = vp::CTX.eval(empty_code, tx).unwrap(); - assert!(!result); + let result = vp::CTX.eval(empty_code, tx); + assert!(result.is_err()); // evaluating the VP template which always returns `true` should pass let code = TestWasms::VpAlwaysTrue.read_bytes(); @@ -663,8 +667,8 @@ mod tests { .add_serialized_data(input_data.clone()) .sign_raw(keypairs.clone(), pks_map.clone(), None) .sign_wrapper(keypair.clone()); - let result = vp::CTX.eval(code_hash, tx).unwrap(); - assert!(result); + let result = vp::CTX.eval(code_hash, tx); + assert!(result.is_ok()); // evaluating the VP template which always returns `false` shouldn't // pass @@ -683,8 +687,8 @@ mod tests { .add_serialized_data(input_data) .sign_raw(keypairs, pks_map, None) .sign_wrapper(keypair); - let result = vp::CTX.eval(code_hash, tx).unwrap(); - assert!(!result); + let result = vp::CTX.eval(code_hash, tx); + assert!(result.is_err()); } #[test] @@ -710,14 +714,15 @@ mod tests { .sign_wrapper(keypair.clone()); // create a client with the message - tx_host_env::ibc::ibc_actions(tx::ctx()) + let verifiers = Rc::new(RefCell::new(BTreeSet::
::new())); + tx_host_env::ibc::ibc_actions(tx::ctx(), verifiers) .execute(&tx_data) .expect("creating a client failed"); // Check let mut env = tx_host_env::take(); let result = ibc::validate_ibc_vp_from_tx(&env, &tx); - assert!(result.expect("validation failed unexpectedly")); + assert!(result.is_ok()); // Commit env.commit_tx_and_block(); @@ -743,14 +748,15 @@ mod tests { .sign_raw(keypairs, pks_map, None) .sign_wrapper(keypair); // update the client with the message - tx_host_env::ibc::ibc_actions(tx::ctx()) + let verifiers = Rc::new(RefCell::new(BTreeSet::
::new())); + tx_host_env::ibc::ibc_actions(tx::ctx(), verifiers) .execute(&tx_data) .expect("updating a client failed"); // Check let env = tx_host_env::take(); let result = ibc::validate_ibc_vp_from_tx(&env, &tx); - assert!(result.expect("validation failed unexpectedly")); + assert!(result.is_ok()); } #[test] @@ -783,14 +789,15 @@ mod tests { .sign_raw(keypairs.clone(), pks_map.clone(), None) .sign_wrapper(keypair.clone()); // init a connection with the message - tx_host_env::ibc::ibc_actions(tx::ctx()) + let verifiers = Rc::new(RefCell::new(BTreeSet::
::new())); + tx_host_env::ibc::ibc_actions(tx::ctx(), verifiers) .execute(&tx_data) .expect("creating a connection failed"); // Check let mut env = tx_host_env::take(); let result = ibc::validate_ibc_vp_from_tx(&env, &tx); - assert!(result.expect("validation failed unexpectedly")); + assert!(result.is_ok()); // Commit env.commit_tx_and_block(); @@ -816,14 +823,15 @@ mod tests { .sign_raw(keypairs, pks_map, None) .sign_wrapper(keypair); // open the connection with the message - tx_host_env::ibc::ibc_actions(tx::ctx()) + let verifiers = Rc::new(RefCell::new(BTreeSet::
::new())); + tx_host_env::ibc::ibc_actions(tx::ctx(), verifiers) .execute(&tx_data) .expect("opening the connection failed"); // Check let env = tx_host_env::take(); let result = ibc::validate_ibc_vp_from_tx(&env, &tx); - assert!(result.expect("validation failed unexpectedly")); + assert!(result.is_ok()); } #[test] @@ -857,14 +865,15 @@ mod tests { .sign_raw(keypairs.clone(), pks_map.clone(), None) .sign_wrapper(keypair.clone()); // open try a connection with the message - tx_host_env::ibc::ibc_actions(tx::ctx()) + let verifiers = Rc::new(RefCell::new(BTreeSet::
::new())); + tx_host_env::ibc::ibc_actions(tx::ctx(), verifiers) .execute(&tx_data) .expect("creating a connection failed"); // Check let mut env = tx_host_env::take(); let result = ibc::validate_ibc_vp_from_tx(&env, &tx); - assert!(result.expect("validation failed unexpectedly")); + assert!(result.is_ok()); // Commit env.commit_tx_and_block(); @@ -890,14 +899,15 @@ mod tests { .sign_raw(keypairs, pks_map, None) .sign_wrapper(keypair); // open the connection with the mssage - tx_host_env::ibc::ibc_actions(tx::ctx()) + let verifiers = Rc::new(RefCell::new(BTreeSet::
::new())); + tx_host_env::ibc::ibc_actions(tx::ctx(), verifiers) .execute(&tx_data) .expect("opening the connection failed"); // Check let env = tx_host_env::take(); let result = ibc::validate_ibc_vp_from_tx(&env, &tx); - assert!(result.expect("validation failed unexpectedly")); + assert!(result.is_ok()); } #[test] @@ -933,14 +943,15 @@ mod tests { .sign_raw(keypairs.clone(), pks_map.clone(), None) .sign_wrapper(keypair.clone()); // init a channel with the message - tx_host_env::ibc::ibc_actions(tx::ctx()) + let verifiers = Rc::new(RefCell::new(BTreeSet::
::new())); + tx_host_env::ibc::ibc_actions(tx::ctx(), verifiers) .execute(&tx_data) .expect("creating a channel failed"); // Check let mut env = tx_host_env::take(); let result = ibc::validate_ibc_vp_from_tx(&env, &tx); - assert!(result.expect("validation failed unexpectedly")); + assert!(result.is_ok()); // Commit env.commit_tx_and_block(); @@ -966,14 +977,15 @@ mod tests { .sign_raw(keypairs, pks_map, None) .sign_wrapper(keypair); // open the channel with the message - tx_host_env::ibc::ibc_actions(tx::ctx()) + let verifiers = Rc::new(RefCell::new(BTreeSet::
::new())); + tx_host_env::ibc::ibc_actions(tx::ctx(), verifiers) .execute(&tx_data) .expect("opening the channel failed"); // Check let env = tx_host_env::take(); let result = ibc::validate_ibc_vp_from_tx(&env, &tx); - assert!(result.expect("validation failed unexpectedly")); + assert!(result.is_ok()); } #[test] @@ -1009,14 +1021,15 @@ mod tests { .sign_raw(keypairs.clone(), pks_map.clone(), None) .sign_wrapper(keypair.clone()); // try open a channel with the message - tx_host_env::ibc::ibc_actions(tx::ctx()) + let verifiers = Rc::new(RefCell::new(BTreeSet::
::new())); + tx_host_env::ibc::ibc_actions(tx::ctx(), verifiers) .execute(&tx_data) .expect("creating a channel failed"); // Check let mut env = tx_host_env::take(); let result = ibc::validate_ibc_vp_from_tx(&env, &tx); - assert!(result.expect("validation failed unexpectedly")); + assert!(result.is_ok()); // Commit env.commit_tx_and_block(); @@ -1043,14 +1056,15 @@ mod tests { .sign_raw(keypairs, pks_map, None) .sign_wrapper(keypair); // open a channel with the message - tx_host_env::ibc::ibc_actions(tx::ctx()) + let verifiers = Rc::new(RefCell::new(BTreeSet::
::new())); + tx_host_env::ibc::ibc_actions(tx::ctx(), verifiers) .execute(&tx_data) .expect("opening the channel failed"); // Check let env = tx_host_env::take(); let result = ibc::validate_ibc_vp_from_tx(&env, &tx); - assert!(result.expect("validation failed unexpectedly")); + assert!(result.is_ok()); } #[test] @@ -1089,10 +1103,13 @@ mod tests { .sign_raw(keypairs, pks_map, None) .sign_wrapper(keypair); // close the channel with the message - let mut actions = tx_host_env::ibc::ibc_actions(tx::ctx()); + let verifiers = Rc::new(RefCell::new(BTreeSet::
::new())); + let mut actions = tx_host_env::ibc::ibc_actions(tx::ctx(), verifiers); // the dummy module closes the channel let dummy_module = DummyTransferModule {}; - actions.add_transfer_module(dummy_module.module_id(), dummy_module); + actions.add_transfer_module(dummy_module); + let dummy_module = DummyNftTransferModule {}; + actions.add_transfer_module(dummy_module); actions .execute(&tx_data) .expect("closing the channel failed"); @@ -1144,14 +1161,15 @@ mod tests { .sign_wrapper(keypair); // close the channel with the message - tx_host_env::ibc::ibc_actions(tx::ctx()) + let verifiers = Rc::new(RefCell::new(BTreeSet::
::new())); + tx_host_env::ibc::ibc_actions(tx::ctx(), verifiers) .execute(&tx_data) .expect("closing the channel failed"); // Check let env = tx_host_env::take(); let result = ibc::validate_ibc_vp_from_tx(&env, &tx); - assert!(result.expect("validation failed unexpectedly")); + assert!(result.is_ok()); } #[test] @@ -1182,11 +1200,7 @@ mod tests { // Start a transaction to send a packet let msg = ibc::msg_transfer(port_id, channel_id, token.to_string(), &sender); - let mut tx_data = vec![]; - msg.clone() - .to_any() - .encode(&mut tx_data) - .expect("encoding failed"); + let tx_data = msg.serialize_to_vec(); let mut tx = Tx::new(ChainId::default(), None); tx.add_code(vec![], None) @@ -1194,14 +1208,15 @@ mod tests { .sign_raw(keypairs.clone(), pks_map.clone(), None) .sign_wrapper(keypair.clone()); // send the token and a packet with the data - tx_host_env::ibc::ibc_actions(tx::ctx()) + let verifiers = Rc::new(RefCell::new(BTreeSet::
::new())); + tx_host_env::ibc::ibc_actions(tx::ctx(), verifiers) .execute(&tx_data) .expect("sending a token failed"); // Check let mut env = tx_host_env::take(); let result = ibc::validate_ibc_vp_from_tx(&env, &tx); - assert!(result.expect("validation failed unexpectedly")); + assert!(result.is_ok()); // Check if the token was escrowed let escrow = token::storage_key::balance_key( &token, @@ -1209,7 +1224,7 @@ mod tests { ); let token_vp_result = ibc::validate_multitoken_vp_from_tx(&env, &tx, &escrow); - assert!(token_vp_result.expect("token validation failed unexpectedly")); + assert!(token_vp_result.is_ok()); // Commit env.commit_tx_and_block(); @@ -1227,7 +1242,7 @@ mod tests { // Start the next transaction for receiving an ack let counterparty = ibc::dummy_channel_counterparty(); let packet = ibc::packet_from_message( - &msg, + &msg.message, ibc::Sequence::from(1), &counterparty, ); @@ -1241,14 +1256,15 @@ mod tests { .sign_raw(keypairs, pks_map, None) .sign_wrapper(keypair); // ack the packet with the message - tx_host_env::ibc::ibc_actions(tx::ctx()) + let verifiers = Rc::new(RefCell::new(BTreeSet::
::new())); + tx_host_env::ibc::ibc_actions(tx::ctx(), verifiers) .execute(&tx_data) .expect("ack failed"); // Check let env = tx_host_env::take(); let result = ibc::validate_ibc_vp_from_tx(&env, &tx); - assert!(result.expect("validation failed unexpectedly")); + assert!(result.is_ok()); // Check the balance tx_host_env::set(env); let balance_key = token::storage_key::balance_key(&token, &sender); @@ -1304,6 +1320,10 @@ mod tests { minter_key, Address::Internal(InternalAddress::Ibc).serialize_to_vec(), ); + let mint_amount_key = ibc_storage::mint_amount_key(&ibc_token); + let init_bal = Amount::from_u64(100); + writes.insert(mint_amount_key, init_bal.serialize_to_vec()); + writes.insert(minted_key.clone(), init_bal.serialize_to_vec()); writes.into_iter().for_each(|(key, val)| { tx_host_env::with(|env| { env.state.write_bytes(&key, &val).expect("write error"); @@ -1313,8 +1333,7 @@ mod tests { // Start a transaction to send a packet // Set this chain is the sink zone let msg = ibc::msg_transfer(port_id, channel_id, denom, &sender); - let mut tx_data = vec![]; - msg.to_any().encode(&mut tx_data).expect("encoding failed"); + let tx_data = msg.serialize_to_vec(); let mut tx = Tx::new(ChainId::default(), None); tx.add_code(vec![], None) @@ -1322,18 +1341,27 @@ mod tests { .sign_raw(keypairs, pks_map, None) .sign_wrapper(keypair); // send the token and a packet with the data - tx_host_env::ibc::ibc_actions(tx::ctx()) + let verifiers = Rc::new(RefCell::new(BTreeSet::
::new())); + tx_host_env::ibc::ibc_actions(tx::ctx(), verifiers) .execute(&tx_data) .expect("sending a token failed"); // Check - let env = tx_host_env::take(); + let mut env = tx_host_env::take(); + // The token must be part of the verifier set (checked by MultitokenVp) + env.verifiers.insert(ibc_token); let result = ibc::validate_ibc_vp_from_tx(&env, &tx); - assert!(result.expect("validation failed unexpectedly")); + assert!( + result.is_ok(), + "Expected VP to accept the tx, got {result:?}" + ); // Check if the token was burned let result = ibc::validate_multitoken_vp_from_tx(&env, &tx, &minted_key); - assert!(result.expect("token validation failed unexpectedly")); + assert!( + result.is_ok(), + "Expected VP to accept the tx, got {result:?}" + ); // Check the balance tx_host_env::set(env); let balance: Option = tx_host_env::with(|env| { @@ -1392,21 +1420,33 @@ mod tests { .sign_raw(keypairs, pks_map, None) .sign_wrapper(keypair); // receive a packet with the message - tx_host_env::ibc::ibc_actions(tx::ctx()) + let verifiers = Rc::new(RefCell::new(BTreeSet::
::new())); + tx_host_env::ibc::ibc_actions(tx::ctx(), verifiers) .execute(&tx_data) .expect("receiving the token failed"); // Check - let env = tx_host_env::take(); + let mut env = tx_host_env::take(); let result = ibc::validate_ibc_vp_from_tx(&env, &tx); - assert!(result.expect("validation failed unexpectedly")); + assert!(result.is_ok()); // Check if the token was minted + // The token must be part of the verifier set (checked by MultitokenVp) let denom = format!("{}/{}/{}", port_id, channel_id, token); let ibc_token = ibc::ibc_token(&denom); + env.verifiers.insert(ibc_token.clone()); + let result = ibc::validate_ibc_vp_from_tx(&env, &tx); + assert!( + result.is_ok(), + "Expected VP to accept the tx, got {result:?}" + ); + // Check if the token was minted let minted_key = token::storage_key::minted_balance_key(&ibc_token); let result = ibc::validate_multitoken_vp_from_tx(&env, &tx, &minted_key); - assert!(result.expect("token validation failed unexpectedly")); + assert!( + result.is_ok(), + "Expected VP to accept the tx, got {result:?}" + ); // Check the balance tx_host_env::set(env); let key = ibc::balance_key_with_ibc_prefix(denom, &receiver); @@ -1467,14 +1507,15 @@ mod tests { .sign_raw(keypairs, pks_map, None) .sign_wrapper(keypair); // Receive the packet, but no token is received - tx_host_env::ibc::ibc_actions(tx::ctx()) + let verifiers = Rc::new(RefCell::new(BTreeSet::
::new())); + tx_host_env::ibc::ibc_actions(tx::ctx(), verifiers) .execute(&tx_data) .expect("receiving the token failed"); // Check if the transaction is valid let env = tx_host_env::take(); let result = ibc::validate_ibc_vp_from_tx(&env, &tx); - assert!(result.expect("validation failed unexpectedly")); + assert!(result.is_ok()); // Check if the ack has an error due to the invalid packet data tx_host_env::set(env); let ack_key = ibc_storage::ack_key(&port_id, &channel_id, sequence); @@ -1559,18 +1600,19 @@ mod tests { .sign_raw(keypairs, pks_map, None) .sign_wrapper(keypair); // receive a packet with the message - tx_host_env::ibc::ibc_actions(tx::ctx()) + let verifiers = Rc::new(RefCell::new(BTreeSet::
::new())); + tx_host_env::ibc::ibc_actions(tx::ctx(), verifiers) .execute(&tx_data) .expect("receiving a token failed"); // Check let env = tx_host_env::take(); let result = ibc::validate_ibc_vp_from_tx(&env, &tx); - assert!(result.expect("validation failed unexpectedly")); + assert!(result.is_ok()); // Check if the token was unescrowed let result = ibc::validate_multitoken_vp_from_tx(&env, &tx, &escrow_key); - assert!(result.expect("token validation failed unexpectedly")); + assert!(result.is_ok()); // Check the balance tx_host_env::set(env); let key = token::storage_key::balance_key(&token, &receiver); @@ -1657,18 +1699,19 @@ mod tests { .sign_raw(keypairs, pks_map, None) .sign_wrapper(keypair); // receive a packet with the message - tx_host_env::ibc::ibc_actions(tx::ctx()) + let verifiers = Rc::new(RefCell::new(BTreeSet::
::new())); + tx_host_env::ibc::ibc_actions(tx::ctx(), verifiers) .execute(&tx_data) .expect("receiving a token failed"); // Check let env = tx_host_env::take(); let result = ibc::validate_ibc_vp_from_tx(&env, &tx); - assert!(result.expect("validation failed unexpectedly")); + assert!(result.is_ok()); // Check if the token was unescrowed let result = ibc::validate_multitoken_vp_from_tx(&env, &tx, &escrow_key); - assert!(result.expect("token validation failed unexpectedly")); + assert!(result.is_ok()); // Check the balance tx_host_env::set(env); // without the source trace path @@ -1712,14 +1755,11 @@ mod tests { // Start a transaction to send a packet let mut msg = ibc::msg_transfer(port_id, channel_id, token.to_string(), &sender); - ibc::set_timeout_timestamp(&mut msg); - let mut tx_data = vec![]; - msg.clone() - .to_any() - .encode(&mut tx_data) - .expect("encoding failed"); + ibc::set_timeout_timestamp(&mut msg.message); + let tx_data = msg.serialize_to_vec(); // send a packet with the message - tx_host_env::ibc::ibc_actions(tx::ctx()) + let verifiers = Rc::new(RefCell::new(BTreeSet::
::new())); + tx_host_env::ibc::ibc_actions(tx::ctx(), verifiers) .execute(&tx_data) .expect("sending a token failed"); @@ -1740,7 +1780,7 @@ mod tests { // Start a transaction to notify the timeout let counterparty = ibc::dummy_channel_counterparty(); let packet = ibc::packet_from_message( - &msg, + &msg.message, ibc::Sequence::from(1), &counterparty, ); @@ -1754,21 +1794,22 @@ mod tests { .sign_wrapper(keypair); // timeout the packet - tx_host_env::ibc::ibc_actions(tx::ctx()) + let verifiers = Rc::new(RefCell::new(BTreeSet::
::new())); + tx_host_env::ibc::ibc_actions(tx::ctx(), verifiers) .execute(&tx_data) .expect("timeout failed"); // Check let env = tx_host_env::take(); let result = ibc::validate_ibc_vp_from_tx(&env, &tx); - assert!(result.expect("validation failed unexpectedly")); + assert!(result.is_ok()); // Check if the token was refunded let escrow = token::storage_key::balance_key( &token, &address::Address::Internal(address::InternalAddress::Ibc), ); let result = ibc::validate_multitoken_vp_from_tx(&env, &tx, &escrow); - assert!(result.expect("token validation failed unexpectedly")); + assert!(result.is_ok()); } #[test] @@ -1799,13 +1840,10 @@ mod tests { // Start a transaction to send a packet let msg = ibc::msg_transfer(port_id, channel_id, token.to_string(), &sender); - let mut tx_data = vec![]; - msg.clone() - .to_any() - .encode(&mut tx_data) - .expect("encoding failed"); + let tx_data = msg.serialize_to_vec(); // send a packet with the message - tx_host_env::ibc::ibc_actions(tx::ctx()) + let verifiers = Rc::new(RefCell::new(BTreeSet::
::new())); + tx_host_env::ibc::ibc_actions(tx::ctx(), verifiers) .execute(&tx_data) .expect("sending a token failed"); @@ -1826,7 +1864,7 @@ mod tests { // Start a transaction to notify the timing-out on closed let counterparty = ibc::dummy_channel_counterparty(); let packet = ibc::packet_from_message( - &msg, + &msg.message, ibc::Sequence::from(1), &counterparty, ); @@ -1840,20 +1878,21 @@ mod tests { .sign_wrapper(keypair); // timeout the packet - tx_host_env::ibc::ibc_actions(tx::ctx()) + let verifiers = Rc::new(RefCell::new(BTreeSet::
::new())); + tx_host_env::ibc::ibc_actions(tx::ctx(), verifiers) .execute(&tx_data) .expect("timeout on close failed"); // Check let env = tx_host_env::take(); let result = ibc::validate_ibc_vp_from_tx(&env, &tx); - assert!(result.expect("validation failed unexpectedly")); + assert!(result.is_ok()); // Check if the token was refunded let escrow = token::storage_key::balance_key( &token, &address::Address::Internal(address::InternalAddress::Ibc), ); let result = ibc::validate_multitoken_vp_from_tx(&env, &tx, &escrow); - assert!(result.expect("token validation failed unexpectedly")); + assert!(result.is_ok()); } } diff --git a/crates/tests/src/vm_host_env/tx.rs b/crates/tests/src/vm_host_env/tx.rs index 7ef5a5e418..3223316bd4 100644 --- a/crates/tests/src/vm_host_env/tx.rs +++ b/crates/tests/src/vm_host_env/tx.rs @@ -53,6 +53,7 @@ pub struct TestTxEnv { pub sentinel: RefCell, pub tx_index: TxIndex, pub result_buffer: Option>, + pub yielded_value: Option>, pub vp_wasm_cache: VpCache, pub vp_cache_dir: TempDir, pub tx_wasm_cache: TxCache, @@ -72,12 +73,13 @@ impl Default for TestTxEnv { state, iterators: PrefixIterators::default(), gas_meter: RefCell::new(TxGasMeter::new_from_sub_limit( - 100_000_000.into(), + 100_000_000_000.into(), )), sentinel: RefCell::new(TxSentinel::default()), tx_index: TxIndex::default(), verifiers: BTreeSet::default(), result_buffer: None, + yielded_value: None, vp_wasm_cache, vp_cache_dir, tx_wasm_cache, @@ -340,6 +342,7 @@ mod native_tx_host_env { gas_meter, sentinel, result_buffer, + yielded_value, tx_index, vp_wasm_cache, vp_cache_dir: _, @@ -357,6 +360,7 @@ mod native_tx_host_env { tx, tx_index, result_buffer, + yielded_value, vp_wasm_cache, tx_wasm_cache, ); @@ -382,6 +386,7 @@ mod native_tx_host_env { gas_meter, sentinel, result_buffer, + yielded_value, vp_wasm_cache, vp_cache_dir: _, tx_wasm_cache, @@ -398,6 +403,7 @@ mod native_tx_host_env { tx, tx_index, result_buffer, + yielded_value, vp_wasm_cache, tx_wasm_cache, ); @@ -422,6 
+428,7 @@ mod native_tx_host_env { gas_meter, sentinel, result_buffer, + yielded_value, tx_index, vp_wasm_cache, vp_cache_dir: _, @@ -439,6 +446,7 @@ mod native_tx_host_env { tx, tx_index, result_buffer, + yielded_value, vp_wasm_cache, tx_wasm_cache, ); @@ -454,6 +462,7 @@ mod native_tx_host_env { // Implement all the exported functions from // [`namada_vm_env::imports::tx`] `extern "C"` section. native_host_fn!(tx_read(key_ptr: u64, key_len: u64) -> i64); + native_host_fn!(tx_read_temp(key_ptr: u64, key_len: u64) -> i64); native_host_fn!(tx_result_buffer(result_ptr: u64)); native_host_fn!(tx_has_key(key_ptr: u64, key_len: u64) -> i64); native_host_fn!(tx_write( @@ -485,6 +494,8 @@ mod native_tx_host_env { code_hash_len: u64, code_tag_ptr: u64, code_tag_len: u64, + entropy_source_ptr: u64, + entropy_source_len: u64, result_ptr: u64 )); native_host_fn!(tx_emit_ibc_event(event_ptr: u64, event_len: u64)); @@ -509,6 +520,10 @@ mod native_tx_host_env { max_signatures_ptr: u64, max_signatures_len: u64, ) -> i64); + native_host_fn!(tx_yield_value( + buf_ptr: u64, + buf_len: u64, + )); } #[cfg(test)] @@ -723,6 +738,7 @@ mod tests { gas_meter, sentinel, result_buffer, + yielded_value, tx_index, vp_wasm_cache, vp_cache_dir: _, @@ -740,6 +756,7 @@ mod tests { tx, tx_index, result_buffer, + yielded_value, vp_wasm_cache, tx_wasm_cache, ); diff --git a/crates/tests/src/vm_host_env/vp.rs b/crates/tests/src/vm_host_env/vp.rs index d36e73dce9..82eab877a6 100644 --- a/crates/tests/src/vm_host_env/vp.rs +++ b/crates/tests/src/vm_host_env/vp.rs @@ -12,7 +12,6 @@ use namada::tx::Tx; use namada::vm::prefix_iter::PrefixIterators; use namada::vm::wasm::{self, VpCache}; use namada::vm::{self, WasmCacheRwAccess}; -use namada_tx_prelude::validity_predicate::VpSentinel; use namada_vp_prelude::Ctx; use tempfile::TempDir; @@ -44,13 +43,13 @@ pub struct TestVpEnv { pub state: TestState, pub iterators: PrefixIterators<'static, MockDB>, pub gas_meter: RefCell, - pub sentinel: RefCell, pub tx: Tx, 
pub tx_index: TxIndex, pub keys_changed: BTreeSet, pub verifiers: BTreeSet
, pub eval_runner: native_vp_host_env::VpEval, pub result_buffer: Option>, + pub yielded_value: Option>, pub vp_wasm_cache: VpCache, pub vp_cache_dir: TempDir, } @@ -73,15 +72,15 @@ impl Default for TestVpEnv { state, iterators: PrefixIterators::default(), gas_meter: RefCell::new(VpGasMeter::new_from_tx_meter( - &TxGasMeter::new_from_sub_limit(10_000_000_000.into()), + &TxGasMeter::new_from_sub_limit(1_000_000_000_000.into()), )), - sentinel: RefCell::new(VpSentinel::default()), tx, tx_index: TxIndex::default(), keys_changed: BTreeSet::default(), verifiers: BTreeSet::default(), eval_runner, result_buffer: None, + yielded_value: None, vp_wasm_cache, vp_cache_dir, } @@ -254,13 +253,13 @@ mod native_vp_host_env { state, iterators, gas_meter, - sentinel, tx, tx_index, keys_changed, verifiers, eval_runner, result_buffer, + yielded_value, vp_wasm_cache, vp_cache_dir: _, }: &mut TestVpEnv| { @@ -270,11 +269,11 @@ mod native_vp_host_env { state, iterators, gas_meter, - sentinel, tx, tx_index, verifiers, result_buffer, + yielded_value, keys_changed, eval_runner, vp_wasm_cache, @@ -298,13 +297,13 @@ mod native_vp_host_env { state, iterators, gas_meter, - sentinel, tx, tx_index, keys_changed, verifiers, eval_runner, result_buffer, + yielded_value, vp_wasm_cache, vp_cache_dir: _, }: &mut TestVpEnv| { @@ -314,11 +313,11 @@ mod native_vp_host_env { state, iterators, gas_meter, - sentinel, tx, tx_index, verifiers, result_buffer, + yielded_value, keys_changed, eval_runner, vp_wasm_cache, @@ -370,6 +369,7 @@ mod native_vp_host_env { threshold: u8, max_signatures_ptr: u64, max_signatures_len: u64, - ) -> i64); + )); native_host_fn!(vp_charge_gas(used_gas: u64)); + native_host_fn!(vp_yield_value(buf_ptr: u64, buf_len: u64)); } diff --git a/crates/trans_token/src/inflation.rs b/crates/trans_token/src/inflation.rs deleted file mode 100644 index b9be694a20..0000000000 --- a/crates/trans_token/src/inflation.rs +++ /dev/null @@ -1,397 +0,0 @@ -//! 
General inflation system that will be used to process rewards for -//! proof-of-stake, providing liquity to shielded asset pools, and public goods -//! funding. - -use namada_core::dec::Dec; -use namada_core::uint::Uint; - -/// Holds the PD controller values that should be updated in storage -#[allow(missing_docs)] -pub struct PosValsToUpdate { - pub locked_ratio: Dec, - pub inflation: Uint, -} - -/// Holds the PD controller values that should be updated in storage -#[allow(missing_docs)] -pub struct ShieldedValsToUpdate { - pub inflation: Uint, -} - -/// PD controller used to dynamically adjust the rewards rates -#[derive(Debug, Clone)] -pub struct PosRewardsController { - /// Locked token amount in the relevant system - pub locked_tokens: Uint, - /// Total native token supply - pub total_native_tokens: Uint, - /// PD target locked ratio - pub locked_ratio_target: Dec, - /// PD last locked ratio - pub locked_ratio_last: Dec, - /// Maximum reward rate - pub max_reward_rate: Dec, - /// Last inflation amount - pub last_inflation_amount: Uint, - /// Nominal proportional gain - pub p_gain_nom: Dec, - /// Nominal derivative gain - pub d_gain_nom: Dec, - /// Number of epochs per year - pub epochs_per_year: u64, -} - -impl PosRewardsController { - /// Calculate a new inflation rate for the Proof-of-stake rewards system. - /// Uses the ratios of locked (staked) tokens to the total native token - /// supply to determine the new inflation amount. 
- pub fn run(self) -> PosValsToUpdate { - let Self { - locked_tokens, - total_native_tokens, - locked_ratio_target, - locked_ratio_last, - max_reward_rate, - last_inflation_amount, - p_gain_nom, - d_gain_nom, - epochs_per_year, - } = self; - - // Token amounts must be expressed in terms of the raw amount - // to properly run the PD controller - let locked = Dec::try_from(locked_tokens) - .expect("Should not fail to convert Uint to Dec"); - let total_native = Dec::try_from(total_native_tokens) - .expect("Should not fail to convert Uint to Dec"); - let last_inflation_amount = Dec::try_from(last_inflation_amount) - .expect("Should not fail to convert Uint to Dec"); - - let epochs_py: Dec = epochs_per_year.into(); - - // Staked ratio - let locked_ratio = if total_native.is_zero() { - Dec::one() - } else { - locked / total_native - }; - - // Max inflation amount for this epoch - let max_inflation = total_native * max_reward_rate / epochs_py; - - // Intermediate values - let p_gain = p_gain_nom * max_inflation; - let d_gain = d_gain_nom * max_inflation; - let error = locked_ratio_target - locked_ratio; - let delta_error = locked_ratio_last - locked_ratio; - let control_val = p_gain * error - d_gain * delta_error; - - // New inflation amount - let new_inflation_amount_raw = last_inflation_amount + control_val; - let new_inflation_amount = if new_inflation_amount_raw.is_negative() { - Uint::zero() - } else { - new_inflation_amount_raw - .to_uint() - .expect("Should not fail to convert Dec to Uint") - }; - let max_inflation = max_inflation - .to_uint() - .expect("Should not fail to convert Dec to Uint"); - - let inflation = std::cmp::min(new_inflation_amount, max_inflation); - PosValsToUpdate { - locked_ratio, - inflation, - } - } -} - -/// PD controller used to dynamically adjust the rewards rates -#[derive(Debug, Clone)] -pub struct ShieldedRewardsController { - /// Locked token amount in the relevant system - pub locked_tokens: Uint, - /// Total native token supply - pub 
total_native_tokens: Uint, - /// PD target locked amount - pub locked_tokens_target: Uint, - /// PD last locked amount - pub locked_tokens_last: Uint, - /// Maximum reward rate - pub max_reward_rate: Dec, - /// Last inflation amount - pub last_inflation_amount: Uint, - /// Nominal proportional gain - pub p_gain_nom: Dec, - /// Nominal derivative gain - pub d_gain_nom: Dec, - /// Number of epochs per year - pub epochs_per_year: u64, -} - -impl ShieldedRewardsController { - /// Calculate a new inflation rate for the Proof-of-stake rewards system. - /// Uses the ratios of locked (staked) tokens to the total native token - /// supply to determine the new inflation amount. - pub fn run(self) -> ShieldedValsToUpdate { - let Self { - locked_tokens, - total_native_tokens, - locked_tokens_target, - locked_tokens_last, - max_reward_rate, - last_inflation_amount, - p_gain_nom, - d_gain_nom, - epochs_per_year, - } = self; - - // Token amounts must be expressed in terms of the raw amount - // to properly run the PD controller - let locked = Dec::try_from(locked_tokens) - .expect("Should not fail to convert Uint to Dec"); - let locked_amount_target = Dec::try_from(locked_tokens_target) - .expect("Should not fail to convert Uint to Dec"); - let locked_amount_last = Dec::try_from(locked_tokens_last) - .expect("Should not fail to convert Uint to Dec"); - let total_native = Dec::try_from(total_native_tokens) - .expect("Should not fail to convert Uint to Dec"); - let last_inflation_amount = Dec::try_from(last_inflation_amount) - .expect("Should not fail to convert Uint to Dec"); - - let epochs_py: Dec = epochs_per_year.into(); - - // Max inflation amount for this epoch - let max_inflation = total_native * max_reward_rate / epochs_py; - - // Intermediate values - let p_gain = p_gain_nom * max_reward_rate / epochs_py; - let d_gain = d_gain_nom * max_reward_rate / epochs_py; - let error = locked_amount_target - locked; - let delta_error = locked_amount_last - locked; - let control_val = 
p_gain * error - d_gain * delta_error; - - // New inflation amount - let new_inflation_amount_raw = last_inflation_amount + control_val; - let new_inflation_amount = if new_inflation_amount_raw.is_negative() { - Uint::zero() - } else { - new_inflation_amount_raw - .to_uint() - .expect("Should not fail to convert Dec to Uint") - }; - let max_inflation = max_inflation - .to_uint() - .expect("Should not fail to convert Dec to Uint"); - - let inflation = std::cmp::min(new_inflation_amount, max_inflation); - ShieldedValsToUpdate { inflation } - } -} - -#[cfg(test)] -mod test { - use std::str::FromStr; - - use super::*; - - #[test] - fn test_inflation_calc_up() { - let mut controller = PosRewardsController { - locked_tokens: Uint::from(2_000_000_000), - total_native_tokens: Uint::from(4_000_000_000_u64), - locked_ratio_target: Dec::from_str("0.66666666").unwrap(), - locked_ratio_last: Dec::from_str("0.5").unwrap(), - max_reward_rate: Dec::from_str("0.1").unwrap(), - last_inflation_amount: Uint::zero(), - p_gain_nom: Dec::from_str("0.1").unwrap(), - d_gain_nom: Dec::from_str("0.1").unwrap(), - epochs_per_year: 365, - }; - dbg!(&controller); - - let PosValsToUpdate { - locked_ratio: locked_ratio_0, - inflation: inflation_0, - } = controller.clone().run(); - println!( - "Round 0: Locked ratio: {locked_ratio_0}, inflation: {inflation_0}" - ); - assert_eq!(locked_ratio_0, Dec::from_str("0.5").unwrap()); - assert_eq!(inflation_0, Uint::from(18_264)); - - controller.locked_ratio_last = locked_ratio_0; - controller.last_inflation_amount = inflation_0; - controller.locked_tokens += inflation_0; - - let PosValsToUpdate { - locked_ratio: locked_ratio_1, - inflation: inflation_1, - } = controller.clone().run(); - println!( - "Round 1: Locked ratio: {locked_ratio_1}, inflation: {inflation_1}" - ); - assert!(locked_ratio_1 > locked_ratio_0); - assert!(locked_ratio_1 > Dec::from_str("0.5").unwrap()); - assert!(locked_ratio_1 < Dec::from_str("0.51").unwrap()); - assert_eq!(inflation_1, 
Uint::from(36_528)); - - controller.locked_ratio_last = locked_ratio_1; - controller.last_inflation_amount = inflation_1; - controller.locked_tokens += inflation_1; - - let PosValsToUpdate { - locked_ratio: locked_ratio_2, - inflation: inflation_2, - } = controller.run(); - println!( - "Round 2: Locked ratio: {locked_ratio_2}, inflation: {inflation_2}", - ); - assert!(locked_ratio_2 > locked_ratio_1); - assert!(locked_ratio_2 > Dec::from_str("0.5").unwrap()); - assert!(locked_ratio_2 < Dec::from_str("0.51").unwrap()); - assert_eq!(inflation_2, Uint::from(54_792)); - } - - #[test] - fn test_inflation_calc_down() { - let mut controller = PosRewardsController { - locked_tokens: Uint::from(900_000_000), - total_native_tokens: Uint::from(1_000_000_000), - locked_ratio_target: Dec::from_str("0.66666666").unwrap(), - locked_ratio_last: Dec::from_str("0.9").unwrap(), - max_reward_rate: Dec::from_str("0.1").unwrap(), - last_inflation_amount: Uint::from(10_000), - p_gain_nom: Dec::from_str("0.1").unwrap(), - d_gain_nom: Dec::from_str("0.1").unwrap(), - epochs_per_year: 365, - }; - dbg!(&controller); - - let PosValsToUpdate { - locked_ratio: locked_ratio_0, - inflation: inflation_0, - } = controller.clone().run(); - println!( - "Round 0: Locked ratio: {locked_ratio_0}, inflation: {inflation_0}", - ); - assert_eq!(locked_ratio_0, Dec::from_str("0.9").unwrap()); - assert_eq!(inflation_0, Uint::from(3_607)); - - controller.locked_ratio_last = locked_ratio_0; - controller.last_inflation_amount = inflation_0; - controller.locked_tokens += inflation_0; - - let PosValsToUpdate { - locked_ratio: locked_ratio_1, - inflation: inflation_1, - } = controller.clone().run(); - println!( - "Round 1: Locked ratio: {locked_ratio_1}, inflation: {inflation_1}", - ); - assert!(locked_ratio_1 > locked_ratio_0); - assert!(locked_ratio_1 > Dec::from_str("0.9").unwrap()); - assert!(locked_ratio_1 < Dec::from_str("0.91").unwrap()); - assert_eq!(inflation_1, Uint::zero()); - - 
controller.locked_ratio_last = locked_ratio_1; - controller.last_inflation_amount = inflation_1; - controller.locked_tokens += inflation_1; - - let PosValsToUpdate { - locked_ratio: locked_ratio_2, - inflation: inflation_2, - } = controller.run(); - println!( - "Round 2: Locked ratio: {locked_ratio_2}, inflation: {inflation_2}", - ); - assert_eq!(locked_ratio_2, locked_ratio_1); - assert_eq!(inflation_2, Uint::zero()); - } - - #[test] - fn test_inflation_playground() { - let init_locked_ratio = Dec::from_str("0.1").unwrap(); - let total_tokens = 1_000_000_000_000_000_u64; - let epochs_per_year = 365_u64; - - let staking_growth = Dec::from_str("0.04").unwrap(); - // let mut do_add = true; - - // let a = (init_locked_ratio * total_tokens).to_uint().unwrap(); - let num_rounds = 100; - - let mut controller = PosRewardsController { - locked_tokens: (init_locked_ratio * total_tokens) - .to_uint() - .unwrap(), - total_native_tokens: Uint::from(total_tokens), - locked_ratio_target: Dec::from_str("0.66666666").unwrap(), - locked_ratio_last: init_locked_ratio, - max_reward_rate: Dec::from_str("0.1").unwrap(), - last_inflation_amount: Uint::zero(), - p_gain_nom: Dec::from_str("0.25").unwrap(), - d_gain_nom: Dec::from_str("0.25").unwrap(), - epochs_per_year, - }; - dbg!(&controller); - - for round in 0..num_rounds { - let PosValsToUpdate { - locked_ratio, - inflation, - } = controller.clone().run(); - let rate = Dec::try_from(inflation).unwrap() - * Dec::from(epochs_per_year) - / Dec::from(total_tokens); - println!( - "Round {round}: Locked ratio: {locked_ratio}, inflation rate: \ - {rate}", - ); - controller.last_inflation_amount = inflation; - controller.total_native_tokens += inflation; - - // if rate.abs_diff(&controller.max_reward_rate) - // < Dec::from_str("0.01").unwrap() - // { - // controller.locked_tokens = controller.total_tokens; - // } - - let tot_tokens = - u64::try_from(controller.total_native_tokens).unwrap(); - let change_staked_tokens = - (staking_growth * 
tot_tokens).to_uint().unwrap(); - controller.locked_tokens = std::cmp::min( - controller.total_native_tokens, - controller.locked_tokens + change_staked_tokens, - ); - - // if locked_ratio > Dec::from_str("0.8").unwrap() - // && locked_ratio - controller.locked_ratio_last >= Dec::zero() - // { - // do_add = false; - // } else if locked_ratio < Dec::from_str("0.4").unwrap() - // && locked_ratio - controller.locked_ratio_last < Dec::zero() - // { - // do_add = true; - // } - - // controller.locked_tokens = std::cmp::min( - // if do_add { - // controller.locked_tokens + change_staked_tokens - // } else { - // controller.locked_tokens - change_staked_tokens - // }, - // controller.total_tokens, - // ); - - controller.locked_ratio_last = locked_ratio; - } - - // controller.locked_ratio_last = locked_ratio_1; - // controller.last_inflation_amount = inflation_1; - // controller.total_tokens += inflation_1; - // controller.locked_tokens += inflation_1; - } -} diff --git a/crates/trans_token/src/lib.rs b/crates/trans_token/src/lib.rs index ea8b646005..6644f73d7a 100644 --- a/crates/trans_token/src/lib.rs +++ b/crates/trans_token/src/lib.rs @@ -1,6 +1,5 @@ //! Transparent token types, storage functions, and validation. -pub mod inflation; mod storage; pub mod storage_key; diff --git a/crates/trans_token/src/storage.rs b/crates/trans_token/src/storage.rs index ca429b7f5b..4324f0be29 100644 --- a/crates/trans_token/src/storage.rs +++ b/crates/trans_token/src/storage.rs @@ -45,6 +45,25 @@ where Ok(balance) } +/// Get the effective circulating total supply of native tokens. 
+pub fn get_effective_total_native_supply( + storage: &S, +) -> namada_storage::Result +where + S: StorageRead, +{ + let native_token = storage.get_native_token()?; + let pgf_address = Address::Internal(InternalAddress::Pgf); + + let raw_total = read_total_supply(storage, &native_token)?; + let pgf_balance = read_balance(storage, &native_token, &pgf_address)?; + + // Remove native balance in PGF address from the total supply + Ok(raw_total + .checked_sub(pgf_balance) + .expect("Raw total supply should be larger than PGF balance")) +} + /// Read the denomination of a given token, if any. Note that native /// transparent tokens do not have this set and instead use the constant /// [`token::NATIVE_MAX_DECIMAL_PLACES`]. diff --git a/crates/tx/Cargo.toml b/crates/tx/Cargo.toml index cf522e03b0..fa8eb22802 100644 --- a/crates/tx/Cargo.toml +++ b/crates/tx/Cargo.toml @@ -27,6 +27,7 @@ namada_macros = { path = "../macros" } namada_migrations = {path = "../migrations", optional = true } ark-bls12-381.workspace = true +bitflags.workspace = true borsh.workspace = true data-encoding.workspace = true linkme = {workspace = true, optional = true} diff --git a/crates/tx/src/action.rs b/crates/tx/src/action.rs new file mode 100644 index 0000000000..1d7377e87c --- /dev/null +++ b/crates/tx/src/action.rs @@ -0,0 +1,104 @@ +//! Tx actions are used to indicate from tx to VPs the type of actions that have +//! been applied by the tx to simplify validation (We can check that the +//! storage changes are valid based on the action, rather than trying to derive +//! the action from storage changes). When used, the kind is expected to written +//! to under temporary storage (discarded after tx execution and validation). 
+ +use std::fmt; + +use namada_core::address::Address; +use namada_core::borsh::{BorshDeserialize, BorshSerialize}; +use namada_core::storage::KeySeg; +use namada_core::{address, storage}; + +pub use crate::data::pos::{ + Bond, ClaimRewards, Redelegation, Unbond, Withdraw, +}; + +/// Actions applied from txs. +pub type Actions = Vec; + +/// An action applied from a tx. +#[derive(Clone, Debug, BorshDeserialize, BorshSerialize)] +pub enum Action { + Pos(PosAction), + Gov(GovAction), + Pgf(PgfAction), +} + +/// PoS tx actions. +#[derive(Clone, Debug, BorshDeserialize, BorshSerialize)] +pub enum PosAction { + BecomeValidator(Address), + DeactivateValidator(Address), + ReactivateValidator(Address), + Unjail(Address), + Bond(Bond), + Unbond(Unbond), + Withdraw(Withdraw), + Redelegation(Redelegation), + ClaimRewards(ClaimRewards), + CommissionChange(Address), + MetadataChange(Address), + ConsensusKeyChange(Address), +} + +/// Gov tx actions. +#[derive(Clone, Debug, BorshDeserialize, BorshSerialize)] +pub enum GovAction { + InitProposal { author: Address }, + VoteProposal { id: u64, voter: Address }, +} + +/// PGF tx actions. +#[derive(Clone, Debug, BorshDeserialize, BorshSerialize)] +pub enum PgfAction { + ResignSteward(Address), + UpdateStewardCommission(Address), +} + +/// Read actions from temporary storage +pub trait Read { + /// Storage access errors + type Err: fmt::Debug; + + fn read_temp( + &self, + key: &storage::Key, + ) -> Result, Self::Err>; + + /// Read all the actions applied by a tx + fn read_actions(&self) -> Result { + let key = storage_key(); + let actions = self.read_temp(&key)?; + let actions: Actions = actions.unwrap_or_default(); + Ok(actions) + } +} + +/// Write actions to temporary storage +pub trait Write: Read { + fn write_temp( + &mut self, + key: &storage::Key, + val: T, + ) -> Result<(), Self::Err>; + + /// Push an action applied in a tx. 
+ fn push_action(&mut self, action: Action) -> Result<(), Self::Err> { + let key = storage_key(); + let actions = self.read_temp(&key)?; + let mut actions: Actions = actions.unwrap_or_default(); + actions.push(action); + self.write_temp(&key, actions)?; + Ok(()) + } +} + +const TX_ACTIONS_KEY: &str = "tx_actions"; + +fn storage_key() -> storage::Key { + storage::Key::from(address::TEMP_STORAGE.to_db_key()) + .push(&TX_ACTIONS_KEY.to_owned()) + .expect("Cannot obtain a storage key") +} diff --git a/crates/tx/src/data/decrypted.rs b/crates/tx/src/data/decrypted.rs index f9f48db492..6d2d8fc9ac 100644 --- a/crates/tx/src/data/decrypted.rs +++ b/crates/tx/src/data/decrypted.rs @@ -21,7 +21,6 @@ pub mod decrypted_tx { serde::Serialize, serde::Deserialize, )] - #[allow(clippy::large_enum_variant)] /// Holds the result of attempting to decrypt /// a transaction and the data necessary for /// other validators to verify diff --git a/crates/tx/src/data/mod.rs b/crates/tx/src/data/mod.rs index 65e92d939a..f6a26a500d 100644 --- a/crates/tx/src/data/mod.rs +++ b/crates/tx/src/data/mod.rs @@ -17,6 +17,7 @@ use std::collections::BTreeSet; use std::fmt::{self, Display}; use std::str::FromStr; +use bitflags::bitflags; pub use decrypted::*; use namada_core::address::Address; use namada_core::borsh::{ @@ -49,6 +50,7 @@ use crate::data::protocol::ProtocolTx; ToPrimitive, PartialEq, Eq, + Hash, Serialize, Deserialize, )] @@ -63,30 +65,24 @@ pub enum ResultCode { InvalidTx = 2, /// Invalid signature InvalidSig = 3, - /// Tx is in invalid order - InvalidOrder = 4, - /// Tx wasn't expected - ExtraTxs = 5, - /// Undecryptable - Undecryptable = 6, /// The block is full - AllocationError = 7, + AllocationError = 4, /// Replayed tx - ReplayTx = 8, + ReplayTx = 5, /// Invalid chain ID - InvalidChainId = 9, + InvalidChainId = 6, /// Expired tx - ExpiredTx = 10, + ExpiredTx = 7, /// Exceeded gas limit - TxGasLimit = 11, + TxGasLimit = 8, /// Error in paying tx fee - FeeError = 12, + FeeError = 9, 
/// Invalid vote extension - InvalidVoteExtension = 13, + InvalidVoteExtension = 10, /// Tx is too large - TooLarge = 14, - /// Decrypted tx is expired - ExpiredDecryptedTx = 15, + TooLarge = 11, + /// Tx code is not allowlisted + TxNotAllowlisted = 12, // ========================================================================= // WARN: These codes shouldn't be changed between version! } @@ -99,11 +95,10 @@ impl ResultCode { // NOTE: pattern match on all `ResultCode` variants, in order // to catch potential bugs when adding new codes match self { - Ok | WasmRuntimeError | ExpiredDecryptedTx => true, - InvalidTx | InvalidSig | InvalidOrder | ExtraTxs - | Undecryptable | AllocationError | ReplayTx | InvalidChainId - | ExpiredTx | TxGasLimit | FeeError | InvalidVoteExtension - | TooLarge => false, + Ok | WasmRuntimeError => true, + InvalidTx | InvalidSig | AllocationError | ReplayTx + | InvalidChainId | ExpiredTx | TxGasLimit | FeeError + | InvalidVoteExtension | TooLarge | TxNotAllowlisted => false, } } @@ -179,6 +174,8 @@ pub fn hash_tx(tx_bytes: &[u8]) -> Hash { pub struct TxResult { /// Total gas used by the transaction (includes the gas used by VPs) pub gas_used: Gas, + /// Storage keys touched by the wrapper transaction + pub wrapper_changed_keys: BTreeSet, /// Storage keys touched by the transaction pub changed_keys: BTreeSet, /// The results of all the triggered validity predicates by the transaction @@ -198,6 +195,44 @@ impl TxResult { } } +bitflags! { + /// Validity predicate status flags. + #[derive( + Default, Debug, Clone, Copy, PartialEq, Eq, + PartialOrd, Ord, Hash, Serialize, Deserialize, + )] + pub struct VpStatusFlags: u64 { + /// The transaction had an invalid signature. 
+ const INVALID_SIGNATURE = 0b0000_0001; + } +} + +impl BorshSerialize for VpStatusFlags { + fn serialize( + &self, + writer: &mut W, + ) -> std::io::Result<()> { + BorshSerialize::serialize(&self.bits(), writer) + } +} + +impl BorshDeserialize for VpStatusFlags { + fn deserialize_reader( + reader: &mut R, + ) -> std::io::Result { + let bits = ::deserialize_reader(reader)?; + VpStatusFlags::from_bits(bits).ok_or_else(|| { + std::io::Error::new( + std::io::ErrorKind::InvalidInput, + "Unexpected VpStatusFlags flag in input", + ) + }) + } +} + +#[cfg(feature = "migrations")] +namada_macros::derive_borshdeserializer!(VpStatusFlags); + /// Result of checking a transaction with validity predicates // TODO derive BorshSchema after #[derive( @@ -219,8 +254,10 @@ pub struct VpsResult { pub gas_used: VpsGas, /// Errors occurred in any of the VPs, if any pub errors: Vec<(Address, String)>, - /// Sentinel to signal an invalid transaction signature - pub invalid_sig: bool, + /// Validity predicate status flags, containing info + /// about conditions that caused their evaluation to + /// fail. 
+ pub status_flags: VpStatusFlags, } impl fmt::Display for TxResult { @@ -305,8 +342,6 @@ pub enum TxType { Raw, /// A Tx that contains an encrypted raw tx Wrapper(Box), - /// An attempted decryption of a wrapper tx - Decrypted(DecryptedTx), /// Txs issued by validators as part of internal protocols Protocol(Box), } @@ -353,7 +388,7 @@ mod test_process_tx { use namada_core::token::{Amount, DenominatedAmount}; use super::*; - use crate::{Code, Data, Section, Signature, Tx, TxError}; + use crate::{Authorization, Code, Data, Section, Tx, TxError}; fn gen_keypair() -> common::SecretKey { use rand::prelude::ThreadRng; @@ -414,7 +449,7 @@ mod test_process_tx { let data_sec = tx .set_data(Data::new("transaction data".as_bytes().to_owned())) .clone(); - tx.add_section(Section::Signature(Signature::new( + tx.add_section(Section::Authorization(Authorization::new( vec![tx.raw_header_hash()], [(0, gen_keypair())].into_iter().collect(), None, @@ -450,7 +485,7 @@ mod test_process_tx { )))); tx.set_code(Code::new("wasm code".as_bytes().to_owned(), None)); tx.set_data(Data::new("transaction data".as_bytes().to_owned())); - tx.add_section(Section::Signature(Signature::new( + tx.add_section(Section::Authorization(Authorization::new( tx.sechashes(), [(0, keypair)].into_iter().collect(), None, @@ -483,74 +518,3 @@ mod test_process_tx { assert_matches!(result, TxError::SigError(_)); } } - -/// Test that process_tx correctly identifies a DecryptedTx -/// with some unsigned data and returns an identical copy -#[test] -fn test_process_tx_decrypted_unsigned() { - use crate::{Code, Data, Tx}; - let mut tx = Tx::from_type(TxType::Decrypted(DecryptedTx::Decrypted)); - let code_sec = tx - .set_code(Code::new("transaction data".as_bytes().to_owned(), None)) - .clone(); - let data_sec = tx - .set_data(Data::new("transaction data".as_bytes().to_owned())) - .clone(); - tx.validate_tx().expect("Test failed"); - match tx.header().tx_type { - TxType::Decrypted(DecryptedTx::Decrypted) => { - 
assert_eq!(tx.header().code_hash, code_sec.get_hash(),); - assert_eq!(tx.header().data_hash, data_sec.get_hash(),); - } - _ => panic!("Test failed"), - } -} - -/// Test that process_tx correctly identifies a DecryptedTx -/// with some signed data and extracts it without checking -/// signature -#[test] -fn test_process_tx_decrypted_signed() { - use namada_core::key::*; - - use crate::{Code, Data, Section, Signature, Tx}; - - fn gen_keypair() -> common::SecretKey { - use rand::prelude::ThreadRng; - use rand::thread_rng; - - let mut rng: ThreadRng = thread_rng(); - ed25519::SigScheme::generate(&mut rng).try_to_sk().unwrap() - } - - use namada_core::key::Signature as S; - let mut decrypted = - Tx::from_type(TxType::Decrypted(DecryptedTx::Decrypted)); - // Invalid signed data - let ed_sig = - ed25519::Signature::try_from_slice([0u8; 64].as_ref()).unwrap(); - let mut sig_sec = Signature::new( - vec![decrypted.header_hash()], - [(0, gen_keypair())].into_iter().collect(), - None, - ); - sig_sec - .signatures - .insert(0, common::Signature::try_from_sig(&ed_sig).unwrap()); - decrypted.add_section(Section::Signature(sig_sec)); - // create the tx with signed decrypted data - let code_sec = decrypted - .set_code(Code::new("transaction data".as_bytes().to_owned(), None)) - .clone(); - let data_sec = decrypted - .set_data(Data::new("transaction data".as_bytes().to_owned())) - .clone(); - decrypted.validate_tx().expect("Test failed"); - match decrypted.header().tx_type { - TxType::Decrypted(DecryptedTx::Decrypted) => { - assert_eq!(decrypted.header.code_hash, code_sec.get_hash()); - assert_eq!(decrypted.header.data_hash, data_sec.get_hash()); - } - _ => panic!("Test failed"), - } -} diff --git a/crates/tx/src/data/pgf.rs b/crates/tx/src/data/pgf.rs index fc36642693..72de35f42f 100644 --- a/crates/tx/src/data/pgf.rs +++ b/crates/tx/src/data/pgf.rs @@ -1,7 +1,6 @@ -use std::collections::HashMap; - use namada_core::address::Address; use namada_core::borsh::{BorshDeserialize, 
BorshSerialize}; +use namada_core::collections::HashMap; use namada_core::dec::Dec; use namada_macros::BorshDeserializer; #[cfg(feature = "migrations")] @@ -51,7 +50,7 @@ pub mod tests { ) -> UpdateStewardCommission { UpdateStewardCommission { steward, - commission, + commission: commission.into_iter().collect(), } } } diff --git a/crates/tx/src/data/wrapper.rs b/crates/tx/src/data/wrapper.rs index 13327b5854..2ac990c4bb 100644 --- a/crates/tx/src/data/wrapper.rs +++ b/crates/tx/src/data/wrapper.rs @@ -25,7 +25,7 @@ pub mod wrapper_tx { use sha2::{Digest, Sha256}; use thiserror::Error; - use crate::data::{DecryptedTx, TxType}; + use crate::data::TxType; use crate::{Code, Data, Section, Tx}; /// TODO: Determine a sane number for this @@ -200,6 +200,7 @@ pub mod wrapper_tx { pub pk: common::PublicKey, /// The epoch in which the tx is to be submitted. This determines /// which decryption key will be used + // TODO: Is this still necessary without the DKG? Seems not pub epoch: Epoch, /// Max amount of gas that can be used when executing the inner tx pub gas_limit: GasLimit, @@ -302,8 +303,7 @@ pub mod wrapper_tx { transfer_code_tag: Option, unshield: Transaction, ) -> Result { - let mut tx = - Tx::from_type(TxType::Decrypted(DecryptedTx::Decrypted)); + let mut tx = Tx::from_type(TxType::Raw); let masp_section = tx.add_section(Section::MaspTx(unshield)); let masp_hash = Hash( masp_section diff --git a/crates/tx/src/event.rs b/crates/tx/src/event.rs new file mode 100644 index 0000000000..b0ad629541 --- /dev/null +++ b/crates/tx/src/event.rs @@ -0,0 +1,47 @@ +//! Transaction events. 
+ +use namada_core::event::extend::{ + ComposeEvent, ExtendEvent, Height, Log, TxHash, +}; +use namada_core::event::Event; + +use super::Tx; +use crate::data::{ResultCode, TxResult}; +use crate::TxType; + +/// Creates a new event with the hash and height of the transaction +/// already filled in +pub fn new_tx_event(tx: &Tx, height: u64) -> Event { + let base_event = match tx.header().tx_type { + TxType::Wrapper(_) | TxType::Protocol(_) => { + Event::applied_tx().with(TxHash(tx.header_hash())) + } + _ => unreachable!(), + }; + base_event + .with(Height(height.into())) + .with(Log(String::new())) + .into() +} + +/// Extend an [`Event`] with result code data. +pub struct Code(pub ResultCode); + +impl ExtendEvent for Code { + #[inline] + fn extend_event(self, event: &mut Event) { + let Self(code) = self; + event["code"] = code.into(); + } +} + +/// Extend an [`Event`] with inner tx data. +pub struct InnerTx<'result>(pub &'result TxResult); + +impl ExtendEvent for InnerTx<'_> { + #[inline] + fn extend_event(self, event: &mut Event) { + let Self(tx_result) = self; + event["inner_tx"] = tx_result.to_string(); + } +} diff --git a/crates/tx/src/lib.rs b/crates/tx/src/lib.rs index 6c866e8513..bc53e9904f 100644 --- a/crates/tx/src/lib.rs +++ b/crates/tx/src/lib.rs @@ -1,63 +1,21 @@ #![allow(missing_docs)] +pub mod action; pub mod data; +pub mod event; pub mod proto; mod types; -use std::collections::HashMap; - use data::TxType; -use namada_core::event::{Event, EventLevel, EventType}; +pub use event::new_tx_event; pub use namada_core::key::SignableEthMessage; pub use namada_core::sign::SignatureIndex; pub use types::{ - standalone_signature, verify_standalone_sig, Code, Commitment, - CompressedSignature, Data, DecodeError, Header, MaspBuilder, Memo, Section, - Signature, Signed, Signer, Tx, TxError, VerifySigError, + standalone_signature, verify_standalone_sig, Authorization, Code, + Commitment, CompressedSignature, Data, DecodeError, Header, MaspBuilder, + Memo, Section, 
Signed, Signer, Tx, TxError, VerifySigError, }; -/// Creates a new event with the hash and height of the transaction -/// already filled in -pub fn new_tx_event(tx: &Tx, height: u64) -> Event { - let mut event = match tx.header().tx_type { - TxType::Wrapper(_) => { - let mut event = Event { - event_type: EventType::Accepted, - level: EventLevel::Tx, - attributes: HashMap::new(), - }; - event["hash"] = tx.header_hash().to_string(); - event - } - TxType::Decrypted(_) => { - let mut event = Event { - event_type: EventType::Applied, - level: EventLevel::Tx, - attributes: HashMap::new(), - }; - event["hash"] = tx - .clone() - .update_header(TxType::Raw) - .header_hash() - .to_string(); - event - } - TxType::Protocol(_) => { - let mut event = Event { - event_type: EventType::Applied, - level: EventLevel::Tx, - attributes: HashMap::new(), - }; - event["hash"] = tx.header_hash().to_string(); - event - } - _ => unreachable!(), - }; - event["height"] = height.to_string(); - event["log"] = "".to_string(); - event -} - #[cfg(test)] mod tests { use data_encoding::HEXLOWER; diff --git a/crates/tx/src/types.rs b/crates/tx/src/types.rs index 1847b577ff..7a19c2570b 100644 --- a/crates/tx/src/types.rs +++ b/crates/tx/src/types.rs @@ -1,6 +1,6 @@ use std::borrow::Cow; use std::cmp::Ordering; -use std::collections::{BTreeMap, HashMap, HashSet}; +use std::collections::BTreeMap; use std::hash::{Hash, Hasher}; use std::marker::PhantomData; @@ -16,6 +16,7 @@ use namada_core::borsh::{ BorshDeserialize, BorshSchema, BorshSerialize, BorshSerializeExt, }; use namada_core::chain::ChainId; +use namada_core::collections::{HashMap, HashSet}; use namada_core::key::*; use namada_core::masp::AssetData; use namada_core::sign::SignatureIndex; @@ -30,7 +31,7 @@ use sha2::{Digest, Sha256}; use thiserror::Error; use crate::data::protocol::ProtocolTx; -use crate::data::{hash_tx, DecryptedTx, Fee, GasLimit, TxType, WrapperTx}; +use crate::data::{hash_tx, Fee, GasLimit, TxType, WrapperTx}; use crate::proto; 
/// Represents an error in signature verification @@ -232,7 +233,13 @@ impl Data { /// Make a new data section with the given bytes pub fn new(data: Vec) -> Self { Self { - salt: DateTimeUtc::now().0.timestamp_millis().to_le_bytes(), + salt: { + #[allow(clippy::disallowed_methods)] + DateTimeUtc::now() + } + .0 + .timestamp_millis() + .to_le_bytes(), data, } } @@ -331,7 +338,13 @@ impl Code { /// Make a new code section with the given bytes pub fn new(code: Vec, tag: Option) -> Self { Self { - salt: DateTimeUtc::now().0.timestamp_millis().to_le_bytes(), + salt: { + #[allow(clippy::disallowed_methods)] + DateTimeUtc::now() + } + .0 + .timestamp_millis() + .to_le_bytes(), code: Commitment::Id(code), tag, } @@ -343,7 +356,13 @@ impl Code { tag: Option, ) -> Self { Self { - salt: DateTimeUtc::now().0.timestamp_millis().to_le_bytes(), + salt: { + #[allow(clippy::disallowed_methods)] + DateTimeUtc::now() + } + .0 + .timestamp_millis() + .to_le_bytes(), code: Commitment::Hash(hash), tag, } @@ -389,7 +408,7 @@ pub enum Signer { Serialize, Deserialize, )] -pub struct Signature { +pub struct Authorization { /// The hash of the section being signed pub targets: Vec, /// The public keys against which the signatures should be verified @@ -398,7 +417,7 @@ pub struct Signature { pub signatures: BTreeMap, } -impl Signature { +impl Authorization { /// Sign the given section hash with the given key and return a section pub fn new( targets: Vec, @@ -548,7 +567,7 @@ impl CompressedSignature { /// Decompress this signature object with respect to the given transaction /// by looking up the necessary section hashes. Used by constrained hardware /// wallets. 
- pub fn expand(self, tx: &Tx) -> Signature { + pub fn expand(self, tx: &Tx) -> Authorization { let mut targets = Vec::new(); for idx in self.targets { if idx == 0 { @@ -561,7 +580,7 @@ impl CompressedSignature { targets.push(tx.sections[idx as usize - 1].get_hash()); } } - Signature { + Authorization { targets, signer: self.signer, signatures: self.signatures, @@ -734,7 +753,7 @@ pub enum Section { /// Transaction code. Sending to hardware wallets optional Code(Code), /// A transaction header/protocol signature - Signature(Signature), + Authorization(Authorization), /// Ciphertext obtained by encrypting arbitrary transaction sections Ciphertext(Ciphertext), /// Embedded MASP transaction section @@ -762,7 +781,7 @@ impl Section { Self::Data(data) => data.hash(hasher), Self::ExtraData(extra) => extra.hash(hasher), Self::Code(code) => code.hash(hasher), - Self::Signature(signature) => signature.hash(hasher), + Self::Authorization(signature) => signature.hash(hasher), Self::Ciphertext(ct) => ct.hash(hasher), Self::MaspBuilder(mb) => mb.hash(hasher), Self::MaspTx(tx) => { @@ -826,8 +845,8 @@ impl Section { } /// Extract the signature from this section if possible - pub fn signature(&self) -> Option { - if let Self::Signature(data) = self { + pub fn signature(&self) -> Option { + if let Self::Authorization(data) = self { Some(data.clone()) } else { None @@ -901,6 +920,7 @@ impl Header { tx_type, chain_id: ChainId::default(), expiration: None, + #[allow(clippy::disallowed_methods)] timestamp: DateTimeUtc::now(), code_hash: namada_core::hash::Hash::default(), data_hash: namada_core::hash::Hash::default(), @@ -923,15 +943,6 @@ impl Header { } } - /// Get the decrypted header if it is present - pub fn decrypted(&self) -> Option { - if let TxType::Decrypted(decrypted) = &self.tx_type { - Some(decrypted.clone()) - } else { - None - } - } - /// Get the protocol header if it is present pub fn protocol(&self) -> Option { if let TxType::Protocol(protocol) = &self.tx_type { @@ 
-1204,7 +1215,7 @@ impl Tx { threshold: u8, max_signatures: Option, mut consume_verify_sig_gas: F, - ) -> std::result::Result, VerifySigError> + ) -> std::result::Result, VerifySigError> where F: FnMut() -> std::result::Result<(), namada_gas::Error>, { @@ -1215,7 +1226,7 @@ impl Tx { let mut witnesses = Vec::new(); for section in &self.sections { - if let Section::Signature(signatures) = section { + if let Section::Authorization(signatures) = section { // Check that the hashes being checked are a subset of those in // this section. Also ensure that all the sections the signature // signs over are present. @@ -1272,7 +1283,7 @@ impl Tx { &self, public_key: &common::PublicKey, hashes: &[namada_core::hash::Hash], - ) -> Result<&Signature, VerifySigError> { + ) -> Result<&Authorization, VerifySigError> { self.verify_signatures( hashes, AccountPublicKeysMap::from_iter([public_key.clone()]), @@ -1293,7 +1304,7 @@ impl Tx { ) -> Vec { let targets = vec![self.raw_header_hash()]; let mut signatures = Vec::new(); - let section = Signature::new( + let section = Authorization::new( targets, public_keys_index_map.index_secret_keys(secret_keys.to_vec()), signer, @@ -1339,7 +1350,7 @@ impl Tx { /// 2. 
The signature is valid pub fn validate_tx( &self, - ) -> std::result::Result, TxError> { + ) -> std::result::Result, TxError> { match &self.header.tx_type { // verify signature and extract signed data TxType::Wrapper(wrapper) => self @@ -1361,8 +1372,6 @@ impl Tx { err )) }), - // we extract the signed data, but don't check the signature - TxType::Decrypted(_) => Ok(None), // return as is TxType::Raw => Ok(None), } @@ -1512,7 +1521,7 @@ impl Tx { /// Add fee payer keypair to the tx builder pub fn sign_wrapper(&mut self, keypair: common::SecretKey) -> &mut Self { self.protocol_filter(); - self.add_section(Section::Signature(Signature::new( + self.add_section(Section::Authorization(Authorization::new( self.sechashes(), [(0, keypair)].into_iter().collect(), None, @@ -1537,7 +1546,7 @@ impl Tx { (0..).zip(keypairs).collect() }; - self.add_section(Section::Signature(Signature::new( + self.add_section(Section::Authorization(Authorization::new( hashes, secret_keys, signer, @@ -1551,7 +1560,7 @@ impl Tx { signatures: Vec, ) -> &mut Self { self.protocol_filter(); - let mut pk_section = Signature { + let mut pk_section = Authorization { targets: vec![self.raw_header_hash()], signatures: BTreeMap::new(), signer: Signer::PubKeys(vec![]), @@ -1562,10 +1571,12 @@ impl Tx { if let Some((addr, idx)) = &signature.index { // Add the signature under the given multisig address let section = - sections.entry(addr.clone()).or_insert_with(|| Signature { - targets: vec![self.raw_header_hash()], - signatures: BTreeMap::new(), - signer: Signer::Address(addr.clone()), + sections.entry(addr.clone()).or_insert_with(|| { + Authorization { + targets: vec![self.raw_header_hash()], + signatures: BTreeMap::new(), + signer: Signer::Address(addr.clone()), + } }); section.signatures.insert(*idx, signature.signature); } else if let Signer::PubKeys(pks) = &mut pk_section.signer { @@ -1578,7 +1589,7 @@ impl Tx { } for section in std::iter::once(pk_section).chain(sections.into_values()) { - 
self.add_section(Section::Signature(section)); + self.add_section(Section::Authorization(section)); } self } diff --git a/crates/tx_env/src/lib.rs b/crates/tx_env/src/lib.rs index 25242bfacf..d9fc090eae 100644 --- a/crates/tx_env/src/lib.rs +++ b/crates/tx_env/src/lib.rs @@ -2,20 +2,44 @@ //! inside a tx. use namada_core::address::Address; -use namada_core::borsh::BorshSerialize; +use namada_core::borsh::{BorshDeserialize, BorshSerialize, BorshSerializeExt}; use namada_core::ibc::IbcEvent; use namada_core::storage; -use namada_storage::{Result, StorageRead, StorageWrite}; +use namada_storage::{Result, ResultExt, StorageRead, StorageWrite}; /// Transaction host functions pub trait TxEnv: StorageRead + StorageWrite { + /// Storage read temporary state Borsh encoded value (after tx execution). + /// It will try to read from only the write log and then decode it if + /// found. + fn read_temp( + &self, + key: &storage::Key, + ) -> Result> { + let bytes = self.read_bytes_temp(key)?; + match bytes { + Some(bytes) => { + let val = T::try_from_slice(&bytes).into_storage_result()?; + Ok(Some(val)) + } + None => Ok(None), + } + } + + /// Storage read temporary state raw bytes (after tx execution). It will try + /// to read from only the write log. + fn read_bytes_temp(&self, key: &storage::Key) -> Result>>; + /// Write a temporary value to be encoded with Borsh at the given key to /// storage. fn write_temp( &mut self, key: &storage::Key, val: T, - ) -> Result<()>; + ) -> Result<()> { + let bytes = val.serialize_to_vec(); + self.write_bytes_temp(key, bytes) + } /// Write a temporary value as bytes at the given key to storage. fn write_bytes_temp( @@ -38,6 +62,7 @@ pub trait TxEnv: StorageRead + StorageWrite { &mut self, code_hash: impl AsRef<[u8]>, code_tag: &Option, + entropy_source: &[u8], ) -> Result
; /// Update a validity predicate diff --git a/crates/tx_prelude/src/account.rs b/crates/tx_prelude/src/account.rs index dacb659f65..96effa7ae9 100644 --- a/crates/tx_prelude/src/account.rs +++ b/crates/tx_prelude/src/account.rs @@ -2,17 +2,16 @@ pub use namada_account::*; use super::*; +#[inline] pub fn init_account( ctx: &mut Ctx, owner: &Address, data: InitAccount, -) -> EnvResult
{ +) -> EnvResult<()> { namada_account::init_account_storage( ctx, owner, &data.public_keys, data.threshold, - )?; - - Ok(owner.to_owned()) + ) } diff --git a/crates/tx_prelude/src/ibc.rs b/crates/tx_prelude/src/ibc.rs index 28d41d6f4b..96c467c447 100644 --- a/crates/tx_prelude/src/ibc.rs +++ b/crates/tx_prelude/src/ibc.rs @@ -1,27 +1,35 @@ //! IBC lower-level functions for transactions. use std::cell::RefCell; +use std::collections::BTreeSet; use std::rc::Rc; use namada_core::address::{Address, InternalAddress}; -pub use namada_core::ibc::{IbcEvent, IbcShieldedTransfer}; -use namada_core::token::DenominatedAmount; +pub use namada_core::ibc::IbcEvent; +use namada_core::token::Amount; pub use namada_ibc::storage::{ibc_token, is_ibc_key}; pub use namada_ibc::{ - IbcActions, IbcCommonContext, IbcStorageContext, ProofSpec, TransferModule, + IbcActions, IbcCommonContext, IbcStorageContext, NftTransferModule, + ProofSpec, TransferModule, }; -use namada_token::denom_to_amount; use namada_tx_env::TxEnv; use crate::token::{burn, mint, transfer}; use crate::{Ctx, Error}; -/// IBC actions to handle an IBC message -pub fn ibc_actions(ctx: &mut Ctx) -> IbcActions { +/// IBC actions to handle an IBC message. The `verifiers` inserted into the set +/// must be inserted into the tx context with `Ctx::insert_verifier` after tx +/// execution. 
+pub fn ibc_actions( + ctx: &mut Ctx, + verifiers: Rc>>, +) -> IbcActions { let ctx = Rc::new(RefCell::new(ctx.clone())); - let mut actions = IbcActions::new(ctx.clone()); - let module = TransferModule::new(ctx); - actions.add_transfer_module(module.module_id(), module); + let mut actions = IbcActions::new(ctx.clone(), verifiers.clone()); + let module = TransferModule::new(ctx.clone(), verifiers); + actions.add_transfer_module(module); + let module = NftTransferModule::new(ctx); + actions.add_transfer_module(module); actions } @@ -45,7 +53,7 @@ impl IbcStorageContext for Ctx { src: &Address, dest: &Address, token: &Address, - amount: DenominatedAmount, + amount: Amount, ) -> std::result::Result<(), Error> { transfer(self, src, dest, token, amount) } @@ -63,14 +71,14 @@ impl IbcStorageContext for Ctx { &mut self, target: &Address, token: &Address, - amount: DenominatedAmount, + amount: Amount, ) -> Result<(), Error> { mint( self, &Address::Internal(InternalAddress::Ibc), target, token, - denom_to_amount(amount, token, self)?, + amount, ) } @@ -78,9 +86,9 @@ impl IbcStorageContext for Ctx { &mut self, target: &Address, token: &Address, - amount: DenominatedAmount, + amount: Amount, ) -> Result<(), Error> { - burn(self, target, token, denom_to_amount(amount, token, self)?) 
+ burn(self, target, token, amount) } fn log_string(&self, message: String) { diff --git a/crates/tx_prelude/src/lib.rs b/crates/tx_prelude/src/lib.rs index b83f2cf345..c178a6e065 100644 --- a/crates/tx_prelude/src/lib.rs +++ b/crates/tx_prelude/src/lib.rs @@ -38,7 +38,7 @@ pub use namada_storage::{ collections, iter_prefix, iter_prefix_bytes, Error, OptionExt, ResultExt, StorageRead, StorageWrite, }; -pub use namada_tx::{data as transaction, Section, Tx}; +pub use namada_tx::{action, data as transaction, Section, Tx}; pub use namada_tx_env::TxEnv; use namada_vm_env::tx::*; use namada_vm_env::{read_from_buffer, read_key_val_bytes_from_buffer}; @@ -102,6 +102,14 @@ impl Ctx { pub const unsafe fn new() -> Self { Self(()) } + + /// Yield a byte array value back to the host environment. + pub fn yield_value>(&self, value: V) { + let value = value.as_ref(); + unsafe { + namada_tx_yield_value(value.as_ptr() as _, value.len() as _); + } + } } /// Result of `TxEnv`, `namada_storage::StorageRead` or @@ -252,13 +260,14 @@ impl StorageWrite for Ctx { } impl TxEnv for Ctx { - fn write_temp( - &mut self, + fn read_bytes_temp( + &self, key: &storage::Key, - val: T, - ) -> Result<(), Error> { - let buf = val.serialize_to_vec(); - self.write_bytes_temp(key, buf) + ) -> Result>, Error> { + let key = key.to_string(); + let read_result = + unsafe { namada_tx_read_temp(key.as_ptr() as _, key.len() as _) }; + Ok(read_from_buffer(read_result, namada_tx_result_buffer)) } fn write_bytes_temp( @@ -290,6 +299,7 @@ impl TxEnv for Ctx { &mut self, code_hash: impl AsRef<[u8]>, code_tag: &Option, + entropy_source: &[u8], ) -> Result { let code_hash = code_hash.as_ref(); let code_tag = code_tag.serialize_to_vec(); @@ -300,6 +310,8 @@ impl TxEnv for Ctx { code_hash.len() as _, code_tag.as_ptr() as _, code_tag.len() as _, + entropy_source.as_ptr() as _, + entropy_source.len() as _, result.as_ptr() as _, ) }; @@ -371,10 +383,38 @@ impl TxEnv for Ctx { } } +impl namada_tx::action::Read for Ctx { + 
type Err = Error; + + fn read_temp( + &self, + key: &storage::Key, + ) -> Result, Self::Err> { + TxEnv::read_temp(self, key) + } +} + +impl namada_tx::action::Write for Ctx { + fn write_temp( + &mut self, + key: &storage::Key, + val: T, + ) -> Result<(), Self::Err> { + TxEnv::write_temp(self, key, val) + } +} + /// Execute IBC tx. // Temp. workaround for -pub fn tx_ibc_execute() { - unsafe { namada_tx_ibc_execute() } +pub fn tx_ibc_execute() -> Result, Error> { + let result = unsafe { namada_tx_ibc_execute() }; + match read_from_buffer(result, namada_tx_result_buffer) { + Some(value) => { + Ok(Option::::try_from_slice(&value[..]) + .expect("The conversion shouldn't fail")) + } + None => Ok(None), + } } /// Verify section signatures against the given list of keys diff --git a/crates/tx_prelude/src/proof_of_stake.rs b/crates/tx_prelude/src/proof_of_stake.rs index 8e372722c7..e15d0e82da 100644 --- a/crates/tx_prelude/src/proof_of_stake.rs +++ b/crates/tx_prelude/src/proof_of_stake.rs @@ -12,7 +12,10 @@ use namada_proof_of_stake::{ redelegate_tokens, unbond_tokens, unjail_validator, withdraw_tokens, }; pub use namada_proof_of_stake::{parameters, types}; -use namada_tx::data::pos::BecomeValidator; +use namada_tx::action::{ + Action, ClaimRewards, PosAction, Redelegation, Unbond, Withdraw, Write, +}; +use namada_tx::data::pos::{BecomeValidator, Bond}; use super::*; @@ -26,6 +29,16 @@ impl Ctx { validator: &Address, amount: token::Amount, ) -> TxResult { + // The tx must be authorized by the source address + let verifier = source.as_ref().unwrap_or(&validator); + self.insert_verifier(verifier)?; + + self.push_action(Action::Pos(PosAction::Bond(Bond { + validator: validator.clone(), + amount, + source: source.cloned(), + })))?; + let current_epoch = self.get_block_epoch()?; bond_tokens(self, source, validator, amount, current_epoch, None) } @@ -39,6 +52,16 @@ impl Ctx { validator: &Address, amount: token::Amount, ) -> EnvResult { + // The tx must be authorized by the 
source address + let verifier = source.as_ref().unwrap_or(&validator); + self.insert_verifier(verifier)?; + + self.push_action(Action::Pos(PosAction::Unbond(Unbond { + validator: validator.clone(), + amount, + source: source.cloned(), + })))?; + let current_epoch = self.get_block_epoch()?; unbond_tokens(self, source, validator, amount, current_epoch, false) } @@ -51,6 +74,15 @@ impl Ctx { source: Option<&Address>, validator: &Address, ) -> EnvResult { + // The tx must be authorized by the source address + let verifier = source.as_ref().unwrap_or(&validator); + self.insert_verifier(verifier)?; + + self.push_action(Action::Pos(PosAction::Withdraw(Withdraw { + validator: validator.clone(), + source: source.cloned(), + })))?; + let current_epoch = self.get_block_epoch()?; withdraw_tokens(self, source, validator, current_epoch) } @@ -61,6 +93,13 @@ impl Ctx { validator: &Address, consensus_key: &common::PublicKey, ) -> TxResult { + // The tx must be authorized by the source address + self.insert_verifier(validator)?; + + self.push_action(Action::Pos(PosAction::ConsensusKeyChange( + validator.clone(), + )))?; + let current_epoch = self.get_block_epoch()?; change_consensus_key(self, validator, consensus_key, current_epoch) } @@ -71,12 +110,24 @@ impl Ctx { validator: &Address, rate: &Dec, ) -> TxResult { + // The tx must be authorized by the source address + self.insert_verifier(validator)?; + + self.push_action(Action::Pos(PosAction::CommissionChange( + validator.clone(), + )))?; + let current_epoch = self.get_block_epoch()?; change_validator_commission_rate(self, validator, *rate, current_epoch) } /// Unjail a jailed validator and re-enter the validator sets. 
pub fn unjail_validator(&mut self, validator: &Address) -> TxResult { + // The tx must be authorized by the source address + self.insert_verifier(validator)?; + + self.push_action(Action::Pos(PosAction::Unjail(validator.clone())))?; + let current_epoch = self.get_block_epoch()?; unjail_validator(self, validator, current_epoch) } @@ -89,6 +140,16 @@ impl Ctx { dest_validator: &Address, amount: token::Amount, ) -> TxResult { + // The tx must be authorized by the source address + self.insert_verifier(owner)?; + + self.push_action(Action::Pos(PosAction::Redelegation(Redelegation { + src_validator: src_validator.clone(), + dest_validator: dest_validator.clone(), + owner: owner.clone(), + amount, + })))?; + let current_epoch = self.get_block_epoch()?; redelegate_tokens( self, @@ -106,6 +167,15 @@ impl Ctx { source: Option<&Address>, validator: &Address, ) -> EnvResult { + // The tx must be authorized by the source address + let verifier = source.as_ref().unwrap_or(&validator); + self.insert_verifier(verifier)?; + + self.push_action(Action::Pos(PosAction::ClaimRewards(ClaimRewards { + validator: validator.clone(), + source: source.cloned(), + })))?; + let current_epoch = self.get_block_epoch()?; claim_reward_tokens(self, source, validator, current_epoch) } @@ -134,6 +204,13 @@ impl Ctx { let eth_hot_key = key::common::PublicKey::Secp256k1(eth_hot_key); let params = read_pos_params(self)?; + // The tx must be authorized by the source address + self.insert_verifier(&address)?; + + self.push_action(Action::Pos(PosAction::BecomeValidator( + address.clone(), + )))?; + become_validator( self, namada_proof_of_stake::BecomeValidator { @@ -162,12 +239,26 @@ impl Ctx { /// Deactivate validator pub fn deactivate_validator(&mut self, validator: &Address) -> TxResult { + // The tx must be authorized by the source address + self.insert_verifier(validator)?; + + self.push_action(Action::Pos(PosAction::DeactivateValidator( + validator.clone(), + )))?; + let current_epoch = 
self.get_block_epoch()?; deactivate_validator(self, validator, current_epoch) } /// Reactivate validator pub fn reactivate_validator(&mut self, validator: &Address) -> TxResult { + // The tx must be authorized by the source address + self.insert_verifier(validator)?; + + self.push_action(Action::Pos(PosAction::ReactivateValidator( + validator.clone(), + )))?; + let current_epoch = self.get_block_epoch()?; reactivate_validator(self, validator, current_epoch) } @@ -184,6 +275,13 @@ impl Ctx { avatar: Option, commission_rate: Option, ) -> TxResult { + // The tx must be authorized by the source address + self.insert_verifier(validator)?; + + self.push_action(Action::Pos(PosAction::MetadataChange( + validator.clone(), + )))?; + let current_epoch = self.get_block_epoch()?; change_validator_metadata( self, diff --git a/crates/tx_prelude/src/token.rs b/crates/tx_prelude/src/token.rs index a42794e8bb..15de904919 100644 --- a/crates/tx_prelude/src/token.rs +++ b/crates/tx_prelude/src/token.rs @@ -4,6 +4,7 @@ use namada_proof_of_stake::token::storage_key::{ }; use namada_storage::{Error as StorageError, ResultExt}; pub use namada_token::*; +use namada_tx_env::TxEnv; use crate::{Ctx, StorageRead, StorageWrite, TxResult}; @@ -14,9 +15,17 @@ pub fn transfer( src: &Address, dest: &Address, token: &Address, - amount: DenominatedAmount, + amount: Amount, ) -> TxResult { - let amount = denom_to_amount(amount, token, ctx)?; + // The tx must be authorized by the source address + ctx.insert_verifier(src)?; + if token.is_internal() { + // Established address tokens do not have VPs themselves, their + // validation is handled by the `Multitoken` internal address, but + // internal token addresses have to verify the transfer + ctx.insert_verifier(token)?; + } + if amount != Amount::default() && src != dest { let src_key = balance_key(token, src); let dest_key = balance_key(token, dest); @@ -41,6 +50,15 @@ pub fn undenominated_transfer( token: &Address, amount: Amount, ) -> TxResult { + // 
The tx must be authorized by the source address + ctx.insert_verifier(src)?; + if token.is_internal() { + // Established address tokens do not have VPs themselves, their + // validation is handled by the `Multitoken` internal address, but + // internal token addresses have to verify the transfer + ctx.insert_verifier(token)?; + } + if amount != Amount::default() && src != dest { let src_key = balance_key(token, src); let dest_key = balance_key(token, dest); diff --git a/crates/vm_env/src/lib.rs b/crates/vm_env/src/lib.rs index a8b15a90d4..7a5e4afb42 100644 --- a/crates/vm_env/src/lib.rs +++ b/crates/vm_env/src/lib.rs @@ -5,8 +5,6 @@ #![deny(rustdoc::broken_intra_doc_links)] #![deny(rustdoc::private_intra_doc_links)] -use std::mem::ManuallyDrop; - use borsh::BorshDeserialize; use namada_core::internal::{HostEnvResult, KeyVal}; @@ -22,6 +20,13 @@ pub mod tx { // its size. pub fn namada_tx_read(key_ptr: u64, key_len: u64) -> i64; + // Read variable-length temporary state when we don't know the size + // up-front, returns the size of the value (can be 0), or -1 if + // the key is not present. If a value is found, it will be placed in the + // result buffer, because we cannot allocate a buffer for it before + // we know its size. + pub fn namada_tx_read_temp(key_ptr: u64, key_len: u64) -> i64; + // Read a value from result buffer. pub fn namada_tx_result_buffer(result_ptr: u64); @@ -76,6 +81,8 @@ pub mod tx { code_hash_len: u64, code_tag_ptr: u64, code_tag_len: u64, + entropy_source_ptr: u64, + entropy_source_len: u64, result_ptr: u64, ); @@ -120,7 +127,7 @@ pub mod tx { /// Execute IBC tx. // Temp. workaround for - pub fn namada_tx_ibc_execute(); + pub fn namada_tx_ibc_execute() -> i64; /// Set the sentinel for a wrong tx section commitment pub fn namada_tx_set_commitment_sentinel(); @@ -141,6 +148,9 @@ pub mod tx { transaction_ptr: u64, transaction_len: u64, ) -> i64; + + // Yield a byte array value back to the host. 
+ pub fn namada_tx_yield_value(buf_ptr: u64, buf_len: u64); } } @@ -233,6 +243,9 @@ pub mod vp { event_type_len: u64, ) -> i64; + // Yield a byte array value back to the host. + pub fn namada_vp_yield_value(buf_ptr: u64, buf_len: u64); + // Requires a node running with "Info" log level pub fn namada_vp_log_string(str_ptr: u64, str_len: u64); @@ -247,7 +260,7 @@ pub mod vp { threshold: u8, max_signatures_ptr: u64, max_signatures_len: u64, - ) -> i64; + ); pub fn namada_vp_eval( vp_code_hash_ptr: u64, @@ -276,16 +289,10 @@ pub fn read_from_buffer( if HostEnvResult::is_fail(read_result) { None } else { - let result: Vec = Vec::with_capacity(read_result as _); - // The `result` will be dropped from the `target`, which is - // reconstructed from the same memory - let result = ManuallyDrop::new(result); + let result = vec![0u8; read_result as _]; let offset = result.as_slice().as_ptr() as u64; unsafe { result_buffer(offset) }; - let target = unsafe { - Vec::from_raw_parts(offset as _, read_result as _, read_result as _) - }; - Some(target) + Some(result) } } diff --git a/crates/vote_ext/src/bridge_pool_roots.rs b/crates/vote_ext/src/bridge_pool_roots.rs index e7b3d8ee6c..5a36d5fb3e 100644 --- a/crates/vote_ext/src/bridge_pool_roots.rs +++ b/crates/vote_ext/src/bridge_pool_roots.rs @@ -2,11 +2,11 @@ //! of the bridge pool merkle root to be added //! to storage. This will be used to generate //! bridge pool inclusion proofs for Ethereum. 
-use std::collections::HashSet; use std::ops::{Deref, DerefMut}; use namada_core::address::Address; use namada_core::borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; +use namada_core::collections::HashSet; use namada_core::key::common; use namada_core::key::common::Signature; use namada_core::storage::BlockHeight; @@ -123,7 +123,7 @@ impl DerefMut for MultiSignedVext { } impl IntoIterator for MultiSignedVext { - type IntoIter = std::collections::hash_set::IntoIter; + type IntoIter = namada_core::collections::hash_set::IntoIter; type Item = SignedVext; fn into_iter(self) -> Self::IntoIter { diff --git a/crates/vote_ext/src/ethereum_events.rs b/crates/vote_ext/src/ethereum_events.rs index 4b930659e4..683705f275 100644 --- a/crates/vote_ext/src/ethereum_events.rs +++ b/crates/vote_ext/src/ethereum_events.rs @@ -1,11 +1,12 @@ //! Contains types necessary for processing Ethereum events //! in vote extensions. -use std::collections::{BTreeSet, HashMap}; +use std::collections::BTreeSet; use std::ops::Deref; use namada_core::address::Address; use namada_core::borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; +use namada_core::collections::HashMap; use namada_core::ethereum_events::EthereumEvent; use namada_core::key::common::{self, Signature}; use namada_core::storage::BlockHeight; diff --git a/crates/vote_ext/src/lib.rs b/crates/vote_ext/src/lib.rs index b64695d147..60f7208da9 100644 --- a/crates/vote_ext/src/lib.rs +++ b/crates/vote_ext/src/lib.rs @@ -14,7 +14,7 @@ use namada_macros::BorshDeserializer; use namada_migrations::*; use namada_tx::data::protocol::{ProtocolTx, ProtocolTxType}; use namada_tx::data::TxType; -use namada_tx::{Signature, Signed, Tx, TxError}; +use namada_tx::{Authorization, Signed, Tx, TxError}; /// This type represents the data we pass to the extension of /// a vote at the PreCommit phase of Tendermint. 
@@ -151,11 +151,13 @@ impl EthereumTxData { }))); outer_tx.header.chain_id = chain_id; outer_tx.set_data(namada_tx::Data::new(tx_data)); - outer_tx.add_section(namada_tx::Section::Signature(Signature::new( - outer_tx.sechashes(), - [(0, signing_key.clone())].into_iter().collect(), - None, - ))); + outer_tx.add_section(namada_tx::Section::Authorization( + Authorization::new( + outer_tx.sechashes(), + [(0, signing_key.clone())].into_iter().collect(), + None, + ), + )); outer_tx } diff --git a/crates/vote_ext/src/validator_set_update.rs b/crates/vote_ext/src/validator_set_update.rs index 21ca96b220..775acb9562 100644 --- a/crates/vote_ext/src/validator_set_update.rs +++ b/crates/vote_ext/src/validator_set_update.rs @@ -1,11 +1,11 @@ //! Contains types necessary for processing validator set updates //! in vote extensions. use std::cmp::Ordering; -use std::collections::HashMap; use std::ops::Deref; use namada_core::address::Address; use namada_core::borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; +use namada_core::collections::HashMap; use namada_core::eth_abi::{AbiEncode, Encode, Token}; use namada_core::ethereum_events::EthAddress; use namada_core::keccak::KeccakHash; diff --git a/crates/vp_env/Cargo.toml b/crates/vp_env/Cargo.toml index c3a39302f8..108a654773 100644 --- a/crates/vp_env/Cargo.toml +++ b/crates/vp_env/Cargo.toml @@ -16,6 +16,7 @@ version.workspace = true namada_core = { path = "../core" } namada_storage = { path = "../storage" } namada_tx = { path = "../tx" } +namada_ibc = { path = "../ibc" } derivative.workspace = true masp_primitives.workspace = true diff --git a/crates/vp_env/src/collection_validation/lazy_map.rs b/crates/vp_env/src/collection_validation/lazy_map.rs index 80b83d4cd3..6ff0999786 100644 --- a/crates/vp_env/src/collection_validation/lazy_map.rs +++ b/crates/vp_env/src/collection_validation/lazy_map.rs @@ -2,9 +2,9 @@ use core::fmt::Debug; use core::hash::Hash; -use std::collections::HashMap; use 
namada_core::borsh::{BorshDeserialize, BorshSerialize}; +use namada_core::collections::HashMap; use namada_core::storage; use namada_storage::collections::lazy_map::{LazyMap, NestedSubKey, SubKey}; use namada_storage::collections::{Nested, Simple}; diff --git a/crates/vp_env/src/lib.rs b/crates/vp_env/src/lib.rs index 3cbc515343..c57295c1cc 100644 --- a/crates/vp_env/src/lib.rs +++ b/crates/vp_env/src/lib.rs @@ -8,14 +8,12 @@ use masp_primitives::transaction::Transaction; use namada_core::address::Address; use namada_core::borsh::BorshDeserialize; use namada_core::hash::Hash; -use namada_core::ibc::{ - get_shielded_transfer, IbcEvent, MsgShieldedTransfer, EVENT_TYPE_PACKET, -}; use namada_core::storage::{ BlockHash, BlockHeight, Epoch, Epochs, Header, Key, TxIndex, }; use namada_core::token::Transfer; -use namada_storage::{OptionExt, ResultExt, StorageRead}; +use namada_ibc::{decode_message, IbcEvent, IbcMessage}; +use namada_storage::{OptionExt, StorageRead}; use namada_tx::Tx; /// Validity predicate's environment is available for native VPs and WASM VPs @@ -108,7 +106,7 @@ where &self, vp_code: Hash, input_data: Tx, - ) -> Result; + ) -> Result<(), namada_storage::Error>; /// Get a tx hash fn get_tx_code_hash(&self) -> Result, namada_storage::Error>; @@ -120,37 +118,31 @@ where ) -> Result { let signed = tx_data; let data = signed.data().ok_or_err_msg("No transaction data")?; - if let Ok(transfer) = Transfer::try_from_slice(&data) { - let shielded_hash = transfer - .shielded - .ok_or_err_msg("unable to find shielded hash")?; - let masp_tx = signed - .get_section(&shielded_hash) - .and_then(|x| x.as_ref().masp_tx()) - .ok_or_err_msg("unable to find shielded section")?; - return Ok(masp_tx); - } - - if let Ok(message) = MsgShieldedTransfer::try_from_slice(&data) { - return Ok(message.shielded_transfer.masp_tx); - } - - // Shielded transfer over IBC - let events = self.get_ibc_events(EVENT_TYPE_PACKET.to_string())?; - // The receiving event should be only one in the 
single IBC transaction - let event = events.first().ok_or_else(|| { - namada_storage::Error::new_const( - "No IBC event for the shielded action", - ) - })?; - get_shielded_transfer(event) - .into_storage_result()? - .map(|shielded| shielded.masp_tx) - .ok_or_else(|| { - namada_storage::Error::new_const( - "No shielded transfer in the IBC event", - ) - }) + let transfer = match Transfer::try_from_slice(&data) { + Ok(transfer) => Some(transfer), + Err(_) => { + match decode_message(&data).map_err(|_| { + namada_storage::Error::new_const("Unknown IBC message") + })? { + IbcMessage::Transfer(msg) => msg.transfer, + IbcMessage::NftTransfer(msg) => msg.transfer, + IbcMessage::RecvPacket(msg) => msg.transfer, + IbcMessage::AckPacket(msg) => msg.transfer, + IbcMessage::Timeout(msg) => msg.transfer, + IbcMessage::Envelope(_) => None, + } + } + }; + + let shielded_hash = transfer + .ok_or_err_msg("Missing transfer")? + .shielded + .ok_or_err_msg("unable to find shielded hash")?; + let masp_tx = signed + .get_section(&shielded_hash) + .and_then(|x| x.as_ref().masp_tx()) + .ok_or_err_msg("unable to find shielded section")?; + Ok(masp_tx) } /// Charge the provided gas for the current vp diff --git a/crates/vp_prelude/src/lib.rs b/crates/vp_prelude/src/lib.rs index 098d328e26..a23a5fbe72 100644 --- a/crates/vp_prelude/src/lib.rs +++ b/crates/vp_prelude/src/lib.rs @@ -13,7 +13,7 @@ pub mod ibc { // used in the VP input use core::slice; -pub use std::collections::{BTreeSet, HashSet}; +pub use std::collections::BTreeSet; use std::marker::PhantomData; pub use namada_core::address::Address; @@ -21,17 +21,20 @@ pub use namada_core::borsh::{ BorshDeserialize, BorshSerialize, BorshSerializeExt, }; use namada_core::chain::CHAIN_ID_LENGTH; +pub use namada_core::collections::HashSet; use namada_core::hash::{Hash, HASH_LENGTH}; use namada_core::internal::HostEnvResult; use namada_core::storage::{ BlockHash, BlockHeight, Epoch, Epochs, Header, TxIndex, BLOCK_HASH_LENGTH, }; +pub use 
namada_core::validity_predicate::{VpError, VpErrorExtResult}; pub use namada_core::*; pub use namada_governance::pgf::storage as pgf_storage; pub use namada_governance::storage as gov_storage; pub use namada_macros::validity_predicate; pub use namada_storage::{ - iter_prefix, iter_prefix_bytes, Error, OptionExt, ResultExt, StorageRead, + iter_prefix, iter_prefix_bytes, Error as StorageError, OptionExt, + ResultExt, StorageRead, }; pub use namada_tx::{Section, Tx}; use namada_vm_env::vp::*; @@ -41,6 +44,7 @@ pub use sha2::{Digest, Sha256, Sha384, Sha512}; pub use { namada_account as account, namada_parameters as parameters, namada_proof_of_stake as proof_of_stake, namada_token as token, + namada_tx as tx, }; pub fn sha256(bytes: &[u8]) -> Hash { @@ -57,21 +61,26 @@ pub fn log_string>(msg: T) { } /// Checks if a proposal id is being executed -pub fn is_proposal_accepted(ctx: &Ctx, proposal_id: u64) -> VpResult { +pub fn is_proposal_accepted(ctx: &Ctx, proposal_id: u64) -> VpEnvResult { let proposal_execution_key = gov_storage::keys::get_proposal_execution_key(proposal_id); - ctx.has_key_pre(&proposal_execution_key) + ctx.has_key_pre(&proposal_execution_key).into_vp_error() } /// Verify section signatures -pub fn verify_signatures(ctx: &Ctx, tx: &Tx, owner: &Address) -> VpResult { +#[cold] +#[inline(never)] +fn verify_signatures(ctx: &Ctx, tx: &Tx, owner: &Address) -> VpResult { let max_signatures_per_transaction = - parameters::max_signatures_per_transaction(&ctx.pre())?; + parameters::max_signatures_per_transaction(&ctx.pre()) + .into_vp_error()?; let public_keys_index_map = - account::public_keys_index_map(&ctx.pre(), owner)?; - let threshold = account::threshold(&ctx.pre(), owner)?.unwrap_or(1); + account::public_keys_index_map(&ctx.pre(), owner).into_vp_error()?; + let threshold = account::threshold(&ctx.pre(), owner) + .into_vp_error()? 
+ .unwrap_or(1); // Serialize parameters let max_signatures = max_signatures_per_transaction.serialize_to_vec(); @@ -79,7 +88,7 @@ pub fn verify_signatures(ctx: &Ctx, tx: &Tx, owner: &Address) -> VpResult { let targets = [tx.raw_header_hash()].serialize_to_vec(); let signer = owner.serialize_to_vec(); - let valid = unsafe { + unsafe { namada_vp_verify_tx_section_signature( targets.as_ptr() as _, targets.len() as _, @@ -90,10 +99,57 @@ pub fn verify_signatures(ctx: &Ctx, tx: &Tx, owner: &Address) -> VpResult { threshold, max_signatures.as_ptr() as _, max_signatures.len() as _, - ) - }; + ); + } + Ok(()) +} + +/// Utility to minimize signature verification ops. +#[derive(Default)] +#[repr(transparent)] +pub struct VerifySigGadget { + has_validated_sig: bool, +} + +impl VerifySigGadget { + /// Create a new [`VerifySigGadget`]. + pub const fn new() -> Self { + Self { + has_validated_sig: false, + } + } + + /// Verify a tx signature, only paying the cost of this operation once. + #[inline(always)] + pub fn verify_signatures( + &mut self, + ctx: &Ctx, + tx_data: &Tx, + owner: &Address, + ) -> VpResult { + if !self.has_validated_sig { + verify_signatures(ctx, tx_data, owner)?; + self.has_validated_sig = true; + } + Ok(()) + } - Ok(HostEnvResult::is_success(valid)) + /// Identical to [`Self::verify_signatures`], but execute a predicate before + /// validating a sig. If the predicate returns false, we do not check tx + /// signatures. + #[inline(always)] + pub fn verify_signatures_when bool>( + &mut self, + predicate: F, + ctx: &Ctx, + tx_data: &Tx, + owner: &Address, + ) -> VpResult { + if predicate() { + self.verify_signatures(ctx, tx_data, owner)?; + } + Ok(()) + } } /// Format and log a string in a debug build. @@ -157,6 +213,14 @@ impl Ctx { pub fn post(&self) -> CtxPostStorageRead<'_> { CtxPostStorageRead { _ctx: self } } + + /// Yield a byte array value back to the host environment. 
+ pub fn yield_value>(&self, value: V) { + let value = value.as_ref(); + unsafe { + namada_vp_yield_value(value.as_ptr() as _, value.len() as _); + } + } } /// Read access to the prior storage (state before tx execution) via @@ -174,19 +238,19 @@ pub struct CtxPostStorageRead<'a> { } /// Result of `VpEnv` or `namada_storage::StorageRead` method call -pub type EnvResult = Result; +pub type VpEnvResult = Result; /// Validity predicate result -pub type VpResult = EnvResult; +pub type VpResult = VpEnvResult<()>; /// Accept a transaction pub fn accept() -> VpResult { - Ok(true) + Ok(()) } /// Reject a transaction pub fn reject() -> VpResult { - Ok(false) + Err(VpError::Unspecified) } #[derive(Debug)] @@ -208,7 +272,7 @@ impl<'view> VpEnv<'view> for Ctx { fn read_temp( &self, key: &storage::Key, - ) -> Result, Error> { + ) -> Result, StorageError> { let key = key.to_string(); let read_result = unsafe { namada_vp_read_temp(key.as_ptr() as _, key.len() as _) }; @@ -219,19 +283,19 @@ impl<'view> VpEnv<'view> for Ctx { fn read_bytes_temp( &self, key: &storage::Key, - ) -> Result>, Error> { + ) -> Result>, StorageError> { let key = key.to_string(); let read_result = unsafe { namada_vp_read_temp(key.as_ptr() as _, key.len() as _) }; Ok(read_from_buffer(read_result, namada_vp_result_buffer)) } - fn get_chain_id(&self) -> Result { + fn get_chain_id(&self) -> Result { // Both `CtxPreStorageRead` and `CtxPostStorageRead` have the same impl get_chain_id() } - fn get_block_height(&self) -> Result { + fn get_block_height(&self) -> Result { // Both `CtxPreStorageRead` and `CtxPostStorageRead` have the same impl get_block_height() } @@ -239,17 +303,17 @@ impl<'view> VpEnv<'view> for Ctx { fn get_block_header( &self, height: BlockHeight, - ) -> Result, Error> { + ) -> Result, StorageError> { // Both `CtxPreStorageRead` and `CtxPostStorageRead` have the same impl get_block_header(height) } - fn get_block_hash(&self) -> Result { + fn get_block_hash(&self) -> Result { // Both 
`CtxPreStorageRead` and `CtxPostStorageRead` have the same impl get_block_hash() } - fn get_block_epoch(&self) -> Result { + fn get_block_epoch(&self) -> Result { // Both `CtxPreStorageRead` and `CtxPostStorageRead` have the same impl get_block_epoch() } @@ -259,11 +323,11 @@ impl<'view> VpEnv<'view> for Ctx { get_pred_epochs() } - fn get_tx_index(&self) -> Result { + fn get_tx_index(&self) -> Result { get_tx_index() } - fn get_native_token(&self) -> Result { + fn get_native_token(&self) -> Result { // Both `CtxPreStorageRead` and `CtxPostStorageRead` have the same impl get_native_token() } @@ -271,7 +335,7 @@ impl<'view> VpEnv<'view> for Ctx { fn get_ibc_events( &self, event_type: String, - ) -> Result, Error> { + ) -> Result, StorageError> { let read_result = unsafe { namada_vp_get_ibc_events( event_type.as_ptr() as _, @@ -288,24 +352,31 @@ impl<'view> VpEnv<'view> for Ctx { fn iter_prefix<'iter>( &'iter self, prefix: &storage::Key, - ) -> Result, Error> { + ) -> Result, StorageError> { iter_prefix_pre_impl(prefix) } - fn eval(&self, vp_code_hash: Hash, input_data: Tx) -> Result { - let input_data_bytes = borsh::to_vec(&input_data).unwrap(); - let result = unsafe { - namada_vp_eval( - vp_code_hash.0.as_ptr() as _, - vp_code_hash.0.len() as _, - input_data_bytes.as_ptr() as _, - input_data_bytes.len() as _, - ) - }; - Ok(HostEnvResult::is_success(result)) + fn eval( + &self, + vp_code_hash: Hash, + input_data: Tx, + ) -> Result<(), StorageError> { + let input_data_bytes = input_data.serialize_to_vec(); + + HostEnvResult::success_or( + unsafe { + namada_vp_eval( + vp_code_hash.0.as_ptr() as _, + vp_code_hash.0.len() as _, + input_data_bytes.as_ptr() as _, + input_data_bytes.len() as _, + ) + }, + StorageError::SimpleMessage("VP rejected the tx"), + ) } - fn get_tx_code_hash(&self) -> Result, Error> { + fn get_tx_code_hash(&self) -> Result, StorageError> { let result = Vec::with_capacity(HASH_LENGTH + 1); unsafe { namada_vp_get_tx_code_hash(result.as_ptr() as _); @@ 
-323,23 +394,37 @@ impl<'view> VpEnv<'view> for Ctx { }) } - fn charge_gas(&self, used_gas: u64) -> Result<(), Error> { + fn charge_gas(&self, used_gas: u64) -> Result<(), StorageError> { unsafe { namada_vp_charge_gas(used_gas) }; Ok(()) } } +impl namada_tx::action::Read for Ctx { + type Err = StorageError; + + fn read_temp( + &self, + key: &storage::Key, + ) -> Result, Self::Err> { + VpEnv::read_temp(self, key) + } +} + impl StorageRead for CtxPreStorageRead<'_> { type PrefixIter<'iter> = KeyValIterator<(String, Vec)> where Self: 'iter; - fn read_bytes(&self, key: &storage::Key) -> Result>, Error> { + fn read_bytes( + &self, + key: &storage::Key, + ) -> Result>, StorageError> { let key = key.to_string(); let read_result = unsafe { namada_vp_read_pre(key.as_ptr() as _, key.len() as _) }; Ok(read_from_buffer(read_result, namada_vp_result_buffer)) } - fn has_key(&self, key: &storage::Key) -> Result { + fn has_key(&self, key: &storage::Key) -> Result { let key = key.to_string(); let found = unsafe { namada_vp_has_key_pre(key.as_ptr() as _, key.len() as _) }; @@ -349,7 +434,7 @@ impl StorageRead for CtxPreStorageRead<'_> { fn iter_prefix<'iter>( &'iter self, prefix: &storage::Key, - ) -> Result, Error> { + ) -> Result, StorageError> { iter_prefix_pre_impl(prefix) } @@ -358,7 +443,7 @@ impl StorageRead for CtxPreStorageRead<'_> { fn iter_next<'iter>( &'iter self, iter: &mut Self::PrefixIter<'iter>, - ) -> Result)>, Error> { + ) -> Result)>, StorageError> { let read_result = unsafe { namada_vp_iter_next(iter.0) }; Ok(read_key_val_bytes_from_buffer( read_result, @@ -366,26 +451,26 @@ impl StorageRead for CtxPreStorageRead<'_> { )) } - fn get_chain_id(&self) -> Result { + fn get_chain_id(&self) -> Result { get_chain_id() } - fn get_block_height(&self) -> Result { + fn get_block_height(&self) -> Result { get_block_height() } fn get_block_header( &self, height: BlockHeight, - ) -> Result, Error> { + ) -> Result, StorageError> { get_block_header(height) } - fn 
get_block_hash(&self) -> Result { + fn get_block_hash(&self) -> Result { get_block_hash() } - fn get_block_epoch(&self) -> Result { + fn get_block_epoch(&self) -> Result { get_block_epoch() } @@ -393,11 +478,11 @@ impl StorageRead for CtxPreStorageRead<'_> { get_pred_epochs() } - fn get_tx_index(&self) -> Result { + fn get_tx_index(&self) -> Result { get_tx_index() } - fn get_native_token(&self) -> Result { + fn get_native_token(&self) -> Result { get_native_token() } } @@ -405,14 +490,17 @@ impl StorageRead for CtxPreStorageRead<'_> { impl StorageRead for CtxPostStorageRead<'_> { type PrefixIter<'iter> = KeyValIterator<(String, Vec)> where Self:'iter; - fn read_bytes(&self, key: &storage::Key) -> Result>, Error> { + fn read_bytes( + &self, + key: &storage::Key, + ) -> Result>, StorageError> { let key = key.to_string(); let read_result = unsafe { namada_vp_read_post(key.as_ptr() as _, key.len() as _) }; Ok(read_from_buffer(read_result, namada_vp_result_buffer)) } - fn has_key(&self, key: &storage::Key) -> Result { + fn has_key(&self, key: &storage::Key) -> Result { let key = key.to_string(); let found = unsafe { namada_vp_has_key_post(key.as_ptr() as _, key.len() as _) @@ -423,7 +511,7 @@ impl StorageRead for CtxPostStorageRead<'_> { fn iter_prefix<'iter>( &'iter self, prefix: &storage::Key, - ) -> Result, Error> { + ) -> Result, StorageError> { iter_prefix_post_impl(prefix) } @@ -432,7 +520,7 @@ impl StorageRead for CtxPostStorageRead<'_> { fn iter_next<'iter>( &'iter self, iter: &mut Self::PrefixIter<'iter>, - ) -> Result)>, Error> { + ) -> Result)>, StorageError> { let read_result = unsafe { namada_vp_iter_next(iter.0) }; Ok(read_key_val_bytes_from_buffer( read_result, @@ -440,26 +528,26 @@ impl StorageRead for CtxPostStorageRead<'_> { )) } - fn get_chain_id(&self) -> Result { + fn get_chain_id(&self) -> Result { get_chain_id() } - fn get_block_height(&self) -> Result { + fn get_block_height(&self) -> Result { get_block_height() } fn get_block_header( &self, 
height: BlockHeight, - ) -> Result, Error> { + ) -> Result, StorageError> { get_block_header(height) } - fn get_block_hash(&self) -> Result { + fn get_block_hash(&self) -> Result { get_block_hash() } - fn get_block_epoch(&self) -> Result { + fn get_block_epoch(&self) -> Result { get_block_epoch() } @@ -467,18 +555,18 @@ impl StorageRead for CtxPostStorageRead<'_> { get_pred_epochs() } - fn get_tx_index(&self) -> Result { + fn get_tx_index(&self) -> Result { get_tx_index() } - fn get_native_token(&self) -> Result { + fn get_native_token(&self) -> Result { get_native_token() } } fn iter_prefix_pre_impl( prefix: &storage::Key, -) -> Result)>, Error> { +) -> Result)>, StorageError> { let prefix = prefix.to_string(); let iter_id = unsafe { namada_vp_iter_prefix_pre(prefix.as_ptr() as _, prefix.len() as _) @@ -488,7 +576,7 @@ fn iter_prefix_pre_impl( fn iter_prefix_post_impl( prefix: &storage::Key, -) -> Result)>, Error> { +) -> Result)>, StorageError> { let prefix = prefix.to_string(); let iter_id = unsafe { namada_vp_iter_prefix_post(prefix.as_ptr() as _, prefix.len() as _) @@ -496,7 +584,7 @@ fn iter_prefix_post_impl( Ok(KeyValIterator(iter_id, PhantomData)) } -fn get_chain_id() -> Result { +fn get_chain_id() -> Result { let result = Vec::with_capacity(CHAIN_ID_LENGTH); unsafe { namada_vp_get_chain_id(result.as_ptr() as _); @@ -509,11 +597,13 @@ fn get_chain_id() -> Result { ) } -fn get_block_height() -> Result { +fn get_block_height() -> Result { Ok(BlockHeight(unsafe { namada_vp_get_block_height() })) } -fn get_block_header(height: BlockHeight) -> Result, Error> { +fn get_block_header( + height: BlockHeight, +) -> Result, StorageError> { let read_result = unsafe { namada_vp_get_block_header(height.0) }; match read_from_buffer(read_result, namada_vp_result_buffer) { Some(value) => Ok(Some( @@ -524,7 +614,7 @@ fn get_block_header(height: BlockHeight) -> Result, Error> { } } -fn get_block_hash() -> Result { +fn get_block_hash() -> Result { let result = 
Vec::with_capacity(BLOCK_HASH_LENGTH); unsafe { namada_vp_get_block_hash(result.as_ptr() as _); @@ -534,25 +624,25 @@ fn get_block_hash() -> Result { Ok(BlockHash::try_from(slice).expect("Cannot convert the hash")) } -fn get_block_epoch() -> Result { +fn get_block_epoch() -> Result { Ok(Epoch(unsafe { namada_vp_get_block_epoch() })) } -fn get_tx_index() -> Result { +fn get_tx_index() -> Result { Ok(TxIndex(unsafe { namada_vp_get_tx_index() })) } -fn get_pred_epochs() -> Result { +fn get_pred_epochs() -> Result { let read_result = unsafe { namada_vp_get_pred_epochs() }; let bytes = read_from_buffer(read_result, namada_vp_result_buffer).ok_or( - Error::SimpleMessage( + StorageError::SimpleMessage( "Missing result from `namada_vp_get_pred_epochs` call", ), )?; Ok(namada_core::decode(bytes).expect("Cannot decode pred epochs")) } -fn get_native_token() -> Result { +fn get_native_token() -> Result { let result = Vec::with_capacity(address::ADDRESS_LEN); unsafe { namada_vp_get_native_token(result.as_ptr() as _); diff --git a/documentation/dev/src/explore/design/ledger/governance.md b/documentation/dev/src/explore/design/ledger/governance.md index a7215c79b2..78905a1caa 100644 --- a/documentation/dev/src/explore/design/ledger/governance.md +++ b/documentation/dev/src/explore/design/ledger/governance.md @@ -106,8 +106,3 @@ Vote is valid if it follows these rules: The outcome of a proposal is computed at the epoch specific in the `endEpoch` field and executed at `graceEpoch` field (if it contains a non-empty `proposalCode` field). A proposal is accepted only if enough `yay` votes (net of the voting power) to match the threshold set in `ProposalType` is reached. If a proposal gets accepted, the locked funds will be reimbursed to the author. In case it gets rejected, the locked funds will be moved to slash fund. - -## Off-chain proposal - -In cases where it's not possible to run a proposal online (for example, when the chain is halted), an offline mechanism can be used. 
-The ledger offers the possibility to create and sign proposals that are verified against a specific chain epoch. diff --git a/examples/Cargo.toml b/examples/Cargo.toml index 7b8d14cccb..b92641f99c 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -16,10 +16,17 @@ version.workspace = true name = "generate-txs" path = "generate_txs.rs" +[[example]] +name = "tx-schema" +path = "tx_schema.rs" + [[example]] name = "make-db-migration" path = "make-db-migration.rs" +[features] +namada-eth-bridge = ["namada_sdk/namada-eth-bridge"] + [dev-dependencies] masp_proofs = { workspace = true, default-features = false, features = ["local-prover", "download-params"] } namada_apps = {path = "../crates/apps", features = ["migrations"]} diff --git a/examples/generate_txs.rs b/examples/generate_txs.rs index afd8ec4346..144e6b4ceb 100644 --- a/examples/generate_txs.rs +++ b/examples/generate_txs.rs @@ -31,7 +31,7 @@ async fn main() -> Result<(), Reason> { } let args: Vec<_> = std::env::args().collect(); if args.len() < 3 { - eprintln!("Usage: namada-generator "); + eprintln!("Usage: generate-txs "); return Result::Err(Reason::from("Incorrect command line arguments.")); } let json = serde_json::to_string(&test_vectors) diff --git a/examples/tx_schema.rs b/examples/tx_schema.rs new file mode 100644 index 0000000000..e3d678eefe --- /dev/null +++ b/examples/tx_schema.rs @@ -0,0 +1,18 @@ +use std::collections::BTreeMap; +use std::error::Error; + +use namada_sdk::borsh::BorshSchema; +use namada_sdk::tx::Tx; + +fn main() -> Result<(), Box> { + let args: Vec<_> = std::env::args().collect(); + if args.len() < 2 { + eprintln!("Usage: tx-schema "); + return Result::Err("Incorrect command line arguments.".into()); + } + let mut definitions = BTreeMap::new(); + Tx::add_definitions_recursively(&mut definitions); + std::fs::write(&args[1], format!("{:#?}", definitions)) + .expect("unable to save schema"); + Ok(()) +} diff --git a/genesis/localnet/balances.toml 
b/genesis/localnet/balances.toml index a14c274456..1a41f87dad 100644 --- a/genesis/localnet/balances.toml +++ b/genesis/localnet/balances.toml @@ -31,6 +31,8 @@ tpknam1qypvqpzu74nafjahlwyq272dj76qq9rz30dulyc94883tmj893mquqs74gxv4 = "1000000" # validator-0 tnam1q9vhfdur7gadtwx4r223agpal0fvlqhywylf2mzx = "200000" tpknam1qpzrttnzfyt6xfu2vy092eruasll3z52rjfexwapdw0rdww5uktlk3j73dw = "200000" +# validator-0-account-key +tpknam1qpg2tsrplvhu3fd7z7tq5ztc2ne3s7e2ahjl2a2cddufrzdyr752g666ytj = "1000000" [token.BTC] # albert @@ -60,6 +62,7 @@ tpknam1qypvqpzu74nafjahlwyq272dj76qq9rz30dulyc94883tmj893mquqs74gxv4 = "1000000" # validator-0 tnam1q9vhfdur7gadtwx4r223agpal0fvlqhywylf2mzx = "1000000" + [token.DOT] # albert tnam1qxfj3sf6a0meahdu9t6znp05g8zx4dkjtgyn9gfu = "1000000" diff --git a/genesis/localnet/parameters.toml b/genesis/localnet/parameters.toml index f99f9e0a50..1d4ada80c3 100644 --- a/genesis/localnet/parameters.toml +++ b/genesis/localnet/parameters.toml @@ -1,6 +1,7 @@ # General protocol parameters. [parameters] native_token = "NAM" +is_native_token_transferable = true # Minimum number of blocks in an epoch. min_num_of_blocks = 4 # Maximum expected time per block (in seconds). 
@@ -85,7 +86,7 @@ min_proposal_voting_period = 3 max_proposal_period = 27 # maximum number of characters in the proposal content max_proposal_content_size = 10000 -# minimum epochs between end and grace epoch +# minimum epochs between end and activation epoch min_proposal_grace_epochs = 6 # Public goods funding parameters @@ -101,3 +102,10 @@ stewards = [ pgf_inflation_rate = "0.1" # The pgf stewards inflation rate stewards_inflation_rate = "0.01" + +# IBC parameters +[ibc_params] +# default mint limit of each token +default_mint_limit = "0" +# default per-epoch throughput limit of each token +default_per_epoch_throughput_limit = "0" diff --git a/genesis/starter/parameters.toml b/genesis/starter/parameters.toml index ccbf9c95bb..aff77e1395 100644 --- a/genesis/starter/parameters.toml +++ b/genesis/starter/parameters.toml @@ -1,6 +1,7 @@ # General protocol parameters. [parameters] native_token = "NAM" +is_native_token_transferable = true # Minimum number of blocks in an epoch. min_num_of_blocks = 4 # Maximum expected time per block (in seconds). 
@@ -85,7 +86,7 @@ min_proposal_voting_period = 3 max_proposal_period = 27 # maximum number of characters in the proposal content max_proposal_content_size = 10000 -# minimum epochs between end and grace epoch +# minimum epochs between end and activation epoch min_proposal_grace_epochs = 6 # Public goods funding parameters @@ -96,3 +97,10 @@ stewards = [] pgf_inflation_rate = "0.1" # The pgf stewards inflation rate stewards_inflation_rate = "0.01" + +# IBC parameters +[ibc_params] +# default mint limit of each token +default_mint_limit = "0" +# default per-epoch throughput limit of each token +default_per_epoch_throughput_limit = "0" diff --git a/scripts/release.sh b/scripts/release.sh index 659e275f1f..ece65a0e0f 100755 --- a/scripts/release.sh +++ b/scripts/release.sh @@ -25,15 +25,15 @@ HASH_BEFORE=$(git rev-parse HEAD) cargo release --execute $VERSION HASH_AFTER=$(git rev-parse HEAD) -# update the wasm crate versions (2 fixups) +# update the wasm workspace crate versions (2 fixups) cd $REPO_ROOT/wasm cargo update -w git add Cargo.lock git commit --fixup=$HASH_AFTER cargo release --execute $VERSION -# update the wasm_for_tests crate version, and rebuild them (3 fixups) -cd $REPO_ROOT/wasm_for_tests/wasm_source +# update the wasm_for_tests workspace crate version, and rebuild them (3 fixups) +cd $REPO_ROOT/wasm_for_tests cargo update -w git add Cargo.lock git commit --fixup=$HASH_AFTER diff --git a/wasm/Cargo.lock b/wasm/Cargo.lock index d824700e63..dfb6069f28 100644 --- a/wasm/Cargo.lock +++ b/wasm/Cargo.lock @@ -39,9 +39,9 @@ dependencies = [ [[package]] name = "aes" -version = "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac1f845298e95f983ff1944b728ae08b8cebab80d684f0a832ed0fc74dfa27e2" +checksum = "b169f7a6d4742236a0a00c541b845991d0ac43e546831af1249753ab4c3aa3a0" dependencies = [ "cfg-if 1.0.0", "cipher", @@ -54,16 +54,28 @@ version = "0.7.8" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "891477e0c6a8957309ee5c45a6368af3ae14bb510732d2684ffa19af310920f9" dependencies = [ - "getrandom 0.2.11", + "getrandom 0.2.14", + "once_cell", + "version_check", +] + +[[package]] +name = "ahash" +version = "0.8.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" +dependencies = [ + "cfg-if 1.0.0", "once_cell", "version_check", + "zerocopy", ] [[package]] name = "aho-corasick" -version = "1.1.2" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2969dcb958b36655471fc61f7e416fa76033bdd4bfed0678d8fee1e2d07a1f0" +checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" dependencies = [ "memchr", ] @@ -85,9 +97,9 @@ dependencies = [ [[package]] name = "anstream" -version = "0.6.4" +version = "0.6.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ab91ebe16eb252986481c5b62f6098f3b698a45e34b5b98200cf20dd2484a44" +checksum = "d96bd03f33fe50a863e394ee9718a706f988b9079b20c3784fb726e7678b62fb" dependencies = [ "anstyle", "anstyle-parse", @@ -99,9 +111,9 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.4" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7079075b41f533b8c61d2a4d073c4676e1f8b249ff94a393b0595db304e0dd87" +checksum = "8901269c6307e8d93993578286ac0edf7f195079ffff5ebdeea6a59ffb7e36bc" [[package]] name = "anstyle-parse" @@ -114,9 +126,9 @@ dependencies = [ [[package]] name = "anstyle-query" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3a318f1f38d2418400f8209655bfd825785afd25aa30bb7ba6cc792e4596748" +checksum = "e28923312444cdd728e4738b3f9c9cac739500909bb3d3c94b43551b16517648" dependencies = [ "windows-sys 0.52.0", ] @@ -133,9 +145,9 @@ dependencies = [ [[package]] name = 
"anyhow" -version = "1.0.75" +version = "1.0.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4668cab20f66d8d020e1fbc0ebe47217433c1b6c8f2040faf858554e394ace6" +checksum = "f538837af36e6f6a9be0faa67f9a314f8119e4e4b5867c6ab40ed60360142519" [[package]] name = "ark-bls12-381" @@ -236,13 +248,13 @@ checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" [[package]] name = "async-trait" -version = "0.1.77" +version = "0.1.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c980ee35e870bd1a4d2c8294d4c04d0499e67bca1e4b5cefcc693c2fa00caea9" +checksum = "a507401cad91ec6a857ed5513a2073c82a9b9048762b885bb98655b306964681" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.58", ] [[package]] @@ -258,34 +270,33 @@ dependencies = [ [[package]] name = "auto_impl" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fee3da8ef1276b0bee5dd1c7258010d8fffd31801447323115a25560e1327b89" +checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" dependencies = [ - "proc-macro-error", "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.58", ] [[package]] name = "autocfg" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" +checksum = "f1fdabc7756949593fe60f30ec81974b613357de856987752631dea1e3394c80" [[package]] name = "backtrace" -version = "0.3.69" +version = "0.3.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837" +checksum = "26b05800d2e817c8b3b4b54abd461726265fa9789ae34330622f2db9ee696f9d" dependencies = [ "addr2line", "cc", "cfg-if 1.0.0", "libc", "miniz_oxide", - "object 0.32.1", + "object 0.32.2", "rustc-demangle", ] @@ -309,9 +320,9 @@ checksum = 
"9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] name = "base64" -version = "0.21.5" +version = "0.21.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35636a1494ede3b646cc98f74f8e62c773a38a659ebc777a2cf26b9b74171df9" +checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" [[package]] name = "base64ct" @@ -393,9 +404,12 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.4.1" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "327762f6e5a765692301e5bb513e0d9fef63be86bbc14528052b1cd3e6f03e07" +checksum = "cf4b9d6a944f767f8e5e0db018570623c85f3d925ac718db4e06d0187adb21c1" +dependencies = [ + "serde", +] [[package]] name = "bitvec" @@ -409,6 +423,15 @@ dependencies = [ "wyz", ] +[[package]] +name = "blake2" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46502ad458c9a52b69d4d4d32775c788b7a1b85e8bc9d482d92250fc0e3f8efe" +dependencies = [ + "digest 0.10.7", +] + [[package]] name = "blake2b_simd" version = "1.0.2" @@ -433,9 +456,9 @@ dependencies = [ [[package]] name = "blake3" -version = "1.5.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0231f06152bf547e9c2b5194f247cd97aacf6dcd8b15d8e5ec0663f64580da87" +checksum = "30cca6d3674597c30ddf2c587bf8d9d65c9a84d2326d941cc79c9842dfe0ef52" dependencies = [ "arrayref", "arrayvec", @@ -498,41 +521,86 @@ dependencies = [ [[package]] name = "borsh" -version = "1.2.1" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4114279215a005bc675e386011e594e1d9b800918cea18fcadadcce864a2046b" +dependencies = [ + "borsh-derive 0.10.3", + "hashbrown 0.13.2", +] + +[[package]] +name = "borsh" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"9897ef0f1bd2362169de6d7e436ea2237dc1085d7d1e4db75f4be34d86f309d1" +checksum = "0901fc8eb0aca4c83be0106d6f2db17d86a08dfc2c25f0e84464bf381158add6" dependencies = [ - "borsh-derive", + "borsh-derive 1.4.0", "cfg_aliases", ] [[package]] name = "borsh-derive" -version = "1.2.1" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0754613691538d51f329cce9af41d7b7ca150bc973056f1156611489475f54f7" +dependencies = [ + "borsh-derive-internal", + "borsh-schema-derive-internal", + "proc-macro-crate 0.1.5", + "proc-macro2", + "syn 1.0.109", +] + +[[package]] +name = "borsh-derive" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "478b41ff04256c5c8330f3dfdaaae2a5cc976a8e75088bafa4625b0d0208de8c" +checksum = "51670c3aa053938b0ee3bd67c3817e471e626151131b934038e83c5bf8de48f5" dependencies = [ "once_cell", - "proc-macro-crate 2.0.1", + "proc-macro-crate 3.1.0", "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.58", "syn_derive", ] +[[package]] +name = "borsh-derive-internal" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "afb438156919598d2c7bad7e1c0adf3d26ed3840dbc010db1a882a65583ca2fb" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "borsh-ext" version = "1.2.0" source = "git+https://github.com/heliaxdev/borsh-ext?tag=v1.2.0#a62fee3e847e512cad9ac0f1fd5a900e5db9ba37" dependencies = [ - "borsh", + "borsh 1.4.0", +] + +[[package]] +name = "borsh-schema-derive-internal" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "634205cc43f74a1b9046ef87c4540ebda95696ec0f315024860cad7c5b0f5ccd" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", ] [[package]] name = "bs58" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5353f36341f7451062466f0b755b96ac3a9547e4d7f6b70d603fc721a7d7896" 
+checksum = "bf88ba1141d185c399bee5288d850d63b8369520c1eafc32a0430b5b6c287bf4" dependencies = [ "sha2 0.10.8", "tinyvec", @@ -540,9 +608,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.14.0" +version = "3.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f30e7476521f6f8af1a1c4c0b8cc94f0bee37d91763d0ca2665f299b6cd8aec" +checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" [[package]] name = "byte-slice-cast" @@ -558,9 +626,9 @@ checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" [[package]] name = "bytecheck" -version = "0.6.11" +version = "0.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b6372023ac861f6e6dc89c8344a8f398fb42aaba2b5dbc649ca0c0e9dbcb627" +checksum = "23cdc57ce23ac53c931e88a43d06d070a6fd142f2617be5855eb75efc9beb1c2" dependencies = [ "bytecheck_derive", "ptr_meta", @@ -569,9 +637,9 @@ dependencies = [ [[package]] name = "bytecheck_derive" -version = "0.6.11" +version = "0.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7ec4c6f261935ad534c0c22dbef2201b45918860eb1c574b972bd213a76af61" +checksum = "3db406d29fbcd95542e92559bed4d8ad92636d1ca8b3b72ede10b4bcc010e659" dependencies = [ "proc-macro2", "quote", @@ -586,9 +654,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.5.0" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2bd12c1caf447e69cd4528f47f94d203fd2582878ecb9e9465484c4148a8223" +checksum = "514de17de45fdb8dc022b1a7975556c53c86f9f0aa5f534b98977b171857c2c9" dependencies = [ "serde", ] @@ -604,9 +672,9 @@ dependencies = [ [[package]] name = "cargo-platform" -version = "0.1.5" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e34637b3140142bdf929fb439e8aa4ebad7651ebf7b1080b3930aa16ac1459ff" +checksum = 
"24b1f0365a6c6bb4020cd05806fd0d33c44d38046b8bd7f0e40814b9763cabfc" dependencies = [ "serde", ] @@ -619,7 +687,7 @@ checksum = "2d886547e41f740c616ae73108f6eb70afe6d940c7bc697cb30f13daec073037" dependencies = [ "camino", "cargo-platform", - "semver 1.0.20", + "semver 1.0.22", "serde", "serde_json", "thiserror", @@ -636,12 +704,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.83" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" -dependencies = [ - "libc", -] +checksum = "2678b2e3449475e95b0aa6f9b506a28e61b3dc8996592b983695e8ebb58a8b41" [[package]] name = "cfg-if" @@ -687,14 +752,14 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.31" +version = "0.4.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f2c685bad3eb3d45a01354cedb7d5faa66194d1d58ba6e267a8de788f79db38" +checksum = "8a0d04d43504c61aa6c7531f1871dd0d418d91130162063b789da00fd7057a5e" dependencies = [ "android-tzdata", "iana-time-zone", "num-traits", - "windows-targets 0.48.5", + "windows-targets 0.52.4", ] [[package]] @@ -719,18 +784,18 @@ dependencies = [ [[package]] name = "clap" -version = "4.4.11" +version = "4.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfaff671f6b22ca62406885ece523383b9b64022e341e53e009a62ebc47a45f2" +checksum = "90bc066a67923782aa8515dbaea16946c5bcc5addbd668bb80af688e53e548a0" dependencies = [ "clap_builder", ] [[package]] name = "clap_builder" -version = "4.4.11" +version = "4.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a216b506622bb1d316cd51328dce24e07bdff4a6128a47c7e7fad11878d5adbb" +checksum = "ae129e2e766ae0ec03484e609954119f123cc1fe650337e155d03b022f24f7b4" dependencies = [ "anstream", "anstyle", @@ -740,9 +805,9 @@ dependencies = [ [[package]] name = "clap_lex" -version = "0.6.0" +version = "0.7.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "702fc72eb24e5a1e48ce58027a675bc24edd52096d5397d4aea7c6dd9eca0bd1" +checksum = "98cc8fbded0c607b7ba9dd60cd98df59af97e84d24e49c8557331cfc26d301ce" [[package]] name = "clru" @@ -787,7 +852,7 @@ version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5286a0843c21f8367f7be734f89df9b822e0321d8bcce8d6e735aadff7d74979" dependencies = [ - "base64 0.21.5", + "base64 0.21.7", "bech32 0.9.1", "bs58", "digest 0.10.7", @@ -814,14 +879,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f76990911f2267d837d9d0ad060aa63aaad170af40904b29461734c339030d4d" dependencies = [ "quote", - "syn 2.0.52", + "syn 2.0.58", ] [[package]] name = "const-hex" -version = "1.10.0" +version = "1.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5104de16b218eddf8e34ffe2f86f74bfa4e61e95a1b89732fccf6325efd0557" +checksum = "5ba00838774b4ab0233e355d26710fbfc8327a05c017f6dc4873f876d1f79f78" dependencies = [ "cfg-if 1.0.0", "cpufeatures", @@ -832,9 +897,9 @@ dependencies = [ [[package]] name = "const-oid" -version = "0.9.5" +version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28c122c3980598d243d63d9a704629a2d748d101f278052ff068be5a4423ab6f" +checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" [[package]] name = "constant_time_eq" @@ -893,9 +958,9 @@ dependencies = [ [[package]] name = "cpufeatures" -version = "0.2.11" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce420fe07aecd3e67c5f910618fe65e94158f6dcc0adf44e00d69ce2bdfe0fd0" +checksum = "53fe5e26ff1b7aef8bca9c6080520cfb8d9333c7568e1829cef191a9723e5504" dependencies = [ "libc", ] @@ -961,9 +1026,9 @@ dependencies = [ [[package]] name = "crc32fast" -version = "1.3.2" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d" +checksum = "b3855a8a784b474f333699ef2bbca9db2c4a1f6d9088a90a2d25b1eb53111eaa" dependencies = [ "cfg-if 1.0.0", ] @@ -980,26 +1045,21 @@ dependencies = [ [[package]] name = "crossbeam-deque" -version = "0.8.3" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce6fd6f855243022dcecf8702fef0c297d4338e226845fe067f6341ad9fa0cef" +checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d" dependencies = [ - "cfg-if 1.0.0", "crossbeam-epoch", - "crossbeam-utils 0.8.16", + "crossbeam-utils 0.8.19", ] [[package]] name = "crossbeam-epoch" -version = "0.9.15" +version = "0.9.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae211234986c545741a7dc064309f67ee1e5ad243d0e48335adc0484d960bcc7" +checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" dependencies = [ - "autocfg", - "cfg-if 1.0.0", - "crossbeam-utils 0.8.16", - "memoffset 0.9.0", - "scopeguard", + "crossbeam-utils 0.8.19", ] [[package]] @@ -1015,12 +1075,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.16" +version = "0.8.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a22b2d63d4d1dc0b7f1b6b2747dd0088008a9be28b6ddf0b1e7d335e3037294" -dependencies = [ - "cfg-if 1.0.0", -] +checksum = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345" [[package]] name = "crunchy" @@ -1100,9 +1157,9 @@ dependencies = [ [[package]] name = "darling" -version = "0.20.3" +version = "0.20.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0209d94da627ab5605dcccf08bb18afa5009cfbef48d8a8b7d7bdbc79be25c5e" +checksum = "54e36fcd13ed84ffdfda6f5be89b31287cbb80c439841fe69e04841435464391" dependencies = [ "darling_core", "darling_macro", @@ -1110,27 +1167,26 @@ dependencies = [ [[package]] name = "darling_core" -version = "0.20.3" +version = "0.20.8" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "177e3443818124b357d8e76f53be906d60937f0d3a90773a664fa63fa253e621" +checksum = "9c2cf1c23a687a1feeb728783b993c4e1ad83d99f351801977dd809b48d0a70f" dependencies = [ "fnv", "ident_case", "proc-macro2", "quote", - "strsim", - "syn 2.0.52", + "syn 2.0.58", ] [[package]] name = "darling_macro" -version = "0.20.3" +version = "0.20.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "836a9bbc7ad63342d6d6e7b815ccab164bc77a2d95d84bc3117a8c0d5c98e2d5" +checksum = "a668eda54683121533a393014d8692171709ff57a7d61f187b6e782719f8933f" dependencies = [ "darling_core", "quote", - "syn 2.0.52", + "syn 2.0.58", ] [[package]] @@ -1141,9 +1197,9 @@ checksum = "7e962a19be5cfc3f3bf6dd8f61eb50107f356ad6270fbb3ed41476571db78be5" [[package]] name = "der" -version = "0.7.8" +version = "0.7.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fffa369a668c8af7dbf8b5e56c9f744fbd399949ed171606040001947de40b1c" +checksum = "f55bf8e7b65898637379c1b74eb1551107c8294ed26d855ceb9fd1a09cfc9bc0" dependencies = [ "const-oid", "zeroize", @@ -1151,9 +1207,9 @@ dependencies = [ [[package]] name = "deranged" -version = "0.3.10" +version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8eb30d70a07a3b04884d2677f06bec33509dc67ca60d92949e5535352d3191dc" +checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" dependencies = [ "powerfmt", ] @@ -1244,7 +1300,7 @@ checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.58", ] [[package]] @@ -1253,6 +1309,12 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "56ce8c6da7551ec6c462cbaf3bfbc75131ebbfa1c944aeaa9dab51ca1c5f0c3b" +[[package]] +name = "dyn-clone" +version = "1.0.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"0d6ef0072f8a535281e4876be788938b528e9a1d43900b82c2569af7da799125" + [[package]] name = "dynasm" version = "1.2.3" @@ -1334,9 +1396,9 @@ dependencies = [ [[package]] name = "either" -version = "1.9.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07" +checksum = "11157ac094ffbdde99aa67b23417ebdd801842852b500e395a45a9c0aac03e4a" [[package]] name = "elliptic-curve" @@ -1360,20 +1422,20 @@ dependencies = [ [[package]] name = "encoding_rs" -version = "0.8.33" +version = "0.8.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7268b386296a025e474d5140678f75d6de9493ae55a5d709eeb9dd08149945e1" +checksum = "b45de904aa0b010bce2ab45264d0631681847fa7b6f2eaa7dab7619943bc4f59" dependencies = [ "cfg-if 1.0.0", ] [[package]] name = "enr" -version = "0.9.1" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe81b5c06ecfdbc71dd845216f225f53b62a10cb8a16c946836a3467f701d05b" +checksum = "2a3d8dc56e02f954cac8eb489772c552c473346fc34f67412bb6244fd647f7e4" dependencies = [ - "base64 0.21.5", + "base64 0.21.7", "bytes", "hex", "k256", @@ -1423,7 +1485,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.58", ] [[package]] @@ -1561,9 +1623,9 @@ dependencies = [ [[package]] name = "ethers-addressbook" -version = "2.0.11" +version = "2.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c405f24ea3a517899ba7985385c43dc4a7eb1209af3b1e0a1a32d7dcc7f8d09" +checksum = "5495afd16b4faa556c3bba1f21b98b4983e53c1755022377051a975c3b021759" dependencies = [ "ethers-core", "once_cell", @@ -1573,9 +1635,9 @@ dependencies = [ [[package]] name = "ethers-contract" -version = "2.0.11" +version = "2.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0111ead599d17a7bff6985fd5756f39ca7033edc79a31b23026a8d5d64fa95cd" +checksum = 
"6fceafa3578c836eeb874af87abacfb041f92b4da0a78a5edd042564b8ecdaaa" dependencies = [ "const-hex", "ethers-contract-abigen", @@ -1592,31 +1654,31 @@ dependencies = [ [[package]] name = "ethers-contract-abigen" -version = "2.0.11" +version = "2.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51258120c6b47ea9d9bec0d90f9e8af71c977fbefbef8213c91bfed385fe45eb" +checksum = "04ba01fbc2331a38c429eb95d4a570166781f14290ef9fdb144278a90b5a739b" dependencies = [ "Inflector", "const-hex", "dunce", "ethers-core", "eyre", - "prettyplease 0.2.15", + "prettyplease 0.2.17", "proc-macro2", "quote", "regex", "serde", "serde_json", - "syn 2.0.52", - "toml 0.8.2", + "syn 2.0.58", + "toml 0.8.12", "walkdir", ] [[package]] name = "ethers-contract-derive" -version = "2.0.11" +version = "2.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "936e7a0f1197cee2b62dc89f63eff3201dbf87c283ff7e18d86d38f83b845483" +checksum = "87689dcabc0051cde10caaade298f9e9093d65f6125c14575db3fd8c669a168f" dependencies = [ "Inflector", "const-hex", @@ -1625,14 +1687,14 @@ dependencies = [ "proc-macro2", "quote", "serde_json", - "syn 2.0.52", + "syn 2.0.58", ] [[package]] name = "ethers-core" -version = "2.0.11" +version = "2.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f03e0bdc216eeb9e355b90cf610ef6c5bb8aca631f97b5ae9980ce34ea7878d" +checksum = "82d80cc6ad30b14a48ab786523af33b37f28a8623fc06afd55324816ef18fb1f" dependencies = [ "arrayvec", "bytes", @@ -1650,8 +1712,8 @@ dependencies = [ "rlp", "serde", "serde_json", - "strum 0.25.0", - "syn 2.0.52", + "strum 0.26.2", + "syn 2.0.58", "tempfile", "thiserror", "tiny-keccak", @@ -1660,14 +1722,14 @@ dependencies = [ [[package]] name = "ethers-etherscan" -version = "2.0.11" +version = "2.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abbac2c890bdbe0f1b8e549a53b00e2c4c1de86bb077c1094d1f38cdf9381a56" +checksum = 
"e79e5973c26d4baf0ce55520bd732314328cabe53193286671b47144145b9649" dependencies = [ "chrono", "ethers-core", "reqwest", - "semver 1.0.20", + "semver 1.0.22", "serde", "serde_json", "thiserror", @@ -1676,9 +1738,9 @@ dependencies = [ [[package]] name = "ethers-middleware" -version = "2.0.11" +version = "2.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "681ece6eb1d10f7cf4f873059a77c04ff1de4f35c63dd7bccde8f438374fcb93" +checksum = "48f9fdf09aec667c099909d91908d5eaf9be1bd0e2500ba4172c1d28bfaa43de" dependencies = [ "async-trait", "auto_impl", @@ -1702,13 +1764,13 @@ dependencies = [ [[package]] name = "ethers-providers" -version = "2.0.11" +version = "2.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25d6c0c9455d93d4990c06e049abf9b30daf148cf461ee939c11d88907c60816" +checksum = "6434c9a33891f1effc9c75472e12666db2fa5a0fec4b29af6221680a6fe83ab2" dependencies = [ "async-trait", "auto_impl", - "base64 0.21.5", + "base64 0.21.7", "bytes", "const-hex", "enr", @@ -1717,7 +1779,7 @@ dependencies = [ "futures-timer", "futures-util", "hashers", - "http", + "http 0.2.12", "instant", "jsonwebtoken", "once_cell", @@ -1738,9 +1800,9 @@ dependencies = [ [[package]] name = "ethers-signers" -version = "2.0.11" +version = "2.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0cb1b714e227bbd2d8c53528adb580b203009728b17d0d0e4119353aa9bc5532" +checksum = "228875491c782ad851773b652dd8ecac62cda8571d3bc32a5853644dd26766c2" dependencies = [ "async-trait", "coins-bip32", @@ -1757,9 +1819,9 @@ dependencies = [ [[package]] name = "eyre" -version = "0.6.9" +version = "0.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80f656be11ddf91bd709454d15d5bd896fbaf4cc3314e69349e4d1569f5b46cd" +checksum = "7cd915d99f24784cdc19fd37ef22b97e3ff0ae756c7e492e9fbfe897d61e2aec" dependencies = [ "indenter", "once_cell", @@ -1779,9 +1841,9 @@ checksum = 
"4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7" [[package]] name = "fastrand" -version = "2.0.1" +version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5" +checksum = "658bd65b1cf4c852a3cc96f18a8ce7b5640f6b703f905c7d74532294c2a63984" [[package]] name = "fd-lock" @@ -1870,9 +1932,9 @@ checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" [[package]] name = "futures" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da0290714b38af9b4a7b094b8a37086d1b4e61f2df9122c3cad2577669145335" +checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" dependencies = [ "futures-channel", "futures-core", @@ -1885,9 +1947,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff4dd66668b557604244583e3e1e1eada8c5c2e96a6d0d6653ede395b78bbacb" +checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" dependencies = [ "futures-core", "futures-sink", @@ -1895,15 +1957,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb1d22c66e66d9d72e1758f0bd7d4fd0bee04cad842ee34587d68c07e45d088c" +checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" [[package]] name = "futures-executor" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f4fb8693db0cf099eadcca0efe2a5a22e4550f98ed16aba6c48700da29597bc" +checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d" dependencies = [ "futures-core", "futures-task", @@ -1912,9 +1974,9 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.29" 
+version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bf34a163b5c4c52d0478a4d757da8fb65cabef42ba90515efee0f6f9fa45aaa" +checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" [[package]] name = "futures-locks" @@ -1928,32 +1990,32 @@ dependencies = [ [[package]] name = "futures-macro" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53b153fd91e4b0147f4aced87be237c98248656bb01050b96bf3ee89220a8ddb" +checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.58", ] [[package]] name = "futures-sink" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e36d3378ee38c2a36ad710c5d30c2911d752cb941c00c72dbabfb786a7970817" +checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" [[package]] name = "futures-task" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efd193069b0ddadc69c46389b740bbccdd97203899b48d09c5f7969591d6bae2" +checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" [[package]] name = "futures-timer" -version = "3.0.2" +version = "3.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" +checksum = "f288b0a4f20f9a56b5d1da57e2227c661b7b16168e2f72365f57b63326e29b24" dependencies = [ "gloo-timers", "send_wrapper 0.4.0", @@ -1961,9 +2023,9 @@ dependencies = [ [[package]] name = "futures-util" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a19526d624e703a3179b3d322efec918b6246ea0fa51d41124525f00f1cc8104" +checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" dependencies = [ 
"futures-channel", "futures-core", @@ -2021,9 +2083,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.11" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe9006bed769170c11f845cf00c7c1e9092aeb3f268e007c3e760ac68008070f" +checksum = "94b22e06ecb0110981051723910cbf0b5f5e09a2062dd7663334ee79a9d1286c" dependencies = [ "cfg-if 1.0.0", "js-sys", @@ -2095,17 +2157,17 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.22" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d6250322ef6e60f93f9a2162799302cd6f68f79f6e5d85c8c16f14d1d958178" +checksum = "81fe527a889e1532da5c525686d96d4c2e74cdd345badf8dfef9f6b39dd5f5e8" dependencies = [ "bytes", "fnv", "futures-core", "futures-sink", "futures-util", - "http", - "indexmap 2.1.0", + "http 0.2.12", + "indexmap 2.2.6", "slab", "tokio", "tokio-util", @@ -2114,9 +2176,9 @@ dependencies = [ [[package]] name = "half" -version = "1.8.2" +version = "1.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7" +checksum = "1b43ede17f21864e81be2fa654110bf1e793774238d86ef8555c37e6519c0403" [[package]] name = "hashbrown" @@ -2124,7 +2186,7 @@ version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" dependencies = [ - "ahash", + "ahash 0.7.8", ] [[package]] @@ -2133,7 +2195,16 @@ version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" dependencies = [ - "ahash", + "ahash 0.7.8", +] + +[[package]] +name = "hashbrown" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" +dependencies = [ + "ahash 0.8.11", ] 
[[package]] @@ -2159,9 +2230,9 @@ checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" [[package]] name = "hermit-abi" -version = "0.3.3" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d77f7ec81a6d05a3abb01ab6eb7590f6083d08449fe5a1c8b1e620283546ccb7" +checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" [[package]] name = "hex" @@ -2217,18 +2288,29 @@ checksum = "77e806677ce663d0a199541030c816847b36e8dc095f70dae4a4f4ad63da5383" [[package]] name = "home" -version = "0.5.5" +version = "0.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5444c27eef6923071f7ebcc33e3444508466a76f7a2b93da00ed6e19f30c1ddb" +checksum = "e3d1354bf6b7235cb4a0576c2619fd4ed18183f689b12b006a0ee7329eeff9a5" dependencies = [ - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] name = "http" -version = "0.2.11" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8947b1a6fad4393052c7ba1f4cd97bed3e953a95c79c92ad9b051a04611d9fbb" +checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1" +dependencies = [ + "bytes", + "fnv", + "itoa", +] + +[[package]] +name = "http" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258" dependencies = [ "bytes", "fnv", @@ -2237,12 +2319,12 @@ dependencies = [ [[package]] name = "http-body" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" +checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" dependencies = [ "bytes", - "http", + "http 0.2.12", "pin-project-lite", ] @@ -2260,22 +2342,22 @@ checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" [[package]] name = "hyper" -version 
= "0.14.27" +version = "0.14.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffb1cfd654a8219eaef89881fdb3bb3b1cdc5fa75ded05d6933b2b382e395468" +checksum = "bf96e135eb83a2a8ddf766e426a841d8ddd7449d5f00d34ea02b41d2f19eef80" dependencies = [ "bytes", "futures-channel", "futures-core", "futures-util", "h2", - "http", + "http 0.2.12", "http-body", "httparse", "httpdate", "itoa", "pin-project-lite", - "socket2 0.4.10", + "socket2", "tokio", "tower-service", "tracing", @@ -2289,7 +2371,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" dependencies = [ "futures-util", - "http", + "http 0.2.12", "hyper", "rustls", "tokio", @@ -2298,9 +2380,9 @@ dependencies = [ [[package]] name = "iana-time-zone" -version = "0.1.58" +version = "0.1.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8326b86b6cff230b97d0d312a6c40a60726df3332e721f72a1b035f451663b20" +checksum = "e7ffbb5a1b541ea2561f8c41c087286cc091e21e556a4f09a8f6cbf17b69b141" dependencies = [ "android_system_properties", "core-foundation-sys", @@ -2321,9 +2403,9 @@ dependencies = [ [[package]] name = "ibc" -version = "0.48.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "429b6aca6624a9364878e28c90311438c2621a8270942d80732b2651ac38ac74" +checksum = "8057203ab04368297a31ecd5d059bec7108c069d636bcfc9ab20e82d89b480b8" dependencies = [ "ibc-apps", "ibc-clients", @@ -2333,11 +2415,43 @@ dependencies = [ "ibc-primitives", ] +[[package]] +name = "ibc-app-nft-transfer" +version = "0.50.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e931737b69129ae417598fe29eace3e41a9ce32b8649abe3937495973e5843f" +dependencies = [ + "ibc-app-nft-transfer-types", + "ibc-core", + "serde-json-wasm", +] + +[[package]] +name = "ibc-app-nft-transfer-types" +version = "0.50.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "2019d3a6adf6b333c55630f52ca71ad8f61702ca1cf291aaf5ee40b7c6c27ba2" +dependencies = [ + "base64 0.21.7", + "borsh 0.10.3", + "derive_more", + "displaydoc", + "http 1.1.0", + "ibc-core", + "ibc-proto", + "mime", + "parity-scale-codec", + "scale-info", + "schemars", + "serde", + "serde-json-wasm", +] + [[package]] name = "ibc-app-transfer" -version = "0.48.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b177b343385d9654d99be4709b5ed1574d41f91dfa4044b2d26d688be4179d7c" +checksum = "2595e4cc14828a4141a28b86777040d8bfbabea43838a425137202cff0ee6329" dependencies = [ "ibc-app-transfer-types", "ibc-core", @@ -2346,9 +2460,9 @@ dependencies = [ [[package]] name = "ibc-app-transfer-types" -version = "0.48.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95f92a3eda225e5c86e7bb6501c95986583ac541c4369d3c528349d81390f947" +checksum = "0106c87ddcc619a6a5eac05da2b77287e3958f89dddf951daf9a2dfc470cb5f4" dependencies = [ "derive_more", "displaydoc", @@ -2361,18 +2475,19 @@ dependencies = [ [[package]] name = "ibc-apps" -version = "0.48.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4be40d55ed2dea9f2d05b902a3586f20850c723e4bdbfc4fb0ebe7a66ca5e40" +checksum = "b5738d8c842abce233f41d3be825d01e6ee075251b509c6947d05c75477eaeec" dependencies = [ + "ibc-app-nft-transfer", "ibc-app-transfer", ] [[package]] name = "ibc-client-tendermint" -version = "0.48.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "119aa5873214228bf69bded3f20022b9ae1bc35b6841d295afcd73e53db05ccf" +checksum = "81ef4eefb4fd88167335fee4d212b1ff2fa4dd4e4ce87a58bda1798be1d128ac" dependencies = [ "ibc-client-tendermint-types", "ibc-core-client", @@ -2380,7 +2495,6 @@ dependencies = [ "ibc-core-handler-types", "ibc-core-host", "ibc-primitives", - "prost 
0.12.3", "serde", "tendermint", "tendermint-light-client-verifier", @@ -2388,38 +2502,52 @@ dependencies = [ [[package]] name = "ibc-client-tendermint-types" -version = "0.48.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f21679016931b332b295a761e65cc122dc6fbfb98444148b681ad3aaa474665" +checksum = "91a224a98b193810e1ef86316e9a08e677eeff6f98b22b9eb9806bd993d3753a" dependencies = [ - "bytes", "displaydoc", "ibc-core-client-types", "ibc-core-commitment-types", "ibc-core-host-types", "ibc-primitives", "ibc-proto", - "prost 0.12.3", "serde", "tendermint", "tendermint-light-client-verifier", "tendermint-proto", ] +[[package]] +name = "ibc-client-wasm-types" +version = "0.50.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e1ea3be7ae201c99b6589c112a253f2fb3c9ae7322d8937a7303d1fbfe76d27" +dependencies = [ + "base64 0.21.7", + "displaydoc", + "ibc-core-client", + "ibc-core-host-types", + "ibc-primitives", + "ibc-proto", + "serde", +] + [[package]] name = "ibc-clients" -version = "0.48.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "685c660323e93107a136aa3dbc412b7fa2eafd315c2fe71184096a43800f8ca5" +checksum = "84fef481dd1ebe5ef69ee8e095c225cb3e51cd3895096ba2884b3f5b827a6ed6" dependencies = [ "ibc-client-tendermint", + "ibc-client-wasm-types", ] [[package]] name = "ibc-core" -version = "0.48.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "100d9d0aa67432c5078a8a1c818e3fc990a193be6d35ed0abeda5b340d16c1da" +checksum = "5aedd421bae80115f44b198bec9af45f234e1c8ff81ee9d5e7f60444d526d2b6" dependencies = [ "ibc-core-channel", "ibc-core-client", @@ -2428,14 +2556,15 @@ dependencies = [ "ibc-core-handler", "ibc-core-host", "ibc-core-router", + "ibc-derive", "ibc-primitives", ] [[package]] name = "ibc-core-channel" -version = "0.48.1" +version = "0.50.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ebaa37629ac029f914dfe552ab5dad01ddb240ec885ed0ae68221cbea4e9bfc" +checksum = "535048a8fe64101263e35a6a4503474811e379a115db72ee449df882b0f11b45" dependencies = [ "ibc-core-channel-types", "ibc-core-client", @@ -2445,15 +2574,15 @@ dependencies = [ "ibc-core-host", "ibc-core-router", "ibc-primitives", - "prost 0.12.3", ] [[package]] name = "ibc-core-channel-types" -version = "0.48.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa2ba72c56c411b1e0ce6dc3f5e1fa1de9e6c84891f425b7be8a9e1705964378" +checksum = "8d97396ccb1840f4ea6021bbf049a4a7e30a8f5b126f00023ec44b2a48d4dabc" dependencies = [ + "borsh 0.10.3", "derive_more", "displaydoc", "ibc-core-client-types", @@ -2462,7 +2591,9 @@ dependencies = [ "ibc-core-host-types", "ibc-primitives", "ibc-proto", - "prost 0.12.3", + "parity-scale-codec", + "scale-info", + "schemars", "serde", "sha2 0.10.8", "subtle-encoding", @@ -2471,9 +2602,9 @@ dependencies = [ [[package]] name = "ibc-core-client" -version = "0.48.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06c4fac8e05201795073dee8c93d5afe9dfeac9aec2412b4a2b0c5f0d1e1d725" +checksum = "15bcf0c59eaa935fa410497a56862f28c4df68317ea556724f0d0764b6c0307e" dependencies = [ "ibc-core-client-context", "ibc-core-client-types", @@ -2481,14 +2612,13 @@ dependencies = [ "ibc-core-handler-types", "ibc-core-host", "ibc-primitives", - "prost 0.12.3", ] [[package]] name = "ibc-core-client-context" -version = "0.48.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b82abd9489021730d59ab2d00e9438d3711e8e78ecba4d083b64f833301682b" +checksum = "2d37d88be3dc7fd82d45418c257d826244a6b29b7902c76cf9e68fd61f1e9173" dependencies = [ "derive_more", "displaydoc", @@ -2496,26 +2626,27 @@ dependencies = [ "ibc-core-commitment-types", "ibc-core-handler-types", "ibc-core-host-types", - 
"ibc-derive", "ibc-primitives", - "prost 0.12.3", "subtle-encoding", "tendermint", ] [[package]] name = "ibc-core-client-types" -version = "0.48.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bafdbf6db5dab4c8ad610b6940e23b4f8abd0a6ac5e8e2801415a95defd4a583" +checksum = "cb717b1296e6cda0990346ba5203fadd043d5159d7d7173b3765f72f263c29db" dependencies = [ + "borsh 0.10.3", "derive_more", "displaydoc", "ibc-core-commitment-types", "ibc-core-host-types", "ibc-primitives", "ibc-proto", - "prost 0.12.3", + "parity-scale-codec", + "scale-info", + "schemars", "serde", "subtle-encoding", "tendermint", @@ -2523,40 +2654,43 @@ dependencies = [ [[package]] name = "ibc-core-commitment-types" -version = "0.48.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed4256b0216fc49024bac7e01c61b9bb055e31914ffe9ce6f468d7ce496a9357" +checksum = "a10ff34bf57bf4bc668b55208dbfdf312d7907adc6a0e39da2377883f12efada" dependencies = [ + "borsh 0.10.3", "derive_more", "displaydoc", "ibc-primitives", "ibc-proto", "ics23", - "prost 0.12.3", + "parity-scale-codec", + "scale-info", + "schemars", "serde", "subtle-encoding", ] [[package]] name = "ibc-core-connection" -version = "0.48.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48e237b70b9ba0177a4e59ac9048fffac2ac44c334703cc0ae403ad221450850" +checksum = "de7f4f1e78e9ed5a63b09b1405f42713f3d076ba5e7889ec31a520cad4970344" dependencies = [ "ibc-core-client", "ibc-core-connection-types", "ibc-core-handler-types", "ibc-core-host", "ibc-primitives", - "prost 0.12.3", ] [[package]] name = "ibc-core-connection-types" -version = "0.48.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca841416fa29626423917099092f3698ae2735074cb3fe42936ddf6b2ccbf2f7" +checksum = "230d7f547e121147d136c563ae71707a9e3477a9bc1bc6c1dc29051e1408a381" dependencies = [ + "borsh 
0.10.3", "derive_more", "displaydoc", "ibc-core-client-types", @@ -2564,7 +2698,9 @@ dependencies = [ "ibc-core-host-types", "ibc-primitives", "ibc-proto", - "prost 0.12.3", + "parity-scale-codec", + "scale-info", + "schemars", "serde", "subtle-encoding", "tendermint", @@ -2572,9 +2708,9 @@ dependencies = [ [[package]] name = "ibc-core-handler" -version = "0.48.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a47e5e5a006aa0fc87ec3f5fb1e0ef6dd5aeea5079fa927d799d526c44329987" +checksum = "c60a2d072d8f7d8d64503bbf3fb69ffcd973b92667af053617a36682fadddea5" dependencies = [ "ibc-core-channel", "ibc-core-client", @@ -2588,10 +2724,11 @@ dependencies = [ [[package]] name = "ibc-core-handler-types" -version = "0.48.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e3d59a8a5eb2069530c42783b4fef63472a89e0e9242334351df1bb58aaf542" +checksum = "7fae38340bffa42a74563a12703c994515cca4bab755a0c83089c18c3c1e481a" dependencies = [ + "borsh 0.10.3", "derive_more", "displaydoc", "ibc-core-channel-types", @@ -2602,7 +2739,9 @@ dependencies = [ "ibc-core-router-types", "ibc-primitives", "ibc-proto", - "prost 0.12.3", + "parity-scale-codec", + "scale-info", + "schemars", "serde", "subtle-encoding", "tendermint", @@ -2610,9 +2749,9 @@ dependencies = [ [[package]] name = "ibc-core-host" -version = "0.48.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7aa63c895c0e5a75e42fe859b8fd4250c12bfa8b9c6b114f94c927ecfad38a03" +checksum = "abaa0e2143855d12c19e814dab72a5e28daf5e31780afb1302e983614b248668" dependencies = [ "derive_more", "displaydoc", @@ -2624,15 +2763,14 @@ dependencies = [ "ibc-core-handler-types", "ibc-core-host-types", "ibc-primitives", - "prost 0.12.3", "subtle-encoding", ] [[package]] name = "ibc-core-host-cosmos" -version = "0.48.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum 
= "1a325862af6c20b0df3d27c072a2d802a7232dc1666214d738cdfbd9a9c99720" +checksum = "7e3c792be21a340e42344e5bede1695c2d21d62abcc21bbfc7662b5950ffe8d4" dependencies = [ "derive_more", "displaydoc", @@ -2646,7 +2784,6 @@ dependencies = [ "ibc-core-host-types", "ibc-primitives", "ibc-proto", - "prost 0.12.3", "serde", "sha2 0.10.8", "subtle-encoding", @@ -2655,21 +2792,25 @@ dependencies = [ [[package]] name = "ibc-core-host-types" -version = "0.48.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "616955da310febbe93c0569a2feebd9f57cafed3eee5a56b0c3bb953a75f6089" +checksum = "1c25ce3082e036836d60aea3cc24f46dfb248d7718516a9a48e1feb466ce10c1" dependencies = [ + "borsh 0.10.3", "derive_more", "displaydoc", "ibc-primitives", + "parity-scale-codec", + "scale-info", + "schemars", "serde", ] [[package]] name = "ibc-core-router" -version = "0.48.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31fe115da73e0616bdb44400fa6b11ca251648d070c4ff419d58e27804d30243" +checksum = "6c261fb7e9a7de7daafb6a38cb9abdce6e912230e30246eb2ef1bb5db32ba10f" dependencies = [ "derive_more", "displaydoc", @@ -2677,23 +2818,24 @@ dependencies = [ "ibc-core-host-types", "ibc-core-router-types", "ibc-primitives", - "prost 0.12.3", "subtle-encoding", ] [[package]] name = "ibc-core-router-types" -version = "0.48.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d1fbb0bbbdeafa7ac989ba1693ed46d22e0e3eb0bdae478544e31157a4fdba6" +checksum = "6f3b37bc4c11fdc60a328488f4be205106666edda20a4080484d599a8b0978d2" dependencies = [ + "borsh 0.10.3", "derive_more", "displaydoc", "ibc-core-host-types", "ibc-primitives", "ibc-proto", - "ics23", - "prost 0.12.3", + "parity-scale-codec", + "scale-info", + "schemars", "serde", "subtle-encoding", "tendermint", @@ -2701,26 +2843,29 @@ dependencies = [ [[package]] name = "ibc-derive" -version = "0.4.0" +version = "0.6.1" source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df07bf5bc1e65e291506b7497633e07967e49b36a8db10cda77a8fd686eb4548" +checksum = "3f5010acf3b7fec09c24d05b946424a9f7884f9647ed837c1a1676d3eabac154" dependencies = [ - "darling", "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.58", ] [[package]] name = "ibc-primitives" -version = "0.48.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5edea4685267fd68514c87e7aa3a62712340c4cff6903f088a9ab571428a08a" +checksum = "af5524046e645bdfbd96ef932c8ceab6bb2391dc31dee626e274d13e7ac25ec2" dependencies = [ + "borsh 0.10.3", "derive_more", "displaydoc", "ibc-proto", - "prost 0.12.3", + "parity-scale-codec", + "prost 0.12.4", + "scale-info", + "schemars", "serde", "tendermint", "time", @@ -2728,15 +2873,20 @@ dependencies = [ [[package]] name = "ibc-proto" -version = "0.38.0" +version = "0.41.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93cbf4cbe9e5113cc7c70f3208a7029b2205c629502cbb2ae7ea0a09a97d3005" +checksum = "dd4ee32b22d3b06f31529b956f4928e5c9a068d71e46cf6abfa19c31ca550553" dependencies = [ - "base64 0.21.5", + "base64 0.21.7", + "borsh 0.10.3", "bytes", "flex-error", "ics23", - "prost 0.12.3", + "informalsystems-pbjson", + "parity-scale-codec", + "prost 0.12.4", + "scale-info", + "schemars", "serde", "subtle-encoding", "tendermint-proto", @@ -2744,18 +2894,15 @@ dependencies = [ [[package]] name = "ibc-testkit" -version = "0.48.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f550c91648f3db6474880e18cd2bd294096a99b30621aa01a9059b71e3612d98" +checksum = "3443c6ccc7551266dce6e842aa10c472bf73d7cc0c3140aafc55c942e85f530a" dependencies = [ - "bytes", "derive_more", "displaydoc", "ibc", "ibc-proto", "parking_lot", - "primitive-types", - "prost 0.12.3", "subtle-encoding", "tendermint", "tendermint-testgen", @@ -2765,15 +2912,17 @@ dependencies = [ [[package]] name = "ics23" 
-version = "0.11.0" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "661e2d6f79952a65bc92b1c81f639ebd37228dae6ff412a5aba7d474bdc4b957" +checksum = "dc3b8be84e7285c73b88effdc3294b552277d6b0ec728ee016c861b7b9a2c19c" dependencies = [ "anyhow", + "blake2", + "blake3", "bytes", "hex", "informalsystems-pbjson", - "prost 0.12.3", + "prost 0.12.4", "ripemd", "serde", "sha2 0.10.8", @@ -2865,7 +3014,7 @@ name = "index-set" version = "0.8.0" source = "git+https://github.com/heliaxdev/index-set?tag=v0.8.1#b0d928f83cf0d465ccda299d131e8df2859b5184" dependencies = [ - "borsh", + "borsh 1.4.0", "serde", ] @@ -2882,9 +3031,20 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.1.0" +version = "2.2.4" +source = "git+https://github.com/heliaxdev/indexmap?tag=2.2.4-heliax-1#b5b5b547bd6ab04bbb16e060326a50ddaeb6c909" +dependencies = [ + "borsh 1.4.0", + "equivalent", + "hashbrown 0.14.3", + "serde", +] + +[[package]] +name = "indexmap" +version = "2.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d530e1a18b1cb4c484e6e34556a0d948706958449fca0cab753d649f2bce3d1f" +checksum = "168fb715dda47215e360912c096649d23d58bf392ac62f73919e831745e40f26" dependencies = [ "equivalent", "hashbrown 0.14.3", @@ -2892,11 +3052,11 @@ dependencies = [ [[package]] name = "informalsystems-pbjson" -version = "0.6.0" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4eecd90f87bea412eac91c6ef94f6b1e390128290898cbe14f2b926787ae1fb" +checksum = "9aa4a0980c8379295100d70854354e78df2ee1c6ca0f96ffe89afeb3140e3a3d" dependencies = [ - "base64 0.13.1", + "base64 0.21.7", "serde", ] @@ -2951,17 +3111,26 @@ dependencies = [ "either", ] +[[package]] +name = "itertools" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba291022dbbd398a455acf126c1e341954079855bc60dfdda641363bd6922569" +dependencies = [ + "either", +] + [[package]] name = 
"itoa" -version = "1.0.9" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38" +checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" [[package]] name = "js-sys" -version = "0.3.66" +version = "0.3.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cee9c64da59eae3b50095c18d3e74f8b73c0b86d2792824ff01bbce68ba229ca" +checksum = "29c15563dc2726973df627357ce0c9ddddbea194836909d655df6a75d2cf296d" dependencies = [ "wasm-bindgen", ] @@ -2972,7 +3141,7 @@ version = "8.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6971da4d9c3aa03c3d8f3ff0f4155b534aad021292003895a469716b2a230378" dependencies = [ - "base64 0.21.5", + "base64 0.21.7", "pem", "ring 0.16.20", "serde", @@ -2996,9 +3165,9 @@ dependencies = [ [[package]] name = "k256" -version = "0.13.2" +version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f01b677d82ef7a676aa37e099defd83a28e15687112cafdd112d60236b6115b" +checksum = "956ff9b67e26e1a6a866cb758f12c6f8746208489e3e4a4b5580802f2f0a587b" dependencies = [ "cfg-if 1.0.0", "ecdsa", @@ -3011,9 +3180,9 @@ dependencies = [ [[package]] name = "keccak" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f6d5ed8676d904364de097082f4e7d240b571b67989ced0240f08b7f966f940" +checksum = "ecc2af9a1119c51f12a14607e783cb977bde58bc069ff0c3da1095e635d70654" dependencies = [ "cpufeatures", ] @@ -3032,9 +3201,9 @@ checksum = "884e2677b40cc8c339eaefcb701c32ef1fd2493d71118dc0ca4b6a736c93bd67" [[package]] name = "libc" -version = "0.2.150" +version = "0.2.153" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89d92a4743f9a61002fae18374ed11e7973f530cb3a3255fb354818118b2203c" +checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd" [[package]] 
name = "libloading" @@ -3054,13 +3223,12 @@ checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" [[package]] name = "libredox" -version = "0.0.1" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85c833ca1e66078851dba29046874e38f08b2c883700aa29a03ddd3b23814ee8" +checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" dependencies = [ - "bitflags 2.4.1", + "bitflags 2.5.0", "libc", - "redox_syscall", ] [[package]] @@ -3096,14 +3264,14 @@ checksum = "adf157a4dc5a29b7b464aa8fe7edeff30076e07e13646a1c3874f58477dc99f8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.58", ] [[package]] name = "linux-raw-sys" -version = "0.4.12" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4cd1a83af159aa67994778be9070f0ae1bd732942279cabb14f86f986a21456" +checksum = "01cda141df6706de531b6c46c3a33ecca755538219bd484262fa09410c13539c" [[package]] name = "lock_api" @@ -3117,9 +3285,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.20" +version = "0.4.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" +checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c" [[package]] name = "loupe" @@ -3152,11 +3320,20 @@ dependencies = [ ] [[package]] -name = "masp_note_encryption" +name = "mach2" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19b955cdeb2a02b9117f121ce63aa52d08ade45de53e48fe6a38b39c10f6f709" +dependencies = [ + "libc", +] + +[[package]] +name = "masp_note_encryption" version = "1.0.0" -source = "git+https://github.com/anoma/masp?rev=30492323d98b0531fd18b6285cd94afcaa4066d2#30492323d98b0531fd18b6285cd94afcaa4066d2" +source = "git+https://github.com/anoma/masp?rev=6cbc8bd90a71cc280492c44bc3415162093daa76#6cbc8bd90a71cc280492c44bc3415162093daa76" 
dependencies = [ - "borsh", + "borsh 1.4.0", "chacha20", "chacha20poly1305", "cipher", @@ -3167,7 +3344,7 @@ dependencies = [ [[package]] name = "masp_primitives" version = "1.0.0" -source = "git+https://github.com/anoma/masp?rev=30492323d98b0531fd18b6285cd94afcaa4066d2#30492323d98b0531fd18b6285cd94afcaa4066d2" +source = "git+https://github.com/anoma/masp?rev=6cbc8bd90a71cc280492c44bc3415162093daa76#6cbc8bd90a71cc280492c44bc3415162093daa76" dependencies = [ "aes", "bip0039", @@ -3175,7 +3352,7 @@ dependencies = [ "blake2b_simd", "blake2s_simd", "bls12_381", - "borsh", + "borsh 1.4.0", "byteorder", "ff", "fpe", @@ -3199,13 +3376,13 @@ dependencies = [ [[package]] name = "masp_proofs" version = "1.0.0" -source = "git+https://github.com/anoma/masp?rev=30492323d98b0531fd18b6285cd94afcaa4066d2#30492323d98b0531fd18b6285cd94afcaa4066d2" +source = "git+https://github.com/anoma/masp?rev=6cbc8bd90a71cc280492c44bc3415162093daa76#6cbc8bd90a71cc280492c44bc3415162093daa76" dependencies = [ "bellman", "blake2b_simd", "bls12_381", "directories", - "getrandom 0.2.11", + "getrandom 0.2.14", "group", "itertools 0.11.0", "jubjub", @@ -3234,9 +3411,9 @@ checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" [[package]] name = "memchr" -version = "2.6.4" +version = "2.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f665ee40bc4a3c5590afb1e9677db74a508659dfd71e126420da8274909a0167" +checksum = "6c8640c5d730cb13ebd907d8d04b52f55ac9a2eec55b440c8892f40d56c76c1d" [[package]] name = "memmap2" @@ -3256,15 +3433,6 @@ dependencies = [ "autocfg", ] -[[package]] -name = "memoffset" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c" -dependencies = [ - "autocfg", -] - [[package]] name = "memory_units" version = "0.4.0" @@ -3291,18 +3459,18 @@ checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" [[package]] 
name = "miniz_oxide" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7" +checksum = "9d811f3e15f28568be3407c8e7fdb6514c1cda3cb30683f15b6a1a1dc4ea14a7" dependencies = [ "adler", ] [[package]] name = "minreq" -version = "2.11.0" +version = "2.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb3371dfc7b772c540da1380123674a8e20583aca99907087d990ca58cf44203" +checksum = "00a000cf8bbbfb123a9bdc66b61c2885a4bb038df4f2629884caafabeb76b0f9" dependencies = [ "log", "once_cell", @@ -3313,9 +3481,9 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.10" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f3d0b296e374a4e6f3c7b0a1f5a51d748a0d34c85e7dc48fc3fa9a87657fe09" +checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" dependencies = [ "libc", "wasi 0.11.0+wasi-snapshot-preview1", @@ -3340,7 +3508,7 @@ version = "0.32.1" dependencies = [ "async-trait", "bimap", - "borsh", + "borsh 1.4.0", "borsh-ext", "circular-queue", "clru", @@ -3382,7 +3550,7 @@ dependencies = [ "parse_duration", "paste", "proptest", - "prost 0.12.3", + "prost 0.12.4", "rand 0.8.5", "rand_core 0.6.4", "rayon", @@ -3416,7 +3584,7 @@ dependencies = [ name = "namada_account" version = "0.32.1" dependencies = [ - "borsh", + "borsh 1.4.0", "linkme", "namada_core", "namada_macros", @@ -3426,12 +3594,19 @@ dependencies = [ "serde", ] +[[package]] +name = "namada_controller" +version = "0.32.1" +dependencies = [ + "namada_core", +] + [[package]] name = "namada_core" version = "0.32.1" dependencies = [ "bech32 0.8.1", - "borsh", + "borsh 1.4.0", "borsh-ext", "chrono", "data-encoding", @@ -3443,6 +3618,7 @@ dependencies = [ "ics23", "impl-num-traits", "index-set", + "indexmap 2.2.4", "k256", "linkme", "masp_primitives", @@ -3455,7 +3631,7 @@ dependencies = [ "num_enum", 
"primitive-types", "proptest", - "prost-types 0.12.3", + "prost-types 0.12.4", "rand 0.8.5", "rand_core 0.6.4", "rayon", @@ -3476,7 +3652,7 @@ dependencies = [ name = "namada_ethereum_bridge" version = "0.32.1" dependencies = [ - "borsh", + "borsh 1.4.0", "ethabi", "ethers", "eyre", @@ -3507,7 +3683,7 @@ dependencies = [ name = "namada_gas" version = "0.32.1" dependencies = [ - "borsh", + "borsh 1.4.0", "namada_core", "namada_macros", "serde", @@ -3518,7 +3694,7 @@ dependencies = [ name = "namada_governance" version = "0.32.1" dependencies = [ - "borsh", + "borsh 1.4.0", "itertools 0.10.5", "linkme", "namada_core", @@ -3538,7 +3714,7 @@ dependencies = [ name = "namada_ibc" version = "0.32.1" dependencies = [ - "borsh", + "borsh 1.4.0", "ibc", "ibc-derive", "ibc-testkit", @@ -3553,7 +3729,8 @@ dependencies = [ "namada_token", "primitive-types", "proptest", - "prost 0.12.3", + "prost 0.12.4", + "serde_json", "sha2 0.9.9", "thiserror", "tracing", @@ -3576,12 +3753,12 @@ dependencies = [ name = "namada_merkle_tree" version = "0.32.1" dependencies = [ - "borsh", + "borsh 1.4.0", "eyre", "ics23", "namada_core", "namada_macros", - "prost 0.12.3", + "prost 0.12.4", "sparse-merkle-tree", "thiserror", ] @@ -3590,7 +3767,7 @@ dependencies = [ name = "namada_migrations" version = "0.32.1" dependencies = [ - "borsh", + "borsh 1.4.0", "data-encoding", "lazy_static", "linkme", @@ -3602,7 +3779,7 @@ dependencies = [ name = "namada_parameters" version = "0.32.1" dependencies = [ - "borsh", + "borsh 1.4.0", "namada_core", "namada_macros", "namada_storage", @@ -3613,11 +3790,12 @@ dependencies = [ name = "namada_proof_of_stake" version = "0.32.1" dependencies = [ - "borsh", + "borsh 1.4.0", "data-encoding", "derivative", "linkme", "namada_account", + "namada_controller", "namada_core", "namada_governance", "namada_macros", @@ -3647,7 +3825,7 @@ dependencies = [ "async-trait", "bimap", "bls12_381", - "borsh", + "borsh 1.4.0", "borsh-ext", "circular-queue", "data-encoding", @@ -3666,6 
+3844,7 @@ dependencies = [ "namada_account", "namada_core", "namada_ethereum_bridge", + "namada_gas", "namada_governance", "namada_ibc", "namada_macros", @@ -3684,7 +3863,7 @@ dependencies = [ "parse_duration", "paste", "proptest", - "prost 0.12.3", + "prost 0.12.4", "rand 0.8.5", "rand_core 0.6.4", "regex", @@ -3709,8 +3888,9 @@ dependencies = [ name = "namada_shielded_token" version = "0.32.1" dependencies = [ - "borsh", + "borsh 1.4.0", "masp_primitives", + "namada_controller", "namada_core", "namada_parameters", "namada_storage", @@ -3723,7 +3903,7 @@ dependencies = [ name = "namada_state" version = "0.32.1" dependencies = [ - "borsh", + "borsh 1.4.0", "ics23", "itertools 0.10.5", "linkme", @@ -3749,7 +3929,7 @@ dependencies = [ name = "namada_storage" version = "0.32.1" dependencies = [ - "borsh", + "borsh 1.4.0", "itertools 0.10.5", "linkme", "namada_core", @@ -3769,7 +3949,7 @@ dependencies = [ name = "namada_test_utils" version = "0.32.1" dependencies = [ - "borsh", + "borsh 1.4.0", "namada_core", "strum 0.24.1", ] @@ -3795,7 +3975,7 @@ dependencies = [ "namada_tx_prelude", "namada_vp_prelude", "num-traits", - "prost 0.12.3", + "prost 0.12.4", "regex", "serde", "serde_json", @@ -3831,7 +4011,8 @@ name = "namada_tx" version = "0.32.1" dependencies = [ "ark-bls12-381", - "borsh", + "bitflags 2.5.0", + "borsh 1.4.0", "data-encoding", "linkme", "masp_primitives", @@ -3842,8 +4023,8 @@ dependencies = [ "num-derive", "num-traits", "proptest", - "prost 0.12.3", - "prost-types 0.12.3", + "prost 0.12.4", + "prost-types 0.12.4", "serde", "serde_json", "sha2 0.9.9", @@ -3863,7 +4044,7 @@ dependencies = [ name = "namada_tx_prelude" version = "0.32.1" dependencies = [ - "borsh", + "borsh 1.4.0", "masp_primitives", "namada_account", "namada_core", @@ -3885,7 +4066,7 @@ dependencies = [ name = "namada_vm_env" version = "0.32.1" dependencies = [ - "borsh", + "borsh 1.4.0", "masp_primitives", "namada_core", ] @@ -3894,7 +4075,7 @@ dependencies = [ name = "namada_vote_ext" 
version = "0.32.1" dependencies = [ - "borsh", + "borsh 1.4.0", "linkme", "namada_core", "namada_macros", @@ -3910,6 +4091,7 @@ dependencies = [ "derivative", "masp_primitives", "namada_core", + "namada_ibc", "namada_storage", "namada_tx", "thiserror", @@ -3919,7 +4101,7 @@ dependencies = [ name = "namada_vp_prelude" version = "0.32.1" dependencies = [ - "borsh", + "borsh 1.4.0", "namada_account", "namada_core", "namada_governance", @@ -3936,24 +4118,6 @@ dependencies = [ "thiserror", ] -[[package]] -name = "namada_wasm" -version = "0.32.1" -dependencies = [ - "getrandom 0.2.11", - "namada", - "namada_test_utils", - "namada_tests", - "namada_tx_prelude", - "namada_vp_prelude", - "once_cell", - "proptest", - "test-log", - "tracing", - "tracing-subscriber", - "wee_alloc", -] - [[package]] name = "nonempty" version = "0.7.0" @@ -3981,7 +4145,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b05180d69e3da0e530ba2a1dae5110317e49e3b7f3d41be227dc5f92e49ee7af" dependencies = [ "num-bigint 0.4.4", - "num-complex 0.4.4", + "num-complex 0.4.5", "num-integer", "num-iter", "num-rational 0.4.1", @@ -4022,9 +4186,9 @@ dependencies = [ [[package]] name = "num-complex" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ba157ca0885411de85d6ca030ba7e2a83a28636056c7c699b07c8b6f7383214" +checksum = "23c6602fda94a57c990fe0df199a035d83576b496aa29f4e634a8ac6004e68a6" dependencies = [ "num-traits", ] @@ -4042,19 +4206,18 @@ dependencies = [ [[package]] name = "num-integer" -version = "0.1.45" +version = "0.1.46" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" +checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" dependencies = [ - "autocfg", "num-traits", ] [[package]] name = "num-iter" -version = "0.1.43" +version = "0.1.44" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d03e6c028c5dc5cac6e2dec0efda81fc887605bb3d884578bb6d6bf7514e252" +checksum = "d869c01cc0c455284163fd0092f1f93835385ccab5a98a0dcc497b2f8bf055a9" dependencies = [ "autocfg", "num-integer", @@ -4087,9 +4250,9 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.17" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39e3200413f237f41ab11ad6d161bc7239c84dcb631773ccd7de3dfe4b5c267c" +checksum = "da0df0e5185db44f69b44f26786fe401b6c293d1907744beaa7fa62b2e5a517a" dependencies = [ "autocfg", "libm", @@ -4121,23 +4284,23 @@ dependencies = [ [[package]] name = "num_enum" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "683751d591e6d81200c39fb0d1032608b77724f34114db54f571ff1317b337c0" +checksum = "02339744ee7253741199f897151b38e72257d13802d4ee837285cc2990a90845" dependencies = [ "num_enum_derive", ] [[package]] name = "num_enum_derive" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c11e44798ad209ccdd91fc192f0526a369a01234f7373e1b141c96d7cee4f0e" +checksum = "681030a937600a36906c185595136d26abfebb4aa9c65701cefcaf8578bb982b" dependencies = [ - "proc-macro-crate 2.0.1", + "proc-macro-crate 3.1.0", "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.58", ] [[package]] @@ -4154,9 +4317,9 @@ dependencies = [ [[package]] name = "object" -version = "0.32.1" +version = "0.32.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cf5f9dd3933bd50a9e1f149ec995f39ae2c496d31fd772c1fd45ebc27e902b0" +checksum = "a6a622008b6e321afc04970976f62ee297fdbaa6f95318ca343e3eebb9648441" dependencies = [ "memchr", ] @@ -4175,9 +4338,9 @@ checksum = "2839e79665f131bdb5782e51f2c6c9599c133c6098982a54c794358bf432529c" [[package]] name = "opaque-debug" -version = "0.3.0" +version = "0.3.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" +checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" [[package]] name = "open-fastrlp" @@ -4217,7 +4380,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c6624905ddd92e460ff0685567539ed1ac985b2dee4c92c7edcd64fce905b00c" dependencies = [ "ct-codecs", - "getrandom 0.2.11", + "getrandom 0.2.14", "subtle 2.4.1", "zeroize", ] @@ -4257,7 +4420,7 @@ version = "3.6.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "be30eaf4b0a9fba5336683b38de57bb86d179a35862ba6bfcf57625d006bde5b" dependencies = [ - "proc-macro-crate 2.0.1", + "proc-macro-crate 2.0.0", "proc-macro2", "quote", "syn 1.0.109", @@ -4373,9 +4536,9 @@ dependencies = [ [[package]] name = "peg" -version = "0.7.0" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07c0b841ea54f523f7aa556956fbd293bcbe06f2e67d2eb732b7278aaf1d166a" +checksum = "400bcab7d219c38abf8bd7cc2054eb9bbbd4312d66f6a5557d572a203f646f61" dependencies = [ "peg-macros", "peg-runtime", @@ -4383,9 +4546,9 @@ dependencies = [ [[package]] name = "peg-macros" -version = "0.7.0" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5aa52829b8decbef693af90202711348ab001456803ba2a98eb4ec8fb70844c" +checksum = "46e61cce859b76d19090f62da50a9fe92bab7c2a5f09e183763559a2ac392c90" dependencies = [ "peg-runtime", "proc-macro2", @@ -4394,9 +4557,9 @@ dependencies = [ [[package]] name = "peg-runtime" -version = "0.7.0" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c719dcf55f09a3a7e764c6649ab594c18a177e3599c467983cdf644bfc0a4088" +checksum = "36bae92c60fa2398ce4678b98b2c4b5a7c61099961ca1fa305aec04a9ad28922" [[package]] name = "pem" @@ -4415,9 +4578,9 @@ checksum = 
"e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" -version = "2.7.5" +version = "2.7.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae9cee2a55a544be8b89dc6848072af97a20f2422603c10865be2a42b580fff5" +checksum = "311fb059dee1a7b802f036316d790138c613a4e8b180c822e3925a662e9f0c95" dependencies = [ "memchr", "thiserror", @@ -4431,7 +4594,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e1d3afd2628e69da2be385eb6f2fd57c8ac7977ceeff6dc166ff1657b0e386a9" dependencies = [ "fixedbitset", - "indexmap 2.1.0", + "indexmap 2.2.6", ] [[package]] @@ -4446,29 +4609,29 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.1.3" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fda4ed1c6c173e3fc7a83629421152e01d7b1f9b7f65fb301e490e8cfc656422" +checksum = "b6bf43b791c5b9e34c3d182969b4abb522f9343702850a2e57f460d00d09b4b3" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.3" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" +checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.58", ] [[package]] name = "pin-project-lite" -version = "0.2.13" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58" +checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02" [[package]] name = "pin-utils" @@ -4493,7 +4656,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8159bd90725d2df49889a078b54f4f79e87f1f8a8444194cdca81d38f5393abf" dependencies = [ "cpufeatures", - "opaque-debug 0.3.0", + "opaque-debug 0.3.1", "universal-hash", ] 
@@ -4521,12 +4684,12 @@ dependencies = [ [[package]] name = "prettyplease" -version = "0.2.15" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae005bd773ab59b4725093fd7df83fd7892f7d8eafb48dbd7de6e024e4215f9d" +checksum = "8d3928fb5db768cb86f891ff014f0144589297e3c6a1aba6ed7cecfdace270c7" dependencies = [ "proc-macro2", - "syn 2.0.52", + "syn 2.0.58", ] [[package]] @@ -4543,6 +4706,15 @@ dependencies = [ "uint", ] +[[package]] +name = "proc-macro-crate" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d6ea3c4595b96363c13943497db34af4460fb474a95c43f4446ad341b8c9785" +dependencies = [ + "toml 0.5.11", +] + [[package]] name = "proc-macro-crate" version = "1.3.1" @@ -4555,12 +4727,20 @@ dependencies = [ [[package]] name = "proc-macro-crate" -version = "2.0.1" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e8366a6159044a37876a2b9817124296703c586a5c92e2c53751fa06d8d43e8" +dependencies = [ + "toml_edit 0.20.7", +] + +[[package]] +name = "proc-macro-crate" +version = "3.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97dc5fea232fc28d2f597b37c4876b348a40e33f3b02cc975c8d006d78d94b1a" +checksum = "6d37c51ca738a55da99dc0c4a34860fd675453b8b36209178c2249bb13651284" dependencies = [ - "toml_datetime", - "toml_edit 0.20.2", + "toml_edit 0.21.1", ] [[package]] @@ -4589,9 +4769,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.78" +version = "1.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2422ad645d89c99f8f3e6b88a9fdeca7fabeac836b1002371c4367c8f984aae" +checksum = "e835ff2298f5721608eb1a980ecaee1aef2c132bf95ecc026a11b7bf3c01c02e" dependencies = [ "unicode-ident", ] @@ -4604,13 +4784,13 @@ checksum = "31b476131c3c86cb68032fdc5cb6d5a1045e3e42d96b69fa599fd77701e1f5bf" dependencies = [ "bit-set", "bit-vec", - "bitflags 2.4.1", + "bitflags 
2.5.0", "lazy_static", "num-traits", "rand 0.8.5", "rand_chacha 0.3.1", "rand_xorshift", - "regex-syntax 0.8.2", + "regex-syntax 0.8.3", "rusty-fork", "tempfile", "unarray", @@ -4628,12 +4808,12 @@ dependencies = [ [[package]] name = "prost" -version = "0.12.3" +version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "146c289cda302b98a28d40c8b3b90498d6e526dd24ac2ecea73e4e491685b94a" +checksum = "d0f5d036824e4761737860779c906171497f6d55681139d8312388f8fe398922" dependencies = [ "bytes", - "prost-derive 0.12.3", + "prost-derive 0.12.4", ] [[package]] @@ -4673,15 +4853,15 @@ dependencies = [ [[package]] name = "prost-derive" -version = "0.12.3" +version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efb6c9a1dd1def8e2124d17e83a20af56f1570d6c2d2bd9e266ccb768df3840e" +checksum = "19de2de2a00075bf566bee3bd4db014b11587e84184d3f7a791bc17f1a8e9e48" dependencies = [ "anyhow", - "itertools 0.11.0", + "itertools 0.12.1", "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.58", ] [[package]] @@ -4695,11 +4875,11 @@ dependencies = [ [[package]] name = "prost-types" -version = "0.12.3" +version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "193898f59edcf43c26227dcd4c8427f00d99d61e95dcde58dabd49fa291d470e" +checksum = "3235c33eb02c1f1e212abdbe34c78b264b038fb58ca612664343271e36e55ffe" dependencies = [ - "prost 0.12.3", + "prost 0.12.4", ] [[package]] @@ -4730,9 +4910,9 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quote" -version = "1.0.35" +version = "1.0.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef" +checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7" dependencies = [ "proc-macro2", ] @@ -4802,7 +4982,7 @@ version = "0.6.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.11", + "getrandom 0.2.14", ] [[package]] @@ -4837,12 +5017,12 @@ dependencies = [ [[package]] name = "rayon-core" -version = "1.12.0" +version = "1.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ce3fb6ad83f861aac485e76e1985cd109d9a3713802152be56c3b1f0e0658ed" +checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2" dependencies = [ "crossbeam-deque", - "crossbeam-utils 0.8.16", + "crossbeam-utils 0.8.19", ] [[package]] @@ -4887,11 +5067,11 @@ dependencies = [ [[package]] name = "redox_users" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a18479200779601e498ada4e8c1e1f50e3ee19deb0259c25825a98b5603b2cb4" +checksum = "bd283d9651eeda4b2a83a43c1c91b266c40fd76ecd39a50a8c630ae69dc72891" dependencies = [ - "getrandom 0.2.11", + "getrandom 0.2.14", "libredox", "thiserror", ] @@ -4909,14 +5089,14 @@ dependencies = [ [[package]] name = "regex" -version = "1.10.2" +version = "1.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "380b951a9c5e80ddfd6136919eef32310721aa4aacd4889a8d39124b026ab343" +checksum = "c117dbdfde9c8308975b6a18d71f3f385c89461f7b3fb054288ecf2a2058ba4c" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.3", - "regex-syntax 0.8.2", + "regex-automata 0.4.6", + "regex-syntax 0.8.3", ] [[package]] @@ -4930,13 +5110,13 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.3" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f804c7828047e88b2d32e2d7fe5a105da8ee3264f01902f796c8e067dc2483f" +checksum = "86b83b8b9847f9bf95ef68afb0b8e6cdb80f498442f5179a29fad448fcc1eaea" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.8.2", + "regex-syntax 0.8.3", ] 
[[package]] @@ -4947,44 +5127,44 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" +checksum = "adad44e29e4c806119491a7f06f03de4d1af22c3a680dd47f1e6e179439d1f56" [[package]] name = "region" -version = "3.0.0" +version = "3.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76e189c2369884dce920945e2ddf79b3dff49e071a167dd1817fa9c4c00d512e" +checksum = "e6b6ebd13bc009aef9cd476c1310d49ac354d36e240cf1bd753290f3dc7199a7" dependencies = [ "bitflags 1.3.2", "libc", - "mach", - "winapi", + "mach2", + "windows-sys 0.52.0", ] [[package]] name = "rend" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2571463863a6bd50c32f94402933f03457a3fbaf697a707c5be741e459f08fd" +checksum = "71fe3824f5629716b1589be05dacd749f6aa084c87e00e016714a8cdfccc997c" dependencies = [ "bytecheck", ] [[package]] name = "reqwest" -version = "0.11.22" +version = "0.11.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "046cd98826c46c2ac8ddecae268eb5c2e58628688a5fc7a2643704a73faba95b" +checksum = "dd67538700a17451e7cba03ac727fb961abb7607553461627b97de0b89cf4a62" dependencies = [ - "base64 0.21.5", + "base64 0.21.7", "bytes", "encoding_rs", "futures-core", "futures-util", "h2", - "http", + "http 0.2.12", "http-body", "hyper", "hyper-rustls", @@ -5001,6 +5181,7 @@ dependencies = [ "serde", "serde_json", "serde_urlencoded", + "sync_wrapper", "system-configuration", "tokio", "tokio-rustls", @@ -5039,16 +5220,17 @@ dependencies = [ [[package]] name = "ring" -version = "0.17.7" +version = "0.17.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "688c63d65483050968b2a8937f7995f443e27041a0f7700aa59b0822aedebb74" 
+checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" dependencies = [ "cc", - "getrandom 0.2.11", + "cfg-if 1.0.0", + "getrandom 0.2.14", "libc", "spin 0.9.8", "untrusted 0.9.0", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -5062,26 +5244,27 @@ dependencies = [ [[package]] name = "rkyv" -version = "0.7.42" +version = "0.7.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0200c8230b013893c0b2d6213d6ec64ed2b9be2e0e016682b7224ff82cff5c58" +checksum = "5cba464629b3394fc4dbc6f940ff8f5b4ff5c7aef40f29166fd4ad12acbc99c0" dependencies = [ "bitvec", "bytecheck", + "bytes", "hashbrown 0.12.3", "ptr_meta", "rend", "rkyv_derive", "seahash", "tinyvec", - "uuid 1.6.1", + "uuid 1.8.0", ] [[package]] name = "rkyv_derive" -version = "0.7.42" +version = "0.7.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2e06b915b5c230a17d7a736d1e2e63ee753c256a8614ef3f5147b13a4f5541d" +checksum = "a7dddfff8de25e6f62b9d64e6e432bf1c6736c57d20323e15ee10435fbda7c65" dependencies = [ "proc-macro2", "quote", @@ -5143,16 +5326,16 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ - "semver 1.0.20", + "semver 1.0.22", ] [[package]] name = "rustix" -version = "0.38.26" +version = "0.38.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9470c4bf8246c8daf25f9598dca807fb6510347b1e1cfa55749113850c79d88a" +checksum = "65e04861e65f21776e67888bfbea442b3642beaa0138fdb1dd7a84a52dffdb89" dependencies = [ - "bitflags 2.4.1", + "bitflags 2.5.0", "errno", "libc", "linux-raw-sys", @@ -5161,12 +5344,12 @@ dependencies = [ [[package]] name = "rustls" -version = "0.21.9" +version = "0.21.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "629648aced5775d558af50b2b4c7b02983a04b312126d45eeead26e7caa498b9" +checksum = 
"f9d5a6813c0759e4609cd494e8e725babae6a2ca7b62a5536a13daaec6fcb7ba" dependencies = [ "log", - "ring 0.17.7", + "ring 0.17.8", "rustls-webpki", "sct", ] @@ -5189,7 +5372,7 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" dependencies = [ - "base64 0.21.5", + "base64 0.21.7", ] [[package]] @@ -5198,15 +5381,15 @@ version = "0.101.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" dependencies = [ - "ring 0.17.7", + "ring 0.17.8", "untrusted 0.9.0", ] [[package]] name = "rustversion" -version = "1.0.14" +version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" +checksum = "80af6f9131f277a45a3fba6ce8e2258037bb0477a67e610d3c1fe046ab31de47" [[package]] name = "rusty-fork" @@ -5222,9 +5405,9 @@ dependencies = [ [[package]] name = "ryu" -version = "1.0.15" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ad4cc8da4ef723ed60bced201181d83791ad433213d8c24efffda1eec85d741" +checksum = "e86697c916019a8588c99b5fac3cead74ec0b4b819707a682fd4d23fa0ce1ba1" [[package]] name = "salsa20" @@ -5246,9 +5429,9 @@ dependencies = [ [[package]] name = "scale-info" -version = "2.10.0" +version = "2.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f7d66a1128282b7ef025a8ead62a4a9fcf017382ec53b8ffbf4d7bf77bd3c60" +checksum = "7c453e59a955f81fb62ee5d596b450383d699f152d350e9d23a0db2adb78e4c0" dependencies = [ "cfg-if 1.0.0", "derive_more", @@ -5258,9 +5441,9 @@ dependencies = [ [[package]] name = "scale-info-derive" -version = "2.10.0" +version = "2.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abf2c68b89cafb3b8d918dd07b42be0da66ff202cf1155c5739a4e0c1ea0dc19" +checksum 
= "18cf6c6447f813ef19eb450e985bcce6705f9ce7660db221b59093d15c79c4b7" dependencies = [ "proc-macro-crate 1.3.1", "proc-macro2", @@ -5270,11 +5453,35 @@ dependencies = [ [[package]] name = "schannel" -version = "0.1.22" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c3733bf4cf7ea0880754e19cb5a462007c4a8c1914bff372ccc95b464f1df88" +checksum = "fbc91545643bcf3a0bbb6569265615222618bdf33ce4ffbbd13c4bbd4c093534" dependencies = [ - "windows-sys 0.48.0", + "windows-sys 0.52.0", +] + +[[package]] +name = "schemars" +version = "0.8.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "45a28f4c49489add4ce10783f7911893516f15afe45d015608d41faca6bc4d29" +dependencies = [ + "dyn-clone", + "schemars_derive", + "serde", + "serde_json", +] + +[[package]] +name = "schemars_derive" +version = "0.8.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c767fd6fa65d9ccf9cf026122c1b555f2ef9a4f0cea69da4d7dbc3e258d30967" +dependencies = [ + "proc-macro2", + "quote", + "serde_derive_internals", + "syn 1.0.109", ] [[package]] @@ -5301,7 +5508,7 @@ version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" dependencies = [ - "ring 0.17.7", + "ring 0.17.8", "untrusted 0.9.0", ] @@ -5328,9 +5535,9 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.9.2" +version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05b64fb303737d99b81884b2c63433e9ae28abebe5eb5045dcdd175dc2ecf4de" +checksum = "770452e37cad93e0a50d5abc3990d2bc351c36d0328f86cefec2f2fb206eaef6" dependencies = [ "bitflags 1.3.2", "core-foundation", @@ -5341,9 +5548,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.9.1" +version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e932934257d3b408ed8f30db49d85ea163bfe74961f017f405b025af298f0c7a" +checksum = "41f3cc463c0ef97e11c3461a9d3787412d30e8e7eb907c79180c4a57bf7c04ef" dependencies = [ "core-foundation-sys", "libc", @@ -5360,9 +5567,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.20" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "836fa6a3e1e547f9a2c4040802ec865b5d85f4014efe00555d7090a3dcaa1090" +checksum = "92d43fe69e652f3df9bdc2b85b2854a0825b86e4fb76bc44d945137d053639ca" dependencies = [ "serde", ] @@ -5390,27 +5597,27 @@ checksum = "cd0b0ec5f1c1ca621c432a25813d8d60c88abe6d3e08a3eb9cf37d97a0fe3d73" [[package]] name = "serde" -version = "1.0.193" +version = "1.0.197" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25dd9975e68d0cb5aa1120c288333fc98731bd1dd12f561e468ea4728c042b89" +checksum = "3fb1c873e1b9b056a4dc4c0c198b24c3ffa059243875552b2bd0933b1aee4ce2" dependencies = [ "serde_derive", ] [[package]] name = "serde-json-wasm" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83c37d03f3b0f6b5f77c11af1e7c772de1c9af83e50bef7bb6069601900ba67b" +checksum = "f05da0d153dd4595bdffd5099dc0e9ce425b205ee648eb93437ff7302af8c9a5" dependencies = [ "serde", ] [[package]] name = "serde_bytes" -version = "0.11.12" +version = "0.11.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab33ec92f677585af6d88c65593ae2375adde54efdbf16d597f2cbc7a6d368ff" +checksum = "8b8497c313fd43ab992087548117643f6fcd935cbf36f176ffda0aacf9591734" dependencies = [ "serde", ] @@ -5427,20 +5634,31 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.193" +version = "1.0.197" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43576ca501357b9b071ac53cdc7da8ef0cbd9493d8df094cd821777ea6e894d3" +checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b" dependencies = [ 
"proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.58", +] + +[[package]] +name = "serde_derive_internals" +version = "0.26.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85bf8229e7920a9f636479437026331ce11aa132b4dde37d121944a44d6e5f3c" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", ] [[package]] name = "serde_json" -version = "1.0.108" +version = "1.0.115" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d1c7e3eac408d115102c4c24ad393e0821bb3a5df4d506a80f85f7a742a526b" +checksum = "12dc5c46daa8e9fdf4f5e71b6cf9a53f2487da0e86e55808e2d35539666497dd" dependencies = [ "itoa", "ryu", @@ -5449,20 +5667,20 @@ dependencies = [ [[package]] name = "serde_repr" -version = "0.1.17" +version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3081f5ffbb02284dda55132aa26daecedd7372a42417bbbab6f14ab7d6bb9145" +checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.58", ] [[package]] name = "serde_spanned" -version = "0.6.4" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12022b835073e5b11e90a14f86838ceb1c8fb0325b72416845c487ac0fa95e80" +checksum = "eb3622f419d1296904700073ea6cc23ad690adbd66f13ea683df73298736f0c1" dependencies = [ "serde", ] @@ -5511,7 +5729,7 @@ dependencies = [ "cfg-if 1.0.0", "cpufeatures", "digest 0.9.0", - "opaque-debug 0.3.0", + "opaque-debug 0.3.1", ] [[package]] @@ -5571,9 +5789,9 @@ checksum = "f27f6278552951f1f2b8cf9da965d10969b2efdea95a6ec47987ab46edfe263a" [[package]] name = "simple-error" -version = "0.2.3" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc47a29ce97772ca5c927f75bac34866b16d64e07f330c3248e2d7226623901b" +checksum = "8542b68b8800c3cda649d2c72d688b6907b30f1580043135d61669d4aad1c175" [[package]] name = "simple_asn1" @@ -5607,28 +5825,18 
@@ dependencies = [ [[package]] name = "smallvec" -version = "1.11.2" +version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dccd0940a2dcdf68d092b8cbab7dc0ad8fa938bf95787e1b916b0e3d0e8e970" +checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" [[package]] name = "socket2" -version = "0.4.10" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f7916fc008ca5542385b89a3d3ce689953c143e9304a9bf8beec1de48994c0d" +checksum = "05ffd9c0a93b7543e062e759284fcf5f5e3b098501104bfbdde4d404db792871" dependencies = [ "libc", - "winapi", -] - -[[package]] -name = "socket2" -version = "0.5.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b5fac59a5cb5dd637972e5fca70daf0523c9067fcdc4842f053dae04a18f8e9" -dependencies = [ - "libc", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -5636,7 +5844,7 @@ name = "sparse-merkle-tree" version = "0.3.1-pre" source = "git+https://github.com/heliaxdev/sparse-merkle-tree?rev=515687fe7884cb365067ac86c66ac3613de176bb#515687fe7884cb365067ac86c66ac3613de176bb" dependencies = [ - "borsh", + "borsh 1.4.0", "cfg-if 1.0.0", "ics23", "sha2 0.9.9", @@ -5678,9 +5886,9 @@ checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" [[package]] name = "strsim" -version = "0.10.0" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" +checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" [[package]] name = "strum" @@ -5693,11 +5901,11 @@ dependencies = [ [[package]] name = "strum" -version = "0.25.0" +version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "290d54ea6f91c969195bdbcd7442c8c2a2ba87da8bf60a7ee86a235d4bc1e125" +checksum = "5d8cec3501a5194c432b2b7976db6b7d10ec95c253208b45f83f7136aa985e29" dependencies = 
[ - "strum_macros 0.25.3", + "strum_macros 0.26.2", ] [[package]] @@ -5715,15 +5923,15 @@ dependencies = [ [[package]] name = "strum_macros" -version = "0.25.3" +version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23dc1fa9ac9c169a78ba62f0b841814b7abae11bdd047b9c58f893439e309ea0" +checksum = "c6cf59daf282c0a494ba14fd21610a0325f9f90ec9d1231dea26bcb1d696c946" dependencies = [ "heck", "proc-macro2", "quote", "rustversion", - "syn 2.0.52", + "syn 2.0.58", ] [[package]] @@ -5766,9 +5974,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.52" +version = "2.0.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b699d15b36d1f02c3e7c69f8ffef53de37aefae075d8488d4ba1a7788d574a07" +checksum = "44cfb93f38070beee36b3fef7d4f5a16f27751d94b187b666a5cc5e9b0d30687" dependencies = [ "proc-macro2", "quote", @@ -5784,9 +5992,15 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.58", ] +[[package]] +name = "sync_wrapper" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" + [[package]] name = "system-configuration" version = "0.5.1" @@ -5816,28 +6030,27 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "target-lexicon" -version = "0.12.12" +version = "0.12.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14c39fd04924ca3a864207c66fc2cd7d22d7c016007f9ce846cbb9326331930a" +checksum = "e1fc403891a21bcfb7c37834ba66a547a8f402146eba7265b5a6d88059c9ff2f" [[package]] name = "tempfile" -version = "3.8.1" +version = "3.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ef1adac450ad7f4b3c28589471ade84f25f731a7a0fe30d71dfa9f60fd808e5" +checksum = "85b77fafb263dd9d05cbeac119526425676db3784113aa9295c88498cbf8bff1" dependencies = [ "cfg-if 1.0.0", 
"fastrand", - "redox_syscall", "rustix", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] name = "tendermint" -version = "0.34.0" +version = "0.34.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc2294fa667c8b548ee27a9ba59115472d0a09c2ba255771092a7f1dcf03a789" +checksum = "15ab8f0a25d0d2ad49ac615da054d6a76aa6603ff95f7d18bafdd34450a1a04b" dependencies = [ "bytes", "digest 0.10.7", @@ -5848,8 +6061,8 @@ dependencies = [ "k256", "num-traits", "once_cell", - "prost 0.12.3", - "prost-types 0.12.3", + "prost 0.12.4", + "prost-types 0.12.4", "ripemd", "serde", "serde_bytes", @@ -5866,9 +6079,9 @@ dependencies = [ [[package]] name = "tendermint-config" -version = "0.34.0" +version = "0.34.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a25dbe8b953e80f3d61789fbdb83bf9ad6c0ef16df5ca6546f49912542cc137" +checksum = "e1a02da769166e2052cd537b1a97c78017632c2d9e19266367b27e73910434fc" dependencies = [ "flex-error", "serde", @@ -5880,9 +6093,9 @@ dependencies = [ [[package]] name = "tendermint-light-client" -version = "0.34.0" +version = "0.34.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94aecbdccbc4b557649b2d1b1a4bfc27ec85205e00fb8020fce044245a4c9e3f" +checksum = "dc60a09541be13b8f8be305c260eb6144e48e01299302f71956c6e5284f2e4d6" dependencies = [ "contracts", "crossbeam-channel", @@ -5905,9 +6118,9 @@ dependencies = [ [[package]] name = "tendermint-light-client-verifier" -version = "0.34.0" +version = "0.34.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74994da9de4b1144837a367ca2c60c650f5526a7c1a54760a3020959b522e474" +checksum = "9b8090d0eef9ad57b1b913b5e358e26145c86017e87338136509b94383a4af25" dependencies = [ "derive_more", "flex-error", @@ -5918,16 +6131,16 @@ dependencies = [ [[package]] name = "tendermint-proto" -version = "0.34.0" +version = "0.34.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"2cc728a4f9e891d71adf66af6ecaece146f9c7a11312288a3107b3e1d6979aaf" +checksum = "b797dd3d2beaaee91d2f065e7bdf239dc8d80bba4a183a288bc1279dd5a69a1e" dependencies = [ "bytes", "flex-error", "num-derive", "num-traits", - "prost 0.12.3", - "prost-types 0.12.3", + "prost 0.12.4", + "prost-types 0.12.4", "serde", "serde_bytes", "subtle-encoding", @@ -5936,19 +6149,20 @@ dependencies = [ [[package]] name = "tendermint-rpc" -version = "0.34.0" +version = "0.34.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfbf0a4753b46a190f367337e0163d0b552a2674a6bac54e74f9f2cdcde2969b" +checksum = "71afae8bb5f6b14ed48d4e1316a643b6c2c3cbad114f510be77b4ed20b7b3e42" dependencies = [ "async-trait", "bytes", "flex-error", "futures", - "getrandom 0.2.11", + "getrandom 0.2.14", "peg", "pin-project", + "rand 0.8.5", "reqwest", - "semver 1.0.20", + "semver 1.0.22", "serde", "serde_bytes", "serde_json", @@ -5962,15 +6176,15 @@ dependencies = [ "tokio", "tracing", "url", - "uuid 0.8.2", + "uuid 1.8.0", "walkdir", ] [[package]] name = "tendermint-testgen" -version = "0.34.0" +version = "0.34.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a19d4f02b7e38ce790da973fdc9edc71a0e35340ac57737bf278c8379037c1f5" +checksum = "ae652e9e8b23f27f6a4fbeb29ead22ff4c2256b8d32df226b73258ba2a4ce11e" dependencies = [ "ed25519-consensus 2.1.0", "gumdrop", @@ -5984,9 +6198,9 @@ dependencies = [ [[package]] name = "test-log" -version = "0.2.14" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6159ab4116165c99fc88cce31f99fa2c9dbe08d3691cb38da02fc3b45f357d2b" +checksum = "7b319995299c65d522680decf80f2c108d85b861d81dfe340a10d16cee29d9e6" dependencies = [ "test-log-macros", "tracing-subscriber", @@ -5994,40 +6208,40 @@ dependencies = [ [[package]] name = "test-log-macros" -version = "0.2.14" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"7ba277e77219e9eea169e8508942db1bf5d8a41ff2db9b20aab5a5aadc9fa25d" +checksum = "c8f546451eaa38373f549093fe9fd05e7d2bade739e2ddf834b9968621d60107" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.58", ] [[package]] name = "thiserror" -version = "1.0.50" +version = "1.0.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9a7210f5c9a7156bb50aa36aed4c95afb51df0df00713949448cf9e97d382d2" +checksum = "03468839009160513471e86a034bb2c5c0e4baae3b43f79ffc55c4a5427b3297" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.50" +version = "1.0.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "266b2e40bc00e5a6c09c3584011e08b06f123c00362c92b975ba9843aaaa14b8" +checksum = "c61f3ba182994efc43764a46c018c347bc492c79f024e705f46567b418f6d4f7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.58", ] [[package]] name = "thread_local" -version = "1.1.7" +version = "1.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152" +checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" dependencies = [ "cfg-if 1.0.0", "once_cell", @@ -6035,9 +6249,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4a34ab300f2dee6e562c10a046fc05e358b29f9bf92277f30c3c8d82275f6f5" +checksum = "f657ba42c3f86e7680e53c8cd3af8abbe56b5491790b46e22e19c0d57463583e" dependencies = [ "deranged", "itoa", @@ -6055,9 +6269,9 @@ checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" [[package]] name = "time-macros" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ad70d68dba9e1f8aceda7aa6711965dfec1cac869f311a51bd08b3a2ccbce20" +checksum = 
"26197e33420244aeb70c3e8c78376ca46571bc4e701e4791c2cd9f57dcb3a43f" dependencies = [ "time-core", ] @@ -6119,9 +6333,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.34.0" +version = "1.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0c014766411e834f7af5b8f4cf46257aab4036ca95e9d2c144a10f59ad6f5b9" +checksum = "1adbebffeca75fcfd058afa480fb6c0b81e165a0323f9c9d39c9697e37c46787" dependencies = [ "backtrace", "bytes", @@ -6131,7 +6345,7 @@ dependencies = [ "parking_lot", "pin-project-lite", "signal-hook-registry", - "socket2 0.5.5", + "socket2", "tokio-macros", "windows-sys 0.48.0", ] @@ -6144,7 +6358,7 @@ checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.58", ] [[package]] @@ -6182,21 +6396,21 @@ dependencies = [ [[package]] name = "toml" -version = "0.8.2" +version = "0.8.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "185d8ab0dfbb35cf1399a6344d8484209c088f75f8f68230da55d48d95d43e3d" +checksum = "e9dd1545e8208b4a5af1aa9bbd0b4cf7e9ea08fabc5d0a5c67fcaafa17433aa3" dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit 0.20.2", + "toml_edit 0.22.9", ] [[package]] name = "toml_datetime" -version = "0.6.3" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cda73e2f1397b1262d6dfdcef8aafae14d1de7748d66822d3bfeeb6d03e5e4b" +checksum = "3550f4e9685620ac18a50ed434eb3aec30db8ba93b0287467bca5826ea25baf1" dependencies = [ "serde", ] @@ -6207,22 +6421,44 @@ version = "0.19.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ - "indexmap 2.1.0", + "indexmap 2.2.6", + "toml_datetime", + "winnow 0.5.40", +] + +[[package]] +name = "toml_edit" +version = "0.20.7" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "70f427fce4d84c72b5b732388bf4a9f4531b53f74e2887e3ecb2481f68f66d81" +dependencies = [ + "indexmap 2.2.6", + "toml_datetime", + "winnow 0.5.40", +] + +[[package]] +name = "toml_edit" +version = "0.21.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a8534fd7f78b5405e860340ad6575217ce99f38d4d5c8f2442cb5ecb50090e1" +dependencies = [ + "indexmap 2.2.6", "toml_datetime", - "winnow", + "winnow 0.5.40", ] [[package]] name = "toml_edit" -version = "0.20.2" +version = "0.22.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "396e4d48bbb2b7554c944bde63101b5ae446cff6ec4a24227428f15eb72ef338" +checksum = "8e40bb779c5187258fd7aad0eb68cb8706a0a81fa712fbea808ab43c4b8374c4" dependencies = [ - "indexmap 2.1.0", + "indexmap 2.2.6", "serde", "serde_spanned", "toml_datetime", - "winnow", + "winnow 0.6.5", ] [[package]] @@ -6264,7 +6500,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.58", ] [[package]] @@ -6303,38 +6539,266 @@ dependencies = [ [[package]] name = "try-lock" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" +checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" + +[[package]] +name = "tx_become_validator" +version = "0.32.1" +dependencies = [ + "getrandom 0.2.14", + "namada_tx_prelude", + "wee_alloc", +] + +[[package]] +name = "tx_bond" +version = "0.32.1" +dependencies = [ + "getrandom 0.2.14", + "namada", + "namada_test_utils", + "namada_tests", + "namada_tx_prelude", + "namada_vp_prelude", + "proptest", + "test-log", + "tracing", + "tracing-subscriber", + "wee_alloc", +] + +[[package]] +name = "tx_bridge_pool" +version = "0.32.1" +dependencies = [ + "getrandom 0.2.14", + 
"namada_tx_prelude", + "wee_alloc", +] + +[[package]] +name = "tx_change_consensus_key" +version = "0.32.1" +dependencies = [ + "getrandom 0.2.14", + "namada_tx_prelude", + "wee_alloc", +] + +[[package]] +name = "tx_change_validator_commission" +version = "0.32.1" +dependencies = [ + "getrandom 0.2.14", + "namada", + "namada_test_utils", + "namada_tests", + "namada_tx_prelude", + "namada_vp_prelude", + "proptest", + "test-log", + "tracing", + "tracing-subscriber", + "wee_alloc", +] + +[[package]] +name = "tx_change_validator_metadata" +version = "0.32.1" +dependencies = [ + "getrandom 0.2.14", + "namada_tx_prelude", + "wee_alloc", +] + +[[package]] +name = "tx_claim_rewards" +version = "0.32.1" +dependencies = [ + "getrandom 0.2.14", + "namada_tx_prelude", + "wee_alloc", +] + +[[package]] +name = "tx_deactivate_validator" +version = "0.32.1" +dependencies = [ + "getrandom 0.2.14", + "namada_tx_prelude", + "wee_alloc", +] + +[[package]] +name = "tx_ibc" +version = "0.32.1" +dependencies = [ + "getrandom 0.2.14", + "namada_tx_prelude", + "wee_alloc", +] + +[[package]] +name = "tx_init_account" +version = "0.32.1" +dependencies = [ + "getrandom 0.2.14", + "namada_tx_prelude", + "wee_alloc", +] + +[[package]] +name = "tx_init_proposal" +version = "0.32.1" +dependencies = [ + "getrandom 0.2.14", + "namada_tx_prelude", + "wee_alloc", +] + +[[package]] +name = "tx_reactivate_validator" +version = "0.32.1" +dependencies = [ + "getrandom 0.2.14", + "namada_tx_prelude", + "wee_alloc", +] [[package]] -name = "tx_template" +name = "tx_redelegate" version = "0.32.1" dependencies = [ - "getrandom 0.2.11", + "getrandom 0.2.14", + "namada", + "namada_test_utils", "namada_tests", "namada_tx_prelude", + "namada_vp_prelude", + "proptest", + "test-log", + "tracing", + "tracing-subscriber", + "wee_alloc", +] + +[[package]] +name = "tx_resign_steward" +version = "0.32.1" +dependencies = [ + "getrandom 0.2.14", + "namada_tx_prelude", + "wee_alloc", +] + +[[package]] +name = 
"tx_reveal_pk" +version = "0.32.1" +dependencies = [ + "getrandom 0.2.14", + "namada_tx_prelude", + "wee_alloc", +] + +[[package]] +name = "tx_transfer" +version = "0.32.1" +dependencies = [ + "getrandom 0.2.14", + "namada_tx_prelude", + "wee_alloc", +] + +[[package]] +name = "tx_unbond" +version = "0.32.1" +dependencies = [ + "getrandom 0.2.14", + "namada", + "namada_test_utils", + "namada_tests", + "namada_tx_prelude", + "namada_vp_prelude", + "proptest", + "test-log", + "tracing", + "tracing-subscriber", + "wee_alloc", +] + +[[package]] +name = "tx_unjail_validator" +version = "0.32.1" +dependencies = [ + "getrandom 0.2.14", + "namada_tx_prelude", + "wee_alloc", +] + +[[package]] +name = "tx_update_account" +version = "0.32.1" +dependencies = [ + "getrandom 0.2.14", + "namada_tx_prelude", + "wee_alloc", +] + +[[package]] +name = "tx_update_steward_commission" +version = "0.32.1" +dependencies = [ + "getrandom 0.2.14", + "namada_tx_prelude", + "wee_alloc", +] + +[[package]] +name = "tx_vote_proposal" +version = "0.32.1" +dependencies = [ + "getrandom 0.2.14", + "namada_tx_prelude", + "wee_alloc", +] + +[[package]] +name = "tx_withdraw" +version = "0.32.1" +dependencies = [ + "getrandom 0.2.14", + "namada", + "namada_test_utils", + "namada_tests", + "namada_tx_prelude", + "namada_vp_prelude", + "proptest", + "test-log", + "tracing", + "tracing-subscriber", "wee_alloc", ] [[package]] name = "typed-builder" -version = "0.18.0" +version = "0.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e47c0496149861b7c95198088cbf36645016b1a0734cf350c50e2a38e070f38a" +checksum = "444d8748011b93cb168770e8092458cb0f8854f931ff82fdf6ddfbd72a9c933e" dependencies = [ "typed-builder-macro", ] [[package]] name = "typed-builder-macro" -version = "0.18.0" +version = "0.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "982ee4197351b5c9782847ef5ec1fdcaf50503fb19d68f9771adae314e72b492" +checksum = 
"563b3b88238ec95680aef36bdece66896eaa7ce3c0f1b4f39d38fb2435261352" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.58", ] [[package]] @@ -6369,9 +6833,9 @@ checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" [[package]] name = "unicode-bidi" -version = "0.3.14" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f2528f27a9eb2b21e69c95319b30bd0efd85d09c379741b0f78ea1d86be2416" +checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" [[package]] name = "unicode-ident" @@ -6381,9 +6845,9 @@ checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" [[package]] name = "unicode-normalization" -version = "0.1.22" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" +checksum = "a56d1686db2308d901306f92a263857ef59ea39678a5458e7cb17f01415101f5" dependencies = [ "tinyvec", ] @@ -6445,15 +6909,15 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" dependencies = [ - "getrandom 0.2.11", + "getrandom 0.2.14", "serde", ] [[package]] name = "uuid" -version = "1.6.1" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e395fcf16a7a3d8127ec99782007af141946b4795001f876d54fb0d55978560" +checksum = "a183cf7feeba97b4dd1c0d46788634f6221d87fa961b305bed08c851829efcc0" [[package]] name = "version_check" @@ -6462,12 +6926,36 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" [[package]] -name = "vp_template" +name = "vp_implicit" version = "0.32.1" dependencies = [ - "getrandom 0.2.11", + "getrandom 0.2.14", + "namada", + "namada_test_utils", "namada_tests", + "namada_tx_prelude", "namada_vp_prelude", 
+ "proptest", + "test-log", + "tracing", + "tracing-subscriber", + "wee_alloc", +] + +[[package]] +name = "vp_user" +version = "0.32.1" +dependencies = [ + "getrandom 0.2.14", + "namada", + "namada_test_utils", + "namada_tests", + "namada_tx_prelude", + "namada_vp_prelude", + "proptest", + "test-log", + "tracing", + "tracing-subscriber", "wee_alloc", ] @@ -6482,9 +6970,9 @@ dependencies = [ [[package]] name = "walkdir" -version = "2.4.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d71d857dc86794ca4c280d616f7da00d2dbfd8cd788846559a6813e6aa4b54ee" +checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" dependencies = [ "same-file", "winapi-util", @@ -6513,9 +7001,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.89" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ed0d4f68a3015cc185aff4db9506a015f4b96f95303897bfa23f846db54064e" +checksum = "4be2531df63900aeb2bca0daaaddec08491ee64ceecbee5076636a3b026795a8" dependencies = [ "cfg-if 1.0.0", "wasm-bindgen-macro", @@ -6523,24 +7011,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.89" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b56f625e64f3a1084ded111c4d5f477df9f8c92df113852fa5a374dbda78826" +checksum = "614d787b966d3989fa7bb98a654e369c762374fd3213d212cfc0251257e747da" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.58", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.39" +version = "0.4.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac36a15a220124ac510204aec1c3e5db8a22ab06fd6706d881dc6149f8ed9a12" +checksum = "76bc14366121efc8dbb487ab05bcc9d346b3b5ec0eaa76e46594cabbe51762c0" dependencies = [ "cfg-if 1.0.0", 
"js-sys", @@ -6550,9 +7038,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.89" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0162dbf37223cd2afce98f3d0785506dcb8d266223983e4b5b525859e6e182b2" +checksum = "a1f8823de937b71b9460c0c34e25f3da88250760bec0ebac694b49997550d726" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -6560,22 +7048,22 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.89" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0eb82fcb7930ae6219a7ecfd55b217f5f0893484b7a13022ebb2b2bf20b5283" +checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.58", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.89" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ab9b36309365056cd639da3134bf87fa8f3d86008abf99e612384a6eecd459f" +checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96" [[package]] name = "wasm-encoder" @@ -6832,7 +7320,7 @@ dependencies = [ "libc", "loupe", "mach", - "memoffset 0.6.5", + "memoffset", "more-asserts", "region", "rkyv", @@ -6857,7 +7345,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "29e3ac9b780c7dda0cac7a52a5d6d2d6707cc6e3451c9db209b6c758f40d7acb" dependencies = [ "indexmap 1.9.3", - "semver 1.0.20", + "semver 1.0.22", ] [[package]] @@ -6897,9 +7385,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.66" +version = "0.3.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50c24a44ec86bb68fbecd1b3efed7e85ea5621b39b35ef2766b66cd984f8010f" +checksum = "77afa9a11836342370f4817622a2f0f418b134426d91a82dfb48f532d2ec13ef" dependencies = [ "js-sys", "wasm-bindgen", @@ -6907,9 +7395,9 
@@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.25.3" +version = "0.25.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1778a42e8b3b90bff8d0f5032bf22250792889a5cdc752aa0020c84abe3aaf10" +checksum = "5f20c57d8d7db6d3b86154206ae5d8fba62dd39573114de97c2cb0578251f8e1" [[package]] name = "wee_alloc" @@ -6968,11 +7456,11 @@ checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows-core" -version = "0.51.1" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1f8cf84f35d2db49a46868f947758c7a1138116f7fac3bc844f43ade1292e64" +checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" dependencies = [ - "windows-targets 0.48.5", + "windows-targets 0.52.4", ] [[package]] @@ -7003,7 +7491,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows-targets 0.52.0", + "windows-targets 0.52.4", ] [[package]] @@ -7023,17 +7511,17 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.52.0" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd" +checksum = "7dd37b7e5ab9018759f893a1952c9420d060016fc19a472b4bb20d1bdd694d1b" dependencies = [ - "windows_aarch64_gnullvm 0.52.0", - "windows_aarch64_msvc 0.52.0", - "windows_i686_gnu 0.52.0", - "windows_i686_msvc 0.52.0", - "windows_x86_64_gnu 0.52.0", - "windows_x86_64_gnullvm 0.52.0", - "windows_x86_64_msvc 0.52.0", + "windows_aarch64_gnullvm 0.52.4", + "windows_aarch64_msvc 0.52.4", + "windows_i686_gnu 0.52.4", + "windows_i686_msvc 0.52.4", + "windows_x86_64_gnu 0.52.4", + "windows_x86_64_gnullvm 0.52.4", + "windows_x86_64_msvc 0.52.4", ] [[package]] @@ -7044,9 +7532,9 @@ checksum = 
"2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_gnullvm" -version = "0.52.0" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea" +checksum = "bcf46cf4c365c6f2d1cc93ce535f2c8b244591df96ceee75d8e83deb70a9cac9" [[package]] name = "windows_aarch64_msvc" @@ -7062,9 +7550,9 @@ checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_aarch64_msvc" -version = "0.52.0" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef" +checksum = "da9f259dd3bcf6990b55bffd094c4f7235817ba4ceebde8e6d11cd0c5633b675" [[package]] name = "windows_i686_gnu" @@ -7080,9 +7568,9 @@ checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_gnu" -version = "0.52.0" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313" +checksum = "b474d8268f99e0995f25b9f095bc7434632601028cf86590aea5c8a5cb7801d3" [[package]] name = "windows_i686_msvc" @@ -7098,9 +7586,9 @@ checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_i686_msvc" -version = "0.52.0" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a" +checksum = "1515e9a29e5bed743cb4415a9ecf5dfca648ce85ee42e15873c3cd8610ff8e02" [[package]] name = "windows_x86_64_gnu" @@ -7116,9 +7604,9 @@ checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnu" -version = "0.52.0" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd" +checksum = "5eee091590e89cc02ad514ffe3ead9eb6b660aedca2183455434b93546371a03" [[package]] name = "windows_x86_64_gnullvm" @@ -7128,9 +7616,9 @@ checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_gnullvm" -version = "0.52.0" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e" +checksum = "77ca79f2451b49fa9e2af39f0747fe999fcda4f5e241b2898624dca97a1f2177" [[package]] name = "windows_x86_64_msvc" @@ -7146,15 +7634,24 @@ checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "windows_x86_64_msvc" -version = "0.52.0" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" +checksum = "32b752e52a2da0ddfbdbcc6fceadfeede4c939ed16d13e648833a61dfb611ed8" [[package]] name = "winnow" -version = "0.5.25" +version = "0.5.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7e87b8dfbe3baffbe687eef2e164e32286eff31a5ee16463ce03d991643ec94" +checksum = "f593a95398737aeed53e489c785df13f3618e41dbcd6718c6addbf1395aa6876" +dependencies = [ + "memchr", +] + +[[package]] +name = "winnow" +version = "0.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dffa400e67ed5a4dd237983829e66475f0a4a26938c4b04c21baede6262215b8" dependencies = [ "memchr", ] @@ -7206,6 +7703,26 @@ dependencies = [ "nonempty", ] +[[package]] +name = "zerocopy" +version = "0.7.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74d4d3961e53fa4c9a25a8637fc2bfaf2595b3d3ae34875568a5cf64787716be" +dependencies = [ + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.7.32" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.58", +] + [[package]] name = "zeroize" version = "1.7.0" @@ -7223,5 +7740,5 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.58", ] diff --git a/wasm/Cargo.toml b/wasm/Cargo.toml index c4cb182c1a..68f9a1e420 100644 --- a/wasm/Cargo.toml +++ b/wasm/Cargo.toml @@ -2,11 +2,46 @@ resolver = "2" members = [ - "wasm_source", - "tx_template", - "vp_template", + "tx_become_validator", + "tx_bond", + "tx_change_bridge_pool", + "tx_change_consensus_key", + "tx_change_validator_commission", + "tx_change_validator_metadata", + "tx_claim_rewards", + "tx_deactivate_validator", + "tx_ibc", + "tx_init_account", + "tx_init_proposal", + "tx_reactivate_validator", + "tx_redelegate", + "tx_resign_steward", + "tx_transfer", + "tx_unbond", + "tx_update_account", + "tx_reveal_pk", + "tx_update_steward_commission", + "tx_unjail_validator", + "tx_vote_proposal", + "tx_withdraw", + "vp_implicit", + "vp_user", ] +[workspace.package] +authors = ["Heliax AG "] +edition = "2021" +license = "GPL-3.0" +version = "0.32.1" + +[workspace.dependencies] +namada_tx_prelude = { path = "../crates/tx_prelude" } +namada_vp_prelude = { path = "../crates/vp_prelude" } + +once_cell = { version = "1.8.0" } +wee_alloc = "0.4.5" +getrandom = { version = "0.2", features = ["custom"] } + [profile.release] # smaller and faster wasm (https://rustwasm.github.io/book/reference/code-size.html#compiling-with-link-time-optimizations-lto) lto = true @@ -14,3 +49,4 @@ lto = true panic = "abort" # tell llvm to optimize for size (https://rustwasm.github.io/book/reference/code-size.html#tell-llvm-to-optimize-for-size-instead-of-speed) opt-level = 'z' +strip = "debuginfo" diff --git a/wasm/Makefile b/wasm/Makefile new file mode 100644 index 
0000000000..1bc09fcc1c --- /dev/null +++ b/wasm/Makefile @@ -0,0 +1,39 @@ +cargo := $(env) cargo +rustup := $(env) rustup +# Nightly build is currently used for rustfmt and clippy. +nightly := $(shell cat ../rust-nightly-version) + +# Build all wasms in release mode +all: + RUSTFLAGS='-C link-arg=-s' $(cargo) build --release --target wasm32-unknown-unknown --target-dir 'target' + cp target/wasm32-unknown-unknown/release/*.wasm . + +debug: + RUSTFLAGS='-C link-arg=-s' $(cargo) build --target wasm32-unknown-unknown --target-dir 'target' + cp target/wasm32-unknown-unknown/debug/*.wasm . + +check: + $(cargo) +$(nightly) check --workspace --target wasm32-unknown-unknown + +clippy: + $(cargo) +$(nightly) clippy --workspace -- -D warnings + +clippy-fix: + $(cargo) +$(nightly) clippy --fix -Z unstable-options --workspace --allow-dirty --allow-staged + +fmt: + $(cargo) +$(nightly) fmt + +fmt-check: + $(cargo) +$(nightly) fmt --check + +test: + $(cargo) +$(nightly) test -- -Z unstable-options --report-time + +clean: + $(cargo) clean + +deps: + $(rustup) target add wasm32-unknown-unknown + +.PHONY: all debug check clippy clippy-fix fmt fmt-check test clean deps \ No newline at end of file diff --git a/wasm/tx_become_validator/Cargo.toml b/wasm/tx_become_validator/Cargo.toml new file mode 100644 index 0000000000..67b57f5cc3 --- /dev/null +++ b/wasm/tx_become_validator/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "tx_become_validator" +description = "WASM transaction to create a validator" +authors.workspace = true +edition.workspace = true +license.workspace = true +version.workspace = true + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +namada_tx_prelude.workspace = true +wee_alloc.workspace = true +getrandom.workspace = true + +[lib] +crate-type = ["cdylib"] \ No newline at end of file diff --git a/wasm/wasm_source/src/tx_become_validator.rs b/wasm/tx_become_validator/src/lib.rs similarity index 64% 
rename from wasm/wasm_source/src/tx_become_validator.rs rename to wasm/tx_become_validator/src/lib.rs index 4f959f6921..e5277a1840 100644 --- a/wasm/wasm_source/src/tx_become_validator.rs +++ b/wasm/tx_become_validator/src/lib.rs @@ -1,10 +1,11 @@ //! A tx to initialize a new validator account with a given public keys and a //! validity predicates. +use booleans::ResultBoolExt; use namada_tx_prelude::transaction::pos::BecomeValidator; use namada_tx_prelude::*; -#[transaction(gas = 4395397)] +#[transaction] fn apply_tx(ctx: &mut Ctx, tx_data: Tx) -> TxResult { let signed = tx_data; let data = signed.data().ok_or_err_msg("Missing data").map_err(|err| { @@ -12,7 +13,7 @@ fn apply_tx(ctx: &mut Ctx, tx_data: Tx) -> TxResult { err })?; let become_validator = BecomeValidator::try_from_slice(&data[..]) - .wrap_err("failed to decode InitValidator")?; + .wrap_err("Failed to decode BecomeValidator tx data")?; debug_log!("apply_tx called to init a new validator account"); // Check that the tx has been signed with all the keys to be used for the @@ -25,20 +26,17 @@ fn apply_tx(ctx: &mut Ctx, tx_data: Tx) -> TxResult { key::common::PublicKey::Secp256k1(become_validator.eth_hot_key.clone()), become_validator.protocol_key.clone(), ]; - if !matches!(verify_signatures_of_pks(ctx, &signed, all_pks), Ok(true)) { - debug_log!("Keys ownership signature verification failed"); - panic!() - } + verify_signatures_of_pks(ctx, &signed, all_pks).true_or_else(|| { + const ERR_MSG: &str = "Keys ownership signature verification failed"; + debug_log!("{ERR_MSG}"); + Error::new_const(ERR_MSG) + })?; // Register the validator in PoS - match ctx.become_validator(become_validator) { - Ok(validator_address) => { - debug_log!("Created validator {}", validator_address.encode(),) - } - Err(err) => { - debug_log!("Validator creation failed with: {}", err); - panic!() - } - } + let validator_address = ctx + .become_validator(become_validator) + .wrap_err("Validator creation failed")?; + + 
debug_log!("Created validator {validator_address}"); Ok(()) } diff --git a/wasm/tx_bond/Cargo.toml b/wasm/tx_bond/Cargo.toml new file mode 100644 index 0000000000..804cb3edf7 --- /dev/null +++ b/wasm/tx_bond/Cargo.toml @@ -0,0 +1,28 @@ +[package] +name = "tx_bond" +description = "WASM transaction to bond tokens" +authors.workspace = true +edition.workspace = true +license.workspace = true +version.workspace = true + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +namada_tx_prelude.workspace = true +wee_alloc.workspace = true +getrandom.workspace = true + +[dev-dependencies] +namada = {path = "../../crates/namada"} +namada_tests = {path = "../../crates/tests"} +namada_test_utils = {path = "../../crates/test_utils"} +namada_vp_prelude = {path = "../../crates/vp_prelude"} + +proptest = "1.4.0" +test-log = {version = "0.2.14", default-features = false, features = ["trace"]} +tracing = "0.1.30" +tracing-subscriber = {version = "0.3.7", default-features = false, features = ["env-filter", "fmt"]} + +[lib] +crate-type = ["cdylib"] \ No newline at end of file diff --git a/wasm/wasm_source/src/tx_bond.rs b/wasm/tx_bond/src/lib.rs similarity index 97% rename from wasm/wasm_source/src/tx_bond.rs rename to wasm/tx_bond/src/lib.rs index b4b182034d..21887bbfb8 100644 --- a/wasm/wasm_source/src/tx_bond.rs +++ b/wasm/tx_bond/src/lib.rs @@ -2,7 +2,7 @@ use namada_tx_prelude::*; -#[transaction(gas = 1342908)] +#[transaction] fn apply_tx(ctx: &mut Ctx, tx_data: Tx) -> TxResult { let signed = tx_data; let data = signed.data().ok_or_err_msg("Missing data").map_err(|err| { @@ -10,8 +10,7 @@ fn apply_tx(ctx: &mut Ctx, tx_data: Tx) -> TxResult { err })?; let bond = transaction::pos::Bond::try_from_slice(&data[..]) - .wrap_err("failed to decode Bond") - .unwrap(); + .wrap_err("Failed to decode Bond tx data")?; ctx.bond_tokens(bond.source.as_ref(), &bond.validator, bond.amount) } @@ -29,7 +28,6 @@ mod tests { 
read_total_stake, read_validator_stake, }; use namada::proof_of_stake::types::{GenesisValidator, WeightedValidator}; - use namada::validity_predicate::VpSentinel; use namada_tests::log::test; use namada_tests::native_vp::pos::init_pos; use namada_tests::native_vp::TestNativeVpEnv; @@ -331,13 +329,10 @@ mod tests { let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &tx_env.gas_meter.borrow(), )); - let sentinel = RefCell::new(VpSentinel::default()); let vp_env = TestNativeVpEnv::from_tx_env(tx_env, address::POS); - let result = vp_env.validate_tx(&gas_meter, &sentinel, PosVP::new); - let result = - result.expect("Validation of valid changes must not fail!"); + let result = vp_env.validate_tx(&gas_meter, PosVP::new); assert!( - result, + result.is_ok(), "PoS Validity predicate must accept this transaction" ); Ok(()) diff --git a/wasm/tx_change_bridge_pool/Cargo.toml b/wasm/tx_change_bridge_pool/Cargo.toml new file mode 100644 index 0000000000..1999e31dea --- /dev/null +++ b/wasm/tx_change_bridge_pool/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "tx_bridge_pool" +description = "WASM transaction to interact with the bridge pool" +authors.workspace = true +edition.workspace = true +license.workspace = true +version.workspace = true + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +namada_tx_prelude.workspace = true +wee_alloc.workspace = true +getrandom.workspace = true + +[lib] +crate-type = ["cdylib"] \ No newline at end of file diff --git a/wasm/wasm_source/src/tx_bridge_pool.rs b/wasm/tx_change_bridge_pool/src/lib.rs similarity index 75% rename from wasm/wasm_source/src/tx_bridge_pool.rs rename to wasm/tx_change_bridge_pool/src/lib.rs index 8d4277a22e..3b45c22bb0 100644 --- a/wasm/wasm_source/src/tx_bridge_pool.rs +++ b/wasm/tx_change_bridge_pool/src/lib.rs @@ -1,13 +1,13 @@ //! A tx for adding a transfer request across the Ethereum bridge //! into the bridge pool. 
-use eth_bridge_pool::{GasFee, PendingTransfer, TransferToEthereum}; use namada_tx_prelude::eth_bridge_pool::{ - get_pending_key, BRIDGE_POOL_ADDRESS, + get_pending_key, GasFee, PendingTransfer, TransferToEthereum, + BRIDGE_POOL_ADDRESS, }; use namada_tx_prelude::parameters::native_erc20_key; use namada_tx_prelude::*; -#[transaction(gas = 1038546)] +#[transaction] fn apply_tx(ctx: &mut Ctx, signed: Tx) -> TxResult { let data = signed.data().ok_or_err_msg("Missing data").map_err(|err| { ctx.set_commitment_sentinel(); @@ -15,7 +15,7 @@ fn apply_tx(ctx: &mut Ctx, signed: Tx) -> TxResult { })?; let transfer = PendingTransfer::try_from_slice(&data[..]) .map_err(|e| Error::wrap("Error deserializing PendingTransfer", e))?; - log_string("Received transfer to add to pool."); + debug_log!("Received transfer to add to Bridge pool"); // pay the gas fees let GasFee { token: ref fee_token_addr, @@ -29,7 +29,7 @@ fn apply_tx(ctx: &mut Ctx, signed: Tx) -> TxResult { fee_token_addr, amount, )?; - log_string("Token transfer succeeded."); + debug_log!("Bridge pool token transfer succeeded"); let TransferToEthereum { asset, ref sender, @@ -57,7 +57,7 @@ fn apply_tx(ctx: &mut Ctx, signed: Tx) -> TxResult { amount, )?; } - log_string("Escrow succeeded"); + debug_log!("Bridge pool escrow succeeded"); // add transfer into the pool let pending_key = get_pending_key(&transfer); ctx.write(&pending_key, transfer) @@ -66,11 +66,11 @@ fn apply_tx(ctx: &mut Ctx, signed: Tx) -> TxResult { } fn native_erc20_address(ctx: &mut Ctx) -> EnvResult { - log_string("Trying to get wnam key"); + debug_log!("Trying to get wnam key for Bridge pool transfer"); let addr = ctx - .read_bytes(&native_erc20_key()) - .map_err(|e| Error::wrap("Could not read wNam key from storage", e))? - .unwrap(); - log_string("Got wnam key"); - Ok(BorshDeserialize::try_from_slice(addr.as_slice()).unwrap()) + .read(&native_erc20_key()) + .wrap_err("Could not read wrapped NAM address")? 
+ .ok_or_err_msg("Wrapped NAM address must be present in storage")?; + debug_log!("Got wnam key for Bridge pool transfer: {addr}"); + Ok(addr) } diff --git a/wasm/tx_change_consensus_key/Cargo.toml b/wasm/tx_change_consensus_key/Cargo.toml new file mode 100644 index 0000000000..2f90bb8798 --- /dev/null +++ b/wasm/tx_change_consensus_key/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "tx_change_consensus_key" +description = "WASM transaction to change consensus keys" +authors.workspace = true +edition.workspace = true +license.workspace = true +version.workspace = true + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +namada_tx_prelude.workspace = true +wee_alloc.workspace = true +getrandom.workspace = true + +[lib] +crate-type = ["cdylib"] \ No newline at end of file diff --git a/wasm/wasm_source/src/tx_change_consensus_key.rs b/wasm/tx_change_consensus_key/src/lib.rs similarity index 53% rename from wasm/wasm_source/src/tx_change_consensus_key.rs rename to wasm/tx_change_consensus_key/src/lib.rs index 9d82f4d855..4e0686abe5 100644 --- a/wasm/wasm_source/src/tx_change_consensus_key.rs +++ b/wasm/tx_change_consensus_key/src/lib.rs @@ -1,9 +1,10 @@ //! A tx for a validator to change their consensus key. 
+use booleans::ResultBoolExt; use namada_tx_prelude::transaction::pos::ConsensusKeyChange; use namada_tx_prelude::*; -#[transaction(gas = 220000)] // TODO: need to benchmark this gas +#[transaction] // TODO: need to benchmark this gas fn apply_tx(ctx: &mut Ctx, tx_data: Tx) -> TxResult { let signed = tx_data; let data = signed.data().ok_or_err_msg("Missing data")?; @@ -11,16 +12,17 @@ fn apply_tx(ctx: &mut Ctx, tx_data: Tx) -> TxResult { validator, consensus_key, } = transaction::pos::ConsensusKeyChange::try_from_slice(&data[..]) - .wrap_err("failed to decode Dec value")?; + .wrap_err("Failed to decode ConsensusKeyChange value")?; // Check that the tx has been signed with the new consensus key - if !matches!( - verify_signatures_of_pks(ctx, &signed, vec![consensus_key.clone()]), - Ok(true) - ) { - debug_log!("Consensus key ownership signature verification failed"); - panic!() - } + verify_signatures_of_pks(ctx, &signed, vec![consensus_key.clone()]) + .true_or_else(|| { + const ERR_MSG: &str = + "Consensus key ownership signature verification failed"; + debug_log!("{ERR_MSG}"); + Error::new_const(ERR_MSG) + })?; ctx.change_validator_consensus_key(&validator, &consensus_key) + .wrap_err("Failed to change validator consensus key") } diff --git a/wasm/tx_change_validator_commission/Cargo.toml b/wasm/tx_change_validator_commission/Cargo.toml new file mode 100644 index 0000000000..a2c37ac52e --- /dev/null +++ b/wasm/tx_change_validator_commission/Cargo.toml @@ -0,0 +1,28 @@ +[package] +name = "tx_change_validator_commission" +description = "WASM trasaction to change validator commission" +authors.workspace = true +edition.workspace = true +license.workspace = true +version.workspace = true + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +namada_tx_prelude.workspace = true +wee_alloc.workspace = true +getrandom.workspace = true + +[dev-dependencies] +namada = {path = "../../crates/namada"} +namada_tests 
= {path = "../../crates/tests"} +namada_test_utils = {path = "../../crates/test_utils"} +namada_vp_prelude = {path = "../../crates/vp_prelude"} + +proptest = "1.4.0" +test-log = {version = "0.2.14", default-features = false, features = ["trace"]} +tracing = "0.1.30" +tracing-subscriber = {version = "0.3.7", default-features = false, features = ["env-filter", "fmt"]} + +[lib] +crate-type = ["cdylib"] \ No newline at end of file diff --git a/wasm/wasm_source/src/tx_change_validator_commission.rs b/wasm/tx_change_validator_commission/src/lib.rs similarity index 95% rename from wasm/wasm_source/src/tx_change_validator_commission.rs rename to wasm/tx_change_validator_commission/src/lib.rs index 039296e540..e9de2f90a3 100644 --- a/wasm/wasm_source/src/tx_change_validator_commission.rs +++ b/wasm/tx_change_validator_commission/src/lib.rs @@ -3,7 +3,7 @@ use namada_tx_prelude::transaction::pos::CommissionChange; use namada_tx_prelude::*; -#[transaction(gas = 1319787)] +#[transaction] fn apply_tx(ctx: &mut Ctx, tx_data: Tx) -> TxResult { let signed = tx_data; let data = signed.data().ok_or_err_msg("Missing data").map_err(|err| { @@ -14,8 +14,9 @@ fn apply_tx(ctx: &mut Ctx, tx_data: Tx) -> TxResult { validator, new_rate, } = transaction::pos::CommissionChange::try_from_slice(&data[..]) - .wrap_err("failed to decode Dec value")?; + .wrap_err("Failed to decode CommissionChange value")?; ctx.change_validator_commission_rate(&validator, &new_rate) + .wrap_err("Failed to change validator's commission rate") } #[cfg(test)] @@ -28,7 +29,6 @@ mod tests { use namada::ledger::pos::{OwnedPosParams, PosVP}; use namada::proof_of_stake::storage::validator_commission_rate_handle; use namada::proof_of_stake::types::GenesisValidator; - use namada::validity_predicate::VpSentinel; use namada_tests::log::test; use namada_tests::native_vp::pos::init_pos; use namada_tests::native_vp::TestNativeVpEnv; @@ -156,13 +156,10 @@ mod tests { let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( 
&tx_env.gas_meter.borrow(), )); - let sentinel = RefCell::new(VpSentinel::default()); let vp_env = TestNativeVpEnv::from_tx_env(tx_env, address::POS); - let result = vp_env.validate_tx(&gas_meter, &sentinel, PosVP::new); - let result = - result.expect("Validation of valid changes must not fail!"); + let result = vp_env.validate_tx(&gas_meter, PosVP::new); assert!( - result, + result.is_ok(), "PoS Validity predicate must accept this transaction" ); diff --git a/wasm/tx_change_validator_metadata/Cargo.toml b/wasm/tx_change_validator_metadata/Cargo.toml new file mode 100644 index 0000000000..f659bb0618 --- /dev/null +++ b/wasm/tx_change_validator_metadata/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "tx_change_validator_metadata" +description = "WASM transaction to change validator metadata" +authors.workspace = true +edition.workspace = true +license.workspace = true +version.workspace = true + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +namada_tx_prelude.workspace = true +wee_alloc.workspace = true +getrandom.workspace = true + +[lib] +crate-type = ["cdylib"] \ No newline at end of file diff --git a/wasm/wasm_source/src/tx_change_validator_metadata.rs b/wasm/tx_change_validator_metadata/src/lib.rs similarity index 85% rename from wasm/wasm_source/src/tx_change_validator_metadata.rs rename to wasm/tx_change_validator_metadata/src/lib.rs index 0a0806fd2c..cf1912063e 100644 --- a/wasm/wasm_source/src/tx_change_validator_metadata.rs +++ b/wasm/tx_change_validator_metadata/src/lib.rs @@ -5,7 +5,7 @@ use namada_tx_prelude::transaction::pos::MetaDataChange; use namada_tx_prelude::*; // TODO: need to benchmark gas!!! 
-#[transaction(gas = 220000)] +#[transaction] fn apply_tx(ctx: &mut Ctx, tx_data: Tx) -> TxResult { let signed = tx_data; let data = signed.data().ok_or_err_msg("Missing data")?; @@ -18,7 +18,7 @@ fn apply_tx(ctx: &mut Ctx, tx_data: Tx) -> TxResult { avatar, commission_rate, } = transaction::pos::MetaDataChange::try_from_slice(&data[..]) - .wrap_err("failed to decode Dec value")?; + .wrap_err("Failed to decode MetaDataChange value")?; ctx.change_validator_metadata( &validator, email, @@ -28,4 +28,5 @@ fn apply_tx(ctx: &mut Ctx, tx_data: Tx) -> TxResult { avatar, commission_rate, ) + .wrap_err("Failed to update validator's metadata") } diff --git a/wasm/tx_claim_rewards/Cargo.toml b/wasm/tx_claim_rewards/Cargo.toml new file mode 100644 index 0000000000..696a67fa8f --- /dev/null +++ b/wasm/tx_claim_rewards/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "tx_claim_rewards" +description = "WASM transaction to claim proof-of-stake rewards" +authors.workspace = true +edition.workspace = true +license.workspace = true +version.workspace = true + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +namada_tx_prelude.workspace = true +wee_alloc.workspace = true +getrandom.workspace = true + +[lib] +crate-type = ["cdylib"] \ No newline at end of file diff --git a/wasm/wasm_source/src/tx_claim_rewards.rs b/wasm/tx_claim_rewards/src/lib.rs similarity index 72% rename from wasm/wasm_source/src/tx_claim_rewards.rs rename to wasm/tx_claim_rewards/src/lib.rs index 62207804af..43ee5e9732 100644 --- a/wasm/wasm_source/src/tx_claim_rewards.rs +++ b/wasm/tx_claim_rewards/src/lib.rs @@ -3,13 +3,15 @@ use namada_tx_prelude::*; -#[transaction(gas = 260000)] // TODO: needs to be benchmarked +#[transaction] // TODO: needs to be benchmarked fn apply_tx(ctx: &mut Ctx, tx_data: Tx) -> TxResult { let signed = tx_data; let data = signed.data().ok_or_err_msg("Missing data")?; let withdraw = 
transaction::pos::Withdraw::try_from_slice(&data[..]) - .wrap_err("failed to decode Withdraw")?; + .wrap_err("Failed to decode Withdraw value")?; + + ctx.claim_reward_tokens(withdraw.source.as_ref(), &withdraw.validator) + .wrap_err("Failed to claim rewards")?; - ctx.claim_reward_tokens(withdraw.source.as_ref(), &withdraw.validator)?; Ok(()) } diff --git a/wasm/tx_deactivate_validator/Cargo.toml b/wasm/tx_deactivate_validator/Cargo.toml new file mode 100644 index 0000000000..e536c3a5e0 --- /dev/null +++ b/wasm/tx_deactivate_validator/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "tx_deactivate_validator" +description = "WASM transaction to deactivate a validator" +authors.workspace = true +edition.workspace = true +license.workspace = true +version.workspace = true + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +namada_tx_prelude.workspace = true +wee_alloc.workspace = true +getrandom.workspace = true + +[lib] +crate-type = ["cdylib"] \ No newline at end of file diff --git a/wasm/wasm_source/src/tx_deactivate_validator.rs b/wasm/tx_deactivate_validator/src/lib.rs similarity index 54% rename from wasm/wasm_source/src/tx_deactivate_validator.rs rename to wasm/tx_deactivate_validator/src/lib.rs index cd62efc114..e8574783a0 100644 --- a/wasm/wasm_source/src/tx_deactivate_validator.rs +++ b/wasm/tx_deactivate_validator/src/lib.rs @@ -2,11 +2,13 @@ use namada_tx_prelude::*; -#[transaction(gas = 340000)] +#[transaction] fn apply_tx(ctx: &mut Ctx, tx_data: Tx) -> TxResult { let signed = tx_data; let data = signed.data().ok_or_err_msg("Missing data")?; - let validator = Address::try_from_slice(&data[..]) - .wrap_err("failed to decode an Address")?; + let validator = Address::try_from_slice(&data[..]).wrap_err( + "Failed to decode the address of the validator to deactivate", + )?; ctx.deactivate_validator(&validator) + .wrap_err("Failed to deactivate validator") } diff --git a/wasm/tx_ibc/Cargo.toml 
b/wasm/tx_ibc/Cargo.toml new file mode 100644 index 0000000000..fca7472d87 --- /dev/null +++ b/wasm/tx_ibc/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "tx_ibc" +description = "WASM transaction for an IBC transfer" +authors.workspace = true +edition.workspace = true +license.workspace = true +version.workspace = true + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +namada_tx_prelude.workspace = true +wee_alloc.workspace = true +getrandom.workspace = true + +[lib] +crate-type = ["cdylib"] \ No newline at end of file diff --git a/wasm/tx_ibc/src/lib.rs b/wasm/tx_ibc/src/lib.rs new file mode 100644 index 0000000000..fbf408bf1d --- /dev/null +++ b/wasm/tx_ibc/src/lib.rs @@ -0,0 +1,48 @@ +//! A tx for IBC. +//! This tx executes an IBC operation according to the given IBC message as the +//! tx_data. This tx uses an IBC message wrapped inside +//! `key::ed25519::SignedTxData` as its input as declared in `ibc` crate. + +use namada_tx_prelude::*; + +#[transaction] +fn apply_tx(ctx: &mut Ctx, tx_data: Tx) -> TxResult { + let signed = tx_data; + // let data = signed.data().ok_or_err_msg("Missing data").or_else(|err| { + // ctx.set_commitment_sentinel(); + // Err(err) + // })?; + + // let transfer = + // ibc::ibc_actions(ctx).execute(&data).into_storage_result()?; + + // Temp. 
workaround for + let transfer = tx_ibc_execute()?; + + if let Some(transfer) = transfer { + let shielded = transfer + .shielded + .as_ref() + .map(|hash| { + signed + .get_section(hash) + .and_then(|x| x.as_ref().masp_tx()) + .ok_or_err_msg("unable to find shielded section") + .map_err(|err| { + ctx.set_commitment_sentinel(); + err + }) + }) + .transpose()?; + if let Some(shielded) = shielded { + token::utils::handle_masp_tx( + ctx, + &shielded, + transfer.key.as_deref(), + )?; + update_masp_note_commitment_tree(&shielded)?; + } + } + + Ok(()) +} diff --git a/wasm/tx_init_account/Cargo.toml b/wasm/tx_init_account/Cargo.toml new file mode 100644 index 0000000000..560910893d --- /dev/null +++ b/wasm/tx_init_account/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "tx_init_account" +description = "WASM transaction to initialize an account" +authors.workspace = true +edition.workspace = true +license.workspace = true +version.workspace = true + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +namada_tx_prelude.workspace = true +wee_alloc.workspace = true +getrandom.workspace = true + +[lib] +crate-type = ["cdylib"] \ No newline at end of file diff --git a/wasm/tx_init_account/src/lib.rs b/wasm/tx_init_account/src/lib.rs new file mode 100644 index 0000000000..759bf2216f --- /dev/null +++ b/wasm/tx_init_account/src/lib.rs @@ -0,0 +1,54 @@ +//! A tx to initialize a new established address with a given public key and +//! a validity predicate. 
+ +use namada_tx_prelude::*; + +const HASH_LEN: usize = hash::HASH_LENGTH; + +#[transaction] +fn apply_tx(ctx: &mut Ctx, tx_data: Tx) -> TxResult { + let signed = tx_data; + let data = signed.data().ok_or_err_msg("Missing data").map_err(|err| { + ctx.set_commitment_sentinel(); + err + })?; + let tx_data = account::InitAccount::try_from_slice(&data[..]) + .wrap_err("Failed to decode InitAccount tx data")?; + debug_log!("apply_tx called to init a new established account"); + + let vp_code_sec = signed + .get_section(&tx_data.vp_code_hash) + .ok_or_err_msg("VP code section not found in tx") + .map_err(|err| { + ctx.set_commitment_sentinel(); + err + })? + .extra_data_sec() + .ok_or_err_msg("VP code section must be tagged as extra") + .map_err(|err| { + ctx.set_commitment_sentinel(); + err + })?; + + let entropy = { + let mut buffer = [0u8; HASH_LEN * 2]; + + // Add code hash as entropy + buffer[..HASH_LEN].copy_from_slice(&signed.code_sechash().0); + + // Add data hash as entropy + buffer[HASH_LEN..].copy_from_slice(&signed.data_sechash().0); + + buffer + }; + + let address = ctx + .init_account(vp_code_sec.code.hash(), &vp_code_sec.tag, &entropy) + .wrap_err("Failed to generate a new established account address")?; + + account::init_account(ctx, &address, tx_data) + .wrap_err("Account creation failed")?; + + debug_log!("Created account {address}"); + Ok(()) +} diff --git a/wasm/tx_init_proposal/Cargo.toml b/wasm/tx_init_proposal/Cargo.toml new file mode 100644 index 0000000000..5301f148b5 --- /dev/null +++ b/wasm/tx_init_proposal/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "tx_init_proposal" +description = "WASM transaction to initialize a governance proposal" +authors.workspace = true +edition.workspace = true +license.workspace = true +version.workspace = true + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +namada_tx_prelude.workspace = true +wee_alloc.workspace = true +getrandom.workspace = 
true + +[lib] +crate-type = ["cdylib"] \ No newline at end of file diff --git a/wasm/wasm_source/src/tx_init_proposal.rs b/wasm/tx_init_proposal/src/lib.rs similarity index 69% rename from wasm/wasm_source/src/tx_init_proposal.rs rename to wasm/tx_init_proposal/src/lib.rs index dd7018a399..7c34081515 100644 --- a/wasm/wasm_source/src/tx_init_proposal.rs +++ b/wasm/tx_init_proposal/src/lib.rs @@ -1,15 +1,23 @@ //! A tx to create a governance proposal. +use namada_tx_prelude::action::{Action, GovAction, Write}; use namada_tx_prelude::*; -#[transaction(gas = 969395)] +#[transaction] fn apply_tx(ctx: &mut Ctx, tx: Tx) -> TxResult { let data = tx.data().ok_or_err_msg("Missing data").map_err(|err| { ctx.set_commitment_sentinel(); err })?; let tx_data = governance::InitProposalData::try_from_slice(&data[..]) - .wrap_err("failed to decode InitProposalData")?; + .wrap_err("Failed to decode InitProposalData value")?; + + // The tx must be authorized by the author address + ctx.insert_verifier(&tx_data.author)?; + + ctx.push_action(Action::Gov(GovAction::InitProposal { + author: tx_data.author.clone(), + }))?; // Get the content from the referred to section let content = tx @@ -27,9 +35,9 @@ fn apply_tx(ctx: &mut Ctx, tx: Tx) -> TxResult { })?; // Get the code from the referred to section - let code_hash = tx_data.get_section_code_hash(); - let code = match code_hash { - Some(hash) => Some( + let code = tx_data + .get_section_code_hash() + .map(|hash| { tx.get_section(&hash) .ok_or_err_msg("Missing proposal code") .map_err(|err| { @@ -41,12 +49,13 @@ fn apply_tx(ctx: &mut Ctx, tx: Tx) -> TxResult { .map_err(|err| { ctx.set_commitment_sentinel(); err - })?, - ), - None => None, - }; + }) + }) + .transpose() + .wrap_err("Failed to retrieve proposal code")?; log_string("apply_tx called to create a new governance proposal"); governance::init_proposal(ctx, tx_data, content, code) + .wrap_err("Failed to initialize new governance proposal") } diff --git 
a/wasm/tx_reactivate_validator/Cargo.toml b/wasm/tx_reactivate_validator/Cargo.toml new file mode 100644 index 0000000000..41b795f865 --- /dev/null +++ b/wasm/tx_reactivate_validator/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "tx_reactivate_validator" +description = "WASM transaction to reactivate a validator" +authors.workspace = true +edition.workspace = true +license.workspace = true +version.workspace = true + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +namada_tx_prelude.workspace = true +wee_alloc.workspace = true +getrandom.workspace = true + +[lib] +crate-type = ["cdylib"] \ No newline at end of file diff --git a/wasm/wasm_source/src/tx_reactivate_validator.rs b/wasm/tx_reactivate_validator/src/lib.rs similarity index 67% rename from wasm/wasm_source/src/tx_reactivate_validator.rs rename to wasm/tx_reactivate_validator/src/lib.rs index 19bfd74648..ed63efa25c 100644 --- a/wasm/wasm_source/src/tx_reactivate_validator.rs +++ b/wasm/tx_reactivate_validator/src/lib.rs @@ -2,11 +2,12 @@ use namada_tx_prelude::*; -#[transaction(gas = 340000)] +#[transaction] fn apply_tx(ctx: &mut Ctx, tx_data: Tx) -> TxResult { let signed = tx_data; let data = signed.data().ok_or_err_msg("Missing data")?; let validator = Address::try_from_slice(&data[..]) - .wrap_err("failed to decode an Address")?; + .wrap_err("Failed to decode address of the validator to reactivate")?; ctx.reactivate_validator(&validator) + .wrap_err("Failed to reactivate validator") } diff --git a/wasm/tx_redelegate/Cargo.toml b/wasm/tx_redelegate/Cargo.toml new file mode 100644 index 0000000000..44d28a4a03 --- /dev/null +++ b/wasm/tx_redelegate/Cargo.toml @@ -0,0 +1,28 @@ +[package] +name = "tx_redelegate" +description = "WASM transaction to redelegate bonds" +authors.workspace = true +edition.workspace = true +license.workspace = true +version.workspace = true + +# See more keys and their definitions at 
https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +namada_tx_prelude.workspace = true +wee_alloc.workspace = true +getrandom.workspace = true + +[dev-dependencies] +namada = {path = "../../crates/namada"} +namada_tests = {path = "../../crates/tests"} +namada_test_utils = {path = "../../crates/test_utils"} +namada_vp_prelude = {path = "../../crates/vp_prelude"} + +proptest = "1.4.0" +test-log = {version = "0.2.14", default-features = false, features = ["trace"]} +tracing = "0.1.30" +tracing-subscriber = {version = "0.3.7", default-features = false, features = ["env-filter", "fmt"]} + +[lib] +crate-type = ["cdylib"] \ No newline at end of file diff --git a/wasm/wasm_source/src/tx_redelegate.rs b/wasm/tx_redelegate/src/lib.rs similarity index 97% rename from wasm/wasm_source/src/tx_redelegate.rs rename to wasm/tx_redelegate/src/lib.rs index 8aeed5612c..5176305616 100644 --- a/wasm/wasm_source/src/tx_redelegate.rs +++ b/wasm/tx_redelegate/src/lib.rs @@ -3,7 +3,7 @@ use namada_tx_prelude::*; -#[transaction(gas = 2453242)] +#[transaction] fn apply_tx(ctx: &mut Ctx, tx_data: Tx) -> TxResult { let signed = tx_data; let data = signed.data().ok_or_err_msg("Missing data").map_err(|err| { @@ -16,8 +16,9 @@ fn apply_tx(ctx: &mut Ctx, tx_data: Tx) -> TxResult { owner, amount, } = transaction::pos::Redelegation::try_from_slice(&data[..]) - .wrap_err("failed to decode a Redelegation")?; + .wrap_err("Failed to decode a Redelegation tx data")?; ctx.redelegate_tokens(&owner, &src_validator, &dest_validator, amount) + .wrap_err("Failed to redelegate tokens") } #[cfg(test)] @@ -33,7 +34,6 @@ mod tests { read_total_stake, read_validator_stake, unbond_handle, }; use namada::proof_of_stake::types::{GenesisValidator, WeightedValidator}; - use namada::validity_predicate::VpSentinel; use namada_tests::log::test; use namada_tests::native_vp::pos::init_pos; use namada_tests::native_vp::TestNativeVpEnv; @@ -366,13 +366,10 @@ mod tests { let gas_meter = 
RefCell::new(VpGasMeter::new_from_tx_meter( &tx_env.gas_meter.borrow(), )); - let sentinel = RefCell::new(VpSentinel::default()); let vp_env = TestNativeVpEnv::from_tx_env(tx_env, address::POS); - let result = vp_env.validate_tx(&gas_meter, &sentinel, PosVP::new); - let result = - result.expect("Validation of valid changes must not fail!"); + let result = vp_env.validate_tx(&gas_meter, PosVP::new); assert!( - result, + result.is_ok(), "PoS Validity predicate must accept this transaction" ); Ok(()) diff --git a/wasm/tx_resign_steward/Cargo.toml b/wasm/tx_resign_steward/Cargo.toml new file mode 100644 index 0000000000..8f6fa2424e --- /dev/null +++ b/wasm/tx_resign_steward/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "tx_resign_steward" +description = "WASM transaction to resign a PGF steward" +authors.workspace = true +edition.workspace = true +license.workspace = true +version.workspace = true + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +namada_tx_prelude.workspace = true +wee_alloc.workspace = true +getrandom.workspace = true + +[lib] +crate-type = ["cdylib"] \ No newline at end of file diff --git a/wasm/tx_resign_steward/src/lib.rs b/wasm/tx_resign_steward/src/lib.rs new file mode 100644 index 0000000000..606ea4cf6a --- /dev/null +++ b/wasm/tx_resign_steward/src/lib.rs @@ -0,0 +1,29 @@ +//! 
A tx to resign as a steward + +use namada_tx_prelude::action::{Action, PgfAction, Write}; +use namada_tx_prelude::*; + +#[transaction] +fn apply_tx(ctx: &mut Ctx, tx_data: Tx) -> TxResult { + let signed = tx_data; + let data = signed.data().ok_or_err_msg("Missing data").map_err(|err| { + ctx.set_commitment_sentinel(); + err + })?; + let steward_address = Address::try_from_slice(&data[..]).wrap_err( + "Failed to decode the address of the PGF steward to remove", + )?; + + // The tx must be authorized by the source address + ctx.insert_verifier(&steward_address)?; + + ctx.push_action(Action::Pgf(PgfAction::ResignSteward( + steward_address.clone(), + )))?; + + pgf::remove_steward(ctx, &steward_address) + .wrap_err("Failed to remove PGF steward")?; + debug_log!("Removed PGF steward {steward_address}"); + + Ok(()) +} diff --git a/wasm/tx_reveal_pk/Cargo.toml b/wasm/tx_reveal_pk/Cargo.toml new file mode 100644 index 0000000000..236862f2c3 --- /dev/null +++ b/wasm/tx_reveal_pk/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "tx_reveal_pk" +description = "WASM transaction to update reveal pk" +authors.workspace = true +edition.workspace = true +license.workspace = true +version.workspace = true + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +namada_tx_prelude.workspace = true +wee_alloc.workspace = true +getrandom.workspace = true + +[lib] +crate-type = ["cdylib"] diff --git a/wasm/wasm_source/src/tx_reveal_pk.rs b/wasm/tx_reveal_pk/src/lib.rs similarity index 80% rename from wasm/wasm_source/src/tx_reveal_pk.rs rename to wasm/tx_reveal_pk/src/lib.rs index f5a23d0e1f..fa4ce61b47 100644 --- a/wasm/wasm_source/src/tx_reveal_pk.rs +++ b/wasm/tx_reveal_pk/src/lib.rs @@ -6,7 +6,7 @@ use namada_tx_prelude::key::common; use namada_tx_prelude::*; -#[transaction(gas = 919818)] +#[transaction] fn apply_tx(ctx: &mut Ctx, tx_data: Tx) -> TxResult { let signed = tx_data; let data = signed.data().ok_or_err_msg("Missing 
data").map_err(|err| { @@ -14,7 +14,8 @@ fn apply_tx(ctx: &mut Ctx, tx_data: Tx) -> TxResult { err })?; let pk = common::PublicKey::try_from_slice(&data[..]) - .wrap_err("failed to decode common::PublicKey from tx_data")?; + .wrap_err("Failed to decode public key to reveal from the tx data")?; debug_log!("tx_reveal_pk called with pk: {pk}"); key::reveal_pk(ctx, &pk) + .wrap_err("Failed to reveal the implicit account's public key") } diff --git a/wasm/tx_template/.gitignore b/wasm/tx_template/.gitignore deleted file mode 100644 index 3df254e13b..0000000000 --- a/wasm/tx_template/.gitignore +++ /dev/null @@ -1,10 +0,0 @@ -# Built wasm module -tx.wasm - -# Generated by Cargo -# will have compiled files and executables -debug/ -target/ - -# These are backup files generated by rustfmt -**/*.rs.bk \ No newline at end of file diff --git a/wasm/tx_template/Cargo.toml b/wasm/tx_template/Cargo.toml deleted file mode 100644 index fd65409057..0000000000 --- a/wasm/tx_template/Cargo.toml +++ /dev/null @@ -1,18 +0,0 @@ -[package] -authors = ["Heliax AG "] -edition = "2021" -license = "GPL-3.0" -name = "tx_template" -resolver = "2" -version = "0.32.1" - -[lib] -crate-type = ["cdylib"] - -[dependencies] -namada_tx_prelude = {path = "../../crates/tx_prelude"} -wee_alloc = "0.4.5" -getrandom = { version = "0.2", features = ["custom"] } - -[dev-dependencies] -namada_tests = {path = "../../crates/tests"} diff --git a/wasm/tx_template/Makefile b/wasm/tx_template/Makefile deleted file mode 100644 index 02e33ac6d8..0000000000 --- a/wasm/tx_template/Makefile +++ /dev/null @@ -1,21 +0,0 @@ -cargo = $(env) cargo -rustup = $(env) rustup - -# Linker flag "-s" for stripping (https://github.com/rust-lang/cargo/issues/3483#issuecomment-431209957) -build-release: - # wasm is built into target/wasm32-unknown-unknown/release - RUSTFLAGS='-C link-arg=-s' $(cargo) build --release --target wasm32-unknown-unknown - -build: - $(cargo) build --target wasm32-unknown-unknown - -watch: - $(cargo) watch - 
-clean: - $(cargo) clean && if [ -e $(wasm) ]; then rm $(wasm); fi - -deps: - $(rustup) target add wasm32-unknown-unknown - -.PHONY : build-release build watch clean deps diff --git a/wasm/tx_template/README.md b/wasm/tx_template/README.md deleted file mode 100644 index 79d163a114..0000000000 --- a/wasm/tx_template/README.md +++ /dev/null @@ -1,13 +0,0 @@ -# Transaction script wasm template - -This is a template of a wasm module for transaction script. - -## Quick start - -```shell -# To be able to build this, make sure to have -make deps - -# Build - this will create `.wasm` file -make build-release -``` diff --git a/wasm/tx_template/src/lib.rs b/wasm/tx_template/src/lib.rs deleted file mode 100644 index 4bedb51ecb..0000000000 --- a/wasm/tx_template/src/lib.rs +++ /dev/null @@ -1,28 +0,0 @@ -use namada_tx_prelude::*; - -#[transaction(gas = 1000)] -fn apply_tx(_ctx: &mut Ctx, tx_data: Tx) -> TxResult { - log_string(format!("apply_tx called with data: {:#?}", tx_data)); - Ok(()) -} - -#[cfg(test)] -mod tests { - use namada_tests::tx::*; - - use super::*; - - /// An example test, checking that this transaction performs no storage - /// modifications. 
- #[test] - fn test_no_op_transaction() { - // The environment must be initialized first - tx_host_env::init(); - - let tx = Tx::from_type(TxType::Raw); - apply_tx(ctx(), tx).unwrap(); - - let env = tx_host_env::take(); - assert!(env.all_touched_storage_keys().is_empty()); - } -} diff --git a/wasm/tx_transfer/Cargo.toml b/wasm/tx_transfer/Cargo.toml new file mode 100644 index 0000000000..593c7f35cd --- /dev/null +++ b/wasm/tx_transfer/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "tx_transfer" +description = "WASM transaction to transfer tokens" +authors.workspace = true +edition.workspace = true +license.workspace = true +version.workspace = true + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +namada_tx_prelude.workspace = true +wee_alloc.workspace = true +getrandom.workspace = true + +[lib] +crate-type = ["cdylib"] \ No newline at end of file diff --git a/wasm/wasm_source/src/tx_transfer.rs b/wasm/tx_transfer/src/lib.rs similarity index 68% rename from wasm/wasm_source/src/tx_transfer.rs rename to wasm/tx_transfer/src/lib.rs index 7744c5767d..c2b8baa568 100644 --- a/wasm/wasm_source/src/tx_transfer.rs +++ b/wasm/tx_transfer/src/lib.rs @@ -4,7 +4,7 @@ use namada_tx_prelude::*; -#[transaction(gas = 1703358)] +#[transaction] fn apply_tx(ctx: &mut Ctx, tx_data: Tx) -> TxResult { let signed = tx_data; let data = signed.data().ok_or_err_msg("Missing data").map_err(|err| { @@ -12,7 +12,7 @@ fn apply_tx(ctx: &mut Ctx, tx_data: Tx) -> TxResult { err })?; let transfer = token::Transfer::try_from_slice(&data[..]) - .wrap_err("failed to decode token::Transfer")?; + .wrap_err("Failed to decode token::Transfer tx data")?; debug_log!("apply_tx called with transfer: {:#?}", transfer); token::transfer( @@ -20,8 +20,9 @@ fn apply_tx(ctx: &mut Ctx, tx_data: Tx) -> TxResult { &transfer.source, &transfer.target, &transfer.token, - transfer.amount, - )?; + transfer.amount.amount(), + ) + .wrap_err("Token transfer 
failed")?; let shielded = transfer .shielded @@ -30,7 +31,9 @@ fn apply_tx(ctx: &mut Ctx, tx_data: Tx) -> TxResult { signed .get_section(hash) .and_then(|x| x.as_ref().masp_tx()) - .ok_or_err_msg("unable to find shielded section") + .ok_or_err_msg( + "Unable to find required shielded section in tx data", + ) .map_err(|err| { ctx.set_commitment_sentinel(); err @@ -38,8 +41,10 @@ fn apply_tx(ctx: &mut Ctx, tx_data: Tx) -> TxResult { }) .transpose()?; if let Some(shielded) = shielded { - token::utils::handle_masp_tx(ctx, &shielded, transfer.key.as_deref())?; - update_masp_note_commitment_tree(&shielded)?; + token::utils::handle_masp_tx(ctx, &shielded, transfer.key.as_deref()) + .wrap_err("Encountered error while handling MASP transaction")?; + update_masp_note_commitment_tree(&shielded) + .wrap_err("Failed to update the MASP commitment tree")?; } Ok(()) } diff --git a/wasm/tx_unbond/Cargo.toml b/wasm/tx_unbond/Cargo.toml new file mode 100644 index 0000000000..95e3e032eb --- /dev/null +++ b/wasm/tx_unbond/Cargo.toml @@ -0,0 +1,28 @@ +[package] +name = "tx_unbond" +description = "WASM transaction to unbond tokens" +authors.workspace = true +edition.workspace = true +license.workspace = true +version.workspace = true + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +namada_tx_prelude.workspace = true +wee_alloc.workspace = true +getrandom.workspace = true + +[dev-dependencies] +namada = {path = "../../crates/namada"} +namada_tests = {path = "../../crates/tests"} +namada_test_utils = {path = "../../crates/test_utils"} +namada_vp_prelude = {path = "../../crates/vp_prelude"} + +proptest = "1.4.0" +test-log = {version = "0.2.14", default-features = false, features = ["trace"]} +tracing = "0.1.30" +tracing-subscriber = {version = "0.3.7", default-features = false, features = ["env-filter", "fmt"]} + +[lib] +crate-type = ["cdylib"] \ No newline at end of file diff --git a/wasm/wasm_source/src/tx_unbond.rs 
b/wasm/tx_unbond/src/lib.rs similarity index 96% rename from wasm/wasm_source/src/tx_unbond.rs rename to wasm/tx_unbond/src/lib.rs index b1662649d1..907de5a5c9 100644 --- a/wasm/wasm_source/src/tx_unbond.rs +++ b/wasm/tx_unbond/src/lib.rs @@ -3,7 +3,7 @@ use namada_tx_prelude::*; -#[transaction(gas = 2645941)] +#[transaction] fn apply_tx(ctx: &mut Ctx, tx_data: Tx) -> TxResult { let signed = tx_data; let data = signed.data().ok_or_err_msg("Missing data").map_err(|err| { @@ -11,14 +11,12 @@ fn apply_tx(ctx: &mut Ctx, tx_data: Tx) -> TxResult { err })?; let unbond = transaction::pos::Unbond::try_from_slice(&data[..]) - .wrap_err("failed to decode Unbond")?; + .wrap_err("Failed to decode Unbond tx data")?; - ctx.unbond_tokens( - unbond.source.as_ref(), - &unbond.validator, - unbond.amount, - )?; - // TODO: would using debug_log! be useful? + ctx.unbond_tokens(unbond.source.as_ref(), &unbond.validator, unbond.amount) + .wrap_err("Failed to unbond tokens")?; + + debug_log!("Unbonded {} from {}", unbond.amount, unbond.validator); Ok(()) } @@ -36,7 +34,6 @@ mod tests { read_total_stake, read_validator_stake, unbond_handle, }; use namada::proof_of_stake::types::{GenesisValidator, WeightedValidator}; - use namada::validity_predicate::VpSentinel; use namada_tests::log::test; use namada_tests::native_vp::pos::init_pos; use namada_tests::native_vp::TestNativeVpEnv; @@ -344,13 +341,10 @@ mod tests { let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &tx_env.gas_meter.borrow(), )); - let sentinel = RefCell::new(VpSentinel::default()); let vp_env = TestNativeVpEnv::from_tx_env(tx_env, address::POS); - let result = vp_env.validate_tx(&gas_meter, &sentinel, PosVP::new); - let result = - result.expect("Validation of valid changes must not fail!"); + let result = vp_env.validate_tx(&gas_meter, PosVP::new); assert!( - result, + result.is_ok(), "PoS Validity predicate must accept this transaction" ); Ok(()) diff --git a/wasm/tx_unjail_validator/Cargo.toml 
b/wasm/tx_unjail_validator/Cargo.toml new file mode 100644 index 0000000000..74a337ff95 --- /dev/null +++ b/wasm/tx_unjail_validator/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "tx_unjail_validator" +description = "WASM transaction to unjail a validator" +authors.workspace = true +edition.workspace = true +license.workspace = true +version.workspace = true + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +namada_tx_prelude.workspace = true +wee_alloc.workspace = true +getrandom.workspace = true + +[lib] +crate-type = ["cdylib"] \ No newline at end of file diff --git a/wasm/wasm_source/src/tx_unjail_validator.rs b/wasm/tx_unjail_validator/src/lib.rs similarity index 75% rename from wasm/wasm_source/src/tx_unjail_validator.rs rename to wasm/tx_unjail_validator/src/lib.rs index 7a11152876..8138458435 100644 --- a/wasm/wasm_source/src/tx_unjail_validator.rs +++ b/wasm/tx_unjail_validator/src/lib.rs @@ -3,7 +3,7 @@ use namada_tx_prelude::*; -#[transaction(gas = 1641054)] +#[transaction] fn apply_tx(ctx: &mut Ctx, tx_data: Tx) -> TxResult { let signed = tx_data; let data = signed.data().ok_or_err_msg("Missing data").map_err(|err| { @@ -11,6 +11,7 @@ fn apply_tx(ctx: &mut Ctx, tx_data: Tx) -> TxResult { err })?; let validator = Address::try_from_slice(&data[..]) - .wrap_err("failed to decode an Address")?; + .wrap_err("Failed to decode the address of the validator to unjail")?; ctx.unjail_validator(&validator) + .wrap_err("Failed to unjail validator") } diff --git a/wasm/tx_update_account/Cargo.toml b/wasm/tx_update_account/Cargo.toml new file mode 100644 index 0000000000..acae190b7a --- /dev/null +++ b/wasm/tx_update_account/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "tx_update_account" +description = "WASM transaction to update an account" +authors.workspace = true +edition.workspace = true +license.workspace = true +version.workspace = true + +# See more keys and their definitions at 
https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +namada_tx_prelude.workspace = true +wee_alloc.workspace = true +getrandom.workspace = true + +[lib] +crate-type = ["cdylib"] \ No newline at end of file diff --git a/wasm/wasm_source/src/tx_update_account.rs b/wasm/tx_update_account/src/lib.rs similarity index 63% rename from wasm/wasm_source/src/tx_update_account.rs rename to wasm/tx_update_account/src/lib.rs index 86038ab489..dc4268beab 100644 --- a/wasm/wasm_source/src/tx_update_account.rs +++ b/wasm/tx_update_account/src/lib.rs @@ -4,7 +4,7 @@ use namada_tx_prelude::*; -#[transaction(gas = 968137)] +#[transaction] fn apply_tx(ctx: &mut Ctx, tx: Tx) -> TxResult { let signed = tx; let data = signed.data().ok_or_err_msg("Missing data").map_err(|err| { @@ -12,21 +12,24 @@ fn apply_tx(ctx: &mut Ctx, tx: Tx) -> TxResult { err })?; let tx_data = account::UpdateAccount::try_from_slice(&data[..]) - .wrap_err("failed to decode UpdateAccount")?; + .wrap_err("Failed to decode UpdateAccount tx data")?; let owner = &tx_data.addr; debug_log!("update VP for: {:#?}", tx_data.addr); + // The tx must be authorized by the source address + ctx.insert_verifier(owner)?; + if let Some(hash) = tx_data.vp_code_hash { let vp_code_sec = signed .get_section(&hash) - .ok_or_err_msg("vp code section not found") + .ok_or_err_msg("VP code section not found") .map_err(|err| { ctx.set_commitment_sentinel(); err })? 
.extra_data_sec() - .ok_or_err_msg("vp code section must be tagged as extra") + .ok_or_err_msg("VP code section must be tagged as extra") .map_err(|err| { ctx.set_commitment_sentinel(); err @@ -36,23 +39,24 @@ fn apply_tx(ctx: &mut Ctx, tx: Tx) -> TxResult { owner, vp_code_sec.code.hash(), &vp_code_sec.tag, - )?; + ) + .wrap_err("Failed to update the account's validity predicate")?; } if let Some(threshold) = tx_data.threshold { let threshold_key = account::threshold_key(owner); - ctx.write(&threshold_key, threshold)?; + ctx.write(&threshold_key, threshold) + .wrap_err("Failed to update the account's signing threshold")?; } if !tx_data.public_keys.is_empty() { - account::clear_public_keys(ctx, owner)?; + account::clear_public_keys(ctx, owner) + .wrap_err("Failed to reset the account's public keys")?; for (index, public_key) in tx_data.public_keys.iter().enumerate() { let index = index as u8; - account::pks_handle(owner).insert( - ctx, - index, - public_key.clone(), - )?; + account::pks_handle(owner) + .insert(ctx, index, public_key.clone()) + .wrap_err("Failed to update the public keys of the account")?; } } diff --git a/wasm/tx_update_steward_commission/Cargo.toml b/wasm/tx_update_steward_commission/Cargo.toml new file mode 100644 index 0000000000..f05be319a6 --- /dev/null +++ b/wasm/tx_update_steward_commission/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "tx_update_steward_commission" +description = "WASM transaction to update steward commissions" +authors.workspace = true +edition.workspace = true +license.workspace = true +version.workspace = true + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +namada_tx_prelude.workspace = true +wee_alloc.workspace = true +getrandom.workspace = true + +[lib] +crate-type = ["cdylib"] \ No newline at end of file diff --git a/wasm/tx_update_steward_commission/src/lib.rs b/wasm/tx_update_steward_commission/src/lib.rs new file mode 100644 index 
0000000000..5c24f764b5 --- /dev/null +++ b/wasm/tx_update_steward_commission/src/lib.rs @@ -0,0 +1,28 @@ +//! A tx to update the commission distribution for a steward + +use namada_tx_prelude::action::{Action, PgfAction, Write}; +use namada_tx_prelude::transaction::pgf::UpdateStewardCommission; +use namada_tx_prelude::*; + +#[transaction] +fn apply_tx(ctx: &mut Ctx, tx_data: Tx) -> TxResult { + let signed = tx_data; + let data = signed.data().ok_or_err_msg("Missing data").map_err(|err| { + ctx.set_commitment_sentinel(); + err + })?; + let steward_commission = UpdateStewardCommission::try_from_slice(&data[..]) + .wrap_err("Failed to decode an UpdateStewardCommission tx data")?; + + // The tx must be authorized by the source address + ctx.insert_verifier(&steward_commission.steward)?; + + ctx.push_action(Action::Pgf(PgfAction::UpdateStewardCommission( + steward_commission.steward.clone(), + )))?; + + pgf::update_steward_commission(ctx, steward_commission) + .wrap_err("Failed to update steward commission rate")?; + + Ok(()) +} diff --git a/wasm/tx_vote_proposal/Cargo.toml b/wasm/tx_vote_proposal/Cargo.toml new file mode 100644 index 0000000000..4f2f877287 --- /dev/null +++ b/wasm/tx_vote_proposal/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "tx_vote_proposal" +description = "WASM transaction to vote on a governance proposal" +authors.workspace = true +edition.workspace = true +license.workspace = true +version.workspace = true + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +namada_tx_prelude.workspace = true +wee_alloc.workspace = true +getrandom.workspace = true + +[lib] +crate-type = ["cdylib"] \ No newline at end of file diff --git a/wasm/wasm_source/src/tx_vote_proposal.rs b/wasm/tx_vote_proposal/src/lib.rs similarity index 52% rename from wasm/wasm_source/src/tx_vote_proposal.rs rename to wasm/tx_vote_proposal/src/lib.rs index bd68cd0c73..0f5e92803c 100644 --- 
a/wasm/wasm_source/src/tx_vote_proposal.rs +++ b/wasm/tx_vote_proposal/src/lib.rs @@ -1,8 +1,9 @@ //! A tx to vote on a proposal +use namada_tx_prelude::action::{Action, GovAction, Write}; use namada_tx_prelude::*; -#[transaction(gas = 840866)] +#[transaction] fn apply_tx(ctx: &mut Ctx, tx_data: Tx) -> TxResult { let signed = tx_data; let data = signed.data().ok_or_err_msg("Missing data").map_err(|err| { @@ -10,9 +11,18 @@ fn apply_tx(ctx: &mut Ctx, tx_data: Tx) -> TxResult { err })?; let tx_data = governance::VoteProposalData::try_from_slice(&data[..]) - .wrap_err("failed to decode VoteProposalData")?; + .wrap_err("Failed to decode VoteProposalData value")?; + + // The tx must be authorized by the source address + ctx.insert_verifier(&tx_data.voter)?; + + ctx.push_action(Action::Gov(GovAction::VoteProposal { + id: tx_data.id, + voter: tx_data.voter.clone(), + }))?; debug_log!("apply_tx called to vote a governance proposal"); governance::vote_proposal(ctx, tx_data) + .wrap_err("Failed to vote on governance proposal") } diff --git a/wasm/tx_withdraw/Cargo.toml b/wasm/tx_withdraw/Cargo.toml new file mode 100644 index 0000000000..4ef163017f --- /dev/null +++ b/wasm/tx_withdraw/Cargo.toml @@ -0,0 +1,28 @@ +[package] +name = "tx_withdraw" +description = "WASM transaction to withdraw tokens" +authors.workspace = true +edition.workspace = true +license.workspace = true +version.workspace = true + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +namada_tx_prelude.workspace = true +wee_alloc.workspace = true +getrandom.workspace = true + +[dev-dependencies] +namada = {path = "../../crates/namada"} +namada_tests = {path = "../../crates/tests"} +namada_test_utils = {path = "../../crates/test_utils"} +namada_vp_prelude = {path = "../../crates/vp_prelude"} + +proptest = "1.4.0" +test-log = {version = "0.2.14", default-features = false, features = ["trace"]} +tracing = "0.1.30" +tracing-subscriber = {version = 
"0.3.7", default-features = false, features = ["env-filter", "fmt"]} + +[lib] +crate-type = ["cdylib"] \ No newline at end of file diff --git a/wasm/wasm_source/src/tx_withdraw.rs b/wasm/tx_withdraw/src/lib.rs similarity index 95% rename from wasm/wasm_source/src/tx_withdraw.rs rename to wasm/tx_withdraw/src/lib.rs index 824ceafc1d..3d98716904 100644 --- a/wasm/wasm_source/src/tx_withdraw.rs +++ b/wasm/tx_withdraw/src/lib.rs @@ -3,7 +3,7 @@ use namada_tx_prelude::*; -#[transaction(gas = 1119469)] +#[transaction] fn apply_tx(ctx: &mut Ctx, tx_data: Tx) -> TxResult { let signed = tx_data; let data = signed.data().ok_or_err_msg("Missing data").map_err(|err| { @@ -11,10 +11,11 @@ fn apply_tx(ctx: &mut Ctx, tx_data: Tx) -> TxResult { err })?; let withdraw = transaction::pos::Withdraw::try_from_slice(&data[..]) - .wrap_err("failed to decode Withdraw")?; + .wrap_err("Failed to decode Withdraw tx data")?; - let slashed = - ctx.withdraw_tokens(withdraw.source.as_ref(), &withdraw.validator)?; + let slashed = ctx + .withdraw_tokens(withdraw.source.as_ref(), &withdraw.validator) + .wrap_err("Failed to withdraw tokens")?; if !slashed.is_zero() { debug_log!("New withdrawal slashed for {}", slashed.to_string_native()); } @@ -30,7 +31,6 @@ mod tests { use namada::ledger::pos::{OwnedPosParams, PosVP}; use namada::proof_of_stake::storage::unbond_handle; use namada::proof_of_stake::types::GenesisValidator; - use namada::validity_predicate::VpSentinel; use namada_tests::log::test; use namada_tests::native_vp::pos::init_pos; use namada_tests::native_vp::TestNativeVpEnv; @@ -229,13 +229,10 @@ mod tests { let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &tx_env.gas_meter.borrow(), )); - let sentinel = RefCell::new(VpSentinel::default()); let vp_env = TestNativeVpEnv::from_tx_env(tx_env, address::POS); - let result = vp_env.validate_tx(&gas_meter, &sentinel, PosVP::new); - let result = - result.expect("Validation of valid changes must not fail!"); + let result = 
vp_env.validate_tx(&gas_meter, PosVP::new); assert!( - result, + result.is_ok(), "PoS Validity predicate must accept this transaction" ); Ok(()) diff --git a/wasm/vp_implicit/Cargo.toml b/wasm/vp_implicit/Cargo.toml new file mode 100644 index 0000000000..89e1b54348 --- /dev/null +++ b/wasm/vp_implicit/Cargo.toml @@ -0,0 +1,29 @@ +[package] +name = "vp_implicit" +description = "Implicit account valididity predicate." +authors.workspace = true +edition.workspace = true +license.workspace = true +version.workspace = true + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +namada_tx_prelude.workspace = true +namada_vp_prelude.workspace = true +wee_alloc.workspace = true +getrandom.workspace = true + +[dev-dependencies] +namada = {path = "../../crates/namada"} +namada_tests = {path = "../../crates/tests"} +namada_test_utils = {path = "../../crates/test_utils"} +namada_vp_prelude = {path = "../../crates/vp_prelude"} + +proptest = "1.4.0" +test-log = {version = "0.2.14", default-features = false, features = ["trace"]} +tracing = "0.1.30" +tracing-subscriber = {version = "0.3.7", default-features = false, features = ["env-filter", "fmt"]} + +[lib] +crate-type = ["cdylib"] \ No newline at end of file diff --git a/wasm/wasm_source/src/vp_implicit.rs b/wasm/vp_implicit/src/lib.rs similarity index 78% rename from wasm/wasm_source/src/vp_implicit.rs rename to wasm/vp_implicit/src/lib.rs index 6ac6a9d735..2b876e2072 100644 --- a/wasm/wasm_source/src/vp_implicit.rs +++ b/wasm/vp_implicit/src/lib.rs @@ -11,65 +11,14 @@ //! //! Any other storage key changes are allowed only with a valid signature. 
-use core::ops::Deref; - +use booleans::BoolResultUnitExt; +use namada_vp_prelude::tx::action::*; use namada_vp_prelude::*; -use once_cell::unsync::Lazy; - -enum KeyType<'a> { - /// Public key - written once revealed - Pk(&'a Address), - TokenBalance { - owner: &'a Address, - }, - TokenMinted, - TokenMinter(&'a Address), - PoS, - Masp, - PgfSteward(&'a Address), - GovernanceVote(&'a Address), - Ibc, - Unknown, -} - -impl<'a> From<&'a storage::Key> for KeyType<'a> { - fn from(key: &'a storage::Key) -> KeyType<'a> { - if let Some(address) = account::is_pks_key(key) { - Self::Pk(address) - } else if let Some([_, owner]) = - token::storage_key::is_any_token_balance_key(key) - { - Self::TokenBalance { owner } - } else if token::storage_key::is_any_minted_balance_key(key).is_some() { - Self::TokenMinted - } else if let Some(minter) = token::storage_key::is_any_minter_key(key) - { - Self::TokenMinter(minter) - } else if proof_of_stake::storage_key::is_pos_key(key) { - Self::PoS - } else if let Some(address) = pgf_storage::keys::is_stewards_key(key) { - Self::PgfSteward(address) - } else if gov_storage::keys::is_vote_key(key) { - let voter_address = gov_storage::keys::get_voter_address(key); - if let Some(address) = voter_address { - Self::GovernanceVote(address) - } else { - Self::Unknown - } - } else if token::storage_key::is_masp_key(key) { - Self::Masp - } else if ibc::is_ibc_key(key) { - Self::Ibc - } else { - Self::Unknown - } - } -} #[validity_predicate(gas = 118452)] fn validate_tx( ctx: &Ctx, - tx_data: Tx, + tx: Tx, addr: Address, keys_changed: BTreeSet, verifiers: BTreeSet
, @@ -81,59 +30,139 @@ fn validate_tx( verifiers ); - let valid_sig = Lazy::new(|| { - matches!(verify_signatures(ctx, &tx_data, &addr), Ok(true)) - }); + // Check if this is a governance proposal first + let is_gov_proposal = tx + .data() + .and_then(|tx_data| { + let proposal_id = u64::try_from_slice(&tx_data).ok()?; + Some(is_proposal_accepted(ctx, proposal_id)) + }) + .transpose()? + .unwrap_or(false); + if is_gov_proposal { + // Any change from governance is allowed without further checks + return Ok(()); + } + + let mut gadget = VerifySigGadget::new(); + + // Find the actions applied in the tx + let actions = ctx.read_actions().into_vp_error()?; + + // Require authorization by signature when the source of an action is this + // VP's address + for action in actions { + match action { + Action::Pos(pos_action) => match pos_action { + PosAction::BecomeValidator(source) + | PosAction::DeactivateValidator(source) + | PosAction::ReactivateValidator(source) + | PosAction::Unjail(source) + | PosAction::CommissionChange(source) + | PosAction::MetadataChange(source) + | PosAction::ConsensusKeyChange(source) + | PosAction::Redelegation(Redelegation { + owner: source, .. + }) => gadget.verify_signatures_when( + || source == addr, + ctx, + &tx, + &addr, + )?, + PosAction::Bond(Bond { + source, validator, .. + }) + | PosAction::Unbond(Unbond { + source, validator, .. + }) + | PosAction::Withdraw(Withdraw { source, validator }) + | PosAction::ClaimRewards(ClaimRewards { validator, source }) => + { + let source = source.unwrap_or(validator); + gadget.verify_signatures_when( + || source == addr, + ctx, + &tx, + &addr, + )? + } + }, + Action::Gov( + GovAction::InitProposal { author: source } + | GovAction::VoteProposal { voter: source, .. 
}, + ) + | Action::Pgf( + PgfAction::ResignSteward(source) + | PgfAction::UpdateStewardCommission(source), + ) => gadget.verify_signatures_when( + || source == addr, + ctx, + &tx, + &addr, + )?, + } + } - for key in keys_changed.iter() { + keys_changed.iter().try_for_each(|key| { let key_type: KeyType = key.into(); - let is_valid = match key_type { + let mut validate_change = || match key_type { KeyType::Pk(owner) => { if owner == &addr { - if ctx.has_key_pre(key)? { - // If the PK is already reveal, reject the tx - return reject(); - } - let post: Option = - ctx.read_post(key)?; - match post { - Some(pk) => { + let key_was_not_already_revealed = + !ctx.has_key_pre(key).into_vp_error()?; + key_was_not_already_revealed.ok_or_else(|| { + VpError::Erased(format!( + "Public key of {addr} has already been revealed" + )) + })?; + + let pubkey_in_storage = + ctx.read_post(key).into_vp_error()?; + pubkey_in_storage.map_or_else( + || { + Err(VpError::Erased( + "Public keys that have been revealed cannot \ + be deleted" + .into(), + )) + }, + |pk: key::common::PublicKey| { let addr_from_pk: Address = (&pk).into(); + let pk_derived_addr_is_correct = + addr_from_pk == addr; + // Check that address matches with the address // derived from the PK - if addr_from_pk != addr { - return reject(); - } - } - None => { - // Revealed PK cannot be deleted - return reject(); - } - } + pk_derived_addr_is_correct.ok_or_else(|| { + VpError::Erased(format!( + "The address derived from the revealed \ + public key {addr_from_pk} does not match \ + the implicit account's address {addr}" + )) + }) + }, + )?; } - true + Ok(()) } KeyType::TokenBalance { owner, .. 
} => { if owner == &addr { let pre: token::Amount = - ctx.read_pre(key)?.unwrap_or_default(); + ctx.read_pre(key).into_vp_error()?.unwrap_or_default(); let post: token::Amount = - ctx.read_post(key)?.unwrap_or_default(); + ctx.read_post(key).into_vp_error()?.unwrap_or_default(); let change = post.change() - pre.change(); - // debit has to signed, credit doesn't - let valid = change.non_negative() || *valid_sig; + gadget.verify_signatures_when( + // NB: debit has to signed, credit doesn't + || change.is_negative(), + ctx, + &tx, + &addr, + )?; let sign = if change.non_negative() { "" } else { "-" }; - debug_log!( - "token key: {}, change: {}{:?}, valid_sig: {}, valid \ - modification: {}", - key, - sign, - change, - *valid_sig, - valid - ); - valid + debug_log!("token key: {key}, change: {sign}{change:?}"); } else { + // If this is not the owner, allow any change debug_log!( "This address ({}) is not of owner ({}) of token key: \ {}", @@ -141,156 +170,84 @@ fn validate_tx( owner, key ); - // If this is not the owner, allow any change - true } + Ok(()) } - KeyType::TokenMinted => verifiers.contains(&address::MULTITOKEN), - KeyType::TokenMinter(minter) => minter != &addr || *valid_sig, - KeyType::PoS => validate_pos_changes(ctx, &addr, key, &valid_sig)?, - KeyType::PgfSteward(address) => address != &addr || *valid_sig, - KeyType::GovernanceVote(voter) => voter != &addr || *valid_sig, - KeyType::Masp | KeyType::Ibc => true, + KeyType::TokenMinted => { + verifiers.contains(&address::MULTITOKEN).ok_or_else(|| { + VpError::Erased( + "The Multitoken VP should have been a verifier for \ + this transaction, since a token was minted" + .into(), + ) + }) + } + KeyType::TokenMinter(minter_addr) => gadget.verify_signatures_when( + || minter_addr == &addr, + ctx, + &tx, + &addr, + ), + KeyType::Masp | KeyType::Ibc => Ok(()), KeyType::Unknown => { // Unknown changes require a valid signature - *valid_sig + gadget.verify_signatures(ctx, &tx, &addr) } }; - if !is_valid { - 
log_string(format!("key {} modification failed vp_implicit", key)); - return reject(); - } - } - - accept() + validate_change().inspect_err(|reason| { + log_string(format!( + "Modification on key {key} failed vp_implicit: {reason}" + )); + }) + }) } -fn validate_pos_changes( - ctx: &Ctx, - owner: &Address, - key: &storage::Key, - valid_sig: &impl Deref, -) -> VpResult { - use proof_of_stake::{storage, storage_key}; - - // Bond or unbond - let is_valid_bond_or_unbond_change = || { - let bond_id = storage_key::is_bond_key(key) - .map(|(bond_id, _)| bond_id) - .or_else(|| storage_key::is_bond_epoched_meta_key(key)) - .or_else(|| { - storage_key::is_unbond_key(key).map(|(bond_id, _, _)| bond_id) - }); - if let Some(bond_id) = bond_id { - // Bonds and unbonds changes for this address must be signed - return &bond_id.source != owner || **valid_sig; - }; - // Unknown changes are not allowed - false - }; - - // Changes in validator state - let is_valid_state_change = || { - let state_change = storage_key::is_validator_state_key(key); - let is_valid_state = - match state_change { - Some((address, epoch)) => { - let params_pre = storage::read_pos_params(&ctx.pre())?; - let state_pre = storage::validator_state_handle(address) - .get(&ctx.pre(), epoch, ¶ms_pre)?; - - let params_post = storage::read_pos_params(&ctx.post())?; - let state_post = storage::validator_state_handle(address) - .get(&ctx.post(), epoch, ¶ms_post)?; - - match (state_pre, state_post) { - (Some(pre), Some(post)) => { - use proof_of_stake::types::ValidatorState::*; - - // Bonding and unbonding may affect validator sets - if matches!( - pre, - Consensus | BelowCapacity | BelowThreshold - ) && matches!( - post, - Consensus | BelowCapacity | BelowThreshold - ) { - true - } else { - // Unknown state changes are not allowed - false - } - } - (Some(_pre), None) => { - // Clearing of old epoched data - true - } - _ => false, - } - } - None => false, - }; - - VpResult::Ok( - is_valid_state - || 
storage_key::is_validator_state_epoched_meta_key(key) - || storage_key::is_consensus_validator_set_key(key) - || storage_key::is_below_capacity_validator_set_key(key), - ) - }; +enum KeyType<'a> { + /// Public key - written once revealed + Pk(&'a Address), + TokenBalance { + owner: &'a Address, + }, + TokenMinted, + TokenMinter(&'a Address), + Masp, + Ibc, + Unknown, +} - let is_valid_reward_claim = || { - if let Some(bond_id) = - storage_key::is_last_pos_reward_claim_epoch_key(key) +impl<'a> From<&'a storage::Key> for KeyType<'a> { + fn from(key: &'a storage::Key) -> KeyType<'a> { + if let Some(address) = account::is_pks_key(key) { + Self::Pk(address) + } else if let Some([_, owner]) = + token::storage_key::is_any_token_balance_key(key) { - // Claims for this address must be signed - return &bond_id.source != owner || **valid_sig; - } - if let Some(bond_id) = storage_key::is_rewards_counter_key(key) { - // Redelegations auto-claim rewards - return &bond_id.source != owner || **valid_sig; - } - - false - }; - - let is_valid_redelegation = || { - if storage_key::is_validator_redelegations_key(key) { - return true; - } - if let Some(delegator) = - storage_key::is_delegator_redelegations_key(key) + Self::TokenBalance { owner } + } else if token::storage_key::is_any_minted_balance_key(key).is_some() { + Self::TokenMinted + } else if let Some(minter) = token::storage_key::is_any_minter_key(key) { - // Redelegations for this address must be signed - return delegator != owner || **valid_sig; - } - if let Some(bond_id) = storage_key::is_rewards_counter_key(key) { - // Redelegations auto-claim rewards - return &bond_id.source != owner || **valid_sig; + Self::TokenMinter(minter) + } else if token::storage_key::is_masp_key(key) { + Self::Masp + } else if ibc::is_ibc_key(key) { + Self::Ibc + } else { + Self::Unknown } - false - }; - - Ok(is_valid_bond_or_unbond_change() - || storage_key::is_total_deltas_key(key) - || storage_key::is_validator_deltas_key(key) - || 
storage_key::is_validator_total_bond_or_unbond_key(key) - || storage_key::is_validator_set_positions_key(key) - || storage_key::is_total_consensus_stake_key(key) - || is_valid_state_change()? - || is_valid_reward_claim() - || is_valid_redelegation() - || **valid_sig) + } } #[cfg(test)] mod tests { + use std::panic; + // Use this as `#[test]` annotation to enable logging use namada::core::dec::Dec; use namada::core::storage::Epoch; use namada::ledger::pos::{GenesisValidator, PosParams}; use namada::tx::data::TxType; - use namada::tx::{Code, Data, Signature}; + use namada::tx::{Authorization, Code, Data}; use namada_test_utils::TestWasms; use namada_tests::log::test; use namada_tests::native_vp::pos::init_pos; @@ -318,7 +275,7 @@ mod tests { vp_host_env::init(); assert!( - validate_tx(&CTX, tx_data, addr, keys_changed, verifiers).unwrap() + validate_tx(&CTX, tx_data, addr, keys_changed, verifiers).is_ok() ); } @@ -351,7 +308,7 @@ mod tests { assert!( validate_tx(&CTX, tx_data, addr.clone(), keys_changed, verifiers) - .unwrap(), + .is_ok(), "Revealing PK that's not yet revealed and is matching the address \ must be accepted" ); @@ -377,7 +334,7 @@ mod tests { vp_host_env::set(vp_env); assert!( - !validate_tx(&CTX, tx_data, addr, keys_changed, verifiers).unwrap(), + validate_tx(&CTX, tx_data, addr, keys_changed, verifiers).is_err(), "Revealing PK that's already revealed should be rejected" ); } @@ -418,7 +375,7 @@ mod tests { vp_host_env::set(vp_env); assert!( - !validate_tx(&CTX, tx_data, addr, keys_changed, verifiers).unwrap(), + validate_tx(&CTX, tx_data, addr, keys_changed, verifiers).is_err(), "Mismatching PK must be rejected" ); } @@ -453,6 +410,11 @@ mod tests { amount, token::NATIVE_MAX_DECIMAL_PLACES.into(), ); + + // Add the receiver's address to the verifier set as this address is not + // part of the verifier set from a transfer function + tx_env.verifiers.insert(vp_owner.clone()); + // Initialize VP environment from a transaction 
vp_host_env::init_from_tx(vp_owner.clone(), tx_env, |address| { // Apply transfer in a transaction @@ -461,7 +423,7 @@ mod tests { &source, address, &token, - amount, + amount.amount(), ) .unwrap(); }); @@ -475,7 +437,7 @@ mod tests { vp_host_env::set(vp_env); assert!( validate_tx(&CTX, tx_data, vp_owner, keys_changed, verifiers) - .unwrap() + .is_ok() ); } @@ -556,8 +518,13 @@ mod tests { let verifiers: BTreeSet
= BTreeSet::default(); vp_host_env::set(vp_env); assert!( - !validate_tx(&CTX, tx_data, vp_owner, keys_changed, verifiers) - .unwrap() + panic::catch_unwind(|| { + validate_tx(&CTX, tx_data, vp_owner, keys_changed, verifiers) + }) + .err() + .map(|a| a.downcast_ref::().cloned().unwrap()) + .unwrap() + .contains("InvalidSectionSignature") ); } @@ -638,7 +605,7 @@ mod tests { let mut tx = vp_env.tx.clone(); tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![], None)); - tx.add_section(Section::Signature(Signature::new( + tx.add_section(Section::Authorization(Authorization::new( vec![tx.raw_header_hash()], pks_map.index_secret_keys(vec![secret_key]), None, @@ -652,7 +619,7 @@ mod tests { vp_host_env::set(vp_env); assert!( validate_tx(&CTX, signed_tx, vp_owner, keys_changed, verifiers) - .unwrap() + .is_ok() ); } @@ -697,7 +664,7 @@ mod tests { address, &target, &token, - amount, + amount.amount(), ) .unwrap(); }); @@ -710,8 +677,13 @@ mod tests { let verifiers: BTreeSet
= BTreeSet::default(); vp_host_env::set(vp_env); assert!( - !validate_tx(&CTX, tx_data, vp_owner, keys_changed, verifiers) - .unwrap() + panic::catch_unwind(|| { + validate_tx(&CTX, tx_data, vp_owner, keys_changed, verifiers) + }) + .err() + .map(|a| a.downcast_ref::().cloned().unwrap()) + .unwrap() + .contains("InvalidSectionSignature") ); } @@ -757,7 +729,7 @@ mod tests { address, &target, &token, - amount, + amount.amount(), ) .unwrap(); }); @@ -768,7 +740,7 @@ mod tests { let mut tx = vp_env.tx.clone(); tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![], None)); - tx.add_section(Section::Signature(Signature::new( + tx.add_section(Section::Authorization(Authorization::new( vec![tx.raw_header_hash()], pks_map.index_secret_keys(vec![secret_key]), None, @@ -783,7 +755,7 @@ mod tests { assert!( validate_tx(&CTX, signed_tx, vp_owner, keys_changed, verifiers) - .unwrap() + .is_ok() ); } @@ -830,7 +802,7 @@ mod tests { &source, &target, &token, - amount, + amount.amount(), ) .unwrap(); }); @@ -844,7 +816,7 @@ mod tests { vp_host_env::set(vp_env); assert!( validate_tx(&CTX, tx_data, vp_owner, keys_changed, verifiers) - .unwrap() + .is_ok() ); } @@ -898,7 +870,15 @@ mod tests { vp_env.all_touched_storage_keys(); let verifiers: BTreeSet
= BTreeSet::default(); vp_host_env::set(vp_env); - assert!(!validate_tx(&CTX, tx_data, vp_owner, keys_changed, verifiers).unwrap()); + assert!( + panic::catch_unwind(|| { + validate_tx(&CTX, tx_data, vp_owner, keys_changed, verifiers) + }) + .err() + .map(|a| a.downcast_ref::().cloned().unwrap()) + .unwrap() + .contains("InvalidSectionSignature") + ); } fn test_signed_arb_storage_write( @@ -933,7 +913,7 @@ mod tests { let mut tx = vp_env.tx.clone(); tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![], None)); - tx.add_section(Section::Signature(Signature::new( + tx.add_section(Section::Authorization(Authorization::new( vec![tx.raw_header_hash()], pks_map.index_secret_keys(vec![secret_key]), None, @@ -944,7 +924,7 @@ mod tests { vp_env.all_touched_storage_keys(); let verifiers: BTreeSet
= BTreeSet::default(); vp_host_env::set(vp_env); - assert!(validate_tx(&CTX, signed_tx, vp_owner, keys_changed, verifiers).unwrap()); + assert!(validate_tx(&CTX, signed_tx, vp_owner, keys_changed, verifiers).is_ok()); } } @@ -982,8 +962,13 @@ mod tests { let verifiers: BTreeSet
= BTreeSet::default(); vp_host_env::set(vp_env); assert!( - !validate_tx(&CTX, tx_data, vp_owner, keys_changed, verifiers) - .unwrap() + panic::catch_unwind(|| { + validate_tx(&CTX, tx_data, vp_owner, keys_changed, verifiers) + }) + .err() + .map(|a| a.downcast_ref::().cloned().unwrap()) + .unwrap() + .contains("InvalidSectionSignature") ); } } diff --git a/wasm/vp_template/.gitignore b/wasm/vp_template/.gitignore deleted file mode 100644 index 1f7868d94e..0000000000 --- a/wasm/vp_template/.gitignore +++ /dev/null @@ -1,10 +0,0 @@ -# Built wasm module -vp.wasm - -# Generated by Cargo -# will have compiled files and executables -debug/ -target/ - -# These are backup files generated by rustfmt -**/*.rs.bk \ No newline at end of file diff --git a/wasm/vp_template/Cargo.toml b/wasm/vp_template/Cargo.toml deleted file mode 100644 index 3aa9573d24..0000000000 --- a/wasm/vp_template/Cargo.toml +++ /dev/null @@ -1,18 +0,0 @@ -[package] -authors = ["Heliax AG "] -edition = "2021" -license = "GPL-3.0" -name = "vp_template" -resolver = "2" -version = "0.32.1" - -[lib] -crate-type = ["cdylib"] - -[dependencies] -namada_vp_prelude = {path = "../../crates/vp_prelude"} -wee_alloc = "0.4.5" -getrandom = { version = "0.2", features = ["custom"] } - -[dev-dependencies] -namada_tests = {path = "../../crates/tests"} diff --git a/wasm/vp_template/Makefile b/wasm/vp_template/Makefile deleted file mode 100644 index 02e33ac6d8..0000000000 --- a/wasm/vp_template/Makefile +++ /dev/null @@ -1,21 +0,0 @@ -cargo = $(env) cargo -rustup = $(env) rustup - -# Linker flag "-s" for stripping (https://github.com/rust-lang/cargo/issues/3483#issuecomment-431209957) -build-release: - # wasm is built into target/wasm32-unknown-unknown/release - RUSTFLAGS='-C link-arg=-s' $(cargo) build --release --target wasm32-unknown-unknown - -build: - $(cargo) build --target wasm32-unknown-unknown - -watch: - $(cargo) watch - -clean: - $(cargo) clean && if [ -e $(wasm) ]; then rm $(wasm); fi - -deps: - $(rustup) 
target add wasm32-unknown-unknown - -.PHONY : build-release build watch clean deps diff --git a/wasm/vp_template/README.md b/wasm/vp_template/README.md deleted file mode 100644 index 58c6b6198b..0000000000 --- a/wasm/vp_template/README.md +++ /dev/null @@ -1,13 +0,0 @@ -# Validity predicate wasm template - -This is a template of a wasm module for a validity predicate. - -## Quick start - -```shell -# To be able to build this, make sure to have -make deps - -# Build - this will create `.wasm` file -make build-release -``` diff --git a/wasm/vp_template/src/lib.rs b/wasm/vp_template/src/lib.rs deleted file mode 100644 index 3a42efc741..0000000000 --- a/wasm/vp_template/src/lib.rs +++ /dev/null @@ -1,26 +0,0 @@ -use namada_vp_prelude::*; - -#[validity_predicate(gas = 1000)] -fn validate_tx( - ctx: &Ctx, - tx_data: Tx, - addr: Address, - keys_changed: BTreeSet, - verifiers: BTreeSet
, -) -> VpResult { - log_string(format!( - "validate_tx called with addr: {}, key_changed: {:#?}, tx_data: \ - {:#?}, verifiers: {:?}", - addr, keys_changed, tx_data, verifiers - )); - - for key in keys_changed { - let pre: Option = ctx.read_pre(&key)?; - let post: Option = ctx.read_post(&key)?; - log_string(format!( - "validate_tx key: {}, pre: {:#?}, post: {:#?}", - key, pre, post, - )); - } - accept() -} diff --git a/wasm/vp_user/Cargo.toml b/wasm/vp_user/Cargo.toml new file mode 100644 index 0000000000..f94e5e6b85 --- /dev/null +++ b/wasm/vp_user/Cargo.toml @@ -0,0 +1,29 @@ +[package] +name = "vp_user" +description = "User validity predicate." +authors.workspace = true +edition.workspace = true +license.workspace = true +version.workspace = true + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +namada_tx_prelude.workspace = true +namada_vp_prelude.workspace = true +wee_alloc.workspace = true +getrandom.workspace = true + +[dev-dependencies] +namada = {path = "../../crates/namada"} +namada_tests = {path = "../../crates/tests"} +namada_test_utils = {path = "../../crates/test_utils"} +namada_vp_prelude = {path = "../../crates/vp_prelude"} + +proptest = "1.4.0" +test-log = {version = "0.2.14", default-features = false, features = ["trace"]} +tracing = "0.1.30" +tracing-subscriber = {version = "0.3.7", default-features = false, features = ["env-filter", "fmt"]} + +[lib] +crate-type = ["cdylib"] \ No newline at end of file diff --git a/wasm/wasm_source/src/vp_user.rs b/wasm/vp_user/src/lib.rs similarity index 77% rename from wasm/wasm_source/src/vp_user.rs rename to wasm/vp_user/src/lib.rs index fd4166ec3c..db130a2447 100644 --- a/wasm/wasm_source/src/vp_user.rs +++ b/wasm/vp_user/src/lib.rs @@ -11,76 +11,14 @@ //! //! Any other storage key changes are allowed only with a valid signature. 
-use core::ops::Deref; - +use booleans::BoolResultUnitExt; +use namada_vp_prelude::tx::action::*; use namada_vp_prelude::*; -use once_cell::unsync::Lazy; -use proof_of_stake::storage::{read_pos_params, validator_state_handle}; -use proof_of_stake::storage_key::{ - is_below_capacity_validator_set_key, is_bond_epoched_meta_key, is_bond_key, - is_consensus_keys_key, is_consensus_validator_set_key, - is_delegator_redelegations_key, is_last_pos_reward_claim_epoch_key, - is_pos_key, is_rewards_counter_key, is_total_consensus_stake_key, - is_total_deltas_key, is_unbond_key, is_validator_address_raw_hash_key, - is_validator_addresses_key, is_validator_commission_rate_key, - is_validator_deltas_key, is_validator_eth_cold_key_key, - is_validator_eth_hot_key_key, is_validator_max_commission_rate_change_key, - is_validator_metadata_key, is_validator_redelegations_key, - is_validator_set_positions_key, is_validator_state_epoched_meta_key, - is_validator_state_key, is_validator_total_bond_or_unbond_key, -}; - -enum KeyType<'a> { - TokenBalance { owner: &'a Address }, - TokenMinted, - TokenMinter(&'a Address), - PoS, - Vp(&'a Address), - Masp, - PgfSteward(&'a Address), - GovernanceVote(&'a Address), - Ibc, - Unknown, -} - -impl<'a> From<&'a storage::Key> for KeyType<'a> { - fn from(key: &'a storage::Key) -> KeyType<'a> { - if let Some([_, owner]) = - token::storage_key::is_any_token_balance_key(key) - { - Self::TokenBalance { owner } - } else if token::storage_key::is_any_minted_balance_key(key).is_some() { - Self::TokenMinted - } else if let Some(minter) = token::storage_key::is_any_minter_key(key) - { - Self::TokenMinter(minter) - } else if is_pos_key(key) { - Self::PoS - } else if gov_storage::keys::is_vote_key(key) { - let voter_address = gov_storage::keys::get_voter_address(key); - if let Some(address) = voter_address { - Self::GovernanceVote(address) - } else { - Self::Unknown - } - } else if let Some(address) = pgf_storage::keys::is_stewards_key(key) { - 
Self::PgfSteward(address) - } else if let Some(address) = key.is_validity_predicate() { - Self::Vp(address) - } else if token::storage_key::is_masp_key(key) { - Self::Masp - } else if ibc::is_ibc_key(key) { - Self::Ibc - } else { - Self::Unknown - } - } -} #[validity_predicate(gas = 137325)] fn validate_tx( ctx: &Ctx, - tx_data: Tx, + tx: Tx, addr: Address, keys_changed: BTreeSet, verifiers: BTreeSet
, @@ -92,32 +30,100 @@ fn validate_tx( verifiers ); - let valid_sig = Lazy::new(|| { - matches!(verify_signatures(ctx, &tx_data, &addr), Ok(true)) - }); + // Check if this is a governance proposal first + let is_gov_proposal = tx + .data() + .and_then(|tx_data| { + let proposal_id = u64::try_from_slice(&tx_data).ok()?; + Some(is_proposal_accepted(ctx, proposal_id)) + }) + .transpose()? + .unwrap_or(false); + if is_gov_proposal { + // Any change from governance is allowed without further checks + return Ok(()); + } + + let mut gadget = VerifySigGadget::new(); + + // Find the actions applied in the tx + let actions = ctx.read_actions().into_vp_error()?; + + // Require authorization by signature when the source of an action is this + // VP's address + for action in actions { + match action { + Action::Pos(pos_action) => match pos_action { + PosAction::BecomeValidator(source) + | PosAction::DeactivateValidator(source) + | PosAction::ReactivateValidator(source) + | PosAction::Unjail(source) + | PosAction::CommissionChange(source) + | PosAction::MetadataChange(source) + | PosAction::ConsensusKeyChange(source) + | PosAction::Redelegation(Redelegation { + owner: source, .. + }) => gadget.verify_signatures_when( + || source == addr, + ctx, + &tx, + &addr, + )?, + PosAction::Bond(Bond { + source, validator, .. + }) + | PosAction::Unbond(Unbond { + source, validator, .. + }) + | PosAction::Withdraw(Withdraw { source, validator }) + | PosAction::ClaimRewards(ClaimRewards { validator, source }) => + { + let source = source.unwrap_or(validator); + gadget.verify_signatures_when( + || source == addr, + ctx, + &tx, + &addr, + )? + } + }, + Action::Gov( + GovAction::InitProposal { author: source } + | GovAction::VoteProposal { voter: source, .. 
}, + ) + | Action::Pgf( + PgfAction::ResignSteward(source) + | PgfAction::UpdateStewardCommission(source), + ) => gadget.verify_signatures_when( + || source == addr, + ctx, + &tx, + &addr, + )?, + } + } - for key in keys_changed.iter() { + keys_changed.iter().try_for_each(|key| { let key_type: KeyType = key.into(); - let is_valid = match key_type { + let mut validate_change = || match key_type { KeyType::TokenBalance { owner, .. } => { if owner == &addr { let pre: token::Amount = - ctx.read_pre(key)?.unwrap_or_default(); + ctx.read_pre(key).into_vp_error()?.unwrap_or_default(); let post: token::Amount = - ctx.read_post(key)?.unwrap_or_default(); + ctx.read_post(key).into_vp_error()?.unwrap_or_default(); let change = post.change() - pre.change(); - // debit has to signed, credit doesn't - let valid = change.non_negative() || *valid_sig; - debug_log!( - "token key: {}, change: {:?}, valid_sig: {}, valid \ - modification: {}", - key, - change, - *valid_sig, - valid - ); - valid + gadget.verify_signatures_when( + // NB: debit has to signed, credit doesn't + || change.is_negative(), + ctx, + &tx, + &addr, + )?; + let sign = if change.non_negative() { "" } else { "-" }; + debug_log!("token key: {key}, change: {sign}{change:?}"); } else { + // If this is not the owner, allow any change debug_log!( "This address ({}) is not of owner ({}) of token key: \ {}", @@ -125,218 +131,91 @@ fn validate_tx( owner, key ); - // If this is not the owner, allow any change - true } + Ok(()) + } + KeyType::TokenMinted => { + verifiers.contains(&address::MULTITOKEN).ok_or_else(|| { + VpError::Erased( + "The Multitoken VP should have been a verifier for \ + this transaction, since a token was minted" + .into(), + ) + }) } - KeyType::TokenMinted => verifiers.contains(&address::MULTITOKEN), - KeyType::TokenMinter(minter) => minter != &addr || *valid_sig, - KeyType::PoS => validate_pos_changes(ctx, &addr, key, &valid_sig)?, - KeyType::PgfSteward(address) => address != &addr || *valid_sig, - 
KeyType::GovernanceVote(voter) => voter != &addr || *valid_sig, + KeyType::TokenMinter(minter_addr) => gadget.verify_signatures_when( + || minter_addr == &addr, + ctx, + &tx, + &addr, + ), KeyType::Vp(owner) => { - let has_post: bool = ctx.has_key_post(key)?; - if owner == &addr { - has_post && *valid_sig - } else { - true - } + let vp_overwritten: bool = + ctx.has_key_post(key).into_vp_error()?; + gadget.verify_signatures_when( + || owner == &addr && vp_overwritten, + ctx, + &tx, + &addr, + ) } - KeyType::Masp | KeyType::Ibc => true, + KeyType::Masp | KeyType::Ibc => Ok(()), KeyType::Unknown => { // Unknown changes require a valid signature - *valid_sig + gadget.verify_signatures(ctx, &tx, &addr) } }; - if !is_valid { - log_string(format!("key {} modification failed vp_user", key)); - return reject(); - } - } - - accept() + validate_change().inspect_err(|reason| { + log_string(format!( + "Modification on key {key} failed vp_user: {reason}" + )); + }) + }) } -fn validate_pos_changes( - ctx: &Ctx, - owner: &Address, - key: &storage::Key, - valid_sig: &impl Deref, -) -> VpResult { - // Bond or unbond - let is_valid_bond_or_unbond_change = || { - let bond_id = is_bond_key(key) - .map(|(bond_id, _)| bond_id) - .or_else(|| is_bond_epoched_meta_key(key)) - .or_else(|| is_unbond_key(key).map(|(bond_id, _, _)| bond_id)); - if let Some(bond_id) = bond_id { - // Bonds and unbonds changes for this address must be signed - return &bond_id.source != owner || **valid_sig; - }; - // Unknown changes are not allowed - false - }; - - // Commission rate changes must be signed by the validator - let is_valid_commission_rate_change = || { - if let Some(validator) = is_validator_commission_rate_key(key) { - return validator == owner && **valid_sig; - } - false - }; - - // Metadata changes must be signed by the validator whose - // metadata is manipulated - let is_valid_metadata_change = || { - let metadata = is_validator_metadata_key(key); - match metadata { - Some(address) => address 
== owner && **valid_sig, - None => false, - } - }; - - // Changes in validator state - let is_valid_state_change = || { - let state_change = is_validator_state_key(key); - let is_valid_state = match state_change { - Some((address, epoch)) => { - let params_pre = read_pos_params(&ctx.pre())?; - let state_pre = validator_state_handle(address).get( - &ctx.pre(), - epoch, - ¶ms_pre, - )?; - - let params_post = read_pos_params(&ctx.post())?; - let state_post = validator_state_handle(address).get( - &ctx.post(), - epoch, - ¶ms_post, - )?; - - match (state_pre, state_post) { - (Some(pre), Some(post)) => { - use proof_of_stake::types::ValidatorState::*; - - if ( - // Deactivation case - matches!( - pre, - Consensus | BelowCapacity | BelowThreshold - ) && post == Inactive) - // Reactivation case - || (pre == Inactive && post != Inactive) - // Unjail case - || (pre == Jailed - && matches!( - post, - Consensus - | BelowCapacity - | BelowThreshold - )) - { - if address == owner { **valid_sig } else { true } - } else if - // Bonding and unbonding may affect validator sets - matches!( - pre, - Consensus | BelowCapacity | BelowThreshold - ) && matches!( - post, - Consensus | BelowCapacity | BelowThreshold - ) { - true - } else { - // Unknown state changes are not allowed - false - } - } - (None, Some(_post)) => { - // Becoming a validator must be authorized - address == owner && **valid_sig - } - (Some(_pre), None) => { - // Clearing of old epoched data - true - } - _ => false, - } - } - None => false, - }; - - VpResult::Ok( - is_valid_state - || is_validator_state_epoched_meta_key(key) - || is_consensus_validator_set_key(key) - || is_below_capacity_validator_set_key(key), - ) - }; - - let is_valid_reward_claim = || { - if let Some(bond_id) = is_last_pos_reward_claim_epoch_key(key) { - // Claims for this address must be signed - return &bond_id.source != owner || **valid_sig; - } - if let Some(bond_id) = is_rewards_counter_key(key) { - // Claims for this address must be signed - 
return &bond_id.source != owner || **valid_sig; - } - false - }; +enum KeyType<'a> { + TokenBalance { owner: &'a Address }, + TokenMinted, + TokenMinter(&'a Address), + Vp(&'a Address), + Masp, + Ibc, + Unknown, +} - let is_valid_redelegation = || { - if is_validator_redelegations_key(key) { - return true; - } - if let Some(delegator) = is_delegator_redelegations_key(key) { - // Redelegations for this address must be signed - return delegator != owner || **valid_sig; - } - if let Some(bond_id) = is_rewards_counter_key(key) { - // Redelegations auto-claim rewards - return &bond_id.source != owner || **valid_sig; - } - false - }; - - let is_valid_become_validator = || { - if is_validator_addresses_key(key) - || is_consensus_keys_key(key) - || is_validator_eth_cold_key_key(key).is_some() - || is_validator_eth_hot_key_key(key).is_some() - || is_validator_max_commission_rate_change_key(key).is_some() - || is_validator_address_raw_hash_key(key).is_some() +impl<'a> From<&'a storage::Key> for KeyType<'a> { + fn from(key: &'a storage::Key) -> KeyType<'a> { + if let Some([_, owner]) = + token::storage_key::is_any_token_balance_key(key) + { + Self::TokenBalance { owner } + } else if token::storage_key::is_any_minted_balance_key(key).is_some() { + Self::TokenMinted + } else if let Some(minter) = token::storage_key::is_any_minter_key(key) { - // A signature is required to become validator - return **valid_sig; + Self::TokenMinter(minter) + } else if let Some(address) = key.is_validity_predicate() { + Self::Vp(address) + } else if token::storage_key::is_masp_key(key) { + Self::Masp + } else if ibc::is_ibc_key(key) { + Self::Ibc + } else { + Self::Unknown } - false - }; - - Ok(is_valid_bond_or_unbond_change() - || is_total_deltas_key(key) - || is_validator_deltas_key(key) - || is_validator_total_bond_or_unbond_key(key) - || is_validator_set_positions_key(key) - || is_total_consensus_stake_key(key) - || is_valid_state_change()? 
- || is_valid_reward_claim() - || is_valid_redelegation() - || is_valid_commission_rate_change() - || is_valid_metadata_change() - || is_valid_become_validator() - || **valid_sig) + } } #[cfg(test)] mod tests { + use std::panic; + use address::testing::arb_non_internal_address; use namada::core::dec::Dec; use namada::core::storage::Epoch; use namada::ledger::pos::{GenesisValidator, PosParams}; use namada::tx::data::{self, TxType}; - use namada::tx::{Code, Data, Signature}; + use namada::tx::{Authorization, Code, Data}; use namada_test_utils::TestWasms; // Use this as `#[test]` annotation to enable logging use namada_tests::log::test; @@ -366,7 +245,7 @@ mod tests { vp_host_env::init(); assert!( - validate_tx(&CTX, tx_data, addr, keys_changed, verifiers).unwrap() + validate_tx(&CTX, tx_data, addr, keys_changed, verifiers).is_ok() ); } @@ -399,6 +278,11 @@ mod tests { amount, token::NATIVE_MAX_DECIMAL_PLACES.into(), ); + + // Add the receiver's address to the verifier set as this address is not + // part of the verifier set from a transfer function + tx_env.verifiers.insert(vp_owner.clone()); + // Initialize VP environment from a transaction vp_host_env::init_from_tx(vp_owner.clone(), tx_env, |address| { // Apply transfer in a transaction @@ -407,7 +291,7 @@ mod tests { &source, address, &token, - amount, + amount.amount(), ) .unwrap(); }); @@ -421,7 +305,7 @@ mod tests { vp_host_env::set(vp_env); assert!( validate_tx(&CTX, tx_data, vp_owner, keys_changed, verifiers) - .unwrap() + .is_ok() ); } @@ -462,7 +346,7 @@ mod tests { address, &target, &token, - amount, + amount.amount(), ) .unwrap(); }); @@ -475,8 +359,13 @@ mod tests { let verifiers: BTreeSet
= BTreeSet::default(); vp_host_env::set(vp_env); assert!( - !validate_tx(&CTX, tx_data, vp_owner, keys_changed, verifiers) - .unwrap() + panic::catch_unwind(|| { + validate_tx(&CTX, tx_data, vp_owner, keys_changed, verifiers) + }) + .err() + .map(|a| a.downcast_ref::().cloned().unwrap()) + .unwrap() + .contains("InvalidSectionSignature") ); } @@ -521,7 +410,7 @@ mod tests { address, &target, &token, - amount, + amount.amount(), ) .unwrap(); }); @@ -532,7 +421,7 @@ mod tests { let mut tx = vp_env.tx.clone(); tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![], None)); - tx.add_section(Section::Signature(Signature::new( + tx.add_section(Section::Authorization(Authorization::new( vec![tx.raw_header_hash()], pks_map.index_secret_keys(vec![keypair]), None, @@ -545,7 +434,7 @@ mod tests { vp_host_env::set(vp_env); assert!( validate_tx(&CTX, signed_tx, vp_owner, keys_changed, verifiers) - .unwrap() + .is_ok() ); } @@ -624,8 +513,13 @@ mod tests { let verifiers: BTreeSet
= BTreeSet::default(); vp_host_env::set(vp_env); assert!( - !validate_tx(&CTX, tx_data, vp_owner, keys_changed, verifiers) - .unwrap() + panic::catch_unwind(|| { + validate_tx(&CTX, tx_data, vp_owner, keys_changed, verifiers) + }) + .err() + .map(|a| a.downcast_ref::().cloned().unwrap()) + .unwrap() + .contains("InvalidSectionSignature") ); } @@ -687,7 +581,7 @@ mod tests { GenesisValidator { address: validator2.clone(), tokens: stake2, - consensus_key: ck2, + consensus_key: ck2.clone(), protocol_key, commission_rate, max_commission_rate_change, @@ -706,6 +600,10 @@ mod tests { // Initialize a tx environment let mut tx_env = tx_host_env::take(); + // Set the validator accounts' keys + tx_env.init_account_storage(&validator1, vec![ck1], 1); + tx_env.init_account_storage(&validator2, vec![ck2], 1); + tx_env.init_account_storage(&validator3, vec![ck3.clone()], 1); let token = address::testing::nam(); // write the denomination of NAM into storage @@ -738,7 +636,7 @@ mod tests { let mut tx_data = Tx::from_type(TxType::Raw); tx_data.set_data(Data::new(vec![])); tx_data.set_code(Code::new(vec![], None)); - tx_data.add_section(Section::Signature(Signature::new( + tx_data.add_section(Section::Authorization(Authorization::new( vec![tx_data.raw_header_hash()], pks_map.index_secret_keys(vec![sk3]), None, @@ -748,32 +646,27 @@ mod tests { let keys_changed: BTreeSet = vp_env.all_touched_storage_keys(); - // dbg!(&keys_changed); - // let verifiers: BTreeSet
= BTreeSet::default(); let verifiers: BTreeSet
= vp_env.get_verifiers(); - dbg!(&verifiers); + assert!(verifiers.contains(&address::POS)); + assert!(verifiers.contains(&validator3)); + assert_eq!(verifiers.len(), 2); + + // The other validators whose state may be affected by unjailing must + // not be part of the verifier set + assert!(!verifiers.contains(&validator1)); + assert!(!verifiers.contains(&validator2)); + vp_host_env::set(vp_env); - // for verifier in verifiers.clone() { - // dbg!(&verifier); - // assert!( - // validate_tx( - // &CTX, - // signed_tx.clone(), - // validator1, - // keys_changed.clone(), - // verifiers.clone() - // ) - // .unwrap() - // ); + // The validator3 VP must accept the authorized tx assert!( validate_tx( &CTX, signed_tx.clone(), - validator1, + validator3, keys_changed.clone(), verifiers.clone() ) - .unwrap() + .is_ok() ); } @@ -857,8 +750,13 @@ mod tests { let verifiers: BTreeSet
= BTreeSet::default(); vp_host_env::set(vp_env); assert!( - !validate_tx(&CTX, tx_data, vp_owner, keys_changed, verifiers) - .unwrap() + panic::catch_unwind(|| { + validate_tx(&CTX, tx_data, vp_owner, keys_changed, verifiers) + }) + .err() + .map(|a| a.downcast_ref::().cloned().unwrap()) + .unwrap() + .contains("InvalidSectionSignature") ); } @@ -948,8 +846,13 @@ mod tests { let verifiers: BTreeSet
= BTreeSet::default(); vp_host_env::set(vp_env); assert!( - !validate_tx(&CTX, tx_data, validator, keys_changed, verifiers) - .unwrap() + panic::catch_unwind(|| { + validate_tx(&CTX, tx_data, validator, keys_changed, verifiers) + }) + .err() + .map(|a| a.downcast_ref::().cloned().unwrap()) + .unwrap() + .contains("InvalidSectionSignature") ); } @@ -1031,7 +934,7 @@ mod tests { let mut tx = vp_env.tx.clone(); tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![], None)); - tx.add_section(Section::Signature(Signature::new( + tx.add_section(Section::Authorization(Authorization::new( vec![tx.raw_header_hash()], pks_map.index_secret_keys(vec![secret_key]), None, @@ -1044,7 +947,7 @@ mod tests { vp_host_env::set(vp_env); assert!( validate_tx(&CTX, signed_tx, vp_owner, keys_changed, verifiers) - .unwrap() + .is_ok() ); } @@ -1126,7 +1029,7 @@ mod tests { let mut tx = vp_env.tx.clone(); tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![], None)); - tx.add_section(Section::Signature(Signature::new( + tx.add_section(Section::Authorization(Authorization::new( vec![tx.raw_header_hash()], pks_map.index_secret_keys(vec![secret_key]), None, @@ -1139,7 +1042,7 @@ mod tests { vp_host_env::set(vp_env); assert!( validate_tx(&CTX, signed_tx, vp_owner, keys_changed, verifiers) - .unwrap() + .is_ok() ); } @@ -1232,7 +1135,7 @@ mod tests { let mut tx = vp_env.tx.clone(); tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![], None)); - tx.add_section(Section::Signature(Signature::new( + tx.add_section(Section::Authorization(Authorization::new( vec![tx.raw_header_hash()], pks_map.index_secret_keys(vec![secret_key]), None, @@ -1245,7 +1148,7 @@ mod tests { vp_host_env::set(vp_env); assert!( validate_tx(&CTX, signed_tx, validator, keys_changed, verifiers) - .unwrap() + .is_ok() ); } @@ -1282,7 +1185,7 @@ mod tests { &source, &target, &token, - amount, + amount.amount(), ) .unwrap(); }); @@ -1296,7 +1199,7 @@ mod tests { vp_host_env::set(vp_env); assert!( 
validate_tx(&CTX, tx_data, vp_owner, keys_changed, verifiers) - .unwrap() + .is_ok() ); } @@ -1318,7 +1221,7 @@ mod tests { proptest! { /// Test that an unsigned tx that performs arbitrary storage writes or - /// deletes to the account is rejected. + /// deletes to the account is rejected. #[test] fn test_unsigned_arb_storage_write_rejected( (vp_owner, storage_key) in arb_account_storage_subspace_key(), @@ -1350,61 +1253,69 @@ mod tests { vp_env.all_touched_storage_keys(); let verifiers: BTreeSet
= BTreeSet::default(); vp_host_env::set(vp_env); - assert!(!validate_tx(&CTX, tx_data, vp_owner, keys_changed, verifiers).unwrap()); + assert!( + panic::catch_unwind(|| { + validate_tx(&CTX, tx_data, vp_owner, keys_changed, verifiers) + }) + .err() + .map(|a| a.downcast_ref::().cloned().unwrap()) + .unwrap() + .contains("InvalidSectionSignature") + ); } } proptest! { - /// Test that a signed tx that performs arbitrary storage writes or - /// deletes to the account is accepted. - #[test] - fn test_signed_arb_storage_write( - (vp_owner, storage_key) in arb_account_storage_subspace_key(), - // Generate bytes to write. If `None`, delete from the key instead - storage_value in any::>>(), - ) { - // Initialize a tx environment - let mut tx_env = TestTxEnv::default(); - - let keypair = key::testing::keypair_1(); - let public_key = keypair.ref_to(); - - // Spawn all the accounts in the storage key to be able to modify - // their storage - let storage_key_addresses = storage_key.find_addresses(); - tx_env.spawn_accounts(storage_key_addresses); - tx_env.init_account_storage(&vp_owner, vec![public_key.clone()], 1); - - // Initialize VP environment from a transaction - vp_host_env::init_from_tx(vp_owner.clone(), tx_env, |_address| { - // Write or delete some data in the transaction - if let Some(value) = &storage_value { - tx::ctx().write(&storage_key, value).unwrap(); - } else { - tx::ctx().delete(&storage_key).unwrap(); - } - }); - - let pks_map = AccountPublicKeysMap::from_iter(vec![public_key]); - - let mut vp_env = vp_host_env::take(); - let mut tx = vp_env.tx.clone(); - tx.set_code(Code::new(vec![], None)); - tx.set_data(Data::new(vec![])); - tx.add_section(Section::Signature(Signature::new( - vec![ tx.raw_header_hash()], - pks_map.index_secret_keys(vec![keypair]), - None, - ))); - let signed_tx = tx.clone(); - vp_env.tx = signed_tx.clone(); - let keys_changed: BTreeSet = - vp_env.all_touched_storage_keys(); - let verifiers: BTreeSet
= BTreeSet::default(); - vp_host_env::set(vp_env); - assert!(validate_tx(&CTX, signed_tx, vp_owner, keys_changed, verifiers).unwrap()); - } + /// Test that a signed tx that performs arbitrary storage writes or + /// deletes to the account is accepted. + #[test] + fn test_signed_arb_storage_write( + (vp_owner, storage_key) in arb_account_storage_subspace_key(), + // Generate bytes to write. If `None`, delete from the key instead + storage_value in any::>>(), + ) { + // Initialize a tx environment + let mut tx_env = TestTxEnv::default(); + + let keypair = key::testing::keypair_1(); + let public_key = keypair.ref_to(); + + // Spawn all the accounts in the storage key to be able to modify + // their storage + let storage_key_addresses = storage_key.find_addresses(); + tx_env.spawn_accounts(storage_key_addresses); + tx_env.init_account_storage(&vp_owner, vec![public_key.clone()], 1); + + // Initialize VP environment from a transaction + vp_host_env::init_from_tx(vp_owner.clone(), tx_env, |_address| { + // Write or delete some data in the transaction + if let Some(value) = &storage_value { + tx::ctx().write(&storage_key, value).unwrap(); + } else { + tx::ctx().delete(&storage_key).unwrap(); + } + }); + + let pks_map = AccountPublicKeysMap::from_iter(vec![public_key]); + + let mut vp_env = vp_host_env::take(); + let mut tx = vp_env.tx.clone(); + tx.set_code(Code::new(vec![], None)); + tx.set_data(Data::new(vec![])); + tx.add_section(Section::Authorization(Authorization::new( + vec![tx.raw_header_hash()], + pks_map.index_secret_keys(vec![keypair]), + None, + ))); + let signed_tx = tx.clone(); + vp_env.tx = signed_tx.clone(); + let keys_changed: BTreeSet = + vp_env.all_touched_storage_keys(); + let verifiers: BTreeSet
= BTreeSet::default(); + vp_host_env::set(vp_env); + assert!(validate_tx(&CTX, signed_tx, vp_owner, keys_changed, verifiers).is_ok()); } + } /// Test that a validity predicate update without a valid signature is /// rejected. @@ -1439,8 +1350,13 @@ mod tests { let verifiers: BTreeSet
= BTreeSet::default(); vp_host_env::set(vp_env); assert!( - !validate_tx(&CTX, tx_data, vp_owner, keys_changed, verifiers) - .unwrap() + panic::catch_unwind(|| { + validate_tx(&CTX, tx_data, vp_owner, keys_changed, verifiers) + }) + .err() + .map(|a| a.downcast_ref::().cloned().unwrap()) + .unwrap() + .contains("InvalidSectionSignature") ); } @@ -1478,7 +1394,7 @@ mod tests { let mut tx = vp_env.tx.clone(); tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![], None)); - tx.add_section(Section::Signature(Signature::new( + tx.add_section(Section::Authorization(Authorization::new( vec![tx.raw_header_hash()], pks_map.index_secret_keys(vec![keypair]), None, @@ -1491,7 +1407,7 @@ mod tests { vp_host_env::set(vp_env); assert!( validate_tx(&CTX, signed_tx, vp_owner, keys_changed, verifiers) - .unwrap() + .is_ok() ); } @@ -1534,7 +1450,7 @@ mod tests { let mut tx = vp_env.tx.clone(); tx.set_data(Data::new(vec![])); tx.set_code(Code::new(vec![], None)); - tx.add_section(Section::Signature(Signature::new( + tx.add_section(Section::Authorization(Authorization::new( vec![tx.raw_header_hash()], pks_map.index_secret_keys(vec![keypair]), None, @@ -1547,7 +1463,7 @@ mod tests { vp_host_env::set(vp_env); assert!( validate_tx(&CTX, signed_tx, vp_owner, keys_changed, verifiers) - .unwrap() + .is_ok() ); } } diff --git a/wasm/wasm_source/.gitignore b/wasm/wasm_source/.gitignore deleted file mode 100644 index 5028f0063a..0000000000 --- a/wasm/wasm_source/.gitignore +++ /dev/null @@ -1,7 +0,0 @@ -# Generated by Cargo -# will have compiled files and executables -debug/ -target/ - -# These are backup files generated by rustfmt -**/*.rs.bk \ No newline at end of file diff --git a/wasm/wasm_source/Cargo.toml b/wasm/wasm_source/Cargo.toml deleted file mode 100644 index c9debf23b8..0000000000 --- a/wasm/wasm_source/Cargo.toml +++ /dev/null @@ -1,59 +0,0 @@ -[package] -authors = ["Heliax AG "] -edition = "2021" -license = "GPL-3.0" -name = "namada_wasm" -resolver = "2" -version = 
"0.32.1" - -[lib] -crate-type = ["cdylib"] - -# The features should be used individually to build the selected wasm. -# Newly added wasms should also be added into the Makefile `$(wasms)` list. -[features] -tx_bond = ["namada_tx_prelude"] -tx_bridge_pool = ["namada_tx_prelude"] -tx_change_validator_commission = ["namada_tx_prelude"] -tx_change_consensus_key = ["namada_tx_prelude"] -tx_change_validator_metadata = ["namada_tx_prelude"] -tx_claim_rewards = ["namada_tx_prelude"] -tx_deactivate_validator = ["namada_tx_prelude"] -tx_from_intent = ["namada_tx_prelude"] -tx_ibc = ["namada_tx_prelude"] -tx_init_account = ["namada_tx_prelude"] -tx_init_proposal = ["namada_tx_prelude"] -tx_become_validator = ["namada_tx_prelude"] -tx_reactivate_validator = ["namada_tx_prelude"] -tx_redelegate = ["namada_tx_prelude"] -tx_reveal_pk = ["namada_tx_prelude"] -tx_transfer = ["namada_tx_prelude"] -tx_unbond = ["namada_tx_prelude"] -tx_unjail_validator = ["namada_tx_prelude"] -tx_update_account = ["namada_tx_prelude"] -tx_vote_proposal = ["namada_tx_prelude"] -tx_withdraw = ["namada_tx_prelude"] -tx_update_steward_commission = ["namada_tx_prelude"] -tx_resign_steward = ["namada_tx_prelude"] -vp_implicit = ["namada_vp_prelude", "once_cell"] -vp_user = ["namada_vp_prelude", "once_cell"] - -[dependencies] -namada_tx_prelude = {path = "../../crates/tx_prelude", optional = true} -namada_vp_prelude = {path = "../../crates/vp_prelude", optional = true} - -once_cell = {version = "1.8.0", optional = true} -wee_alloc = "0.4.5" -getrandom = { version = "0.2", features = ["custom"] } - -[dev-dependencies] -namada = {path = "../../crates/namada"} -namada_tests = {path = "../../crates/tests", default-features = false, features = ["wasm-runtime"]} -namada_test_utils = {path = "../../crates/test_utils"} -namada_tx_prelude = {path = "../../crates/tx_prelude"} -namada_vp_prelude = {path = "../../crates/vp_prelude"} - -proptest = "1.4.0" -test-log = {version = "0.2.14", default-features = false, 
features = ["trace"]} -tracing = "0.1.30" -tracing-subscriber = {version = "0.3.7", default-features = false, features = ["env-filter", "fmt"]} diff --git a/wasm/wasm_source/Makefile b/wasm/wasm_source/Makefile deleted file mode 100644 index f01292af1e..0000000000 --- a/wasm/wasm_source/Makefile +++ /dev/null @@ -1,97 +0,0 @@ -cargo := $(env) cargo -rustup := $(env) rustup -# Nightly build is currently used for rustfmt and clippy. -nightly := $(shell cat ../../rust-nightly-version) - -# All the wasms that can be built from this source, switched via Cargo features -# Wasms can be added via the Cargo.toml `[features]` list. -wasms := tx_bond -wasms += tx_bridge_pool -wasms += tx_change_validator_commission -wasms += tx_change_consensus_key -wasms += tx_change_validator_metadata -wasms += tx_claim_rewards -wasms += tx_deactivate_validator -wasms += tx_ibc -wasms += tx_init_account -wasms += tx_init_proposal -wasms += tx_become_validator -wasms += tx_redelegate -wasms += tx_reactivate_validator -wasms += tx_reveal_pk -wasms += tx_transfer -wasms += tx_unbond -wasms += tx_unjail_validator -wasms += tx_update_account -wasms += tx_vote_proposal -wasms += tx_withdraw -wasms += tx_update_steward_commission -wasms += tx_resign_steward -wasms += vp_implicit -wasms += vp_user - -# Build all wasms in release mode -all: $(wasms) - -# Build all wasms in debug mode -debug: - $(foreach wasm,$(wasms),make debug_$(wasm) && ) true - -# `cargo check` all wasms -check: - $(foreach wasm,$(wasms),make check_$(wasm) && ) true - -# `cargo test` all wasms -test: - $(foreach wasm,$(wasms),make test_$(wasm) && ) true - -# `cargo clippy` all wasms -clippy: - $(foreach wasm,$(wasms),make clippy_$(wasm) && ) true - -clippy-fix: - $(cargo) +$(nightly) clippy --fix -Z unstable-options --all-targets --allow-dirty --allow-staged - -fmt: - $(cargo) +$(nightly) fmt --all - -fmt-check: - $(cargo) +$(nightly) fmt --all -- --check - -# Build a selected wasm -# Linker flag "-s" for stripping 
(https://github.com/rust-lang/cargo/issues/3483#issuecomment-431209957) -$(wasms): %: - RUSTFLAGS='-C link-arg=-s' $(cargo) build --release --target wasm32-unknown-unknown --target-dir 'target' --features $@ && \ - cp "./target/wasm32-unknown-unknown/release/namada_wasm.wasm" ../$@.wasm - -# Build a selected wasm in debug mode -$(patsubst %,debug_%,$(wasms)): debug_%: - RUSTFLAGS='-C link-arg=-s' $(cargo) build --target wasm32-unknown-unknown --target-dir 'target' --features $* && \ - cp "./target/wasm32-unknown-unknown/debug/namada_wasm.wasm" ../$*.wasm - -# `cargo check` one of the wasms, e.g. `make check_tx_transfer` -$(patsubst %,check_%,$(wasms)): check_%: - $(cargo) check --target wasm32-unknown-unknown --features $* - -# `cargo test` one of the wasms, e.g. `make test_tx_transfer` -$(patsubst %,test_%,$(wasms)): test_%: - $(cargo) +$(nightly) test --features $* \ - -- \ - -Z unstable-options --report-time - -# `cargo watch` one of the wasms, e.g. `make watch_tx_transfer` -$(patsubst %,watch_%,$(wasms)): watch_%: - $(cargo) watch --features $* - -# `cargo clippy` one of the wasms, e.g. `make clippy_tx_transfer` -$(patsubst %,clippy_%,$(wasms)): clippy_%: - $(cargo) +$(nightly) clippy --all-targets --features $* -- -D warnings - -clean-wasm = rm ../$(wasm).*.wasm -clean: - $(foreach wasm,$(wasms),$(clean-wasm) && ) true - -deps: - $(rustup) target add wasm32-unknown-unknown - -.PHONY : all debug check test clippy fmt fmt-check clean deps diff --git a/wasm/wasm_source/README.md b/wasm/wasm_source/README.md deleted file mode 100644 index 423e29d034..0000000000 --- a/wasm/wasm_source/README.md +++ /dev/null @@ -1,22 +0,0 @@ -# WASM source code in Rust - -This crate contains WASM implementations of various transactions and validity predicates. 
- -## Quick start - -```shell -# To be able to build this, make sure to have -make deps - -# Build - this will output .wasm files in the parent dir -make all - -# Each source that is included here can also be build and checked individually, e.g. for "tx_transfer" source: - -make tx_transfer # optimized build (strips `debug_log!` statements) -make debug_tx_transfer # debug build -make check_tx_transfer # cargo check -make test_tx_transfer # cargo test -make watch_tx_transfer # cargo watch -make clippy_tx_transfer # cargo clippy -``` diff --git a/wasm/wasm_source/proptest-regressions/tx_bond.txt b/wasm/wasm_source/proptest-regressions/tx_bond.txt deleted file mode 100644 index 8c589d1abd..0000000000 --- a/wasm/wasm_source/proptest-regressions/tx_bond.txt +++ /dev/null @@ -1 +0,0 @@ -cc f22e874350910b197cb02a4a07ec5bef18e16c0d1a39eaabaee43d1fc05ce11d diff --git a/wasm/wasm_source/proptest-regressions/tx_redelegate.txt b/wasm/wasm_source/proptest-regressions/tx_redelegate.txt deleted file mode 100644 index a8d6e7edba..0000000000 --- a/wasm/wasm_source/proptest-regressions/tx_redelegate.txt +++ /dev/null @@ -1,2 +0,0 @@ -cc 90825d666558e208bb8f4f46c746177abf8bf9499114dcdac1525ab600b7c6ce - diff --git a/wasm/wasm_source/src/lib.rs b/wasm/wasm_source/src/lib.rs deleted file mode 100644 index 4e27cb92ad..0000000000 --- a/wasm/wasm_source/src/lib.rs +++ /dev/null @@ -1,49 +0,0 @@ -#[cfg(feature = "tx_become_validator")] -pub mod tx_become_validator; -#[cfg(feature = "tx_bond")] -pub mod tx_bond; -#[cfg(feature = "tx_bridge_pool")] -pub mod tx_bridge_pool; -#[cfg(feature = "tx_change_consensus_key")] -pub mod tx_change_consensus_key; -#[cfg(feature = "tx_change_validator_commission")] -pub mod tx_change_validator_commission; -#[cfg(feature = "tx_change_validator_metadata")] -pub mod tx_change_validator_metadata; -#[cfg(feature = "tx_claim_rewards")] -pub mod tx_claim_rewards; -#[cfg(feature = "tx_deactivate_validator")] -pub mod tx_deactivate_validator; -#[cfg(feature = 
"tx_ibc")] -pub mod tx_ibc; -#[cfg(feature = "tx_init_account")] -pub mod tx_init_account; -#[cfg(feature = "tx_init_proposal")] -pub mod tx_init_proposal; -#[cfg(feature = "tx_reactivate_validator")] -pub mod tx_reactivate_validator; -#[cfg(feature = "tx_redelegate")] -pub mod tx_redelegate; -#[cfg(feature = "tx_resign_steward")] -pub mod tx_resign_steward; -#[cfg(feature = "tx_reveal_pk")] -pub mod tx_reveal_pk; -#[cfg(feature = "tx_transfer")] -pub mod tx_transfer; -#[cfg(feature = "tx_unbond")] -pub mod tx_unbond; -#[cfg(feature = "tx_unjail_validator")] -pub mod tx_unjail_validator; -#[cfg(feature = "tx_update_account")] -pub mod tx_update_account; -#[cfg(feature = "tx_update_steward_commission")] -pub mod tx_update_steward_commission; -#[cfg(feature = "tx_vote_proposal")] -pub mod tx_vote_proposal; -#[cfg(feature = "tx_withdraw")] -pub mod tx_withdraw; - -#[cfg(feature = "vp_implicit")] -pub mod vp_implicit; -#[cfg(feature = "vp_user")] -pub mod vp_user; diff --git a/wasm/wasm_source/src/tx_ibc.rs b/wasm/wasm_source/src/tx_ibc.rs deleted file mode 100644 index bf06ebbbca..0000000000 --- a/wasm/wasm_source/src/tx_ibc.rs +++ /dev/null @@ -1,21 +0,0 @@ -//! A tx for IBC. -//! This tx executes an IBC operation according to the given IBC message as the -//! tx_data. This tx uses an IBC message wrapped inside -//! `key::ed25519::SignedTxData` as its input as declared in `ibc` crate. - -use namada_tx_prelude::*; - -#[transaction(gas = 585022)] -fn apply_tx(_ctx: &mut Ctx, _tx_data: Tx) -> TxResult { - // let signed = tx_data; - // let data = signed.data().ok_or_err_msg("Missing data").or_else(|err| { - // ctx.set_commitment_sentinel(); - // Err(err) - // })?; - - // ibc::ibc_actions(ctx).execute(&data).into_storage_result() - - // Temp. 
workaround for - tx_ibc_execute(); - Ok(()) -} diff --git a/wasm/wasm_source/src/tx_init_account.rs b/wasm/wasm_source/src/tx_init_account.rs deleted file mode 100644 index 5b21855c4a..0000000000 --- a/wasm/wasm_source/src/tx_init_account.rs +++ /dev/null @@ -1,44 +0,0 @@ -//! A tx to initialize a new established address with a given public key and -//! a validity predicate. - -use namada_tx_prelude::*; - -#[transaction(gas = 885069)] -fn apply_tx(ctx: &mut Ctx, tx_data: Tx) -> TxResult { - let signed = tx_data; - let data = signed.data().ok_or_err_msg("Missing data").map_err(|err| { - ctx.set_commitment_sentinel(); - err - })?; - let tx_data = account::InitAccount::try_from_slice(&data[..]) - .wrap_err("failed to decode InitAccount")?; - debug_log!("apply_tx called to init a new established account"); - - let vp_code_sec = signed - .get_section(&tx_data.vp_code_hash) - .ok_or_err_msg("vp code section not found") - .map_err(|err| { - ctx.set_commitment_sentinel(); - err - })? - .extra_data_sec() - .ok_or_err_msg("vp code section must be tagged as extra") - .map_err(|err| { - ctx.set_commitment_sentinel(); - err - })?; - - let address = - ctx.init_account(vp_code_sec.code.hash(), &vp_code_sec.tag)?; - - match account::init_account(ctx, &address, tx_data) { - Ok(address) => { - debug_log!("Created account {}", address.encode(),) - } - Err(err) => { - debug_log!("Account creation failed with: {}", err); - panic!() - } - } - Ok(()) -} diff --git a/wasm/wasm_source/src/tx_resign_steward.rs b/wasm/wasm_source/src/tx_resign_steward.rs deleted file mode 100644 index 10d8045895..0000000000 --- a/wasm/wasm_source/src/tx_resign_steward.rs +++ /dev/null @@ -1,18 +0,0 @@ -//! 
A tx to resign as a steward - -use namada_tx_prelude::*; - -#[transaction(gas = 1058710)] -fn apply_tx(ctx: &mut Ctx, tx_data: Tx) -> TxResult { - let signed = tx_data; - let data = signed.data().ok_or_err_msg("Missing data").map_err(|err| { - ctx.set_commitment_sentinel(); - err - })?; - let steward_address = Address::try_from_slice(&data[..]) - .wrap_err("failed to decode an Address")?; - - pgf::remove_steward(ctx, &steward_address)?; - - Ok(()) -} diff --git a/wasm/wasm_source/src/tx_update_steward_commission.rs b/wasm/wasm_source/src/tx_update_steward_commission.rs deleted file mode 100644 index 164414bcd8..0000000000 --- a/wasm/wasm_source/src/tx_update_steward_commission.rs +++ /dev/null @@ -1,19 +0,0 @@ -//! A tx to update the commission distribution for a steward - -use namada_tx_prelude::transaction::pgf::UpdateStewardCommission; -use namada_tx_prelude::*; - -#[transaction(gas = 1222239)] -fn apply_tx(ctx: &mut Ctx, tx_data: Tx) -> TxResult { - let signed = tx_data; - let data = signed.data().ok_or_err_msg("Missing data").map_err(|err| { - ctx.set_commitment_sentinel(); - err - })?; - let steward_commission = UpdateStewardCommission::try_from_slice(&data[..]) - .wrap_err("failed to decode an UpdateStewardCommission")?; - - pgf::update_steward_commission(ctx, steward_commission)?; - - Ok(()) -} diff --git a/wasm_for_tests/wasm_source/Cargo.lock b/wasm_for_tests/Cargo.lock similarity index 92% rename from wasm_for_tests/wasm_source/Cargo.lock rename to wasm_for_tests/Cargo.lock index 63ebe2291c..b1d5ab3726 100644 --- a/wasm_for_tests/wasm_source/Cargo.lock +++ b/wasm_for_tests/Cargo.lock @@ -309,9 +309,9 @@ checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] name = "base64" -version = "0.21.5" +version = "0.21.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35636a1494ede3b646cc98f74f8e62c773a38a659ebc777a2cf26b9b74171df9" +checksum = 
"9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" [[package]] name = "base64ct" @@ -393,9 +393,12 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.4.1" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "327762f6e5a765692301e5bb513e0d9fef63be86bbc14528052b1cd3e6f03e07" +checksum = "cf4b9d6a944f767f8e5e0db018570623c85f3d925ac718db4e06d0187adb21c1" +dependencies = [ + "serde", +] [[package]] name = "bitvec" @@ -496,16 +499,39 @@ dependencies = [ "subtle 2.4.1", ] +[[package]] +name = "borsh" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4114279215a005bc675e386011e594e1d9b800918cea18fcadadcce864a2046b" +dependencies = [ + "borsh-derive 0.10.3", + "hashbrown 0.12.3", +] + [[package]] name = "borsh" version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9897ef0f1bd2362169de6d7e436ea2237dc1085d7d1e4db75f4be34d86f309d1" dependencies = [ - "borsh-derive", + "borsh-derive 1.2.1", "cfg_aliases", ] +[[package]] +name = "borsh-derive" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0754613691538d51f329cce9af41d7b7ca150bc973056f1156611489475f54f7" +dependencies = [ + "borsh-derive-internal", + "borsh-schema-derive-internal", + "proc-macro-crate 0.1.5", + "proc-macro2", + "syn 1.0.109", +] + [[package]] name = "borsh-derive" version = "1.2.1" @@ -520,12 +546,34 @@ dependencies = [ "syn_derive", ] +[[package]] +name = "borsh-derive-internal" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "afb438156919598d2c7bad7e1c0adf3d26ed3840dbc010db1a882a65583ca2fb" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "borsh-ext" version = "1.2.0" source = 
"git+https://github.com/heliaxdev/borsh-ext?tag=v1.2.0#a62fee3e847e512cad9ac0f1fd5a900e5db9ba37" dependencies = [ - "borsh", + "borsh 1.2.1", +] + +[[package]] +name = "borsh-schema-derive-internal" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "634205cc43f74a1b9046ef87c4540ebda95696ec0f315024860cad7c5b0f5ccd" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", ] [[package]] @@ -787,7 +835,7 @@ version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5286a0843c21f8367f7be734f89df9b822e0321d8bcce8d6e735aadff7d74979" dependencies = [ - "base64 0.21.5", + "base64 0.21.7", "bech32 0.9.1", "bs58", "digest 0.10.7", @@ -1118,7 +1166,6 @@ dependencies = [ "ident_case", "proc-macro2", "quote", - "strsim", "syn 2.0.39", ] @@ -1253,6 +1300,12 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "56ce8c6da7551ec6c462cbaf3bfbc75131ebbfa1c944aeaa9dab51ca1c5f0c3b" +[[package]] +name = "dyn-clone" +version = "1.0.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "545b22097d44f8a9581187cdf93de7a71e4722bf51200cfaba810865b49a495d" + [[package]] name = "dynasm" version = "1.2.3" @@ -1373,7 +1426,7 @@ version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fe81b5c06ecfdbc71dd845216f225f53b62a10cb8a16c946836a3467f701d05b" dependencies = [ - "base64 0.21.5", + "base64 0.21.7", "bytes", "hex", "k256", @@ -1708,7 +1761,7 @@ checksum = "25d6c0c9455d93d4990c06e049abf9b30daf148cf461ee939c11d88907c60816" dependencies = [ "async-trait", "auto_impl", - "base64 0.21.5", + "base64 0.21.7", "bytes", "const-hex", "enr", @@ -1717,7 +1770,7 @@ dependencies = [ "futures-timer", "futures-util", "hashers", - "http", + "http 0.2.11", "instant", "jsonwebtoken", "once_cell", @@ -2104,7 +2157,7 @@ dependencies = [ "futures-core", "futures-sink", "futures-util", - "http", + "http 0.2.11", 
"indexmap 2.1.0", "slab", "tokio", @@ -2235,6 +2288,17 @@ dependencies = [ "itoa", ] +[[package]] +name = "http" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b32afd38673a8016f7c9ae69e5af41a58f81b1d31689040f2f1959594ce194ea" +dependencies = [ + "bytes", + "fnv", + "itoa", +] + [[package]] name = "http-body" version = "0.4.5" @@ -2242,7 +2306,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" dependencies = [ "bytes", - "http", + "http 0.2.11", "pin-project-lite", ] @@ -2269,7 +2333,7 @@ dependencies = [ "futures-core", "futures-util", "h2", - "http", + "http 0.2.11", "http-body", "httparse", "httpdate", @@ -2289,7 +2353,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" dependencies = [ "futures-util", - "http", + "http 0.2.11", "hyper", "rustls", "tokio", @@ -2321,9 +2385,9 @@ dependencies = [ [[package]] name = "ibc" -version = "0.48.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "429b6aca6624a9364878e28c90311438c2621a8270942d80732b2651ac38ac74" +checksum = "8057203ab04368297a31ecd5d059bec7108c069d636bcfc9ab20e82d89b480b8" dependencies = [ "ibc-apps", "ibc-clients", @@ -2333,11 +2397,43 @@ dependencies = [ "ibc-primitives", ] +[[package]] +name = "ibc-app-nft-transfer" +version = "0.50.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e931737b69129ae417598fe29eace3e41a9ce32b8649abe3937495973e5843f" +dependencies = [ + "ibc-app-nft-transfer-types", + "ibc-core", + "serde-json-wasm", +] + +[[package]] +name = "ibc-app-nft-transfer-types" +version = "0.50.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2019d3a6adf6b333c55630f52ca71ad8f61702ca1cf291aaf5ee40b7c6c27ba2" +dependencies = [ + "base64 0.21.7", + 
"borsh 0.10.3", + "derive_more", + "displaydoc", + "http 1.0.0", + "ibc-core", + "ibc-proto", + "mime", + "parity-scale-codec", + "scale-info", + "schemars", + "serde", + "serde-json-wasm", +] + [[package]] name = "ibc-app-transfer" -version = "0.48.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b177b343385d9654d99be4709b5ed1574d41f91dfa4044b2d26d688be4179d7c" +checksum = "2595e4cc14828a4141a28b86777040d8bfbabea43838a425137202cff0ee6329" dependencies = [ "ibc-app-transfer-types", "ibc-core", @@ -2346,9 +2442,9 @@ dependencies = [ [[package]] name = "ibc-app-transfer-types" -version = "0.48.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95f92a3eda225e5c86e7bb6501c95986583ac541c4369d3c528349d81390f947" +checksum = "0106c87ddcc619a6a5eac05da2b77287e3958f89dddf951daf9a2dfc470cb5f4" dependencies = [ "derive_more", "displaydoc", @@ -2361,18 +2457,19 @@ dependencies = [ [[package]] name = "ibc-apps" -version = "0.48.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4be40d55ed2dea9f2d05b902a3586f20850c723e4bdbfc4fb0ebe7a66ca5e40" +checksum = "b5738d8c842abce233f41d3be825d01e6ee075251b509c6947d05c75477eaeec" dependencies = [ + "ibc-app-nft-transfer", "ibc-app-transfer", ] [[package]] name = "ibc-client-tendermint" -version = "0.48.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "119aa5873214228bf69bded3f20022b9ae1bc35b6841d295afcd73e53db05ccf" +checksum = "81ef4eefb4fd88167335fee4d212b1ff2fa4dd4e4ce87a58bda1798be1d128ac" dependencies = [ "ibc-client-tendermint-types", "ibc-core-client", @@ -2380,7 +2477,6 @@ dependencies = [ "ibc-core-handler-types", "ibc-core-host", "ibc-primitives", - "prost 0.12.3", "serde", "tendermint", "tendermint-light-client-verifier", @@ -2388,38 +2484,52 @@ dependencies = [ [[package]] name = "ibc-client-tendermint-types" -version = 
"0.48.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f21679016931b332b295a761e65cc122dc6fbfb98444148b681ad3aaa474665" +checksum = "91a224a98b193810e1ef86316e9a08e677eeff6f98b22b9eb9806bd993d3753a" dependencies = [ - "bytes", "displaydoc", "ibc-core-client-types", "ibc-core-commitment-types", "ibc-core-host-types", "ibc-primitives", "ibc-proto", - "prost 0.12.3", "serde", "tendermint", "tendermint-light-client-verifier", "tendermint-proto", ] +[[package]] +name = "ibc-client-wasm-types" +version = "0.50.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e1ea3be7ae201c99b6589c112a253f2fb3c9ae7322d8937a7303d1fbfe76d27" +dependencies = [ + "base64 0.21.7", + "displaydoc", + "ibc-core-client", + "ibc-core-host-types", + "ibc-primitives", + "ibc-proto", + "serde", +] + [[package]] name = "ibc-clients" -version = "0.48.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "685c660323e93107a136aa3dbc412b7fa2eafd315c2fe71184096a43800f8ca5" +checksum = "84fef481dd1ebe5ef69ee8e095c225cb3e51cd3895096ba2884b3f5b827a6ed6" dependencies = [ "ibc-client-tendermint", + "ibc-client-wasm-types", ] [[package]] name = "ibc-core" -version = "0.48.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "100d9d0aa67432c5078a8a1c818e3fc990a193be6d35ed0abeda5b340d16c1da" +checksum = "5aedd421bae80115f44b198bec9af45f234e1c8ff81ee9d5e7f60444d526d2b6" dependencies = [ "ibc-core-channel", "ibc-core-client", @@ -2428,14 +2538,15 @@ dependencies = [ "ibc-core-handler", "ibc-core-host", "ibc-core-router", + "ibc-derive", "ibc-primitives", ] [[package]] name = "ibc-core-channel" -version = "0.48.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ebaa37629ac029f914dfe552ab5dad01ddb240ec885ed0ae68221cbea4e9bfc" +checksum = 
"535048a8fe64101263e35a6a4503474811e379a115db72ee449df882b0f11b45" dependencies = [ "ibc-core-channel-types", "ibc-core-client", @@ -2445,15 +2556,15 @@ dependencies = [ "ibc-core-host", "ibc-core-router", "ibc-primitives", - "prost 0.12.3", ] [[package]] name = "ibc-core-channel-types" -version = "0.48.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa2ba72c56c411b1e0ce6dc3f5e1fa1de9e6c84891f425b7be8a9e1705964378" +checksum = "8d97396ccb1840f4ea6021bbf049a4a7e30a8f5b126f00023ec44b2a48d4dabc" dependencies = [ + "borsh 0.10.3", "derive_more", "displaydoc", "ibc-core-client-types", @@ -2462,7 +2573,9 @@ dependencies = [ "ibc-core-host-types", "ibc-primitives", "ibc-proto", - "prost 0.12.3", + "parity-scale-codec", + "scale-info", + "schemars", "serde", "sha2 0.10.8", "subtle-encoding", @@ -2471,9 +2584,9 @@ dependencies = [ [[package]] name = "ibc-core-client" -version = "0.48.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06c4fac8e05201795073dee8c93d5afe9dfeac9aec2412b4a2b0c5f0d1e1d725" +checksum = "15bcf0c59eaa935fa410497a56862f28c4df68317ea556724f0d0764b6c0307e" dependencies = [ "ibc-core-client-context", "ibc-core-client-types", @@ -2481,14 +2594,13 @@ dependencies = [ "ibc-core-handler-types", "ibc-core-host", "ibc-primitives", - "prost 0.12.3", ] [[package]] name = "ibc-core-client-context" -version = "0.48.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b82abd9489021730d59ab2d00e9438d3711e8e78ecba4d083b64f833301682b" +checksum = "2d37d88be3dc7fd82d45418c257d826244a6b29b7902c76cf9e68fd61f1e9173" dependencies = [ "derive_more", "displaydoc", @@ -2496,26 +2608,27 @@ dependencies = [ "ibc-core-commitment-types", "ibc-core-handler-types", "ibc-core-host-types", - "ibc-derive", "ibc-primitives", - "prost 0.12.3", "subtle-encoding", "tendermint", ] [[package]] name = "ibc-core-client-types" -version = 
"0.48.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bafdbf6db5dab4c8ad610b6940e23b4f8abd0a6ac5e8e2801415a95defd4a583" +checksum = "cb717b1296e6cda0990346ba5203fadd043d5159d7d7173b3765f72f263c29db" dependencies = [ + "borsh 0.10.3", "derive_more", "displaydoc", "ibc-core-commitment-types", "ibc-core-host-types", "ibc-primitives", "ibc-proto", - "prost 0.12.3", + "parity-scale-codec", + "scale-info", + "schemars", "serde", "subtle-encoding", "tendermint", @@ -2523,40 +2636,43 @@ dependencies = [ [[package]] name = "ibc-core-commitment-types" -version = "0.48.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed4256b0216fc49024bac7e01c61b9bb055e31914ffe9ce6f468d7ce496a9357" +checksum = "a10ff34bf57bf4bc668b55208dbfdf312d7907adc6a0e39da2377883f12efada" dependencies = [ + "borsh 0.10.3", "derive_more", "displaydoc", "ibc-primitives", "ibc-proto", "ics23", - "prost 0.12.3", + "parity-scale-codec", + "scale-info", + "schemars", "serde", "subtle-encoding", ] [[package]] name = "ibc-core-connection" -version = "0.48.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48e237b70b9ba0177a4e59ac9048fffac2ac44c334703cc0ae403ad221450850" +checksum = "de7f4f1e78e9ed5a63b09b1405f42713f3d076ba5e7889ec31a520cad4970344" dependencies = [ "ibc-core-client", "ibc-core-connection-types", "ibc-core-handler-types", "ibc-core-host", "ibc-primitives", - "prost 0.12.3", ] [[package]] name = "ibc-core-connection-types" -version = "0.48.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca841416fa29626423917099092f3698ae2735074cb3fe42936ddf6b2ccbf2f7" +checksum = "230d7f547e121147d136c563ae71707a9e3477a9bc1bc6c1dc29051e1408a381" dependencies = [ + "borsh 0.10.3", "derive_more", "displaydoc", "ibc-core-client-types", @@ -2564,7 +2680,9 @@ dependencies = [ "ibc-core-host-types", "ibc-primitives", 
"ibc-proto", - "prost 0.12.3", + "parity-scale-codec", + "scale-info", + "schemars", "serde", "subtle-encoding", "tendermint", @@ -2572,9 +2690,9 @@ dependencies = [ [[package]] name = "ibc-core-handler" -version = "0.48.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a47e5e5a006aa0fc87ec3f5fb1e0ef6dd5aeea5079fa927d799d526c44329987" +checksum = "c60a2d072d8f7d8d64503bbf3fb69ffcd973b92667af053617a36682fadddea5" dependencies = [ "ibc-core-channel", "ibc-core-client", @@ -2588,10 +2706,11 @@ dependencies = [ [[package]] name = "ibc-core-handler-types" -version = "0.48.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e3d59a8a5eb2069530c42783b4fef63472a89e0e9242334351df1bb58aaf542" +checksum = "7fae38340bffa42a74563a12703c994515cca4bab755a0c83089c18c3c1e481a" dependencies = [ + "borsh 0.10.3", "derive_more", "displaydoc", "ibc-core-channel-types", @@ -2602,7 +2721,9 @@ dependencies = [ "ibc-core-router-types", "ibc-primitives", "ibc-proto", - "prost 0.12.3", + "parity-scale-codec", + "scale-info", + "schemars", "serde", "subtle-encoding", "tendermint", @@ -2610,9 +2731,9 @@ dependencies = [ [[package]] name = "ibc-core-host" -version = "0.48.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7aa63c895c0e5a75e42fe859b8fd4250c12bfa8b9c6b114f94c927ecfad38a03" +checksum = "abaa0e2143855d12c19e814dab72a5e28daf5e31780afb1302e983614b248668" dependencies = [ "derive_more", "displaydoc", @@ -2624,15 +2745,14 @@ dependencies = [ "ibc-core-handler-types", "ibc-core-host-types", "ibc-primitives", - "prost 0.12.3", "subtle-encoding", ] [[package]] name = "ibc-core-host-cosmos" -version = "0.48.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a325862af6c20b0df3d27c072a2d802a7232dc1666214d738cdfbd9a9c99720" +checksum = 
"7e3c792be21a340e42344e5bede1695c2d21d62abcc21bbfc7662b5950ffe8d4" dependencies = [ "derive_more", "displaydoc", @@ -2646,7 +2766,6 @@ dependencies = [ "ibc-core-host-types", "ibc-primitives", "ibc-proto", - "prost 0.12.3", "serde", "sha2 0.10.8", "subtle-encoding", @@ -2655,21 +2774,25 @@ dependencies = [ [[package]] name = "ibc-core-host-types" -version = "0.48.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "616955da310febbe93c0569a2feebd9f57cafed3eee5a56b0c3bb953a75f6089" +checksum = "1c25ce3082e036836d60aea3cc24f46dfb248d7718516a9a48e1feb466ce10c1" dependencies = [ + "borsh 0.10.3", "derive_more", "displaydoc", "ibc-primitives", + "parity-scale-codec", + "scale-info", + "schemars", "serde", ] [[package]] name = "ibc-core-router" -version = "0.48.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31fe115da73e0616bdb44400fa6b11ca251648d070c4ff419d58e27804d30243" +checksum = "6c261fb7e9a7de7daafb6a38cb9abdce6e912230e30246eb2ef1bb5db32ba10f" dependencies = [ "derive_more", "displaydoc", @@ -2677,23 +2800,24 @@ dependencies = [ "ibc-core-host-types", "ibc-core-router-types", "ibc-primitives", - "prost 0.12.3", "subtle-encoding", ] [[package]] name = "ibc-core-router-types" -version = "0.48.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d1fbb0bbbdeafa7ac989ba1693ed46d22e0e3eb0bdae478544e31157a4fdba6" +checksum = "6f3b37bc4c11fdc60a328488f4be205106666edda20a4080484d599a8b0978d2" dependencies = [ + "borsh 0.10.3", "derive_more", "displaydoc", "ibc-core-host-types", "ibc-primitives", "ibc-proto", - "ics23", - "prost 0.12.3", + "parity-scale-codec", + "scale-info", + "schemars", "serde", "subtle-encoding", "tendermint", @@ -2701,11 +2825,10 @@ dependencies = [ [[package]] name = "ibc-derive" -version = "0.4.0" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"df07bf5bc1e65e291506b7497633e07967e49b36a8db10cda77a8fd686eb4548" +checksum = "3de1e69ff9d7d6094b720a36bb26fc8078b5e1b0e216e2d0a92f602e6dc8016e" dependencies = [ - "darling", "proc-macro2", "quote", "syn 2.0.39", @@ -2713,14 +2836,18 @@ dependencies = [ [[package]] name = "ibc-primitives" -version = "0.48.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5edea4685267fd68514c87e7aa3a62712340c4cff6903f088a9ab571428a08a" +checksum = "af5524046e645bdfbd96ef932c8ceab6bb2391dc31dee626e274d13e7ac25ec2" dependencies = [ + "borsh 0.10.3", "derive_more", "displaydoc", "ibc-proto", + "parity-scale-codec", "prost 0.12.3", + "scale-info", + "schemars", "serde", "tendermint", "time", @@ -2728,15 +2855,20 @@ dependencies = [ [[package]] name = "ibc-proto" -version = "0.38.0" +version = "0.41.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93cbf4cbe9e5113cc7c70f3208a7029b2205c629502cbb2ae7ea0a09a97d3005" +checksum = "dd4ee32b22d3b06f31529b956f4928e5c9a068d71e46cf6abfa19c31ca550553" dependencies = [ - "base64 0.21.5", + "base64 0.21.7", + "borsh 0.10.3", "bytes", "flex-error", "ics23", + "informalsystems-pbjson 0.7.0", + "parity-scale-codec", "prost 0.12.3", + "scale-info", + "schemars", "serde", "subtle-encoding", "tendermint-proto", @@ -2744,18 +2876,15 @@ dependencies = [ [[package]] name = "ibc-testkit" -version = "0.48.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f550c91648f3db6474880e18cd2bd294096a99b30621aa01a9059b71e3612d98" +checksum = "3443c6ccc7551266dce6e842aa10c472bf73d7cc0c3140aafc55c942e85f530a" dependencies = [ - "bytes", "derive_more", "displaydoc", "ibc", "ibc-proto", "parking_lot", - "primitive-types", - "prost 0.12.3", "subtle-encoding", "tendermint", "tendermint-testgen", @@ -2772,7 +2901,7 @@ dependencies = [ "anyhow", "bytes", "hex", - "informalsystems-pbjson", + "informalsystems-pbjson 0.6.0", "prost 0.12.3", "ripemd", 
"serde", @@ -2865,7 +2994,7 @@ name = "index-set" version = "0.8.0" source = "git+https://github.com/heliaxdev/index-set?tag=v0.8.1#b0d928f83cf0d465ccda299d131e8df2859b5184" dependencies = [ - "borsh", + "borsh 1.2.1", "serde", ] @@ -2890,6 +3019,17 @@ dependencies = [ "hashbrown 0.14.3", ] +[[package]] +name = "indexmap" +version = "2.2.4" +source = "git+https://github.com/heliaxdev/indexmap?tag=2.2.4-heliax-1#b5b5b547bd6ab04bbb16e060326a50ddaeb6c909" +dependencies = [ + "borsh 1.2.1", + "equivalent", + "hashbrown 0.14.3", + "serde", +] + [[package]] name = "informalsystems-pbjson" version = "0.6.0" @@ -2900,6 +3040,16 @@ dependencies = [ "serde", ] +[[package]] +name = "informalsystems-pbjson" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9aa4a0980c8379295100d70854354e78df2ee1c6ca0f96ffe89afeb3140e3a3d" +dependencies = [ + "base64 0.21.7", + "serde", +] + [[package]] name = "inout" version = "0.1.3" @@ -2972,7 +3122,7 @@ version = "8.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6971da4d9c3aa03c3d8f3ff0f4155b534aad021292003895a469716b2a230378" dependencies = [ - "base64 0.21.5", + "base64 0.21.7", "pem", "ring 0.16.20", "serde", @@ -3058,7 +3208,7 @@ version = "0.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85c833ca1e66078851dba29046874e38f08b2c883700aa29a03ddd3b23814ee8" dependencies = [ - "bitflags 2.4.1", + "bitflags 2.5.0", "libc", "redox_syscall", ] @@ -3134,9 +3284,9 @@ dependencies = [ [[package]] name = "masp_note_encryption" version = "1.0.0" -source = "git+https://github.com/anoma/masp?rev=30492323d98b0531fd18b6285cd94afcaa4066d2#30492323d98b0531fd18b6285cd94afcaa4066d2" +source = "git+https://github.com/anoma/masp?rev=6cbc8bd90a71cc280492c44bc3415162093daa76#6cbc8bd90a71cc280492c44bc3415162093daa76" dependencies = [ - "borsh", + "borsh 1.2.1", "chacha20", "chacha20poly1305", "cipher", @@ -3147,7 +3297,7 @@ dependencies = [ 
[[package]] name = "masp_primitives" version = "1.0.0" -source = "git+https://github.com/anoma/masp?rev=30492323d98b0531fd18b6285cd94afcaa4066d2#30492323d98b0531fd18b6285cd94afcaa4066d2" +source = "git+https://github.com/anoma/masp?rev=6cbc8bd90a71cc280492c44bc3415162093daa76#6cbc8bd90a71cc280492c44bc3415162093daa76" dependencies = [ "aes", "bip0039", @@ -3155,7 +3305,7 @@ dependencies = [ "blake2b_simd", "blake2s_simd", "bls12_381", - "borsh", + "borsh 1.2.1", "byteorder", "ff", "fpe", @@ -3179,7 +3329,7 @@ dependencies = [ [[package]] name = "masp_proofs" version = "1.0.0" -source = "git+https://github.com/anoma/masp?rev=30492323d98b0531fd18b6285cd94afcaa4066d2#30492323d98b0531fd18b6285cd94afcaa4066d2" +source = "git+https://github.com/anoma/masp?rev=6cbc8bd90a71cc280492c44bc3415162093daa76#6cbc8bd90a71cc280492c44bc3415162093daa76" dependencies = [ "bellman", "blake2b_simd", @@ -3320,7 +3470,7 @@ version = "0.32.1" dependencies = [ "async-trait", "bimap", - "borsh", + "borsh 1.2.1", "borsh-ext", "circular-queue", "clru", @@ -3394,7 +3544,7 @@ dependencies = [ name = "namada_account" version = "0.32.1" dependencies = [ - "borsh", + "borsh 1.2.1", "namada_core", "namada_macros", "namada_storage", @@ -3402,12 +3552,19 @@ dependencies = [ "serde", ] +[[package]] +name = "namada_controller" +version = "0.32.1" +dependencies = [ + "namada_core", +] + [[package]] name = "namada_core" version = "0.32.1" dependencies = [ "bech32 0.8.1", - "borsh", + "borsh 1.2.1", "borsh-ext", "chrono", "data-encoding", @@ -3419,6 +3576,7 @@ dependencies = [ "ics23", "impl-num-traits", "index-set", + "indexmap 2.2.4", "k256", "masp_primitives", "namada_macros", @@ -3450,7 +3608,7 @@ dependencies = [ name = "namada_ethereum_bridge" version = "0.32.1" dependencies = [ - "borsh", + "borsh 1.2.1", "ethabi", "ethers", "eyre", @@ -3479,7 +3637,7 @@ dependencies = [ name = "namada_gas" version = "0.32.1" dependencies = [ - "borsh", + "borsh 1.2.1", "namada_core", "namada_macros", "serde", @@ 
-3490,7 +3648,7 @@ dependencies = [ name = "namada_governance" version = "0.32.1" dependencies = [ - "borsh", + "borsh 1.2.1", "itertools 0.10.5", "namada_core", "namada_macros", @@ -3508,7 +3666,7 @@ dependencies = [ name = "namada_ibc" version = "0.32.1" dependencies = [ - "borsh", + "borsh 1.2.1", "ibc", "ibc-derive", "ibc-testkit", @@ -3524,6 +3682,7 @@ dependencies = [ "primitive-types", "proptest", "prost 0.12.3", + "serde_json", "sha2 0.9.9", "thiserror", "tracing", @@ -3546,7 +3705,7 @@ dependencies = [ name = "namada_merkle_tree" version = "0.32.1" dependencies = [ - "borsh", + "borsh 1.2.1", "eyre", "ics23", "namada_core", @@ -3560,7 +3719,7 @@ dependencies = [ name = "namada_parameters" version = "0.32.1" dependencies = [ - "borsh", + "borsh 1.2.1", "namada_core", "namada_macros", "namada_storage", @@ -3571,10 +3730,11 @@ dependencies = [ name = "namada_proof_of_stake" version = "0.32.1" dependencies = [ - "borsh", + "borsh 1.2.1", "data-encoding", "derivative", "namada_account", + "namada_controller", "namada_core", "namada_governance", "namada_macros", @@ -3603,7 +3763,7 @@ dependencies = [ "async-trait", "bimap", "bls12_381", - "borsh", + "borsh 1.2.1", "borsh-ext", "circular-queue", "data-encoding", @@ -3621,6 +3781,7 @@ dependencies = [ "namada_account", "namada_core", "namada_ethereum_bridge", + "namada_gas", "namada_governance", "namada_ibc", "namada_macros", @@ -3663,8 +3824,9 @@ dependencies = [ name = "namada_shielded_token" version = "0.32.1" dependencies = [ - "borsh", + "borsh 1.2.1", "masp_primitives", + "namada_controller", "namada_core", "namada_parameters", "namada_storage", @@ -3677,7 +3839,7 @@ dependencies = [ name = "namada_state" version = "0.32.1" dependencies = [ - "borsh", + "borsh 1.2.1", "ics23", "itertools 0.10.5", "namada_core", @@ -3701,7 +3863,7 @@ dependencies = [ name = "namada_storage" version = "0.32.1" dependencies = [ - "borsh", + "borsh 1.2.1", "itertools 0.10.5", "namada_core", "namada_gas", @@ -3719,7 +3881,7 @@ 
dependencies = [ name = "namada_test_utils" version = "0.32.1" dependencies = [ - "borsh", + "borsh 1.2.1", "namada_core", "strum 0.24.1", ] @@ -3781,7 +3943,8 @@ name = "namada_tx" version = "0.32.1" dependencies = [ "ark-bls12-381", - "borsh", + "bitflags 2.5.0", + "borsh 1.2.1", "data-encoding", "masp_primitives", "namada_core", @@ -3811,7 +3974,7 @@ dependencies = [ name = "namada_tx_prelude" version = "0.32.1" dependencies = [ - "borsh", + "borsh 1.2.1", "masp_primitives", "namada_account", "namada_core", @@ -3833,7 +3996,7 @@ dependencies = [ name = "namada_vm_env" version = "0.32.1" dependencies = [ - "borsh", + "borsh 1.2.1", "masp_primitives", "namada_core", ] @@ -3842,7 +4005,7 @@ dependencies = [ name = "namada_vote_ext" version = "0.32.1" dependencies = [ - "borsh", + "borsh 1.2.1", "namada_core", "namada_macros", "namada_tx", @@ -3856,6 +4019,7 @@ dependencies = [ "derivative", "masp_primitives", "namada_core", + "namada_ibc", "namada_storage", "namada_tx", "thiserror", @@ -3865,7 +4029,7 @@ dependencies = [ name = "namada_vp_prelude" version = "0.32.1" dependencies = [ - "borsh", + "borsh 1.2.1", "namada_account", "namada_core", "namada_governance", @@ -3882,18 +4046,6 @@ dependencies = [ "thiserror", ] -[[package]] -name = "namada_wasm_for_tests" -version = "0.32.1" -dependencies = [ - "getrandom 0.2.11", - "namada_test_utils", - "namada_tests", - "namada_tx_prelude", - "namada_vp_prelude", - "wee_alloc", -] - [[package]] name = "nonempty" version = "0.7.0" @@ -4483,6 +4635,15 @@ dependencies = [ "uint", ] +[[package]] +name = "proc-macro-crate" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d6ea3c4595b96363c13943497db34af4460fb474a95c43f4446ad341b8c9785" +dependencies = [ + "toml 0.5.11", +] + [[package]] name = "proc-macro-crate" version = "1.3.1" @@ -4544,7 +4705,7 @@ checksum = "31b476131c3c86cb68032fdc5cb6d5a1045e3e42d96b69fa599fd77701e1f5bf" dependencies = [ "bit-set", "bit-vec", - "bitflags 
2.4.1", + "bitflags 2.5.0", "lazy_static", "num-traits", "rand 0.8.5", @@ -4918,13 +5079,13 @@ version = "0.11.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "046cd98826c46c2ac8ddecae268eb5c2e58628688a5fc7a2643704a73faba95b" dependencies = [ - "base64 0.21.5", + "base64 0.21.7", "bytes", "encoding_rs", "futures-core", "futures-util", "h2", - "http", + "http 0.2.11", "http-body", "hyper", "hyper-rustls", @@ -5092,7 +5253,7 @@ version = "0.38.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9470c4bf8246c8daf25f9598dca807fb6510347b1e1cfa55749113850c79d88a" dependencies = [ - "bitflags 2.4.1", + "bitflags 2.5.0", "errno", "libc", "linux-raw-sys", @@ -5129,7 +5290,7 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" dependencies = [ - "base64 0.21.5", + "base64 0.21.7", ] [[package]] @@ -5217,6 +5378,30 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "schemars" +version = "0.8.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "45a28f4c49489add4ce10783f7911893516f15afe45d015608d41faca6bc4d29" +dependencies = [ + "dyn-clone", + "schemars_derive", + "serde", + "serde_json", +] + +[[package]] +name = "schemars_derive" +version = "0.8.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c767fd6fa65d9ccf9cf026122c1b555f2ef9a4f0cea69da4d7dbc3e258d30967" +dependencies = [ + "proc-macro2", + "quote", + "serde_derive_internals", + "syn 1.0.109", +] + [[package]] name = "scopeguard" version = "1.2.0" @@ -5376,6 +5561,17 @@ dependencies = [ "syn 2.0.39", ] +[[package]] +name = "serde_derive_internals" +version = "0.26.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85bf8229e7920a9f636479437026331ce11aa132b4dde37d121944a44d6e5f3c" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + 
[[package]] name = "serde_json" version = "1.0.108" @@ -5576,7 +5772,7 @@ name = "sparse-merkle-tree" version = "0.3.1-pre" source = "git+https://github.com/heliaxdev/sparse-merkle-tree?rev=515687fe7884cb365067ac86c66ac3613de176bb#515687fe7884cb365067ac86c66ac3613de176bb" dependencies = [ - "borsh", + "borsh 1.2.1", "cfg-if 1.0.0", "ics23", "sha2 0.9.9", @@ -6247,6 +6443,182 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" +[[package]] +name = "tx_fail" +version = "0.32.1" +dependencies = [ + "getrandom 0.2.11", + "namada_test_utils", + "namada_tests", + "namada_tx_prelude", + "namada_vp_prelude", + "proptest", + "test-log", + "tracing", + "tracing-subscriber", + "wee_alloc", +] + +[[package]] +name = "tx_infinite_guest_gas" +version = "0.32.1" +dependencies = [ + "getrandom 0.2.11", + "namada_test_utils", + "namada_tests", + "namada_tx_prelude", + "namada_vp_prelude", + "proptest", + "test-log", + "tracing", + "tracing-subscriber", + "wee_alloc", +] + +[[package]] +name = "tx_infinite_host_gas" +version = "0.32.1" +dependencies = [ + "getrandom 0.2.11", + "namada_test_utils", + "namada_tests", + "namada_tx_prelude", + "namada_vp_prelude", + "proptest", + "test-log", + "tracing", + "tracing-subscriber", + "wee_alloc", +] + +[[package]] +name = "tx_invalid_data" +version = "0.32.1" +dependencies = [ + "getrandom 0.2.11", + "namada_test_utils", + "namada_tests", + "namada_tx_prelude", + "namada_vp_prelude", + "proptest", + "test-log", + "tracing", + "tracing-subscriber", + "wee_alloc", +] + +[[package]] +name = "tx_memory_limit" +version = "0.32.1" +dependencies = [ + "getrandom 0.2.11", + "namada_test_utils", + "namada_tests", + "namada_tx_prelude", + "namada_vp_prelude", + "proptest", + "test-log", + "tracing", + "tracing-subscriber", + "wee_alloc", +] + +[[package]] +name = "tx_no_op" +version = "0.32.1" +dependencies = [ + "getrandom 0.2.11", + 
"namada_test_utils", + "namada_tests", + "namada_tx_prelude", + "namada_vp_prelude", + "proptest", + "test-log", + "tracing", + "tracing-subscriber", + "wee_alloc", +] + +[[package]] +name = "tx_proposal_code" +version = "0.32.1" +dependencies = [ + "getrandom 0.2.11", + "namada_test_utils", + "namada_tests", + "namada_tx_prelude", + "namada_vp_prelude", + "proptest", + "test-log", + "tracing", + "tracing-subscriber", + "wee_alloc", +] + +[[package]] +name = "tx_proposal_ibc_token_inflation" +version = "0.32.1" +dependencies = [ + "getrandom 0.2.11", + "namada_test_utils", + "namada_tests", + "namada_tx_prelude", + "namada_vp_prelude", + "proptest", + "test-log", + "tracing", + "tracing-subscriber", + "wee_alloc", +] + +[[package]] +name = "tx_proposal_masp_reward" +version = "0.32.1" +dependencies = [ + "getrandom 0.2.11", + "namada_test_utils", + "namada_tests", + "namada_tx_prelude", + "namada_vp_prelude", + "proptest", + "test-log", + "tracing", + "tracing-subscriber", + "wee_alloc", +] + +[[package]] +name = "tx_read_storage_key" +version = "0.32.1" +dependencies = [ + "getrandom 0.2.11", + "namada_test_utils", + "namada_tests", + "namada_tx_prelude", + "namada_vp_prelude", + "proptest", + "test-log", + "tracing", + "tracing-subscriber", + "wee_alloc", +] + +[[package]] +name = "tx_write" +version = "0.32.1" +dependencies = [ + "getrandom 0.2.11", + "namada_test_utils", + "namada_tests", + "namada_tx_prelude", + "namada_vp_prelude", + "proptest", + "test-log", + "tracing", + "tracing-subscriber", + "wee_alloc", +] + [[package]] name = "typed-builder" version = "0.18.0" @@ -6391,6 +6763,118 @@ version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" +[[package]] +name = "vp_always_false" +version = "0.32.1" +dependencies = [ + "getrandom 0.2.11", + "namada_test_utils", + "namada_tests", + "namada_tx_prelude", + "namada_vp_prelude", + "proptest", + "test-log", 
+ "tracing", + "tracing-subscriber", + "wee_alloc", +] + +[[package]] +name = "vp_always_true" +version = "0.32.1" +dependencies = [ + "getrandom 0.2.11", + "namada_test_utils", + "namada_tests", + "namada_tx_prelude", + "namada_vp_prelude", + "proptest", + "test-log", + "tracing", + "tracing-subscriber", + "wee_alloc", +] + +[[package]] +name = "vp_eval" +version = "0.32.1" +dependencies = [ + "getrandom 0.2.11", + "namada_test_utils", + "namada_tests", + "namada_tx_prelude", + "namada_vp_prelude", + "proptest", + "test-log", + "tracing", + "tracing-subscriber", + "wee_alloc", +] + +[[package]] +name = "vp_infinite_guest_gas" +version = "0.32.1" +dependencies = [ + "getrandom 0.2.11", + "namada_test_utils", + "namada_tests", + "namada_tx_prelude", + "namada_vp_prelude", + "proptest", + "test-log", + "tracing", + "tracing-subscriber", + "wee_alloc", +] + +[[package]] +name = "vp_infinite_host_gas" +version = "0.32.1" +dependencies = [ + "getrandom 0.2.11", + "namada_test_utils", + "namada_tests", + "namada_tx_prelude", + "namada_vp_prelude", + "proptest", + "test-log", + "tracing", + "tracing-subscriber", + "wee_alloc", +] + +[[package]] +name = "vp_memory_limit" +version = "0.32.1" +dependencies = [ + "getrandom 0.2.11", + "namada_test_utils", + "namada_tests", + "namada_tx_prelude", + "namada_vp_prelude", + "proptest", + "test-log", + "tracing", + "tracing-subscriber", + "wee_alloc", +] + +[[package]] +name = "vp_read_storage_key" +version = "0.32.1" +dependencies = [ + "getrandom 0.2.11", + "namada_test_utils", + "namada_tests", + "namada_tx_prelude", + "namada_vp_prelude", + "proptest", + "test-log", + "tracing", + "tracing-subscriber", + "wee_alloc", +] + [[package]] name = "wait-timeout" version = "0.2.0" diff --git a/wasm_for_tests/Cargo.toml b/wasm_for_tests/Cargo.toml new file mode 100644 index 0000000000..069ca169cc --- /dev/null +++ b/wasm_for_tests/Cargo.toml @@ -0,0 +1,45 @@ +[workspace] +resolver = "2" + +members = [ + "tx_fail", + 
"tx_infinite_guest_gas", + "tx_infinite_host_gas", + "tx_invalid_data", + "tx_memory_limit", + "tx_no_op", + "tx_proposal_code", + "tx_proposal_ibc_token_inflation", + "tx_proposal_masp_reward", + "tx_read_storage_key", + "tx_write", + "vp_always_false", + "vp_always_true", + "vp_eval", + "vp_infinite_guest_gas", + "vp_infinite_host_gas", + "vp_memory_limit", + "vp_read_storage_key", +] + +[workspace.package] +authors = ["Heliax AG "] +edition = "2021" +license = "GPL-3.0" +version = "0.32.1" + +[workspace.dependencies] +namada_test_utils = { path = "../crates/test_utils" } +namada_tx_prelude = { path = "../crates/tx_prelude" } +namada_vp_prelude = { path = "../crates/vp_prelude" } +wee_alloc = "0.4.5" +getrandom = { version = "0.2", features = ["custom"] } + +[profile.release] +# smaller and faster wasm (https://rustwasm.github.io/book/reference/code-size.html#compiling-with-link-time-optimizations-lto) +lto = true +# simply terminate on panics, no unwinding +panic = "abort" +# tell llvm to optimize for size (https://rustwasm.github.io/book/reference/code-size.html#tell-llvm-to-optimize-for-size-instead-of-speed) +opt-level = 'z' +strip = "debuginfo" \ No newline at end of file diff --git a/wasm_for_tests/Makefile b/wasm_for_tests/Makefile new file mode 100644 index 0000000000..fc63e4bfea --- /dev/null +++ b/wasm_for_tests/Makefile @@ -0,0 +1,61 @@ +cargo := $(env) cargo +rustup := $(env) rustup +# Nightly build is currently used for rustfmt and clippy. +nightly := $(shell cat ../rust-nightly-version) + +# All the wasms that can be built from this source, switched via Cargo features +# Wasms can be added via the Cargo.toml `[features]` list. 
+wasms := tx_fail +wasms += tx_infinite_guest_gas +wasms += tx_infinite_host_gas +wasms += tx_invalid_data +wasms += tx_memory_limit +wasms += tx_no_op +wasms += tx_proposal_code +wasms += tx_proposal_ibc_token_inflation +wasms += tx_proposal_masp_reward +wasms += tx_read_storage_key +wasms += tx_write +wasms += vp_always_false +wasms += vp_always_true +wasms += vp_eval +wasms += vp_infinite_guest_gas +wasms += vp_infinite_host_gas +wasms += vp_memory_limit +wasms += vp_read_storage_key + + +# Build all wasms in release mode +all: + RUSTFLAGS='-C link-arg=-s' $(cargo) build --release --target wasm32-unknown-unknown --target-dir 'target' + cp target/wasm32-unknown-unknown/release/*.wasm . + +debug: + RUSTFLAGS='-C link-arg=-s' $(cargo) build --target wasm32-unknown-unknown --target-dir 'target' + cp target/wasm32-unknown-unknown/debug/*.wasm . + +check: + $(cargo) +$(nightly) check --workspace --target wasm32-unknown-unknown + +clippy: + $(cargo) +$(nightly) clippy --workspace -- -D warnings + +clippy-fix: + $(cargo) +$(nightly) clippy --fix -Z unstable-options --workspace --allow-dirty --allow-staged + +fmt: + $(cargo) +$(nightly) fmt + +fmt-check: + $(cargo) +$(nightly) fmt --check + +test: + $(cargo) +$(nightly) test -- -Z unstable-options --report-time + +clean: + $(cargo) clean + +deps: + $(rustup) target add wasm32-unknown-unknown + +.PHONY: all debug check clippy clippy-fix fmt fmt-check test clean deps \ No newline at end of file diff --git a/wasm_for_tests/README.md b/wasm_for_tests/README.md new file mode 100644 index 0000000000..9ae40634f3 --- /dev/null +++ b/wasm_for_tests/README.md @@ -0,0 +1,12 @@ +# WASM source code in Rust + +This crate contains WASM implementations of various transactions and validity predicates, used for testing. 
+ +## Quick start + +```shell +# To be able to build this, make sure to have +make deps + +# Build - this will output .wasm files in the parent dir +make all diff --git a/wasm_for_tests/release.toml b/wasm_for_tests/release.toml new file mode 100644 index 0000000000..3a264faa08 --- /dev/null +++ b/wasm_for_tests/release.toml @@ -0,0 +1,7 @@ +allow-branch = ["main", "maint-*"] +consolidate-commits = true +pre-release-commit-message = "fixup! Namada {{version}}" +publish = false +push = false +shared-version = true +tag = false diff --git a/wasm_for_tests/tx_fail.wasm b/wasm_for_tests/tx_fail.wasm index dc63cb3a47..c133ea8e64 100755 Binary files a/wasm_for_tests/tx_fail.wasm and b/wasm_for_tests/tx_fail.wasm differ diff --git a/wasm_for_tests/tx_fail/Cargo.toml b/wasm_for_tests/tx_fail/Cargo.toml new file mode 100644 index 0000000000..03fd1a3235 --- /dev/null +++ b/wasm_for_tests/tx_fail/Cargo.toml @@ -0,0 +1,29 @@ +[package] +name = "tx_fail" +description = "Wasm transaction used for testing." 
+authors.workspace = true +edition.workspace = true +license.workspace = true +version.workspace = true + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +namada_test_utils.workspace = true +namada_tx_prelude.workspace = true +namada_vp_prelude.workspace = true +wee_alloc.workspace = true +getrandom.workspace = true + +[dev-dependencies] +namada_tests = { path = "../../crates/tests", default-features = false, features = [ + "wasm-runtime", +] } + +proptest = "1.4.0" +test-log = {version = "0.2.14", default-features = false, features = ["trace"]} +tracing = "0.1.30" +tracing-subscriber = {version = "0.3.7", default-features = false, features = ["env-filter", "fmt"]} + +[lib] +crate-type = ["cdylib"] \ No newline at end of file diff --git a/wasm_for_tests/tx_fail/src/lib.rs b/wasm_for_tests/tx_fail/src/lib.rs new file mode 100644 index 0000000000..df94e287d4 --- /dev/null +++ b/wasm_for_tests/tx_fail/src/lib.rs @@ -0,0 +1,6 @@ +use namada_tx_prelude::*; + +#[transaction] +fn apply_tx(_ctx: &mut Ctx, _tx_data: Tx) -> TxResult { + Err(Error::SimpleMessage("failed tx")) +} diff --git a/wasm_for_tests/tx_infinite_guest_gas.wasm b/wasm_for_tests/tx_infinite_guest_gas.wasm new file mode 100755 index 0000000000..befe9c6726 Binary files /dev/null and b/wasm_for_tests/tx_infinite_guest_gas.wasm differ diff --git a/wasm_for_tests/tx_infinite_guest_gas/Cargo.toml b/wasm_for_tests/tx_infinite_guest_gas/Cargo.toml new file mode 100644 index 0000000000..d4734326ff --- /dev/null +++ b/wasm_for_tests/tx_infinite_guest_gas/Cargo.toml @@ -0,0 +1,29 @@ +[package] +name = "tx_infinite_guest_gas" +description = "Wasm transaction used for testing." 
+authors.workspace = true +edition.workspace = true +license.workspace = true +version.workspace = true + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +namada_test_utils.workspace = true +namada_tx_prelude.workspace = true +namada_vp_prelude.workspace = true +wee_alloc.workspace = true +getrandom.workspace = true + +[dev-dependencies] +namada_tests = { path = "../../crates/tests", default-features = false, features = [ + "wasm-runtime", +] } + +proptest = "1.4.0" +test-log = {version = "0.2.14", default-features = false, features = ["trace"]} +tracing = "0.1.30" +tracing-subscriber = {version = "0.3.7", default-features = false, features = ["env-filter", "fmt"]} + +[lib] +crate-type = ["cdylib"] \ No newline at end of file diff --git a/wasm_for_tests/tx_infinite_guest_gas/src/lib.rs b/wasm_for_tests/tx_infinite_guest_gas/src/lib.rs new file mode 100644 index 0000000000..a6d63067ac --- /dev/null +++ b/wasm_for_tests/tx_infinite_guest_gas/src/lib.rs @@ -0,0 +1,8 @@ +use namada_tx_prelude::*; + +/// A tx that endlessly charges gas from the guest environment +#[transaction] +fn apply_tx(_ctx: &mut Ctx, _tx_data: Tx) -> TxResult { + #[allow(clippy::empty_loop)] + loop {} +} diff --git a/wasm_for_tests/tx_infinite_host_gas.wasm b/wasm_for_tests/tx_infinite_host_gas.wasm new file mode 100755 index 0000000000..48c2adb274 Binary files /dev/null and b/wasm_for_tests/tx_infinite_host_gas.wasm differ diff --git a/wasm_for_tests/tx_infinite_host_gas/Cargo.toml b/wasm_for_tests/tx_infinite_host_gas/Cargo.toml new file mode 100644 index 0000000000..1c6167e9ba --- /dev/null +++ b/wasm_for_tests/tx_infinite_host_gas/Cargo.toml @@ -0,0 +1,29 @@ +[package] +name = "tx_infinite_host_gas" +description = "Wasm transaction used for testing." 
+authors.workspace = true +edition.workspace = true +license.workspace = true +version.workspace = true + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +namada_test_utils.workspace = true +namada_tx_prelude.workspace = true +namada_vp_prelude.workspace = true +wee_alloc.workspace = true +getrandom.workspace = true + +[dev-dependencies] +namada_tests = { path = "../../crates/tests", default-features = false, features = [ + "wasm-runtime", +] } + +proptest = "1.4.0" +test-log = {version = "0.2.14", default-features = false, features = ["trace"]} +tracing = "0.1.30" +tracing-subscriber = {version = "0.3.7", default-features = false, features = ["env-filter", "fmt"]} + +[lib] +crate-type = ["cdylib"] \ No newline at end of file diff --git a/wasm_for_tests/tx_infinite_host_gas/src/lib.rs b/wasm_for_tests/tx_infinite_host_gas/src/lib.rs new file mode 100644 index 0000000000..b15e7ab276 --- /dev/null +++ b/wasm_for_tests/tx_infinite_host_gas/src/lib.rs @@ -0,0 +1,12 @@ +use namada_tx_prelude::*; + +/// A tx that endlessly charges gas from the host environment +#[transaction] +fn apply_tx(ctx: &mut Ctx, _tx_data: Tx) -> TxResult { + let target_key = parameters_storage::get_tx_allowlist_storage_key(); + loop { + // NOTE: don't propagate the error to verify that execution abortion + // is done in host and does not require guest cooperation + let _ = ctx.write(&target_key, vec!["hash"]); + } +} diff --git a/wasm_for_tests/tx_invalid_data.wasm b/wasm_for_tests/tx_invalid_data.wasm new file mode 100755 index 0000000000..1e38fef5d4 Binary files /dev/null and b/wasm_for_tests/tx_invalid_data.wasm differ diff --git a/wasm_for_tests/tx_invalid_data/Cargo.toml b/wasm_for_tests/tx_invalid_data/Cargo.toml new file mode 100644 index 0000000000..329aa86081 --- /dev/null +++ b/wasm_for_tests/tx_invalid_data/Cargo.toml @@ -0,0 +1,29 @@ +[package] +name = "tx_invalid_data" +description = "Wasm transaction used for 
testing." +authors.workspace = true +edition.workspace = true +license.workspace = true +version.workspace = true + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +namada_test_utils.workspace = true +namada_tx_prelude.workspace = true +namada_vp_prelude.workspace = true +wee_alloc.workspace = true +getrandom.workspace = true + +[dev-dependencies] +namada_tests = { path = "../../crates/tests", default-features = false, features = [ + "wasm-runtime", +] } + +proptest = "1.4.0" +test-log = {version = "0.2.14", default-features = false, features = ["trace"]} +tracing = "0.1.30" +tracing-subscriber = {version = "0.3.7", default-features = false, features = ["env-filter", "fmt"]} + +[lib] +crate-type = ["cdylib"] \ No newline at end of file diff --git a/wasm_for_tests/tx_invalid_data/src/lib.rs b/wasm_for_tests/tx_invalid_data/src/lib.rs new file mode 100644 index 0000000000..eda9df65c4 --- /dev/null +++ b/wasm_for_tests/tx_invalid_data/src/lib.rs @@ -0,0 +1,11 @@ +use namada_tx_prelude::*; + +#[transaction] +fn apply_tx(ctx: &mut Ctx, tx_data: Tx) -> TxResult { + let signed = tx_data; + let _data = signed.data().ok_or_err_msg("Missing data").map_err(|err| { + ctx.set_commitment_sentinel(); + err + })?; + Ok(()) +} diff --git a/wasm_for_tests/tx_memory_limit.wasm b/wasm_for_tests/tx_memory_limit.wasm index 2c8bc9ca12..dd5b173a0f 100755 Binary files a/wasm_for_tests/tx_memory_limit.wasm and b/wasm_for_tests/tx_memory_limit.wasm differ diff --git a/wasm_for_tests/tx_memory_limit/Cargo.toml b/wasm_for_tests/tx_memory_limit/Cargo.toml new file mode 100644 index 0000000000..434ebe0b88 --- /dev/null +++ b/wasm_for_tests/tx_memory_limit/Cargo.toml @@ -0,0 +1,29 @@ +[package] +name = "tx_memory_limit" +description = "Wasm transaction used for testing." 
+authors.workspace = true +edition.workspace = true +license.workspace = true +version.workspace = true + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +namada_test_utils.workspace = true +namada_tx_prelude.workspace = true +namada_vp_prelude.workspace = true +wee_alloc.workspace = true +getrandom.workspace = true + +[dev-dependencies] +namada_tests = { path = "../../crates/tests", default-features = false, features = [ + "wasm-runtime", +] } + +proptest = "1.4.0" +test-log = {version = "0.2.14", default-features = false, features = ["trace"]} +tracing = "0.1.30" +tracing-subscriber = {version = "0.3.7", default-features = false, features = ["env-filter", "fmt"]} + +[lib] +crate-type = ["cdylib"] \ No newline at end of file diff --git a/wasm_for_tests/tx_memory_limit/src/lib.rs b/wasm_for_tests/tx_memory_limit/src/lib.rs new file mode 100644 index 0000000000..a9a17ebde5 --- /dev/null +++ b/wasm_for_tests/tx_memory_limit/src/lib.rs @@ -0,0 +1,12 @@ +use namada_tx_prelude::*; + +#[transaction] +fn apply_tx(_ctx: &mut Ctx, tx_data: Tx) -> TxResult { + let len = + usize::try_from_slice(&tx_data.data().as_ref().unwrap()[..]).unwrap(); + log_string(format!("allocate len {}", len)); + let bytes: Vec = vec![6_u8; len]; + // use the variable to prevent it from compiler optimizing it away + log_string(format!("{:?}", &bytes[..8])); + Ok(()) +} diff --git a/wasm_for_tests/tx_mint_tokens.wasm b/wasm_for_tests/tx_mint_tokens.wasm deleted file mode 100755 index 761e1a14e9..0000000000 Binary files a/wasm_for_tests/tx_mint_tokens.wasm and /dev/null differ diff --git a/wasm_for_tests/tx_no_op.wasm b/wasm_for_tests/tx_no_op.wasm index 1b85a13bef..60830f4a2d 100755 Binary files a/wasm_for_tests/tx_no_op.wasm and b/wasm_for_tests/tx_no_op.wasm differ diff --git a/wasm_for_tests/tx_no_op/Cargo.toml b/wasm_for_tests/tx_no_op/Cargo.toml new file mode 100644 index 0000000000..665504cdf1 --- /dev/null +++ 
b/wasm_for_tests/tx_no_op/Cargo.toml @@ -0,0 +1,29 @@ +[package] +name = "tx_no_op" +description = "Wasm transaction used for testing." +authors.workspace = true +edition.workspace = true +license.workspace = true +version.workspace = true + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +namada_test_utils.workspace = true +namada_tx_prelude.workspace = true +namada_vp_prelude.workspace = true +wee_alloc.workspace = true +getrandom.workspace = true + +[dev-dependencies] +namada_tests = { path = "../../crates/tests", default-features = false, features = [ + "wasm-runtime", +] } + +proptest = "1.4.0" +test-log = {version = "0.2.14", default-features = false, features = ["trace"]} +tracing = "0.1.30" +tracing-subscriber = {version = "0.3.7", default-features = false, features = ["env-filter", "fmt"]} + +[lib] +crate-type = ["cdylib"] \ No newline at end of file diff --git a/wasm_for_tests/tx_no_op/src/lib.rs b/wasm_for_tests/tx_no_op/src/lib.rs new file mode 100644 index 0000000000..a819b73be6 --- /dev/null +++ b/wasm_for_tests/tx_no_op/src/lib.rs @@ -0,0 +1,6 @@ +use namada_tx_prelude::*; + +#[transaction] +fn apply_tx(_ctx: &mut Ctx, _tx_data: Tx) -> TxResult { + Ok(()) +} diff --git a/wasm_for_tests/tx_proposal_code.wasm b/wasm_for_tests/tx_proposal_code.wasm index a2d9a0d8c3..cb04028e69 100755 Binary files a/wasm_for_tests/tx_proposal_code.wasm and b/wasm_for_tests/tx_proposal_code.wasm differ diff --git a/wasm_for_tests/tx_proposal_code/Cargo.toml b/wasm_for_tests/tx_proposal_code/Cargo.toml new file mode 100644 index 0000000000..eaaa7177e6 --- /dev/null +++ b/wasm_for_tests/tx_proposal_code/Cargo.toml @@ -0,0 +1,29 @@ +[package] +name = "tx_proposal_code" +description = "Wasm transaction used for testing." 
+authors.workspace = true +edition.workspace = true +license.workspace = true +version.workspace = true + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +namada_test_utils.workspace = true +namada_tx_prelude.workspace = true +namada_vp_prelude.workspace = true +wee_alloc.workspace = true +getrandom.workspace = true + +[dev-dependencies] +namada_tests = { path = "../../crates/tests", default-features = false, features = [ + "wasm-runtime", +] } + +proptest = "1.4.0" +test-log = {version = "0.2.14", default-features = false, features = ["trace"]} +tracing = "0.1.30" +tracing-subscriber = {version = "0.3.7", default-features = false, features = ["env-filter", "fmt"]} + +[lib] +crate-type = ["cdylib"] \ No newline at end of file diff --git a/wasm_for_tests/tx_proposal_code/src/lib.rs b/wasm_for_tests/tx_proposal_code/src/lib.rs new file mode 100644 index 0000000000..2d050755e7 --- /dev/null +++ b/wasm_for_tests/tx_proposal_code/src/lib.rs @@ -0,0 +1,13 @@ +use namada_tx_prelude::*; + +#[transaction] +fn apply_tx(ctx: &mut Ctx, _tx_data: Tx) -> TxResult { + // governance + let target_key = gov_storage::keys::get_min_proposal_grace_epochs_key(); + ctx.write(&target_key, 9_u64)?; + + // parameters + let target_key = parameters_storage::get_vp_allowlist_storage_key(); + ctx.write(&target_key, vec!["hash"])?; + Ok(()) +} diff --git a/wasm_for_tests/tx_proposal_ibc_token_inflation.wasm b/wasm_for_tests/tx_proposal_ibc_token_inflation.wasm index f0d6593fd0..40333f8ba3 100755 Binary files a/wasm_for_tests/tx_proposal_ibc_token_inflation.wasm and b/wasm_for_tests/tx_proposal_ibc_token_inflation.wasm differ diff --git a/wasm_for_tests/tx_proposal_ibc_token_inflation/Cargo.toml b/wasm_for_tests/tx_proposal_ibc_token_inflation/Cargo.toml new file mode 100644 index 0000000000..25d31ccd51 --- /dev/null +++ b/wasm_for_tests/tx_proposal_ibc_token_inflation/Cargo.toml @@ -0,0 +1,29 @@ +[package] +name = 
"tx_proposal_ibc_token_inflation" +description = "Wasm transaction used for testing." +authors.workspace = true +edition.workspace = true +license.workspace = true +version.workspace = true + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +namada_test_utils.workspace = true +namada_tx_prelude.workspace = true +namada_vp_prelude.workspace = true +wee_alloc.workspace = true +getrandom.workspace = true + +[dev-dependencies] +namada_tests = { path = "../../crates/tests", default-features = false, features = [ + "wasm-runtime", +] } + +proptest = "1.4.0" +test-log = {version = "0.2.14", default-features = false, features = ["trace"]} +tracing = "0.1.30" +tracing-subscriber = {version = "0.3.7", default-features = false, features = ["env-filter", "fmt"]} + +[lib] +crate-type = ["cdylib"] \ No newline at end of file diff --git a/wasm_for_tests/tx_proposal_ibc_token_inflation/src/lib.rs b/wasm_for_tests/tx_proposal_ibc_token_inflation/src/lib.rs new file mode 100644 index 0000000000..176dc0eeaa --- /dev/null +++ b/wasm_for_tests/tx_proposal_ibc_token_inflation/src/lib.rs @@ -0,0 +1,57 @@ +use std::str::FromStr; + +use dec::Dec; +use namada_tx_prelude::*; + +// Denom of tokens over IBC is always zero +const IBC_TOKEN_DENOM: u8 = 0; +const CHANNEL_ID: &str = "channel-0"; +const BASE_TOKEN: &str = "tnam1qyvfwdkz8zgs9n3qn9xhp8scyf8crrxwuq26r6gy"; + +#[transaction] +fn apply_tx(ctx: &mut Ctx, _tx_data: Tx) -> TxResult { + let ibc_denom = format!("transfer/{CHANNEL_ID}/{BASE_TOKEN}"); + let ibc_token = ibc::ibc_token(&ibc_denom); + + let shielded_token_last_inflation_key = + token::storage_key::masp_last_inflation_key(&ibc_token); + let shielded_token_last_locked_amount_key = + token::storage_key::masp_last_locked_amount_key(&ibc_token); + let shielded_token_max_rewards_key = + token::storage_key::masp_max_reward_rate_key(&ibc_token); + let shielded_token_target_locked_amount_key = + 
token::storage_key::masp_locked_amount_target_key(&ibc_token); + let shielded_token_kp_gain_key = + token::storage_key::masp_kp_gain_key(&ibc_token); + let shielded_token_kd_gain_key = + token::storage_key::masp_kd_gain_key(&ibc_token); + + let token_map_key = token::storage_key::masp_token_map_key(); + let mut token_map: masp::TokenMap = + ctx.read(&token_map_key)?.unwrap_or_default(); + token_map.insert(ibc_denom, ibc_token); + ctx.write(&token_map_key, token_map)?; + + ctx.write(&shielded_token_last_inflation_key, token::Amount::zero())?; + ctx.write( + &shielded_token_last_locked_amount_key, + token::Amount::zero(), + )?; + ctx.write( + &shielded_token_max_rewards_key, + Dec::from_str("0.01").unwrap(), + )?; + ctx.write( + &shielded_token_target_locked_amount_key, + token::Amount::from_uint(1_000_000_000, IBC_TOKEN_DENOM).unwrap(), + )?; + ctx.write( + &shielded_token_kp_gain_key, + Dec::from_str("120000").unwrap(), + )?; + ctx.write( + &shielded_token_kd_gain_key, + Dec::from_str("120000").unwrap(), + )?; + Ok(()) +} diff --git a/wasm_for_tests/tx_proposal_masp_reward.wasm b/wasm_for_tests/tx_proposal_masp_reward.wasm index f54edf14a5..5535ba4213 100755 Binary files a/wasm_for_tests/tx_proposal_masp_reward.wasm and b/wasm_for_tests/tx_proposal_masp_reward.wasm differ diff --git a/wasm_for_tests/tx_proposal_masp_reward/Cargo.toml b/wasm_for_tests/tx_proposal_masp_reward/Cargo.toml new file mode 100644 index 0000000000..83dd4e1f61 --- /dev/null +++ b/wasm_for_tests/tx_proposal_masp_reward/Cargo.toml @@ -0,0 +1,29 @@ +[package] +name = "tx_proposal_masp_reward" +description = "Wasm transaction used for testing." 
+authors.workspace = true +edition.workspace = true +license.workspace = true +version.workspace = true + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +namada_test_utils.workspace = true +namada_tx_prelude.workspace = true +namada_vp_prelude.workspace = true +wee_alloc.workspace = true +getrandom.workspace = true + +[dev-dependencies] +namada_tests = { path = "../../crates/tests", default-features = false, features = [ + "wasm-runtime", +] } + +proptest = "1.4.0" +test-log = {version = "0.2.14", default-features = false, features = ["trace"]} +tracing = "0.1.30" +tracing-subscriber = {version = "0.3.7", default-features = false, features = ["env-filter", "fmt"]} + +[lib] +crate-type = ["cdylib"] \ No newline at end of file diff --git a/wasm_for_tests/tx_proposal_masp_reward/src/lib.rs b/wasm_for_tests/tx_proposal_masp_reward/src/lib.rs new file mode 100644 index 0000000000..5ca9e23d77 --- /dev/null +++ b/wasm_for_tests/tx_proposal_masp_reward/src/lib.rs @@ -0,0 +1,15 @@ +use std::str::FromStr; + +use dec::Dec; +use namada_tx_prelude::*; + +#[transaction] +fn apply_tx(ctx: &mut Ctx, _tx_data: Tx) -> TxResult { + let native_token = ctx.get_native_token()?; + let shielded_rewards_key = + token::storage_key::masp_max_reward_rate_key(&native_token); + + ctx.write(&shielded_rewards_key, Dec::from_str("0.05").unwrap())?; + + Ok(()) +} diff --git a/wasm_for_tests/tx_read_storage_key.wasm b/wasm_for_tests/tx_read_storage_key.wasm index e084776585..6c8c68fee7 100755 Binary files a/wasm_for_tests/tx_read_storage_key.wasm and b/wasm_for_tests/tx_read_storage_key.wasm differ diff --git a/wasm_for_tests/tx_read_storage_key/Cargo.toml b/wasm_for_tests/tx_read_storage_key/Cargo.toml new file mode 100644 index 0000000000..8d6f97fa22 --- /dev/null +++ b/wasm_for_tests/tx_read_storage_key/Cargo.toml @@ -0,0 +1,29 @@ +[package] +name = "tx_read_storage_key" +description = "Wasm transaction used for testing." 
+authors.workspace = true +edition.workspace = true +license.workspace = true +version.workspace = true + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +namada_test_utils.workspace = true +namada_tx_prelude.workspace = true +namada_vp_prelude.workspace = true +wee_alloc.workspace = true +getrandom.workspace = true + +[dev-dependencies] +namada_tests = { path = "../../crates/tests", default-features = false, features = [ + "wasm-runtime", +] } + +proptest = "1.4.0" +test-log = {version = "0.2.14", default-features = false, features = ["trace"]} +tracing = "0.1.30" +tracing-subscriber = {version = "0.3.7", default-features = false, features = ["env-filter", "fmt"]} + +[lib] +crate-type = ["cdylib"] \ No newline at end of file diff --git a/wasm_for_tests/tx_read_storage_key/src/lib.rs b/wasm_for_tests/tx_read_storage_key/src/lib.rs new file mode 100644 index 0000000000..fab14f404a --- /dev/null +++ b/wasm_for_tests/tx_read_storage_key/src/lib.rs @@ -0,0 +1,12 @@ +use namada_tx_prelude::*; + +#[transaction] +fn apply_tx(ctx: &mut Ctx, tx_data: Tx) -> TxResult { + // Allocates a memory of size given from the `tx_data (usize)` + let key = + storage::Key::try_from_slice(&tx_data.data().as_ref().unwrap()[..]) + .unwrap(); + log_string(format!("key {}", key)); + let _result: Vec = ctx.read(&key)?.unwrap(); + Ok(()) +} diff --git a/wasm_for_tests/tx_write.wasm b/wasm_for_tests/tx_write.wasm index 90e09886f7..321f95bc50 100755 Binary files a/wasm_for_tests/tx_write.wasm and b/wasm_for_tests/tx_write.wasm differ diff --git a/wasm_for_tests/tx_write/Cargo.toml b/wasm_for_tests/tx_write/Cargo.toml new file mode 100644 index 0000000000..25ee6acc5a --- /dev/null +++ b/wasm_for_tests/tx_write/Cargo.toml @@ -0,0 +1,29 @@ +[package] +name = "tx_write" +description = "Wasm transaction used for testing." 
+authors.workspace = true +edition.workspace = true +license.workspace = true +version.workspace = true + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +namada_test_utils.workspace = true +namada_tx_prelude.workspace = true +namada_vp_prelude.workspace = true +wee_alloc.workspace = true +getrandom.workspace = true + +[dev-dependencies] +namada_tests = { path = "../../crates/tests", default-features = false, features = [ + "wasm-runtime", +] } + +proptest = "1.4.0" +test-log = {version = "0.2.14", default-features = false, features = ["trace"]} +tracing = "0.1.30" +tracing-subscriber = {version = "0.3.7", default-features = false, features = ["env-filter", "fmt"]} + +[lib] +crate-type = ["cdylib"] \ No newline at end of file diff --git a/wasm_for_tests/tx_write/src/lib.rs b/wasm_for_tests/tx_write/src/lib.rs new file mode 100644 index 0000000000..f8fd53f71b --- /dev/null +++ b/wasm_for_tests/tx_write/src/lib.rs @@ -0,0 +1,58 @@ +use namada_test_utils::tx_data::TxWriteData; +use namada_tx_prelude::*; + +const TX_NAME: &str = "tx_write"; + +fn log(msg: &str) { + log_string(format!("[{}] {}", TX_NAME, msg)) +} + +fn fatal(msg: &str, err: impl std::error::Error) -> ! { + log(&format!("ERROR: {} - {:?}", msg, err)); + panic!() +} + +fn fatal_msg(msg: &str) -> ! 
{ + log(msg); + panic!() +} + +#[transaction] +fn apply_tx(ctx: &mut Ctx, tx_data: Tx) -> TxResult { + let signed = tx_data; + let data = match signed.data() { + Some(data) => { + log(&format!("got data ({} bytes)", data.len())); + data + } + None => { + fatal_msg("no data provided"); + } + }; + let TxWriteData { key, value } = + match TxWriteData::try_from_slice(&data[..]) { + Ok(write_op) => { + log(&format!( + "parsed WriteOp to key {} ({} bytes)", + &write_op.key, + &write_op.value.len(), + )); + write_op + } + Err(error) => fatal("deserializing WriteOp", error), + }; + let existing_value: Option = ctx.read(&key)?; + match existing_value { + Some(existing_value) => { + log(&format!("already present value is {}", existing_value)); + } + None => { + log("no already present value"); + } + } + log(&format!("attempting to write new value to key {}", key)); + // using `ctx.write_bytes` instead of `ctx.write` here, as we want to + // write the actual bytes, not a Borsh-serialization of a `Vec` + ctx.write_bytes(&key, &value[..])?; + Ok(()) +} diff --git a/wasm_for_tests/tx_write_storage_key.wasm b/wasm_for_tests/tx_write_storage_key.wasm deleted file mode 100755 index 2f6e3946b4..0000000000 Binary files a/wasm_for_tests/tx_write_storage_key.wasm and /dev/null differ diff --git a/wasm_for_tests/vp_always_false.wasm b/wasm_for_tests/vp_always_false.wasm index 0b1672cf59..f01e3167fb 100755 Binary files a/wasm_for_tests/vp_always_false.wasm and b/wasm_for_tests/vp_always_false.wasm differ diff --git a/wasm_for_tests/vp_always_false/Cargo.toml b/wasm_for_tests/vp_always_false/Cargo.toml new file mode 100644 index 0000000000..c3211d7e01 --- /dev/null +++ b/wasm_for_tests/vp_always_false/Cargo.toml @@ -0,0 +1,29 @@ +[package] +name = "vp_always_false" +description = "Wasm vp used for testing." 
+authors.workspace = true +edition.workspace = true +license.workspace = true +version.workspace = true + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +namada_test_utils.workspace = true +namada_tx_prelude.workspace = true +namada_vp_prelude.workspace = true +wee_alloc.workspace = true +getrandom.workspace = true + +[dev-dependencies] +namada_tests = { path = "../../crates/tests", default-features = false, features = [ + "wasm-runtime", +] } + +proptest = "1.4.0" +test-log = {version = "0.2.14", default-features = false, features = ["trace"]} +tracing = "0.1.30" +tracing-subscriber = {version = "0.3.7", default-features = false, features = ["env-filter", "fmt"]} + +[lib] +crate-type = ["cdylib"] \ No newline at end of file diff --git a/wasm_for_tests/vp_always_false/src/lib.rs b/wasm_for_tests/vp_always_false/src/lib.rs new file mode 100644 index 0000000000..17b64ddbd6 --- /dev/null +++ b/wasm_for_tests/vp_always_false/src/lib.rs @@ -0,0 +1,12 @@ +use namada_vp_prelude::*; + +#[validity_predicate] +fn validate_tx( + _ctx: &Ctx, + _tx_data: Tx, + _addr: Address, + _keys_changed: BTreeSet, + _verifiers: BTreeSet
, +) -> VpResult { + reject() +} diff --git a/wasm_for_tests/vp_always_true.wasm b/wasm_for_tests/vp_always_true.wasm index 5ac5977994..4422ebaef0 100755 Binary files a/wasm_for_tests/vp_always_true.wasm and b/wasm_for_tests/vp_always_true.wasm differ diff --git a/wasm_for_tests/vp_always_true/Cargo.toml b/wasm_for_tests/vp_always_true/Cargo.toml new file mode 100644 index 0000000000..b790d374a1 --- /dev/null +++ b/wasm_for_tests/vp_always_true/Cargo.toml @@ -0,0 +1,29 @@ +[package] +name = "vp_always_true" +description = "Wasm vp used for testing." +authors.workspace = true +edition.workspace = true +license.workspace = true +version.workspace = true + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +namada_test_utils.workspace = true +namada_tx_prelude.workspace = true +namada_vp_prelude.workspace = true +wee_alloc.workspace = true +getrandom.workspace = true + +[dev-dependencies] +namada_tests = { path = "../../crates/tests", default-features = false, features = [ + "wasm-runtime", +] } + +proptest = "1.4.0" +test-log = {version = "0.2.14", default-features = false, features = ["trace"]} +tracing = "0.1.30" +tracing-subscriber = {version = "0.3.7", default-features = false, features = ["env-filter", "fmt"]} + +[lib] +crate-type = ["cdylib"] \ No newline at end of file diff --git a/wasm_for_tests/vp_always_true/src/lib.rs b/wasm_for_tests/vp_always_true/src/lib.rs new file mode 100644 index 0000000000..55daff519a --- /dev/null +++ b/wasm_for_tests/vp_always_true/src/lib.rs @@ -0,0 +1,12 @@ +use namada_vp_prelude::*; + +#[validity_predicate] +fn validate_tx( + _ctx: &Ctx, + _tx_data: Tx, + _addr: Address, + _keys_changed: BTreeSet, + _verifiers: BTreeSet
, +) -> VpResult { + accept() +} diff --git a/wasm_for_tests/vp_eval.wasm b/wasm_for_tests/vp_eval.wasm index b4cc97f944..bc49415cfb 100755 Binary files a/wasm_for_tests/vp_eval.wasm and b/wasm_for_tests/vp_eval.wasm differ diff --git a/wasm_for_tests/vp_eval/Cargo.toml b/wasm_for_tests/vp_eval/Cargo.toml new file mode 100644 index 0000000000..dfd2aa4520 --- /dev/null +++ b/wasm_for_tests/vp_eval/Cargo.toml @@ -0,0 +1,29 @@ +[package] +name = "vp_eval" +description = "Wasm vp used for testing." +authors.workspace = true +edition.workspace = true +license.workspace = true +version.workspace = true + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +namada_test_utils.workspace = true +namada_tx_prelude.workspace = true +namada_vp_prelude.workspace = true +wee_alloc.workspace = true +getrandom.workspace = true + +[dev-dependencies] +namada_tests = { path = "../../crates/tests", default-features = false, features = [ + "wasm-runtime", +] } + +proptest = "1.4.0" +test-log = {version = "0.2.14", default-features = false, features = ["trace"]} +tracing = "0.1.30" +tracing-subscriber = {version = "0.3.7", default-features = false, features = ["env-filter", "fmt"]} + +[lib] +crate-type = ["cdylib"] \ No newline at end of file diff --git a/wasm_for_tests/vp_eval/src/lib.rs b/wasm_for_tests/vp_eval/src/lib.rs new file mode 100644 index 0000000000..be8deecb96 --- /dev/null +++ b/wasm_for_tests/vp_eval/src/lib.rs @@ -0,0 +1,18 @@ +use namada_vp_prelude::*; + +#[validity_predicate] +fn validate_tx( + ctx: &Ctx, + tx_data: Tx, + _addr: Address, + _keys_changed: BTreeSet, + _verifiers: BTreeSet
, +) -> VpResult { + use namada_tx_prelude::transaction::eval_vp::EvalVp; + let EvalVp { + vp_code_hash, + input, + }: EvalVp = + EvalVp::try_from_slice(&tx_data.data().as_ref().unwrap()[..]).unwrap(); + ctx.eval(vp_code_hash, input).into_vp_error() +} diff --git a/wasm_for_tests/vp_infinite_guest_gas.wasm b/wasm_for_tests/vp_infinite_guest_gas.wasm new file mode 100755 index 0000000000..2e2fd81ccc Binary files /dev/null and b/wasm_for_tests/vp_infinite_guest_gas.wasm differ diff --git a/wasm_for_tests/vp_infinite_guest_gas/Cargo.toml b/wasm_for_tests/vp_infinite_guest_gas/Cargo.toml new file mode 100644 index 0000000000..976e458fef --- /dev/null +++ b/wasm_for_tests/vp_infinite_guest_gas/Cargo.toml @@ -0,0 +1,29 @@ +[package] +name = "vp_infinite_guest_gas" +description = "Wasm vp used for testing." +authors.workspace = true +edition.workspace = true +license.workspace = true +version.workspace = true + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +namada_test_utils.workspace = true +namada_tx_prelude.workspace = true +namada_vp_prelude.workspace = true +wee_alloc.workspace = true +getrandom.workspace = true + +[dev-dependencies] +namada_tests = { path = "../../crates/tests", default-features = false, features = [ + "wasm-runtime", +] } + +proptest = "1.4.0" +test-log = {version = "0.2.14", default-features = false, features = ["trace"]} +tracing = "0.1.30" +tracing-subscriber = {version = "0.3.7", default-features = false, features = ["env-filter", "fmt"]} + +[lib] +crate-type = ["cdylib"] \ No newline at end of file diff --git a/wasm_for_tests/vp_infinite_guest_gas/src/lib.rs b/wasm_for_tests/vp_infinite_guest_gas/src/lib.rs new file mode 100644 index 0000000000..5e7f66e08d --- /dev/null +++ b/wasm_for_tests/vp_infinite_guest_gas/src/lib.rs @@ -0,0 +1,15 @@ + +use namada_vp_prelude::*; + +/// A vp that endlessly charges gas from the guest environment +#[validity_predicate] +fn validate_tx( + 
_ctx: &Ctx, + _tx_data: Tx, + _addr: Address, + _keys_changed: BTreeSet, + _verifiers: BTreeSet
, +) -> VpResult { + #[allow(clippy::empty_loop)] + loop {} +} diff --git a/wasm_for_tests/vp_infinite_host_gas.wasm b/wasm_for_tests/vp_infinite_host_gas.wasm new file mode 100755 index 0000000000..09696a2e26 Binary files /dev/null and b/wasm_for_tests/vp_infinite_host_gas.wasm differ diff --git a/wasm_for_tests/vp_infinite_host_gas/Cargo.toml b/wasm_for_tests/vp_infinite_host_gas/Cargo.toml new file mode 100644 index 0000000000..5908f8a48d --- /dev/null +++ b/wasm_for_tests/vp_infinite_host_gas/Cargo.toml @@ -0,0 +1,29 @@ +[package] +name = "vp_infinite_host_gas" +description = "Wasm vp used for testing." +authors.workspace = true +edition.workspace = true +license.workspace = true +version.workspace = true + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +namada_test_utils.workspace = true +namada_tx_prelude.workspace = true +namada_vp_prelude.workspace = true +wee_alloc.workspace = true +getrandom.workspace = true + +[dev-dependencies] +namada_tests = { path = "../../crates/tests", default-features = false, features = [ + "wasm-runtime", +] } + +proptest = "1.4.0" +test-log = {version = "0.2.14", default-features = false, features = ["trace"]} +tracing = "0.1.30" +tracing-subscriber = {version = "0.3.7", default-features = false, features = ["env-filter", "fmt"]} + +[lib] +crate-type = ["cdylib"] \ No newline at end of file diff --git a/wasm_for_tests/vp_infinite_host_gas/src/lib.rs b/wasm_for_tests/vp_infinite_host_gas/src/lib.rs new file mode 100644 index 0000000000..bf0ee79041 --- /dev/null +++ b/wasm_for_tests/vp_infinite_host_gas/src/lib.rs @@ -0,0 +1,19 @@ +use namada_vp_prelude::*; + +/// A vp that endlessly charges gas from the host environment +#[validity_predicate] +fn validate_tx( + ctx: &Ctx, + _tx_data: Tx, + _addr: Address, + _keys_changed: BTreeSet, + _verifiers: BTreeSet
, +) -> VpResult { + let target_key = + namada_tx_prelude::parameters_storage::get_tx_allowlist_storage_key(); + loop { + // NOTE: don't propagate the error to verify that execution abortion + // is done in host and does not require guest cooperation + let _ = ctx.read_bytes_pre(&target_key); + } +} diff --git a/wasm_for_tests/vp_memory_limit.wasm b/wasm_for_tests/vp_memory_limit.wasm index 663e3ebf25..2cbb0e2fb3 100755 Binary files a/wasm_for_tests/vp_memory_limit.wasm and b/wasm_for_tests/vp_memory_limit.wasm differ diff --git a/wasm_for_tests/vp_memory_limit/Cargo.toml b/wasm_for_tests/vp_memory_limit/Cargo.toml new file mode 100644 index 0000000000..23aef54c5f --- /dev/null +++ b/wasm_for_tests/vp_memory_limit/Cargo.toml @@ -0,0 +1,29 @@ +[package] +name = "vp_memory_limit" +description = "Wasm vp used for testing." +authors.workspace = true +edition.workspace = true +license.workspace = true +version.workspace = true + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +namada_test_utils.workspace = true +namada_tx_prelude.workspace = true +namada_vp_prelude.workspace = true +wee_alloc.workspace = true +getrandom.workspace = true + +[dev-dependencies] +namada_tests = { path = "../../crates/tests", default-features = false, features = [ + "wasm-runtime", +] } + +proptest = "1.4.0" +test-log = {version = "0.2.14", default-features = false, features = ["trace"]} +tracing = "0.1.30" +tracing-subscriber = {version = "0.3.7", default-features = false, features = ["env-filter", "fmt"]} + +[lib] +crate-type = ["cdylib"] \ No newline at end of file diff --git a/wasm_for_tests/vp_memory_limit/src/lib.rs b/wasm_for_tests/vp_memory_limit/src/lib.rs new file mode 100644 index 0000000000..09284f61b1 --- /dev/null +++ b/wasm_for_tests/vp_memory_limit/src/lib.rs @@ -0,0 +1,18 @@ +use namada_vp_prelude::*; + +#[validity_predicate] +fn validate_tx( + _ctx: &Ctx, + tx_data: Tx, + _addr: Address, + _keys_changed: 
BTreeSet, + _verifiers: BTreeSet
, +) -> VpResult { + let len = + usize::try_from_slice(&tx_data.data().as_ref().unwrap()[..]).unwrap(); + log_string(format!("allocate len {}", len)); + let bytes: Vec = vec![6_u8; len]; + // use the variable to prevent it from compiler optimizing it away + log_string(format!("{:?}", &bytes[..8])); + accept() +} diff --git a/wasm_for_tests/vp_read_storage_key.wasm b/wasm_for_tests/vp_read_storage_key.wasm index 1d37962592..7b8f5b3283 100755 Binary files a/wasm_for_tests/vp_read_storage_key.wasm and b/wasm_for_tests/vp_read_storage_key.wasm differ diff --git a/wasm_for_tests/vp_read_storage_key/Cargo.toml b/wasm_for_tests/vp_read_storage_key/Cargo.toml new file mode 100644 index 0000000000..ef545b8c7b --- /dev/null +++ b/wasm_for_tests/vp_read_storage_key/Cargo.toml @@ -0,0 +1,29 @@ +[package] +name = "vp_read_storage_key" +description = "Wasm vp used for testing." +authors.workspace = true +edition.workspace = true +license.workspace = true +version.workspace = true + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +namada_test_utils.workspace = true +namada_tx_prelude.workspace = true +namada_vp_prelude.workspace = true +wee_alloc.workspace = true +getrandom.workspace = true + +[dev-dependencies] +namada_tests = { path = "../../crates/tests", default-features = false, features = [ + "wasm-runtime", +] } + +proptest = "1.4.0" +test-log = {version = "0.2.14", default-features = false, features = ["trace"]} +tracing = "0.1.30" +tracing-subscriber = {version = "0.3.7", default-features = false, features = ["env-filter", "fmt"]} + +[lib] +crate-type = ["cdylib"] \ No newline at end of file diff --git a/wasm_for_tests/vp_read_storage_key/src/lib.rs b/wasm_for_tests/vp_read_storage_key/src/lib.rs new file mode 100644 index 0000000000..40bc8a4a53 --- /dev/null +++ b/wasm_for_tests/vp_read_storage_key/src/lib.rs @@ -0,0 +1,18 @@ +use namada_vp_prelude::*; + +#[validity_predicate] +fn validate_tx( + ctx: 
&Ctx, + tx_data: Tx, + _addr: Address, + _keys_changed: BTreeSet, + _verifiers: BTreeSet
, +) -> VpResult { + // Allocates a memory of size given from the `tx_data (usize)` + let key = + storage::Key::try_from_slice(&tx_data.data().as_ref().unwrap()[..]) + .unwrap(); + log_string(format!("key {}", key)); + let _result: Vec = ctx.read_pre(&key).into_vp_error()?.unwrap(); + accept() +} diff --git a/wasm_for_tests/wasm_source/.gitignore b/wasm_for_tests/wasm_source/.gitignore deleted file mode 100644 index 5028f0063a..0000000000 --- a/wasm_for_tests/wasm_source/.gitignore +++ /dev/null @@ -1,7 +0,0 @@ -# Generated by Cargo -# will have compiled files and executables -debug/ -target/ - -# These are backup files generated by rustfmt -**/*.rs.bk \ No newline at end of file diff --git a/wasm_for_tests/wasm_source/Cargo.toml b/wasm_for_tests/wasm_source/Cargo.toml index 0a958efb71..85bc53973d 100644 --- a/wasm_for_tests/wasm_source/Cargo.toml +++ b/wasm_for_tests/wasm_source/Cargo.toml @@ -12,19 +12,24 @@ crate-type = ["cdylib"] # The features should be used individually to build the selected wasm. # Newly added wasms should also be added into the Makefile `$(wasms)` list. 
[features] +tx_fail = [] +tx_infinite_guest_gas = [] +tx_infinite_host_gas = [] +tx_invalid_data = [] tx_memory_limit = [] tx_no_op = [] -tx_fail = [] +tx_proposal_code = [] +tx_proposal_ibc_token_inflation = [] +tx_proposal_masp_reward = [] tx_read_storage_key = [] tx_write = [] vp_always_false = [] vp_always_true = [] vp_eval = [] +vp_infinite_guest_gas = [] +vp_infinite_host_gas = [] vp_memory_limit = [] vp_read_storage_key = [] -tx_proposal_code = [] -tx_proposal_masp_reward = [] -tx_proposal_ibc_token_inflation = [] [dependencies] namada_test_utils = {path = "../../crates/test_utils"} diff --git a/wasm_for_tests/wasm_source/Makefile b/wasm_for_tests/wasm_source/Makefile deleted file mode 100644 index 2c113ce0f3..0000000000 --- a/wasm_for_tests/wasm_source/Makefile +++ /dev/null @@ -1,83 +0,0 @@ -cargo := $(env) cargo -rustup := $(env) rustup -# Nightly build is currently used for rustfmt and clippy. -nightly := $(shell cat ../../rust-nightly-version) - -# All the wasms that can be built from this source, switched via Cargo features -# Wasms can be added via the Cargo.toml `[features]` list. 
-wasms := tx_memory_limit -wasms += tx_no_op -wasms += tx_fail -wasms += tx_read_storage_key -wasms += tx_write -wasms += vp_always_false -wasms += vp_always_true -wasms += vp_eval -wasms += vp_memory_limit -wasms += vp_read_storage_key -wasms += tx_proposal_code -wasms += tx_proposal_masp_reward -wasms += tx_proposal_ibc_token_inflation - - -# Build all wasms -all: - make $(wasms) - make opt-wasm - -# `cargo check` all wasms -check: - $(foreach wasm,$(wasms),make check_$(wasm) && ) true - -# `cargo test` all wasms -test: - $(foreach wasm,$(wasms),make test_$(wasm) && ) true - -# `cargo clippy` all wasms -clippy: - $(foreach wasm,$(wasms),make clippy_$(wasm) && ) true - -clippy-fix: - $(cargo) +$(nightly) clippy --fix -Z unstable-options --all-targets --allow-dirty --allow-staged - -fmt: - $(cargo) +$(nightly) fmt --all - -fmt-check: - $(cargo) +$(nightly) fmt --all -- --check - -# Build a selected wasm -$(wasms): %: - $(cargo) build --release --target wasm32-unknown-unknown --target-dir 'target' --features $@ && \ - cp "./target/wasm32-unknown-unknown/release/namada_wasm_for_tests.wasm" ../$@.wasm - -# `cargo check` one of the wasms, e.g. `make check_tx_no_op` -$(patsubst %,check_%,$(wasms)): check_%: - $(cargo) check --target wasm32-unknown-unknown --features $* - -# `cargo test` one of the wasms, e.g. `make test_tx_no_op` -$(patsubst %,test_%,$(wasms)): test_%: - $(cargo) test --features $* \ - -- \ - -Z unstable-options --report-time - -# `cargo watch` one of the wasms, e.g. `make watch_tx_no_op` -$(patsubst %,watch_%,$(wasms)): watch_%: - $(cargo) watch --features $* - -# `cargo clippy` one of the wasms, e.g. 
`make clippy_tx_no_op` -$(patsubst %,clippy_%,$(wasms)): clippy_%: - $(cargo) +$(nightly) clippy --all-targets --features $* -- -D warnings - -clean-wasm = rm ../$(wasm).wasm -clean: - $(foreach wasm,$(wasms),$(clean-wasm) && ) true - -deps: - $(rustup) target add wasm32-unknown-unknown - -# this command needs wasm-opt installed -opt-wasm: - @for file in $(shell ls ../*.wasm); do wasm-opt -Oz -o $${file} $${file}; done - -.PHONY : all check test clippy fmt fmt-check clean deps opt-wasm diff --git a/wasm_for_tests/wasm_source/README.md b/wasm_for_tests/wasm_source/README.md deleted file mode 100644 index 07d1f3f976..0000000000 --- a/wasm_for_tests/wasm_source/README.md +++ /dev/null @@ -1,21 +0,0 @@ -# WASM source code in Rust - -This crate contains WASM implementations of various transactions and validity predicates, used for testing. - -## Quick start - -```shell -# To be able to build this, make sure to have -make deps - -# Build - this will output .wasm files in the parent dir -make all - -# Each source that is included here can also be build and checked individually, e.g. for "tx_no_op" source: - -make tx_no_op # build -make check_tx_no_op # cargo check -make test_tx_no_op # cargo test -make watch_tx_no_op # cargo watch -make clippy_tx_no_op # cargo clippy -``` diff --git a/wasm_for_tests/wasm_source/release.toml b/wasm_for_tests/wasm_source/release.toml deleted file mode 100644 index dbacf2cec3..0000000000 --- a/wasm_for_tests/wasm_source/release.toml +++ /dev/null @@ -1,7 +0,0 @@ -allow-branch = ["main", "maint-*"] -consolidate-commits = true -pre-release-commit-message = "fixup! Namada {{version}}" -publish = false -push = false -shared-version = true -tag = false diff --git a/wasm_for_tests/wasm_source/src/lib.rs b/wasm_for_tests/wasm_source/src/lib.rs deleted file mode 100644 index 39801653fb..0000000000 --- a/wasm_for_tests/wasm_source/src/lib.rs +++ /dev/null @@ -1,331 +0,0 @@ -/// A tx that doesn't do anything. 
-#[cfg(feature = "tx_no_op")] -pub mod main { - use namada_tx_prelude::*; - - #[transaction(gas = 1000)] - fn apply_tx(_ctx: &mut Ctx, _tx_data: Tx) -> TxResult { - Ok(()) - } -} - -/// A tx that fails every time. -#[cfg(feature = "tx_fail")] -pub mod main { - use namada_tx_prelude::*; - - #[transaction(gas = 1000)] - fn apply_tx(_ctx: &mut Ctx, _tx_data: Tx) -> TxResult { - Err(Error::SimpleMessage("failed tx")) - } -} - -/// A tx that allocates a memory of size given from the `tx_data: usize`. -#[cfg(feature = "tx_memory_limit")] -pub mod main { - use namada_tx_prelude::*; - - #[transaction(gas = 1000)] - fn apply_tx(_ctx: &mut Ctx, tx_data: Tx) -> TxResult { - let len = usize::try_from_slice(&tx_data.data().as_ref().unwrap()[..]) - .unwrap(); - log_string(format!("allocate len {}", len)); - let bytes: Vec = vec![6_u8; len]; - // use the variable to prevent it from compiler optimizing it away - log_string(format!("{:?}", &bytes[..8])); - Ok(()) - } -} - -/// A tx to be used as proposal_code -#[cfg(feature = "tx_proposal_code")] -pub mod main { - use namada_tx_prelude::*; - - #[transaction(gas = 1000)] - fn apply_tx(ctx: &mut Ctx, _tx_data: Tx) -> TxResult { - // governance - let target_key = gov_storage::keys::get_min_proposal_grace_epoch_key(); - ctx.write(&target_key, 9_u64)?; - - // parameters - let target_key = parameters_storage::get_tx_allowlist_storage_key(); - ctx.write(&target_key, vec!["hash"])?; - Ok(()) - } -} - -/// A tx to be used as proposal_code for changing shielded rewards -#[cfg(feature = "tx_proposal_masp_reward")] -pub mod main { - use std::str::FromStr; - - use dec::Dec; - use namada_tx_prelude::*; - - #[transaction(gas = 1000)] - fn apply_tx(ctx: &mut Ctx, _tx_data: Tx) -> TxResult { - let native_token = ctx.get_native_token()?; - let shielded_rewards_key = - token::storage_key::masp_max_reward_rate_key(&native_token); - - ctx.write(&shielded_rewards_key, Dec::from_str("0.05").unwrap())?; - - Ok(()) - } -} - -/// A tx to be used as 
proposal_code to set the inflation params for a token -/// over IBC -#[cfg(feature = "tx_proposal_ibc_token_inflation")] -pub mod main { - use std::str::FromStr; - - use dec::Dec; - use namada_tx_prelude::*; - - // Denom of tokens over IBC is always zero - const IBC_TOKEN_DENOM: u8 = 0; - const CHANNEL_ID: &str = "channel-0"; - const BASE_TOKEN: &str = "tnam1qyvfwdkz8zgs9n3qn9xhp8scyf8crrxwuq26r6gy"; - - #[transaction(gas = 1000)] - fn apply_tx(ctx: &mut Ctx, _tx_data: Tx) -> TxResult { - let ibc_denom = format!("transfer/{CHANNEL_ID}/{BASE_TOKEN}"); - let ibc_token = ibc::ibc_token(&ibc_denom); - - let shielded_token_last_inflation_key = - token::storage_key::masp_last_inflation_key(&ibc_token); - let shielded_token_last_locked_amount_key = - token::storage_key::masp_last_locked_amount_key(&ibc_token); - let shielded_token_max_rewards_key = - token::storage_key::masp_max_reward_rate_key(&ibc_token); - let shielded_token_target_locked_amount_key = - token::storage_key::masp_locked_amount_target_key(&ibc_token); - let shielded_token_kp_gain_key = - token::storage_key::masp_kp_gain_key(&ibc_token); - let shielded_token_kd_gain_key = - token::storage_key::masp_kd_gain_key(&ibc_token); - - let token_map_key = token::storage_key::masp_token_map_key(); - let mut token_map: masp::TokenMap = ctx.read(&token_map_key)?.unwrap_or_default(); - token_map.insert(ibc_denom, ibc_token); - ctx.write(&token_map_key, token_map)?; - - ctx.write( - &shielded_token_last_inflation_key, - token::Amount::zero(), - )?; - ctx.write( - &shielded_token_last_locked_amount_key, - token::Amount::zero(), - )?; - ctx.write( - &shielded_token_max_rewards_key, - Dec::from_str("0.01").unwrap(), - )?; - ctx.write( - &shielded_token_target_locked_amount_key, - token::Amount::from_uint(1_000_000_000, IBC_TOKEN_DENOM).unwrap(), - )?; - ctx.write( - &shielded_token_kp_gain_key, - Dec::from_str("120000").unwrap(), - )?; - ctx.write( - &shielded_token_kd_gain_key, - Dec::from_str("120000").unwrap(), - )?; - 
Ok(()) - } -} - -/// A tx that attempts to read the given key from storage. -#[cfg(feature = "tx_read_storage_key")] -pub mod main { - use namada_tx_prelude::*; - - #[transaction(gas = 1000)] - fn apply_tx(ctx: &mut Ctx, tx_data: Tx) -> TxResult { - // Allocates a memory of size given from the `tx_data (usize)` - let key = - storage::Key::try_from_slice(&tx_data.data().as_ref().unwrap()[..]) - .unwrap(); - log_string(format!("key {}", key)); - let _result: Vec = ctx.read(&key)?.unwrap(); - Ok(()) - } -} - -/// A tx that attempts to write arbitrary data to the given key -#[cfg(feature = "tx_write")] -pub mod main { - use namada_test_utils::tx_data::TxWriteData; - use namada_tx_prelude::{ - log_string, transaction, BorshDeserialize, Ctx, StorageRead, - StorageWrite, Tx, TxEnv, TxResult, - }; - - const TX_NAME: &str = "tx_write"; - - fn log(msg: &str) { - log_string(format!("[{}] {}", TX_NAME, msg)) - } - - fn fatal(msg: &str, err: impl std::error::Error) -> ! { - log(&format!("ERROR: {} - {:?}", msg, err)); - panic!() - } - - fn fatal_msg(msg: &str) -> ! 
{ - log(msg); - panic!() - } - - #[transaction(gas = 1000)] - fn apply_tx(ctx: &mut Ctx, tx_data: Tx) -> TxResult { - let signed = tx_data; - let data = match signed.data() { - Some(data) => { - log(&format!("got data ({} bytes)", data.len())); - data - } - None => { - fatal_msg("no data provided"); - } - }; - let TxWriteData { key, value } = - match TxWriteData::try_from_slice(&data[..]) { - Ok(write_op) => { - log(&format!( - "parsed WriteOp to key {} ({} bytes)", - &write_op.key, - &write_op.value.len(), - )); - write_op - } - Err(error) => fatal("deserializing WriteOp", error), - }; - let existing_value: Option = ctx.read(&key)?; - match existing_value { - Some(existing_value) => { - log(&format!("already present value is {}", existing_value)); - } - None => { - log("no already present value"); - } - } - log(&format!("attempting to write new value to key {}", key)); - // using `ctx.write_bytes` instead of `ctx.write` here, as we want to - // write the actual bytes, not a Borsh-serialization of a `Vec` - ctx.write_bytes(&key, &value[..])?; - Ok(()) - } -} - -/// A VP that always returns `true`. -#[cfg(feature = "vp_always_true")] -pub mod main { - use namada_vp_prelude::*; - - #[validity_predicate(gas = 1000)] - fn validate_tx( - _ctx: &Ctx, - _tx_data: Tx, - _addr: Address, - _keys_changed: BTreeSet, - _verifiers: BTreeSet
, - ) -> VpResult { - accept() - } -} - -/// A VP that always returns `false`. -#[cfg(feature = "vp_always_false")] -pub mod main { - use namada_vp_prelude::*; - - #[validity_predicate(gas = 1000)] - fn validate_tx( - _ctx: &Ctx, - _tx_data: Tx, - _addr: Address, - _keys_changed: BTreeSet, - _verifiers: BTreeSet
, - ) -> VpResult { - reject() - } -} - -/// A VP that runs the VP given in `tx_data` via `eval`. It returns the result -/// of `eval`. -#[cfg(feature = "vp_eval")] -pub mod main { - use namada_vp_prelude::*; - - #[validity_predicate(gas = 1000)] - fn validate_tx( - ctx: &Ctx, - tx_data: Tx, - _addr: Address, - _keys_changed: BTreeSet, - _verifiers: BTreeSet
, - ) -> VpResult { - use namada_tx_prelude::transaction::eval_vp::EvalVp; - let EvalVp { - vp_code_hash, - input, - }: EvalVp = - EvalVp::try_from_slice(&tx_data.data().as_ref().unwrap()[..]) - .unwrap(); - ctx.eval(vp_code_hash, input) - } -} - -// A VP that allocates a memory of size given from the `tx_data: usize`. -// Returns `true`, if the allocation is within memory limits. -#[cfg(feature = "vp_memory_limit")] -pub mod main { - use namada_vp_prelude::*; - - #[validity_predicate(gas = 1000)] - fn validate_tx( - _ctx: &Ctx, - tx_data: Tx, - _addr: Address, - _keys_changed: BTreeSet, - _verifiers: BTreeSet
, - ) -> VpResult { - let len = usize::try_from_slice(&tx_data.data().as_ref().unwrap()[..]) - .unwrap(); - log_string(format!("allocate len {}", len)); - let bytes: Vec = vec![6_u8; len]; - // use the variable to prevent it from compiler optimizing it away - log_string(format!("{:?}", &bytes[..8])); - accept() - } -} - -/// A VP that attempts to read the given key from storage (state prior to tx -/// execution). Returns `true`, if the allocation is within memory limits. -#[cfg(feature = "vp_read_storage_key")] -pub mod main { - use namada_vp_prelude::*; - - #[validity_predicate(gas = 1000)] - fn validate_tx( - ctx: &Ctx, - tx_data: Tx, - _addr: Address, - _keys_changed: BTreeSet, - _verifiers: BTreeSet
, - ) -> VpResult { - // Allocates a memory of size given from the `tx_data (usize)` - let key = - storage::Key::try_from_slice(&tx_data.data().as_ref().unwrap()[..]) - .unwrap(); - log_string(format!("key {}", key)); - let _result: Vec = ctx.read_pre(&key)?.unwrap(); - accept() - } -}