diff --git a/.changelog/v0.29.0/SDK/1995-eth-tx-emits-events.md b/.changelog/v0.29.0/SDK/1995-eth-tx-emits-events.md new file mode 100644 index 0000000000..2ce8e5780b --- /dev/null +++ b/.changelog/v0.29.0/SDK/1995-eth-tx-emits-events.md @@ -0,0 +1,2 @@ +- Introduce a method to query the status (pending, relayed or expired) of Bridge + pool transfers ([\#1995](https://github.com/anoma/namada/pull/1995)) \ No newline at end of file diff --git a/.changelog/v0.29.0/SDK/2220-light-sdk.md b/.changelog/v0.29.0/SDK/2220-light-sdk.md new file mode 100644 index 0000000000..76af7c6645 --- /dev/null +++ b/.changelog/v0.29.0/SDK/2220-light-sdk.md @@ -0,0 +1 @@ +- Added light sdk ([\#2220](https://github.com/anoma/namada/pull/2220)) \ No newline at end of file diff --git a/.changelog/v0.29.0/SDK/2276-nicer-client-tx-result.md b/.changelog/v0.29.0/SDK/2276-nicer-client-tx-result.md new file mode 100644 index 0000000000..0d2d22586c --- /dev/null +++ b/.changelog/v0.29.0/SDK/2276-nicer-client-tx-result.md @@ -0,0 +1,2 @@ +- Improved the TxResponse type. + ([\#2276](https://github.com/anoma/namada/pull/2276)) \ No newline at end of file diff --git a/.changelog/v0.29.0/SDK/2282-masp-misc-fixes.md b/.changelog/v0.29.0/SDK/2282-masp-misc-fixes.md new file mode 100644 index 0000000000..abac63577f --- /dev/null +++ b/.changelog/v0.29.0/SDK/2282-masp-misc-fixes.md @@ -0,0 +1,2 @@ +- Removed useless epoch for fee unshielding. + ([\#2282](https://github.com/anoma/namada/pull/2282)) \ No newline at end of file diff --git a/.changelog/v0.29.0/SDK/2308-fix-ibc-gen-shielded.md b/.changelog/v0.29.0/SDK/2308-fix-ibc-gen-shielded.md new file mode 100644 index 0000000000..5e65bcff6f --- /dev/null +++ b/.changelog/v0.29.0/SDK/2308-fix-ibc-gen-shielded.md @@ -0,0 +1,2 @@ +- ibc-gen-shielded can set non-Namada token + ([\#2308](https://github.com/anoma/namada/issues/2308)) \ No newline at end of file diff --git a/.changelog/v0.29.0/SDK/2315-tx-expiration-update.md b/.changelog/v0.29.0/SDK/2315-tx-expiration-update.md new file mode 100644 index 0000000000..058aa005f9 --- /dev/null +++ b/.changelog/v0.29.0/SDK/2315-tx-expiration-update.md @@ -0,0 +1,2 @@ +- Updated `gen_shielded_transfer` to attach a sensible expiration to a MASP + `Transaction`. ([\#2315](https://github.com/anoma/namada/pull/2315)) \ No newline at end of file diff --git a/.changelog/v0.29.0/SDK/2321-ibc_shielded_transfer.md b/.changelog/v0.29.0/SDK/2321-ibc_shielded_transfer.md new file mode 100644 index 0000000000..a90b2c4075 --- /dev/null +++ b/.changelog/v0.29.0/SDK/2321-ibc_shielded_transfer.md @@ -0,0 +1,2 @@ +- ibc-transfer can set a spending key to the source + ([\#2321](https://github.com/anoma/namada/issues/2321)) \ No newline at end of file diff --git a/.changelog/v0.29.0/bug-fixes/2240-nullifier-uniqueness.md b/.changelog/v0.29.0/bug-fixes/2240-nullifier-uniqueness.md new file mode 100644 index 0000000000..dc80af96ac --- /dev/null +++ b/.changelog/v0.29.0/bug-fixes/2240-nullifier-uniqueness.md @@ -0,0 +1,2 @@ +- Prevents double-spending in masp by adding a nullifier set. + ([\#2240](https://github.com/anoma/namada/pull/2240)) \ No newline at end of file diff --git a/.changelog/v0.29.0/bug-fixes/2244-spend-description-validation.md b/.changelog/v0.29.0/bug-fixes/2244-spend-description-validation.md new file mode 100644 index 0000000000..c531894144 --- /dev/null +++ b/.changelog/v0.29.0/bug-fixes/2244-spend-description-validation.md @@ -0,0 +1,2 @@ +- Updates masp tx to store the notes and the native vp to validate them and the + anchors. 
([\#2244](https://github.com/anoma/namada/pull/2244)) \ No newline at end of file diff --git a/.changelog/v0.29.0/bug-fixes/2248-convert-description-validation.md b/.changelog/v0.29.0/bug-fixes/2248-convert-description-validation.md new file mode 100644 index 0000000000..2f7b72ceb2 --- /dev/null +++ b/.changelog/v0.29.0/bug-fixes/2248-convert-description-validation.md @@ -0,0 +1,2 @@ +- Updates the masp vp to validate the convert description's anchor. + ([\#2248](https://github.com/anoma/namada/pull/2248)) \ No newline at end of file diff --git a/.changelog/v0.29.0/bug-fixes/2279-fix-tx-result-handle.md b/.changelog/v0.29.0/bug-fixes/2279-fix-tx-result-handle.md new file mode 100644 index 0000000000..8d63c81eab --- /dev/null +++ b/.changelog/v0.29.0/bug-fixes/2279-fix-tx-result-handle.md @@ -0,0 +1,2 @@ +- Client: Check that transaction is successful before taking further actions. + ([\#2279](https://github.com/anoma/namada/pull/2279)) \ No newline at end of file diff --git a/.changelog/v0.29.0/bug-fixes/2308-fix-ibc-gen-shielded.md b/.changelog/v0.29.0/bug-fixes/2308-fix-ibc-gen-shielded.md new file mode 100644 index 0000000000..9386818074 --- /dev/null +++ b/.changelog/v0.29.0/bug-fixes/2308-fix-ibc-gen-shielded.md @@ -0,0 +1,2 @@ +- Non-Namada token can be given to ibc-gen-shielded + ([\#2308](https://github.com/anoma/namada/issues/2308)) \ No newline at end of file diff --git a/.changelog/v0.29.0/bug-fixes/2310-wait-for-genesis-time.md b/.changelog/v0.29.0/bug-fixes/2310-wait-for-genesis-time.md new file mode 100644 index 0000000000..0d8c8ab34f --- /dev/null +++ b/.changelog/v0.29.0/bug-fixes/2310-wait-for-genesis-time.md @@ -0,0 +1 @@ + - Make the ledger wait for genesis before starting up any processes ([\#2310](https://github.com/anoma/namada/pull/2310)) \ No newline at end of file diff --git a/.changelog/v0.29.0/features/2255-add-dry-run-genesis-cli-command.md b/.changelog/v0.29.0/features/2255-add-dry-run-genesis-cli-command.md new file mode 100644 index 0000000000..79ba4abac3 --- /dev/null +++ b/.changelog/v0.29.0/features/2255-add-dry-run-genesis-cli-command.md @@ -0,0 +1,2 @@ +- A new client command has been added that takes a set of pre-genesis template files, validates them, +and runs them through init_chain. All errors are collected into a report. ([\#2255](https://github.com/anoma/namada/pull/2255)) \ No newline at end of file diff --git a/.changelog/v0.29.0/features/2260-wallet-cli-revamping-main-rebased.md b/.changelog/v0.29.0/features/2260-wallet-cli-revamping-main-rebased.md new file mode 100644 index 0000000000..a174f62c37 --- /dev/null +++ b/.changelog/v0.29.0/features/2260-wallet-cli-revamping-main-rebased.md @@ -0,0 +1,7 @@ +- The wallet CLI structure has been significantly reworked and simplified. + Alias argument is now obligatory for key generation / derivation + commands. Feature of raw (non-HD) key generation has been restored, + which was removed in the previous release. Key export / import + functionality for both transparent and shielded key kinds has been + implemented. Additionally, several other improvements have been made. 
+ ([\#2260](https://github.com/anoma/namada/pull/2260)) \ No newline at end of file diff --git a/.changelog/v0.29.0/features/2321-ibc_shielded_transfer.md b/.changelog/v0.29.0/features/2321-ibc_shielded_transfer.md new file mode 100644 index 0000000000..1d6de75a7a --- /dev/null +++ b/.changelog/v0.29.0/features/2321-ibc_shielded_transfer.md @@ -0,0 +1,2 @@ +- IBC transfer from a spending key + ([\#2321](https://github.com/anoma/namada/issues/2321)) \ No newline at end of file diff --git a/.changelog/v0.29.0/improvements/1995-eth-tx-emits-events.md b/.changelog/v0.29.0/improvements/1995-eth-tx-emits-events.md new file mode 100644 index 0000000000..7ec43540cc --- /dev/null +++ b/.changelog/v0.29.0/improvements/1995-eth-tx-emits-events.md @@ -0,0 +1,2 @@ +- Emit Bridge pool transfer status update events from FinalizeBlock + ([\#1995](https://github.com/anoma/namada/pull/1995)) \ No newline at end of file diff --git a/.changelog/v0.29.0/improvements/2127-basic-abstraction.md b/.changelog/v0.29.0/improvements/2127-basic-abstraction.md new file mode 100644 index 0000000000..8ae68fafc2 --- /dev/null +++ b/.changelog/v0.29.0/improvements/2127-basic-abstraction.md @@ -0,0 +1,2 @@ +- Refactored module dealing with Tendermint configuration. + ([\#2127](https://github.com/anoma/namada/pull/2127)) \ No newline at end of file diff --git a/.changelog/v0.29.0/improvements/2213-vp-less-permissive.md b/.changelog/v0.29.0/improvements/2213-vp-less-permissive.md new file mode 100644 index 0000000000..18ffa15100 --- /dev/null +++ b/.changelog/v0.29.0/improvements/2213-vp-less-permissive.md @@ -0,0 +1,3 @@ +- The default implicit and established user account VPs now + require valid signature(s) for unknown storage changes. + ([\#2213](https://github.com/anoma/namada/pull/2213)) \ No newline at end of file diff --git a/.changelog/v0.29.0/improvements/2222-masp-cross-epoch-proof-fix.md b/.changelog/v0.29.0/improvements/2222-masp-cross-epoch-proof-fix.md new file mode 100644 index 0000000000..bd8f0ac7b3 --- /dev/null +++ b/.changelog/v0.29.0/improvements/2222-masp-cross-epoch-proof-fix.md @@ -0,0 +1,2 @@ +- Allowed the unshielding of previous epochs assets from the masp. + ([\#2222](https://github.com/anoma/namada/pull/2222)) \ No newline at end of file diff --git a/.changelog/v0.29.0/improvements/2245-denominated-fee-amount.md b/.changelog/v0.29.0/improvements/2245-denominated-fee-amount.md new file mode 100644 index 0000000000..718bef820f --- /dev/null +++ b/.changelog/v0.29.0/improvements/2245-denominated-fee-amount.md @@ -0,0 +1,2 @@ +- Fee amounts in transaction wrappers are now denominated to facilitate hardware + wallet support. ([\#2245](https://github.com/anoma/namada/pull/2245)) \ No newline at end of file diff --git a/.changelog/v0.29.0/improvements/2253-pos-crate-refactor.md b/.changelog/v0.29.0/improvements/2253-pos-crate-refactor.md new file mode 100644 index 0000000000..716e2166a5 --- /dev/null +++ b/.changelog/v0.29.0/improvements/2253-pos-crate-refactor.md @@ -0,0 +1,2 @@ +- Refactor the PoS crate by breaking up the lib and tests code into smaller + files. 
([\#2253](https://github.com/anoma/namada/pull/2253)) \ No newline at end of file diff --git a/.changelog/v0.29.0/improvements/2259-strategy-constructors.md b/.changelog/v0.29.0/improvements/2259-strategy-constructors.md new file mode 100644 index 0000000000..a4b9a0bf76 --- /dev/null +++ b/.changelog/v0.29.0/improvements/2259-strategy-constructors.md @@ -0,0 +1,2 @@ +- Made test vector generation easier and reduced the difficulty of maintaining + the generation code. ([\#2259](https://github.com/anoma/namada/pull/2259)) \ No newline at end of file diff --git a/.changelog/v0.29.0/improvements/2276-nicer-client-tx-result.md b/.changelog/v0.29.0/improvements/2276-nicer-client-tx-result.md new file mode 100644 index 0000000000..f6b555e4fc --- /dev/null +++ b/.changelog/v0.29.0/improvements/2276-nicer-client-tx-result.md @@ -0,0 +1,2 @@ +- Client: Improved output of transaction results. + ([\#2276](https://github.com/anoma/namada/pull/2276)) \ No newline at end of file diff --git a/.changelog/v0.29.0/improvements/2277-speed-up-tests.md b/.changelog/v0.29.0/improvements/2277-speed-up-tests.md new file mode 100644 index 0000000000..f15fc42ddd --- /dev/null +++ b/.changelog/v0.29.0/improvements/2277-speed-up-tests.md @@ -0,0 +1,2 @@ +- Enhances the speed of two PoS tests that run particularly longer than others + in CI. ([\#2277](https://github.com/anoma/namada/pull/2277)) \ No newline at end of file diff --git a/.changelog/v0.29.0/improvements/2282-masp-misc-fixes.md b/.changelog/v0.29.0/improvements/2282-masp-misc-fixes.md new file mode 100644 index 0000000000..29ec4fe01e --- /dev/null +++ b/.changelog/v0.29.0/improvements/2282-masp-misc-fixes.md @@ -0,0 +1,2 @@ +- Removed useless epoch for fee unshielding and refactored tests. + ([\#2282](https://github.com/anoma/namada/pull/2282)) \ No newline at end of file diff --git a/.changelog/v0.29.0/improvements/2288-refactor-ethbridge-crate.md b/.changelog/v0.29.0/improvements/2288-refactor-ethbridge-crate.md new file mode 100644 index 0000000000..33f77bf902 --- /dev/null +++ b/.changelog/v0.29.0/improvements/2288-refactor-ethbridge-crate.md @@ -0,0 +1,2 @@ +- Refactor internal structure of the Ethereum bridge crate + ([\#2288](https://github.com/anoma/namada/pull/2288)) \ No newline at end of file diff --git a/.changelog/v0.29.0/improvements/2294-move-gain-params-to-pos.md b/.changelog/v0.29.0/improvements/2294-move-gain-params-to-pos.md new file mode 100644 index 0000000000..54203f8ab4 --- /dev/null +++ b/.changelog/v0.29.0/improvements/2294-move-gain-params-to-pos.md @@ -0,0 +1,2 @@ +- Move the pos inflation gain parameters to the PosParams. + ([\#2294](https://github.com/anoma/namada/pull/2294)) \ No newline at end of file diff --git a/.changelog/v0.29.0/improvements/2295-refactor-apply-inflation.md b/.changelog/v0.29.0/improvements/2295-refactor-apply-inflation.md new file mode 100644 index 0000000000..95990a60a2 --- /dev/null +++ b/.changelog/v0.29.0/improvements/2295-refactor-apply-inflation.md @@ -0,0 +1,2 @@ +- Move the inflation code for PoS and PGF into their own native modules. + ([\#2295](https://github.com/anoma/namada/pull/2295)) \ No newline at end of file diff --git a/.changelog/v0.29.0/improvements/2315-tx-expiration-update.md b/.changelog/v0.29.0/improvements/2315-tx-expiration-update.md new file mode 100644 index 0000000000..3259838811 --- /dev/null +++ b/.changelog/v0.29.0/improvements/2315-tx-expiration-update.md @@ -0,0 +1,2 @@ +- Improved validation on transaction's expiration. Added an expiration for MASP + transfers. 
([\#2315](https://github.com/anoma/namada/pull/2315)) \ No newline at end of file diff --git a/.changelog/v0.29.0/improvments/2285-remove-hardcoded-masp-tokens.md b/.changelog/v0.29.0/improvments/2285-remove-hardcoded-masp-tokens.md new file mode 100644 index 0000000000..512ab72cb0 --- /dev/null +++ b/.changelog/v0.29.0/improvments/2285-remove-hardcoded-masp-tokens.md @@ -0,0 +1,5 @@ +- Previously, a hardcoded set of tokens was expected to be used in MASP conversions. + If these tokens did not have configs in genesis, this would lead to a panic after the first + epoch change. This PR fixes this to use the tokens found in genesis belonging to the MASP + rewards whitelist instead of hardcoding the tokens. + ([\#2285](https://github.com/anoma/namada/pull/2285)) \ No newline at end of file diff --git a/.changelog/v0.29.0/summary.md b/.changelog/v0.29.0/summary.md new file mode 100644 index 0000000000..c4b0a9a2e6 --- /dev/null +++ b/.changelog/v0.29.0/summary.md @@ -0,0 +1 @@ +Namada 0.29.0 is a minor release that introduces the light SDK, upgrades the MASP and CLI, and includes other fixes and refactors of the PoS, IBC, and Ethereum Bridge modules. diff --git a/.changelog/v0.29.0/testing/2218-update-consensus-key-change.md b/.changelog/v0.29.0/testing/2218-update-consensus-key-change.md new file mode 100644 index 0000000000..6c6daba7b8 --- /dev/null +++ b/.changelog/v0.29.0/testing/2218-update-consensus-key-change.md @@ -0,0 +1,2 @@ +- Added e2e test for change-consensus-key command. + ([\#2218](https://github.com/anoma/namada/pull/2218)) \ No newline at end of file diff --git a/.github/workflows/scripts/e2e.json b/.github/workflows/scripts/e2e.json index 6e736d16f1..8817c9b6d7 100644 --- a/.github/workflows/scripts/e2e.json +++ b/.github/workflows/scripts/e2e.json @@ -27,6 +27,7 @@ "e2e::ledger_tests::test_bond_queries": 95, "e2e::ledger_tests::suspend_ledger": 30, "e2e::ledger_tests::stop_ledger_at_height": 18, + "e2e::ledger_tests::change_consensus_key": 91, "e2e::wallet_tests::wallet_address_cmds": 1, "e2e::wallet_tests::wallet_encrypted_key_cmds": 1, "e2e::wallet_tests::wallet_encrypted_key_cmds_env_var": 1, diff --git a/CHANGELOG.md b/CHANGELOG.md index 4b22829430..b6b6e31c22 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,98 @@ # CHANGELOG +## v0.29.0 + +Namada 0.29.0 is a minor release that introduces the light SDK, upgrades the MASP and CLI, and includes other fixes and refactors of the PoS, IBC, and Ethereum Bridge modules. + +### BUG FIXES + +- Prevents double-spending in masp by adding a nullifier set. + ([\#2240](https://github.com/anoma/namada/pull/2240)) +- Updates masp tx to store the notes and the native vp to validate them and the + anchors. ([\#2244](https://github.com/anoma/namada/pull/2244)) +- Updates the masp vp to validate the convert description's anchor. + ([\#2248](https://github.com/anoma/namada/pull/2248)) +- Client: Check that transaction is successful before taking further actions. + ([\#2279](https://github.com/anoma/namada/pull/2279)) +- Non-Namada token can be given to ibc-gen-shielded + ([\#2308](https://github.com/anoma/namada/issues/2308)) + - Make the ledger wait for genesis before starting up any processes ([\#2310](https://github.com/anoma/namada/pull/2310)) + +### FEATURES + +- A new client command has been added that takes a set of pre-genesis template files, validates them, +and runs them through init_chain. All errors are collected into a report.
([\#2255](https://github.com/anoma/namada/pull/2255)) +- The wallet CLI structure has been significantly reworked and simplified. + Alias argument is now obligatory for key generation / derivation + commands. Feature of raw (non-HD) key generation has been restored, + which was removed in the previous release. Key export / import + functionality for both transparent and shielded key kinds has been + implemented. Additionally, several other improvements have been made. + ([\#2260](https://github.com/anoma/namada/pull/2260)) +- IBC transfer from a spending key + ([\#2321](https://github.com/anoma/namada/issues/2321)) + +### IMPROVEMENTS + +- Emit Bridge pool transfer status update events from FinalizeBlock + ([\#1995](https://github.com/anoma/namada/pull/1995)) +- Refactored module dealing with Tendermint configuration. + ([\#2127](https://github.com/anoma/namada/pull/2127)) +- The default implicit and established user account VPs now + require valid signature(s) for unknown storage changes. + ([\#2213](https://github.com/anoma/namada/pull/2213)) +- Allowed the unshielding of previous epochs assets from the masp. + ([\#2222](https://github.com/anoma/namada/pull/2222)) +- Fee amounts in transaction wrappers are now denominated to facilitate hardware + wallet support. ([\#2245](https://github.com/anoma/namada/pull/2245)) +- Refactor the PoS crate by breaking up the lib and tests code into smaller + files. ([\#2253](https://github.com/anoma/namada/pull/2253)) +- Made test vector generation easier and reduced the difficulty of maintaining + the generation code. ([\#2259](https://github.com/anoma/namada/pull/2259)) +- Client: Improved output of transaction results. + ([\#2276](https://github.com/anoma/namada/pull/2276)) +- Enhances the speed of two PoS tests that run particularly longer than others + in CI. ([\#2277](https://github.com/anoma/namada/pull/2277)) +- Removed useless epoch for fee unshielding and refactored tests. + ([\#2282](https://github.com/anoma/namada/pull/2282)) +- Refactor internal structure of the Ethereum bridge crate + ([\#2288](https://github.com/anoma/namada/pull/2288)) +- Move the pos inflation gain parameters to the PosParams. + ([\#2294](https://github.com/anoma/namada/pull/2294)) +- Move the inflation code for PoS and PGF into their own native modules. + ([\#2295](https://github.com/anoma/namada/pull/2295)) +- Improved validation on transaction's expiration. Added an expiration for MASP + transfers. ([\#2315](https://github.com/anoma/namada/pull/2315)) + +### IMPROVEMENTS + +- Previously, a hardcoded set of tokens was expected to be used in MASP conversions. + If these tokens did not have configs in genesis, this would lead to a panic after the first + epoch change. This PR fixes this to use the tokens found in genesis belonging to the MASP + rewards whitelist instead of hardcoding the tokens. + ([\#2285](https://github.com/anoma/namada/pull/2285)) + +### SDK + +- Introduce a method to query the status (pending, relayed or expired) of Bridge + pool transfers ([\#1995](https://github.com/anoma/namada/pull/1995)) +- Added light sdk ([\#2220](https://github.com/anoma/namada/pull/2220)) +- Improved the TxResponse type. + ([\#2276](https://github.com/anoma/namada/pull/2276)) +- Removed useless epoch for fee unshielding. + ([\#2282](https://github.com/anoma/namada/pull/2282)) +- ibc-gen-shielded can set non-Namada token + ([\#2308](https://github.com/anoma/namada/issues/2308)) +- Updated `gen_shielded_transfer` to attach a sensible expiration to a MASP + `Transaction`.
([\#2315](https://github.com/anoma/namada/pull/2315)) +- ibc-transfer can set a spending key to the source + ([\#2321](https://github.com/anoma/namada/issues/2321)) + +### TESTING + +- Added e2e test for change-consensus-key command. + ([\#2218](https://github.com/anoma/namada/pull/2218)) + ## v0.28.2 Namada 0.28.2 is a patch release that fixes a stack overflow issue for nodes. @@ -130,7 +223,7 @@ Namada 0.27.0 is a minor release that incorporates the remaining essential proof can execute transactions that manipulate its own validator data ([\#2169](https://github.com/anoma/namada/pull/2169)) - Various improvements to the PoS code, including adding a panic on a slashing - failure, some more checked arithmetic, aesthetic code cleanup, and fixing a + failure, some more checked arithmetics, aesthetic code cleanup, and fixing a bug in is_delegator. ([\#2178](https://github.com/anoma/namada/pull/2178)) - Added type tags to transactions to enable hardware wallets to fully decode transactions even after minor Namada updates. @@ -279,7 +372,7 @@ Namada 0.24.0 is a minor release that introduces an SDK crate, PoS redelegation, data in storage. ([\#1944](https://github.com/anoma/namada/pull/1944)) - Query also IBC token balances ([\#1946](https://github.com/anoma/namada/issues/1946)) -- Increased resolution of gas accounting for signature verification. +- Increased resoultion of gas accounting for signature verification. ([\#1954](https://github.com/anoma/namada/pull/1954)) - Refactor benchmarks to avoid enabling `"testing`" and `"dev"`` features by default in the workspace. @@ -582,7 +675,7 @@ stability. show more info. ([\#1656](https://github.com/anoma/namada/pull/1656)) - Removed associated type on `masp::ShieldedUtils`. This type was an attempt to reduce the number of generic parameters needed when interacting - with MASP but resulted in making code reuse extremely difficult. + with MASP but resulted in making code re-use extremely difficult. ([\#1670](https://github.com/anoma/namada/pull/1670)) - Removed `impl From for EthBridgeVotingPower` and replaced it with a `TryFrom`. ([\#1692](https://github.com/anoma/namada/pull/1692)) @@ -597,7 +690,7 @@ stability. ETH bridge. ([\#1693](https://github.com/anoma/namada/pull/1693)) - PoS: Keep the data for last two epochs by default. ([\#1733](https://github.com/anoma/namada/pull/1733)) -- Refactored CLI into libraries for future reuse in integration tests and +- Refactored CLI into libraries for future re-use in integration tests and to enable generic IO. ([\#1738](https://github.com/anoma/namada/pull/1738)) ### TESTING @@ -757,7 +850,7 @@ Namada 0.17.2 is a minor release featuring improvements to the client stability. ([\#1512](https://github.com/anoma/namada/pull/1512)) - Improve help message for address add command ([\#1514](https://github.com/anoma/namada/issues/1514)) -- PoS: make a reusable bonds and unbonds details query. +- PoS: make a re-usable bonds and unbonds details query. ([\#1518](https://github.com/anoma/namada/pull/1518)) ## v0.17.1 @@ -809,7 +902,7 @@ wallet address derivation, transaction structure and the ledger stability. 
([\#1425](https://github.com/anoma/namada/issues/1425)) - Added some missing cli option for cli wallet ([#1432](https://github.com/anoma/namada/pull/1432)) -- Improve logging error when submitting an invalid validator commission change tx +- Improve logging error when submiting an invalid validator commission change tx ([#1434](https://github.com/anoma/namada/pull/1434)) - Correct a typo in the error change commission error handling ([#1435](https://github.com/anoma/namada/pull/1435)) @@ -854,7 +947,7 @@ proposal. ### IMPROVEMENTS -- Make Tendermint consensus parameters configurable via Namada configuration. +- Make Tendermint consensus paramenters configurable via Namada configuration. ([#1399](https://github.com/anoma/namada/pull/1399)) - Improved error logs in `process_proposal` and added more info to `InternalStats` ([#1407](https://github.com/anoma/namada/pull/1407)) @@ -1379,7 +1472,7 @@ integrations. ### BUG FIXES -- Fix compatibility of IBC Acknowledgement message and FungibleTokenData with +- Fix compatiblity of IBC Acknowledgement message and FungibleTokenData with ibc-go ([#261](https://github.com/anoma/namada/pull/261)) - Fix the block header merkle root hash for response to finalizing block. ([#298](https://github.com/anoma/namada/pull/298)) @@ -1478,7 +1571,7 @@ Namada 0.8.0 is a regular minor release. ([#324](https://github.com/anoma/namada/pull/324)) - Added a StorageWrite trait for a common interface for transactions and direct storage access for protocol ([#331](https://github.com/anoma/namada/pull/331)) -- Reuse encoding/decoding storage write/read and handle any errors +- Re-use encoding/decoding storage write/read and handle any errors ([#334](https://github.com/anoma/namada/pull/334)) - Added a simpler prefix iterator API that returns `std::iter::Iterator` with the storage keys parsed and a variant that also decodes stored values with @@ -1486,12 +1579,12 @@ Namada 0.8.0 is a regular minor release. - Handles the case where a custom `$CARGO_TARGET_DIR` is set during WASM build ([#337](https://github.com/anoma/anoma/pull/337)) - Added `pre/post` methods into `trait VpEnv` that return objects implementing - `trait StorageRead` for reuse of library code written on top of `StorageRead` + `trait StorageRead` for re-use of library code written on top of `StorageRead` inside validity predicates. ([#380](https://github.com/anoma/namada/pull/380)) - Fix order of prefix iterator to be sorted by storage keys and add support for a reverse order prefix iterator. ([#409](https://github.com/anoma/namada/issues/409)) -- Reuse `storage_api::Error` type that supports wrapping custom error in `VpEnv` and `TxEnv` traits. +- Re-use `storage_api::Error` type that supports wrapping custom error in `VpEnv` and `TxEnv` traits. ([#465](https://github.com/anoma/namada/pull/465)) - Fixed governance parameters, tally, tx whitelist and renamed treasury ([#467](https://github.com/anoma/namada/issues/467)) @@ -1503,7 +1596,7 @@ Namada 0.8.0 is a regular minor release. - Added WASM transaction and validity predicate `Ctx` with methods for host environment functions to unify the interface of native VPs and WASM VPs under `trait VpEnv` ([#1093](https://github.com/anoma/anoma/pull/1093)) -- Allows simple retrieval of aliases from addresses in the wallet without +- Allows simple retrival of aliases from addresses in the wallet without the need for multiple hashmaps. 
This is the first step to improving the UI if one wants to show aliases when fetching addresses from anoma wallet ([#1138](https://github.com/anoma/anoma/pull/1138)) @@ -1760,7 +1853,7 @@ Anoma 0.5.0 is a scheduled minor release. - Dependency: Backport libp2p-noise patch that fixes a compilation issue from ([#908](https://github.com/anoma/anoma/issues/908)) -- Wasm: Re-add accidentally removed `tx_ibc` WASM and `vm_env::ibc` module +- Wasm: Re-add accidentaly removed `tx_ibc` WASM and `vm_env::ibc` module ([#916](https://github.com/anoma/anoma/pull/916)) - Ledger & Matchmaker: In "dev" chain with "dev" build, load WASM directly from the root `wasm` directory. ([#933](https://github.com/anoma/anoma/issues/933)) @@ -1900,7 +1993,7 @@ Anoma 0.4.0 is a scheduled minor release, released 31 January 2022. command. The command now doesn't unpack the network config archive into its default directories, if any of them are specified with non-default values. ([#813](https://github.com/anoma/anoma/issues/813)) -- Install the default token exchange matchmaker implementation into +- Install the default token exchange matchmaker implemenetation into `~/.cargo/lib` directory when building from source. When not absolute, the matchmaker will attempt to load the matchmaker from the same path as where the binary is being ran from, from `~/.cargo/lib` or the current working diff --git a/Cargo.lock b/Cargo.lock index 0e98645550..5504a26399 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2899,6 +2899,34 @@ dependencies = [ "cc", ] +[[package]] +name = "ibc" +version = "0.47.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "184eb22140cb4143bbcf7ddc8fdfeb9cc058ef73a6066f8ea78162e69d3565d1" +dependencies = [ + "bytes", + "derive_more", + "displaydoc", + "ibc-derive 0.3.0", + "ibc-proto 0.37.1", + "ics23", + "num-traits 0.2.17", + "primitive-types", + "prost 0.12.3", + "serde 1.0.193", + "serde-json-wasm", + "serde_derive", + "sha2 0.10.8", + "subtle-encoding", + "tendermint", + "tendermint-light-client-verifier", + "tendermint-proto", + "time", + "tracing", + "uint", +] + [[package]] name = "ibc" version = "0.48.1" @@ -2909,7 +2937,7 @@ dependencies = [ "ibc-clients", "ibc-core", "ibc-core-host-cosmos", - "ibc-derive", + "ibc-derive 0.4.0", "ibc-primitives", ] @@ -2933,7 +2961,7 @@ dependencies = [ "derive_more", "displaydoc", "ibc-core", - "ibc-proto", + "ibc-proto 0.38.0", "primitive-types", "serde 1.0.193", "uint", @@ -2978,7 +3006,7 @@ dependencies = [ "ibc-core-commitment-types", "ibc-core-host-types", "ibc-primitives", - "ibc-proto", + "ibc-proto 0.38.0", "prost 0.12.3", "serde 1.0.193", "tendermint", @@ -3041,7 +3069,7 @@ dependencies = [ "ibc-core-connection-types", "ibc-core-host-types", "ibc-primitives", - "ibc-proto", + "ibc-proto 0.38.0", "prost 0.12.3", "serde 1.0.193", "sha2 0.10.8", @@ -3076,7 +3104,7 @@ dependencies = [ "ibc-core-commitment-types", "ibc-core-handler-types", "ibc-core-host-types", - "ibc-derive", + "ibc-derive 0.4.0", "ibc-primitives", "prost 0.12.3", "subtle-encoding", @@ -3094,7 +3122,7 @@ dependencies = [ "ibc-core-commitment-types", "ibc-core-host-types", "ibc-primitives", - "ibc-proto", + "ibc-proto 0.38.0", "prost 0.12.3", "serde 1.0.193", "subtle-encoding", @@ -3110,7 +3138,7 @@ dependencies = [ "derive_more", "displaydoc", "ibc-primitives", - "ibc-proto", + "ibc-proto 0.38.0", "ics23", "prost 0.12.3", "serde 1.0.193", @@ -3143,7 +3171,7 @@ dependencies = [ "ibc-core-commitment-types", "ibc-core-host-types", "ibc-primitives", - "ibc-proto", + 
"ibc-proto 0.38.0", "prost 0.12.3", "serde 1.0.193", "subtle-encoding", @@ -3181,7 +3209,7 @@ dependencies = [ "ibc-core-host-types", "ibc-core-router-types", "ibc-primitives", - "ibc-proto", + "ibc-proto 0.38.0", "prost 0.12.3", "serde 1.0.193", "subtle-encoding", @@ -3225,7 +3253,7 @@ dependencies = [ "ibc-core-handler-types", "ibc-core-host-types", "ibc-primitives", - "ibc-proto", + "ibc-proto 0.38.0", "prost 0.12.3", "serde 1.0.193", "sha2 0.10.8", @@ -3271,7 +3299,7 @@ dependencies = [ "displaydoc", "ibc-core-host-types", "ibc-primitives", - "ibc-proto", + "ibc-proto 0.38.0", "ics23", "prost 0.12.3", "serde 1.0.193", @@ -3279,6 +3307,18 @@ dependencies = [ "tendermint", ] +[[package]] +name = "ibc-derive" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92f1528535e9ca495badb76c143bdd4763c1c9d987f59d1f8b47963ba0c11674" +dependencies = [ + "darling", + "proc-macro2", + "quote", + "syn 2.0.39", +] + [[package]] name = "ibc-derive" version = "0.4.0" @@ -3299,13 +3339,29 @@ checksum = "d5edea4685267fd68514c87e7aa3a62712340c4cff6903f088a9ab571428a08a" dependencies = [ "derive_more", "displaydoc", - "ibc-proto", + "ibc-proto 0.38.0", "prost 0.12.3", "serde 1.0.193", "tendermint", "time", ] +[[package]] +name = "ibc-proto" +version = "0.37.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "63042806bb2f662ca1c68026231900cfe13361136ddfd0dd09bcb315056a22b8" +dependencies = [ + "base64 0.21.5", + "bytes", + "flex-error", + "ics23", + "prost 0.12.3", + "serde 1.0.193", + "subtle-encoding", + "tendermint-proto", +] + [[package]] name = "ibc-proto" version = "0.38.0" @@ -3331,8 +3387,8 @@ dependencies = [ "bytes", "derive_more", "displaydoc", - "ibc", - "ibc-proto", + "ibc 0.48.1", + "ibc-proto 0.38.0", "parking_lot", "primitive-types", "prost 0.12.3", @@ -4122,7 +4178,7 @@ checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" [[package]] name = "namada" -version = "0.28.2" +version = "0.29.0" dependencies = [ "assert_matches", "async-trait", @@ -4194,7 +4250,7 @@ dependencies = [ [[package]] name = "namada_apps" -version = "0.28.2" +version = "0.29.0" dependencies = [ "ark-serialize", "ark-std", @@ -4236,7 +4292,6 @@ dependencies = [ "namada", "namada_sdk", "namada_test_utils", - "num-derive", "num-rational 0.4.1", "num-traits 0.2.17", "num256", @@ -4285,11 +4340,12 @@ dependencies = [ [[package]] name = "namada_benchmarks" -version = "0.28.2" +version = "0.29.0" dependencies = [ "borsh", "borsh-ext", "criterion", + "masp_primitives", "namada", "namada_apps", "rand 0.8.5", @@ -4300,7 +4356,7 @@ dependencies = [ [[package]] name = "namada_core" -version = "0.28.2" +version = "0.29.0" dependencies = [ "ark-bls12-381", "ark-serialize", @@ -4315,8 +4371,8 @@ dependencies = [ "ethabi", "ethbridge-structs", "eyre", - "ibc", - "ibc-derive", + "ibc 0.48.1", + "ibc-derive 0.4.0", "ibc-testkit", "ics23", "impl-num-traits", @@ -4325,6 +4381,7 @@ dependencies = [ "k256", "masp_primitives", "namada_macros", + "num-derive", "num-integer", "num-rational 0.4.1", "num-traits 0.2.17", @@ -4357,7 +4414,7 @@ dependencies = [ [[package]] name = "namada_encoding_spec" -version = "0.28.2" +version = "0.29.0" dependencies = [ "borsh", "itertools 0.10.5", @@ -4368,7 +4425,7 @@ dependencies = [ [[package]] name = "namada_ethereum_bridge" -version = "0.28.2" +version = "0.29.0" dependencies = [ "assert_matches", "borsh", @@ -4391,9 +4448,35 @@ dependencies = [ "tracing", ] +[[package]] +name = "namada_examples" +version = 
"0.29.0" +dependencies = [ + "masp_proofs", + "namada_sdk", + "proptest", + "serde_json", + "tokio", +] + +[[package]] +name = "namada_light_sdk" +version = "0.29.0" +dependencies = [ + "borsh", + "borsh-ext", + "ibc 0.47.0", + "namada_core", + "namada_sdk", + "prost 0.12.3", + "tendermint-config", + "tendermint-rpc", + "tokio", +] + [[package]] name = "namada_macros" -version = "0.28.2" +version = "0.29.0" dependencies = [ "proc-macro2", "quote", @@ -4402,7 +4485,7 @@ dependencies = [ [[package]] name = "namada_proof_of_stake" -version = "0.28.2" +version = "0.29.0" dependencies = [ "assert_matches", "borsh", @@ -4424,7 +4507,7 @@ dependencies = [ [[package]] name = "namada_sdk" -version = "0.28.2" +version = "0.29.0" dependencies = [ "assert_matches", "async-trait", @@ -4452,6 +4535,7 @@ dependencies = [ "owo-colors", "parse_duration", "paste", + "proptest", "prost 0.12.3", "rand 0.8.5", "rand_core 0.6.4", @@ -4474,7 +4558,7 @@ dependencies = [ [[package]] name = "namada_test_utils" -version = "0.28.2" +version = "0.29.0" dependencies = [ "borsh", "namada_core", @@ -4483,7 +4567,7 @@ dependencies = [ [[package]] name = "namada_tests" -version = "0.28.2" +version = "0.29.0" dependencies = [ "assert_cmd", "async-trait", @@ -4534,7 +4618,7 @@ dependencies = [ [[package]] name = "namada_tx_prelude" -version = "0.28.2" +version = "0.29.0" dependencies = [ "borsh", "borsh-ext", @@ -4549,7 +4633,7 @@ dependencies = [ [[package]] name = "namada_vm_env" -version = "0.28.2" +version = "0.29.0" dependencies = [ "borsh", "masp_primitives", @@ -4558,7 +4642,7 @@ dependencies = [ [[package]] name = "namada_vp_prelude" -version = "0.28.2" +version = "0.29.0" dependencies = [ "borsh", "borsh-ext", diff --git a/Cargo.toml b/Cargo.toml index b5c367036e..bd20510b10 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,17 +5,19 @@ members = [ "apps", "benches", "core", + "encoding_spec", "ethereum_bridge", + "light_sdk", + "macros", "proof_of_stake", + "sdk", "shared", "test_utils", "tests", "tx_prelude", "vm_env", - "macros", "vp_prelude", - "encoding_spec", - "sdk", + "examples", ] # wasm packages have to be built separately @@ -34,7 +36,7 @@ keywords = ["blockchain", "privacy", "crypto", "protocol", "network"] license = "GPL-3.0" readme = "README.md" repository = "https://github.com/anoma/namada" -version = "0.28.2" +version = "0.29.0" [workspace.dependencies] ark-bls12-381 = {version = "0.3"} diff --git a/apps/Cargo.toml b/apps/Cargo.toml index e01b4bac86..a03d6dabf6 100644 --- a/apps/Cargo.toml +++ b/apps/Cargo.toml @@ -102,7 +102,6 @@ masp_primitives = { workspace = true, features = ["transparent-inputs"] } masp_proofs = { workspace = true, features = ["bundled-prover", "download-params"] } num_cpus.workspace = true num256.workspace = true -num-derive.workspace = true num-rational.workspace = true num-traits.workspace = true once_cell.workspace = true diff --git a/apps/src/lib/bench_utils.rs b/apps/src/lib/bench_utils.rs index c793c84fc0..ddfae1a4cc 100644 --- a/apps/src/lib/bench_utils.rs +++ b/apps/src/lib/bench_utils.rs @@ -217,7 +217,8 @@ impl Default for BenchShell { source: Some(defaults::albert_address()), }; let params = - proof_of_stake::read_pos_params(&shell.wl_storage).unwrap(); + proof_of_stake::storage::read_pos_params(&shell.wl_storage) + .unwrap(); let mut bench_shell = BenchShell { inner: shell, tempdir, @@ -398,13 +399,14 @@ impl BenchShell { pub fn advance_epoch(&mut self) { let params = - proof_of_stake::read_pos_params(&self.inner.wl_storage).unwrap(); + 
proof_of_stake::storage::read_pos_params(&self.inner.wl_storage) + .unwrap(); self.wl_storage.storage.block.epoch = self.wl_storage.storage.block.epoch.next(); let current_epoch = self.wl_storage.storage.block.epoch; - proof_of_stake::copy_validator_sets_and_positions( + proof_of_stake::validator_set_update::copy_validator_sets_and_positions( &mut self.wl_storage, ¶ms, current_epoch, @@ -821,10 +823,7 @@ impl BenchShieldedCtx { source: TransferSource, target: TransferTarget, ) -> (Self, Tx) { - let denominated_amount = DenominatedAmount { - amount, - denom: 0.into(), - }; + let denominated_amount = DenominatedAmount::native(amount); let async_runtime = tokio::runtime::Runtime::new().unwrap(); let spending_key = self .wallet diff --git a/apps/src/lib/cli.rs b/apps/src/lib/cli.rs index 384331a8be..fca330ec48 100644 --- a/apps/src/lib/cli.rs +++ b/apps/src/lib/cli.rs @@ -246,6 +246,7 @@ pub mod cmds { .subcommand(QueryAccount::def().display_order(5)) .subcommand(QueryTransfers::def().display_order(5)) .subcommand(QueryConversions::def().display_order(5)) + .subcommand(QueryMaspRewardTokens::def().display_order(5)) .subcommand(QueryBlock::def().display_order(5)) .subcommand(QueryBalance::def().display_order(5)) .subcommand(QueryBonds::def().display_order(5)) @@ -313,6 +314,8 @@ pub mod cmds { let query_transfers = Self::parse_with_ctx(matches, QueryTransfers); let query_conversions = Self::parse_with_ctx(matches, QueryConversions); + let query_masp_reward_tokens = + Self::parse_with_ctx(matches, QueryMaspRewardTokens); let query_block = Self::parse_with_ctx(matches, QueryBlock); let query_balance = Self::parse_with_ctx(matches, QueryBalance); let query_bonds = Self::parse_with_ctx(matches, QueryBonds); @@ -370,6 +373,7 @@ pub mod cmds { .or(query_epoch) .or(query_transfers) .or(query_conversions) + .or(query_masp_reward_tokens) .or(query_block) .or(query_balance) .or(query_bonds) @@ -456,6 +460,7 @@ pub mod cmds { QueryAccount(QueryAccount), QueryTransfers(QueryTransfers), QueryConversions(QueryConversions), + QueryMaspRewardTokens(QueryMaspRewardTokens), QueryBlock(QueryBlock), QueryBalance(QueryBalance), QueryBonds(QueryBonds), @@ -479,26 +484,58 @@ pub mod cmds { #[allow(clippy::large_enum_variant)] #[derive(Clone, Debug)] pub enum NamadaWallet { - /// Key management commands - Key(WalletKey), - /// Address management commands - Address(WalletAddress), - /// MASP key, address management commands - Masp(WalletMasp), + /// Key generation + KeyGen(WalletGen), + /// Key derivation + KeyDerive(WalletDerive), + /// Payment address generation + PayAddrGen(WalletGenPaymentAddress), + /// Key / address list + KeyAddrList(WalletListKeysAddresses), + /// Key / address search + KeyAddrFind(WalletFindKeysAddresses), + /// Key export + KeyExport(WalletExportKey), + /// Key import + KeyImport(WalletImportKey), + /// Key / address add + KeyAddrAdd(WalletAddKeyAddress), + /// Key / address remove + KeyAddrRemove(WalletRemoveKeyAddress), } impl Cmd for NamadaWallet { fn add_sub(app: App) -> App { - app.subcommand(WalletKey::def()) - .subcommand(WalletAddress::def()) - .subcommand(WalletMasp::def()) + app.subcommand(WalletGen::def()) + .subcommand(WalletDerive::def()) + .subcommand(WalletGenPaymentAddress::def()) + .subcommand(WalletListKeysAddresses::def()) + .subcommand(WalletFindKeysAddresses::def()) + .subcommand(WalletExportKey::def()) + .subcommand(WalletImportKey::def()) + .subcommand(WalletAddKeyAddress::def()) + .subcommand(WalletRemoveKeyAddress::def()) } fn parse(matches: &ArgMatches) -> Option { - 
let key = SubCmd::parse(matches).map(Self::Key); - let address = SubCmd::parse(matches).map(Self::Address); - let masp = SubCmd::parse(matches).map(Self::Masp); - key.or(address).or(masp) + let gen = SubCmd::parse(matches).map(Self::KeyGen); + let derive = SubCmd::parse(matches).map(Self::KeyDerive); + let pay_addr_gen = SubCmd::parse(matches).map(Self::PayAddrGen); + let key_addr_list = SubCmd::parse(matches).map(Self::KeyAddrList); + let key_addr_find = SubCmd::parse(matches).map(Self::KeyAddrFind); + let export = SubCmd::parse(matches).map(Self::KeyExport); + let import = SubCmd::parse(matches).map(Self::KeyImport); + let key_addr_add = SubCmd::parse(matches).map(Self::KeyAddrAdd); + let key_addr_remove = + SubCmd::parse(matches).map(Self::KeyAddrRemove); + gen.or(derive) + .or(pay_addr_gen) + .or(key_addr_list) + .or(key_addr_find) + .or(export) + .or(import) + .or(key_addr_add) + .or(key_addr_remove) } } @@ -521,138 +558,129 @@ pub mod cmds { } } + /// In the transparent setting, generate a new keypair and an implicit + /// address derived from it. In the shielded setting, generate a new + /// spending key. #[derive(Clone, Debug)] - #[allow(clippy::large_enum_variant)] - pub enum WalletKey { - Derive(KeyDerive), - Gen(KeyGen), - Find(KeyFind), - List(KeyList), - Export(Export), - } + pub struct WalletGen(pub args::KeyGen); - impl SubCmd for WalletKey { - const CMD: &'static str = "key"; + impl SubCmd for WalletGen { + const CMD: &'static str = "gen"; fn parse(matches: &ArgMatches) -> Option { - matches.subcommand_matches(Self::CMD).and_then(|matches| { - let generate = SubCmd::parse(matches).map(Self::Gen); - let restore = SubCmd::parse(matches).map(Self::Derive); - let lookup = SubCmd::parse(matches).map(Self::Find); - let list = SubCmd::parse(matches).map(Self::List); - let export = SubCmd::parse(matches).map(Self::Export); - generate.or(restore).or(lookup).or(list).or(export) - }) + matches + .subcommand_matches(Self::CMD) + .map(|matches| Self(args::KeyGen::parse(matches))) } fn def() -> App { App::new(Self::CMD) - .about( - "Keypair management, including methods to generate and \ - look-up keys.", + .about("Generates a new transparent / shielded secret key.") + .long_about( + "In the transparent setting, generates a keypair with a \ + given alias and derives the implicit address from its \ + public key. The address will be stored with the same \ + alias.\nIn the shielded setting, generates a new \ + spending key with a given alias.\nIn both settings, by \ + default, an HD-key with a default derivation path is \ + generated, with a random mnemonic code.", ) - .subcommand_required(true) - .arg_required_else_help(true) - .subcommand(KeyDerive::def()) - .subcommand(KeyGen::def()) - .subcommand(KeyFind::def()) - .subcommand(KeyList::def()) - .subcommand(Export::def()) + .add_args::() } } - /// Restore a keypair and implicit address from the mnemonic code + /// In the transparent setting, derive a keypair and implicit address from + /// the mnemonic code. + /// In the shielded setting, derive a spending key from the mnemonic code. 
#[derive(Clone, Debug)] - pub struct KeyDerive(pub args::KeyAndAddressDerive); + pub struct WalletDerive(pub args::KeyDerive); - impl SubCmd for KeyDerive { + impl SubCmd for WalletDerive { const CMD: &'static str = "derive"; fn parse(matches: &ArgMatches) -> Option { matches .subcommand_matches(Self::CMD) - .map(|matches| Self(args::KeyAndAddressDerive::parse(matches))) + .map(|matches| Self(args::KeyDerive::parse(matches))) } fn def() -> App { App::new(Self::CMD) .about( - "Derives a keypair from the given mnemonic code and HD \ - derivation path and derives the implicit address from \ - its public key. Stores the keypair and the address with \ - the given alias. A hardware wallet can be used, in which \ - case a private key is not derivable.", + "Derive transparent / shielded key from the mnemonic code \ + or a seed stored on the hardware wallet device.", + ) + .long_about( + "In the transparent setting, derives a keypair from the \ + given mnemonic code and HD derivation path and derives \ + the implicit address from its public key. Stores the \ + keypair and the address with the given alias.\nIn the \ + shielded setting, derives a spending key.\nA hardware \ + wallet can be used, in which case the private key is not \ + derivable.", ) - .add_args::() + .add_args::() } } - /// Generate a new keypair and an implicit address derived from it + /// List known keys and addresses #[derive(Clone, Debug)] - pub struct KeyGen(pub args::KeyAndAddressGen); + pub struct WalletListKeysAddresses(pub args::KeyAddressList); - impl SubCmd for KeyGen { - const CMD: &'static str = "gen"; + impl SubCmd for WalletListKeysAddresses { + const CMD: &'static str = "list"; fn parse(matches: &ArgMatches) -> Option { matches .subcommand_matches(Self::CMD) - .map(|matches| Self(args::KeyAndAddressGen::parse(matches))) + .map(|matches| (Self(args::KeyAddressList::parse(matches)))) } fn def() -> App { App::new(Self::CMD) - .about( - "Generates a keypair with a given alias and derives the \ - implicit address from its public key. The address will \ - be stored with the same alias.", + .about("List known keys and addresses in the wallet.") + .long_about( + "In the transparent setting, list known keypairs and \ + addresses.\nIn the shielded setting, list known spending \ + / viewing keys and payment addresses.", ) - .add_args::() + .add_args::() } } + /// Find known keys and addresses #[derive(Clone, Debug)] - pub struct KeyFind(pub args::KeyFind); + pub struct WalletFindKeysAddresses(pub args::KeyAddressFind); - impl SubCmd for KeyFind { + impl SubCmd for WalletFindKeysAddresses { const CMD: &'static str = "find"; fn parse(matches: &ArgMatches) -> Option { matches .subcommand_matches(Self::CMD) - .map(|matches| (Self(args::KeyFind::parse(matches)))) - } - - fn def() -> App { - App::new(Self::CMD) - .about("Searches for a keypair from a public key or an alias.") - .add_args::() - } - } - - #[derive(Clone, Debug)] - pub struct KeyList(pub args::KeyList); - - impl SubCmd for KeyList { - const CMD: &'static str = "list"; - - fn parse(matches: &ArgMatches) -> Option { - matches - .subcommand_matches(Self::CMD) - .map(|matches| (Self(args::KeyList::parse(matches)))) + .map(|matches| Self(args::KeyAddressFind::parse(matches))) } fn def() -> App { App::new(Self::CMD) - .about("List all known keys.") - .add_args::() + .about("Find known keys and addresses in the wallet.") + .long_about( + "In the transparent setting, searches for a keypair / \ + address by a given alias, public key, or a public key \ + hash. 
Looks up an alias of the given address.\nIn the \ + shielded setting, searches for a spending / viewing key \ + and payment address by a given alias. Looks up an alias \ + of the given payment address.", + ) + .add_args::() } } + /// Export key to a file #[derive(Clone, Debug)] - pub struct Export(pub args::KeyExport); + pub struct WalletExportKey(pub args::KeyExport); - impl SubCmd for Export { + impl SubCmd for WalletExportKey { const CMD: &'static str = "export"; fn parse(matches: &ArgMatches) -> Option { @@ -663,320 +691,99 @@ pub mod cmds { fn def() -> App { App::new(Self::CMD) - .about("Exports a keypair to a file.") + .about( + "Exports a transparent keypair / shielded spending key to \ + a file.", + ) .add_args::() } } - #[allow(clippy::large_enum_variant)] + /// Import key from a file #[derive(Clone, Debug)] - pub enum WalletMasp { - GenPayAddr(MaspGenPayAddr), - GenSpendKey(MaspGenSpendKey), - AddAddrKey(MaspAddAddrKey), - ListPayAddrs, - ListKeys(MaspListKeys), - FindAddrKey(MaspFindAddrKey), - } + pub struct WalletImportKey(pub args::KeyImport); - impl SubCmd for WalletMasp { - const CMD: &'static str = "masp"; + impl SubCmd for WalletImportKey { + const CMD: &'static str = "import"; fn parse(matches: &ArgMatches) -> Option { - matches.subcommand_matches(Self::CMD).and_then(|matches| { - let genpa = SubCmd::parse(matches).map(Self::GenPayAddr); - let gensk = SubCmd::parse(matches).map(Self::GenSpendKey); - let addak = SubCmd::parse(matches).map(Self::AddAddrKey); - let listpa = ::parse(matches) - .map(|_| Self::ListPayAddrs); - let listsk = SubCmd::parse(matches).map(Self::ListKeys); - let findak = SubCmd::parse(matches).map(Self::FindAddrKey); - gensk.or(genpa).or(addak).or(listpa).or(listsk).or(findak) - }) + matches + .subcommand_matches(Self::CMD) + .map(|matches| (Self(args::KeyImport::parse(matches)))) } fn def() -> App { App::new(Self::CMD) .about( - "Multi-asset shielded pool address and keypair management \ - including methods to generate and look-up addresses and \ - keys.", + "Imports a transparent keypair / shielded spending key \ + from a file.", ) - .subcommand_required(true) - .arg_required_else_help(true) - .subcommand(MaspGenSpendKey::def()) - .subcommand(MaspGenPayAddr::def()) - .subcommand(MaspAddAddrKey::def()) - .subcommand(MaspListPayAddrs::def()) - .subcommand(MaspListKeys::def()) - .subcommand(MaspFindAddrKey::def()) + .add_args::() } } - /// Find the given shielded address or key + /// Add public / payment address to the wallet #[derive(Clone, Debug)] - pub struct MaspFindAddrKey(pub args::AddrKeyFind); + pub struct WalletAddKeyAddress(pub args::KeyAddressAdd); - impl SubCmd for MaspFindAddrKey { - const CMD: &'static str = "find"; + impl SubCmd for WalletAddKeyAddress { + const CMD: &'static str = "add"; fn parse(matches: &ArgMatches) -> Option { matches .subcommand_matches(Self::CMD) - .map(|matches| Self(args::AddrKeyFind::parse(matches))) + .map(|matches| (Self(args::KeyAddressAdd::parse(matches)))) } fn def() -> App { App::new(Self::CMD) - .about("Find the given shielded address or key in the wallet") - .add_args::() + .about("Adds the given key or address to the wallet.") + .add_args::() } } - /// List all known shielded keys + /// Remove key / address #[derive(Clone, Debug)] - pub struct MaspListKeys(pub args::MaspKeysList); + pub struct WalletRemoveKeyAddress(pub args::KeyAddressRemove); - impl SubCmd for MaspListKeys { - const CMD: &'static str = "list-keys"; + impl SubCmd for WalletRemoveKeyAddress { + const CMD: &'static str = "remove"; fn 
parse(matches: &ArgMatches) -> Option { matches .subcommand_matches(Self::CMD) - .map(|matches| Self(args::MaspKeysList::parse(matches))) - } - - fn def() -> App { - App::new(Self::CMD) - .about("Lists all shielded keys in the wallet") - .add_args::() - } - } - - /// List all known payment addresses - #[derive(Clone, Debug)] - pub struct MaspListPayAddrs; - - impl SubCmd for MaspListPayAddrs { - const CMD: &'static str = "list-addrs"; - - fn parse(matches: &ArgMatches) -> Option { - matches.subcommand_matches(Self::CMD).map(|_| Self) - } - - fn def() -> App { - App::new(Self::CMD) - .about("Lists all payment addresses in the wallet") - } - } - - /// Add a key or an address - #[derive(Clone, Debug)] - pub struct MaspAddAddrKey(pub args::MaspAddrKeyAdd); - - impl SubCmd for MaspAddAddrKey { - const CMD: &'static str = "add"; - - fn parse(matches: &ArgMatches) -> Option { - matches.subcommand_matches(Self::CMD).map(|matches| { - MaspAddAddrKey(args::MaspAddrKeyAdd::parse(matches)) - }) - } - - fn def() -> App { - App::new(Self::CMD) - .about("Adds the given payment address or key to the wallet") - .add_args::() - } - } - - /// Generate a spending key - #[derive(Clone, Debug)] - pub struct MaspGenSpendKey(pub args::MaspSpendKeyGen); - - impl SubCmd for MaspGenSpendKey { - const CMD: &'static str = "gen-key"; - - fn parse(matches: &ArgMatches) -> Option { - matches.subcommand_matches(Self::CMD).map(|matches| { - MaspGenSpendKey(args::MaspSpendKeyGen::parse(matches)) - }) - } - - fn def() -> App { - App::new(Self::CMD) - .about("Generates a random spending key") - .add_args::() - } - } - - /// Generate a payment address from a viewing key or payment address - #[derive(Clone, Debug)] - pub struct MaspGenPayAddr(pub args::MaspPayAddrGen); - - impl SubCmd for MaspGenPayAddr { - const CMD: &'static str = "gen-addr"; - - fn parse(matches: &ArgMatches) -> Option { - matches.subcommand_matches(Self::CMD).map(|matches| { - MaspGenPayAddr(args::MaspPayAddrGen::parse(matches)) - }) + .map(|matches| Self(args::KeyAddressRemove::parse(matches))) } fn def() -> App { App::new(Self::CMD) .about( - "Generates a payment address from the given spending key", + "Remove the given alias and all associated keys / \ + addresses from the wallet.", ) - .add_args::>() + .add_args::() } } + /// Generate a payment address from a viewing key or payment address #[derive(Clone, Debug)] - pub enum WalletAddress { - Gen(AddressGen), - Derive(AddressDerive), - Find(AddressOrAliasFind), - List, - Add(AddressAdd), - } - - impl SubCmd for WalletAddress { - const CMD: &'static str = "address"; - - fn parse(matches: &ArgMatches) -> Option { - matches.subcommand_matches(Self::CMD).and_then(|matches| { - let gen = SubCmd::parse(matches).map(Self::Gen); - let restore = SubCmd::parse(matches).map(Self::Derive); - let find = SubCmd::parse(matches).map(Self::Find); - let list = - ::parse(matches).map(|_| Self::List); - let add = SubCmd::parse(matches).map(Self::Add); - gen.or(restore).or(find).or(list).or(add) - }) - } - - fn def() -> App { - App::new(Self::CMD) - .about( - "Address management, including methods to generate and \ - look-up addresses.", - ) - .subcommand_required(true) - .arg_required_else_help(true) - .subcommand(AddressGen::def()) - .subcommand(AddressDerive::def()) - .subcommand(AddressOrAliasFind::def()) - .subcommand(AddressList::def()) - .subcommand(AddressAdd::def()) - } - } - - /// Generate a new keypair and an implicit address derived from it - #[derive(Clone, Debug)] - pub struct AddressGen(pub 
args::KeyAndAddressGen); - - impl SubCmd for AddressGen { - const CMD: &'static str = "gen"; - - fn parse(matches: &ArgMatches) -> Option { - matches.subcommand_matches(Self::CMD).map(|matches| { - AddressGen(args::KeyAndAddressGen::parse(matches)) - }) - } - - fn def() -> App { - App::new(Self::CMD) - .about( - "Generates a keypair with a given alias and derives the \ - implicit address from its public key. The address will \ - be stored with the same alias.", - ) - .add_args::() - } - } - - /// Restore a keypair and an implicit address from the mnemonic code - #[derive(Clone, Debug)] - pub struct AddressDerive(pub args::KeyAndAddressDerive); - - impl SubCmd for AddressDerive { - const CMD: &'static str = "derive"; - - fn parse(matches: &ArgMatches) -> Option { - matches.subcommand_matches(Self::CMD).map(|matches| { - AddressDerive(args::KeyAndAddressDerive::parse(matches)) - }) - } - - fn def() -> App { - App::new(Self::CMD) - .about( - "Derives a keypair from the given mnemonic code and HD \ - derivation path and derives the implicit address from \ - its public key. Stores the keypair and the address with \ - the given alias. A hardware wallet can be used, in which \ - case a private key is not derivable.", - ) - .add_args::() - } - } - - /// Find an address by its alias - #[derive(Clone, Debug)] - pub struct AddressOrAliasFind(pub args::AddressOrAliasFind); - - impl SubCmd for AddressOrAliasFind { - const CMD: &'static str = "find"; - - fn parse(matches: &ArgMatches) -> Option { - matches.subcommand_matches(Self::CMD).map(|matches| { - AddressOrAliasFind(args::AddressOrAliasFind::parse(matches)) - }) - } - - fn def() -> App { - App::new(Self::CMD) - .about( - "Find an address by its alias or an alias by its address.", - ) - .add_args::() - } - } - - /// List known addresses - #[derive(Clone, Debug)] - pub struct AddressList; - - impl SubCmd for AddressList { - const CMD: &'static str = "list"; - - fn parse(matches: &ArgMatches) -> Option { - matches.subcommand_matches(Self::CMD).map(|_| Self) - } - - fn def() -> App { - App::new(Self::CMD).about("List all known addresses.") - } - } - - /// Generate a new keypair and an implicit address derived from it - #[derive(Clone, Debug)] - pub struct AddressAdd(pub args::AddressAdd); + pub struct WalletGenPaymentAddress(pub args::PayAddressGen); - impl SubCmd for AddressAdd { - const CMD: &'static str = "add"; + impl SubCmd for WalletGenPaymentAddress { + const CMD: &'static str = "gen-payment-addr"; fn parse(matches: &ArgMatches) -> Option { matches .subcommand_matches(Self::CMD) - .map(|matches| AddressAdd(args::AddressAdd::parse(matches))) + .map(|matches| Self(args::PayAddressGen::parse(matches))) } fn def() -> App { App::new(Self::CMD) - .about("Store an alias for an address in the wallet.") - .add_args::() + .about( + "Generates a payment address from the given spending key.", + ) + .add_args::>() } } @@ -1670,6 +1477,28 @@ pub mod cmds { } } + #[derive(Clone, Debug)] + pub struct QueryMaspRewardTokens(pub args::Query); + + impl SubCmd for QueryMaspRewardTokens { + const CMD: &'static str = "masp-reward-tokens"; + + fn parse(matches: &ArgMatches) -> Option { + matches.subcommand_matches(Self::CMD).map(|matches| { + QueryMaspRewardTokens(args::Query::parse(matches)) + }) + } + + fn def() -> App { + App::new(Self::CMD) + .about( + "Query the tokens which can earn MASP rewards while \ + shielded.", + ) + .add_args::>() + } + } + #[derive(Clone, Debug)] pub struct QueryBlock(pub args::Query); @@ -2043,7 +1872,7 @@ pub mod cmds { fn def() -> App 
{ App::new(Self::CMD) - .about("Change commission raate.") + .about("Change commission rate.") .add_args::>() } } @@ -2208,6 +2037,7 @@ pub mod cmds { DefaultBaseDir(DefaultBaseDir), EpochSleep(EpochSleep), ValidateGenesisTemplates(ValidateGenesisTemplates), + TestGenesis(TestGenesis), SignGenesisTxs(SignGenesisTxs), } @@ -2240,6 +2070,8 @@ pub mod cmds { SubCmd::parse(matches).map(Self::ValidateGenesisTemplates); let genesis_tx = SubCmd::parse(matches).map(Self::SignGenesisTxs); + let test_genesis = + SubCmd::parse(matches).map(Self::TestGenesis); join_network .or(fetch_wasms) .or(validate_wasm) @@ -2252,6 +2084,7 @@ pub mod cmds { .or(default_base_dir) .or(epoch_sleep) .or(validate_genesis_templates) + .or(test_genesis) .or(genesis_tx) }) } @@ -2271,6 +2104,7 @@ pub mod cmds { .subcommand(DefaultBaseDir::def()) .subcommand(EpochSleep::def()) .subcommand(ValidateGenesisTemplates::def()) + .subcommand(TestGenesis::def()) .subcommand(SignGenesisTxs::def()) .subcommand_required(true) .arg_required_else_help(true) @@ -2460,6 +2294,28 @@ pub mod cmds { } } + #[derive(Clone, Debug)] + pub struct TestGenesis(pub args::TestGenesis); + + impl SubCmd for TestGenesis { + const CMD: &'static str = "test-genesis"; + + fn parse(matches: &ArgMatches) -> Option { + matches + .subcommand_matches(Self::CMD) + .map(|matches| Self(args::TestGenesis::parse(matches))) + } + + fn def() -> App { + App::new(Self::CMD) + .about( + "Dry run genesis files and get a report on problems that \ + may be found.", + ) + .add_args::() + } + } + #[derive(Clone, Debug)] pub struct SignGenesisTxs(pub args::SignGenesisTxs); @@ -2947,7 +2803,7 @@ pub mod args { use namada::types::ethereum_events::EthAddress; use namada::types::keccak::KeccakHash; use namada::types::key::*; - use namada::types::masp::MaspValue; + use namada::types::masp::PaymentAddress; use namada::types::storage::{self, BlockHeight, Epoch}; use namada::types::time::DateTimeUtc; use namada::types::token; @@ -2996,9 +2852,11 @@ pub mod args { pub const BRIDGE_POOL_GAS_AMOUNT: ArgDefault = arg_default( "pool-gas-amount", - DefaultFn(|| token::DenominatedAmount { - amount: token::Amount::zero(), - denom: NATIVE_MAX_DECIMAL_PLACES.into(), + DefaultFn(|| { + token::DenominatedAmount::new( + token::Amount::zero(), + NATIVE_MAX_DECIMAL_PLACES.into(), + ) }), ); pub const BRIDGE_POOL_GAS_PAYER: ArgOpt = @@ -3035,6 +2893,7 @@ pub mod args { pub const DESTINATION_VALIDATOR: Arg = arg("destination-validator"); pub const DISCORD_OPT: ArgOpt = arg_opt("discord-handle"); + pub const DO_IT: ArgFlag = flag("do-it"); pub const DONT_ARCHIVE: ArgFlag = flag("dont-archive"); pub const DONT_PREFETCH_WASM: ArgFlag = flag("dont-prefetch-wasm"); pub const DRY_RUN_TX: ArgFlag = flag("dry-run"); @@ -3060,6 +2919,7 @@ pub mod args { pub const FEE_AMOUNT_OPT: ArgOpt = arg_opt("gas-price"); pub const FEE_PAYER_OPT: ArgOpt = arg_opt("gas-payer"); + pub const FILE_PATH: Arg = arg("file"); pub const FORCE: ArgFlag = flag("force"); pub const GAS_LIMIT: ArgDefault = arg_default("gas-limit", DefaultFn(|| GasLimit::from(25_000))); @@ -3068,9 +2928,11 @@ pub mod args { pub const FEE_PAYER: Arg = arg("fee-payer"); pub const FEE_AMOUNT: ArgDefault = arg_default( "fee-amount", - DefaultFn(|| token::DenominatedAmount { - amount: token::Amount::default(), - denom: NATIVE_MAX_DECIMAL_PLACES.into(), + DefaultFn(|| { + token::DenominatedAmount::new( + token::Amount::default(), + NATIVE_MAX_DECIMAL_PLACES.into(), + ) }), ); pub const GENESIS_BOND_SOURCE: ArgOpt = arg_opt("source"); @@ -3095,10 +2957,10 @@ 
pub mod args { let raw = "127.0.0.1:26657"; TendermintAddress::from_str(raw).unwrap() })); - pub const LEDGER_ADDRESS: Arg = arg("node"); + pub const LIST_FIND_ADDRESSES_ONLY: ArgFlag = flag("addr"); + pub const LIST_FIND_KEYS_ONLY: ArgFlag = flag("keys"); pub const LOCALHOST: ArgFlag = flag("localhost"); - pub const MASP_VALUE: Arg = arg("value"); pub const MAX_COMMISSION_RATE_CHANGE: Arg = arg("max-commission-rate-change"); pub const MAX_ETH_GAS: ArgOpt = arg_opt("max_eth-gas"); @@ -3137,9 +2999,16 @@ pub mod args { pub const RAW_ADDRESS: Arg
= arg("address"); pub const RAW_ADDRESS_ESTABLISHED: Arg = arg("address"); pub const RAW_ADDRESS_OPT: ArgOpt
= RAW_ADDRESS.opt(); + pub const RAW_KEY_GEN: ArgFlag = flag("raw"); + pub const RAW_PAYMENT_ADDRESS: Arg = arg("payment-address"); + pub const RAW_PAYMENT_ADDRESS_OPT: ArgOpt = + RAW_PAYMENT_ADDRESS.opt(); pub const RAW_PUBLIC_KEY: Arg = arg("public-key"); pub const RAW_PUBLIC_KEY_OPT: ArgOpt = - arg_opt("public-key"); + RAW_PUBLIC_KEY.opt(); + pub const RAW_PUBLIC_KEY_HASH: Arg = arg("public-key-hash"); + pub const RAW_PUBLIC_KEY_HASH_OPT: ArgOpt = + RAW_PUBLIC_KEY_HASH.opt(); pub const RECEIVER: Arg = arg("receiver"); pub const RELAYER: Arg
= arg("relayer"); pub const SAFE_MODE: ArgFlag = flag("safe-mode"); @@ -3148,6 +3017,7 @@ pub mod args { pub const SELF_BOND_AMOUNT: Arg = arg("self-bond-amount"); pub const SENDER: Arg = arg("sender"); + pub const SHIELDED: ArgFlag = flag("shielded"); pub const SIGNER: ArgOpt = arg_opt("signer"); pub const SIGNING_KEYS: ArgMulti = arg_multi("signing-keys"); @@ -3164,8 +3034,10 @@ pub mod args { pub const TM_ADDRESS: Arg = arg("tm-address"); pub const TOKEN_OPT: ArgOpt = TOKEN.opt(); pub const TOKEN: Arg = arg("token"); + pub const TOKEN_STR: Arg = arg("token"); pub const TRANSFER_SOURCE: Arg = arg("source"); pub const TRANSFER_TARGET: Arg = arg("target"); + pub const TRANSPARENT: ArgFlag = flag("transparent"); pub const TX_HASH: Arg = arg("tx-hash"); pub const THRESHOLD: ArgOpt = arg_opt("threshold"); pub const UNSAFE_DONT_ENCRYPT: ArgFlag = flag("unsafe-dont-encrypt"); @@ -3185,7 +3057,7 @@ pub mod args { arg_opt("eth-cold-key"); pub const VALIDATOR_ETH_HOT_KEY: ArgOpt = arg_opt("eth-hot-key"); - pub const VALUE: ArgOpt = arg_opt("value"); + pub const VALUE: Arg = arg("value"); pub const VIEWING_KEY: Arg = arg("key"); pub const VP: ArgOpt = arg_opt("vp"); pub const WALLET_ALIAS_FORCE: ArgFlag = flag("wallet-alias-force"); @@ -4025,7 +3897,7 @@ pub mod args { let chain_ctx = ctx.borrow_mut_chain_or_exit(); TxIbcTransfer:: { tx, - source: chain_ctx.get(&self.source), + source: chain_ctx.get_cached(&self.source), receiver: self.receiver, token: chain_ctx.get(&self.token), amount: self.amount, @@ -4042,7 +3914,7 @@ pub mod args { impl Args for TxIbcTransfer { fn parse(matches: &ArgMatches) -> Self { let tx = Tx::parse(matches); - let source = SOURCE.parse(matches); + let source = TRANSFER_SOURCE.parse(matches); let receiver = RECEIVER.parse(matches); let token = TOKEN.parse(matches); let amount = InputAmount::Unvalidated(AMOUNT.parse(matches)); @@ -4500,7 +4372,7 @@ pub mod args { println!("Could not parse bond amount: {:?}", e); safe_exit(1); }) - .amount; + .amount(); let source = SOURCE_OPT.parse(matches); let tx_code_path = PathBuf::from(TX_BOND_WASM); Self { @@ -4550,7 +4422,7 @@ pub mod args { println!("Could not parse bond amount: {:?}", e); safe_exit(1); }) - .amount; + .amount(); let source = SOURCE_OPT.parse(matches); let tx_code_path = PathBuf::from(TX_UNBOND_WASM); Self { @@ -4676,7 +4548,7 @@ pub mod args { println!("Could not parse bond amount: {:?}", e); safe_exit(1); }) - .amount; + .amount(); let tx_code_path = PathBuf::from(TX_REDELEGATE_WASM); Self { tx, @@ -5412,6 +5284,7 @@ pub mod args { tx, validator: chain_ctx.get(&self.validator), consensus_key: self.consensus_key.map(|x| chain_ctx.get(&x)), + unsafe_dont_encrypt: self.unsafe_dont_encrypt, tx_code_path: self.tx_code_path.to_path_buf(), } } @@ -5422,11 +5295,13 @@ pub mod args { let tx = Tx::parse(matches); let validator = VALIDATOR.parse(matches); let consensus_key = VALIDATOR_CONSENSUS_KEY.parse(matches); + let unsafe_dont_encrypt = UNSAFE_DONT_ENCRYPT.parse(matches); let tx_code_path = PathBuf::from(TX_CHANGE_CONSENSUS_KEY_WASM); Self { tx, validator, consensus_key, + unsafe_dont_encrypt, tx_code_path, } } @@ -5440,6 +5315,10 @@ pub mod args { "The desired new consensus key. A new one will be \ generated if none given. Note this key must be ed25519.", )) + .arg(UNSAFE_DONT_ENCRYPT.def().help( + "UNSAFE: Do not encrypt the generated keypairs. 
Do not \ + use this for keys used in a live network.", + )) } } @@ -5654,7 +5533,7 @@ pub mod args { query, output_folder: self.output_folder, target: chain_ctx.get(&self.target), - token: chain_ctx.get(&self.token), + token: self.token, amount: self.amount, port_id: self.port_id, channel_id: self.channel_id, @@ -5667,7 +5546,7 @@ pub mod args { let query = Query::parse(matches); let output_folder = OUTPUT_FOLDER_PATH.parse(matches); let target = TRANSFER_TARGET.parse(matches); - let token = TOKEN.parse(matches); + let token = TOKEN_STR.parse(matches); let amount = InputAmount::Unvalidated(AMOUNT.parse(matches)); let port_id = PORT_ID.parse(matches); let channel_id = CHANNEL_ID.parse(matches); @@ -6144,71 +6023,8 @@ pub mod args { } } - impl Args for MaspAddrKeyAdd { - fn parse(matches: &ArgMatches) -> Self { - let alias = ALIAS.parse(matches); - let alias_force = ALIAS_FORCE.parse(matches); - let value = MASP_VALUE.parse(matches); - let unsafe_dont_encrypt = UNSAFE_DONT_ENCRYPT.parse(matches); - Self { - alias, - alias_force, - value, - unsafe_dont_encrypt, - } - } - - fn def(app: App) -> App { - app.arg( - ALIAS - .def() - .help("An alias to be associated with the new entry."), - ) - .arg(ALIAS_FORCE.def().help( - "Override the alias without confirmation if it already exists.", - )) - .arg( - MASP_VALUE - .def() - .help("A spending key, viewing key, or payment address."), - ) - .arg(UNSAFE_DONT_ENCRYPT.def().help( - "UNSAFE: Do not encrypt the keypair. Do not use this for keys \ - used in a live network.", - )) - } - } - - impl Args for MaspSpendKeyGen { - fn parse(matches: &ArgMatches) -> Self { - let alias = ALIAS.parse(matches); - let alias_force = ALIAS_FORCE.parse(matches); - let unsafe_dont_encrypt = UNSAFE_DONT_ENCRYPT.parse(matches); - Self { - alias, - alias_force, - unsafe_dont_encrypt, - } - } - - fn def(app: App) -> App { - app.arg( - ALIAS - .def() - .help("An alias to be associated with the spending key."), - ) - .arg(ALIAS_FORCE.def().help( - "Override the alias without confirmation if it already exists.", - )) - .arg(UNSAFE_DONT_ENCRYPT.def().help( - "UNSAFE: Do not encrypt the keypair. 
Do not use this for keys \ - used in a live network.", - )) - } - } - - impl CliToSdk> for MaspPayAddrGen { - fn to_sdk(self, ctx: &mut Context) -> MaspPayAddrGen { + impl CliToSdk> for PayAddressGen { + fn to_sdk(self, ctx: &mut Context) -> PayAddressGen { use namada_sdk::wallet::Wallet; use crate::wallet::CliWalletUtils; @@ -6232,7 +6048,7 @@ pub mod args { } else { find_viewing_key(&mut ctx.borrow_mut_chain_or_exit().wallet) }; - MaspPayAddrGen:: { + PayAddressGen:: { alias: self.alias, alias_force: self.alias_force, viewing_key, @@ -6241,7 +6057,7 @@ pub mod args { } } - impl Args for MaspPayAddrGen { + impl Args for PayAddressGen { fn parse(matches: &ArgMatches) -> Self { let alias = ALIAS.parse(matches); let alias_force = ALIAS_FORCE.parse(matches); @@ -6272,16 +6088,18 @@ pub mod args { } } - impl Args for KeyAndAddressDerive { + impl Args for KeyDerive { fn parse(matches: &ArgMatches) -> Self { let scheme = SCHEME.parse(matches); - let alias = ALIAS_OPT.parse(matches); + let shielded = SHIELDED.parse(matches); + let alias = ALIAS.parse(matches); let alias_force = ALIAS_FORCE.parse(matches); let unsafe_dont_encrypt = UNSAFE_DONT_ENCRYPT.parse(matches); let use_device = USE_DEVICE.parse(matches); let derivation_path = HD_WALLET_DERIVATION_PATH.parse(matches); Self { scheme, + shielded, alias, alias_force, unsafe_dont_encrypt, @@ -6291,15 +6109,18 @@ pub mod args { } fn def(app: App) -> App { - app.arg(SCHEME.def().help( - "The type of key that should be added. Argument must be \ - either ed25519 or secp256k1. If none provided, the default \ - key scheme is ed25519.", - )) - .arg(ALIAS_OPT.def().help( - "The key and address alias. If none provided, the alias will \ - be the public key hash.", + app.arg(SCHEME.def().conflicts_with(SHIELDED.name).help( + "For the transparent pool, the type of key that should be \ + derived. Argument must be either ed25519 or secp256k1. If \ + none provided, the default key scheme is ed25519.\nNot \ + applicable for the shielded pool.", )) + .arg( + SHIELDED + .def() + .help("Derive a spending key for the shielded pool."), + ) + .arg(ALIAS.def().help("The key and address alias.")) .arg( ALIAS_FORCE .def() @@ -6324,15 +6145,19 @@ pub mod args { } } - impl Args for KeyAndAddressGen { + impl Args for KeyGen { fn parse(matches: &ArgMatches) -> Self { let scheme = SCHEME.parse(matches); - let alias = ALIAS_OPT.parse(matches); + let shielded = SHIELDED.parse(matches); + let raw = RAW_KEY_GEN.parse(matches); + let alias = ALIAS.parse(matches); let alias_force = ALIAS_FORCE.parse(matches); let unsafe_dont_encrypt = UNSAFE_DONT_ENCRYPT.parse(matches); let derivation_path = HD_WALLET_DERIVATION_PATH.parse(matches); Self { scheme, + shielded, + raw, alias, alias_force, unsafe_dont_encrypt, @@ -6341,15 +6166,27 @@ pub mod args { } fn def(app: App) -> App { - app.arg(SCHEME.def().help( - "The type of key that should be generated. Argument must be \ - either ed25519 or secp256k1. If none provided, the default \ - key scheme is ed25519.", - )) - .arg(ALIAS_OPT.def().help( - "The key and address alias. If none provided, the alias will \ - be the public key hash.", + app.arg(SCHEME.def().conflicts_with(SHIELDED.name).help( + "For the transparent pool, the type of key that should be \ + generated. Argument must be either ed25519 or secp256k1. 
If \ + none provided, the default key scheme is ed25519.\nNot \ + applicable for the shielded pool.", )) + .arg( + SHIELDED + .def() + .help("Generate a spending key for the shielded pool."), + ) + .arg( + RAW_KEY_GEN + .def() + .conflicts_with(HD_WALLET_DERIVATION_PATH.name) + .help( + "Generate a random non-HD secret / spending key. No \ + mnemonic code is generated.", + ), + ) + .arg(ALIAS.def().help("The key and address alias.")) .arg(ALIAS_FORCE.def().help( "Override the alias without confirmation if it already exists.", )) @@ -6358,115 +6195,186 @@ pub mod args { used in a live network.", )) .arg(HD_WALLET_DERIVATION_PATH.def().help( - "Generate a new key and wallet using BIP39 mnemonic code and \ - HD derivation path. Use keyword `default` to refer to a \ + "HD key derivation path. Use keyword `default` to refer to a \ scheme default path:\n- m/44'/60'/0'/0/0 for secp256k1 \ scheme\n- m/44'/877'/0'/0'/0' for ed25519 scheme.\nFor \ ed25519, all path indices will be promoted to hardened \ - indexes. If none specified, mnemonic code and derivation \ - path are not used.", + indexes. If none is specified, the scheme default path is \ + used.", )) } } - impl Args for KeyFind { + impl Args for KeyAddressList { fn parse(matches: &ArgMatches) -> Self { - let public_key = RAW_PUBLIC_KEY_OPT.parse(matches); - let alias = ALIAS_OPT.parse(matches); - let value = VALUE.parse(matches); + let transparent_only = TRANSPARENT.parse(matches); + let shielded_only = SHIELDED.parse(matches); + let keys_only = LIST_FIND_KEYS_ONLY.parse(matches); + let addresses_only = LIST_FIND_ADDRESSES_ONLY.parse(matches); + let decrypt = DECRYPT.parse(matches); let unsafe_show_secret = UNSAFE_SHOW_SECRET.parse(matches); - Self { - public_key, - alias, - value, + transparent_only, + shielded_only, + keys_only, + addresses_only, + decrypt, unsafe_show_secret, } } fn def(app: App) -> App { app.arg( - RAW_PUBLIC_KEY_OPT + TRANSPARENT .def() - .help("A public key associated with the keypair.") - .conflicts_with_all([ALIAS_OPT.name, VALUE.name]), + .help("List transparent keys / addresses only."), ) .arg( - ALIAS_OPT + SHIELDED .def() - .help("An alias associated with the keypair.") - .conflicts_with(VALUE.name), + .help("List keys / addresses of the shielded pool only."), ) - .arg( - VALUE - .def() - .help("A public key or alias associated with the keypair."), + .group( + ArgGroup::new("only_group_1") + .args([TRANSPARENT.name, SHIELDED.name]), ) + .arg(LIST_FIND_KEYS_ONLY.def().help("List keys only.")) + .arg(LIST_FIND_ADDRESSES_ONLY.def().help("List addresses only.")) + .group(ArgGroup::new("only_group_2").args([ + LIST_FIND_KEYS_ONLY.name, + LIST_FIND_ADDRESSES_ONLY.name, + ])) + .arg(DECRYPT.def().help("Decrypt keys that are encrypted.")) .arg( UNSAFE_SHOW_SECRET .def() - .help("UNSAFE: Print the secret key."), + .help("UNSAFE: Print the secret / spending keys."), ) } } - impl Args for AddrKeyFind { + impl Args for KeyAddressFind { fn parse(matches: &ArgMatches) -> Self { - let alias = ALIAS.parse(matches); + let alias = ALIAS_OPT.parse(matches); + let address = RAW_ADDRESS_OPT.parse(matches); + let public_key = RAW_PUBLIC_KEY_OPT.parse(matches); + let public_key_hash = RAW_PUBLIC_KEY_HASH_OPT.parse(matches); + let payment_address = RAW_PAYMENT_ADDRESS_OPT.parse(matches); + let keys_only = LIST_FIND_KEYS_ONLY.parse(matches); + let addresses_only = LIST_FIND_ADDRESSES_ONLY.parse(matches); + let decrypt = DECRYPT.parse(matches); let unsafe_show_secret = UNSAFE_SHOW_SECRET.parse(matches); Self { alias, + address, + 
public_key, + public_key_hash, + payment_address, + keys_only, + addresses_only, + decrypt, unsafe_show_secret, } } fn def(app: App) -> App { - app.arg(ALIAS.def().help("The alias that is to be found.")) - .arg( - UNSAFE_SHOW_SECRET - .def() - .help("UNSAFE: Print the spending key values."), - ) + app.arg( + ALIAS_OPT + .def() + .help("An alias associated with the keys / addresses."), + ) + .arg( + RAW_ADDRESS_OPT.def().help( + "The bech32m encoded string of a transparent address.", + ), + ) + .arg( + RAW_PUBLIC_KEY_OPT.def().help( + "A public key associated with the transparent keypair.", + ), + ) + .arg(RAW_PUBLIC_KEY_HASH_OPT.def().help( + "A public key hash associated with the transparent keypair.", + )) + .arg(RAW_PAYMENT_ADDRESS_OPT.def().help( + "The bech32m encoded string of a shielded payment address.", + )) + .group( + ArgGroup::new("addr_find_args") + .args([ + ALIAS_OPT.name, + RAW_ADDRESS_OPT.name, + RAW_PUBLIC_KEY_OPT.name, + RAW_PUBLIC_KEY_HASH_OPT.name, + RAW_PAYMENT_ADDRESS_OPT.name, + ]) + .required(true), + ) + .arg(LIST_FIND_KEYS_ONLY.def().help("Find keys only.")) + .arg(LIST_FIND_ADDRESSES_ONLY.def().help("Find addresses only.")) + .group(ArgGroup::new("only_group").args([ + LIST_FIND_KEYS_ONLY.name, + LIST_FIND_ADDRESSES_ONLY.name, + ])) + .arg(PRE_GENESIS.def().help( + "Use pre-genesis wallet, instead of for the current chain, if \ + any.", + )) + .arg(DECRYPT.def().help("Decrypt keys that are encrypted.")) + .arg( + UNSAFE_SHOW_SECRET + .def() + .help("UNSAFE: Print the secret / spending key."), + ) } } - impl Args for MaspKeysList { + impl Args for KeyAddressAdd { fn parse(matches: &ArgMatches) -> Self { - let decrypt = DECRYPT.parse(matches); - let unsafe_show_secret = UNSAFE_SHOW_SECRET.parse(matches); + let alias = ALIAS.parse(matches); + let alias_force = ALIAS_FORCE.parse(matches); + let value = VALUE.parse(matches); + let unsafe_dont_encrypt = UNSAFE_DONT_ENCRYPT.parse(matches); Self { - decrypt, - unsafe_show_secret, + alias, + alias_force, + value, + unsafe_dont_encrypt, } } fn def(app: App) -> App { - app.arg(DECRYPT.def().help("Decrypt keys that are encrypted.")) - .arg( - UNSAFE_SHOW_SECRET - .def() - .help("UNSAFE: Print the spending key values."), - ) + app.arg( + ALIAS + .def() + .help("An alias to be associated with the new entry."), + ) + .arg(ALIAS_FORCE.def().help( + "Override the alias without confirmation if it already exists.", + )) + .arg(VALUE.def().help( + "Any value of the following:\n- transparent pool secret \ + key\n- transparent pool public key\n- transparent pool \ + address\n- shielded pool spending key\n- shielded pool \ + viewing key\n- shielded pool payment address ", + )) + .arg(UNSAFE_DONT_ENCRYPT.def().help( + "UNSAFE: Do not encrypt the added keys. 
Do not use this for \ + keys used in a live network.", + )) } } - impl Args for KeyList { + impl Args for KeyAddressRemove { fn parse(matches: &ArgMatches) -> Self { - let decrypt = DECRYPT.parse(matches); - let unsafe_show_secret = UNSAFE_SHOW_SECRET.parse(matches); - Self { - decrypt, - unsafe_show_secret, - } + let alias = ALIAS.parse(matches); + let do_it = DO_IT.parse(matches); + Self { alias, do_it } } fn def(app: App) -> App { - app.arg(DECRYPT.def().help("Decrypt keys that are encrypted.")) - .arg( - UNSAFE_SHOW_SECRET - .def() - .help("UNSAFE: Print the secret keys."), - ) + app.arg(ALIAS.def().help("An alias to be removed.")) + .arg(DO_IT.def().help("Confirm alias removal.").required(true)) } } @@ -6483,58 +6391,34 @@ pub mod args { } } - impl Args for AddressOrAliasFind { - fn parse(matches: &ArgMatches) -> Self { - let alias = ALIAS_OPT.parse(matches); - let address = RAW_ADDRESS_OPT.parse(matches); - Self { alias, address } - } - - fn def(app: App) -> App { - app.arg( - ALIAS_OPT - .def() - .help("An alias associated with the address."), - ) - .arg( - RAW_ADDRESS_OPT - .def() - .help("The bech32m encoded address string."), - ) - .group( - ArgGroup::new("find_flags") - .args([ALIAS_OPT.name, RAW_ADDRESS_OPT.name]) - .required(true), - ) - } - } - - impl Args for AddressAdd { + impl Args for KeyImport { fn parse(matches: &ArgMatches) -> Self { + let file_path = FILE_PATH.parse(matches); let alias = ALIAS.parse(matches); let alias_force = ALIAS_FORCE.parse(matches); - let address = RAW_ADDRESS.parse(matches); + let unsafe_dont_encrypt = UNSAFE_DONT_ENCRYPT.parse(matches); Self { alias, alias_force, - address, + file_path, + unsafe_dont_encrypt, } } fn def(app: App) -> App { - app.arg( - ALIAS - .def() - .help("An alias to be associated with the address."), - ) - .arg(ALIAS_FORCE.def().help( - "Override the alias without confirmation if it already exists.", + app.arg(FILE_PATH.def().help( + "Path to the file containing the key you wish to import.", )) + .arg(ALIAS.def().help("The alias assigned to the.")) .arg( - RAW_ADDRESS + ALIAS_FORCE .def() - .help("The bech32m encoded address string."), + .help("An alias to be associated with the imported entry."), ) + .arg(UNSAFE_DONT_ENCRYPT.def().help( + "UNSAFE: Do not encrypt the imported keys. 
Do not use this \ + for keys used in a live network.", + )) } } @@ -6936,6 +6820,32 @@ pub mod args { } } + #[derive(Clone, Debug)] + pub struct TestGenesis { + /// Templates dir + pub path: PathBuf, + pub wasm_dir: PathBuf, + } + + impl Args for TestGenesis { + fn parse(matches: &ArgMatches) -> Self { + let path = PATH.parse(matches); + let wasm_dir = WASM_DIR.parse(matches).unwrap_or_default(); + Self { path, wasm_dir } + } + + fn def(app: App) -> App { + app.arg( + PATH.def() + .help("Path to the directory with the template files."), + ) + .arg(WASM_DIR.def().help( + "Optional wasm directory to provide as part of verifying \ + genesis template files", + )) + } + } + #[derive(Clone, Debug)] pub struct SignGenesisTxs { pub path: PathBuf, diff --git a/apps/src/lib/cli/client.rs b/apps/src/lib/cli/client.rs index 9602f80cdd..601988a5f8 100644 --- a/apps/src/lib/cli/client.rs +++ b/apps/src/lib/cli/client.rs @@ -416,6 +416,16 @@ impl CliApi { let namada = ctx.to_sdk(client, io); rpc::query_conversions(&namada, args).await; } + Sub::QueryMaspRewardTokens(QueryMaspRewardTokens( + mut args, + )) => { + let client = client.unwrap_or_else(|| { + C::from_tendermint_address(&mut args.ledger_address) + }); + client.wait_until_node_is_synced(&io).await?; + let namada = ctx.to_sdk(client, io); + rpc::query_masp_reward_tokens(&namada).await; + } Sub::QueryBlock(QueryBlock(mut args)) => { let client = client.unwrap_or_else(|| { C::from_tendermint_address(&mut args.ledger_address) @@ -677,6 +687,9 @@ impl CliApi { Utils::ValidateGenesisTemplates(ValidateGenesisTemplates( args, )) => utils::validate_genesis_templates(global_args, args), + Utils::TestGenesis(TestGenesis(args)) => { + utils::test_genesis(args) + } Utils::SignGenesisTxs(SignGenesisTxs(args)) => { utils::sign_genesis_tx(global_args, args).await } diff --git a/apps/src/lib/cli/wallet.rs b/apps/src/lib/cli/wallet.rs index 088e204d57..c6de0d6c5c 100644 --- a/apps/src/lib/cli/wallet.rs +++ b/apps/src/lib/cli/wallet.rs @@ -12,15 +12,15 @@ use ledger_namada_rs::{BIP44Path, NamadaApp}; use ledger_transport_hid::hidapi::HidApi; use ledger_transport_hid::TransportNativeHID; use masp_primitives::zip32::ExtendedFullViewingKey; -use namada::types::address::Address; +use namada::types::address::{Address, DecodeError}; use namada::types::io::Io; use namada::types::key::*; -use namada::types::masp::{MaspValue, PaymentAddress}; +use namada::types::masp::{ExtendedSpendingKey, MaspValue, PaymentAddress}; use namada_sdk::masp::find_valid_diversifier; use namada_sdk::wallet::{ DecryptionError, DerivationPath, DerivationPathError, FindKeyError, Wallet, }; -use namada_sdk::{display, display_line, edisplay_line}; +use namada_sdk::{display_line, edisplay_line}; use rand_core::OsRng; use crate::cli; @@ -39,143 +39,75 @@ impl CliApi { io: &impl Io, ) -> Result<()> { match cmd { - cmds::NamadaWallet::Key(sub) => match sub { - cmds::WalletKey::Derive(cmds::KeyDerive(args)) => { - key_and_address_derive(ctx, io, args).await - } - cmds::WalletKey::Gen(cmds::KeyGen(args)) => { - key_and_address_gen(ctx, io, args) - } - cmds::WalletKey::Find(cmds::KeyFind(args)) => { - key_find(ctx, io, args) - } - cmds::WalletKey::List(cmds::KeyList(args)) => { - key_list(ctx, io, args) - } - cmds::WalletKey::Export(cmds::Export(args)) => { - key_export(ctx, io, args) - } - }, - cmds::NamadaWallet::Address(sub) => match sub { - cmds::WalletAddress::Gen(cmds::AddressGen(args)) => { - key_and_address_gen(ctx, io, args) - } - cmds::WalletAddress::Derive(cmds::AddressDerive(args)) => { - 
key_and_address_derive(ctx, io, args).await - } - cmds::WalletAddress::Find(cmds::AddressOrAliasFind(args)) => { - address_or_alias_find(ctx, io, args) - } - cmds::WalletAddress::List => address_list(ctx, io), - cmds::WalletAddress::Add(cmds::AddressAdd(args)) => { - address_add(ctx, io, args) - } - }, - cmds::NamadaWallet::Masp(sub) => match sub { - cmds::WalletMasp::GenSpendKey(cmds::MaspGenSpendKey(args)) => { - spending_key_gen(ctx, io, args) - } - cmds::WalletMasp::GenPayAddr(cmds::MaspGenPayAddr(args)) => { - let args = args.to_sdk(&mut ctx); - payment_address_gen(ctx, io, args) - } - cmds::WalletMasp::AddAddrKey(cmds::MaspAddAddrKey(args)) => { - address_key_add(ctx, io, args) - } - cmds::WalletMasp::ListPayAddrs => { - payment_addresses_list(ctx, io) - } - cmds::WalletMasp::ListKeys(cmds::MaspListKeys(args)) => { - spending_keys_list(ctx, io, args) - } - cmds::WalletMasp::FindAddrKey(cmds::MaspFindAddrKey(args)) => { - address_key_find(ctx, io, args) - } - }, - } - Ok(()) - } -} - -/// Find shielded address or key -fn address_key_find( - ctx: Context, - io: &impl Io, - args::AddrKeyFind { - alias, - unsafe_show_secret, - }: args::AddrKeyFind, -) { - let mut wallet = load_wallet(ctx); - let alias = alias.to_lowercase(); - if let Ok(viewing_key) = wallet.find_viewing_key(&alias) { - // Check if alias is a viewing key - display_line!(io, "Viewing key: {}", viewing_key); - if unsafe_show_secret { - // Check if alias is also a spending key - match wallet.find_spending_key(&alias, None) { - Ok(spending_key) => { - display_line!(io, "Spending key: {}", spending_key) - } - Err(FindKeyError::KeyNotFound(_)) => {} - Err(err) => edisplay_line!(io, "{}", err), + cmds::NamadaWallet::KeyGen(cmds::WalletGen(args)) => { + key_gen(ctx, io, args) + } + cmds::NamadaWallet::KeyDerive(cmds::WalletDerive(args)) => { + key_derive(ctx, io, args).await + } + cmds::NamadaWallet::KeyAddrList(cmds::WalletListKeysAddresses( + args, + )) => key_address_list(ctx, io, args), + cmds::NamadaWallet::KeyAddrFind(cmds::WalletFindKeysAddresses( + args, + )) => key_address_find(ctx, io, args), + cmds::NamadaWallet::KeyExport(cmds::WalletExportKey(args)) => { + key_export(ctx, io, args) + } + cmds::NamadaWallet::KeyImport(cmds::WalletImportKey(args)) => { + key_import(ctx, io, args) + } + cmds::NamadaWallet::KeyAddrAdd(cmds::WalletAddKeyAddress(args)) => { + key_address_add(ctx, io, args) + } + cmds::NamadaWallet::KeyAddrRemove( + cmds::WalletRemoveKeyAddress(args), + ) => key_address_remove(ctx, io, args), + cmds::NamadaWallet::PayAddrGen(cmds::WalletGenPaymentAddress( + args, + )) => { + let args = args.to_sdk(&mut ctx); + payment_address_gen(ctx, io, args) } } - } else if let Some(payment_addr) = wallet.find_payment_addr(&alias) { - // Failing that, check if alias is a payment address - display_line!(io, "Payment address: {}", payment_addr); - } else { - // Otherwise alias cannot be referring to any shielded value - display_line!( - io, - "No shielded address or key with alias {} found. Use the commands \ - `masp list-addrs` and `masp list-keys` to see all the known \ - addresses and keys.", - alias.to_lowercase() - ); + Ok(()) } } -/// List spending keys. -fn spending_keys_list( - ctx: Context, +/// List shielded keys. 
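The reworked dispatch just above flattens the old `key` / `address` / `masp` sub-trees into one list of wallet commands, each forwarded to a single handler (`key_gen`, `key_address_list`, `key_address_remove`, and so on), with the `--shielded` flag deciding between the transparent and shielded flows inside the handler. A minimal, self-contained sketch of that dispatch shape, using a hypothetical `WalletCmd` enum rather than the real command types:

    // Sketch of a flat command enum plus one match that forwards each variant
    // to its handler. All names here are illustrative stand-ins.
    enum WalletCmd {
        Gen { shielded: bool },
        List,
        Remove { alias: String },
    }

    fn handle(cmd: WalletCmd) {
        match cmd {
            // The real `gen` handler picks the transparent or shielded flow
            // based on the --shielded flag, as the later hunks show.
            WalletCmd::Gen { shielded } => {
                if shielded {
                    println!("generate a shielded spending key");
                } else {
                    println!("generate a transparent keypair and implicit address");
                }
            }
            WalletCmd::List => println!("list known keys and addresses"),
            WalletCmd::Remove { alias } => {
                println!("remove everything stored under alias {alias}")
            }
        }
    }

    fn main() {
        handle(WalletCmd::Gen { shielded: true });
        handle(WalletCmd::List);
        handle(WalletCmd::Remove { alias: "my-key".into() });
    }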
+fn shielded_keys_list( + wallet: &Wallet, io: &impl Io, - args::MaspKeysList { - decrypt, - unsafe_show_secret, - }: args::MaspKeysList, + decrypt: bool, + unsafe_show_secret: bool, + show_hint: bool, ) { - let wallet = load_wallet(ctx); let known_view_keys = wallet.get_viewing_keys(); let known_spend_keys = wallet.get_spending_keys(); if known_view_keys.is_empty() { - display_line!( - io, - "No known keys. Try `masp add --alias my-addr --value ...` to add \ - a new key to the wallet.", - ); + if show_hint { + display_line!( + io, + "No known keys. Try `add --alias my-addr --value ...` to add \ + a new key to the wallet, or `gen --shielded --alias my-key` \ + to generate a new key.", + ); + } } else { - let stdout = io::stdout(); - let mut w = stdout.lock(); - display_line!(io, &mut w; "Known keys:").unwrap(); + let mut w_lock = io::stdout().lock(); + display_line!(io, &mut w_lock; "Known shielded keys:").unwrap(); for (alias, key) in known_view_keys { - display!(io, &mut w; " Alias \"{}\"", alias).unwrap(); let spending_key_opt = known_spend_keys.get(&alias); // If this alias is associated with a spending key, indicate whether // or not the spending key is encrypted - // TODO: consider turning if let into match - if let Some(spending_key) = spending_key_opt { - if spending_key.is_encrypted() { - display_line!(io, &mut w; " (encrypted):") - } else { - display_line!(io, &mut w; " (not encrypted):") - } - .unwrap(); - } else { - display_line!(io, &mut w; ":").unwrap(); - } + let encrypted_status = match spending_key_opt { + None => "external", + Some(spend_key) if spend_key.is_encrypted() => "encrypted", + _ => "not encrypted", + }; + display_line!(io, &mut w_lock; " Alias \"{}\" ({}):", alias, encrypted_status).unwrap(); // Always print the corresponding viewing key - display_line!(io, &mut w; " Viewing Key: {}", key).unwrap(); + display_line!(io, &mut w_lock; " Viewing Key: {}", key).unwrap(); // A subset of viewing keys will have corresponding spending keys. // Print those too if they are available and requested. if unsafe_show_secret { @@ -185,7 +117,7 @@ fn spending_keys_list( // decrypted Ok(spending_key) => { display_line!(io, - &mut w; + &mut w_lock; " Spending key: {}", spending_key, ) .unwrap(); @@ -199,7 +131,7 @@ fn spending_keys_list( // been provided Err(err) => { display_line!(io, - &mut w; + &mut w_lock; " Couldn't decrypt the spending key: {}", err, ) @@ -213,21 +145,26 @@ fn spending_keys_list( } /// List payment addresses. -fn payment_addresses_list(ctx: Context, io: &impl Io) { - let wallet = load_wallet(ctx); +fn payment_addresses_list( + wallet: &Wallet, + io: &impl Io, + show_hint: bool, +) { let known_addresses = wallet.get_payment_addrs(); if known_addresses.is_empty() { - display_line!( - io, - "No known payment addresses. Try `masp gen-addr --alias my-addr` \ - to generate a new payment address.", - ); + if show_hint { + display_line!( + io, + "No known payment addresses. 
Try `gen-payment-addr --alias \ + my-payment-addr` to generate a new payment address.", + ); + } } else { - let stdout = io::stdout(); - let mut w = stdout.lock(); - display_line!(io, &mut w; "Known payment addresses:").unwrap(); + let mut w_lock = io::stdout().lock(); + display_line!(io, &mut w_lock; "Known payment addresses:").unwrap(); for (alias, address) in sorted(known_addresses) { - display_line!(io, &mut w; " \"{}\": {}", alias, address).unwrap(); + display_line!(io, &mut w_lock; " \"{}\": {}", alias, address) + .unwrap(); } } } @@ -236,18 +173,21 @@ fn payment_addresses_list(ctx: Context, io: &impl Io) { fn spending_key_gen( ctx: Context, io: &impl Io, - args::MaspSpendKeyGen { + args::KeyGen { alias, alias_force, unsafe_dont_encrypt, - }: args::MaspSpendKeyGen, + .. + }: args::KeyGen, ) { let mut wallet = load_wallet(ctx); let alias = alias.to_lowercase(); let password = read_and_confirm_encryption_password(unsafe_dont_encrypt); let (alias, _key) = wallet.gen_store_spending_key(alias, password, alias_force, &mut OsRng); - wallet.save().unwrap_or_else(|err| eprintln!("{}", err)); + wallet + .save() + .unwrap_or_else(|err| edisplay_line!(io, "{}", err)); display_line!( io, "Successfully added a spending key with alias: \"{}\"", @@ -259,13 +199,13 @@ fn spending_key_gen( fn payment_address_gen( ctx: Context, io: &impl Io, - args::MaspPayAddrGen { + args::PayAddressGen { alias, alias_force, viewing_key, pin, .. - }: args::MaspPayAddrGen, + }: args::PayAddressGen, ) { let mut wallet = load_wallet(ctx); let alias = alias.to_lowercase(); @@ -293,19 +233,17 @@ fn payment_address_gen( } /// Add a viewing key, spending key, or payment address to wallet. -fn address_key_add( +fn shielded_key_address_add( ctx: Context, io: &impl Io, - args::MaspAddrKeyAdd { - alias, - alias_force, - value, - unsafe_dont_encrypt, - }: args::MaspAddrKeyAdd, + alias: String, + alias_force: bool, + masp_value: MaspValue, + unsafe_dont_encrypt: bool, ) { let alias = alias.to_lowercase(); let mut wallet = load_wallet(ctx); - let (alias, typ) = match value { + let (alias, typ) = match masp_value { MaspValue::FullViewingKey(viewing_key) => { let alias = wallet .insert_viewing_key(alias, viewing_key, alias_force) @@ -369,17 +307,18 @@ pub fn decode_derivation_path( /// Derives a keypair and an implicit address from the mnemonic code in the /// wallet. -async fn key_and_address_derive( +async fn transparent_key_and_address_derive( ctx: Context, io: &impl Io, - args::KeyAndAddressDerive { + args::KeyDerive { scheme, alias, alias_force, unsafe_dont_encrypt, derivation_path, use_device, - }: args::KeyAndAddressDerive, + .. 
+ }: args::KeyDerive, ) { let mut wallet = load_wallet(ctx); let derivation_path = decode_derivation_path(scheme, derivation_path) @@ -387,13 +326,14 @@ async fn key_and_address_derive( edisplay_line!(io, "{}", err); cli::safe_exit(1) }); + let alias = alias.to_lowercase(); let alias = if !use_device { let encryption_password = read_and_confirm_encryption_password(unsafe_dont_encrypt); wallet .derive_key_from_mnemonic_code( scheme, - alias, + Some(alias), alias_force, derivation_path, None, @@ -407,7 +347,7 @@ async fn key_and_address_derive( .0 } else { let hidapi = HidApi::new().unwrap_or_else(|err| { - edisplay_line!(io, "Failed to create Hidapi: {}", err); + edisplay_line!(io, "Failed to create HidApi: {}", err); cli::safe_exit(1) }); let app = NamadaApp::new( @@ -436,13 +376,12 @@ async fn key_and_address_derive( let pubkey = common::PublicKey::try_from_slice(&response.public_key) .expect("unable to decode public key from hardware wallet"); - let pkh = PublicKeyHash::from(&pubkey); let address = Address::from_str(&response.address_str) .expect("unable to decode address from hardware wallet"); wallet .insert_public_key( - alias.unwrap_or_else(|| pkh.to_string()), + alias, pubkey, Some(address), Some(derivation_path), @@ -465,50 +404,61 @@ async fn key_and_address_derive( /// Generate a new keypair and derive implicit address from it and store them in /// the wallet. -fn key_and_address_gen( +fn transparent_key_and_address_gen( ctx: Context, io: &impl Io, - args::KeyAndAddressGen { + args::KeyGen { scheme, + raw, alias, alias_force, unsafe_dont_encrypt, derivation_path, - }: args::KeyAndAddressGen, + .. + }: args::KeyGen, ) { + let alias = alias.to_lowercase(); let mut wallet = load_wallet(ctx); let encryption_password = read_and_confirm_encryption_password(unsafe_dont_encrypt); - let derivation_path = decode_derivation_path(scheme, derivation_path) + let alias = if raw { + wallet.gen_store_secret_key( + scheme, + Some(alias), + alias_force, + encryption_password, + &mut OsRng, + ) + } else { + let derivation_path = decode_derivation_path(scheme, derivation_path) + .unwrap_or_else(|err| { + edisplay_line!(io, "{}", err); + cli::safe_exit(1) + }); + let (_mnemonic, seed) = Wallet::::gen_hd_seed( + None, + &mut OsRng, + unsafe_dont_encrypt, + ) .unwrap_or_else(|err| { edisplay_line!(io, "{}", err); cli::safe_exit(1) }); - let mut rng = OsRng; - let (_mnemonic, seed) = Wallet::::gen_hd_seed( - None, - &mut rng, - unsafe_dont_encrypt, - ) - .unwrap_or_else(|err| { - edisplay_line!(io, "{}", err); - cli::safe_exit(1) - }); - let alias = wallet - .derive_store_hd_secret_key( + wallet.derive_store_hd_secret_key( scheme, - alias, + Some(alias), alias_force, seed, derivation_path, encryption_password, ) - .map(|x| x.0) - .unwrap_or_else(|err| { - eprintln!("{}", err); - println!("No changes are persisted. Exiting."); - cli::safe_exit(0); - }); + } + .map(|x| x.0) + .unwrap_or_else(|err| { + eprintln!("{}", err); + println!("No changes are persisted. Exiting."); + cli::safe_exit(0); + }); wallet .save() .unwrap_or_else(|err| edisplay_line!(io, "{}", err)); @@ -519,22 +469,268 @@ fn key_and_address_gen( ); } -/// Find a keypair in the wallet store. 
-fn key_find( +/// Key generation +fn key_gen(ctx: Context, io: &impl Io, args_key_gen: args::KeyGen) { + if !args_key_gen.shielded { + transparent_key_and_address_gen(ctx, io, args_key_gen) + } else { + spending_key_gen(ctx, io, args_key_gen) + } +} + +/// HD key derivation from mnemonic code +async fn key_derive( + ctx: Context, + io: &impl Io, + args_key_derive: args::KeyDerive, +) { + if !args_key_derive.shielded { + transparent_key_and_address_derive(ctx, io, args_key_derive).await + } else { + todo!() + } +} + +/// List keys and addresses +fn key_address_list( + ctx: Context, + io: &impl Io, + args::KeyAddressList { + decrypt, + transparent_only, + shielded_only, + keys_only, + addresses_only, + unsafe_show_secret, + }: args::KeyAddressList, +) { + let wallet = load_wallet(ctx); + if !shielded_only { + if !addresses_only { + transparent_keys_list( + &wallet, + io, + decrypt, + unsafe_show_secret, + transparent_only && keys_only, + ) + } + if !keys_only { + transparent_addresses_list( + &wallet, + io, + transparent_only && addresses_only, + ) + } + } + + if !transparent_only { + if !addresses_only { + shielded_keys_list( + &wallet, + io, + decrypt, + unsafe_show_secret, + shielded_only && keys_only, + ) + } + if !keys_only { + payment_addresses_list(&wallet, io, shielded_only && addresses_only) + } + } +} + +/// Find keys and addresses +fn key_address_find( ctx: Context, io: &impl Io, - args::KeyFind { + args::KeyAddressFind { + alias, + address, public_key, + public_key_hash, + payment_address, + keys_only, + addresses_only, + decrypt, + unsafe_show_secret, + }: args::KeyAddressFind, +) { + if let Some(alias) = alias { + // Search keys and addresses by alias + let mut wallet = load_wallet(ctx); + let found_transparent = transparent_key_address_find_by_alias( + &mut wallet, + io, + alias.clone(), + keys_only, + addresses_only, + decrypt, + unsafe_show_secret, + ); + let found_shielded = shielded_key_address_find_by_alias( + &mut wallet, + io, + alias.clone(), + keys_only, + addresses_only, + decrypt, + unsafe_show_secret, + ); + if !found_transparent && !found_shielded { + display_line!(io, "Alias \"{}\" not found.", alias); + } + } else if address.is_some() { + // Search alias by address + transparent_address_or_alias_find(ctx, io, None, address) + } else if public_key.is_some() || public_key_hash.is_some() { + // Search transparent keypair by public key or public key hash + transparent_key_find( + ctx, + io, + None, + public_key, + public_key_hash, + unsafe_show_secret, + ) + } else if payment_address.is_some() { + // Search alias by MASP payment address + payment_address_or_alias_find(ctx, io, None, payment_address) + } +} + +#[derive(Debug)] +pub enum TransparentValue { + /// Transparent secret key + TranspSecretKey(common::SecretKey), + /// Transparent public key + TranspPublicKey(common::PublicKey), + /// Transparent address + TranspAddress(Address), +} + +impl FromStr for TransparentValue { + type Err = DecodeError; + + fn from_str(s: &str) -> Result { + // Try to decode this value first as a secret key, then as a public key, + // then as an address + common::SecretKey::from_str(s) + .map(Self::TranspSecretKey) + .or_else(|_| { + common::PublicKey::from_str(s).map(Self::TranspPublicKey) + }) + .or_else(|_| Address::from_str(s).map(Self::TranspAddress)) + } +} + +/// Value for wallet `add` command +#[allow(clippy::large_enum_variant)] +#[derive(Debug)] +pub enum KeyAddrAddValue { + /// Transparent value + TranspValue(TransparentValue), + /// Masp value + MASPValue(MaspValue), +} 
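The `TransparentValue` and `KeyAddrAddValue` enums defined here, together with the `FromStr` impls in the following hunk, decode a user-supplied string by trying one representation after another and falling through with `or_else`. The sketch below shows that fallback-parsing pattern in isolation, using plain standard-library parsers instead of the real key and address decoders:

    use std::str::FromStr;

    // Stand-in value type: the real code tries secret key, public key and
    // address before the MASP variants; here we just try an integer, then a
    // float, to illustrate the cascade.
    #[derive(Debug)]
    enum Value {
        Int(i64),
        Float(f64),
    }

    impl FromStr for Value {
        type Err = String;

        fn from_str(s: &str) -> Result<Self, Self::Err> {
            // Try the most specific decoder first, then fall through.
            i64::from_str(s)
                .map(Value::Int)
                .or_else(|_| f64::from_str(s).map(Value::Float))
                .map_err(|_| format!("could not decode {s:?}"))
        }
    }

    fn main() {
        println!("{:?}", "42".parse::<Value>());   // Ok(Int(42))
        println!("{:?}", "3.14".parse::<Value>()); // Ok(Float(3.14))
        println!("{:?}", "nope".parse::<Value>()); // Err(...)
    }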
+ +impl FromStr for KeyAddrAddValue { + type Err = DecodeError; + + fn from_str(s: &str) -> Result { + // Try to decode this value first as a transparent value, then as a MASP + // value + TransparentValue::from_str(s) + .map(Self::TranspValue) + .or_else(|_| MaspValue::from_str(s).map(Self::MASPValue)) + } +} + +fn add_key_or_address( + ctx: Context, + io: &impl Io, + alias: String, + alias_force: bool, + value: KeyAddrAddValue, + unsafe_dont_encrypt: bool, +) { + match value { + KeyAddrAddValue::TranspValue(TransparentValue::TranspSecretKey(sk)) => { + transparent_secret_key_add( + ctx, + io, + alias, + alias_force, + sk, + unsafe_dont_encrypt, + ) + } + KeyAddrAddValue::TranspValue(TransparentValue::TranspPublicKey( + pubkey, + )) => transparent_public_key_add(ctx, io, alias, alias_force, pubkey), + KeyAddrAddValue::TranspValue(TransparentValue::TranspAddress( + address, + )) => transparent_address_add(ctx, io, alias, alias_force, address), + KeyAddrAddValue::MASPValue(masp_value) => shielded_key_address_add( + ctx, + io, + alias, + alias_force, + masp_value, + unsafe_dont_encrypt, + ), + } +} + +/// Add key or address +fn key_address_add( + ctx: Context, + io: &impl Io, + args::KeyAddressAdd { alias, + alias_force, value, - unsafe_show_secret, - }: args::KeyFind, + unsafe_dont_encrypt, + .. + }: args::KeyAddressAdd, +) { + let value = KeyAddrAddValue::from_str(&value).unwrap_or_else(|err| { + edisplay_line!(io, "{}", err); + display_line!(io, "No changes are persisted. Exiting."); + cli::safe_exit(1) + }); + add_key_or_address(ctx, io, alias, alias_force, value, unsafe_dont_encrypt) +} + +/// Remove keys and addresses +fn key_address_remove( + ctx: Context, + io: &impl Io, + args::KeyAddressRemove { alias, .. }: args::KeyAddressRemove, +) { + let alias = alias.to_lowercase(); + let mut wallet = load_wallet(ctx); + wallet.remove_all_by_alias(alias.clone()); + wallet + .save() + .unwrap_or_else(|err| edisplay_line!(io, "{}", err)); + display_line!(io, "Successfully removed alias: \"{}\"", alias); +} + +/// Find a keypair in the wallet store. +fn transparent_key_find( + ctx: Context, + io: &impl Io, + alias: Option, + public_key: Option, + public_key_hash: Option, + unsafe_show_secret: bool, ) { let mut wallet = load_wallet(ctx); let found_keypair = match public_key { Some(pk) => wallet.find_key_by_pk(&pk, None), None => { - let alias = alias.or(value); + let alias = alias.map(|a| a.to_lowercase()).or(public_key_hash); match alias { None => { edisplay_line!( @@ -544,9 +740,7 @@ fn key_find( ); cli::safe_exit(1) } - Some(alias) => { - wallet.find_secret_key(alias.to_lowercase(), None) - } + Some(alias) => wallet.find_secret_key(alias, None), } } }; @@ -565,27 +759,247 @@ fn key_find( } } -/// List all known keys. -fn key_list( +/// Find address (alias) by its alias (address). +fn transparent_address_or_alias_find( ctx: Context, io: &impl Io, - args::KeyList { - decrypt, - unsafe_show_secret, - }: args::KeyList, + alias: Option, + address: Option
, +) { + let wallet = load_wallet(ctx); + if address.is_some() && alias.is_some() { + panic!( + "This should not be happening: clap should emit its own error \ + message." + ); + } else if alias.is_some() { + let alias = alias.unwrap().to_lowercase(); + if let Some(address) = wallet.find_address(&alias) { + display_line!(io, "Found address {}", address.to_pretty_string()); + } else { + display_line!( + io, + "No address with alias {} found. Use the command `list \ + --addr` to see all the known transparent addresses.", + alias + ); + } + } else if address.is_some() { + if let Some(alias) = wallet.find_alias(address.as_ref().unwrap()) { + display_line!(io, "Found alias {}", alias); + } else { + display_line!( + io, + "No address with alias {} found. Use the command `list \ + --addr` to see all the known transparent addresses.", + address.unwrap() + ); + } + } +} + +/// Find payment address (alias) by its alias (payment address). +fn payment_address_or_alias_find( + ctx: Context, + io: &impl Io, + alias: Option, + payment_address: Option, ) { let wallet = load_wallet(ctx); + if payment_address.is_some() && alias.is_some() { + panic!( + "This should not be happening: clap should emit its own error \ + message." + ); + } else if alias.is_some() { + let alias = alias.unwrap().to_lowercase(); + if let Some(payment_addr) = wallet.find_payment_addr(&alias) { + display_line!(io, "Found payment address {}", payment_addr); + } else { + display_line!( + io, + "No payment address with alias {} found. Use the command \ + `list --shielded --addr` to see all the known payment \ + addresses.", + alias + ); + } + } else if payment_address.is_some() { + if let Some(alias) = + wallet.find_alias_by_payment_addr(payment_address.as_ref().unwrap()) + { + display_line!(io, "Found alias {}", alias); + } else { + display_line!( + io, + "No address with alias {} found. Use the command `list \ + --shielded --addr` to see all the known payment addresses.", + payment_address.unwrap() + ); + } + } +} + +/// Find transparent addresses and keys by alias +fn transparent_key_address_find_by_alias( + wallet: &mut Wallet, + io: &impl Io, + alias: String, + keys_only: bool, + addresses_only: bool, + decrypt: bool, + unsafe_show_secret: bool, +) -> bool { + let alias = alias.to_lowercase(); + let mut w_lock = io::stdout().lock(); + let mut found = false; + + // Find transparent keys + if !addresses_only { + // Check if alias is a public key + if let Ok(public_key) = wallet.find_public_key(&alias) { + found = true; + display_line!(io, &mut w_lock; "Found transparent keys:").unwrap(); + let encrypted = match wallet.is_encrypted_secret_key(&alias) { + None => "external", + Some(res) if res => "encrypted", + _ => "not encrypted", + }; + display_line!(io, + &mut w_lock; + " Alias \"{}\" ({}):", alias, encrypted, + ) + .unwrap(); + let pkh = PublicKeyHash::from(&public_key); + // Always print the public key and hash + display_line!(io, &mut w_lock; " Public key hash: {}", pkh) + .unwrap(); + display_line!( + io, + &mut w_lock; + " Public key: {}", + public_key + ) + .unwrap(); + if decrypt { + // Check if alias is also a secret key. Decrypt and print it if + // requested. 
+ match wallet.find_secret_key(&alias, None) { + Ok(keypair) => { + if unsafe_show_secret { + display_line!(io, &mut w_lock; " Secret key: {}", keypair) + .unwrap(); + } + } + Err(FindKeyError::KeyNotFound(_)) => {} + Err(err) => edisplay_line!(io, "{}", err), + } + } + } + } + + // Find transparent address + if !keys_only { + if let Some(address) = wallet.find_address(&alias) { + found = true; + display_line!(io, &mut w_lock; "Found transparent address:") + .unwrap(); + display_line!(io, + &mut w_lock; + " \"{}\": {}", alias, address.to_pretty_string(), + ) + .unwrap(); + } + } + + found +} + +/// Find shielded payment address and keys by alias +fn shielded_key_address_find_by_alias( + wallet: &mut Wallet, + io: &impl Io, + alias: String, + keys_only: bool, + addresses_only: bool, + decrypt: bool, + unsafe_show_secret: bool, +) -> bool { + let alias = alias.to_lowercase(); + let mut w_lock = io::stdout().lock(); + let mut found = false; + + // Find shielded keys + if !addresses_only { + let encrypted = match wallet.is_encrypted_spending_key(&alias) { + None => "external", + Some(res) if res => "encrypted", + _ => "not encrypted", + }; + // Check if alias is a viewing key + if let Ok(viewing_key) = wallet.find_viewing_key(&alias) { + found = true; + display_line!(io, &mut w_lock; "Found shielded keys:").unwrap(); + display_line!(io, + &mut w_lock; + " Alias \"{}\" ({}):", alias, encrypted, + ) + .unwrap(); + // Always print the viewing key + display_line!(io, &mut w_lock; " Viewing key: {}", viewing_key) + .unwrap(); + if decrypt { + // Check if alias is also a spending key. Decrypt and print it + // if requested. + match wallet.find_spending_key(&alias, None) { + Ok(spending_key) => { + if unsafe_show_secret { + display_line!(io, &mut w_lock; " Spending key: {}", spending_key).unwrap(); + } + } + Err(FindKeyError::KeyNotFound(_)) => {} + Err(err) => edisplay_line!(io, "{}", err), + } + } + } + } + + // Find payment addresses + if !keys_only { + if let Some(payment_addr) = wallet.find_payment_addr(&alias) { + found = true; + display_line!(io, &mut w_lock; "Found payment address:").unwrap(); + display_line!(io, + &mut w_lock; + " \"{}\": {}", alias, payment_addr.to_string(), + ) + .unwrap(); + } + } + + found +} + +/// List all known keys. +fn transparent_keys_list( + wallet: &Wallet, + io: &impl Io, + decrypt: bool, + unsafe_show_secret: bool, + show_hint: bool, +) { let known_public_keys = wallet.get_public_keys(); if known_public_keys.is_empty() { - display_line!( - io, - "No known keys. Try `key gen --alias my-key` to generate a new \ - key.", - ); + if show_hint { + display_line!( + io, + "No known keys. 
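The find-by-alias helpers in this hunk turn an optional "is encrypted" answer into the label printed next to each alias: no stored secret at all reads as external, otherwise encrypted or not encrypted. A tiny self-contained sketch of that labelling, with a bare `Option<bool>` standing in for the wallet lookup:

    // Map an optional "is encrypted" answer to the label used by the wallet
    // listings. `None` models a key the wallet only knows the public part of.
    fn encryption_status(is_encrypted: Option<bool>) -> &'static str {
        match is_encrypted {
            None => "external",
            Some(true) => "encrypted",
            Some(false) => "not encrypted",
        }
    }

    fn main() {
        assert_eq!(encryption_status(None), "external");
        assert_eq!(encryption_status(Some(true)), "encrypted");
        assert_eq!(encryption_status(Some(false)), "not encrypted");
        println!("all status labels match");
    }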
Try `gen --alias my-key` to generate a new \ + key.", + ); + } } else { - let stdout = io::stdout(); - let mut w = stdout.lock(); - display_line!(io, &mut w; "Known keys:").unwrap(); + let mut w_lock = io::stdout().lock(); + display_line!(io, &mut w_lock; "Known transparent keys:").unwrap(); let known_secret_keys = wallet.get_secret_keys(); for (alias, public_key) in known_public_keys { let stored_keypair = known_secret_keys.get(&alias); @@ -599,19 +1013,22 @@ fn key_list( Some(_) => "not encrypted", }; display_line!(io, - &mut w; + &mut w_lock; " Alias \"{}\" ({}):", alias, encrypted, ) .unwrap(); - display_line!(io, &mut w; " Public key hash: {}", PublicKeyHash::from(&public_key)) + // Always print the corresponding public key and hash + display_line!(io, &mut w_lock; " Public key hash: {}", PublicKeyHash::from(&public_key)) .unwrap(); - display_line!(io, &mut w; " Public key: {}", public_key) + display_line!(io, &mut w_lock; " Public key: {}", public_key) .unwrap(); + // A subset of public keys will have corresponding secret keys. + // Print those too if they are available and requested. if let Some((stored_keypair, _pkh)) = stored_keypair { match stored_keypair.get::(decrypt, None) { Ok(keypair) if unsafe_show_secret => { display_line!(io, - &mut w; + &mut w_lock; " Secret key: {}", keypair, ) .unwrap(); @@ -622,7 +1039,7 @@ fn key_list( } Err(err) => { display_line!(io, - &mut w; + &mut w_lock; " Couldn't decrypt the keypair: {}", err, ) .unwrap(); @@ -633,20 +1050,25 @@ fn key_list( } } -/// Export a keypair to a file. +/// Export a transparent keypair / MASP spending key to a file. fn key_export( ctx: Context, io: &impl Io, args::KeyExport { alias }: args::KeyExport, ) { + let alias = alias.to_lowercase(); let mut wallet = load_wallet(ctx); - wallet - .find_secret_key(alias.to_lowercase(), None) - .map(|keypair| { - let file_data = keypair.serialize_to_vec(); - let file_name = format!("key_{}", alias.to_lowercase()); + let key_to_export = wallet + .find_secret_key(&alias, None) + .map(|sk| Box::new(sk) as Box) + .or(wallet + .find_spending_key(&alias, None) + .map(|spk| Box::new(spk) as Box)); + key_to_export + .map(|key| { + let file_data = key.serialize_to_vec(); + let file_name = format!("key_{}", alias); let mut file = File::create(&file_name).unwrap(); - file.write_all(file_data.as_ref()).unwrap(); display_line!(io, "Exported to file {}", file_name); }) @@ -656,23 +1078,71 @@ fn key_export( }) } -/// List all known addresses. -fn address_list(ctx: Context, io: &impl Io) { - let wallet = load_wallet(ctx); - let known_addresses = wallet.get_addresses(); - if known_addresses.is_empty() { - display_line!( +/// Import a transparent keypair / MASP spending key from a file. +fn key_import( + ctx: Context, + io: &impl Io, + args::KeyImport { + file_path, + alias, + alias_force, + unsafe_dont_encrypt, + }: args::KeyImport, +) { + let file_data = std::fs::read(file_path).unwrap_or_else(|err| { + edisplay_line!(io, "{}", err); + display_line!(io, "No changes are persisted. Exiting."); + cli::safe_exit(1) + }); + if let Ok(sk) = common::SecretKey::try_from_slice(&file_data) { + transparent_secret_key_add( + ctx, + io, + alias, + alias_force, + sk, + unsafe_dont_encrypt, + ); + } else if let Ok(spend_key) = + ExtendedSpendingKey::try_from_slice(&file_data) + { + let masp_value = MaspValue::ExtendedSpendingKey(spend_key); + shielded_key_address_add( + ctx, io, - "No known addresses. 
Try `address gen --alias my-addr` to \ - generate a new implicit address.", + alias, + alias_force, + masp_value, + unsafe_dont_encrypt, ); } else { - let stdout = io::stdout(); - let mut w = stdout.lock(); - display_line!(io, &mut w; "Known addresses:").unwrap(); + display_line!(io, "Could not parse the data."); + display_line!(io, "No changes are persisted. Exiting."); + cli::safe_exit(1) + } +} + +/// List all known transparent addresses. +fn transparent_addresses_list( + wallet: &Wallet, + io: &impl Io, + show_hint: bool, +) { + let known_addresses = wallet.get_addresses(); + if known_addresses.is_empty() { + if show_hint { + display_line!( + io, + "No known addresses. Try `gen --alias my-addr` to generate a \ + new implicit address.", + ); + } + } else { + let mut w_lock = io::stdout().lock(); + display_line!(io, &mut w_lock; "Known transparent addresses:").unwrap(); for (alias, address) in sorted(known_addresses) { display_line!(io, - &mut w; + &mut w_lock; " \"{}\": {}", alias, address.to_pretty_string(), ) .unwrap(); @@ -680,56 +1150,74 @@ fn address_list(ctx: Context, io: &impl Io) { } } -/// Find address (alias) by its alias (address). -fn address_or_alias_find( +/// Add a transparent secret key to the wallet. +fn transparent_secret_key_add( ctx: Context, io: &impl Io, - args::AddressOrAliasFind { alias, address }: args::AddressOrAliasFind, + alias: String, + alias_force: bool, + sk: common::SecretKey, + unsafe_dont_encrypt: bool, ) { - let wallet = load_wallet(ctx); - if address.is_some() && alias.is_some() { - panic!( - "This should not be happening: clap should emit its own error \ - message." - ); - } else if alias.is_some() { - if let Some(address) = wallet.find_address(alias.as_ref().unwrap()) { - display_line!(io, "Found address {}", address.to_pretty_string()); - } else { - display_line!( - io, - "No address with alias {} found. Use the command `address \ - list` to see all the known addresses.", - alias.unwrap().to_lowercase() - ); - } - } else if address.is_some() { - if let Some(alias) = wallet.find_alias(address.as_ref().unwrap()) { - display_line!(io, "Found alias {}", alias); - } else { - display_line!( - io, - "No alias with address {} found. Use the command `address \ - list` to see all the known addresses.", - address.unwrap() - ); - } + let mut wallet = load_wallet(ctx); + let encryption_password = + read_and_confirm_encryption_password(unsafe_dont_encrypt); + let alias = wallet + .insert_keypair(alias, alias_force, sk, encryption_password, None, None) + .unwrap_or_else(|err| { + edisplay_line!(io, "{}", err); + display_line!(io, "No changes are persisted. Exiting."); + cli::safe_exit(1); + }); + wallet + .save() + .unwrap_or_else(|err| edisplay_line!(io, "{}", err)); + display_line!( + io, + "Successfully added a key and an address with alias: \"{}\"", + alias + ); +} + +/// Add a public key to the wallet. +fn transparent_public_key_add( + ctx: Context, + io: &impl Io, + alias: String, + alias_force: bool, + pubkey: common::PublicKey, +) { + let alias = alias.to_lowercase(); + let mut wallet = load_wallet(ctx); + if wallet + .insert_public_key(alias.clone(), pubkey, None, None, alias_force) + .is_none() + { + edisplay_line!(io, "Public key not added"); + cli::safe_exit(1); } + wallet + .save() + .unwrap_or_else(|err| edisplay_line!(io, "{}", err)); + display_line!( + io, + "Successfully added a public key with alias: \"{}\"", + alias + ); } -/// Add an address to the wallet. -fn address_add( +/// Add a transparent address to the wallet. 
+fn transparent_address_add( ctx: Context, io: &impl Io, - args::AddressAdd { - alias, - alias_force, - address, - }: args::AddressAdd, + alias: String, + alias_force: bool, + address: Address, ) { + let alias = alias.to_lowercase(); let mut wallet = load_wallet(ctx); if wallet - .insert_address(alias.to_lowercase(), address, alias_force) + .insert_address(&alias, address, alias_force) .is_none() { edisplay_line!(io, "Address not added"); @@ -740,8 +1228,8 @@ fn address_add( .unwrap_or_else(|err| edisplay_line!(io, "{}", err)); display_line!( io, - "Successfully added a key and an address with alias: \"{}\"", - alias.to_lowercase() + "Successfully added an address with alias: \"{}\"", + alias ); } diff --git a/apps/src/lib/client/rpc.rs b/apps/src/lib/client/rpc.rs index b4a5ab20c6..bc57f58f14 100644 --- a/apps/src/lib/client/rpc.rs +++ b/apps/src/lib/client/rpc.rs @@ -53,6 +53,7 @@ use namada_sdk::proof_of_stake::types::ValidatorMetaData; use namada_sdk::rpc::{ self, enriched_bonds_and_unbonds, query_epoch, TxResponse, }; +use namada_sdk::tx::{display_inner_resp, display_wrapper_resp_and_get_result}; use namada_sdk::wallet::AddressVpType; use namada_sdk::{display, display_line, edisplay_line, error, prompt, Namada}; use tokio::time::Instant; @@ -2276,22 +2277,6 @@ pub async fn query_find_validator( } } -/// Dry run a transaction -pub async fn dry_run_tx( - context: &N, - tx_bytes: Vec, -) -> Result<(), error::Error> -where - ::Error: std::fmt::Display, -{ - display_line!( - context.io(), - "Dry-run result: {}", - rpc::dry_run_tx(context, tx_bytes).await? - ); - Ok(()) -} - /// Get account's public key stored in its storage sub-space pub async fn get_public_key( client: &C, @@ -2428,6 +2413,17 @@ pub async fn query_conversion( namada_sdk::rpc::query_conversion(client, asset_type).await } +/// Query to read the tokens that earn masp rewards. +pub async fn query_masp_reward_tokens(context: &impl Namada) { + let tokens = namada_sdk::rpc::query_masp_reward_tokens(context.client()) + .await + .expect("The tokens that may earn MASP rewards should be defined"); + display_line!(context.io(), "The following tokens may earn MASP rewards:"); + for (alias, address) in tokens { + display_line!(context.io(), "{}: {}", alias, address); + } +} + /// Query a wasm code hash pub async fn query_wasm_code_hash( context: &impl Namada, @@ -2511,32 +2507,26 @@ pub async fn query_tx_response( /// blockchain. pub async fn query_result(context: &impl Namada, args: args::QueryResult) { // First try looking up application event pertaining to given hash. - let tx_response = query_tx_response( + let inner_resp = query_tx_response( context.client(), namada_sdk::rpc::TxEventQuery::Applied(&args.tx_hash), ) .await; - match tx_response { - Ok(result) => { - display_line!( - context.io(), - "Transaction was applied with result: {}", - serde_json::to_string_pretty(&result).unwrap() - ) + match inner_resp { + Ok(resp) => { + display_inner_resp(context, &resp); } Err(err1) => { // If this fails then instead look for an acceptance event.
- let tx_response = query_tx_response( + let wrapper_resp = query_tx_response( context.client(), namada_sdk::rpc::TxEventQuery::Accepted(&args.tx_hash), ) .await; - match tx_response { - Ok(result) => display_line!( - context.io(), - "Transaction was accepted with result: {}", - serde_json::to_string_pretty(&result).unwrap() - ), + match wrapper_resp { + Ok(resp) => { + display_wrapper_resp_and_get_result(context, &resp); + } Err(err2) => { // Print the errors that caused the lookups to fail edisplay_line!(context.io(), "{}\n{}", err1, err2); diff --git a/apps/src/lib/client/tx.rs b/apps/src/lib/client/tx.rs index e1a0d5511c..f57cddd4eb 100644 --- a/apps/src/lib/client/tx.rs +++ b/apps/src/lib/client/tx.rs @@ -13,6 +13,7 @@ use namada::core::ledger::governance::cli::offline::{ use namada::core::ledger::governance::cli::onchain::{ DefaultProposal, PgfFundingProposal, PgfStewardProposal, ProposalVote, }; +use namada::core::ledger::storage::EPOCH_SWITCH_BLOCKS_DELAY; use namada::ibc::apps::transfer::types::Memo; use namada::proto::{CompressedSignature, Section, Signer, Tx}; use namada::types::address::{Address, ImplicitAddress}; @@ -20,7 +21,7 @@ use namada::types::dec::Dec; use namada::types::io::Io; use namada::types::key::{self, *}; use namada::types::transaction::pos::{BecomeValidator, ConsensusKeyChange}; -use namada_sdk::rpc::{TxBroadcastData, TxResponse}; +use namada_sdk::rpc::{InnerTxResult, TxBroadcastData, TxResponse}; use namada_sdk::wallet::alias::validator_consensus_key; use namada_sdk::wallet::{Wallet, WalletIo}; use namada_sdk::{display_line, edisplay_line, error, signing, tx, Namada}; @@ -218,15 +219,11 @@ pub async fn submit_reveal_aux( "Submitting a tx to reveal the public key for address \ {address}..." ); - let (mut tx, signing_data, _epoch) = + let (mut tx, signing_data) = tx::build_reveal_pk(context, &args, &public_key).await?; - signing::generate_test_vector(context, &tx).await?; - sign(context, &mut tx, &args, signing_data).await?; - signing::generate_test_vector(context, &tx).await?; - context.submit(tx, &args).await?; } } @@ -239,9 +236,7 @@ pub async fn submit_bridge_pool_tx( args: args::EthereumBridgePool, ) -> Result<(), error::Error> { let tx_args = args.tx.clone(); - let (mut tx, signing_data, _epoch) = args.clone().build(namada).await?; - - signing::generate_test_vector(namada, &tx).await?; + let (mut tx, signing_data) = args.clone().build(namada).await?; if args.tx.dump_tx { tx::dump_tx(namada.io(), &args.tx, tx); @@ -250,8 +245,6 @@ pub async fn submit_bridge_pool_tx( sign(namada, &mut tx, &tx_args, signing_data).await?; - signing::generate_test_vector(namada, &tx).await?; - namada.submit(tx, &tx_args).await?; } @@ -267,17 +260,13 @@ where { submit_reveal_aux(namada, args.tx.clone(), &args.owner).await?; - let (mut tx, signing_data, _epoch) = args.build(namada).await?; - - signing::generate_test_vector(namada, &tx).await?; + let (mut tx, signing_data) = args.build(namada).await?; if args.tx.dump_tx { tx::dump_tx(namada.io(), &args.tx, tx); } else { sign(namada, &mut tx, &args.tx, signing_data).await?; - signing::generate_test_vector(namada, &tx).await?; - namada.submit(tx, &args.tx).await?; } @@ -291,17 +280,13 @@ pub async fn submit_update_account( where ::Error: std::fmt::Display, { - let (mut tx, signing_data, _epoch) = args.build(namada).await?; - - signing::generate_test_vector(namada, &tx).await?; + let (mut tx, signing_data) = args.build(namada).await?; if args.tx.dump_tx { tx::dump_tx(namada.io(), &args.tx, tx); } else { sign(namada, &mut tx, 
&args.tx, signing_data).await?; - signing::generate_test_vector(namada, &tx).await?; - namada.submit(tx, &args.tx).await?; } @@ -315,21 +300,16 @@ pub async fn submit_init_account( where ::Error: std::fmt::Display, { - let (mut tx, signing_data, _epoch) = - tx::build_init_account(namada, &args).await?; - - signing::generate_test_vector(namada, &tx).await?; + let (mut tx, signing_data) = tx::build_init_account(namada, &args).await?; if args.tx.dump_tx { tx::dump_tx(namada.io(), &args.tx, tx); } else { sign(namada, &mut tx, &args.tx, signing_data).await?; - signing::generate_test_vector(namada, &tx).await?; - - let result = namada.submit(tx, &args.tx).await?; - if let ProcessTxResponse::Applied(response) = result { - return Ok(response.initialized_accounts.first().cloned()); + let response = namada.submit(tx, &args.tx).await?; + if let Some(result) = response.is_applied_and_valid() { + return Ok(result.initialized_accounts.first().cloned()); } } @@ -343,6 +323,7 @@ pub async fn submit_change_consensus_key( tx: tx_args, validator, consensus_key, + unsafe_dont_encrypt, tx_code_path: _, }: args::ConsensusKeyChange, ) -> Result<(), error::Error> { @@ -354,12 +335,8 @@ pub async fn submit_change_consensus_key( ..tx_args.clone() }; - // TODO: do I need to get the validator alias from somewhere, if it exists? - // // Don't think I should generate a new one... Should get the alias - // for the consensus key though... - - let wallet = namada.wallet().await; - + // Determine the alias for the new key + let mut wallet = namada.wallet_mut().await; let alias = wallet.find_alias(&validator).cloned(); let base_consensus_key_alias = alias .map(|al| validator_consensus_key(&al)) @@ -375,8 +352,8 @@ pub async fn submit_change_consensus_key( format!("{base_consensus_key_alias}-{key_counter}"); } - let mut wallet = namada.wallet_mut().await; - let consensus_key = consensus_key + // Check the given key or generate a new one + let new_key = consensus_key .map(|key| match key { common::PublicKey::Ed25519(_) => key, common::PublicKey::Secp256k1(_) => { @@ -389,7 +366,8 @@ pub async fn submit_change_consensus_key( }) .unwrap_or_else(|| { display_line!(namada.io(), "Generating new consensus key..."); - let password = read_and_confirm_encryption_password(false); + let password = + read_and_confirm_encryption_password(unsafe_dont_encrypt); wallet .gen_store_secret_key( // Note that TM only allows ed25519 for consensus key @@ -409,9 +387,8 @@ pub async fn submit_change_consensus_key( // Check that the new consensus key is unique let consensus_keys = rpc::query_consensus_keys(namada.client()).await; - let new_ck = consensus_key; - if consensus_keys.contains(&new_ck) { - edisplay_line!(namada.io(), "Consensus key can only be ed25519"); + if consensus_keys.contains(&new_key) { + edisplay_line!(namada.io(), "The consensus key is already being used."); safe_exit(1) } @@ -425,7 +402,7 @@ pub async fn submit_change_consensus_key( let data = ConsensusKeyChange { validator: validator.clone(), - consensus_key: new_ck, + consensus_key: new_key.clone(), }; tx.add_code_from_hash( @@ -433,7 +410,9 @@ pub async fn submit_change_consensus_key( Some(args::TX_CHANGE_CONSENSUS_KEY_WASM.to_string()), ) .add_data(data); - let signing_data = aux_signing_data(namada, &tx_args, None, None).await?; + + let signing_data = + init_validator_signing_data(namada, &tx_args, vec![new_key]).await?; tx::prepare_tx( namada, @@ -444,33 +423,28 @@ pub async fn submit_change_consensus_key( ) .await?; - signing::generate_test_vector(namada, &tx).await?; - if 
tx_args.dump_tx { tx::dump_tx(namada.io(), &tx_args, tx); } else { sign(namada, &mut tx, &tx_args, signing_data).await?; - namada.submit(tx, &tx_args).await?; + let resp = namada.submit(tx, &tx_args).await?; if !tx_args.dry_run { - namada - .wallet_mut() - .await - .save() - .unwrap_or_else(|err| edisplay_line!(namada.io(), "{}", err)); - - // let tendermint_home = config.ledger.cometbft_dir(); - // tendermint_node::write_validator_key( - // &tendermint_home, - // &consensus_key, - // ); - // tendermint_node::write_validator_state(tendermint_home); + if resp.is_applied_and_valid().is_some() { + namada.wallet_mut().await.save().unwrap_or_else(|err| { + edisplay_line!(namada.io(), "{}", err) + }); - display_line!( - namada.io(), - " Consensus key \"{}\"", - consensus_key_alias - ); + display_line!( + namada.io(), + "New consensus key stored with alias \ + \"{consensus_key_alias}\". It will become active \ + {EPOCH_SWITCH_BLOCKS_DELAY} blocks before pipeline \ + offset from the current epoch, at which point you'll \ + need to give the new key to CometBFT in order to be able \ + to sign with it in consensus.", + ); + } } else { display_line!( namada.io(), @@ -773,79 +747,80 @@ pub async fn submit_become_validator( ) .await?; - signing::generate_test_vector(namada, &tx).await?; - if tx_args.dump_tx { tx::dump_tx(namada.io(), &tx_args, tx); } else { sign(namada, &mut tx, &tx_args, signing_data).await?; - - signing::generate_test_vector(namada, &tx).await?; - - namada.submit(tx, &tx_args).await?.initialized_accounts(); + let resp = namada.submit(tx, &tx_args).await?; if !tx_args.dry_run { - // add validator address and keys to the wallet - let mut wallet = namada.wallet_mut().await; - wallet.add_validator_data(address.clone(), validator_keys); - wallet - .save() - .unwrap_or_else(|err| edisplay_line!(namada.io(), "{}", err)); - - let tendermint_home = config.ledger.cometbft_dir(); - tendermint_node::write_validator_key( - &tendermint_home, - &wallet - .find_key_by_pk(&consensus_key, None) - .expect("unable to find consensus key pair in the wallet"), - ); - // To avoid wallet deadlocks in following operations - drop(wallet); - tendermint_node::write_validator_state(tendermint_home); - - // Write Namada config stuff or figure out how to do the above - // tendermint_node things two epochs in the future!!! - config.ledger.shell.tendermint_mode = TendermintMode::Validator; - config - .write( - &config.ledger.shell.base_dir, - &config.ledger.chain_id, - true, + if resp.is_applied_and_valid().is_some() { + // add validator address and keys to the wallet + let mut wallet = namada.wallet_mut().await; + wallet.add_validator_data(address.clone(), validator_keys); + wallet.save().unwrap_or_else(|err| { + edisplay_line!(namada.io(), "{}", err) + }); + + let tendermint_home = config.ledger.cometbft_dir(); + tendermint_node::write_validator_key( + &tendermint_home, + &wallet.find_key_by_pk(&consensus_key, None).expect( + "unable to find consensus key pair in the wallet", + ), ) .unwrap(); + // To avoid wallet deadlocks in following operations + drop(wallet); + tendermint_node::write_validator_state(tendermint_home) + .unwrap(); + + // Write Namada config stuff or figure out how to do the above + // tendermint_node things two epochs in the future!!! 
+ config.ledger.shell.tendermint_mode = TendermintMode::Validator; + config + .write( + &config.ledger.shell.base_dir, + &config.ledger.chain_id, + true, + ) + .unwrap(); - let pos_params = rpc::query_pos_parameters(namada.client()).await; + let pos_params = + rpc::query_pos_parameters(namada.client()).await; - display_line!(namada.io(), ""); - display_line!( - namada.io(), - "The keys for validator \"{alias}\" were stored in the wallet:" - ); - display_line!( - namada.io(), - " Validator account key \"{}\"", - validator_key_alias - ); - display_line!( - namada.io(), - " Consensus key \"{}\"", - consensus_key_alias - ); - display_line!( - namada.io(), - "The ledger node has been setup to use this validator's \ - address and consensus key." - ); - display_line!( - namada.io(), - "Your validator will be active in {} epochs. Be sure to \ - restart your node for the changes to take effect!", - pos_params.pipeline_len - ); + display_line!(namada.io(), ""); + display_line!( + namada.io(), + "The keys for validator \"{alias}\" were stored in the \ + wallet:" + ); + display_line!( + namada.io(), + " Validator account key \"{}\"", + validator_key_alias + ); + display_line!( + namada.io(), + " Consensus key \"{}\"", + consensus_key_alias + ); + display_line!( + namada.io(), + "The ledger node has been setup to use this validator's \ + address and consensus key." + ); + display_line!( + namada.io(), + "Your validator will be active in {} epochs. Be sure to \ + restart your node for the changes to take effect!", + pos_params.pipeline_len + ); + } } else { display_line!( namada.io(), - "Transaction dry run. No addresses have been saved." + "Transaction dry run. No key or addresses have been saved." ); } } @@ -942,7 +917,6 @@ pub async fn submit_transfer( let (mut tx, signing_data, tx_epoch) = args.clone().build(namada).await?; - signing::generate_test_vector(namada, &tx).await?; if args.tx.dump_tx { tx::dump_tx(namada.io(), &args.tx, tx); @@ -950,27 +924,25 @@ pub async fn submit_transfer( } else { sign(namada, &mut tx, &args.tx, signing_data).await?; - signing::generate_test_vector(namada, &tx).await?; - let result = namada.submit(tx, &args.tx).await?; - let submission_epoch = rpc::query_and_print_epoch(namada).await; - match result { ProcessTxResponse::Applied(resp) if - // If a transaction is shielded + // If a transaction is shielded tx_epoch.is_some() && - // And it is rejected by a VP - resp.code == 1.to_string() && - // And its submission epoch doesn't match construction epoch - tx_epoch.unwrap() != submission_epoch => + // And it is rejected by a VP + matches!(resp.inner_tx_result(), InnerTxResult::VpsRejected(_)) => { - // Then we probably straddled an epoch boundary. Let's retry... - edisplay_line!(namada.io(), - "MASP transaction rejected and this may be due to the \ - epoch changing. Attempting to resubmit transaction.", - ); - continue; + let submission_epoch = rpc::query_and_print_epoch(namada).await; + // And its submission epoch doesn't match construction epoch + if tx_epoch.unwrap() != submission_epoch { + // Then we probably straddled an epoch boundary. Let's retry... + edisplay_line!(namada.io(), + "MASP transaction rejected and this may be due to the \ + epoch changing. 
Attempting to resubmit transaction.", + ); + continue; + } }, // Otherwise either the transaction was successful or it will not // benefit from resubmission @@ -989,19 +961,23 @@ pub async fn submit_ibc_transfer( where ::Error: std::fmt::Display, { - submit_reveal_aux(namada, args.tx.clone(), &args.source).await?; - let (mut tx, signing_data, _epoch) = args.build(namada).await?; - signing::generate_test_vector(namada, &tx).await?; + submit_reveal_aux( + namada, + args.tx.clone(), + &args.source.effective_address(), + ) + .await?; + let (mut tx, signing_data, _) = args.build(namada).await?; if args.tx.dump_tx { tx::dump_tx(namada.io(), &args.tx, tx); } else { sign(namada, &mut tx, &args.tx, signing_data).await?; - signing::generate_test_vector(namada, &tx).await?; - namada.submit(tx, &args.tx).await?; } + // NOTE that the tx could fail when its submission epoch doesn't match + // construction epoch Ok(()) } @@ -1016,8 +992,7 @@ where let current_epoch = rpc::query_and_print_epoch(namada).await; let governance_parameters = rpc::query_governance_parameters(namada.client()).await; - let (mut tx_builder, signing_data, _fee_unshield_epoch) = if args.is_offline - { + let (mut tx_builder, signing_data) = if args.is_offline { let proposal = OfflineProposal::try_from(args.proposal_data.as_ref()) .map_err(|e| { error::TxError::FailedGovernaneProposalDeserialize( @@ -1126,15 +1101,12 @@ where tx::build_default_proposal(namada, &args, proposal).await? }; - signing::generate_test_vector(namada, &tx_builder).await?; if args.tx.dump_tx { tx::dump_tx(namada.io(), &args.tx, tx_builder); } else { sign(namada, &mut tx_builder, &args.tx, signing_data).await?; - signing::generate_test_vector(namada, &tx_builder).await?; - namada.submit(tx_builder, &args.tx).await?; } @@ -1148,8 +1120,7 @@ pub async fn submit_vote_proposal( where ::Error: std::fmt::Display, { - let (mut tx_builder, signing_data, _fee_unshield_epoch) = if args.is_offline - { + let (mut tx_builder, signing_data) = if args.is_offline { let default_signer = Some(args.voter.clone()); let signing_data = aux_signing_data( namada, @@ -1212,15 +1183,12 @@ where } else { args.build(namada).await? 
}; - signing::generate_test_vector(namada, &tx_builder).await?; if args.tx.dump_tx { tx::dump_tx(namada.io(), &args.tx, tx_builder); } else { sign(namada, &mut tx_builder, &args.tx, signing_data).await?; - signing::generate_test_vector(namada, &tx_builder).await?; - namada.submit(tx_builder, &args.tx).await?; } @@ -1330,17 +1298,13 @@ where let default_address = args.source.clone().unwrap_or(args.validator.clone()); submit_reveal_aux(namada, args.tx.clone(), &default_address).await?; - let (mut tx, signing_data, _fee_unshield_epoch) = - args.build(namada).await?; - signing::generate_test_vector(namada, &tx).await?; + let (mut tx, signing_data) = args.build(namada).await?; if args.tx.dump_tx { tx::dump_tx(namada.io(), &args.tx, tx); } else { sign(namada, &mut tx, &args.tx, signing_data).await?; - signing::generate_test_vector(namada, &tx).await?; - namada.submit(tx, &args.tx).await?; } @@ -1354,20 +1318,19 @@ pub async fn submit_unbond( where ::Error: std::fmt::Display, { - let (mut tx, signing_data, _fee_unshield_epoch, latest_withdrawal_pre) = + let (mut tx, signing_data, latest_withdrawal_pre) = args.build(namada).await?; - signing::generate_test_vector(namada, &tx).await?; if args.tx.dump_tx { tx::dump_tx(namada.io(), &args.tx, tx); } else { sign(namada, &mut tx, &args.tx, signing_data).await?; + let resp = namada.submit(tx, &args.tx).await?; - signing::generate_test_vector(namada, &tx).await?; - - namada.submit(tx, &args.tx).await?; - - tx::query_unbonds(namada, args.clone(), latest_withdrawal_pre).await?; + if !args.tx.dry_run && resp.is_applied_and_valid().is_some() { + tx::query_unbonds(namada, args.clone(), latest_withdrawal_pre) + .await?; + } } Ok(()) @@ -1380,17 +1343,13 @@ pub async fn submit_withdraw( where ::Error: std::fmt::Display, { - let (mut tx, signing_data, _fee_unshield_epoch) = - args.build(namada).await?; - signing::generate_test_vector(namada, &tx).await?; + let (mut tx, signing_data) = args.build(namada).await?; if args.tx.dump_tx { tx::dump_tx(namada.io(), &args.tx, tx); } else { sign(namada, &mut tx, &args.tx, signing_data).await?; - signing::generate_test_vector(namada, &tx).await?; - namada.submit(tx, &args.tx).await?; } @@ -1404,17 +1363,13 @@ pub async fn submit_claim_rewards( where ::Error: std::fmt::Display, { - let (mut tx, signing_data, _fee_unshield_epoch) = - args.build(namada).await?; - signing::generate_test_vector(namada, &tx).await?; + let (mut tx, signing_data) = args.build(namada).await?; if args.tx.dump_tx { tx::dump_tx(namada.io(), &args.tx, tx); } else { sign(namada, &mut tx, &args.tx, signing_data).await?; - signing::generate_test_vector(namada, &tx).await?; - namada.submit(tx, &args.tx).await?; } @@ -1429,15 +1384,12 @@ where ::Error: std::fmt::Display, { let (mut tx, signing_data) = args.build(namada).await?; - signing::generate_test_vector(namada, &tx).await?; if args.tx.dump_tx { tx::dump_tx(namada.io(), &args.tx, tx); } else { sign(namada, &mut tx, &args.tx, signing_data).await?; - signing::generate_test_vector(namada, &tx).await?; - namada.submit(tx, &args.tx).await?; } @@ -1451,17 +1403,13 @@ pub async fn submit_validator_commission_change( where ::Error: std::fmt::Display, { - let (mut tx, signing_data, _fee_unshield_epoch) = - args.build(namada).await?; - signing::generate_test_vector(namada, &tx).await?; + let (mut tx, signing_data) = args.build(namada).await?; if args.tx.dump_tx { tx::dump_tx(namada.io(), &args.tx, tx); } else { sign(namada, &mut tx, &args.tx, signing_data).await?; - signing::generate_test_vector(namada, 
&tx).await?; - namada.submit(tx, &args.tx).await?; } @@ -1475,45 +1423,19 @@ pub async fn submit_validator_metadata_change( where ::Error: std::fmt::Display, { - let (mut tx, signing_data, _fee_unshield_epoch) = - args.build(namada).await?; - signing::generate_test_vector(namada, &tx).await?; + let (mut tx, signing_data) = args.build(namada).await?; if args.tx.dump_tx { tx::dump_tx(namada.io(), &args.tx, tx); } else { sign(namada, &mut tx, &args.tx, signing_data).await?; - signing::generate_test_vector(namada, &tx).await?; - namada.submit(tx, &args.tx).await?; } Ok(()) } -// pub async fn submit_change_consensus_key( -// namada: &N, -// args: args::ConsensusKeyChange, -// ) -> Result<(), error::Error> -// where -// ::Error: std::fmt::Display, -// { -// let (mut tx, signing_data, _fee_unshield_epoch) = -// args.build(namada).await?; -// signing::generate_test_vector(namada, &tx).await?; - -// if args.tx.dump_tx { -// tx::dump_tx(namada.io(), &args.tx, tx); -// } else { -// namada.sign(&mut tx, &args.tx, signing_data).await?; - -// namada.submit(tx, &args.tx).await?; -// } - -// Ok(()) -// } - pub async fn submit_unjail_validator( namada: &N, args: args::TxUnjailValidator, @@ -1521,17 +1443,13 @@ pub async fn submit_unjail_validator( where ::Error: std::fmt::Display, { - let (mut tx, signing_data, _fee_unshield_epoch) = - args.build(namada).await?; - signing::generate_test_vector(namada, &tx).await?; + let (mut tx, signing_data) = args.build(namada).await?; if args.tx.dump_tx { tx::dump_tx(namada.io(), &args.tx, tx); } else { sign(namada, &mut tx, &args.tx, signing_data).await?; - signing::generate_test_vector(namada, &tx).await?; - namada.submit(tx, &args.tx).await?; } @@ -1545,17 +1463,13 @@ pub async fn submit_deactivate_validator( where ::Error: std::fmt::Display, { - let (mut tx, signing_data, _fee_unshield_epoch) = - args.build(namada).await?; - signing::generate_test_vector(namada, &tx).await?; + let (mut tx, signing_data) = args.build(namada).await?; if args.tx.dump_tx { tx::dump_tx(namada.io(), &args.tx, tx); } else { sign(namada, &mut tx, &args.tx, signing_data).await?; - signing::generate_test_vector(namada, &tx).await?; - namada.submit(tx, &args.tx).await?; } @@ -1569,17 +1483,13 @@ pub async fn submit_reactivate_validator( where ::Error: std::fmt::Display, { - let (mut tx, signing_data, _fee_unshield_epoch) = - args.build(namada).await?; - signing::generate_test_vector(namada, &tx).await?; + let (mut tx, signing_data) = args.build(namada).await?; if args.tx.dump_tx { tx::dump_tx(namada.io(), &args.tx, tx); } else { sign(namada, &mut tx, &args.tx, signing_data).await?; - signing::generate_test_vector(namada, &tx).await?; - namada.submit(tx, &args.tx).await?; } @@ -1593,18 +1503,13 @@ pub async fn submit_update_steward_commission( where ::Error: std::fmt::Display, { - let (mut tx, signing_data, _fee_unshield_epoch) = - args.build(namada).await?; - - signing::generate_test_vector(namada, &tx).await?; + let (mut tx, signing_data) = args.build(namada).await?; if args.tx.dump_tx { tx::dump_tx(namada.io(), &args.tx, tx); } else { sign(namada, &mut tx, &args.tx, signing_data).await?; - signing::generate_test_vector(namada, &tx).await?; - namada.submit(tx, &args.tx).await?; } @@ -1618,17 +1523,13 @@ pub async fn submit_resign_steward( where ::Error: std::fmt::Display, { - let (mut tx, signing_data, _epoch) = args.build(namada).await?; - - signing::generate_test_vector(namada, &tx).await?; + let (mut tx, signing_data) = args.build(namada).await?; if args.tx.dump_tx { 
tx::dump_tx(namada.io(), &args.tx, tx); } else { sign(namada, &mut tx, &args.tx, signing_data).await?; - signing::generate_test_vector(namada, &tx).await?; - namada.submit(tx, &args.tx).await?; } diff --git a/apps/src/lib/client/utils.rs b/apps/src/lib/client/utils.rs index 5007df9f21..d334a8281b 100644 --- a/apps/src/lib/client/utils.rs +++ b/apps/src/lib/client/utils.rs @@ -24,6 +24,7 @@ use sha2::{Digest, Sha256}; use tokio::sync::RwLock; use crate::cli::args; +use crate::cli::args::TestGenesis; use crate::cli::context::ENV_VAR_WASM_DIR; use crate::config::genesis::chain::DeriveEstablishedAddress; use crate::config::genesis::transactions::{ @@ -291,13 +292,14 @@ pub async fn join_network( ); let tm_home_dir = chain_dir.join(config::COMETBFT_DIR); // Write consensus key to tendermint home - tendermint_node::write_validator_key(&tm_home_dir, &consensus_key); + tendermint_node::write_validator_key(&tm_home_dir, &consensus_key) + .unwrap(); // Write tendermint node key write_tendermint_node_key(&tm_home_dir, tendermint_node_key); // Pre-initialize tendermint validator state - tendermint_node::write_validator_state(&tm_home_dir); + tendermint_node::write_validator_state(&tm_home_dir).unwrap(); } else { println!( "No validator keys are being used. Make sure you didn't forget to \ @@ -569,7 +571,8 @@ pub fn init_network( // After the archive is created, try to copy the built WASM, if they're // present with the checksums. This is used for local network setup, so // that we can use a local WASM build. - let checksums = wasm_loader::Checksums::read_checksums(&wasm_dir_full); + let checksums = wasm_loader::Checksums::read_checksums(&wasm_dir_full) + .unwrap_or_else(|_| safe_exit(1)); for (_, full_name) in checksums.0 { // try to copy built file from the Namada WASM root dir let file = std::env::current_dir() @@ -582,6 +585,29 @@ pub fn init_network( } } +pub fn test_genesis(args: TestGenesis) { + use crate::facade::tendermint::Timeout; + + let templates = genesis::templates::load_and_validate(&args.path).unwrap(); + let genesis = genesis::chain::finalize( + templates, + FromStr::from_str("namada-dryrun").unwrap(), + Default::default(), + Timeout::from_str("30s").unwrap(), + ); + let chain_id = &genesis.metadata.chain_id; + let test_dir = tempfile::tempdir().unwrap(); + let config = crate::config::Config::load(test_dir.path(), chain_id, None); + genesis + .write_toml_files(&test_dir.path().join(chain_id.to_string())) + .unwrap(); + crate::node::ledger::test_genesis_files( + config.ledger, + genesis, + args.wasm_dir, + ); +} + pub fn pk_to_tm_address( _global_args: args::Global, args::PkToTmAddress { public_key }: args::PkToTmAddress, diff --git a/apps/src/lib/config/genesis.rs b/apps/src/lib/config/genesis.rs index f7da635257..8649d2a4d1 100644 --- a/apps/src/lib/config/genesis.rs +++ b/apps/src/lib/config/genesis.rs @@ -287,10 +287,6 @@ pub struct Parameters { pub epochs_per_year: u64, /// Maximum amount of signatures per transaction pub max_signatures_per_transaction: u8, - /// PoS gain p (read only) - pub pos_gain_p: Dec, - /// PoS gain d (read only) - pub pos_gain_d: Dec, /// PoS staked ratio (read + write for every epoch) pub staked_ratio: Dec, /// PoS inflation amount from the last epoch (read + write for every epoch) @@ -311,7 +307,7 @@ pub struct Parameters { #[cfg(all(any(test, feature = "benches"), not(feature = "integration")))] pub fn make_dev_genesis( num_validators: u64, - target_chain_dir: std::path::PathBuf, + target_chain_dir: &std::path::Path, ) -> Finalized { use 
std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::time::Duration; @@ -545,7 +541,7 @@ pub fn make_dev_genesis( // Write out the TOML files for benches #[cfg(feature = "benches")] genesis - .write_toml_files(&target_chain_dir) + .write_toml_files(target_chain_dir) .expect("Must be able to write the finalized genesis"); #[cfg(not(feature = "benches"))] let _ = target_chain_dir; // avoid unused warn diff --git a/apps/src/lib/config/genesis/chain.rs b/apps/src/lib/config/genesis/chain.rs index d8e57f11f6..cca64103cf 100644 --- a/apps/src/lib/config/genesis/chain.rs +++ b/apps/src/lib/config/genesis/chain.rs @@ -267,8 +267,6 @@ impl Finalized { tx_whitelist, implicit_vp, epochs_per_year, - pos_gain_p, - pos_gain_d, max_signatures_per_transaction, fee_unshielding_gas_limit, fee_unshielding_descriptions_limit, @@ -284,10 +282,11 @@ impl Finalized { .get(&implicit_vp) .expect("Implicit VP must be present") .filename; - let implicit_vp = + + let implicit_vp_code_hash = wasm_loader::read_wasm(&wasm_dir, implicit_vp_filename) - .expect("Implicit VP WASM code couldn't get read"); - let implicit_vp_code_hash = Hash::sha256(implicit_vp); + .ok() + .map(Hash::sha256); let min_duration: i64 = 60 * 60 * 24 * 365 / (epochs_per_year as i64); let epoch_duration = EpochDuration { @@ -311,8 +310,6 @@ impl Finalized { tx_whitelist, implicit_vp_code_hash, epochs_per_year, - pos_gain_p, - pos_gain_d, staked_ratio, pos_inflation_amount: Amount::native_whole(pos_inflation_amount), max_proposal_bytes, @@ -325,7 +322,7 @@ impl Finalized { .map(|(token, amt)| { ( self.tokens.token.get(token).cloned().unwrap().address, - amt.amount, + amt.amount(), ) }) .collect(), @@ -350,6 +347,8 @@ impl Finalized { validator_stake_threshold, liveness_window_check, liveness_threshold, + rewards_gain_p, + rewards_gain_d, } = self.parameters.pos_params.clone(); namada::proof_of_stake::parameters::PosParams { @@ -368,6 +367,8 @@ impl Finalized { validator_stake_threshold, liveness_window_check, liveness_threshold, + rewards_gain_p, + rewards_gain_d, }, max_proposal_period: self.parameters.gov_params.max_proposal_period, } diff --git a/apps/src/lib/config/genesis/templates.rs b/apps/src/lib/config/genesis/templates.rs index 62209dd24d..4a1c397046 100644 --- a/apps/src/lib/config/genesis/templates.rs +++ b/apps/src/lib/config/genesis/templates.rs @@ -6,7 +6,7 @@ use std::path::Path; use borsh::{BorshDeserialize, BorshSerialize}; use namada::core::types::{ethereum_structs, token}; -use namada::eth_bridge::parameters::{ +use namada::eth_bridge::storage::parameters::{ Contracts, Erc20WhitelistEntry, MinimumConfirmations, }; use namada::types::address::Address; @@ -276,10 +276,6 @@ pub struct ChainParams { pub implicit_vp: String, /// Expected number of epochs per year pub epochs_per_year: u64, - /// PoS gain p - pub pos_gain_p: Dec, - /// PoS gain d - pub pos_gain_d: Dec, /// Maximum number of signature per transaction pub max_signatures_per_transaction: u8, /// Max gas for block @@ -307,8 +303,6 @@ impl ChainParams { tx_whitelist, implicit_vp, epochs_per_year, - pos_gain_p, - pos_gain_d, max_signatures_per_transaction, max_block_gas, fee_unshielding_gas_limit, @@ -354,8 +348,6 @@ impl ChainParams { tx_whitelist, implicit_vp, epochs_per_year, - pos_gain_p, - pos_gain_d, max_signatures_per_transaction, max_block_gas, fee_unshielding_gas_limit, @@ -410,6 +402,10 @@ pub struct PosParams { /// The minimum required activity of consensus validators, in percentage, /// over the `liveness_window_check` pub liveness_threshold: Dec, + /// PoS gain p 
(read only) + pub rewards_gain_p: Dec, + /// PoS gain d (read only) + pub rewards_gain_d: Dec, } #[derive( @@ -489,7 +485,7 @@ pub struct EthBridgeParams { impl TokenBalances { pub fn get(&self, addr: &GenesisAddress) -> Option { - self.0.get(addr).map(|amt| amt.amount) + self.0.get(addr).map(|amt| amt.amount()) } } @@ -896,7 +892,7 @@ pub fn validate_balances( let sum = next.0.values().try_fold( token::Amount::default(), |acc, amount| { - let res = acc.checked_add(amount.amount); + let res = acc.checked_add(amount.amount()); if res.as_ref().is_none() { is_valid = false; eprintln!( @@ -984,6 +980,6 @@ mod tests { let balances = read_balances(&path).unwrap(); let example_balance = balances.token.get(&token_alias).unwrap(); - assert_eq!(balance, example_balance.0.get(&address).unwrap().amount); + assert_eq!(balance, example_balance.0.get(&address).unwrap().amount()); } } diff --git a/apps/src/lib/config/genesis/transactions.rs b/apps/src/lib/config/genesis/transactions.rs index 0fcf3ff7cc..ae8d892a29 100644 --- a/apps/src/lib/config/genesis/transactions.rs +++ b/apps/src/lib/config/genesis/transactions.rs @@ -117,7 +117,7 @@ fn get_tx_to_sign(tag: impl AsRef, data: impl BorshSerialize) -> Tx { let pk = get_sentinel_pubkey(); tx.add_wrapper( Fee { - amount_per_gas_unit: Default::default(), + amount_per_gas_unit: DenominatedAmount::native(0.into()), token: Address::from(&pk), }, pk, @@ -308,7 +308,7 @@ pub fn init_validator( unsigned_validator_account_tx.address.raw.clone(); let validator_account = Some(vec![unsigned_validator_account_tx]); - let bond = if self_bond_amount.amount.is_zero() { + let bond = if self_bond_amount.amount().is_zero() { None } else { let unsigned_bond_tx = BondTx { @@ -524,7 +524,7 @@ impl Transactions { BTreeMap::new(); for tx in txs { let entry = stakes.entry(&tx.validator).or_default(); - *entry += tx.amount.amount; + *entry += tx.amount.amount(); } stakes.into_values().any(|stake| { @@ -878,7 +878,7 @@ where validator: self.validator.clone(), amount: denominate_amount::(self.amount.clone()) .unwrap() - .amount, + .amount(), source: Some(self.source.address()), }, ) @@ -1188,8 +1188,19 @@ fn validate_bond( // Deduct the amount from source if amount == balance { balances.amounts.remove(source); + } else if let Some(new_balance) = + balance.checked_sub(*amount) + { + *balance = new_balance; } else { - balance.amount -= amount.amount; + eprintln!( + "Invalid bond tx. Amount {} should have the \ + denomination {:?}. Got {:?}.", + amount, + balance.denom(), + amount.denom(), + ); + is_valid = false; } } } diff --git a/apps/src/lib/node/ledger/mod.rs b/apps/src/lib/node/ledger/mod.rs index 11bd75aba5..e2ce7ee2d6 100644 --- a/apps/src/lib/node/ledger/mod.rs +++ b/apps/src/lib/node/ledger/mod.rs @@ -17,6 +17,7 @@ use futures::future::TryFutureExt; use namada::core::ledger::governance::storage::keys as governance_storage; use namada::eth_bridge::ethers::providers::{Http, Provider}; use namada::types::storage::Key; +use namada::types::time::{DateTimeUtc, Utc}; use namada_sdk::tendermint::abci::request::CheckTxKind; use once_cell::unsync::Lazy; use sysinfo::{RefreshKind, System, SystemExt}; @@ -241,6 +242,12 @@ pub fn rollback(config: config::Ledger) -> Result<(), shell::Error> { /// /// All must be alive for correct functioning. 
async fn run_aux(config: config::Ledger, wasm_dir: PathBuf) { + // wait for genesis time + let genesis_time = DateTimeUtc::try_from(config.genesis_time.clone()) + .expect("Should be able to parse genesis time"); + if let std::ops::ControlFlow::Break(_) = sleep_until(genesis_time).await { + return; + } let setup_data = run_aux_setup(&config, &wasm_dir).await; // Create an `AbortableSpawner` for signalling shut down from the shell or @@ -743,8 +750,74 @@ async fn maybe_start_ethereum_oracle( } } +/// This function runs `Shell::init_chain` on the provided genesis files. +/// This is to check that all the transactions included therein run +/// successfully on chain initialization. +pub fn test_genesis_files( + config: config::Ledger, + genesis: config::genesis::chain::Finalized, + wasm_dir: PathBuf, +) { + use namada::ledger::storage::mockdb::MockDB; + use namada::ledger::storage::Sha256Hasher; + + // Channels for validators to send protocol txs to be broadcast to the + // broadcaster service + let (broadcast_sender, _broadcaster_receiver) = mpsc::unbounded_channel(); + + // Start dummy broadcaster + let _broadcaster = spawn_dummy_task(()); + let chain_id = config.chain_id.to_string(); + // start an instance of the ledger + let mut shell = Shell::::new( + config, + wasm_dir, + broadcast_sender, + None, + None, + 50 * 1024 * 1024, + 50 * 1024 * 1024, + ); + let mut initializer = shell::InitChainValidation::new(&mut shell, true); + initializer.run_validation(chain_id, genesis); + initializer.report(); +} + /// Spawn a dummy asynchronous task into the runtime, /// which will resolve instantly. fn spawn_dummy_task(ready: T) -> task::JoinHandle { tokio::spawn(async { std::future::ready(ready).await }) } + +/// Sleep until the genesis time if necessary. +async fn sleep_until(time: DateTimeUtc) -> std::ops::ControlFlow<()> { + // Sleep until start time if needed + let sleep = async { + if let Ok(sleep_time) = + time.0.signed_duration_since(Utc::now()).to_std() + { + if !sleep_time.is_zero() { + tracing::info!( + "Waiting for ledger genesis time: {:?}, time left: {:?}", + time, + sleep_time + ); + tokio::time::sleep(sleep_time).await + } + } + }; + let shutdown_signal = async { + let (tx, rx) = tokio::sync::oneshot::channel(); + namada_sdk::control_flow::shutdown_send(tx).await; + rx.await + }; + tokio::select! { + _ = shutdown_signal => { + std::ops::ControlFlow::Break(()) + } + _ = sleep => { + tracing::info!("Genesis time reached, starting ledger"); + std::ops::ControlFlow::Continue(()) + } + } +} diff --git a/apps/src/lib/node/ledger/shell/finalize_block.rs b/apps/src/lib/node/ledger/shell/finalize_block.rs index 9a716bddd1..0e46577a0d 100644 --- a/apps/src/lib/node/ledger/shell/finalize_block.rs +++ b/apps/src/lib/node/ledger/shell/finalize_block.rs @@ -1,25 +1,30 @@ //! 
Implementation of the `FinalizeBlock` ABCI++ method for the Shell use data_encoding::HEXUPPER; -use namada::core::ledger::inflation; +use masp_primitives::merkle_tree::CommitmentTree; +use masp_primitives::sapling::Node; +use masp_proofs::bls12_381; use namada::core::ledger::masp_conversions::update_allowed_conversions; -use namada::core::ledger::pgf::ADDRESS as pgf_address; +use namada::core::ledger::pgf::inflation as pgf_inflation; +use namada::core::types::storage::KeySeg; use namada::ledger::events::EventType; use namada::ledger::gas::{GasMetering, TxGasMeter}; -use namada::ledger::parameters::storage as params_storage; -use namada::ledger::pos::{namada_proof_of_stake, staking_token_address}; +use namada::ledger::pos::namada_proof_of_stake; use namada::ledger::protocol; use namada::ledger::storage::wl_storage::WriteLogAndStorage; +use namada::ledger::storage::write_log::StorageModification; use namada::ledger::storage::EPOCH_SWITCH_BLOCKS_DELAY; -use namada::ledger::storage_api::token::credit_tokens; -use namada::ledger::storage_api::{pgf, StorageRead, StorageWrite}; -use namada::proof_of_stake::{ +use namada::ledger::storage_api::{ResultExt, StorageRead, StorageWrite}; +use namada::proof_of_stake::storage::{ find_validator_by_raw_hash, read_last_block_proposer_address, - read_pos_params, read_total_stake, write_last_block_proposer_address, + write_last_block_proposer_address, }; -use namada::types::dec::Dec; +use namada::types::address::MASP; use namada::types::key::tm_raw_hash_to_string; use namada::types::storage::{BlockHash, BlockResults, Epoch, Header}; +use namada::types::token::{ + MASP_NOTE_COMMITMENT_ANCHOR_PREFIX, MASP_NOTE_COMMITMENT_TREE_KEY, +}; use namada::types::transaction::protocol::{ ethereum_tx_data_variants, ProtocolTxType, }; @@ -90,7 +95,7 @@ where } let pos_params = - namada_proof_of_stake::read_pos_params(&self.wl_storage)?; + namada_proof_of_stake::storage::read_pos_params(&self.wl_storage)?; if new_epoch { update_allowed_conversions(&mut self.wl_storage)?; @@ -99,7 +104,7 @@ where // Copy the new_epoch + pipeline_len - 1 validator set into // new_epoch + pipeline_len - namada_proof_of_stake::copy_validator_sets_and_positions( + namada_proof_of_stake::validator_set_update::copy_validator_sets_and_positions( &mut self.wl_storage, &pos_params, current_epoch, @@ -212,8 +217,8 @@ where }; // If [`process_proposal`] rejected a Tx due to invalid signature, // emit an event here and move on to next tx. - if ErrorCodes::from_u32(processed_tx.result.code).unwrap() - == ErrorCodes::InvalidSig + if ResultCode::from_u32(processed_tx.result.code).unwrap() + == ResultCode::InvalidSig { let mut tx_event = match tx.header().tx_type { TxType::Wrapper(_) | TxType::Protocol(_) => { @@ -247,8 +252,8 @@ where let tx_header = tx.header(); // If [`process_proposal`] rejected a Tx, emit an event here and // move on to next tx - if ErrorCodes::from_u32(processed_tx.result.code).unwrap() - != ErrorCodes::Ok + if ResultCode::from_u32(processed_tx.result.code).unwrap() + != ResultCode::Ok { let mut tx_event = Event::new_tx_event(&tx, height.0); tx_event["code"] = processed_tx.result.code.to_string(); @@ -309,7 +314,7 @@ where decrypted." 
.into(); event["code"] = - ErrorCodes::Undecryptable.into(); + ResultCode::Undecryptable.into(); response.events.push(event); continue; } @@ -440,35 +445,35 @@ where } self.wl_storage.commit_tx(); if !tx_event.contains_key("code") { - tx_event["code"] = ErrorCodes::Ok.into(); + tx_event["code"] = ResultCode::Ok.into(); self.wl_storage .storage .block .results .accept(tx_index); } - for ibc_event in &result.ibc_events { - // Add the IBC event besides the tx_event - let mut event = Event::from(ibc_event.clone()); - // Add the height for IBC event query - event["height"] = height.to_string(); - response.events.push(event); - } - match serde_json::to_string( - &result.initialized_accounts, - ) { - Ok(initialized_accounts) => { - tx_event["initialized_accounts"] = - initialized_accounts; - } - Err(err) => { - tracing::error!( - "Failed to serialize the initialized \ - accounts: {}", - err - ); - } - } + // events from other sources + response.events.extend( + // ibc events + result + .ibc_events + .iter() + .cloned() + .map(|ibc_event| { + // Add the IBC event besides the tx_event + let mut event = Event::from(ibc_event); + // Add the height for IBC event query + event["height"] = height.to_string(); + event + }) + // eth bridge events + .chain( + result + .eth_bridge_events + .iter() + .map(Event::from), + ), + ); } else { tracing::trace!( "some VPs rejected transaction {} storage \ @@ -488,10 +493,11 @@ where stats.increment_rejected_txs(); self.wl_storage.drop_tx(); - tx_event["code"] = ErrorCodes::InvalidTx.into(); + tx_event["code"] = ResultCode::InvalidTx.into(); } tx_event["gas_used"] = result.gas_used.to_string(); - tx_event["info"] = result.to_string(); + tx_event["info"] = "Check inner_tx for result.".to_string(); + tx_event["inner_tx"] = result.to_string(); } Err(msg) => { tracing::info!( @@ -539,9 +545,9 @@ where tx_event["info"] = msg.to_string(); if let EventType::Accepted = tx_event.event_type { // If wrapper, invalid tx error code - tx_event["code"] = ErrorCodes::InvalidTx.into(); + tx_event["code"] = ResultCode::InvalidTx.into(); } else { - tx_event["code"] = ErrorCodes::WasmRuntimeError.into(); + tx_event["code"] = ResultCode::WasmRuntimeError.into(); } } } @@ -560,6 +566,25 @@ where tracing::info!("{}", stats); tracing::info!("{}", stats.format_tx_executed()); + // Update the MASP commitment tree anchor if the tree was updated + let tree_key = Key::from(MASP.to_db_key()) + .push(&MASP_NOTE_COMMITMENT_TREE_KEY.to_owned()) + .expect("Cannot obtain a storage key"); + if let Some(StorageModification::Write { value }) = + self.wl_storage.write_log.read(&tree_key).0 + { + let updated_tree = CommitmentTree::::try_from_slice(value) + .into_storage_result()?; + let anchor_key = Key::from(MASP.to_db_key()) + .push(&MASP_NOTE_COMMITMENT_ANCHOR_PREFIX.to_owned()) + .expect("Cannot obtain a storage key") + .push(&namada::core::types::hash::Hash( + bls12_381::Scalar::from(updated_tree.root()).to_bytes(), + )) + .expect("Cannot obtain a storage key"); + self.wl_storage.write(&anchor_key, ())?; + } + if update_for_tendermint { self.update_epoch(&mut response); // send the latest oracle configs. These may have changed due to @@ -636,58 +661,6 @@ where /// with respect to the previous epoch. fn apply_inflation(&mut self, current_epoch: Epoch) -> Result<()> { let last_epoch = current_epoch.prev(); - // Get input values needed for the PD controller for PoS. - // Run the PD controllers to calculate new rates. 
- - let params = read_pos_params(&self.wl_storage)?; - - // Read from Parameters storage - let epochs_per_year: u64 = self - .read_storage_key(¶ms_storage::get_epochs_per_year_key()) - .expect("Epochs per year should exist in storage"); - let pos_p_gain_nom: Dec = self - .read_storage_key(¶ms_storage::get_pos_gain_p_key()) - .expect("PoS P-gain factor should exist in storage"); - let pos_d_gain_nom: Dec = self - .read_storage_key(¶ms_storage::get_pos_gain_d_key()) - .expect("PoS D-gain factor should exist in storage"); - - let pos_last_staked_ratio: Dec = self - .read_storage_key(¶ms_storage::get_staked_ratio_key()) - .expect("PoS staked ratio should exist in storage"); - let pos_last_inflation_amount: token::Amount = self - .read_storage_key(¶ms_storage::get_pos_inflation_amount_key()) - .expect("PoS inflation amount should exist in storage"); - // Read from PoS storage - let total_tokens: token::Amount = self - .read_storage_key(&token::minted_balance_key( - &staking_token_address(&self.wl_storage), - )) - .expect("Total NAM balance should exist in storage"); - let pos_locked_supply = - read_total_stake(&self.wl_storage, ¶ms, last_epoch)?; - let pos_locked_ratio_target = params.target_staked_ratio; - let pos_max_inflation_rate = params.max_inflation_rate; - - // Run rewards PD controller - let pos_controller = inflation::RewardsController { - locked_tokens: pos_locked_supply.raw_amount(), - total_tokens: total_tokens.raw_amount(), - total_native_tokens: total_tokens.raw_amount(), - locked_ratio_target: pos_locked_ratio_target, - locked_ratio_last: pos_last_staked_ratio, - max_reward_rate: pos_max_inflation_rate, - last_inflation_amount: pos_last_inflation_amount.raw_amount(), - p_gain_nom: pos_p_gain_nom, - d_gain_nom: pos_d_gain_nom, - epochs_per_year, - }; - - // Run the rewards controllers - let inflation::ValsToUpdate { - locked_ratio, - inflation, - } = pos_controller.run(); // Get the number of blocks in the last epoch let first_block_of_last_epoch = self @@ -700,116 +673,15 @@ where let num_blocks_in_last_epoch = self.wl_storage.storage.block.height.0 - first_block_of_last_epoch; - let staking_token = staking_token_address(&self.wl_storage); - - let inflation = token::Amount::from_uint(inflation, 0) - .expect("Should not fail Uint -> Amount conversion"); - namada_proof_of_stake::update_rewards_products_and_mint_inflation( + // PoS inflation + namada_proof_of_stake::rewards::apply_inflation( &mut self.wl_storage, - ¶ms, last_epoch, num_blocks_in_last_epoch, - inflation, - &staking_token, - ) - .expect( - "Must be able to update PoS rewards products and mint inflation", - ); - - // Write new rewards parameters that will be used for the inflation of - // the current new epoch - self.wl_storage - .write(¶ms_storage::get_pos_inflation_amount_key(), inflation) - .expect("unable to write new reward rate"); - self.wl_storage - .write(¶ms_storage::get_staked_ratio_key(), locked_ratio) - .expect("unable to write new locked ratio"); - - // Pgf inflation - let pgf_parameters = pgf::get_parameters(&self.wl_storage)?; - - let pgf_pd_rate = - pgf_parameters.pgf_inflation_rate / Dec::from(epochs_per_year); - let pgf_inflation = Dec::from(total_tokens) * pgf_pd_rate; - let pgf_inflation_amount = token::Amount::from(pgf_inflation); - - credit_tokens( - &mut self.wl_storage, - &staking_token, - &pgf_address, - pgf_inflation_amount, )?; - tracing::info!( - "Minting {} tokens for PGF rewards distribution into the PGF \ - account.", - pgf_inflation_amount.to_string_native() - ); - - let mut 
pgf_fundings = pgf::get_payments(&self.wl_storage)?; - // we want to pay first the oldest fundings - pgf_fundings.sort_by(|a, b| a.id.cmp(&b.id)); - - for funding in pgf_fundings { - if storage_api::token::transfer( - &mut self.wl_storage, - &staking_token, - &pgf_address, - &funding.detail.target, - funding.detail.amount, - ) - .is_ok() - { - tracing::info!( - "Paying {} tokens for {} project.", - funding.detail.amount.to_string_native(), - &funding.detail.target, - ); - } else { - tracing::warn!( - "Failed to pay {} tokens for {} project.", - funding.detail.amount.to_string_native(), - &funding.detail.target, - ); - } - } - - // Pgf steward inflation - let stewards = pgf::get_stewards(&self.wl_storage)?; - let pgf_stewards_pd_rate = - pgf_parameters.stewards_inflation_rate / Dec::from(epochs_per_year); - let pgf_steward_inflation = - Dec::from(total_tokens) * pgf_stewards_pd_rate; - - for steward in stewards { - for (address, percentage) in steward.reward_distribution { - let pgf_steward_reward = pgf_steward_inflation - .checked_mul(&percentage) - .unwrap_or_default(); - let reward_amount = token::Amount::from(pgf_steward_reward); - - if credit_tokens( - &mut self.wl_storage, - &staking_token, - &address, - reward_amount, - ) - .is_ok() - { - tracing::info!( - "Minting {} tokens for steward {}.", - reward_amount.to_string_native(), - address, - ); - } else { - tracing::warn!( - "Failed minting {} tokens for steward {}.", - reward_amount.to_string_native(), - address, - ); - } - } - } + // Pgf inflation + pgf_inflation::apply_inflation(&mut self.wl_storage)?; Ok(()) } @@ -829,7 +701,7 @@ where tracing::debug!( "Found last block proposer: {proposer_address}" ); - namada_proof_of_stake::log_block_rewards( + namada_proof_of_stake::rewards::log_block_rewards( &mut self.wl_storage, if new_epoch { current_epoch.prev() @@ -962,21 +834,21 @@ mod test_finalize_block { use namada::ledger::storage_api; use namada::ledger::storage_api::StorageWrite; use namada::proof_of_stake::storage::{ + enqueued_slashes_handle, get_num_consensus_validators, + read_consensus_validator_set_addresses_with_stake, read_total_stake, + read_validator_stake, rewards_accumulator_handle, + validator_consensus_key_handle, validator_rewards_products_handle, + validator_slashes_handle, validator_state_handle, write_pos_params, + }; + use namada::proof_of_stake::storage_key::{ is_validator_slashes_key, slashes_prefix, }; use namada::proof_of_stake::types::{ BondId, SlashType, ValidatorState, WeightedValidator, }; - use namada::proof_of_stake::{ - enqueued_slashes_handle, get_num_consensus_validators, - read_consensus_validator_set_addresses_with_stake, - read_validator_stake, rewards_accumulator_handle, unjail_validator, - validator_consensus_key_handle, validator_rewards_products_handle, - validator_slashes_handle, validator_state_handle, write_pos_params, - ADDRESS as pos_address, - }; + use namada::proof_of_stake::{unjail_validator, ADDRESS as pos_address}; use namada::proto::{Code, Data, Section, Signature}; - use namada::types::dec::POS_DECIMAL_PRECISION; + use namada::types::dec::{Dec, POS_DECIMAL_PRECISION}; use namada::types::ethereum_events::{EthAddress, Uint as ethUint}; use namada::types::hash::Hash; use namada::types::keccak::KeccakHash; @@ -984,7 +856,9 @@ mod test_finalize_block { use namada::types::key::tm_consensus_key_raw_hash; use namada::types::storage::Epoch; use namada::types::time::{DateTimeUtc, DurationSecs}; - use namada::types::token::{Amount, NATIVE_MAX_DECIMAL_PLACES}; + use namada::types::token::{ + 
Amount, DenominatedAmount, NATIVE_MAX_DECIMAL_PLACES, + }; use namada::types::transaction::governance::{ InitProposalData, VoteProposalData, }; @@ -993,7 +867,7 @@ mod test_finalize_block { use namada::types::uint::Uint; use namada::types::vote_extensions::ethereum_events; use namada_sdk::eth_bridge::MinimumConfirmations; - use namada_sdk::proof_of_stake::{ + use namada_sdk::proof_of_stake::storage::{ liveness_missed_votes_handle, liveness_sum_missed_votes_handle, read_consensus_validator_set_addresses, }; @@ -1022,7 +896,7 @@ mod test_finalize_block { let mut wrapper_tx = Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { - amount_per_gas_unit: 1.into(), + amount_per_gas_unit: DenominatedAmount::native(1.into()), token: shell.wl_storage.storage.native_token.clone(), }, keypair.ref_to(), @@ -1046,7 +920,7 @@ mod test_finalize_block { ProcessedTx { tx: tx.into(), result: TxResult { - code: ErrorCodes::Ok.into(), + code: ResultCode::Ok.into(), info: "".into(), }, }, @@ -1063,7 +937,7 @@ mod test_finalize_block { let mut outer_tx = Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { - amount_per_gas_unit: 1.into(), + amount_per_gas_unit: DenominatedAmount::native(1.into()), token: shell.wl_storage.storage.native_token.clone(), }, keypair.ref_to(), @@ -1085,7 +959,7 @@ mod test_finalize_block { ProcessedTx { tx: outer_tx.to_bytes().into(), result: TxResult { - code: ErrorCodes::Ok.into(), + code: ResultCode::Ok.into(), info: "".into(), }, } @@ -1178,7 +1052,9 @@ mod test_finalize_block { let mut outer_tx = Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { - amount_per_gas_unit: Default::default(), + amount_per_gas_unit: DenominatedAmount::native( + Default::default(), + ), token: shell.wl_storage.storage.native_token.clone(), }, keypair.ref_to(), @@ -1201,7 +1077,7 @@ mod test_finalize_block { let processed_tx = ProcessedTx { tx: outer_tx.to_bytes().into(), result: TxResult { - code: ErrorCodes::InvalidTx.into(), + code: ResultCode::InvalidTx.into(), info: "".into(), }, }; @@ -1216,7 +1092,7 @@ mod test_finalize_block { { assert_eq!(event.event_type.to_string(), String::from("applied")); let code = event.attributes.get("code").expect("Test failed"); - assert_eq!(code, &String::from(ErrorCodes::InvalidTx)); + assert_eq!(code, &String::from(ResultCode::InvalidTx)); } // check that the corresponding wrapper tx was removed from the queue assert!(shell.wl_storage.storage.tx_queue.is_empty()); @@ -1232,7 +1108,7 @@ mod test_finalize_block { // not valid tx bytes let wrapper = Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { - amount_per_gas_unit: 0.into(), + amount_per_gas_unit: DenominatedAmount::native(0.into()), token: shell.wl_storage.storage.native_token.clone(), }, keypair.ref_to(), @@ -1245,7 +1121,7 @@ mod test_finalize_block { .to_bytes() .into(), result: TxResult { - code: ErrorCodes::Ok.into(), + code: ResultCode::Ok.into(), info: "".into(), }, }; @@ -1266,7 +1142,7 @@ mod test_finalize_block { { assert_eq!(event.event_type.to_string(), String::from("applied")); let code = event.attributes.get("code").expect("Test failed"); - assert_eq!(code, &String::from(ErrorCodes::Undecryptable)); + assert_eq!(code, &String::from(ResultCode::Undecryptable)); let log = event.attributes.get("log").expect("Test failed"); assert!(log.contains("Transaction could not be decrypted.")) } @@ -1325,7 +1201,7 @@ mod test_finalize_block { ); let code = event.attributes.get("code").expect("Test failed").as_str(); - assert_eq!(code, String::from(ErrorCodes::Ok).as_str()); + 
assert_eq!(code, String::from(ResultCode::Ok).as_str()); } else { // these should be accepted decrypted txs assert_eq!( @@ -1334,7 +1210,7 @@ mod test_finalize_block { ); let code = event.attributes.get("code").expect("Test failed").as_str(); - assert_eq!(code, String::from(ErrorCodes::Ok).as_str()); + assert_eq!(code, String::from(ResultCode::Ok).as_str()); } } @@ -1372,7 +1248,7 @@ mod test_finalize_block { txs: vec![ProcessedTx { tx: tx.into(), result: TxResult { - code: ErrorCodes::InvalidTx.into(), + code: ResultCode::InvalidTx.into(), info: Default::default(), }, }], @@ -1383,7 +1259,7 @@ mod test_finalize_block { let event = resp.remove(0); assert_eq!(event.event_type.to_string(), String::from("applied")); let code = event.attributes.get("code").expect("Test failed"); - assert_eq!(code, &String::from(ErrorCodes::InvalidTx)); + assert_eq!(code, &String::from(ResultCode::InvalidTx)); } /// Test that once a validator's vote for an Ethereum event lands @@ -1442,7 +1318,7 @@ mod test_finalize_block { .to_bytes() .into(), result: TxResult { - code: ErrorCodes::Ok.into(), + code: ResultCode::Ok.into(), info: "".into(), }, } @@ -1459,7 +1335,7 @@ mod test_finalize_block { .expect("Test failed"); assert_eq!(result.event_type.to_string(), String::from("applied")); let code = result.attributes.get("code").expect("Test failed").as_str(); - assert_eq!(code, String::from(ErrorCodes::Ok).as_str()); + assert_eq!(code, String::from(ResultCode::Ok).as_str()); // --- The event is removed from the queue assert!(shell.new_ethereum_events().is_empty()); @@ -1502,7 +1378,7 @@ mod test_finalize_block { .to_bytes() .into(), result: TxResult { - code: ErrorCodes::Ok.into(), + code: ResultCode::Ok.into(), info: "".into(), }, }; @@ -1518,7 +1394,7 @@ mod test_finalize_block { .expect("Test failed"); assert_eq!(result.event_type.to_string(), String::from("applied")); let code = result.attributes.get("code").expect("Test failed").as_str(); - assert_eq!(code, String::from(ErrorCodes::Ok).as_str()); + assert_eq!(code, String::from(ResultCode::Ok).as_str()); // --- The event is removed from the queue assert!(shell.new_ethereum_events().is_empty()); @@ -1561,7 +1437,7 @@ mod test_finalize_block { let processed_tx = ProcessedTx { tx: tx.to_bytes().into(), result: TxResult { - code: ErrorCodes::Ok.into(), + code: ResultCode::Ok.into(), info: "".into(), }, }; @@ -1795,12 +1671,15 @@ mod test_finalize_block { // Keep applying finalize block let validator = shell.mode.get_validator_address().unwrap(); let pos_params = - namada_proof_of_stake::read_pos_params(&shell.wl_storage).unwrap(); - let consensus_key = - namada_proof_of_stake::validator_consensus_key_handle(validator) - .get(&shell.wl_storage, Epoch::default(), &pos_params) - .unwrap() + namada_proof_of_stake::storage::read_pos_params(&shell.wl_storage) .unwrap(); + let consensus_key = + namada_proof_of_stake::storage::validator_consensus_key_handle( + validator, + ) + .get(&shell.wl_storage, Epoch::default(), &pos_params) + .unwrap() + .unwrap(); let proposer_address = HEXUPPER .decode(consensus_key.tm_raw_hash().as_bytes()) .unwrap(); @@ -2341,7 +2220,7 @@ mod test_finalize_block { // Check the bond amounts for rewards up thru the withdrawable epoch let withdraw_epoch = current_epoch + params.withdrawable_epoch_offset(); let last_claim_epoch = - namada_proof_of_stake::get_last_reward_claim_epoch( + namada_proof_of_stake::storage::get_last_reward_claim_epoch( &shell.wl_storage, &validator.address, &validator.address, @@ -2459,7 +2338,7 @@ mod test_finalize_block { 
let validator = validator_set.pop_first().unwrap(); let commission_rate = - namada_proof_of_stake::validator_commission_rate_handle( + namada_proof_of_stake::storage::validator_commission_rate_handle( &validator.address, ) .get(&shell.wl_storage, Epoch(0), ¶ms) @@ -2504,7 +2383,7 @@ mod test_finalize_block { let delegator = address::testing::gen_implicit_address(); let del_amount = init_stake; let staking_token = shell.wl_storage.storage.native_token.clone(); - credit_tokens( + storage_api::token::credit_tokens( &mut shell.wl_storage, &staking_token, &delegator, @@ -2629,21 +2508,21 @@ mod test_finalize_block { // Give the validators some tokens for txs let staking_token = shell.wl_storage.storage.native_token.clone(); - credit_tokens( + storage_api::token::credit_tokens( &mut shell.wl_storage, &staking_token, &validator1.address, init_stake, ) .unwrap(); - credit_tokens( + storage_api::token::credit_tokens( &mut shell.wl_storage, &staking_token, &validator2.address, init_stake, ) .unwrap(); - credit_tokens( + storage_api::token::credit_tokens( &mut shell.wl_storage, &staking_token, &validator3.address, @@ -2673,8 +2552,10 @@ mod test_finalize_block { // Check that there's 3 unique consensus keys let consensus_keys = - namada_proof_of_stake::get_consensus_key_set(&shell.wl_storage) - .unwrap(); + namada_proof_of_stake::storage::get_consensus_key_set( + &shell.wl_storage, + ) + .unwrap(); assert_eq!(consensus_keys.len(), 3); // let ck1 = validator_consensus_key_handle(&validator) // .get(&storage, current_epoch, ¶ms) @@ -2728,8 +2609,10 @@ mod test_finalize_block { // Check that there's 5 unique consensus keys let consensus_keys = - namada_proof_of_stake::get_consensus_key_set(&shell.wl_storage) - .unwrap(); + namada_proof_of_stake::storage::get_consensus_key_set( + &shell.wl_storage, + ) + .unwrap(); assert_eq!(consensus_keys.len(), 5); // Advance to pipeline epoch @@ -2808,8 +2691,10 @@ mod test_finalize_block { // Check that there's 7 unique consensus keys let consensus_keys = - namada_proof_of_stake::get_consensus_key_set(&shell.wl_storage) - .unwrap(); + namada_proof_of_stake::storage::get_consensus_key_set( + &shell.wl_storage, + ) + .unwrap(); assert_eq!(consensus_keys.len(), 7); // Advance to pipeline epoch @@ -2871,8 +2756,10 @@ mod test_finalize_block { // Check that there's 8 unique consensus keys let consensus_keys = - namada_proof_of_stake::get_consensus_key_set(&shell.wl_storage) - .unwrap(); + namada_proof_of_stake::storage::get_consensus_key_set( + &shell.wl_storage, + ) + .unwrap(); assert_eq!(consensus_keys.len(), 8); // Advance to pipeline epoch @@ -2962,7 +2849,7 @@ mod test_finalize_block { failed", ) .as_str(); - assert_eq!(code, String::from(ErrorCodes::Ok).as_str()); + assert_eq!(code, String::from(ResultCode::Ok).as_str()); // the merkle tree root should not change after finalize_block let root_post = shell.shell.wl_storage.storage.block.tree.root(); @@ -3004,7 +2891,7 @@ mod test_finalize_block { let mut wrapper = Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { - amount_per_gas_unit: 1.into(), + amount_per_gas_unit: DenominatedAmount::native(1.into()), token: shell.wl_storage.storage.native_token.clone(), }, keypair.ref_to(), @@ -3021,7 +2908,7 @@ mod test_finalize_block { let mut new_wrapper = wrapper.clone(); new_wrapper.update_header(TxType::Wrapper(Box::new(WrapperTx::new( Fee { - amount_per_gas_unit: 1.into(), + amount_per_gas_unit: DenominatedAmount::native(1.into()), token: shell.wl_storage.storage.native_token.clone(), }, keypair_2.ref_to(), @@ 
-3062,7 +2949,7 @@ mod test_finalize_block { processed_txs.push(ProcessedTx { tx: inner.to_bytes().into(), result: TxResult { - code: ErrorCodes::Ok.into(), + code: ResultCode::Ok.into(), info: "".into(), }, }) @@ -3086,10 +2973,10 @@ mod test_finalize_block { assert_eq!(event[0].event_type.to_string(), String::from("applied")); let code = event[0].attributes.get("code").unwrap().as_str(); - assert_eq!(code, String::from(ErrorCodes::Ok).as_str()); + assert_eq!(code, String::from(ResultCode::Ok).as_str()); assert_eq!(event[1].event_type.to_string(), String::from("applied")); let code = event[1].attributes.get("code").unwrap().as_str(); - assert_eq!(code, String::from(ErrorCodes::WasmRuntimeError).as_str()); + assert_eq!(code, String::from(ResultCode::WasmRuntimeError).as_str()); for (inner, wrapper) in [(inner, wrapper), (new_inner, new_wrapper)] { assert!( @@ -3130,7 +3017,9 @@ mod test_finalize_block { let mut unsigned_wrapper = Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { - amount_per_gas_unit: Amount::zero(), + amount_per_gas_unit: DenominatedAmount::native( + Amount::zero(), + ), token: shell.wl_storage.storage.native_token.clone(), }, keypair.ref_to(), @@ -3207,7 +3096,7 @@ mod test_finalize_block { processed_txs.push(ProcessedTx { tx: inner.to_bytes().into(), result: TxResult { - code: ErrorCodes::Ok.into(), + code: ResultCode::Ok.into(), info: "".into(), }, }) @@ -3240,19 +3129,19 @@ mod test_finalize_block { assert_eq!(event[0].event_type.to_string(), String::from("applied")); let code = event[0].attributes.get("code").unwrap().as_str(); - assert_eq!(code, String::from(ErrorCodes::WasmRuntimeError).as_str()); + assert_eq!(code, String::from(ResultCode::WasmRuntimeError).as_str()); assert_eq!(event[1].event_type.to_string(), String::from("applied")); let code = event[1].attributes.get("code").unwrap().as_str(); - assert_eq!(code, String::from(ErrorCodes::Undecryptable).as_str()); + assert_eq!(code, String::from(ResultCode::Undecryptable).as_str()); assert_eq!(event[2].event_type.to_string(), String::from("applied")); let code = event[2].attributes.get("code").unwrap().as_str(); - assert_eq!(code, String::from(ErrorCodes::InvalidTx).as_str()); + assert_eq!(code, String::from(ResultCode::InvalidTx).as_str()); assert_eq!(event[3].event_type.to_string(), String::from("applied")); let code = event[3].attributes.get("code").unwrap().as_str(); - assert_eq!(code, String::from(ErrorCodes::WasmRuntimeError).as_str()); + assert_eq!(code, String::from(ResultCode::WasmRuntimeError).as_str()); assert_eq!(event[4].event_type.to_string(), String::from("applied")); let code = event[4].attributes.get("code").unwrap().as_str(); - assert_eq!(code, String::from(ErrorCodes::WasmRuntimeError).as_str()); + assert_eq!(code, String::from(ResultCode::WasmRuntimeError).as_str()); for (invalid_inner, valid_wrapper) in [ (out_of_gas_inner, out_of_gas_wrapper), @@ -3304,7 +3193,7 @@ mod test_finalize_block { let mut wrapper = Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { - amount_per_gas_unit: 0.into(), + amount_per_gas_unit: DenominatedAmount::native(0.into()), token: shell.wl_storage.storage.native_token.clone(), }, keypair.ref_to(), @@ -3330,7 +3219,7 @@ mod test_finalize_block { let processed_txs = vec![ProcessedTx { tx: wrapper.to_bytes().into(), result: TxResult { - code: ErrorCodes::Ok.into(), + code: ResultCode::Ok.into(), info: "".into(), }, }]; @@ -3354,7 +3243,7 @@ mod test_finalize_block { .get("code") .expect("Test failed") .as_str(); - assert_eq!(code, 
String::from(ErrorCodes::InvalidTx).as_str()); + assert_eq!(code, String::from(ResultCode::InvalidTx).as_str()); assert!( shell @@ -3383,7 +3272,7 @@ mod test_finalize_block { let mut wrapper = Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { - amount_per_gas_unit: 100.into(), + amount_per_gas_unit: DenominatedAmount::native(100.into()), token: shell.wl_storage.storage.native_token.clone(), }, keypair.ref_to(), @@ -3405,7 +3294,7 @@ mod test_finalize_block { let processed_tx = ProcessedTx { tx: wrapper.to_bytes().into(), result: TxResult { - code: ErrorCodes::Ok.into(), + code: ResultCode::Ok.into(), info: "".into(), }, }; @@ -3420,7 +3309,7 @@ mod test_finalize_block { // Check balance of fee payer is 0 assert_eq!(event.event_type.to_string(), String::from("accepted")); let code = event.attributes.get("code").expect("Testfailed").as_str(); - assert_eq!(code, String::from(ErrorCodes::InvalidTx).as_str()); + assert_eq!(code, String::from(ResultCode::InvalidTx).as_str()); let balance_key = namada::core::types::token::balance_key( &shell.wl_storage.storage.native_token, &Address::from(&keypair.to_public()), @@ -3442,12 +3331,15 @@ mod test_finalize_block { let validator = shell.mode.get_validator_address().unwrap().to_owned(); let pos_params = - namada_proof_of_stake::read_pos_params(&shell.wl_storage).unwrap(); - let consensus_key = - namada_proof_of_stake::validator_consensus_key_handle(&validator) - .get(&shell.wl_storage, Epoch::default(), &pos_params) - .unwrap() + namada_proof_of_stake::storage::read_pos_params(&shell.wl_storage) .unwrap(); + let consensus_key = + namada_proof_of_stake::storage::validator_consensus_key_handle( + &validator, + ) + .get(&shell.wl_storage, Epoch::default(), &pos_params) + .unwrap() + .unwrap(); let proposer_address = HEXUPPER .decode(consensus_key.tm_raw_hash().as_bytes()) .unwrap(); @@ -3466,7 +3358,7 @@ mod test_finalize_block { let mut wrapper = Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { - amount_per_gas_unit: 1.into(), + amount_per_gas_unit: DenominatedAmount::native(1.into()), token: shell.wl_storage.storage.native_token.clone(), }, crate::wallet::defaults::albert_keypair().ref_to(), @@ -3488,6 +3380,12 @@ mod test_finalize_block { ))); let fee_amount = wrapper.header().wrapper().unwrap().get_tx_fee().unwrap(); + let fee_amount = fee_amount + .to_amount( + &wrapper.header().wrapper().unwrap().fee.token, + &shell.wl_storage, + ) + .unwrap(); let signer_balance = storage_api::token::read_balance( &shell.wl_storage, @@ -3499,7 +3397,7 @@ mod test_finalize_block { let processed_tx = ProcessedTx { tx: wrapper.to_bytes().into(), result: TxResult { - code: ErrorCodes::Ok.into(), + code: ResultCode::Ok.into(), info: "".into(), }, }; @@ -3515,7 +3413,7 @@ mod test_finalize_block { // Check fee payment assert_eq!(event.event_type.to_string(), String::from("accepted")); let code = event.attributes.get("code").expect("Test failed").as_str(); - assert_eq!(code, String::from(ErrorCodes::Ok).as_str()); + assert_eq!(code, String::from(ResultCode::Ok).as_str()); let new_proposer_balance = storage_api::token::read_balance( &shell.wl_storage, @@ -3913,17 +3811,17 @@ mod test_finalize_block { /// `next_block_for_inflation` #[test] fn test_multiple_misbehaviors() -> storage_api::Result<()> { - for num_validators in 4u64..10u64 { - println!("NUM VALIDATORS = {}", num_validators); - test_multiple_misbehaviors_by_num_vals(num_validators)?; + for num_validators in &[4_u64, 6_u64, 9_u64] { + tracing::debug!("\nNUM VALIDATORS = {}", 
num_validators); + test_multiple_misbehaviors_by_num_vals(*num_validators)?; } Ok(()) } /// Current test procedure (prefixed by epoch in which the event occurs): - /// 0) Validator initial stake of 200_000 - /// 1) Delegate 67_231 to validator - /// 1) Self-unbond 154_654 + /// 0) Validator initial stake of 00_000 + /// 1) Delegate 37_231 to validator + /// 1) Self-unbond 84_654 /// 2) Unbond delegation of 18_000 /// 3) Self-bond 9_123 /// 4) Self-unbond 15_000 @@ -3942,7 +3840,7 @@ mod test_finalize_block { }); let mut params = read_pos_params(&shell.wl_storage).unwrap(); params.owned.unbonding_len = 4; - params.owned.max_validator_slots = 4; + params.owned.max_validator_slots = 50; write_pos_params(&mut shell.wl_storage, ¶ms.owned)?; // Slash pool balance @@ -3992,9 +3890,9 @@ mod test_finalize_block { // Make an account with balance and delegate some tokens let delegator = address::testing::gen_implicit_address(); - let del_1_amount = token::Amount::native_whole(67_231); + let del_1_amount = token::Amount::native_whole(37_231); let staking_token = shell.wl_storage.storage.native_token.clone(); - credit_tokens( + storage_api::token::credit_tokens( &mut shell.wl_storage, &staking_token, &delegator, @@ -4012,7 +3910,7 @@ mod test_finalize_block { .unwrap(); // Self-unbond - let self_unbond_1_amount = token::Amount::native_whole(54_654); + let self_unbond_1_amount = token::Amount::native_whole(84_654); namada_proof_of_stake::unbond_tokens( &mut shell.wl_storage, None, @@ -4023,7 +3921,7 @@ mod test_finalize_block { ) .unwrap(); - let val_stake = namada_proof_of_stake::read_validator_stake( + let val_stake = namada_proof_of_stake::storage::read_validator_stake( &shell.wl_storage, ¶ms, &val1.address, @@ -4031,7 +3929,7 @@ mod test_finalize_block { ) .unwrap(); - let total_stake = namada_proof_of_stake::read_total_stake( + let total_stake = namada_proof_of_stake::storage::read_total_stake( &shell.wl_storage, ¶ms, current_epoch + params.pipeline_len, @@ -4054,7 +3952,7 @@ mod test_finalize_block { shell.wl_storage.storage.block.epoch, ); let (current_epoch, _) = advance_epoch(&mut shell, &pkh1, &votes, None); - println!("\nUnbonding in epoch 2"); + tracing::debug!("\nUnbonding in epoch 2"); let del_unbond_1_amount = token::Amount::native_whole(18_000); namada_proof_of_stake::unbond_tokens( &mut shell.wl_storage, @@ -4066,14 +3964,14 @@ mod test_finalize_block { ) .unwrap(); - let val_stake = namada_proof_of_stake::read_validator_stake( + let val_stake = namada_proof_of_stake::storage::read_validator_stake( &shell.wl_storage, ¶ms, &val1.address, current_epoch + params.pipeline_len, ) .unwrap(); - let total_stake = namada_proof_of_stake::read_total_stake( + let total_stake = namada_proof_of_stake::storage::read_total_stake( &shell.wl_storage, ¶ms, current_epoch + params.pipeline_len, @@ -4099,7 +3997,7 @@ mod test_finalize_block { shell.wl_storage.storage.block.epoch, ); let (current_epoch, _) = advance_epoch(&mut shell, &pkh1, &votes, None); - println!("\nBonding in epoch 3"); + tracing::debug!("\nBonding in epoch 3"); let self_bond_1_amount = token::Amount::native_whole(9_123); namada_proof_of_stake::bond_tokens( @@ -4140,7 +4038,7 @@ mod test_finalize_block { ); let (current_epoch, _) = advance_epoch(&mut shell, &pkh1, &votes, None); assert_eq!(current_epoch.0, 5_u64); - println!("Delegating in epoch 5"); + tracing::debug!("Delegating in epoch 5"); // Delegate let del_2_amount = token::Amount::native_whole(8_144); @@ -4154,7 +4052,7 @@ mod test_finalize_block { ) .unwrap(); - 
println!("Advancing to epoch 6"); + tracing::debug!("Advancing to epoch 6"); // Advance to epoch 6 let votes = get_default_true_votes( @@ -4211,19 +4109,21 @@ mod test_finalize_block { assert_eq!(enqueued_slash.r#type, SlashType::DuplicateVote); assert_eq!(enqueued_slash.rate, Dec::zero()); let last_slash = - namada_proof_of_stake::read_validator_last_slash_epoch( + namada_proof_of_stake::storage::read_validator_last_slash_epoch( &shell.wl_storage, &val1.address, ) .unwrap(); assert_eq!(last_slash, Some(misbehavior_epoch)); assert!( - namada_proof_of_stake::validator_slashes_handle(&val1.address) - .is_empty(&shell.wl_storage) - .unwrap() + namada_proof_of_stake::storage::validator_slashes_handle( + &val1.address + ) + .is_empty(&shell.wl_storage) + .unwrap() ); - println!("Advancing to epoch 7"); + tracing::debug!("Advancing to epoch 7"); // Advance to epoch 7 let (current_epoch, _) = advance_epoch(&mut shell, &pkh1, &votes, None); @@ -4279,7 +4179,7 @@ mod test_finalize_block { assert_eq!(enqueued_slashes_8.len(&shell.wl_storage).unwrap(), 2_u64); assert_eq!(enqueued_slashes_9.len(&shell.wl_storage).unwrap(), 1_u64); let last_slash = - namada_proof_of_stake::read_validator_last_slash_epoch( + namada_proof_of_stake::storage::read_validator_last_slash_epoch( &shell.wl_storage, &val1.address, ) @@ -4295,18 +4195,21 @@ mod test_finalize_block { .unwrap() ); assert!( - namada_proof_of_stake::validator_slashes_handle(&val1.address) - .is_empty(&shell.wl_storage) - .unwrap() + namada_proof_of_stake::storage::validator_slashes_handle( + &val1.address + ) + .is_empty(&shell.wl_storage) + .unwrap() ); - let pre_stake_10 = namada_proof_of_stake::read_validator_stake( - &shell.wl_storage, - ¶ms, - &val1.address, - Epoch(10), - ) - .unwrap(); + let pre_stake_10 = + namada_proof_of_stake::storage::read_validator_stake( + &shell.wl_storage, + ¶ms, + &val1.address, + Epoch(10), + ) + .unwrap(); assert_eq!( pre_stake_10, initial_stake + del_1_amount @@ -4317,7 +4220,7 @@ mod test_finalize_block { + del_2_amount ); - println!("\nNow processing the infractions\n"); + tracing::debug!("\nNow processing the infractions\n"); // Advance to epoch 9, where the infractions committed in epoch 3 will // be processed @@ -4333,14 +4236,14 @@ mod test_finalize_block { let (current_epoch, _) = advance_epoch(&mut shell, &pkh1, &votes, None); assert_eq!(current_epoch.0, 9_u64); - let val_stake_3 = namada_proof_of_stake::read_validator_stake( + let val_stake_3 = namada_proof_of_stake::storage::read_validator_stake( &shell.wl_storage, ¶ms, &val1.address, Epoch(3), ) .unwrap(); - let val_stake_4 = namada_proof_of_stake::read_validator_stake( + let val_stake_4 = namada_proof_of_stake::storage::read_validator_stake( &shell.wl_storage, ¶ms, &val1.address, @@ -4348,13 +4251,13 @@ mod test_finalize_block { ) .unwrap(); - let tot_stake_3 = namada_proof_of_stake::read_total_stake( + let tot_stake_3 = namada_proof_of_stake::storage::read_total_stake( &shell.wl_storage, ¶ms, Epoch(3), ) .unwrap(); - let tot_stake_4 = namada_proof_of_stake::read_total_stake( + let tot_stake_4 = namada_proof_of_stake::storage::read_total_stake( &shell.wl_storage, ¶ms, Epoch(4), @@ -4378,7 +4281,9 @@ mod test_finalize_block { // There should be 2 slashes processed for the validator, each with rate // equal to the cubic slashing rate let val_slashes = - namada_proof_of_stake::validator_slashes_handle(&val1.address); + namada_proof_of_stake::storage::validator_slashes_handle( + &val1.address, + ); assert_eq!(val_slashes.len(&shell.wl_storage).unwrap(), 
2u64); let is_rate_good = val_slashes .iter(&shell.wl_storage) @@ -4554,8 +4459,8 @@ mod test_finalize_block { let current_epoch = shell.wl_storage.storage.block.epoch; assert_eq!(current_epoch.0, 12_u64); - println!("\nCHECK BOND AND UNBOND DETAILS"); - let details = namada_proof_of_stake::bonds_and_unbonds( + tracing::debug!("\nCHECK BOND AND UNBOND DETAILS"); + let details = namada_proof_of_stake::queries::bonds_and_unbonds( &shell.wl_storage, None, None, @@ -4592,7 +4497,6 @@ mod test_finalize_block { del_details.bonds[0].amount, del_1_amount - del_unbond_1_amount ); - // TODO: decimal mult issues should be resolved with PR 1282 assert!( (del_details.bonds[0].slashed_amount.unwrap().change() - std::cmp::min( @@ -4614,9 +4518,6 @@ mod test_finalize_block { initial_stake - self_unbond_1_amount + self_bond_1_amount - self_unbond_2_amount ); - // TODO: not sure why this is correct??? (with + self_bond_1_amount - - // self_unbond_2_amount) - // TODO: Make sure this is sound and what we expect assert!( (self_details.bonds[0].slashed_amount.unwrap().change() - (std::cmp::min( @@ -4673,7 +4574,7 @@ mod test_finalize_block { assert_eq!(self_details.unbonds[2].amount, self_bond_1_amount); assert_eq!(self_details.unbonds[2].slashed_amount, None); - println!("\nWITHDRAWING DELEGATION UNBOND"); + tracing::debug!("\nWITHDRAWING DELEGATION UNBOND"); // let slash_pool_balance_pre_withdraw = slash_pool_balance; // Withdraw the delegation unbonds, which total to 18_000. This should // only be affected by the slashes in epoch 3 @@ -4745,180 +4646,6 @@ mod test_finalize_block { Ok(()) } - #[test] - fn test_purge_validator_information() -> storage_api::Result<()> { - // Setup the network with pipeline_len = 2, unbonding_len = 4 - let num_validators = 4_u64; - let (mut shell, _recv, _, _) = setup_with_cfg(SetupCfg { - last_height: 0, - num_validators, - ..Default::default() - }); - let mut params = read_pos_params(&shell.wl_storage).unwrap(); - params.owned.unbonding_len = 4; - // params.owned.max_validator_slots = 3; - // params.owned.validator_stake_threshold = token::Amount::zero(); - write_pos_params(&mut shell.wl_storage, ¶ms.owned)?; - - let max_proposal_period = params.max_proposal_period; - let default_past_epochs = 2; - let consensus_val_set_len = max_proposal_period + default_past_epochs; - - let consensus_val_set = - namada_proof_of_stake::consensus_validator_set_handle(); - // let below_cap_val_set = - // namada_proof_of_stake::below_capacity_validator_set_handle(); - let validator_positions = - namada_proof_of_stake::validator_set_positions_handle(); - let all_validator_addresses = - namada_proof_of_stake::validator_addresses_handle(); - - let consensus_set: Vec = - read_consensus_validator_set_addresses_with_stake( - &shell.wl_storage, - Epoch::default(), - ) - .unwrap() - .into_iter() - .collect(); - let val1 = consensus_set[0].clone(); - let pkh1 = get_pkh_from_address( - &shell.wl_storage, - ¶ms, - val1.address, - Epoch::default(), - ); - - // Finalize block 1 - next_block_for_inflation(&mut shell, pkh1.to_vec(), vec![], None); - - let votes = get_default_true_votes(&shell.wl_storage, Epoch::default()); - assert!(!votes.is_empty()); - - let check_is_data = |storage: &WlStorage<_, _>, - start: Epoch, - end: Epoch| { - for ep in Epoch::iter_bounds_inclusive(start, end) { - assert!(!consensus_val_set.at(&ep).is_empty(storage).unwrap()); - // assert!(!below_cap_val_set.at(&ep).is_empty(storage). 
- // unwrap()); - assert!( - !validator_positions.at(&ep).is_empty(storage).unwrap() - ); - assert!( - !all_validator_addresses.at(&ep).is_empty(storage).unwrap() - ); - } - }; - - // Check that there is validator data for epochs 0 - pipeline_len - check_is_data(&shell.wl_storage, Epoch(0), Epoch(params.pipeline_len)); - - // Advance to epoch `default_past_epochs` - let mut current_epoch = Epoch(0); - for _ in 0..default_past_epochs { - let votes = get_default_true_votes( - &shell.wl_storage, - shell.wl_storage.storage.block.epoch, - ); - current_epoch = advance_epoch(&mut shell, &pkh1, &votes, None).0; - } - assert_eq!(shell.wl_storage.storage.block.epoch.0, default_past_epochs); - assert_eq!(current_epoch.0, default_past_epochs); - - check_is_data( - &shell.wl_storage, - Epoch(0), - Epoch(params.pipeline_len + default_past_epochs), - ); - - // Advance one more epoch, which should purge the data for epoch 0 in - // everything except the consensus validator set - let votes = get_default_true_votes( - &shell.wl_storage, - shell.wl_storage.storage.block.epoch, - ); - current_epoch = advance_epoch(&mut shell, &pkh1, &votes, None).0; - assert_eq!(current_epoch.0, default_past_epochs + 1); - - check_is_data( - &shell.wl_storage, - Epoch(1), - Epoch(params.pipeline_len + default_past_epochs + 1), - ); - assert!( - !consensus_val_set - .at(&Epoch(0)) - .is_empty(&shell.wl_storage) - .unwrap() - ); - assert!( - validator_positions - .at(&Epoch(0)) - .is_empty(&shell.wl_storage) - .unwrap() - ); - assert!( - all_validator_addresses - .at(&Epoch(0)) - .is_empty(&shell.wl_storage) - .unwrap() - ); - - // Advance to the epoch `consensus_val_set_len` + 1 - loop { - assert!( - !consensus_val_set - .at(&Epoch(0)) - .is_empty(&shell.wl_storage) - .unwrap() - ); - let votes = get_default_true_votes( - &shell.wl_storage, - shell.wl_storage.storage.block.epoch, - ); - current_epoch = advance_epoch(&mut shell, &pkh1, &votes, None).0; - if current_epoch.0 == consensus_val_set_len + 1 { - break; - } - } - - assert!( - consensus_val_set - .at(&Epoch(0)) - .is_empty(&shell.wl_storage) - .unwrap() - ); - - // Advance one more epoch - let votes = get_default_true_votes( - &shell.wl_storage, - shell.wl_storage.storage.block.epoch, - ); - current_epoch = advance_epoch(&mut shell, &pkh1, &votes, None).0; - for ep in Epoch::default().iter_range(2) { - assert!( - consensus_val_set - .at(&ep) - .is_empty(&shell.wl_storage) - .unwrap() - ); - } - for ep in Epoch::iter_bounds_inclusive( - Epoch(2), - current_epoch + params.pipeline_len, - ) { - assert!( - !consensus_val_set - .at(&ep) - .is_empty(&shell.wl_storage) - .unwrap() - ); - } - - Ok(()) - } - #[test] fn test_jail_validator_for_inactivity() -> storage_api::Result<()> { let num_validators = 5_u64; @@ -4952,13 +4679,14 @@ mod test_finalize_block { Epoch::default(), ); - let validator_stake = namada_proof_of_stake::read_validator_stake( - &shell.wl_storage, - ¶ms, - &val2, - Epoch::default(), - ) - .unwrap(); + let validator_stake = + namada_proof_of_stake::storage::read_validator_stake( + &shell.wl_storage, + ¶ms, + &val2, + Epoch::default(), + ) + .unwrap(); let val3 = initial_consensus_set[2].clone(); let val4 = initial_consensus_set[3].clone(); @@ -5263,7 +4991,8 @@ mod test_finalize_block { misbehaviors: Option>, ) -> (Epoch, token::Amount) { let current_epoch = shell.wl_storage.storage.block.epoch; - let staking_token = staking_token_address(&shell.wl_storage); + let staking_token = + namada_proof_of_stake::staking_token_address(&shell.wl_storage); // NOTE: 
assumed that the only change in pos address balance by // advancing to the next epoch is minted inflation - no change occurs diff --git a/apps/src/lib/node/ledger/shell/governance.rs b/apps/src/lib/node/ledger/shell/governance.rs index 0bbdac54ce..4991310d09 100644 --- a/apps/src/lib/node/ledger/shell/governance.rs +++ b/apps/src/lib/node/ledger/shell/governance.rs @@ -19,8 +19,9 @@ use namada::ledger::protocol; use namada::ledger::storage::types::encode; use namada::ledger::storage::{DBIter, StorageHasher, DB}; use namada::ledger::storage_api::{pgf, token, StorageWrite}; +use namada::proof_of_stake::bond_amount; use namada::proof_of_stake::parameters::PosParams; -use namada::proof_of_stake::{bond_amount, read_total_stake}; +use namada::proof_of_stake::storage::read_total_stake; use namada::proto::{Code, Data}; use namada::types::address::Address; use namada::types::storage::Epoch; diff --git a/apps/src/lib/node/ledger/shell/init_chain.rs b/apps/src/lib/node/ledger/shell/init_chain.rs index c50dfd67d1..074fe132ad 100644 --- a/apps/src/lib/node/ledger/shell/init_chain.rs +++ b/apps/src/lib/node/ledger/shell/init_chain.rs @@ -1,7 +1,11 @@ //! Implementation of chain initialization for the Shell use std::collections::HashMap; -use std::hash::Hash; +use std::ops::ControlFlow; +use masp_primitives::merkle_tree::CommitmentTree; +use masp_primitives::sapling::Node; +use masp_proofs::bls12_381; +use namada::core::types::storage::KeySeg; use namada::ledger::parameters::Parameters; use namada::ledger::storage::traits::StorageHasher; use namada::ledger::storage::{DBIter, DB}; @@ -9,10 +13,14 @@ use namada::ledger::storage_api::token::{credit_tokens, write_denom}; use namada::ledger::storage_api::StorageWrite; use namada::ledger::{ibc, pos}; use namada::proof_of_stake::BecomeValidator; -use namada::types::address::Address; +use namada::types::address::{Address, MASP}; use namada::types::hash::Hash as CodeHash; use namada::types::key::*; use namada::types::time::{DateTimeUtc, TimeZone, Utc}; +use namada::types::token::{ + MASP_CONVERT_ANCHOR_KEY, MASP_NOTE_COMMITMENT_ANCHOR_PREFIX, + MASP_NOTE_COMMITMENT_TREE_KEY, +}; use namada::vm::validate_untrusted_wasm; use namada_sdk::eth_bridge::EthBridgeStatus; use namada_sdk::proof_of_stake::PosParams; @@ -30,6 +38,45 @@ use crate::facade::tendermint::v0_37::abci::{request, response}; use crate::facade::tendermint_proto::google::protobuf; use crate::wasm_loader; +/// Errors that represent panics in normal flow but get demoted to errors +/// when dry-running genesis files in order to accumulate as many problems +/// as possible in a report. +#[derive(Error, Debug, Clone, PartialEq)] +enum Panic { + #[error( + "No VP found matching the expected implicit VP sha256 hash: \ + {0}\n(this will be `None` if no wasm file was found for the implicit \ + vp)" + )] + MissingImplicitVP(String), + #[error("Missing validity predicate for {0}")] + MissingVpWasmConfig(String), + #[error("Could not find checksums.json file")] + ChecksumsFile, + #[error("Invalid wasm code sha256 hash for {0}")] + Checksum(String), + #[error( + "Config for token '{0}' with configured balance not found in genesis" + )] + MissingTokenConfig(String), + #[error("Failed to read wasm {0} with reason: {1}")] + ReadingWasm(String, String), +} + +/// Warnings generated by problems in genesis files. 
+#[derive(Error, Debug, PartialEq)] +enum Warning { + #[error("The wasm {0} isn't whitelisted.")] + WhitelistedWasm(String), + #[error("Genesis init genesis validator tx for {0} failed with {1}.")] + Validator(String, String), + #[error( + "Genesis bond by {0} to validator {1} of {2} NAM failed with reason: \ + {3}" + )] + FailedBond(String, String, token::DenominatedAmount, String), +} + impl<D, H> Shell<D, H> where D: DB + for<'iter> DBIter<'iter> + Sync + 'static, @@ -73,7 +120,7 @@ where ))] let genesis = { let chain_dir = self.base_dir.join(chain_id); - genesis::make_dev_genesis(_num_validators, chain_dir) + genesis::make_dev_genesis(_num_validators, &chain_dir) }; #[cfg(all( any(test, feature = "benches"), @@ -84,7 +131,77 @@ where let native_token = genesis.get_native_token().clone(); self.wl_storage.storage.native_token = native_token; } + let mut validation = InitChainValidation::new(self, false); + validation.run( + init, + genesis, + #[cfg(any(test, feature = "testing"))] + _num_validators, + ); + // propagate errors or panic + validation.error_out()?; + + // Init masp commitment tree and anchor + let empty_commitment_tree: CommitmentTree<Node> = + CommitmentTree::empty(); + let anchor = empty_commitment_tree.root(); + let note_commitment_tree_key = Key::from(MASP.to_db_key()) + .push(&MASP_NOTE_COMMITMENT_TREE_KEY.to_owned()) + .expect("Cannot obtain a storage key"); + self.wl_storage + .write(&note_commitment_tree_key, empty_commitment_tree) + .unwrap(); + let commitment_tree_anchor_key = Key::from(MASP.to_db_key()) + .push(&MASP_NOTE_COMMITMENT_ANCHOR_PREFIX.to_owned()) + .expect("Cannot obtain a storage key") + .push(&namada::core::types::hash::Hash( + bls12_381::Scalar::from(anchor).to_bytes(), + )) + .expect("Cannot obtain a storage key"); + self.wl_storage + .write(&commitment_tree_anchor_key, ()) + .unwrap(); + + // Init masp convert anchor + let convert_anchor_key = Key::from(MASP.to_db_key()) + .push(&MASP_CONVERT_ANCHOR_KEY.to_owned()) + .expect("Cannot obtain a storage key"); + self.wl_storage.write( + &convert_anchor_key, + namada::core::types::hash::Hash( + bls12_381::Scalar::from( + self.wl_storage.storage.conversion_state.tree.root(), + ) + .to_bytes(), + ), + )?; + + // Set the initial validator set + response.validators = self + .get_abci_validator_updates(true, |pk, power| { + let pub_key: crate::facade::tendermint::PublicKey = pk.into(); + let power = + crate::facade::tendermint::vote::Power::try_from(power) + .unwrap(); + validator::Update { pub_key, power } + }) + .expect("Must be able to set genesis validator set"); + debug_assert!(!response.validators.is_empty()); + Ok(response) + } +} +impl<'shell, D, H> InitChainValidation<'shell, D, H> +where + D: DB + for<'iter> DBIter<'iter> + Sync + 'static, + H: StorageHasher + Sync + 'static, +{ + pub fn run( + &mut self, + init: request::InitChain, + genesis: genesis::chain::Finalized, + #[cfg(any(test, feature = "testing"))] _num_validators: u64, + ) -> ControlFlow<()> { let ts: protobuf::Timestamp = init.time.into(); let initial_height = init .initial_height @@ -100,11 +217,11 @@ where // Initialize protocol parameters let parameters = genesis.get_chain_parameters(&self.wasm_dir); self.store_wasms(&parameters)?; - parameters.init_storage(&mut self.wl_storage)?; + parameters.init_storage(&mut self.wl_storage).unwrap(); // Initialize governance parameters let gov_params = genesis.get_gov_params(); - gov_params.init_storage(&mut self.wl_storage)?; + gov_params.init_storage(&mut self.wl_storage).unwrap(); // configure the Ethereum bridge
if the configuration is set. if let Some(config) = genesis.get_eth_bridge_params() { @@ -169,39 +286,49 @@ where .expect("Must be able to copy PoS genesis validator sets"); ibc::init_genesis_storage(&mut self.wl_storage); - - // Set the initial validator set - response.validators = self - .get_abci_validator_updates(true, |pk, power| { - let pub_key: crate::facade::tendermint::PublicKey = pk.into(); - let power = - crate::facade::tendermint::vote::Power::try_from(power) - .unwrap(); - validator::Update { pub_key, power } - }) - .expect("Must be able to set genesis validator set"); - debug_assert!(!response.validators.is_empty()); - Ok(response) + ControlFlow::Continue(()) } /// Look-up WASM code of a genesis VP by its name fn lookup_vp( - &self, + &mut self, name: &str, genesis: &genesis::chain::Finalized, vp_cache: &mut HashMap>, - ) -> Vec { - let config = - genesis.vps.wasm.get(name).unwrap_or_else(|| { - panic!("Missing validity predicate for {name}") - }); - let vp_filename = &config.filename; - vp_cache.get_or_insert_with(vp_filename.clone(), || { - wasm_loader::read_wasm(&self.wasm_dir, vp_filename).unwrap() - }) + ) -> ControlFlow<(), Vec> { + use std::collections::hash_map::Entry; + let Some(vp_filename) = + self.validate( + genesis + .vps + .wasm + .get(name) + .map(|conf| conf.filename.clone()) + .ok_or_else(|| { + Panic::MissingVpWasmConfig(name.to_string()) + })) + .or_placeholder(None)? else { + return self.proceed_with(vec![]); + }; + let code = match vp_cache.entry(vp_filename.clone()) { + Entry::Occupied(o) => o.get().clone(), + Entry::Vacant(v) => { + let code = self + .validate( + wasm_loader::read_wasm(&self.wasm_dir, &vp_filename) + .map_err(|e| { + Panic::ReadingWasm(vp_filename, e.to_string()) + }), + ) + .or_placeholder(Some(vec![]))? + .unwrap(); + v.insert(code).clone() + } + }; + self.proceed_with(code) } - fn store_wasms(&mut self, params: &Parameters) -> Result<()> { + fn store_wasms(&mut self, params: &Parameters) -> ControlFlow<()> { let Parameters { tx_whitelist, vp_whitelist, @@ -209,31 +336,64 @@ where .. } = params; let mut is_implicit_vp_stored = false; - let checksums = wasm_loader::Checksums::read_checksums(&self.wasm_dir); + + let Some(checksums) = self.validate( + wasm_loader::Checksums::read_checksums(&self.wasm_dir) + .map_err(|_| Panic::ChecksumsFile) + ).or_placeholder(None)? else { + return self.proceed_with(()); + }; + for (name, full_name) in checksums.0.iter() { - let code = wasm_loader::read_wasm(&self.wasm_dir, name) - .map_err(Error::ReadingWasm)?; + let code = self + .validate( + wasm_loader::read_wasm(&self.wasm_dir, name) + .map_err(Error::ReadingWasm), + ) + .or_placeholder(Some(vec![]))? + .unwrap(); + let code_hash = CodeHash::sha256(&code); - let code_len = u64::try_from(code.len()) - .map_err(|e| Error::LoadingWasm(e.to_string()))?; + let code_len = self + .validate( + u64::try_from(code.len()) + .map_err(|e| Error::LoadingWasm(e.to_string())), + ) + .or_placeholder(Some(1))? + .unwrap(); let elements = full_name.split('.').collect::>(); - let checksum = elements.get(1).ok_or_else(|| { - Error::LoadingWasm(format!("invalid full name: {}", full_name)) - })?; - assert_eq!( - code_hash.to_string(), - checksum.to_uppercase(), - "Invalid wasm code sha256 hash for {}", - name - ); + let checksum = self + .validate( + elements + .get(1) + .map(|c| c.to_string().to_uppercase()) + .ok_or_else(|| { + Error::LoadingWasm(format!( + "invalid full name: {}", + full_name + )) + }), + ) + .or_placeholder(Some(code_hash.to_string()))? 
+ .unwrap(); + + self.validate(if checksum == code_hash.to_string() { + Ok(()) + } else { + Err(Panic::Checksum(name.to_string())) + }) + .or_placeholder(None)?; if (tx_whitelist.is_empty() && vp_whitelist.is_empty()) || tx_whitelist.contains(&code_hash.to_string().to_lowercase()) || vp_whitelist.contains(&code_hash.to_string().to_lowercase()) { - validate_untrusted_wasm(&code) - .map_err(|e| Error::LoadingWasm(e.to_string()))?; + self.validate( + validate_untrusted_wasm(&code) + .map_err(|e| Error::LoadingWasm(e.to_string())), + ) + .or_placeholder(None)?; #[cfg(not(test))] if name.starts_with("tx_") { @@ -247,25 +407,32 @@ where let hash_key = Key::wasm_hash(name); let code_name_key = Key::wasm_code_name(name.to_owned()); - self.wl_storage.write_bytes(&code_key, code)?; - self.wl_storage.write(&code_len_key, code_len)?; - self.wl_storage.write_bytes(&hash_key, code_hash)?; - if &code_hash == implicit_vp_code_hash { + self.wl_storage.write_bytes(&code_key, code).unwrap(); + self.wl_storage.write(&code_len_key, code_len).unwrap(); + self.wl_storage.write_bytes(&hash_key, code_hash).unwrap(); + if &Some(code_hash) == implicit_vp_code_hash { is_implicit_vp_stored = true; } - self.wl_storage.write_bytes(&code_name_key, code_hash)?; + self.wl_storage + .write_bytes(&code_name_key, code_hash) + .unwrap(); } else { tracing::warn!("The wasm {name} isn't whitelisted."); + self.warn(Warning::WhitelistedWasm(name.to_string())); } } // check if implicit_vp wasm is stored - assert!( - is_implicit_vp_stored, - "No VP found matching the expected implicit VP sha256 hash: {}", - implicit_vp_code_hash - ); - Ok(()) + if !is_implicit_vp_stored { + self.register_err(Panic::MissingImplicitVP( + match implicit_vp_code_hash { + None => "None".to_string(), + Some(h) => h.to_string(), + }, + )); + } + + self.proceed_with(()) } /// Init genesis token accounts @@ -294,16 +461,24 @@ where } /// Init genesis token balances - fn init_token_balances(&mut self, genesis: &genesis::chain::Finalized) { + fn init_token_balances( + &mut self, + genesis: &genesis::chain::Finalized, + ) -> ControlFlow<()> { for (token_alias, TokenBalances(balances)) in &genesis.balances.token { tracing::debug!("Initializing token balances {token_alias}"); - let token_address = &genesis + let Some(token_address) = self.validate(genesis .tokens .token .get(token_alias) - .expect("Token with configured balance not found in genesis.") - .address; + .ok_or_else(|| Panic::MissingTokenConfig(token_alias.to_string())) + .map(|conf| &conf.address) + ) + .or_placeholder(None)? 
else { + continue + }; + let mut total_token_balance = token::Amount::zero(); for (owner, balance) in balances { if let genesis::GenesisAddress::PublicKey(pk) = owner { @@ -325,10 +500,10 @@ where &mut self.wl_storage, token_address, &owner.address(), - balance.amount, + balance.amount(), ) .expect("Couldn't credit initial balance"); - total_token_balance += balance.amount; + total_token_balance += balance.amount(); } // Write the total amount of tokens for the ratio self.wl_storage @@ -338,6 +513,7 @@ where ) .unwrap(); } + self.proceed_with(()) } /// Apply genesis txs to initialize established accounts @@ -345,7 +521,7 @@ where &mut self, genesis: &genesis::chain::Finalized, vp_cache: &mut HashMap>, - ) { + ) -> ControlFlow<()> { if let Some(txs) = genesis.transactions.established_account.as_ref() { for FinalizedEstablishedAccountTx { address, @@ -361,7 +537,7 @@ where "Applying genesis tx to init an established account \ {address}" ); - let vp_code = self.lookup_vp(vp, genesis, vp_cache); + let vp_code = self.lookup_vp(vp, genesis, vp_cache)?; let code_hash = CodeHash::sha256(&vp_code); self.wl_storage .write_bytes(&Key::validity_predicate(address), code_hash) @@ -378,6 +554,7 @@ where .unwrap(); } } + self.proceed_with(()) } /// Apply genesis txs to initialize validator accounts @@ -387,7 +564,7 @@ where vp_cache: &mut HashMap>, params: &PosParams, current_epoch: namada::types::storage::Epoch, - ) { + ) -> ControlFlow<()> { if let Some(txs) = genesis.transactions.validator_account.as_ref() { for FinalizedValidatorAccountTx { tx: @@ -417,7 +594,7 @@ where "Applying genesis tx to init a validator account {address}" ); - let vp_code = self.lookup_vp(vp, genesis, vp_cache); + let vp_code = self.lookup_vp(vp, genesis, vp_cache)?; let code_hash = CodeHash::sha256(&vp_code); self.wl_storage .write_bytes(&Key::validity_predicate(address), code_hash) @@ -449,10 +626,15 @@ where "Genesis init genesis validator tx for {address} \ failed with {err}. Skipping." ); + self.warn(Warning::Validator( + address.to_string(), + err.to_string(), + )); continue; } } } + self.proceed_with(()) } /// Apply genesis txs to transfer tokens @@ -476,13 +658,19 @@ where &mut self.wl_storage, Some(&source.address()), validator, - amount.amount, + amount.amount(), current_epoch, Some(0), ) { tracing::warn!( "Genesis bond tx failed with: {err}. Skipping." ); + self.warn(Warning::FailedBond( + source.to_string(), + validator.to_string(), + *amount, + err.to_string(), + )); continue; }; } @@ -490,37 +678,284 @@ where } } -trait HashMapExt +/// A helper struct to accumulate errors in genesis files while +/// attempting to initialize the ledger +#[derive(Debug)] +pub struct InitChainValidation<'shell, D, H> where - K: Eq + Hash, - V: Clone, + D: DB + for<'iter> DBIter<'iter> + Sync + 'static, + H: StorageHasher + Sync + 'static, { - /// Inserts a value computed from `f` into the map if the given `key` is not - /// present, then returns a clone of the value from the map. - fn get_or_insert_with(&mut self, key: K, f: impl FnOnce() -> V) -> V; + /// Errors that can be encountered while initializing chain + /// and are propagated up the stack in normal flow. Ultimately + /// these are reported back to Comet BFT + errors: Vec, + /// Errors that cause `init_chain` to panic in normal flow but are not + /// `expect` calls, so they could reasonably occur. These are demoted + /// to errors while validating correctness of genesis files pre-network + /// launch. 
+ panics: Vec, + /// Events that should not occur but would not prevent the chain from + /// being successfully initialized. However, we don't reasonably expect + /// to get any as these are checked as part of validating genesis + /// templates. + warnings: Vec, + dry_run: bool, + shell: &'shell mut Shell, } -impl HashMapExt for HashMap +impl<'shell, D, H> std::ops::Deref for InitChainValidation<'shell, D, H> where - K: Eq + Hash, - V: Clone, + D: DB + for<'iter> DBIter<'iter> + Sync + 'static, + H: StorageHasher + Sync + 'static, { - fn get_or_insert_with(&mut self, key: K, f: impl FnOnce() -> V) -> V { - use std::collections::hash_map::Entry; - match self.entry(key) { - Entry::Occupied(o) => o.get().clone(), - Entry::Vacant(v) => v.insert(f()).clone(), + type Target = Shell; + + fn deref(&self) -> &Self::Target { + self.shell + } +} + +impl<'shell, D, H> std::ops::DerefMut for InitChainValidation<'shell, D, H> +where + D: DB + for<'iter> DBIter<'iter> + Sync + 'static, + H: StorageHasher + Sync + 'static, +{ + fn deref_mut(&mut self) -> &mut Self::Target { + self.shell + } +} + +impl<'shell, D, H> InitChainValidation<'shell, D, H> +where + D: DB + for<'iter> DBIter<'iter> + Sync + 'static, + H: StorageHasher + Sync + 'static, +{ + pub fn new( + shell: &'shell mut Shell, + dry_run: bool, + ) -> InitChainValidation { + Self { + shell, + errors: vec![], + panics: vec![], + warnings: vec![], + dry_run, + } + } + + pub fn run_validation( + &mut self, + chain_id: String, + genesis: config::genesis::chain::Finalized, + ) { + use crate::facade::tendermint::block::Size; + use crate::facade::tendermint::consensus::params::ValidatorParams; + use crate::facade::tendermint::consensus::Params; + use crate::facade::tendermint::evidence::{ + Duration, Params as Evidence, + }; + use crate::facade::tendermint::time::Time; + + // craft a request to initialize the chain + let init = request::InitChain { + time: Time::now(), + chain_id, + consensus_params: Params { + block: Size { + max_bytes: 0, + max_gas: 0, + time_iota_ms: 0, + }, + evidence: Evidence { + max_age_num_blocks: 0, + max_age_duration: Duration(Default::default()), + max_bytes: 0, + }, + validator: ValidatorParams { + pub_key_types: vec![], + }, + version: None, + abci: Default::default(), + }, + validators: vec![], + app_state_bytes: Default::default(), + initial_height: 0u32.into(), + }; + self.run( + init, + genesis, + #[cfg(any(test, feature = "testing"))] + 1, + ); + } + + /// Print out a report of errors encountered while dry-running + /// genesis files + pub fn report(&self) { + use color_eyre::owo_colors::{OwoColorize, Style}; + let separator: String = ["="; 60].into_iter().collect(); + println!( + "\n\n{}\n{}\n{}\n\n", + separator, + "Report".bold().underline(), + separator + ); + if self.errors.is_empty() + && self.panics.is_empty() + && self.warnings.is_empty() + { + println!( + "{}\n", + "Genesis files were dry-run successfully" + .bright_green() + .underline() + ); + return; + } + + if !self.warnings.is_empty() { + println!("{}\n\n", "Warnings".yellow().underline()); + let warnings = Style::new().yellow(); + for warning in &self.warnings { + println!("{}\n", warning.to_string().style(warnings)); + } + } + + if !self.errors.is_empty() { + println!("{}\n\n", "Errors".magenta().underline()); + let errors = Style::new().magenta(); + for error in &self.errors { + println!("{}\n", error.to_string().style(errors)); + } + } + + if !self.panics.is_empty() { + println!("{}\n\n", "Panics".bright_red().underline()); + let panics = 
Style::new().bright_red(); + for panic in &self.panics { + println!("{}\n", panic.to_string().style(panics)); + } + } + } + + /// Add a warning + fn warn(&mut self, warning: Warning) { + self.warnings.push(warning); + } + + /// Categorize an error as normal or something that would panic. + fn register_err<E: Into<ErrorType>>(&mut self, err: E) { + match err.into() { + ErrorType::Runtime(e) => self.errors.push(e), + ErrorType::DryRun(e) => self.panics.push(e), + } + } + + /// Categorize the error encountered and return a handle to allow + the code to specify how to proceed. + fn validate<T, E>(&mut self, res: std::result::Result<T, E>) -> Policy<T> + where + E: Into<ErrorType>, + { + match res { + Ok(data) => Policy { + result: Some(data), + dry_run: self.dry_run, + }, + Err(e) => { + self.register_err(e); + Policy { + result: None, + dry_run: self.dry_run, + } + } + } + } + + /// Check if any errors have been encountered + fn is_ok(&self) -> bool { + self.errors.is_empty() && self.panics.is_empty() + } + + /// This should only be called after checking that `is_ok` returned false. + fn error_out(mut self) -> Result<()> { + if self.is_ok() { + return Ok(()); + } + if !self.panics.is_empty() { + panic!( + "Namada ledger failed to initialize due to: {}", + self.panics.remove(0) + ); + } else { + Err(self.errors.remove(0)) + } + } + + /// Used to indicate to the functions up the stack to begin panicking + /// if not dry running a genesis file + fn proceed_with<T>(&self, value: T) -> ControlFlow<(), T> { + if self.dry_run || self.is_ok() { + ControlFlow::Continue(value) + } else { + ControlFlow::Break(()) + } + } } -#[cfg(test)] +enum ErrorType { + Runtime(Error), + DryRun(Panic), +} + +impl From<Error> for ErrorType { + fn from(err: Error) -> Self { + Self::Runtime(err) + } +} + +impl From<Panic> for ErrorType { + fn from(err: Panic) -> Self { + Self::DryRun(err) + } +} + +/// Changes the control flow of `init_chain` depending on whether +/// or not it is a dry-run. If so, errors / panics are accumulated to make +/// a report rather than immediately exiting. +struct Policy<T> { + result: Option<T>, + dry_run: bool, +} + +impl<T> Policy<T> { + /// A default value to return if an error / panic is encountered + /// during a dry-run. This allows `init_chain` to continue. + fn or_placeholder(self, value: Option<T>) -> ControlFlow<(), Option<T>> { + if let Some(data) = self.result { + ControlFlow::Continue(Some(data)) + } else if self.dry_run { + ControlFlow::Continue(value) + } else { + ControlFlow::Break(()) + } + } +} + +#[cfg(all(test, not(feature = "integration")))] mod test { use std::collections::BTreeMap; + use std::str::FromStr; + use namada::core::types::string_encoding::StringEncoded; use namada::ledger::storage::DBIter; + use namada_sdk::wallet::alias::Alias; + use super::*; + use crate::config::genesis::{transactions, GenesisAddress}; use crate::node::ledger::shell::test_utils::{self, TestShell}; + use crate::wallet::defaults; /// Test that the init-chain handler never commits changes directly to the /// DB. @@ -553,4 +988,218 @@ mod test { storage_state.iter(), ); } + + /// Tests validation works properly on `lookup_vp`. + /// This function can fail if + /// * the wasm requested has no config in the genesis files + /// * cannot be read from disk.
+ #[test] + fn test_dry_run_lookup_vp() { + let (mut shell, _x, _y, _z) = TestShell::new_at_height(0); + shell.wasm_dir = PathBuf::new(); + let mut genesis = genesis::make_dev_genesis(1, &shell.base_dir); + let mut initializer = InitChainValidation::new(&mut shell, true); + + let mut vp_cache = HashMap::new(); + let code = initializer.lookup_vp("vp_user", &genesis, &mut vp_cache); + assert_eq!(code, ControlFlow::Continue(vec![])); + assert_eq!( + *vp_cache.get("vp_user.wasm").expect("Test failed"), + Vec::::new() + ); + let [Panic::ReadingWasm(_, _)]: [Panic; 1] = initializer.panics + .clone() + .try_into() + .expect("Test failed") else { + panic!("Test failed") + }; + + initializer.panics.clear(); + genesis.vps.wasm.remove("vp_user").expect("Test failed"); + let code = initializer.lookup_vp("vp_user", &genesis, &mut vp_cache); + assert_eq!(code, ControlFlow::Continue(vec![])); + let [Panic::MissingVpWasmConfig(_)]: [Panic; 1] = initializer.panics + .clone() + .try_into() + .expect("Test failed") else { + panic!("Test failed") + }; + } + + /// Test validation of `store_wasms`. + /// This can fail if + /// * The checksums file cannot be found. + /// * A wasm file in the checksums file cannot be read from disk + /// * A checksum entry is invalid + /// * A wasm's code hash does not match it's checksum entry + /// * the wasm isn't whitelisted + /// * no vp_implicit wasm is stored + #[test] + fn test_dry_run_store_wasms() { + let (mut shell, _x, _y, _z) = TestShell::new_at_height(0); + let test_dir = tempfile::tempdir().unwrap(); + shell.wasm_dir = test_dir.path().into(); + + let genesis = genesis::make_dev_genesis(1, &shell.base_dir); + let mut initializer = InitChainValidation::new(&mut shell, true); + + let res = initializer + .store_wasms(&genesis.get_chain_parameters(PathBuf::new())); + assert_eq!(res, ControlFlow::Continue(())); + let expected = vec![Panic::ChecksumsFile]; + assert_eq!(expected, initializer.panics); + initializer.panics.clear(); + + let checksums_file = test_dir.path().join("checksums.json"); + std::fs::write( + &checksums_file, + r#"{ + "tx_get_rich.wasm": "tx_get_rich.moneymoneymoney" + }"#, + ) + .expect("Test failed"); + let res = initializer + .store_wasms(&genesis.get_chain_parameters(test_dir.path())); + assert_eq!(res, ControlFlow::Continue(())); + let errors = initializer.errors.iter().collect::>(); + let [ + Error::ReadingWasm(_), + Error::LoadingWasm(_), + ]: [&Error; 2] = errors.try_into().expect("Test failed") else { + panic!("Test failed"); + }; + let expected_panics = vec![ + Panic::Checksum("tx_get_rich.wasm".into()), + Panic::MissingImplicitVP("None".into()), + ]; + assert_eq!(initializer.panics, expected_panics); + + initializer.panics.clear(); + initializer.errors.clear(); + + std::fs::write( + checksums_file, + r#"{ + "tx_stuff.wasm": "tx_stuff" + }"#, + ) + .expect("Test failed"); + let res = initializer + .store_wasms(&genesis.get_chain_parameters(test_dir.path())); + assert_eq!(res, ControlFlow::Continue(())); + let errors = initializer.errors.iter().collect::>(); + let [ + Error::ReadingWasm(_), + Error::LoadingWasm(_), + Error::LoadingWasm(_), + ]: [&Error; 3] = errors.try_into().expect("Test failed") else { + panic!("Test failed"); + }; + let expected_panics = vec![Panic::MissingImplicitVP("None".into())]; + assert_eq!(initializer.panics, expected_panics); + } + + /// Test validation of `init_token_balance`. + /// This can fail if a token alias with no + /// corresponding config is encountered. 
+ #[test] + fn test_dry_run_init_token_balance() { + let (mut shell, _x, _y, _z) = TestShell::new_at_height(0); + shell.wasm_dir = PathBuf::new(); + let mut genesis = genesis::make_dev_genesis(1, &shell.base_dir); + let mut initializer = InitChainValidation::new(&mut shell, true); + let token_alias = Alias::from_str("apfel").unwrap(); + genesis + .tokens + .token + .remove(&token_alias) + .expect("Test failed"); + let res = initializer.init_token_balances(&genesis); + assert_eq!(res, ControlFlow::Continue(())); + let [Panic::MissingTokenConfig(_)]: [Panic; 1] = initializer.panics + .clone() + .try_into() + .expect("Test failed") else { + panic!("Test failed") + }; + } + + /// Test validation of `apply_genesis_txs_bonds` + /// This can fail for + /// * insufficient funds + /// * bonding to a non-validator + #[test] + fn test_dry_run_genesis_bonds() { + let (mut shell, _x, _y, _z) = TestShell::new_at_height(0); + shell.wasm_dir = PathBuf::new(); + let mut genesis = genesis::make_dev_genesis(1, &shell.base_dir); + let mut initializer = InitChainValidation::new(&mut shell, true); + let default_addresses: HashMap = + defaults::addresses().into_iter().collect(); + let albert_address = if let Some(Address::Established(albert)) = + default_addresses.get(&Alias::from_str("albert").unwrap()) + { + albert.clone() + } else { + panic!("Test failed") + }; + // Initialize governance parameters + let gov_params = genesis.get_gov_params(); + gov_params + .init_storage(&mut initializer.wl_storage) + .unwrap(); + // PoS system depends on epoch being initialized + let pos_params = genesis.get_pos_params(); + let (current_epoch, _gas) = + initializer.wl_storage.storage.get_current_epoch(); + pos::namada_proof_of_stake::init_genesis( + &mut initializer.wl_storage, + &pos_params, + current_epoch, + ) + .expect("Must be able to initialize PoS genesis storage"); + + genesis.transactions.bond = Some(vec![transactions::BondTx { + source: GenesisAddress::EstablishedAddress(albert_address.clone()), + validator: defaults::albert_address(), + amount: token::DenominatedAmount::new( + token::Amount::from_uint(1, 6).unwrap(), + 6.into(), + ), + }]); + + // bonds should fail since no balances have been initialized + let albert_address_str = StringEncoded::new(albert_address).to_string(); + initializer.apply_genesis_txs_bonds(&genesis); + let expected = vec![Warning::FailedBond( + albert_address_str.clone(), + albert_address_str.clone(), + token::DenominatedAmount::new( + token::Amount::from_uint(1, 6).unwrap(), + 6.into(), + ), + "Insufficient source balance".to_string(), + )]; + assert_eq!(expected, initializer.warnings); + initializer.warnings.clear(); + + // initialize balances + let res = initializer.init_token_balances(&genesis); + assert_eq!(res, ControlFlow::Continue(())); + + initializer.apply_genesis_txs_bonds(&genesis); + let expected = vec![Warning::FailedBond( + albert_address_str.clone(), + albert_address_str.clone(), + token::DenominatedAmount::new( + token::Amount::from_uint(1, 6).unwrap(), + 6.into(), + ), + format!( + "The given address {} is not a validator address", + albert_address_str + ), + )]; + assert_eq!(expected, initializer.warnings); + } } diff --git a/apps/src/lib/node/ledger/shell/mod.rs b/apps/src/lib/node/ledger/shell/mod.rs index 88797d8486..e8d5da8b83 100644 --- a/apps/src/lib/node/ledger/shell/mod.rs +++ b/apps/src/lib/node/ledger/shell/mod.rs @@ -9,6 +9,7 @@ pub mod block_alloc; mod finalize_block; mod governance; mod init_chain; +pub use init_chain::InitChainValidation; pub mod 
prepare_proposal; pub mod process_proposal; pub(super) mod queries; @@ -31,6 +32,7 @@ use borsh_ext::BorshSerializeExt; use masp_primitives::transaction::Transaction; use namada::core::hints; use namada::core::ledger::eth_bridge; +pub use namada::core::types::transaction::ResultCode; use namada::ledger::events::log::EventLog; use namada::ledger::events::Event; use namada::ledger::gas::{Gas, TxGasMeter}; @@ -51,7 +53,9 @@ use namada::ledger::storage::{ use namada::ledger::storage_api::tx::validate_tx_bytes; use namada::ledger::storage_api::{self, StorageRead}; use namada::ledger::{parameters, pos, protocol}; -use namada::proof_of_stake::{self, process_slashes, read_pos_params, slash}; +use namada::proof_of_stake::slashing::{process_slashes, slash}; +use namada::proof_of_stake::storage::read_pos_params; +use namada::proof_of_stake::{self}; use namada::proto::{self, Section, Tx}; use namada::types::address::Address; use namada::types::chain::ChainId; @@ -67,8 +71,6 @@ use namada::vm::wasm::{TxCache, VpCache}; use namada::vm::{WasmCacheAccess, WasmCacheRwAccess}; use namada_sdk::eth_bridge::{EthBridgeQueries, EthereumOracleConfig}; use namada_sdk::tendermint::AppHash; -use num_derive::{FromPrimitive, ToPrimitive}; -use num_traits::{FromPrimitive, ToPrimitive}; use thiserror::Error; use tokio::sync::mpsc::{Receiver, UnboundedSender}; @@ -136,63 +138,6 @@ impl From for TxResult { } } -/// The different error codes that the ledger may -/// send back to a client indicating the status -/// of their submitted tx -#[derive(Debug, Copy, Clone, FromPrimitive, ToPrimitive, PartialEq, Eq)] -pub enum ErrorCodes { - Ok = 0, - WasmRuntimeError = 1, - InvalidTx = 2, - InvalidSig = 3, - InvalidOrder = 4, - ExtraTxs = 5, - Undecryptable = 6, - AllocationError = 7, - ReplayTx = 8, - InvalidChainId = 9, - ExpiredTx = 10, - TxGasLimit = 11, - FeeError = 12, - InvalidVoteExtension = 13, - TooLarge = 14, -} - -impl ErrorCodes { - /// Checks if the given [`ErrorCodes`] value is a protocol level error, - /// that can be recovered from at the finalize block stage. - pub const fn is_recoverable(&self) -> bool { - use ErrorCodes::*; - // NOTE: pattern match on all `ErrorCodes` variants, in order - // to catch potential bugs when adding new codes - match self { - Ok | WasmRuntimeError => true, - InvalidTx | InvalidSig | InvalidOrder | ExtraTxs - | Undecryptable | AllocationError | ReplayTx | InvalidChainId - | ExpiredTx | TxGasLimit | FeeError | InvalidVoteExtension - | TooLarge => false, - } - } -} - -impl From for u32 { - fn from(code: ErrorCodes) -> u32 { - code.to_u32().unwrap() - } -} - -impl From for String { - fn from(code: ErrorCodes) -> String { - u32::from(code).to_string() - } -} - -impl From for crate::facade::tendermint::abci::Code { - fn from(value: ErrorCodes) -> Self { - Self::from(u32::from(value)) - } -} - pub type Result = std::result::Result; pub fn reset(config: config::Ledger) -> Result<()> { @@ -396,7 +341,7 @@ where byzantine_validators: Vec, /// Path to the base directory with DB data and configs #[allow(dead_code)] - base_dir: PathBuf, + pub(crate) base_dir: PathBuf, /// Path to the WASM directory for files used in the genesis block. 
pub(super) wasm_dir: PathBuf, /// Information about the running shell instance @@ -749,7 +694,7 @@ where let validator_raw_hash = tm_raw_hash_to_string(evidence.validator.address); let validator = - match proof_of_stake::find_validator_by_raw_hash( + match proof_of_stake::storage::find_validator_by_raw_hash( &self.wl_storage, &validator_raw_hash, ) @@ -1122,7 +1067,7 @@ where if !validate_tx_bytes(&self.wl_storage, tx_bytes.len()) .expect("Failed to get max tx bytes param from storage") { - response.code = ErrorCodes::TooLarge.into(); + response.code = ResultCode::TooLarge.into(); response.log = format!("{INVALID_MSG}: Tx too large"); return response; } @@ -1131,7 +1076,7 @@ where let tx = match Tx::try_from(tx_bytes).map_err(Error::TxDecoding) { Ok(t) => t, Err(msg) => { - response.code = ErrorCodes::InvalidTx.into(); + response.code = ResultCode::InvalidTx.into(); response.log = format!("{INVALID_MSG}: {msg}"); return response; } @@ -1139,7 +1084,7 @@ where // Tx chain id if tx.header.chain_id != self.chain_id { - response.code = ErrorCodes::InvalidChainId.into(); + response.code = ResultCode::InvalidChainId.into(); response.log = format!( "{INVALID_MSG}: Tx carries a wrong chain id: expected {}, \ found {}", @@ -1153,7 +1098,7 @@ where let last_block_timestamp = self.get_block_timestamp(None); if last_block_timestamp > exp { - response.code = ErrorCodes::ExpiredTx.into(); + response.code = ResultCode::ExpiredTx.into(); response.log = format!( "{INVALID_MSG}: Tx expired at {exp:#?}, last committed \ block time: {last_block_timestamp:#?}", @@ -1166,7 +1111,7 @@ where let tx_type = match tx.validate_tx() { Ok(_) => tx.header(), Err(msg) => { - response.code = ErrorCodes::InvalidSig.into(); + response.code = ResultCode::InvalidSig.into(); response.log = format!("{INVALID_MSG}: {msg}"); return response; } @@ -1179,7 +1124,7 @@ where match $result { Ok(ext) => ext, Err(err) => { - $rsp.code = ErrorCodes::InvalidVoteExtension.into(); + $rsp.code = ResultCode::InvalidVoteExtension.into(); $rsp.log = format!( "{INVALID_MSG}: Invalid {} vote extension: {err}", $kind, @@ -1204,7 +1149,7 @@ where self.wl_storage.storage.get_last_block_height(), ) { - response.code = ErrorCodes::InvalidVoteExtension.into(); + response.code = ResultCode::InvalidVoteExtension.into(); response.log = format!( "{INVALID_MSG}: Invalid Ethereum events vote \ extension: {err}", @@ -1227,7 +1172,7 @@ where self.wl_storage.storage.get_last_block_height(), ) { - response.code = ErrorCodes::InvalidVoteExtension.into(); + response.code = ResultCode::InvalidVoteExtension.into(); response.log = format!( "{INVALID_MSG}: Invalid Bridge pool roots vote \ extension: {err}", @@ -1258,7 +1203,7 @@ where self.wl_storage.storage.last_epoch, ) { - response.code = ErrorCodes::InvalidVoteExtension.into(); + response.code = ResultCode::InvalidVoteExtension.into(); response.log = format!( "{INVALID_MSG}: Invalid validator set update vote \ extension: {err}", @@ -1271,7 +1216,7 @@ where } } _ => { - response.code = ErrorCodes::InvalidTx.into(); + response.code = ResultCode::InvalidTx.into(); response.log = format!( "{INVALID_MSG}: The given protocol tx cannot be added \ to the mempool" @@ -1282,7 +1227,7 @@ where // Tx gas limit let mut gas_meter = TxGasMeter::new(wrapper.gas_limit); if gas_meter.add_wrapper_gas(tx_bytes).is_err() { - response.code = ErrorCodes::TxGasLimit.into(); + response.code = ResultCode::TxGasLimit.into(); response.log = "{INVALID_MSG}: Wrapper transactions \ exceeds its gas limit" .to_string(); @@ -1297,7 +1242,7 @@ 
where .unwrap(), ); if gas_meter.tx_gas_limit > block_gas_limit { - response.code = ErrorCodes::AllocationError.into(); + response.code = ResultCode::AllocationError.into(); response.log = "{INVALID_MSG}: Wrapper transaction \ exceeds the maximum block gas limit" .to_string(); @@ -1312,7 +1257,7 @@ where .has_replay_protection_entry(&tx.raw_header_hash()) .expect("Error while checking inner tx hash key in storage") { - response.code = ErrorCodes::ReplayTx.into(); + response.code = ResultCode::ReplayTx.into(); response.log = format!( "{INVALID_MSG}: Inner transaction hash {} already in \ storage, replay attempt", @@ -1332,7 +1277,7 @@ where "Error while checking wrapper tx hash key in storage", ) { - response.code = ErrorCodes::ReplayTx.into(); + response.code = ResultCode::ReplayTx.into(); response.log = format!( "{INVALID_MSG}: Wrapper transaction hash {} already \ in storage, replay attempt", @@ -1351,27 +1296,27 @@ where None, false, ) { - response.code = ErrorCodes::FeeError.into(); + response.code = ResultCode::FeeError.into(); response.log = format!("{INVALID_MSG}: {e}"); return response; } } TxType::Raw => { - response.code = ErrorCodes::InvalidTx.into(); + response.code = ResultCode::InvalidTx.into(); response.log = format!( "{INVALID_MSG}: Raw transactions cannot be accepted into \ the mempool" ); } TxType::Decrypted(_) => { - response.code = ErrorCodes::InvalidTx.into(); + response.code = ResultCode::InvalidTx.into(); response.log = format!( "{INVALID_MSG}: Decrypted txs cannot be sent by clients" ); } } - if response.code == ErrorCodes::Ok.into() { + if response.code == ResultCode::Ok.into() { response.log = VALID_MSG.into(); } response @@ -1432,15 +1377,35 @@ where } }; - if wrapper.fee.amount_per_gas_unit < minimum_gas_price { - // The fees do not match the minimum required - return Err(Error::TxApply(protocol::Error::FeeError(format!( - "Fee amount {:?} do not match the minimum required amount \ - {:?} for token {}", - wrapper.fee.amount_per_gas_unit, - minimum_gas_price, - wrapper.fee.token - )))); + match wrapper + .fee + .amount_per_gas_unit + .to_amount(&wrapper.fee.token, &self.wl_storage) + { + Ok(amount_per_gas_unit) + if amount_per_gas_unit < minimum_gas_price => + { + // The fees do not match the minimum required + return Err(Error::TxApply(protocol::Error::FeeError( + format!( + "Fee amount {:?} do not match the minimum required \ + amount {:?} for token {}", + wrapper.fee.amount_per_gas_unit, + minimum_gas_price, + wrapper.fee.token + ), + ))); + } + Ok(_) => {} + Err(err) => { + return Err(Error::TxApply(protocol::Error::FeeError( + format!( + "The precision of the fee amount {:?} is higher than \ + the denomination for token {}: {}", + wrapper.fee.amount_per_gas_unit, wrapper.fee.token, err, + ), + ))); + } } if let Some(transaction) = masp_transaction { @@ -1536,13 +1501,13 @@ where let (current_epoch, _gas) = self.wl_storage.storage.get_current_epoch(); let pos_params = - namada_proof_of_stake::read_pos_params(&self.wl_storage) + namada_proof_of_stake::storage::read_pos_params(&self.wl_storage) .expect("Could not find the PoS parameters"); let validator_set_update_fn = if is_genesis { namada_proof_of_stake::genesis_validator_set_tendermint } else { - namada_proof_of_stake::validator_set_update_tendermint + namada_proof_of_stake::validator_set_update::validator_set_update_tendermint }; validator_set_update_fn( @@ -1589,7 +1554,7 @@ mod test_utils { use namada::ledger::storage::{LastBlock, Sha256Hasher}; use namada::ledger::storage_api::StorageWrite; use 
namada::proof_of_stake::parameters::PosParams; - use namada::proof_of_stake::validator_consensus_key_handle; + use namada::proof_of_stake::storage::validator_consensus_key_handle; use namada::proto::{Code, Data}; use namada::tendermint::abci::types::VoteInfo; use namada::types::address; @@ -1612,6 +1577,7 @@ mod test_utils { use crate::facade::tendermint_proto::v0_37::abci::{ RequestPrepareProposal, RequestProcessProposal, }; + use crate::node::ledger::shell::token::DenominatedAmount; use crate::node::ledger::shims::abcipp_shim_types; use crate::node::ledger::shims::abcipp_shim_types::shim::request::{ FinalizeBlock, ProcessedTx, @@ -2118,7 +2084,7 @@ mod test_utils { let mut wrapper = Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { - amount_per_gas_unit: Default::default(), + amount_per_gas_unit: DenominatedAmount::native(0.into()), token: native_token, }, keypair.ref_to(), @@ -2157,8 +2123,6 @@ mod test_utils { implicit_vp_code_hash: Default::default(), epochs_per_year: 365, max_signatures_per_transaction: 10, - pos_gain_p: Default::default(), - pos_gain_d: Default::default(), staked_ratio: Default::default(), pos_inflation_amount: Default::default(), fee_unshielding_gas_limit: 0, @@ -2294,6 +2258,7 @@ mod test_utils { #[cfg(test)] mod shell_tests { use namada::core::ledger::replay_protection; + use namada::ledger::storage_api::token::read_denom; use namada::proto::{ Code, Data, Section, SignableEthMessage, Signature, Signed, Tx, }; @@ -2308,6 +2273,7 @@ mod shell_tests { use super::*; use crate::node::ledger::shell::test_utils; + use crate::node::ledger::shell::token::DenominatedAmount; use crate::wallet; const GAS_LIMIT_MULTIPLIER: u64 = 100_000; @@ -2433,7 +2399,7 @@ mod shell_tests { .to_bytes(); let rsp = shell.mempool_validate(&tx, Default::default()); assert!( - rsp.code != ErrorCodes::Ok.into(), + rsp.code != ResultCode::Ok.into(), "Validation should have failed" ); } @@ -2463,7 +2429,7 @@ mod shell_tests { .to_bytes(); let rsp = shell.mempool_validate(&tx, Default::default()); assert!( - rsp.code == ErrorCodes::Ok.into(), + rsp.code == ResultCode::Ok.into(), "Validation should have passed" ); } @@ -2517,7 +2483,7 @@ mod shell_tests { for (tx_bytes, err_msg) in txs_to_validate { let rsp = shell.mempool_validate(&tx_bytes, Default::default()); assert!( - rsp.code == ErrorCodes::InvalidVoteExtension.into(), + rsp.code == ResultCode::InvalidVoteExtension.into(), "{err_msg}" ); } @@ -2599,7 +2565,7 @@ mod shell_tests { } .to_bytes(); let rsp = shell.mempool_validate(&tx, Default::default()); - assert_eq!(rsp.code, ErrorCodes::InvalidVoteExtension.into()); + assert_eq!(rsp.code, ResultCode::InvalidVoteExtension.into()); } /// Mempool validation must reject unsigned wrappers @@ -2612,8 +2578,10 @@ mod shell_tests { let mut unsigned_wrapper = Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { - amount_per_gas_unit: token::Amount::from_uint(100, 0) - .expect("This can't fail"), + amount_per_gas_unit: DenominatedAmount::native( + token::Amount::from_uint(100, 0) + .expect("This can't fail"), + ), token: shell.wl_storage.storage.native_token.clone(), }, keypair.ref_to(), @@ -2631,12 +2599,12 @@ mod shell_tests { unsigned_wrapper.to_bytes().as_ref(), MempoolTxType::NewTransaction, ); - assert_eq!(result.code, ErrorCodes::InvalidSig.into()); + assert_eq!(result.code, ResultCode::InvalidSig.into()); result = shell.mempool_validate( unsigned_wrapper.to_bytes().as_ref(), MempoolTxType::RecheckTransaction, ); - assert_eq!(result.code, ErrorCodes::InvalidSig.into()); + 
assert_eq!(result.code, ResultCode::InvalidSig.into()); } /// Mempool validation must reject wrappers with an invalid signature @@ -2649,8 +2617,10 @@ mod shell_tests { let mut invalid_wrapper = Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { - amount_per_gas_unit: token::Amount::from_uint(100, 0) - .expect("This can't fail"), + amount_per_gas_unit: DenominatedAmount::native( + token::Amount::from_uint(100, 0) + .expect("This can't fail"), + ), token: shell.wl_storage.storage.native_token.clone(), }, keypair.ref_to(), @@ -2672,19 +2642,20 @@ mod shell_tests { // we mount a malleability attack to try and remove the fee let mut new_wrapper = invalid_wrapper.header().wrapper().expect("Test failed"); - new_wrapper.fee.amount_per_gas_unit = Default::default(); + new_wrapper.fee.amount_per_gas_unit = + DenominatedAmount::native(0.into()); invalid_wrapper.update_header(TxType::Wrapper(Box::new(new_wrapper))); let mut result = shell.mempool_validate( invalid_wrapper.to_bytes().as_ref(), MempoolTxType::NewTransaction, ); - assert_eq!(result.code, ErrorCodes::InvalidSig.into()); + assert_eq!(result.code, ResultCode::InvalidSig.into()); result = shell.mempool_validate( invalid_wrapper.to_bytes().as_ref(), MempoolTxType::RecheckTransaction, ); - assert_eq!(result.code, ErrorCodes::InvalidSig.into()); + assert_eq!(result.code, ResultCode::InvalidSig.into()); } /// Mempool validation must reject non-wrapper txs @@ -2699,7 +2670,7 @@ mod shell_tests { tx.to_bytes().as_ref(), MempoolTxType::NewTransaction, ); - assert_eq!(result.code, ErrorCodes::InvalidTx.into()); + assert_eq!(result.code, ResultCode::InvalidTx.into()); assert_eq!( result.log, "Mempool validation failed: Raw transactions cannot be accepted \ @@ -2718,8 +2689,10 @@ mod shell_tests { let mut wrapper = Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { - amount_per_gas_unit: token::Amount::from_uint(100, 0) - .expect("This can't fail"), + amount_per_gas_unit: DenominatedAmount::native( + token::Amount::from_uint(100, 0) + .expect("This can't fail"), + ), token: shell.wl_storage.storage.native_token.clone(), }, keypair.ref_to(), @@ -2752,7 +2725,7 @@ mod shell_tests { wrapper.to_bytes().as_ref(), MempoolTxType::NewTransaction, ); - assert_eq!(result.code, ErrorCodes::ReplayTx.into()); + assert_eq!(result.code, ResultCode::ReplayTx.into()); assert_eq!( result.log, format!( @@ -2766,7 +2739,7 @@ mod shell_tests { wrapper.to_bytes().as_ref(), MempoolTxType::RecheckTransaction, ); - assert_eq!(result.code, ErrorCodes::ReplayTx.into()); + assert_eq!(result.code, ResultCode::ReplayTx.into()); assert_eq!( result.log, format!( @@ -2790,7 +2763,7 @@ mod shell_tests { wrapper.to_bytes().as_ref(), MempoolTxType::NewTransaction, ); - assert_eq!(result.code, ErrorCodes::ReplayTx.into()); + assert_eq!(result.code, ResultCode::ReplayTx.into()); assert_eq!( result.log, format!( @@ -2804,7 +2777,7 @@ mod shell_tests { wrapper.to_bytes().as_ref(), MempoolTxType::RecheckTransaction, ); - assert_eq!(result.code, ErrorCodes::ReplayTx.into()); + assert_eq!(result.code, ResultCode::ReplayTx.into()); assert_eq!( result.log, format!( @@ -2832,7 +2805,7 @@ mod shell_tests { tx.to_bytes().as_ref(), MempoolTxType::NewTransaction, ); - assert_eq!(result.code, ErrorCodes::InvalidChainId.into()); + assert_eq!(result.code, ResultCode::InvalidChainId.into()); assert_eq!( result.log, format!( @@ -2860,7 +2833,7 @@ mod shell_tests { tx.to_bytes().as_ref(), MempoolTxType::NewTransaction, ); - assert_eq!(result.code, ErrorCodes::ExpiredTx.into()); + 
assert_eq!(result.code, ResultCode::ExpiredTx.into()); } /// Check that a tx requiring more gas than the block limit gets rejected @@ -2876,7 +2849,7 @@ mod shell_tests { let mut wrapper = Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { - amount_per_gas_unit: 100.into(), + amount_per_gas_unit: DenominatedAmount::native(100.into()), token: shell.wl_storage.storage.native_token.clone(), }, keypair.ref_to(), @@ -2897,7 +2870,7 @@ mod shell_tests { wrapper.to_bytes().as_ref(), MempoolTxType::NewTransaction, ); - assert_eq!(result.code, ErrorCodes::AllocationError.into()); + assert_eq!(result.code, ResultCode::AllocationError.into()); } // Check that a tx requiring more gas than its limit gets rejected @@ -2909,7 +2882,7 @@ mod shell_tests { let mut wrapper = Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { - amount_per_gas_unit: 100.into(), + amount_per_gas_unit: DenominatedAmount::native(100.into()), token: shell.wl_storage.storage.native_token.clone(), }, keypair.ref_to(), @@ -2930,7 +2903,7 @@ mod shell_tests { wrapper.to_bytes().as_ref(), MempoolTxType::NewTransaction, ); - assert_eq!(result.code, ErrorCodes::TxGasLimit.into()); + assert_eq!(result.code, ResultCode::TxGasLimit.into()); } // Check that a wrapper using a non-whitelisted token for fee payment is @@ -2938,11 +2911,17 @@ mod shell_tests { #[test] fn test_fee_non_whitelisted_token() { let (shell, _recv, _, _) = test_utils::setup(); + let apfel_denom = read_denom(&shell.wl_storage, &address::apfel()) + .expect("unable to read denomination from storage") + .expect("unable to find denomination of apfels"); let mut wrapper = Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { - amount_per_gas_unit: 100.into(), + amount_per_gas_unit: DenominatedAmount::new( + 100.into(), + apfel_denom, + ), token: address::apfel(), }, crate::wallet::defaults::albert_keypair().ref_to(), @@ -2965,7 +2944,7 @@ mod shell_tests { wrapper.to_bytes().as_ref(), MempoolTxType::NewTransaction, ); - assert_eq!(result.code, ErrorCodes::FeeError.into()); + assert_eq!(result.code, ResultCode::FeeError.into()); } // Check that a wrapper setting a fee amount lower than the minimum required @@ -2977,7 +2956,7 @@ mod shell_tests { let mut wrapper = Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { - amount_per_gas_unit: 0.into(), + amount_per_gas_unit: DenominatedAmount::native(0.into()), token: shell.wl_storage.storage.native_token.clone(), }, crate::wallet::defaults::albert_keypair().ref_to(), @@ -3000,7 +2979,7 @@ mod shell_tests { wrapper.to_bytes().as_ref(), MempoolTxType::NewTransaction, ); - assert_eq!(result.code, ErrorCodes::FeeError.into()); + assert_eq!(result.code, ResultCode::FeeError.into()); } // Check that a wrapper transactions whose fees cannot be paid is rejected @@ -3011,7 +2990,9 @@ mod shell_tests { let mut wrapper = Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { - amount_per_gas_unit: 1_000_000_000.into(), + amount_per_gas_unit: DenominatedAmount::native( + 1_000_000_000.into(), + ), token: shell.wl_storage.storage.native_token.clone(), }, crate::wallet::defaults::albert_keypair().ref_to(), @@ -3034,7 +3015,7 @@ mod shell_tests { wrapper.to_bytes().as_ref(), MempoolTxType::NewTransaction, ); - assert_eq!(result.code, ErrorCodes::FeeError.into()); + assert_eq!(result.code, ResultCode::FeeError.into()); } // Check that a fee overflow in the wrapper transaction is rejected @@ -3045,7 +3026,9 @@ mod shell_tests { let mut wrapper = Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { 
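A note on the recurring `DenominatedAmount` changes in these test hunks: wrapper fees now carry a denomination (`DenominatedAmount::native(..)` for the native token, `DenominatedAmount::new(amount, denom)` otherwise), and the minimum-fee check in the earlier hunk converts the fee with `to_amount(&token, &storage)` before comparing it against the minimum gas price, rejecting amounts that are more precise than the token's denomination. Below is a self-contained sketch of that convert-then-compare idea; the `Amount`/`DenominatedAmount` structs and `check_min_fee` here are simplified stand-ins, not the namada API.

```rust
/// Simplified stand-ins for namada's token types (illustrative only).
#[derive(Debug, Clone, Copy, PartialEq, PartialOrd)]
struct Amount(u128); // raw base units

#[derive(Debug, Clone, Copy)]
struct DenominatedAmount {
    mantissa: u128,
    denom: u8, // decimal places the mantissa is expressed in
}

impl DenominatedAmount {
    /// Convert to raw base units of a token with `token_denom` decimals.
    /// Fails if the value carries more precision than the token supports.
    fn to_amount(self, token_denom: u8) -> Result<Amount, String> {
        if self.denom > token_denom {
            return Err(format!(
                "amount has {} decimals, token only supports {}",
                self.denom, token_denom
            ));
        }
        let scale = 10u128.pow(u32::from(token_denom - self.denom));
        Ok(Amount(self.mantissa * scale))
    }
}

/// Reject a fee that is either un-convertible or below the minimum gas price.
fn check_min_fee(
    fee: DenominatedAmount,
    token_denom: u8,
    min_gas_price: Amount,
) -> Result<(), String> {
    let raw = fee.to_amount(token_denom)?;
    if raw < min_gas_price {
        return Err("fee amount below the minimum required".into());
    }
    Ok(())
}

fn main() {
    // 0.05 of a 6-decimal token = 50_000 base units
    let fee = DenominatedAmount { mantissa: 5, denom: 2 };
    assert!(check_min_fee(fee, 6, Amount(50_000)).is_ok());
    assert!(check_min_fee(fee, 6, Amount(50_001)).is_err());
    // More precision than the token supports is rejected outright
    let too_precise = DenominatedAmount { mantissa: 1, denom: 8 };
    assert!(check_min_fee(too_precise, 6, Amount(1)).is_err());
}
```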
- amount_per_gas_unit: token::Amount::max(), + amount_per_gas_unit: DenominatedAmount::native( + token::Amount::max(), + ), token: shell.wl_storage.storage.native_token.clone(), }, crate::wallet::defaults::albert_keypair().ref_to(), @@ -3068,7 +3051,7 @@ mod shell_tests { wrapper.to_bytes().as_ref(), MempoolTxType::NewTransaction, ); - assert_eq!(result.code, ErrorCodes::FeeError.into()); + assert_eq!(result.code, ResultCode::FeeError.into()); } /// Test max tx bytes parameter in CheckTx @@ -3090,7 +3073,9 @@ mod shell_tests { let mut wrapper = Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { - amount_per_gas_unit: 100.into(), + amount_per_gas_unit: DenominatedAmount::native( + 100.into(), + ), token: shell.wl_storage.storage.native_token.clone(), }, keypair.ref_to(), @@ -3115,13 +3100,13 @@ mod shell_tests { new_tx(50).to_bytes().as_ref(), MempoolTxType::NewTransaction, ); - assert!(result.code != ErrorCodes::TooLarge.into()); + assert!(result.code != ResultCode::TooLarge.into()); // max tx bytes + 1, on the other hand, is not let result = shell.mempool_validate( new_tx(max_tx_bytes + 1).to_bytes().as_ref(), MempoolTxType::NewTransaction, ); - assert_eq!(result.code, ErrorCodes::TooLarge.into()); + assert_eq!(result.code, ResultCode::TooLarge.into()); } } diff --git a/apps/src/lib/node/ledger/shell/prepare_proposal.rs b/apps/src/lib/node/ledger/shell/prepare_proposal.rs index 2dd8cf57bf..1161401180 100644 --- a/apps/src/lib/node/ledger/shell/prepare_proposal.rs +++ b/apps/src/lib/node/ledger/shell/prepare_proposal.rs @@ -5,7 +5,7 @@ use namada::core::ledger::gas::TxGasMeter; use namada::ledger::pos::PosQueries; use namada::ledger::protocol::get_fee_unshielding_transaction; use namada::ledger::storage::{DBIter, StorageHasher, TempWlStorage, DB}; -use namada::proof_of_stake::find_validator_by_raw_hash; +use namada::proof_of_stake::storage::find_validator_by_raw_hash; use namada::proto::Tx; use namada::types::address::Address; use namada::types::internal::TxInQueue; @@ -379,21 +379,23 @@ mod test_prepare_proposal { use namada::core::ledger::storage_api::collections::lazy_map::{ NestedSubKey, SubKey, }; + use namada::core::ledger::storage_api::token::read_denom; use namada::ledger::gas::Gas; use namada::ledger::pos::PosQueries; use namada::ledger::replay_protection; - use namada::proof_of_stake::types::WeightedValidator; - use namada::proof_of_stake::{ + use namada::proof_of_stake::storage::{ consensus_validator_set_handle, - read_consensus_validator_set_addresses_with_stake, Epoch, + read_consensus_validator_set_addresses_with_stake, }; + use namada::proof_of_stake::types::WeightedValidator; + use namada::proof_of_stake::Epoch; use namada::proto::{Code, Data, Header, Section, Signature, Signed}; use namada::types::address::{self, Address}; use namada::types::ethereum_events::EthereumEvent; use namada::types::key::RefTo; use namada::types::storage::{BlockHeight, InnerEthEventsQueue}; use namada::types::token; - use namada::types::token::Amount; + use namada::types::token::{Amount, DenominatedAmount}; use namada::types::transaction::protocol::{ ethereum_tx_data_variants, EthereumTxData, }; @@ -451,7 +453,9 @@ mod test_prepare_proposal { let mut wrapper = Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { - amount_per_gas_unit: Default::default(), + amount_per_gas_unit: DenominatedAmount::native( + Default::default(), + ), token: shell.wl_storage.storage.native_token.clone(), }, keypair.ref_to(), @@ -749,7 +753,9 @@ mod test_prepare_proposal { let mut tx = 
Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { - amount_per_gas_unit: 1.into(), + amount_per_gas_unit: DenominatedAmount::native( + 1.into(), + ), token: shell.wl_storage.storage.native_token.clone(), }, keypair.ref_to(), @@ -818,7 +824,7 @@ mod test_prepare_proposal { let mut wrapper = Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { - amount_per_gas_unit: 0.into(), + amount_per_gas_unit: DenominatedAmount::native(0.into()), token: shell.wl_storage.storage.native_token.clone(), }, keypair.ref_to(), @@ -863,7 +869,7 @@ mod test_prepare_proposal { let mut wrapper = Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { - amount_per_gas_unit: 1.into(), + amount_per_gas_unit: DenominatedAmount::native(1.into()), token: shell.wl_storage.storage.native_token.clone(), }, keypair.ref_to(), @@ -898,7 +904,9 @@ mod test_prepare_proposal { let mut wrapper = Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { - amount_per_gas_unit: Amount::zero(), + amount_per_gas_unit: DenominatedAmount::native( + Amount::zero(), + ), token: shell.wl_storage.storage.native_token.clone(), }, keypair.ref_to(), @@ -944,7 +952,7 @@ mod test_prepare_proposal { let mut wrapper = Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { - amount_per_gas_unit: 1.into(), + amount_per_gas_unit: DenominatedAmount::native(1.into()), token: shell.wl_storage.storage.native_token.clone(), }, keypair.ref_to(), @@ -966,7 +974,7 @@ mod test_prepare_proposal { new_wrapper.update_header(TxType::Wrapper(Box::new(WrapperTx::new( Fee { - amount_per_gas_unit: 1.into(), + amount_per_gas_unit: DenominatedAmount::native(1.into()), token: shell.wl_storage.storage.native_token.clone(), }, keypair_2.ref_to(), @@ -996,7 +1004,7 @@ mod test_prepare_proposal { let mut wrapper_tx = Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { - amount_per_gas_unit: 1.into(), + amount_per_gas_unit: DenominatedAmount::native(1.into()), token: shell.wl_storage.storage.native_token.clone(), }, keypair.ref_to(), @@ -1045,7 +1053,7 @@ mod test_prepare_proposal { let wrapper = WrapperTx::new( Fee { - amount_per_gas_unit: 100.into(), + amount_per_gas_unit: DenominatedAmount::native(100.into()), token: shell.wl_storage.storage.native_token.clone(), }, keypair.ref_to(), @@ -1084,7 +1092,7 @@ mod test_prepare_proposal { let wrapper = WrapperTx::new( Fee { - amount_per_gas_unit: 100.into(), + amount_per_gas_unit: DenominatedAmount::native(100.into()), token: shell.wl_storage.storage.native_token.clone(), }, keypair.ref_to(), @@ -1131,9 +1139,16 @@ mod test_prepare_proposal { }); } + let btc_denom = read_denom(&shell.wl_storage, &address::btc()) + .expect("unable to read denomination from storage") + .expect("unable to find denomination of btcs"); + let wrapper = WrapperTx::new( Fee { - amount_per_gas_unit: 100.into(), + amount_per_gas_unit: DenominatedAmount::new( + 100.into(), + btc_denom, + ), token: address::btc(), }, crate::wallet::defaults::albert_keypair().ref_to(), @@ -1172,9 +1187,16 @@ mod test_prepare_proposal { fn test_fee_non_whitelisted_token() { let (shell, _recv, _, _) = test_utils::setup(); + let apfel_denom = read_denom(&shell.wl_storage, &address::apfel()) + .expect("unable to read denomination from storage") + .expect("unable to find denomination of apfels"); + let wrapper = WrapperTx::new( Fee { - amount_per_gas_unit: 100.into(), + amount_per_gas_unit: DenominatedAmount::new( + 100.into(), + apfel_denom, + ), token: address::apfel(), }, crate::wallet::defaults::albert_keypair().ref_to(), @@ -1225,7 
+1247,7 @@ mod test_prepare_proposal { let wrapper = WrapperTx::new( Fee { - amount_per_gas_unit: 10.into(), + amount_per_gas_unit: DenominatedAmount::native(10.into()), token: shell.wl_storage.storage.native_token.clone(), }, crate::wallet::defaults::albert_keypair().ref_to(), @@ -1265,7 +1287,7 @@ mod test_prepare_proposal { let wrapper = WrapperTx::new( Fee { - amount_per_gas_unit: 0.into(), + amount_per_gas_unit: DenominatedAmount::native(0.into()), token: shell.wl_storage.storage.native_token.clone(), }, crate::wallet::defaults::albert_keypair().ref_to(), @@ -1304,7 +1326,9 @@ mod test_prepare_proposal { let wrapper = WrapperTx::new( Fee { - amount_per_gas_unit: 1_000_000_000.into(), + amount_per_gas_unit: DenominatedAmount::native( + 1_000_000_000.into(), + ), token: shell.wl_storage.storage.native_token.clone(), }, crate::wallet::defaults::albert_keypair().ref_to(), @@ -1343,7 +1367,9 @@ mod test_prepare_proposal { let wrapper = WrapperTx::new( Fee { - amount_per_gas_unit: token::Amount::max(), + amount_per_gas_unit: DenominatedAmount::native( + token::Amount::max(), + ), token: shell.wl_storage.storage.native_token.clone(), }, crate::wallet::defaults::albert_keypair().ref_to(), diff --git a/apps/src/lib/node/ledger/shell/process_proposal.rs b/apps/src/lib/node/ledger/shell/process_proposal.rs index 465dd57fbc..5677e67fc0 100644 --- a/apps/src/lib/node/ledger/shell/process_proposal.rs +++ b/apps/src/lib/node/ledger/shell/process_proposal.rs @@ -8,7 +8,7 @@ use namada::ledger::pos::PosQueries; use namada::ledger::protocol::get_fee_unshielding_transaction; use namada::ledger::storage::TempWlStorage; use namada::ledger::storage_api::tx::validate_tx_bytes; -use namada::proof_of_stake::find_validator_by_raw_hash; +use namada::proof_of_stake::storage::find_validator_by_raw_hash; use namada::types::internal::TxInQueue; use namada::types::transaction::protocol::{ ethereum_tx_data_variants, ProtocolTxType, @@ -113,7 +113,7 @@ where // deserialize properly, that have invalid signatures // and that have invalid wasm code to reach FinalizeBlock. let invalid_txs = tx_results.iter().any(|res| { - let error = ErrorCodes::from_u32(res.code).expect( + let error = ResultCode::from_u32(res.code).expect( "All error codes returned from process_single_tx are valid", ); !error.is_recoverable() @@ -182,8 +182,8 @@ where &mut tx_wasm_cache, block_proposer, ); - let error_code = ErrorCodes::from_u32(result.code).unwrap(); - if let ErrorCodes::Ok = error_code { + let error_code = ResultCode::from_u32(result.code).unwrap(); + if let ResultCode::Ok = error_code { temp_wl_storage.write_log.commit_tx(); } else { tracing::info!( @@ -217,12 +217,12 @@ where { if vote_extensions.all(|maybe_ext| maybe_ext.is_some()) { TxResult { - code: ErrorCodes::Ok.into(), + code: ResultCode::Ok.into(), info: "Process proposal accepted this transaction".into(), } } else { TxResult { - code: ErrorCodes::InvalidVoteExtension.into(), + code: ResultCode::InvalidVoteExtension.into(), info: "Process proposal rejected this proposal because at \ least one of the vote extensions included was invalid." 
.into(), @@ -275,7 +275,7 @@ where .expect("Failed to get max tx bytes param from storage") { return TxResult { - code: ErrorCodes::TooLarge.into(), + code: ResultCode::TooLarge.into(), info: "Tx too large".into(), }; } @@ -283,7 +283,7 @@ where // try to allocate space for this tx if let Err(e) = metadata.txs_bin.try_dump(tx_bytes) { return TxResult { - code: ErrorCodes::AllocationError.into(), + code: ResultCode::AllocationError.into(), info: match e { AllocFailure::Rejected { .. } => { "No more space left in the block" @@ -305,7 +305,7 @@ where PrepareProposal" ); Err(TxResult { - code: ErrorCodes::InvalidTx.into(), + code: ResultCode::InvalidTx.into(), info: "The submitted transaction was not deserializable" .into(), }) @@ -317,7 +317,7 @@ where // This occurs if the wrapper / protocol tx signature is // invalid return Err(TxResult { - code: ErrorCodes::InvalidSig.into(), + code: ResultCode::InvalidSig.into(), info: err.to_string(), }); } @@ -331,14 +331,14 @@ where if let Err(err) = tx.validate_tx() { return TxResult { - code: ErrorCodes::InvalidSig.into(), + code: ResultCode::InvalidSig.into(), info: err.to_string(), }; } match tx.header().tx_type { // If it is a raw transaction, we do no further validation TxType::Raw => TxResult { - code: ErrorCodes::InvalidTx.into(), + code: ResultCode::InvalidTx.into(), info: "Transaction rejected: Non-encrypted transactions are \ not supported" .into(), @@ -347,7 +347,7 @@ where // Tx chain id if tx_chain_id != self.chain_id { return TxResult { - code: ErrorCodes::InvalidChainId.into(), + code: ResultCode::InvalidChainId.into(), info: format!( "Tx carries a wrong chain id: expected {}, found \ {}", @@ -360,7 +360,7 @@ where if let Some(exp) = tx_expiration { if block_time > exp { return TxResult { - code: ErrorCodes::ExpiredTx.into(), + code: ResultCode::ExpiredTx.into(), info: format!( "Tx expired at {:#?}, block time: {:#?}", exp, block_time @@ -380,7 +380,7 @@ where .get_last_block_height(), ) .map(|_| TxResult { - code: ErrorCodes::Ok.into(), + code: ResultCode::Ok.into(), info: "Process Proposal accepted this \ transaction" .into(), @@ -388,7 +388,7 @@ where .map_err(|err| err.to_string()) }) .unwrap_or_else(|err| TxResult { - code: ErrorCodes::InvalidVoteExtension.into(), + code: ResultCode::InvalidVoteExtension.into(), info: format!( "Process proposal rejected this proposal \ because one of the included Ethereum \ @@ -407,7 +407,7 @@ where .get_last_block_height(), ) .map(|_| TxResult { - code: ErrorCodes::Ok.into(), + code: ResultCode::Ok.into(), info: "Process Proposal accepted this \ transaction" .into(), @@ -415,7 +415,7 @@ where .map_err(|err| err.to_string()) }) .unwrap_or_else(|err| TxResult { - code: ErrorCodes::InvalidVoteExtension.into(), + code: ResultCode::InvalidVoteExtension.into(), info: format!( "Process proposal rejected this proposal \ because one of the included Bridge pool \ @@ -439,7 +439,7 @@ where self.wl_storage.storage.get_current_epoch().0, ) .map(|_| TxResult { - code: ErrorCodes::Ok.into(), + code: ResultCode::Ok.into(), info: "Process Proposal accepted this \ transaction" .into(), @@ -448,7 +448,7 @@ where }) .unwrap_or_else(|err| { TxResult { - code: ErrorCodes::InvalidVoteExtension.into(), + code: ResultCode::InvalidVoteExtension.into(), info: format!( "Process proposal rejected this proposal \ because one of the included validator \ @@ -500,7 +500,7 @@ where .must_send_valset_upd(SendValsetUpd::AtPrevHeight) { return TxResult { - code: ErrorCodes::InvalidVoteExtension.into(), + code: 
ResultCode::InvalidVoteExtension.into(), info: "Process proposal rejected a validator \ set update vote extension issued at an \ invalid block height" @@ -528,7 +528,7 @@ where if wrapper.tx.raw_header_hash() != tx.raw_header_hash() { TxResult { - code: ErrorCodes::InvalidOrder.into(), + code: ResultCode::InvalidOrder.into(), info: "Process proposal rejected a decrypted \ transaction that violated the tx order \ determined in the previous block" @@ -540,22 +540,37 @@ where ) { // DKG is disabled, txs are not actually encrypted TxResult { - code: ErrorCodes::InvalidTx.into(), + code: ResultCode::InvalidTx.into(), info: "The encrypted payload of tx was \ incorrectly marked as un-decryptable" .into(), } } else { - TxResult { - code: ErrorCodes::Ok.into(), - info: "Process Proposal accepted this \ - transaction" - .into(), + match tx.header().expiration { + Some(tx_expiration) + if block_time > tx_expiration => + { + TxResult { + code: ResultCode::ExpiredDecryptedTx + .into(), + info: format!( + "Tx expired at {:#?}, block time: \ + {:#?}", + tx_expiration, block_time + ), + } + } + _ => TxResult { + code: ResultCode::Ok.into(), + info: "Process Proposal accepted this \ + transaction" + .into(), + }, } } } None => TxResult { - code: ErrorCodes::ExtraTxs.into(), + code: ResultCode::ExtraTxs.into(), info: "Received more decrypted txs than expected" .into(), }, @@ -576,7 +591,7 @@ where .try_dump(tx_bytes, u64::from(wrapper.gas_limit)); return TxResult { - code: ErrorCodes::TxGasLimit.into(), + code: ResultCode::TxGasLimit.into(), info: "Wrapper transactions exceeds its gas limit" .to_string(), }; @@ -588,14 +603,14 @@ where .try_dump(tx_bytes, u64::from(wrapper.gas_limit)) { return TxResult { - code: ErrorCodes::AllocationError.into(), + code: ResultCode::AllocationError.into(), info: e, }; } // decrypted txs shouldn't show up before wrapper txs if metadata.has_decrypted_txs { return TxResult { - code: ErrorCodes::InvalidTx.into(), + code: ResultCode::InvalidTx.into(), info: "Decrypted txs should not be proposed before \ wrapper txs" .into(), @@ -603,7 +618,7 @@ where } if hints::unlikely(self.encrypted_txs_not_allowed()) { return TxResult { - code: ErrorCodes::AllocationError.into(), + code: ResultCode::AllocationError.into(), info: "Wrapper txs not allowed at the current block \ height" .into(), @@ -613,7 +628,7 @@ where // ChainId check if tx_chain_id != self.chain_id { return TxResult { - code: ErrorCodes::InvalidChainId.into(), + code: ResultCode::InvalidChainId.into(), info: format!( "Tx carries a wrong chain id: expected {}, found \ {}", @@ -626,7 +641,7 @@ where if let Some(exp) = tx_expiration { if block_time > exp { return TxResult { - code: ErrorCodes::ExpiredTx.into(), + code: ResultCode::ExpiredTx.into(), info: format!( "Tx expired at {:#?}, block time: {:#?}", exp, block_time @@ -640,7 +655,7 @@ where self.replay_protection_checks(&tx, temp_wl_storage) { return TxResult { - code: ErrorCodes::ReplayTx.into(), + code: ResultCode::ReplayTx.into(), info: e.to_string(), }; } @@ -656,12 +671,12 @@ where false, ) { Ok(()) => TxResult { - code: ErrorCodes::Ok.into(), + code: ResultCode::Ok.into(), info: "Process proposal accepted this transaction" .into(), }, Err(e) => TxResult { - code: ErrorCodes::FeeError.into(), + code: ResultCode::FeeError.into(), info: e.to_string(), }, } @@ -691,6 +706,7 @@ where #[cfg(test)] mod test_process_proposal { use namada::ledger::replay_protection; + use namada::ledger::storage_api::token::read_denom; use namada::ledger::storage_api::StorageWrite; use 
namada::proto::{ Code, Data, Section, SignableEthMessage, Signature, Signed, @@ -700,7 +716,7 @@ mod test_process_proposal { use namada::types::storage::Epoch; use namada::types::time::DateTimeUtc; use namada::types::token; - use namada::types::token::Amount; + use namada::types::token::{Amount, DenominatedAmount}; use namada::types::transaction::protocol::EthereumTxData; use namada::types::transaction::{Fee, WrapperTx}; use namada::types::vote_extensions::{bridge_pool_roots, ethereum_events}; @@ -742,7 +758,7 @@ mod test_process_proposal { .expect("Test failed") .try_into() .expect("Test failed"); - assert_eq!(resp.result.code, u32::from(ErrorCodes::Ok)); + assert_eq!(resp.result.code, u32::from(ResultCode::Ok)); deactivate_bridge(&mut shell); let response = if let Err(TestError::RejectProposal(resp)) = shell.process_proposal(request) @@ -757,7 +773,7 @@ mod test_process_proposal { }; assert_eq!( response.result.code, - u32::from(ErrorCodes::InvalidVoteExtension) + u32::from(ResultCode::InvalidVoteExtension) ); } @@ -794,7 +810,7 @@ mod test_process_proposal { .try_into() .expect("Test failed"); - assert_eq!(resp.result.code, u32::from(ErrorCodes::Ok)); + assert_eq!(resp.result.code, u32::from(ResultCode::Ok)); deactivate_bridge(&mut shell); let response = if let Err(TestError::RejectProposal(resp)) = shell.process_proposal(request) @@ -809,7 +825,7 @@ mod test_process_proposal { }; assert_eq!( response.result.code, - u32::from(ErrorCodes::InvalidVoteExtension) + u32::from(ResultCode::InvalidVoteExtension) ); } @@ -835,7 +851,7 @@ mod test_process_proposal { }; assert_eq!( response.result.code, - u32::from(ErrorCodes::InvalidVoteExtension) + u32::from(ResultCode::InvalidVoteExtension) ); } @@ -935,7 +951,9 @@ mod test_process_proposal { let mut outer_tx = Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { - amount_per_gas_unit: Default::default(), + amount_per_gas_unit: DenominatedAmount::native( + Default::default(), + ), token: shell.wl_storage.storage.native_token.clone(), }, public_key, @@ -966,7 +984,7 @@ mod test_process_proposal { println!("{}", response.result.info); - assert_eq!(response.result.code, u32::from(ErrorCodes::InvalidSig)); + assert_eq!(response.result.code, u32::from(ResultCode::InvalidSig)); assert_eq!( response.result.info, String::from( @@ -985,8 +1003,9 @@ mod test_process_proposal { let mut outer_tx = Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { - amount_per_gas_unit: Amount::from_uint(100, 0) - .expect("Test failed"), + amount_per_gas_unit: DenominatedAmount::native( + Amount::from_uint(100, 0).expect("Test failed"), + ), token: shell.wl_storage.storage.native_token.clone(), }, keypair.ref_to(), @@ -1005,7 +1024,8 @@ mod test_process_proposal { let mut new_tx = outer_tx.clone(); if let TxType::Wrapper(wrapper) = &mut new_tx.header.tx_type { // we mount a malleability attack to try and remove the fee - wrapper.fee.amount_per_gas_unit = Default::default(); + wrapper.fee.amount_per_gas_unit = + DenominatedAmount::native(Default::default()); } else { panic!("Test failed") }; @@ -1026,7 +1046,7 @@ mod test_process_proposal { invalid."; assert_eq!( response.result.code, - u32::from(ErrorCodes::InvalidSig) + u32::from(ResultCode::InvalidSig) ); assert!( response.result.info.contains(expected_error), @@ -1057,8 +1077,9 @@ mod test_process_proposal { let mut outer_tx = Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { - amount_per_gas_unit: Amount::from_uint(1, 0) - .expect("Test failed"), + amount_per_gas_unit: 
DenominatedAmount::native( + Amount::from_uint(1, 0).expect("Test failed"), + ), token: shell.wl_storage.storage.native_token.clone(), }, keypair.ref_to(), @@ -1091,7 +1112,7 @@ mod test_process_proposal { panic!("Test failed") } }; - assert_eq!(response.result.code, u32::from(ErrorCodes::FeeError)); + assert_eq!(response.result.code, u32::from(ResultCode::FeeError)); assert_eq!( response.result.info, String::from( @@ -1124,7 +1145,9 @@ mod test_process_proposal { let mut outer_tx = Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { - amount_per_gas_unit: Amount::native_whole(1_000_100), + amount_per_gas_unit: DenominatedAmount::native( + Amount::native_whole(1_000_100), + ), token: shell.wl_storage.storage.native_token.clone(), }, keypair.ref_to(), @@ -1157,7 +1180,7 @@ mod test_process_proposal { panic!("Test failed") } }; - assert_eq!(response.result.code, u32::from(ErrorCodes::FeeError)); + assert_eq!(response.result.code, u32::from(ResultCode::FeeError)); assert_eq!( response.result.info, String::from( @@ -1180,7 +1203,9 @@ mod test_process_proposal { let mut outer_tx = Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { - amount_per_gas_unit: Amount::native_whole(i as u64), + amount_per_gas_unit: DenominatedAmount::native( + Amount::native_whole(i as u64), + ), token: shell.wl_storage.storage.native_token.clone(), }, keypair.ref_to(), @@ -1220,7 +1245,7 @@ mod test_process_proposal { panic!("Test failed") } }; - assert_eq!(response.result.code, u32::from(ErrorCodes::InvalidOrder)); + assert_eq!(response.result.code, u32::from(ResultCode::InvalidOrder)); assert_eq!( response.result.info, String::from( @@ -1239,7 +1264,9 @@ mod test_process_proposal { let mut tx = Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { - amount_per_gas_unit: Default::default(), + amount_per_gas_unit: DenominatedAmount::native( + Default::default(), + ), token: shell.wl_storage.storage.native_token.clone(), }, keypair.ref_to(), @@ -1273,7 +1300,7 @@ mod test_process_proposal { panic!("Test failed") } }; - assert_eq!(response.result.code, u32::from(ErrorCodes::InvalidTx)); + assert_eq!(response.result.code, u32::from(ResultCode::InvalidTx)); assert_eq!( response.result.info, String::from( @@ -1292,7 +1319,9 @@ mod test_process_proposal { // not valid tx bytes let wrapper = WrapperTx { fee: Fee { - amount_per_gas_unit: Default::default(), + amount_per_gas_unit: DenominatedAmount::native( + Default::default(), + ), token: shell.wl_storage.storage.native_token.clone(), }, pk: keypair.ref_to(), @@ -1340,7 +1369,7 @@ mod test_process_proposal { } else { panic!("Test failed") }; - assert_eq!(response.result.code, u32::from(ErrorCodes::ExtraTxs)); + assert_eq!(response.result.code, u32::from(ResultCode::ExtraTxs)); assert_eq!( response.result.info, String::from("Received more decrypted txs than expected"), @@ -1375,7 +1404,7 @@ mod test_process_proposal { panic!("Test failed") } }; - assert_eq!(response.result.code, u32::from(ErrorCodes::InvalidTx)); + assert_eq!(response.result.code, u32::from(ResultCode::InvalidTx)); assert_eq!( response.result.info, String::from( @@ -1396,7 +1425,9 @@ mod test_process_proposal { let mut wrapper = Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { - amount_per_gas_unit: Amount::zero(), + amount_per_gas_unit: DenominatedAmount::native( + Amount::zero(), + ), token: shell.wl_storage.storage.native_token.clone(), }, keypair.ref_to(), @@ -1434,7 +1465,7 @@ mod test_process_proposal { Err(TestError::RejectProposal(response)) => { assert_eq!( 
response[0].result.code, - u32::from(ErrorCodes::ReplayTx) + u32::from(ResultCode::ReplayTx) ); assert_eq!( response[0].result.info, @@ -1469,7 +1500,7 @@ mod test_process_proposal { let mut wrapper = Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { - amount_per_gas_unit: 1.into(), + amount_per_gas_unit: DenominatedAmount::native(1.into()), token: shell.wl_storage.storage.native_token.clone(), }, keypair.ref_to(), @@ -1493,10 +1524,10 @@ mod test_process_proposal { match shell.process_proposal(request) { Ok(_) => panic!("Test failed"), Err(TestError::RejectProposal(response)) => { - assert_eq!(response[0].result.code, u32::from(ErrorCodes::Ok)); + assert_eq!(response[0].result.code, u32::from(ResultCode::Ok)); assert_eq!( response[1].result.code, - u32::from(ErrorCodes::ReplayTx) + u32::from(ResultCode::ReplayTx) ); assert_eq!( response[1].result.info, @@ -1521,7 +1552,9 @@ mod test_process_proposal { let mut wrapper = Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { - amount_per_gas_unit: Amount::zero(), + amount_per_gas_unit: DenominatedAmount::native( + Amount::zero(), + ), token: shell.wl_storage.storage.native_token.clone(), }, keypair.ref_to(), @@ -1557,7 +1590,7 @@ mod test_process_proposal { Err(TestError::RejectProposal(response)) => { assert_eq!( response[0].result.code, - u32::from(ErrorCodes::ReplayTx) + u32::from(ResultCode::ReplayTx) ); assert_eq!( response[0].result.info, @@ -1583,7 +1616,7 @@ mod test_process_proposal { let mut wrapper = Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { - amount_per_gas_unit: 1.into(), + amount_per_gas_unit: DenominatedAmount::native(1.into()), token: shell.wl_storage.storage.native_token.clone(), }, keypair.ref_to(), @@ -1603,7 +1636,7 @@ mod test_process_proposal { new_wrapper.update_header(TxType::Wrapper(Box::new(WrapperTx::new( Fee { - amount_per_gas_unit: 1.into(), + amount_per_gas_unit: DenominatedAmount::native(1.into()), token: shell.wl_storage.storage.native_token.clone(), }, keypair_2.ref_to(), @@ -1637,7 +1670,9 @@ mod test_process_proposal { let mut wrapper = Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { - amount_per_gas_unit: Amount::zero(), + amount_per_gas_unit: DenominatedAmount::native( + Amount::zero(), + ), token: shell.wl_storage.storage.native_token.clone(), }, keypair.ref_to(), @@ -1674,7 +1709,7 @@ mod test_process_proposal { for res in response { assert_eq!( res.result.code, - u32::from(ErrorCodes::InvalidChainId) + u32::from(ResultCode::InvalidChainId) ); assert_eq!( res.result.info, @@ -1698,7 +1733,7 @@ mod test_process_proposal { let mut wrapper = Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { - amount_per_gas_unit: 1.into(), + amount_per_gas_unit: DenominatedAmount::native(1.into()), token: shell.wl_storage.storage.native_token.clone(), }, keypair.ref_to(), @@ -1725,9 +1760,58 @@ mod test_process_proposal { Err(TestError::RejectProposal(response)) => { assert_eq!( response[0].result.code, - u32::from(ErrorCodes::ExpiredTx) + u32::from(ResultCode::ExpiredTx) + ); + } + } + } + + /// Test that an expired decrypted transaction is marked as rejected but + /// still allows the block to be accepted + #[test] + fn test_expired_decrypted() { + let (mut shell, _recv, _, _) = test_utils::setup(); + let keypair = crate::wallet::defaults::daewon_keypair(); + + let mut wrapper = + Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( + Fee { + amount_per_gas_unit: DenominatedAmount::native(1.into()), + token: shell.wl_storage.storage.native_token.clone(), + }, 
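The `test_expired_decrypted` case being built here (and continued just below) exercises the new behaviour added in the `process_proposal` hunk above: a decrypted tx whose wrapper carried an expiration earlier than the block time gets the `ExpiredDecryptedTx` code, while the proposal as a whole is still accepted. A minimal sketch of that check follows, using simplified stand-in types (`Code`, `Header` and `decrypted_tx_code` are illustrative, not the namada types).

```rust
/// Simplified stand-ins (illustrative only).
#[derive(Debug, PartialEq)]
enum Code {
    Ok,
    ExpiredDecryptedTx,
}

struct Header {
    expiration: Option<u64>, // seconds since epoch in this sketch
}

/// Decide the result code for a decrypted tx at `block_time`.
/// Only this tx is marked expired; the proposal itself is still accepted.
fn decrypted_tx_code(header: &Header, block_time: u64) -> Code {
    match header.expiration {
        Some(exp) if block_time > exp => Code::ExpiredDecryptedTx,
        _ => Code::Ok,
    }
}

fn main() {
    assert_eq!(
        decrypted_tx_code(&Header { expiration: Some(10) }, 20),
        Code::ExpiredDecryptedTx
    );
    assert_eq!(
        decrypted_tx_code(&Header { expiration: Some(30) }, 20),
        Code::Ok
    );
    assert_eq!(decrypted_tx_code(&Header { expiration: None }, 20), Code::Ok);
}
```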
+ keypair.ref_to(), + Epoch(0), + GAS_LIMIT_MULTIPLIER.into(), + None, + )))); + wrapper.header.chain_id = shell.chain_id.clone(); + wrapper.header.expiration = Some(DateTimeUtc::default()); + wrapper.set_code(Code::new("wasm_code".as_bytes().to_owned(), None)); + wrapper.set_data(Data::new("transaction data".as_bytes().to_owned())); + wrapper.add_section(Section::Signature(Signature::new( + wrapper.sechashes(), + [(0, keypair)].into_iter().collect(), + None, + ))); + + shell.enqueue_tx(wrapper.clone(), GAS_LIMIT_MULTIPLIER.into()); + + let decrypted = + wrapper.update_header(TxType::Decrypted(DecryptedTx::Decrypted)); + + // Run validation + let request = ProcessProposal { + txs: vec![decrypted.to_bytes()], + }; + match shell.process_proposal(request) { + Ok(txs) => { + assert_eq!(txs.len(), 1); + assert_eq!( + txs[0].result.code, + u32::from(ResultCode::ExpiredDecryptedTx) ); } + Err(_) => panic!("Test failed"), } } @@ -1745,7 +1829,7 @@ mod test_process_proposal { let mut wrapper = Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { - amount_per_gas_unit: 100.into(), + amount_per_gas_unit: DenominatedAmount::native(100.into()), token: shell.wl_storage.storage.native_token.clone(), }, keypair.ref_to(), @@ -1771,7 +1855,7 @@ mod test_process_proposal { Err(TestError::RejectProposal(response)) => { assert_eq!( response[0].result.code, - u32::from(ErrorCodes::AllocationError) + u32::from(ResultCode::AllocationError) ); } } @@ -1787,7 +1871,7 @@ mod test_process_proposal { let mut wrapper = Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { - amount_per_gas_unit: 100.into(), + amount_per_gas_unit: DenominatedAmount::native(100.into()), token: shell.wl_storage.storage.native_token.clone(), }, keypair.ref_to(), @@ -1813,7 +1897,7 @@ mod test_process_proposal { Err(TestError::RejectProposal(response)) => { assert_eq!( response[0].result.code, - u32::from(ErrorCodes::TxGasLimit) + u32::from(ResultCode::TxGasLimit) ); } } @@ -1825,10 +1909,17 @@ mod test_process_proposal { fn test_fee_non_whitelisted_token() { let (shell, _recv, _, _) = test_utils::setup(); + let apfel_denom = read_denom(&shell.wl_storage, &address::apfel()) + .expect("unable to read denomination from storage") + .expect("unable to find denomination of apfels"); + let mut wrapper = Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { - amount_per_gas_unit: 100.into(), + amount_per_gas_unit: DenominatedAmount::new( + 100.into(), + apfel_denom, + ), token: address::apfel(), }, crate::wallet::defaults::albert_keypair().ref_to(), @@ -1856,7 +1947,7 @@ mod test_process_proposal { Err(TestError::RejectProposal(response)) => { assert_eq!( response[0].result.code, - u32::from(ErrorCodes::FeeError) + u32::from(ResultCode::FeeError) ); } } @@ -1871,7 +1962,7 @@ mod test_process_proposal { let mut wrapper = Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { - amount_per_gas_unit: 0.into(), + amount_per_gas_unit: DenominatedAmount::native(0.into()), token: shell.wl_storage.storage.native_token.clone(), }, crate::wallet::defaults::albert_keypair().ref_to(), @@ -1899,7 +1990,7 @@ mod test_process_proposal { Err(TestError::RejectProposal(response)) => { assert_eq!( response[0].result.code, - u32::from(ErrorCodes::FeeError) + u32::from(ResultCode::FeeError) ); } } @@ -1914,7 +2005,9 @@ mod test_process_proposal { let mut wrapper = Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { - amount_per_gas_unit: 1_000_000_000.into(), + amount_per_gas_unit: DenominatedAmount::native( + 1_000_000_000.into(), + 
), token: shell.wl_storage.storage.native_token.clone(), }, crate::wallet::defaults::albert_keypair().ref_to(), @@ -1942,7 +2035,7 @@ mod test_process_proposal { Err(TestError::RejectProposal(response)) => { assert_eq!( response[0].result.code, - u32::from(ErrorCodes::FeeError) + u32::from(ResultCode::FeeError) ); } } @@ -1957,7 +2050,9 @@ mod test_process_proposal { let mut wrapper = Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { - amount_per_gas_unit: token::Amount::max(), + amount_per_gas_unit: DenominatedAmount::native( + token::Amount::max(), + ), token: shell.wl_storage.storage.native_token.clone(), }, crate::wallet::defaults::albert_keypair().ref_to(), @@ -1985,7 +2080,7 @@ mod test_process_proposal { Err(TestError::RejectProposal(response)) => { assert_eq!( response[0].result.code, - u32::from(ErrorCodes::FeeError) + u32::from(ResultCode::FeeError) ); } } @@ -2003,7 +2098,7 @@ mod test_process_proposal { let mut wrapper = Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { - amount_per_gas_unit: 0.into(), + amount_per_gas_unit: DenominatedAmount::native(0.into()), token: shell.wl_storage.storage.native_token.clone(), }, keypair.ref_to(), @@ -2039,7 +2134,7 @@ mod test_process_proposal { }; assert_eq!( response.result.code, - u32::from(ErrorCodes::AllocationError) + u32::from(ResultCode::AllocationError) ); assert_eq!( response.result.info, @@ -2070,7 +2165,9 @@ mod test_process_proposal { let mut wrapper = Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { - amount_per_gas_unit: 100.into(), + amount_per_gas_unit: DenominatedAmount::native( + 100.into(), + ), token: shell.wl_storage.storage.native_token.clone(), }, keypair.ref_to(), @@ -2098,7 +2195,7 @@ mod test_process_proposal { Err(TestError::RejectProposal(response)) => { assert_eq!( response[0].result.code, - u32::from(ErrorCodes::TooLarge) + u32::from(ResultCode::TooLarge) ); } } @@ -2110,7 +2207,7 @@ mod test_process_proposal { Ok(_) => panic!("Test failed"), Err(TestError::RejectProposal(response)) => { assert!( - response[0].result.code != u32::from(ErrorCodes::TooLarge) + response[0].result.code != u32::from(ResultCode::TooLarge) ); } } diff --git a/apps/src/lib/node/ledger/shell/queries.rs b/apps/src/lib/node/ledger/shell/queries.rs index 84c494faca..6adb53969d 100644 --- a/apps/src/lib/node/ledger/shell/queries.rs +++ b/apps/src/lib/node/ledger/shell/queries.rs @@ -68,7 +68,7 @@ where mod test_queries { use namada::core::ledger::storage::EPOCH_SWITCH_BLOCKS_DELAY; use namada::ledger::pos::PosQueries; - use namada::proof_of_stake::read_consensus_validator_set_addresses_with_stake; + use namada::proof_of_stake::storage::read_consensus_validator_set_addresses_with_stake; use namada::proof_of_stake::types::WeightedValidator; use namada::tendermint::abci::types::VoteInfo; use namada::types::storage::Epoch; diff --git a/apps/src/lib/node/ledger/shell/testing/node.rs b/apps/src/lib/node/ledger/shell/testing/node.rs index 4f8fa13342..b3ef78027e 100644 --- a/apps/src/lib/node/ledger/shell/testing/node.rs +++ b/apps/src/lib/node/ledger/shell/testing/node.rs @@ -10,6 +10,7 @@ use data_encoding::HEXUPPER; use itertools::Either; use lazy_static::lazy_static; use namada::core::types::ethereum_structs; +use namada::core::types::transaction::ResultCode; use namada::eth_bridge::oracle::config::Config as OracleConfig; use namada::ledger::dry_run_tx; use namada::ledger::events::log::dumb_queries; @@ -20,11 +21,11 @@ use namada::ledger::storage::{ LastBlock, Sha256Hasher, EPOCH_SWITCH_BLOCKS_DELAY, }; use 
namada::proof_of_stake::pos_queries::PosQueries; -use namada::proof_of_stake::types::WeightedValidator; -use namada::proof_of_stake::{ +use namada::proof_of_stake::storage::{ read_consensus_validator_set_addresses_with_stake, validator_consensus_key_handle, }; +use namada::proof_of_stake::types::WeightedValidator; use namada::tendermint::abci::response::Info; use namada::tendermint::abci::types::VoteInfo; use namada::tendermint_rpc::SimpleRequest; @@ -35,7 +36,6 @@ use namada::types::key::tm_consensus_key_raw_hash; use namada::types::storage::{BlockHash, BlockHeight, Epoch, Header}; use namada::types::time::DateTimeUtc; use namada_sdk::queries::Client; -use num_traits::cast::FromPrimitive; use regex::Regex; use tokio::sync::mpsc; @@ -51,7 +51,7 @@ use crate::node::ledger::ethereum_oracle::{ control, last_processed_block, try_process_eth_events, }; use crate::node::ledger::shell::testing::utils::TestDir; -use crate::node::ledger::shell::{ErrorCodes, EthereumOracleChannels, Shell}; +use crate::node::ledger::shell::{EthereumOracleChannels, Shell}; use crate::node::ledger::shims::abcipp_shim_types::shim::request::{ FinalizeBlock, ProcessedTx, }; @@ -239,7 +239,7 @@ pub enum NodeResults { /// Rejected by Process Proposal Rejected(TxResult), /// Failure in application in Finalize Block - Failed(ErrorCodes), + Failed(ResultCode), } pub struct MockNode { @@ -509,14 +509,14 @@ impl MockNode { .events .into_iter() .map(|e| { - let code = ErrorCodes::from_u32( + let code = ResultCode::from_u32( e.attributes .get("code") .map(|e| u32::from_str(e).unwrap()) .unwrap_or_default(), ) .unwrap(); - if code == ErrorCodes::Ok { + if code == ResultCode::Ok { NodeResults::Ok } else { NodeResults::Failed(code) diff --git a/apps/src/lib/node/ledger/shell/vote_extensions/bridge_pool_vext.rs b/apps/src/lib/node/ledger/shell/vote_extensions/bridge_pool_vext.rs index 3aded3035a..629cce4e8d 100644 --- a/apps/src/lib/node/ledger/shell/vote_extensions/bridge_pool_vext.rs +++ b/apps/src/lib/node/ledger/shell/vote_extensions/bridge_pool_vext.rs @@ -203,14 +203,14 @@ mod test_bp_vote_extensions { use namada::core::ledger::eth_bridge::storage::bridge_pool::get_key_from_hash; use namada::ledger::pos::PosQueries; use namada::ledger::storage_api::StorageWrite; + use namada::proof_of_stake::storage::{ + consensus_validator_set_handle, + read_consensus_validator_set_addresses_with_stake, + }; use namada::proof_of_stake::types::{ Position as ValidatorPosition, WeightedValidator, }; - use namada::proof_of_stake::{ - become_validator, consensus_validator_set_handle, - read_consensus_validator_set_addresses_with_stake, BecomeValidator, - Epoch, - }; + use namada::proof_of_stake::{become_validator, BecomeValidator, Epoch}; use namada::proto::{SignableEthMessage, Signed}; use namada::tendermint::abci::types::VoteInfo; use namada::types::ethereum_events::Uint; diff --git a/apps/src/lib/node/ledger/shell/vote_extensions/eth_events.rs b/apps/src/lib/node/ledger/shell/vote_extensions/eth_events.rs index 127c4c4d0e..23c3013458 100644 --- a/apps/src/lib/node/ledger/shell/vote_extensions/eth_events.rs +++ b/apps/src/lib/node/ledger/shell/vote_extensions/eth_events.rs @@ -299,11 +299,11 @@ mod test_vote_extensions { use namada::eth_bridge::storage::bridge_pool; use namada::ledger::eth_bridge::EthBridgeQueries; use namada::ledger::pos::PosQueries; - use namada::proof_of_stake::types::WeightedValidator; - use namada::proof_of_stake::{ + use namada::proof_of_stake::storage::{ consensus_validator_set_handle, 
read_consensus_validator_set_addresses_with_stake, }; + use namada::proof_of_stake::types::WeightedValidator; use namada::tendermint::abci::types::VoteInfo; use namada::types::address::testing::gen_established_address; use namada::types::ethereum_events::{ diff --git a/apps/src/lib/node/ledger/shell/vote_extensions/val_set_update.rs b/apps/src/lib/node/ledger/shell/vote_extensions/val_set_update.rs index b4b5595a0e..91ecadbdc5 100644 --- a/apps/src/lib/node/ledger/shell/vote_extensions/val_set_update.rs +++ b/apps/src/lib/node/ledger/shell/vote_extensions/val_set_update.rs @@ -258,11 +258,12 @@ mod test_vote_extensions { NestedSubKey, SubKey, }; use namada::ledger::pos::PosQueries; - use namada::proof_of_stake::types::WeightedValidator; - use namada::proof_of_stake::{ + use namada::proof_of_stake::storage::{ consensus_validator_set_handle, - read_consensus_validator_set_addresses_with_stake, Epoch, + read_consensus_validator_set_addresses_with_stake, }; + use namada::proof_of_stake::types::WeightedValidator; + use namada::proof_of_stake::Epoch; use namada::tendermint::abci::types::VoteInfo; use namada::types::key::RefTo; use namada::types::vote_extensions::validator_set_update; diff --git a/apps/src/lib/node/ledger/shims/abcipp_shim.rs b/apps/src/lib/node/ledger/shims/abcipp_shim.rs index 563d88ac59..a8140ce56a 100644 --- a/apps/src/lib/node/ledger/shims/abcipp_shim.rs +++ b/apps/src/lib/node/ledger/shims/abcipp_shim.rs @@ -5,7 +5,7 @@ use std::pin::Pin; use std::task::{Context, Poll}; use futures::future::FutureExt; -use namada::proof_of_stake::find_validator_by_raw_hash; +use namada::proof_of_stake::storage::find_validator_by_raw_hash; use namada::proto::Tx; use namada::types::hash::Hash; use namada::types::key::tm_raw_hash_to_string; diff --git a/apps/src/lib/node/ledger/storage/mod.rs b/apps/src/lib/node/ledger/storage/mod.rs index 4a02b2bcf9..58ae90ce88 100644 --- a/apps/src/lib/node/ledger/storage/mod.rs +++ b/apps/src/lib/node/ledger/storage/mod.rs @@ -162,8 +162,6 @@ mod tests { implicit_vp_code_hash: Default::default(), epochs_per_year: 365, max_signatures_per_transaction: 10, - pos_gain_p: Default::default(), - pos_gain_d: Default::default(), staked_ratio: Default::default(), pos_inflation_amount: Default::default(), fee_unshielding_gas_limit: 0, diff --git a/apps/src/lib/node/ledger/tendermint_node.rs b/apps/src/lib/node/ledger/tendermint_node.rs index 9845b65c50..87616d9df2 100644 --- a/apps/src/lib/node/ledger/tendermint_node.rs +++ b/apps/src/lib/node/ledger/tendermint_node.rs @@ -11,9 +11,11 @@ use namada::types::time::DateTimeUtc; use serde_json::json; use sha2::{Digest, Sha256}; use thiserror::Error; -use tokio::fs::{self, File, OpenOptions}; +use tokio::fs::{File, OpenOptions}; use tokio::io::{AsyncReadExt, AsyncWriteExt}; -use tokio::process::Command; +use tokio::process::{Child, Command}; +use tokio::sync::oneshot::error::RecvError; +use tokio::sync::oneshot::{Receiver, Sender}; use crate::cli::namada_version; use crate::config; @@ -46,6 +48,12 @@ pub enum Error { RollBack(String), #[error("Failed to convert to String: {0:?}")] TendermintPath(std::ffi::OsString), + #[error("Couldn't write {0}")] + CantWrite(String), + #[error("Couldn't create {0}")] + CantCreate(String), + #[error("Couldn't encode {0}")] + CantEncode(&'static str), } pub type Result = std::result::Result; @@ -74,10 +82,26 @@ pub async fn run( genesis_time: DateTimeUtc, proxy_app_address: String, config: config::Ledger, - abort_recv: tokio::sync::oneshot::Receiver< - tokio::sync::oneshot::Sender<()>, - 
>, + abort_recv: Receiver>, ) -> Result<()> { + let (home_dir_string, tendermint_path) = + initalize_config(home_dir, chain_id, genesis_time, config).await?; + let tendermint_node = + start_node(proxy_app_address, home_dir_string, tendermint_path)?; + + tracing::info!("CometBFT node started"); + + handle_node_response(tendermint_node, abort_recv).await +} + +/// Setup the tendermint configuration. We return the tendermint path and home +/// directory +async fn initalize_config( + home_dir: PathBuf, + chain_id: ChainId, + genesis_time: DateTimeUtc, + config: config::Ledger, +) -> Result<(String, String)> { let home_dir_string = home_dir.to_string_lossy().to_string(); let tendermint_path = from_env_or_default()?; let mode = config.shell.tendermint_mode.to_str().to_owned(); @@ -92,11 +116,19 @@ pub async fn run( panic!("Tendermint failed to initialize with {:#?}", output); } - write_tm_genesis(&home_dir, chain_id, genesis_time).await; + write_tm_genesis(&home_dir, chain_id, genesis_time).await?; update_tendermint_config(&home_dir, config.cometbft).await?; + Ok((home_dir_string, tendermint_path)) +} - let mut tendermint_node = Command::new(&tendermint_path); +/// Startup the node +fn start_node( + proxy_app_address: String, + home_dir_string: String, + tendermint_path: String, +) -> Result { + let mut tendermint_node = Command::new(tendermint_path); tendermint_node.args([ "start", "--proxy_app", @@ -113,12 +145,17 @@ pub async fn run( tendermint_node.stdout(Stdio::null()); } - let mut tendermint_node = tendermint_node + tendermint_node .kill_on_drop(true) .spawn() - .map_err(Error::StartUp)?; - tracing::info!("CometBFT node started"); + .map_err(Error::StartUp) +} +/// Handle the node response +async fn handle_node_response( + mut tendermint_node: Child, + abort_recv: Receiver>, +) -> Result<()> { tokio::select! 
{ status = tendermint_node.wait() => { match status { @@ -135,22 +172,30 @@ pub async fn run( } }, resp_sender = abort_recv => { - match resp_sender { - Ok(resp_sender) => { - tracing::info!("Shutting down Tendermint node..."); - tendermint_node.kill().await.unwrap(); - resp_sender.send(()).unwrap(); - }, - Err(err) => { - tracing::error!("The Tendermint abort sender has unexpectedly dropped: {}", err); - tracing::info!("Shutting down Tendermint node..."); - tendermint_node.kill().await.unwrap(); - } - } + handle_abort(resp_sender, &mut tendermint_node).await; Ok(()) } } } +// Handle tendermint aborting +async fn handle_abort( + resp_sender: std::result::Result, RecvError>, + node: &mut Child, +) { + match resp_sender { + Ok(resp_sender) => { + tracing_kill(node).await; + resp_sender.send(()).unwrap(); + } + Err(err) => { + tracing::error!( + "The Tendermint abort sender has unexpectedly dropped: {}", + err + ); + tracing_kill(node).await; + } + } +} pub fn reset(tendermint_dir: impl AsRef) -> Result<()> { let tendermint_path = from_env_or_default()?; @@ -251,76 +296,55 @@ fn validator_key_to_json( })) } -/// Initialize validator private key for Tendermint -pub async fn write_validator_key_async( - home_dir: impl AsRef, - consensus_key: &common::SecretKey, -) { - let home_dir = home_dir.as_ref(); - let path = home_dir.join("config").join("priv_validator_key.json"); - // Make sure the dir exists - let wallet_dir = path.parent().unwrap(); - fs::create_dir_all(wallet_dir) - .await - .expect("Couldn't create private validator key directory"); - let mut file = OpenOptions::new() - .create(true) - .write(true) - .truncate(true) - .open(&path) - .await - .expect("Couldn't create private validator key file"); - let key = validator_key_to_json(consensus_key).unwrap(); - let data = serde_json::to_vec_pretty(&key) - .expect("Couldn't encode private validator key file"); - file.write_all(&data[..]) - .await - .expect("Couldn't write private validator key file"); -} - /// Initialize validator private key for Tendermint pub fn write_validator_key( home_dir: impl AsRef, consensus_key: &common::SecretKey, -) { - let home_dir = home_dir.as_ref(); - let path = home_dir.join("config").join("priv_validator_key.json"); - // Make sure the dir exists - let wallet_dir = path.parent().unwrap(); - std::fs::create_dir_all(wallet_dir) - .expect("Couldn't create private validator key directory"); - let file = std::fs::OpenOptions::new() - .create(true) - .write(true) - .truncate(true) - .open(&path) - .expect("Couldn't create private validator key file"); +) -> Result<()> { let key = validator_key_to_json(consensus_key).unwrap(); - serde_json::to_writer_pretty(file, &key) - .expect("Couldn't write private validator key file"); + write_validator(validator_key(home_dir), KEY_DIR, KEY_FILE, key) } /// Initialize validator private state for Tendermint -pub fn write_validator_state(home_dir: impl AsRef) { - let home_dir = home_dir.as_ref(); - let path = home_dir.join("data").join("priv_validator_state.json"); - // Make sure the dir exists - let wallet_dir = path.parent().unwrap(); - std::fs::create_dir_all(wallet_dir) - .expect("Couldn't create private validator state directory"); - let file = std::fs::OpenOptions::new() - .create(true) - .write(true) - .truncate(true) - .open(&path) - .expect("Couldn't create private validator state file"); +pub fn write_validator_state(home_dir: impl AsRef) -> Result<()> { let state = json!({ "height": "0", "round": 0, "step": 0 }); - serde_json::to_writer_pretty(file, &state) - 
.expect("Couldn't write private validator state file"); + write_validator(validator_state(home_dir), STATE_DIR, STATE_FILE, state) +} + +/// Abstract over the initialization of validator data for Tendermint +fn write_validator( + path: PathBuf, + err_dir: &'static str, + err_file: &'static str, + data: serde_json::Value, +) -> Result<()> { + let parent_dir = path.parent().unwrap(); + // Make sure the dir exists + std::fs::create_dir_all(parent_dir).map_err(|err| { + Error::CantCreate(format!( + "{} at {}. Caused by {err}", + err_dir, + parent_dir.to_string_lossy() + )) + })?; + let file = ensure_empty(&path).map_err(|err| { + Error::CantCreate(format!( + "{} at {}. Caused by {err}", + err_dir, + path.to_string_lossy() + )) + })?; + serde_json::to_writer_pretty(file, &data).map_err(|err| { + Error::CantWrite(format!( + "{} to {}. Caused by {err}", + err_file, + path.to_string_lossy() + )) + }) } /// Length of a Tendermint Node ID in bytes @@ -349,8 +373,7 @@ async fn update_tendermint_config( home_dir: impl AsRef, mut config: TendermintConfig, ) -> Result<()> { - let home_dir = home_dir.as_ref(); - let path = home_dir.join("config").join("config.toml"); + let path = configuration(home_dir); config.moniker = Moniker::from_str(&format!("{}-{}", config.moniker, namada_version())) @@ -408,9 +431,8 @@ async fn write_tm_genesis( home_dir: impl AsRef, chain_id: ChainId, genesis_time: DateTimeUtc, -) { - let home_dir = home_dir.as_ref(); - let path = home_dir.join("config").join("genesis.json"); +) -> Result<()> { + let path = genesis(home_dir); let mut file = File::open(&path).await.unwrap_or_else(|err| { panic!( "Couldn't open the genesis file at {:?}, error: {}", @@ -456,8 +478,55 @@ async fn write_tm_genesis( ) }); let data = serde_json::to_vec_pretty(&genesis) - .expect("Couldn't encode the CometBFT genesis file"); - file.write_all(&data[..]) - .await - .expect("Couldn't write the CometBFT genesis file"); + .map_err(|_| Error::CantEncode(GENESIS_FILE))?; + file.write_all(&data[..]).await.map_err(|err| { + Error::CantWrite(format!( + "{} to {}. 
Caused by {err}", + GENESIS_FILE, + path.to_string_lossy() + )) + }) +} + +async fn tracing_kill(node: &mut Child) { + tracing::info!("Shutting down Tendermint node..."); + node.kill().await.unwrap(); +} + +fn ensure_empty(path: &PathBuf) -> std::io::Result { + std::fs::OpenOptions::new() + .create(true) + .write(true) + .truncate(true) + .open(path) +} +fn validator_key(home_dir: impl AsRef) -> PathBuf { + home_dir + .as_ref() + .join("config") + .join("priv_validator_key.json") } + +fn validator_state(home_dir: impl AsRef) -> PathBuf { + home_dir + .as_ref() + .join("data") + .join("priv_validator_state.json") +} + +fn configuration(home_dir: impl AsRef) -> PathBuf { + home_dir.as_ref().join("config").join("config.toml") +} +fn genesis(home_dir: impl AsRef) -> PathBuf { + home_dir.as_ref().join("config").join("genesis.json") +} + +// Constant strings to avoid repeating our magic words + +const KEY_FILE: &str = "private validator key file"; +const KEY_DIR: &str = "private validator key directory"; + +const STATE_FILE: &str = "private validator state file"; +const STATE_DIR: &str = "private validator state directory"; + +const GENESIS_FILE: &str = "CometBFT genesis file"; diff --git a/apps/src/lib/wasm_loader/mod.rs b/apps/src/lib/wasm_loader/mod.rs index 163b777f03..f7248d74e4 100644 --- a/apps/src/lib/wasm_loader/mod.rs +++ b/apps/src/lib/wasm_loader/mod.rs @@ -39,16 +39,21 @@ const DEFAULT_WASM_SERVER: &str = "https://artifacts.heliax.click/namada-wasm"; impl Checksums { /// Read WASM checksums from the given path - pub fn read_checksums_file(checksums_path: impl AsRef) -> Self { + pub fn read_checksums_file( + checksums_path: impl AsRef, + ) -> Result { match fs::File::open(&checksums_path) { Ok(file) => match serde_json::from_reader(file) { - Ok(result) => result, + Ok(result) => Ok(result), Err(_) => { eprintln!( "Can't read checksums from {}", checksums_path.as_ref().to_string_lossy() ); - safe_exit(1); + Err(eyre!( + "Can't read checksums from {}", + checksums_path.as_ref().to_string_lossy() + )) } }, Err(_) => { @@ -56,13 +61,18 @@ impl Checksums { "Can't find checksums at {}", checksums_path.as_ref().to_string_lossy() ); - safe_exit(1); + Err(eyre!( + "Can't find checksums at {}", + checksums_path.as_ref().to_string_lossy() + )) } } } /// Read WASM checksums from "checksums.json" in the given directory - pub fn read_checksums(wasm_directory: impl AsRef) -> Self { + pub fn read_checksums( + wasm_directory: impl AsRef, + ) -> Result { let checksums_path = wasm_directory.as_ref().join(DEFAULT_WASM_CHECKSUMS_FILE); Self::read_checksums_file(checksums_path) @@ -203,7 +213,7 @@ pub fn read_wasm( file_path: impl AsRef, ) -> eyre::Result> { // load json with wasm hashes - let checksums = Checksums::read_checksums(&wasm_directory); + let checksums = Checksums::read_checksums(&wasm_directory)?; if let Some(os_name) = file_path.as_ref().file_name() { if let Some(name) = os_name.to_str() { diff --git a/benches/Cargo.toml b/benches/Cargo.toml index 57de47dba4..b467253e2d 100644 --- a/benches/Cargo.toml +++ b/benches/Cargo.toml @@ -42,6 +42,7 @@ path = "host_env.rs" [dev-dependencies] namada = { path = "../shared", features = ["testing"] } namada_apps = { path = "../apps", features = ["benches"] } +masp_primitives.workspace = true borsh.workspace = true borsh-ext.workspace = true criterion = { version = "0.5", features = ["html_reports"] } diff --git a/benches/native_vps.rs b/benches/native_vps.rs index 7981216918..a1cf7e42f1 100644 --- a/benches/native_vps.rs +++ b/benches/native_vps.rs @@ 
-4,6 +4,8 @@ use std::rc::Rc; use std::str::FromStr; use criterion::{criterion_group, criterion_main, Criterion}; +use masp_primitives::bls12_381; +use masp_primitives::sapling::Node; use namada::core::ledger::governance::storage::proposal::ProposalType; use namada::core::ledger::governance::storage::vote::{ StorageProposalVote, VoteType, @@ -39,14 +41,18 @@ use namada::ledger::native_vp::{Ctx, NativeVp}; use namada::ledger::pgf::PgfVp; use namada::ledger::pos::PosVP; use namada::namada_sdk::masp::verify_shielded_tx; +use namada::namada_sdk::masp_primitives::merkle_tree::CommitmentTree; use namada::namada_sdk::masp_primitives::transaction::Transaction; use namada::proof_of_stake; use namada::proof_of_stake::KeySeg; use namada::proto::{Code, Section, Tx}; -use namada::types::address::InternalAddress; +use namada::types::address::{InternalAddress, MASP}; use namada::types::eth_bridge_pool::{GasFee, PendingTransfer}; use namada::types::masp::{TransferSource, TransferTarget}; use namada::types::storage::{Epoch, TxIndex}; +use namada::types::token::{ + MASP_NOTE_COMMITMENT_ANCHOR_PREFIX, MASP_NOTE_COMMITMENT_TREE_KEY, +}; use namada::types::transaction::governance::{ InitProposalData, VoteProposalData, }; @@ -110,7 +116,8 @@ fn governance(c: &mut Criterion) { let content_section = Section::ExtraData(Code::new(vec![], None)); let params = - proof_of_stake::read_pos_params(&shell.wl_storage).unwrap(); + proof_of_stake::storage::read_pos_params(&shell.wl_storage) + .unwrap(); let voting_start_epoch = Epoch(2 + params.pipeline_len + params.unbonding_len); // Must start after current epoch @@ -161,7 +168,8 @@ fn governance(c: &mut Criterion) { )); let params = - proof_of_stake::read_pos_params(&shell.wl_storage).unwrap(); + proof_of_stake::storage::read_pos_params(&shell.wl_storage) + .unwrap(); let voting_start_epoch = Epoch(2 + params.pipeline_len + params.unbonding_len); // Must start after current epoch @@ -501,6 +509,30 @@ fn setup_storage_for_masp_verification( ); shielded_ctx.shell.execute_tx(&shield_tx); shielded_ctx.shell.wl_storage.commit_tx(); + + // Update the anchor in storage + let tree_key = namada::core::types::storage::Key::from(MASP.to_db_key()) + .push(&MASP_NOTE_COMMITMENT_TREE_KEY.to_owned()) + .expect("Cannot obtain a storage key"); + let updated_tree: CommitmentTree = shielded_ctx + .shell + .wl_storage + .read(&tree_key) + .unwrap() + .unwrap(); + let anchor_key = namada::core::types::storage::Key::from(MASP.to_db_key()) + .push(&MASP_NOTE_COMMITMENT_ANCHOR_PREFIX.to_owned()) + .expect("Cannot obtain a storage key") + .push(&namada::core::types::hash::Hash( + bls12_381::Scalar::from(updated_tree.root()).to_bytes(), + )) + .expect("Cannot obtain a storage key"); + shielded_ctx + .shell + .wl_storage + .write(&anchor_key, ()) + .unwrap(); + shielded_ctx.shell.commit(); let (mut shielded_ctx, signed_tx) = match bench_name { diff --git a/benches/process_wrapper.rs b/benches/process_wrapper.rs index d8187e11ad..6f3c812ffb 100644 --- a/benches/process_wrapper.rs +++ b/benches/process_wrapper.rs @@ -6,6 +6,7 @@ use namada::proto::Signature; use namada::types::key::RefTo; use namada::types::storage::BlockHeight; use namada::types::time::DateTimeUtc; +use namada::types::token::DenominatedAmount; use namada::types::transaction::{Fee, WrapperTx}; use namada_apps::bench_utils::{BenchShell, TX_TRANSFER_WASM}; use namada_apps::node::ledger::shell::process_proposal::ValidationMeta; @@ -37,7 +38,7 @@ fn process_tx(c: &mut Criterion) { WrapperTx::new( Fee { token: address::nam(), - 
amount_per_gas_unit: 1.into(), + amount_per_gas_unit: DenominatedAmount::native(1.into()), }, defaults::albert_keypair().ref_to(), 0.into(), diff --git a/benches/txs.rs b/benches/txs.rs index 454b50f401..523cd48489 100644 --- a/benches/txs.rs +++ b/benches/txs.rs @@ -25,8 +25,9 @@ use namada::ibc::core::host::types::identifiers::{ }; use namada::ledger::eth_bridge::read_native_erc20_address; use namada::ledger::storage_api::{StorageRead, StorageWrite}; +use namada::proof_of_stake::storage::read_pos_params; use namada::proof_of_stake::types::SlashType; -use namada::proof_of_stake::{self, read_pos_params, KeySeg}; +use namada::proof_of_stake::{self, KeySeg}; use namada::proto::{Code, Section}; use namada::types::address::{self, Address}; use namada::types::eth_bridge_pool::{GasFee, PendingTransfer}; @@ -285,9 +286,10 @@ fn withdraw(c: &mut Criterion) { shell.wl_storage.commit_tx(); // Advance Epoch for pipeline and unbonding length - let params = - proof_of_stake::read_pos_params(&shell.wl_storage) - .unwrap(); + let params = proof_of_stake::storage::read_pos_params( + &shell.wl_storage, + ) + .unwrap(); let advance_epochs = params.pipeline_len + params.unbonding_len; @@ -330,7 +332,7 @@ fn redelegate(c: &mut Criterion) { let shell = BenchShell::default(); // Find the other genesis validator let current_epoch = shell.wl_storage.get_block_epoch().unwrap(); - let validators = namada::proof_of_stake::read_consensus_validator_set_addresses(&shell.inner.wl_storage, current_epoch).unwrap(); + let validators = namada::proof_of_stake::storage::read_consensus_validator_set_addresses(&shell.inner.wl_storage, current_epoch).unwrap(); let validator_2 = validators.into_iter().find(|addr| addr != &defaults::validator_address()).expect("There must be another validator to redelegate to"); // Prepare the redelegation tx (shell, redelegation(validator_2)) @@ -686,21 +688,21 @@ fn change_validator_commission(c: &mut Criterion) { fn change_consensus_key(c: &mut Criterion) { let mut csprng = rand::rngs::OsRng {}; - let consensus_key = ed25519::SigScheme::generate(&mut csprng) + let consensus_sk = ed25519::SigScheme::generate(&mut csprng) .try_to_sk::() - .unwrap() - .to_public(); + .unwrap(); + let consensus_pk = consensus_sk.to_public(); let shell = BenchShell::default(); let signed_tx = shell.generate_tx( TX_CHANGE_CONSENSUS_KEY_WASM, ConsensusKeyChange { validator: defaults::validator_address(), - consensus_key, + consensus_key: consensus_pk, }, None, None, - vec![&defaults::validator_keypair()], + vec![&defaults::validator_keypair(), &consensus_sk], ); c.bench_function("change_consensus_key", |b| { @@ -835,7 +837,7 @@ fn unjail_validator(c: &mut Criterion) { let pos_params = read_pos_params(&shell.wl_storage).unwrap(); let current_epoch = shell.wl_storage.storage.block.epoch; let evidence_epoch = current_epoch.prev(); - proof_of_stake::slash( + proof_of_stake::slashing::slash( &mut shell.wl_storage, &pos_params, current_epoch, @@ -1055,9 +1057,10 @@ fn claim_rewards(c: &mut Criterion) { let mut shell = BenchShell::default(); // Advance Epoch for pipeline and unbonding length - let params = - proof_of_stake::read_pos_params(&shell.wl_storage) - .unwrap(); + let params = proof_of_stake::storage::read_pos_params( + &shell.wl_storage, + ) + .unwrap(); let advance_epochs = params.pipeline_len + params.unbonding_len; diff --git a/benches/vps.rs b/benches/vps.rs index 472ffb3faa..620d14c43f 100644 --- a/benches/vps.rs +++ b/benches/vps.rs @@ -182,8 +182,7 @@ fn vp_implicit(c: &mut Criterion) { .try_to_sk() 
.unwrap(); - let foreign_key_write = - generate_foreign_key_tx(&defaults::albert_keypair()); + let foreign_key_write = generate_foreign_key_tx(&implicit_account); let shell = BenchShell::default(); let transfer = shell.generate_tx( @@ -327,7 +326,7 @@ fn vp_validator(c: &mut Criterion) { let mut group = c.benchmark_group("vp_validator"); let foreign_key_write = - generate_foreign_key_tx(&defaults::albert_keypair()); + generate_foreign_key_tx(&defaults::validator_account_keypair()); let transfer = shell.generate_tx( TX_TRANSFER_WASM, diff --git a/core/Cargo.toml b/core/Cargo.toml index 7f5d6b7c61..5c6ae0f6a1 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -52,6 +52,7 @@ itertools.workspace = true k256.workspace = true masp_primitives.workspace = true num256.workspace = true +num-derive.workspace = true num_enum = "0.7.0" num-integer = "0.1.45" num-rational.workspace = true diff --git a/core/src/ledger/gas.rs b/core/src/ledger/gas.rs index 7d88c3404b..f2c1e72844 100644 --- a/core/src/ledger/gas.rs +++ b/core/src/ledger/gas.rs @@ -5,6 +5,7 @@ use std::fmt::Display; use std::ops::Div; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; +use serde::{Deserialize, Serialize}; use thiserror::Error; use super::parameters; @@ -89,6 +90,8 @@ pub fn get_max_block_gas( BorshDeserialize, BorshSerialize, BorshSchema, + Serialize, + Deserialize, )] pub struct Gas { sub: u64, @@ -223,7 +226,14 @@ pub struct VpGasMeter { /// Gas meter for VPs parallel runs #[derive( - Clone, Debug, Default, BorshSerialize, BorshDeserialize, BorshSchema, + Clone, + Debug, + Default, + BorshSerialize, + BorshDeserialize, + BorshSchema, + Serialize, + Deserialize, )] pub struct VpsGas { max: Gas, diff --git a/core/src/ledger/governance/storage/proposal.rs b/core/src/ledger/governance/storage/proposal.rs index 014b24a16c..13e1aecae1 100644 --- a/core/src/ledger/governance/storage/proposal.rs +++ b/core/src/ledger/governance/storage/proposal.rs @@ -311,3 +311,66 @@ impl Display for StorageProposal { ) } } + +#[cfg(any(test, feature = "testing"))] +/// Testing helpers and and strategies for governance proposals +pub mod testing { + use proptest::prelude::Strategy; + use proptest::{collection, option, prop_compose}; + + use super::*; + use crate::types::address::testing::arb_non_internal_address; + use crate::types::hash::testing::arb_hash; + use crate::types::token::testing::arb_amount; + + /// Generate an arbitrary add or removal of what's generated by the supplied + /// strategy + pub fn arb_add_remove( + strategy: X, + ) -> impl Strategy::Value>> { + (0..2, strategy).prop_map(|(discriminant, val)| match discriminant { + 0 => AddRemove::Add(val), + 1 => AddRemove::Remove(val), + _ => unreachable!(), + }) + } + + prop_compose! 
{ + /// Generate an arbitrary PGF target + pub fn arb_pgf_target()( + target in arb_non_internal_address(), + amount in arb_amount(), + ) -> PGFTarget { + PGFTarget { + target, + amount, + } + } + } + + /// Generate an arbitrary PGF action + pub fn arb_pgf_action() -> impl Strategy { + arb_add_remove(arb_pgf_target()) + .prop_map(PGFAction::Continuous) + .boxed() + .prop_union(arb_pgf_target().prop_map(PGFAction::Retro).boxed()) + } + + /// Generate an arbitrary proposal type + pub fn arb_proposal_type() -> impl Strategy { + option::of(arb_hash()) + .prop_map(ProposalType::Default) + .boxed() + .prop_union( + collection::hash_set( + arb_add_remove(arb_non_internal_address()), + 0..10, + ) + .prop_map(ProposalType::PGFSteward) + .boxed(), + ) + .or(collection::vec(arb_pgf_action(), 0..10) + .prop_map(ProposalType::PGFPayment) + .boxed()) + } +} diff --git a/core/src/ledger/governance/storage/vote.rs b/core/src/ledger/governance/storage/vote.rs index 780d4fdf2e..9be49fa9b7 100644 --- a/core/src/ledger/governance/storage/vote.rs +++ b/core/src/ledger/governance/storage/vote.rs @@ -144,3 +144,33 @@ impl PartialEq for ProposalType { } } } + +#[cfg(any(test, feature = "testing"))] +/// Testing helpers and strategies for governance votes +pub mod testing { + use proptest::prelude::{Just, Strategy}; + use proptest::prop_compose; + + use super::*; + + prop_compose! { + /// Geerate an arbitrary vote type + pub fn arb_vote_type()(discriminant in 0..3) -> VoteType { + match discriminant { + 0 => VoteType::Default, + 1 => VoteType::PGFSteward, + 2 => VoteType::PGFPayment, + _ => unreachable!(), + } + } + } + + /// Generate an arbitrary proposal vote + pub fn arb_proposal_vote() -> impl Strategy { + arb_vote_type() + .prop_map(StorageProposalVote::Yay) + .boxed() + .prop_union(Just(StorageProposalVote::Nay).boxed()) + .or(Just(StorageProposalVote::Abstain).boxed()) + } +} diff --git a/core/src/ledger/ibc/context/token_transfer.rs b/core/src/ledger/ibc/context/token_transfer.rs index c358049bc2..3e0cf5bc23 100644 --- a/core/src/ledger/ibc/context/token_transfer.rs +++ b/core/src/ledger/ibc/context/token_transfer.rs @@ -63,7 +63,7 @@ where .into(), ) })?; - let amount = token::DenominatedAmount { amount, denom }; + let amount = token::DenominatedAmount::new(amount, denom); Ok((token, amount)) } diff --git a/core/src/ledger/ibc/mod.rs b/core/src/ledger/ibc/mod.rs index 1b01017d3f..b695aef259 100644 --- a/core/src/ledger/ibc/mod.rs +++ b/core/src/ledger/ibc/mod.rs @@ -8,6 +8,7 @@ use std::fmt::Debug; use std::rc::Rc; use std::str::FromStr; +use borsh::BorshDeserialize; pub use context::common::IbcCommonContext; use context::router::IbcRouter; pub use context::storage::{IbcStorageContext, ProofSpec}; @@ -37,16 +38,16 @@ use crate::ibc::core::router::types::module::ModuleId; use crate::ibc::primitives::proto::Any; use crate::types::address::{Address, MASP}; use crate::types::ibc::{ - get_shielded_transfer, is_ibc_denom, EVENT_TYPE_DENOM_TRACE, - EVENT_TYPE_PACKET, + get_shielded_transfer, is_ibc_denom, MsgShieldedTransfer, + EVENT_TYPE_DENOM_TRACE, EVENT_TYPE_PACKET, }; use crate::types::masp::PaymentAddress; #[allow(missing_docs)] #[derive(Error, Debug)] pub enum Error { - #[error("Decoding IBC data error: {0}")] - DecodingData(prost::DecodeError), + #[error("Decoding IBC data error")] + DecodingData, #[error("Decoding message error: {0}")] DecodingMessage(RouterError), #[error("IBC context error: {0}")] @@ -99,28 +100,37 @@ where /// Execute according to the message in an IBC transaction or VP pub fn 
execute(&mut self, tx_data: &[u8]) -> Result<(), Error> { - let any_msg = Any::decode(tx_data).map_err(Error::DecodingData)?; - match MsgTransfer::try_from(any_msg.clone()) { - Ok(msg) => { + let message = decode_message(tx_data)?; + match &message { + IbcMessage::Transfer(msg) => { let mut token_transfer_ctx = TokenTransferContext::new(self.ctx.inner.clone()); send_transfer_execute( &mut self.ctx, &mut token_transfer_ctx, - msg, + msg.clone(), ) .map_err(Error::TokenTransfer) } - Err(_) => { - let envelope = MsgEnvelope::try_from(any_msg) - .map_err(Error::DecodingMessage)?; + IbcMessage::ShieldedTransfer(msg) => { + let mut token_transfer_ctx = + TokenTransferContext::new(self.ctx.inner.clone()); + send_transfer_execute( + &mut self.ctx, + &mut token_transfer_ctx, + msg.message.clone(), + ) + .map_err(Error::TokenTransfer)?; + self.handle_masp_tx(message) + } + IbcMessage::Envelope(envelope) => { execute(&mut self.ctx, &mut self.router, envelope.clone()) .map_err(|e| Error::Context(Box::new(e)))?; - // For receiving the token to a shielded address - self.handle_masp_tx(&envelope)?; // the current ibc-rs execution doesn't store the denom for the // token hash when transfer with MsgRecvPacket - self.store_denom(&envelope) + self.store_denom(envelope)?; + // For receiving the token to a shielded address + self.handle_masp_tx(message) } } } @@ -218,17 +228,25 @@ where /// Validate according to the message in IBC VP pub fn validate(&self, tx_data: &[u8]) -> Result<(), Error> { - let any_msg = Any::decode(tx_data).map_err(Error::DecodingData)?; - match MsgTransfer::try_from(any_msg.clone()) { - Ok(msg) => { + let message = decode_message(tx_data)?; + match message { + IbcMessage::Transfer(msg) => { let token_transfer_ctx = TokenTransferContext::new(self.ctx.inner.clone()); send_transfer_validate(&self.ctx, &token_transfer_ctx, msg) .map_err(Error::TokenTransfer) } - Err(_) => { - let envelope = MsgEnvelope::try_from(any_msg) - .map_err(Error::DecodingMessage)?; + IbcMessage::ShieldedTransfer(msg) => { + let token_transfer_ctx = + TokenTransferContext::new(self.ctx.inner.clone()); + send_transfer_validate( + &self.ctx, + &token_transfer_ctx, + msg.message, + ) + .map_err(Error::TokenTransfer) + } + IbcMessage::Envelope(envelope) => { validate(&self.ctx, &self.router, envelope) .map_err(|e| Error::Context(Box::new(e))) } @@ -236,9 +254,9 @@ where } /// Handle the MASP transaction if needed - fn handle_masp_tx(&mut self, envelope: &MsgEnvelope) -> Result<(), Error> { - let shielded_transfer = match envelope { - MsgEnvelope::Packet(PacketMsg::Recv(_)) => { + fn handle_masp_tx(&mut self, message: IbcMessage) -> Result<(), Error> { + let shielded_transfer = match message { + IbcMessage::Envelope(MsgEnvelope::Packet(PacketMsg::Recv(_))) => { let event = self .ctx .inner @@ -257,6 +275,7 @@ where None => return Ok(()), } } + IbcMessage::ShieldedTransfer(msg) => Some(msg.shielded_transfer), _ => return Ok(()), }; if let Some(shielded_transfer) = shielded_transfer { @@ -272,6 +291,31 @@ where } } +enum IbcMessage { + Envelope(MsgEnvelope), + Transfer(MsgTransfer), + ShieldedTransfer(MsgShieldedTransfer), +} + +fn decode_message(tx_data: &[u8]) -> Result { + // ibc-rs message + if let Ok(any_msg) = Any::decode(tx_data) { + if let Ok(transfer_msg) = MsgTransfer::try_from(any_msg.clone()) { + return Ok(IbcMessage::Transfer(transfer_msg)); + } + if let Ok(envelope) = MsgEnvelope::try_from(any_msg) { + return Ok(IbcMessage::Envelope(envelope)); + } + } + + // Message with Transfer for the shielded transfer + 
if let Ok(msg) = MsgShieldedTransfer::try_from_slice(tx_data) { + return Ok(IbcMessage::ShieldedTransfer(msg)); + } + + Err(Error::DecodingData) +} + /// Get the IbcToken from the source/destination ports and channels pub fn received_ibc_token( ibc_denom: &PrefixedDenom, @@ -296,3 +340,179 @@ pub fn received_ibc_token( } Ok(storage::ibc_token(ibc_denom.to_string())) } + +#[cfg(any(test, feature = "testing"))] +/// Testing helpers ans strategies for IBC +pub mod testing { + use std::str::FromStr; + + use ibc::apps::transfer::types::packet::PacketData; + use ibc::apps::transfer::types::{ + Amount, BaseDenom, Memo, PrefixedCoin, PrefixedDenom, TracePath, + TracePrefix, + }; + use ibc::core::channel::types::timeout::TimeoutHeight; + use ibc::core::client::types::Height; + use ibc::core::host::types::identifiers::{ChannelId, PortId}; + use ibc::core::primitives::Signer; + use ibc::primitives::proto::Any; + use ibc::primitives::{Msg, Timestamp}; + use proptest::prelude::{Just, Strategy}; + use proptest::{collection, prop_compose}; + + use crate::ibc::apps::transfer::types::msgs::transfer::MsgTransfer; + + prop_compose! { + /// Generate an arbitrary port ID + pub fn arb_ibc_port_id()(id in "[a-zA-Z0-9_+.\\-\\[\\]#<>]{2,128}") -> PortId { + PortId::new(id).expect("generated invalid port ID") + } + } + + prop_compose! { + /// Generate an arbitrary channel ID + pub fn arb_ibc_channel_id()(id: u64) -> ChannelId { + ChannelId::new(id) + } + } + + prop_compose! { + /// Generate an arbitrary IBC height + pub fn arb_ibc_height()( + revision_number: u64, + revision_height in 1u64.., + ) -> Height { + Height::new(revision_number, revision_height) + .expect("generated invalid IBC height") + } + } + + /// Generate arbitrary timeout data + pub fn arb_ibc_timeout_data() -> impl Strategy { + arb_ibc_height() + .prop_map(TimeoutHeight::At) + .boxed() + .prop_union(Just(TimeoutHeight::Never).boxed()) + } + + prop_compose! { + /// Generate an arbitrary IBC timestamp + pub fn arb_ibc_timestamp()(nanoseconds: u64) -> Timestamp { + Timestamp::from_nanoseconds(nanoseconds).expect("generated invalid IBC timestamp") + } + } + + prop_compose! { + /// Generate an arbitrary IBC memo + pub fn arb_ibc_memo()(memo in "[a-zA-Z0-9_]*") -> Memo { + memo.into() + } + } + + prop_compose! { + /// Generate an arbitrary IBC memo + pub fn arb_ibc_signer()(signer in "[a-zA-Z0-9_]*") -> Signer { + signer.into() + } + } + + prop_compose! { + /// Generate an arbitrary IBC trace prefix + pub fn arb_ibc_trace_prefix()( + port_id in arb_ibc_port_id(), + channel_id in arb_ibc_channel_id(), + ) -> TracePrefix { + TracePrefix::new(port_id, channel_id) + } + } + + prop_compose! { + /// Generate an arbitrary IBC trace path + pub fn arb_ibc_trace_path()(path in collection::vec(arb_ibc_trace_prefix(), 0..10)) -> TracePath { + TracePath::from(path) + } + } + + prop_compose! { + /// Generate an arbitrary IBC base denomination + pub fn arb_ibc_base_denom()(base_denom in "[a-zA-Z0-9_]+") -> BaseDenom { + BaseDenom::from_str(&base_denom).expect("generated invalid IBC base denomination") + } + } + + prop_compose! { + /// Generate an arbitrary IBC prefixed denomination + pub fn arb_ibc_prefixed_denom()( + trace_path in arb_ibc_trace_path(), + base_denom in arb_ibc_base_denom(), + ) -> PrefixedDenom { + PrefixedDenom { + trace_path, + base_denom, + } + } + } + + prop_compose! { + /// Generate an arbitrary IBC amount + pub fn arb_ibc_amount()(value: [u64; 4]) -> Amount { + value.into() + } + } + + prop_compose! 
{ + /// Generate an arbitrary prefixed coin + pub fn arb_ibc_prefixed_coin()( + denom in arb_ibc_prefixed_denom(), + amount in arb_ibc_amount(), + ) -> PrefixedCoin { + PrefixedCoin { + denom, + amount, + } + } + } + + prop_compose! { + /// Generate arbitrary packet data + pub fn arb_ibc_packet_data()( + token in arb_ibc_prefixed_coin(), + sender in arb_ibc_signer(), + receiver in arb_ibc_signer(), + memo in arb_ibc_memo(), + ) -> PacketData { + PacketData { + token, + sender, + receiver, + memo, + } + } + } + + prop_compose! { + /// Generate an arbitrary IBC transfer message + pub fn arb_ibc_msg_transfer()( + port_id_on_a in arb_ibc_port_id(), + chan_id_on_a in arb_ibc_channel_id(), + packet_data in arb_ibc_packet_data(), + timeout_height_on_b in arb_ibc_timeout_data(), + timeout_timestamp_on_b in arb_ibc_timestamp(), + ) -> MsgTransfer { + MsgTransfer { + port_id_on_a, + chan_id_on_a, + packet_data, + timeout_height_on_b, + timeout_timestamp_on_b, + } + } + } + + prop_compose! { + /// Generate an arbitrary IBC any object + pub fn arb_ibc_any()(msg_transfer in arb_ibc_msg_transfer()) -> Any { + msg_transfer.to_any() + } + } +} diff --git a/core/src/ledger/masp_conversions.rs b/core/src/ledger/masp_conversions.rs index 1b9facc614..fc98c6ff65 100644 --- a/core/src/ledger/masp_conversions.rs +++ b/core/src/ledger/masp_conversions.rs @@ -154,10 +154,7 @@ where 0, ) .unwrap(); - let denom_amount = DenominatedAmount { - amount: inflation_amount, - denom: denomination, - }; + let denom_amount = DenominatedAmount::new(inflation_amount, denomination); tracing::info!("MASP inflation for {addr} is {denom_amount}"); tracing::debug!( @@ -207,6 +204,7 @@ where { use std::cmp::Ordering; + use masp_primitives::bls12_381; use masp_primitives::ff::PrimeField; use masp_primitives::transaction::components::I128Sum as MaspAmount; use rayon::iter::{ @@ -214,29 +212,18 @@ where }; use rayon::prelude::ParallelSlice; - use crate::types::address; + use crate::types::storage::{Key, KeySeg}; + use crate::types::token::MASP_CONVERT_ANCHOR_KEY; // The derived conversions will be placed in MASP address space let masp_addr = MASP; - let tokens = address::tokens(); - let mut masp_reward_keys: Vec<_> = tokens - .into_keys() - .map(|k| { - wl_storage - .storage - .conversion_state - .tokens - .get(k) - .unwrap_or_else(|| { - panic!( - "Could not find token alias {} in MASP conversion \ - state.", - k - ) - }) - .clone() - }) + let mut masp_reward_keys: Vec<_> = wl_storage + .storage + .conversion_state + .tokens + .values() + .cloned() .collect(); // Put the native rewards first because other inflation computations depend // on it @@ -459,6 +446,19 @@ where // obtained wl_storage.storage.conversion_state.tree = FrozenCommitmentTree::merge(&tree_parts); + // Update the anchor in storage + let anchor_key = Key::from(MASP.to_db_key()) + .push(&MASP_CONVERT_ANCHOR_KEY.to_owned()) + .expect("Cannot obtain a storage key"); + wl_storage.write( + &anchor_key, + crate::types::hash::Hash( + bls12_381::Scalar::from( + wl_storage.storage.conversion_state.tree.root(), + ) + .to_bytes(), + ), + )?; // Add purely decoding entries to the assets map. 
These will be // overwritten before the creation of the next commitment tree @@ -550,8 +550,6 @@ mod tests { implicit_vp_code_hash: Default::default(), epochs_per_year: 365, max_signatures_per_transaction: 10, - pos_gain_p: Default::default(), - pos_gain_d: Default::default(), staked_ratio: Default::default(), pos_inflation_amount: Default::default(), fee_unshielding_gas_limit: 0, diff --git a/core/src/ledger/masp_utils.rs b/core/src/ledger/masp_utils.rs new file mode 100644 index 0000000000..f37b725531 --- /dev/null +++ b/core/src/ledger/masp_utils.rs @@ -0,0 +1,113 @@ +//! MASP utilities + +use masp_primitives::merkle_tree::CommitmentTree; +use masp_primitives::sapling::Node; +use masp_primitives::transaction::Transaction; + +use super::storage_api::{StorageRead, StorageWrite}; +use crate::ledger::storage_api::{Error, Result}; +use crate::types::address::MASP; +use crate::types::hash::Hash; +use crate::types::storage::{BlockHeight, Epoch, Key, KeySeg, TxIndex}; +use crate::types::token::{ + Transfer, HEAD_TX_KEY, MASP_NOTE_COMMITMENT_TREE_KEY, MASP_NULLIFIERS_KEY, + PIN_KEY_PREFIX, TX_KEY_PREFIX, +}; + +// Writes the nullifiers of the provided masp transaction to storage +fn reveal_nullifiers( + ctx: &mut impl StorageWrite, + transaction: &Transaction, +) -> Result<()> { + for description in transaction + .sapling_bundle() + .map_or(&vec![], |description| &description.shielded_spends) + { + let nullifier_key = Key::from(MASP.to_db_key()) + .push(&MASP_NULLIFIERS_KEY.to_owned()) + .expect("Cannot obtain a storage key") + .push(&Hash(description.nullifier.0)) + .expect("Cannot obtain a storage key"); + ctx.write(&nullifier_key, ())?; + } + + Ok(()) +} + +/// Appends the note commitments of the provided transaction to the merkle tree +/// and updates the anchor +/// NOTE: this function is public as a temporary workaround because of an issue +/// when running this function in WASM +pub fn update_note_commitment_tree( + ctx: &mut (impl StorageRead + StorageWrite), + transaction: &Transaction, +) -> Result<()> { + if let Some(bundle) = transaction.sapling_bundle() { + if !bundle.shielded_outputs.is_empty() { + let tree_key = Key::from(MASP.to_db_key()) + .push(&MASP_NOTE_COMMITMENT_TREE_KEY.to_owned()) + .expect("Cannot obtain a storage key"); + let mut commitment_tree: CommitmentTree = + ctx.read(&tree_key)?.ok_or(Error::SimpleMessage( + "Missing note commitment tree in storage", + ))?; + + for description in &bundle.shielded_outputs { + // Add cmu to the merkle tree + commitment_tree + .append(Node::from_scalar(description.cmu)) + .map_err(|_| { + Error::SimpleMessage("Note commitment tree is full") + })?; + } + + ctx.write(&tree_key, commitment_tree)?; + } + } + + Ok(()) +} + +/// Handle a MASP transaction. 
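+/// Stores the `Transfer` together with its `(Epoch, BlockHeight, TxIndex)`
+/// location under the next MASP head-tx index, reveals the transaction's
+/// nullifiers, and pins the transfer when `transfer.key` is set. The note
+/// commitment tree is currently updated separately (see
+/// `update_note_commitment_tree` above).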
+pub fn handle_masp_tx( + ctx: &mut (impl StorageRead + StorageWrite), + transfer: &Transfer, + shielded: &Transaction, +) -> Result<()> { + let masp_addr = MASP; + let head_tx_key = Key::from(masp_addr.to_db_key()) + .push(&HEAD_TX_KEY.to_owned()) + .expect("Cannot obtain a storage key"); + let current_tx_idx: u64 = + ctx.read(&head_tx_key).unwrap_or(None).unwrap_or(0); + let current_tx_key = Key::from(masp_addr.to_db_key()) + .push(&(TX_KEY_PREFIX.to_owned() + ¤t_tx_idx.to_string())) + .expect("Cannot obtain a storage key"); + // Save the Transfer object and its location within the blockchain + // so that clients do not have to separately look these + // up + let record: (Epoch, BlockHeight, TxIndex, Transfer, Transaction) = ( + ctx.get_block_epoch()?, + ctx.get_block_height()?, + ctx.get_tx_index()?, + transfer.clone(), + shielded.clone(), + ); + ctx.write(¤t_tx_key, record)?; + ctx.write(&head_tx_key, current_tx_idx + 1)?; + // TODO: temporarily disabled because of the node aggregation issue in WASM. + // Using the host env tx_update_masp_note_commitment_tree or directly the + // update_note_commitment_tree function as a workaround instead + // update_note_commitment_tree(ctx, shielded)?; + reveal_nullifiers(ctx, shielded)?; + + // If storage key has been supplied, then pin this transaction to it + if let Some(key) = &transfer.key { + let pin_key = Key::from(masp_addr.to_db_key()) + .push(&(PIN_KEY_PREFIX.to_owned() + key)) + .expect("Cannot obtain a storage key"); + ctx.write(&pin_key, current_tx_idx)?; + } + + Ok(()) +} diff --git a/core/src/ledger/mod.rs b/core/src/ledger/mod.rs index debeaba7db..96cf4d2331 100644 --- a/core/src/ledger/mod.rs +++ b/core/src/ledger/mod.rs @@ -6,6 +6,7 @@ pub mod governance; pub mod ibc; pub mod inflation; pub mod masp_conversions; +pub mod masp_utils; pub mod parameters; pub mod pgf; pub mod replay_protection; diff --git a/core/src/ledger/parameters/mod.rs b/core/src/ledger/parameters/mod.rs index f8f33d9768..d80f75aa4b 100644 --- a/core/src/ledger/parameters/mod.rs +++ b/core/src/ledger/parameters/mod.rs @@ -50,15 +50,11 @@ pub struct Parameters { /// Whitelisted tx hashes (read only) pub tx_whitelist: Vec, /// Implicit accounts validity predicate WASM code hash - pub implicit_vp_code_hash: Hash, + pub implicit_vp_code_hash: Option, /// Expected number of epochs per year (read only) pub epochs_per_year: u64, /// Maximum number of signature per transaction pub max_signatures_per_transaction: u8, - /// PoS gain p (read only) - pub pos_gain_p: Dec, - /// PoS gain d (read only) - pub pos_gain_d: Dec, /// PoS staked ratio (read + write for every epoch) pub staked_ratio: Dec, /// PoS inflation amount from the last epoch (read + write for every epoch) @@ -129,8 +125,6 @@ impl Parameters { implicit_vp_code_hash, epochs_per_year, max_signatures_per_transaction, - pos_gain_p, - pos_gain_d, staked_ratio, pos_inflation_amount, minimum_gas_price, @@ -196,7 +190,10 @@ impl Parameters { let implicit_vp_key = storage::get_implicit_vp_key(); // Using `fn write_bytes` here, because implicit_vp code hash doesn't // need to be encoded, it's bytes already. 
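+        // A `None` implicit VP code hash is persisted as the default hash.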
- storage.write_bytes(&implicit_vp_key, implicit_vp_code_hash)?; + storage.write_bytes( + &implicit_vp_key, + implicit_vp_code_hash.unwrap_or_default(), + )?; let epochs_per_year_key = storage::get_epochs_per_year_key(); storage.write(&epochs_per_year_key, epochs_per_year)?; @@ -208,12 +205,6 @@ impl Parameters { max_signatures_per_transaction, )?; - let pos_gain_p_key = storage::get_pos_gain_p_key(); - storage.write(&pos_gain_p_key, pos_gain_p)?; - - let pos_gain_d_key = storage::get_pos_gain_d_key(); - storage.write(&pos_gain_d_key, pos_gain_d)?; - let staked_ratio_key = storage::get_staked_ratio_key(); storage.write(&staked_ratio_key, staked_ratio)?; @@ -315,32 +306,6 @@ where storage.write(&key, value) } -/// Update the PoS P-gain parameter in storage. Returns the parameters and gas -/// cost. -pub fn update_pos_gain_p_parameter( - storage: &mut S, - value: &Dec, -) -> storage_api::Result<()> -where - S: StorageRead + StorageWrite, -{ - let key = storage::get_pos_gain_p_key(); - storage.write(&key, value) -} - -/// Update the PoS D-gain parameter in storage. Returns the parameters and gas -/// cost. -pub fn update_pos_gain_d_parameter( - storage: &mut S, - value: &Dec, -) -> storage_api::Result<()> -where - S: StorageRead + StorageWrite, -{ - let key = storage::get_pos_gain_d_key(); - storage.write(&key, value) -} - /// Update the PoS staked ratio parameter in storage. Returns the parameters and /// gas cost. pub fn update_staked_ratio_parameter( @@ -514,20 +479,6 @@ where .ok_or(ReadError::ParametersMissing) .into_storage_result()?; - // read PoS gain P - let pos_gain_p_key = storage::get_pos_gain_p_key(); - let value = storage.read(&pos_gain_p_key)?; - let pos_gain_p = value - .ok_or(ReadError::ParametersMissing) - .into_storage_result()?; - - // read PoS gain D - let pos_gain_d_key = storage::get_pos_gain_d_key(); - let value = storage.read(&pos_gain_d_key)?; - let pos_gain_d = value - .ok_or(ReadError::ParametersMissing) - .into_storage_result()?; - // read staked ratio let staked_ratio_key = storage::get_staked_ratio_key(); let value = storage.read(&staked_ratio_key)?; @@ -564,11 +515,9 @@ where max_block_gas, vp_whitelist, tx_whitelist, - implicit_vp_code_hash, + implicit_vp_code_hash: Some(implicit_vp_code_hash), epochs_per_year, max_signatures_per_transaction, - pos_gain_p, - pos_gain_d, staked_ratio, pos_inflation_amount, minimum_gas_price, diff --git a/core/src/ledger/parameters/storage.rs b/core/src/ledger/parameters/storage.rs index 19bd7784ac..6f065fa854 100644 --- a/core/src/ledger/parameters/storage.rs +++ b/core/src/ledger/parameters/storage.rs @@ -26,8 +26,6 @@ struct Keys { // ======================================== // PoS parameters // ======================================== - pos_gain_d: &'static str, - pos_gain_p: &'static str, pos_inflation_amount: &'static str, staked_ratio: &'static str, // ======================================== @@ -96,16 +94,6 @@ pub fn is_epochs_per_year_key(key: &Key) -> bool { is_epochs_per_year_key_at_addr(key, &ADDRESS) } -/// Returns if the key is the pos_gain_p key. -pub fn is_pos_gain_p_key(key: &Key) -> bool { - is_pos_gain_p_key_at_addr(key, &ADDRESS) -} - -/// Returns if the key is the pos_gain_d key. -pub fn is_pos_gain_d_key(key: &Key) -> bool { - is_pos_gain_d_key_at_addr(key, &ADDRESS) -} - /// Returns if the key is the staked ratio key. 
pub fn is_staked_ratio_key(key: &Key) -> bool { is_staked_ratio_key_at_addr(key, &ADDRESS) @@ -166,16 +154,6 @@ pub fn get_epochs_per_year_key() -> Key { get_epochs_per_year_key_at_addr(ADDRESS) } -/// Storage key used for pos_gain_p parameter. -pub fn get_pos_gain_p_key() -> Key { - get_pos_gain_p_key_at_addr(ADDRESS) -} - -/// Storage key used for pos_gain_d parameter. -pub fn get_pos_gain_d_key() -> Key { - get_pos_gain_d_key_at_addr(ADDRESS) -} - /// Storage key used for staked ratio parameter. pub fn get_staked_ratio_key() -> Key { get_staked_ratio_key_at_addr(ADDRESS) diff --git a/core/src/ledger/pgf/inflation.rs b/core/src/ledger/pgf/inflation.rs new file mode 100644 index 0000000000..30dd4191fd --- /dev/null +++ b/core/src/ledger/pgf/inflation.rs @@ -0,0 +1,104 @@ +//! PGF lib code. + +use crate::ledger::parameters::storage as params_storage; +use crate::ledger::storage_api::pgf::{ + get_parameters, get_payments, get_stewards, +}; +use crate::ledger::storage_api::token::credit_tokens; +use crate::ledger::storage_api::{self, StorageRead, StorageWrite}; +use crate::types::dec::Dec; +use crate::types::token; + +/// Apply the PGF inflation. +pub fn apply_inflation(storage: &mut S) -> storage_api::Result<()> +where + S: StorageRead + StorageWrite, +{ + let pgf_parameters = get_parameters(storage)?; + let staking_token = storage.get_native_token()?; + + let epochs_per_year: u64 = storage + .read(¶ms_storage::get_epochs_per_year_key())? + .expect("Epochs per year should exist in storage"); + let total_tokens: token::Amount = storage + .read(&token::minted_balance_key(&staking_token))? + .expect("Total NAM balance should exist in storage"); + + let pgf_pd_rate = + pgf_parameters.pgf_inflation_rate / Dec::from(epochs_per_year); + let pgf_inflation = Dec::from(total_tokens) * pgf_pd_rate; + let pgf_inflation_amount = token::Amount::from(pgf_inflation); + + credit_tokens( + storage, + &staking_token, + &super::ADDRESS, + pgf_inflation_amount, + )?; + + tracing::info!( + "Minting {} tokens for PGF rewards distribution into the PGF account.", + pgf_inflation_amount.to_string_native() + ); + + let mut pgf_fundings = get_payments(storage)?; + // we want to pay first the oldest fundings + pgf_fundings.sort_by(|a, b| a.id.cmp(&b.id)); + + for funding in pgf_fundings { + if storage_api::token::transfer( + storage, + &staking_token, + &super::ADDRESS, + &funding.detail.target, + funding.detail.amount, + ) + .is_ok() + { + tracing::info!( + "Paying {} tokens for {} project.", + funding.detail.amount.to_string_native(), + &funding.detail.target, + ); + } else { + tracing::warn!( + "Failed to pay {} tokens for {} project.", + funding.detail.amount.to_string_native(), + &funding.detail.target, + ); + } + } + + // Pgf steward inflation + let stewards = get_stewards(storage)?; + let pgf_stewards_pd_rate = + pgf_parameters.stewards_inflation_rate / Dec::from(epochs_per_year); + let pgf_steward_inflation = Dec::from(total_tokens) * pgf_stewards_pd_rate; + + for steward in stewards { + for (address, percentage) in steward.reward_distribution { + let pgf_steward_reward = pgf_steward_inflation + .checked_mul(&percentage) + .unwrap_or_default(); + let reward_amount = token::Amount::from(pgf_steward_reward); + + if credit_tokens(storage, &staking_token, &address, reward_amount) + .is_ok() + { + tracing::info!( + "Minting {} tokens for steward {}.", + reward_amount.to_string_native(), + address, + ); + } else { + tracing::warn!( + "Failed minting {} tokens for steward {}.", + reward_amount.to_string_native(), + 
address, + ); + } + } + } + + Ok(()) +} diff --git a/core/src/ledger/pgf/mod.rs b/core/src/ledger/pgf/mod.rs index b7f8e3cecb..5d7d7eef24 100644 --- a/core/src/ledger/pgf/mod.rs +++ b/core/src/ledger/pgf/mod.rs @@ -4,6 +4,8 @@ use crate::types::address::{Address, InternalAddress}; /// Pgf CLI pub mod cli; +/// Pgf inflation code +pub mod inflation; /// Pgf parameters pub mod parameters; /// Pgf storage diff --git a/core/src/ledger/storage/mod.rs b/core/src/ledger/storage/mod.rs index 528ea20e7d..3c5cf39c35 100644 --- a/core/src/ledger/storage/mod.rs +++ b/core/src/ledger/storage/mod.rs @@ -2,7 +2,6 @@ pub mod ics23_specs; pub mod merkle_tree; -#[cfg(any(test, feature = "testing"))] pub mod mockdb; pub mod traits; pub mod types; @@ -1453,11 +1452,9 @@ mod tests { max_expected_time_per_block: Duration::seconds(max_expected_time_per_block).into(), vp_whitelist: vec![], tx_whitelist: vec![], - implicit_vp_code_hash: Hash::zero(), + implicit_vp_code_hash: Some(Hash::zero()), epochs_per_year: 100, max_signatures_per_transaction: 15, - pos_gain_p: Dec::new(1,1).expect("Cannot fail"), - pos_gain_d: Dec::new(1,1).expect("Cannot fail"), staked_ratio: Dec::new(1,1).expect("Cannot fail"), pos_inflation_amount: token::Amount::zero(), fee_unshielding_gas_limit: 20_000, diff --git a/core/src/ledger/storage/write_log.rs b/core/src/ledger/storage/write_log.rs index 428ae86e3d..e24fcc54a2 100644 --- a/core/src/ledger/storage/write_log.rs +++ b/core/src/ledger/storage/write_log.rs @@ -180,7 +180,6 @@ impl WriteLog { &self, key: &storage::Key, ) -> (Option<&StorageModification>, u64) { - // try to read from tx write log first match self.block_write_log.get(key) { Some(v) => { let gas = match v { diff --git a/core/src/ledger/vp_env.rs b/core/src/ledger/vp_env.rs index 664e030786..728e66d9a6 100644 --- a/core/src/ledger/vp_env.rs +++ b/core/src/ledger/vp_env.rs @@ -8,7 +8,9 @@ use super::storage_api::{self, OptionExt, ResultExt, StorageRead}; use crate::proto::Tx; use crate::types::address::Address; use crate::types::hash::Hash; -use crate::types::ibc::{get_shielded_transfer, IbcEvent, EVENT_TYPE_PACKET}; +use crate::types::ibc::{ + get_shielded_transfer, IbcEvent, MsgShieldedTransfer, EVENT_TYPE_PACKET, +}; use crate::types::storage::{ BlockHash, BlockHeight, Epoch, Header, Key, TxIndex, }; @@ -112,9 +114,8 @@ where tx_data: &Tx, ) -> Result<(Transfer, Transaction), storage_api::Error> { let signed = tx_data; - if let Ok(transfer) = - Transfer::try_from_slice(&signed.data().unwrap()[..]) - { + let data = signed.data().ok_or_err_msg("No transaction data")?; + if let Ok(transfer) = Transfer::try_from_slice(&data) { let shielded_hash = transfer .shielded .ok_or_err_msg("unable to find shielded hash")?; @@ -125,6 +126,13 @@ where return Ok((transfer, masp_tx)); } + if let Ok(message) = MsgShieldedTransfer::try_from_slice(&data) { + return Ok(( + message.shielded_transfer.transfer, + message.shielded_transfer.masp_tx, + )); + } + // Shielded transfer over IBC let events = self.get_ibc_events(EVENT_TYPE_PACKET.to_string())?; // The receiving event should be only one in the single IBC transaction diff --git a/core/src/types/address.rs b/core/src/types/address.rs index b348451996..1d3cd165d2 100644 --- a/core/src/types/address.rs +++ b/core/src/types/address.rs @@ -62,6 +62,8 @@ pub const POS_SLASH_POOL: Address = pub const GOV: Address = Address::Internal(InternalAddress::Governance); /// Internal MASP address pub const MASP: Address = Address::Internal(InternalAddress::Masp); +/// Internal Multitoken address +pub 
const MULTITOKEN: Address = Address::Internal(InternalAddress::Multitoken); /// Error from decoding address from string pub type DecodeError = string_encoding::DecodeError; diff --git a/core/src/types/dec.rs b/core/src/types/dec.rs index 4552acc84d..4ec4e223ef 100644 --- a/core/src/types/dec.rs +++ b/core/src/types/dec.rs @@ -520,6 +520,27 @@ pub mod testing { |(mantissa, scale)| Dec::new(mantissa.into(), scale).unwrap(), ) } + + prop_compose! { + /// Generate an arbitrary uint + pub fn arb_uint()(value: [u64; 4]) -> Uint { + Uint(value) + } + } + + prop_compose! { + /// Generate an arbitrary signed 256-bit integer + pub fn arb_i256()(value in arb_uint()) -> I256 { + I256(value) + } + } + + prop_compose! { + /// Generate an arbitrary decimal wih the native denomination + pub fn arb_dec()(value in arb_i256()) -> Dec { + Dec(value) + } + } } #[cfg(test)] diff --git a/core/src/types/eth_bridge_pool.rs b/core/src/types/eth_bridge_pool.rs index fcd89f76a9..340c0faa3d 100644 --- a/core/src/types/eth_bridge_pool.rs +++ b/core/src/types/eth_bridge_pool.rs @@ -55,6 +55,15 @@ pub enum TransferToEthereumKind { Nut, } +impl std::fmt::Display for TransferToEthereumKind { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + match self { + Self::Erc20 => write!(f, "ERC20"), + Self::Nut => write!(f, "NUT"), + } + } +} + /// Additional data appended to a [`TransferToEthereumEvent`] to /// construct a [`PendingTransfer`]. #[derive( @@ -293,6 +302,80 @@ pub struct GasFee { pub token: Address, } +#[cfg(any(test, feature = "testing"))] +/// Testing helpers and strategies for the Ethereum bridge pool +pub mod testing { + use proptest::prop_compose; + use proptest::strategy::Strategy; + + use super::*; + use crate::types::address::testing::{ + arb_established_address, arb_non_internal_address, + }; + use crate::types::ethereum_events::testing::arb_eth_address; + use crate::types::token::testing::arb_amount; + + prop_compose! { + /// Generate an arbitrary pending transfer + pub fn arb_pending_transfer()( + transfer in arb_transfer_to_ethereum(), + gas_fee in arb_gas_fee(), + ) -> PendingTransfer { + PendingTransfer { + transfer, + gas_fee, + } + } + } + + prop_compose! { + /// Generate an arbitrary Ethereum gas fee + pub fn arb_gas_fee()( + amount in arb_amount(), + payer in arb_non_internal_address(), + token in arb_established_address().prop_map(Address::Established), + ) -> GasFee { + GasFee { + amount, + payer, + token, + } + } + } + + prop_compose! { + /// Generate the kind of a transfer to ethereum + pub fn arb_transfer_to_ethereum_kind()( + discriminant in 0..2, + ) -> TransferToEthereumKind { + match discriminant { + 0 => TransferToEthereumKind::Erc20, + 1 => TransferToEthereumKind::Nut, + _ => unreachable!(), + } + } + } + + prop_compose! 
{ + /// Generate an arbitrary transfer to Ethereum + pub fn arb_transfer_to_ethereum()( + kind in arb_transfer_to_ethereum_kind(), + asset in arb_eth_address(), + recipient in arb_eth_address(), + sender in arb_non_internal_address(), + amount in arb_amount(), + ) -> TransferToEthereum { + TransferToEthereum { + kind, + asset, + recipient, + sender, + amount, + } + } + } +} + #[cfg(test)] mod test_eth_bridge_pool_types { use super::*; diff --git a/core/src/types/ethereum_events.rs b/core/src/types/ethereum_events.rs index 2dc3601e5e..8569b118e4 100644 --- a/core/src/types/ethereum_events.rs +++ b/core/src/types/ethereum_events.rs @@ -447,6 +447,8 @@ pub mod tests { /// Test helpers #[cfg(any(test, feature = "testing"))] pub mod testing { + use proptest::prop_compose; + use super::*; use crate::types::token::{self, Amount}; @@ -498,4 +500,11 @@ pub mod testing { }], } } + + prop_compose! { + // Generate an arbitrary Ethereum address + pub fn arb_eth_address()(bytes: [u8; 20]) -> EthAddress { + EthAddress(bytes) + } + } } diff --git a/core/src/types/ethereum_structs.rs b/core/src/types/ethereum_structs.rs index f029edc4b6..5b29e0e588 100644 --- a/core/src/types/ethereum_structs.rs +++ b/core/src/types/ethereum_structs.rs @@ -9,6 +9,71 @@ pub use ethbridge_structs::*; use num256::Uint256; use serde::{Deserialize, Serialize}; +use crate::types::keccak::KeccakHash; + +/// Status of some Bridge pool transfer. +#[derive( + Hash, + Clone, + Debug, + Eq, + PartialEq, + Ord, + PartialOrd, + BorshSerialize, + BorshDeserialize, + Serialize, + Deserialize, +)] +pub enum BpTransferStatus { + /// The transfer has been relayed. + Relayed, + /// The transfer has expired. + Expired, +} + +/// Ethereum bridge events on Namada's event log. +#[derive( + Hash, + Clone, + Debug, + Eq, + PartialEq, + Ord, + PartialOrd, + BorshSerialize, + BorshDeserialize, + Serialize, + Deserialize, +)] +pub enum EthBridgeEvent { + /// Bridge pool transfer status update event. + BridgePool { + /// Hash of the Bridge pool transfer. + tx_hash: KeccakHash, + /// Status of the Bridge pool transfer. + status: BpTransferStatus, + }, +} + +impl EthBridgeEvent { + /// Return a new Bridge pool expired transfer event. + pub const fn new_bridge_pool_expired(tx_hash: KeccakHash) -> Self { + Self::BridgePool { + tx_hash, + status: BpTransferStatus::Expired, + } + } + + /// Return a new Bridge pool relayed transfer event. + pub const fn new_bridge_pool_relayed(tx_hash: KeccakHash) -> Self { + Self::BridgePool { + tx_hash, + status: BpTransferStatus::Relayed, + } + } +} + /// This type must be able to represent any valid Ethereum block height. It must /// also be Borsh serializeable, so that it can be stored in blockchain storage. /// diff --git a/core/src/types/hash.rs b/core/src/types/hash.rs index af2d94d8ed..bb6c6e3acc 100644 --- a/core/src/types/hash.rs +++ b/core/src/types/hash.rs @@ -146,8 +146,24 @@ impl From for crate::tendermint::Hash { } } +#[cfg(any(test, feature = "testing"))] +/// Tests and strategies for hashes +pub mod testing { + use proptest::prop_compose; + + use super::*; + + prop_compose! 
{ + /// Generate an arbitrary hash + pub fn arb_hash()(bytes: [u8; 32]) -> Hash { + Hash(bytes) + } + } +} + #[cfg(test)] -mod tests { +/// Tests and strategies for hashes +pub mod tests { use proptest::prelude::*; use proptest::string::{string_regex, RegexGeneratorStrategy}; diff --git a/core/src/types/ibc.rs b/core/src/types/ibc.rs index 2f04e11709..cfb2357fef 100644 --- a/core/src/types/ibc.rs +++ b/core/src/types/ibc.rs @@ -11,12 +11,16 @@ use serde::{Deserialize, Serialize}; use thiserror::Error; use super::address::HASH_LEN; +use crate::ibc::apps::transfer::types::msgs::transfer::MsgTransfer; use crate::ibc::apps::transfer::types::{Memo, PrefixedDenom, TracePath}; use crate::ibc::core::handler::types::events::{ Error as IbcEventError, IbcEvent as RawIbcEvent, }; +use crate::ibc::primitives::proto::Protobuf; +pub use crate::ledger::ibc::storage::is_ibc_key; use crate::tendermint::abci::Event as AbciEvent; use crate::types::masp::PaymentAddress; +use crate::types::token::Transfer; /// The event type defined in ibc-rs for receiving a token pub const EVENT_TYPE_PACKET: &str = "fungible_token_packet"; @@ -60,7 +64,15 @@ impl FromStr for IbcTokenHash { /// Wrapped IbcEvent #[derive( - Debug, Clone, BorshSerialize, BorshDeserialize, BorshSchema, PartialEq, Eq, + Debug, + Clone, + BorshSerialize, + BorshDeserialize, + BorshSchema, + PartialEq, + Eq, + Serialize, + Deserialize, )] pub struct IbcEvent { /// The IBC event type @@ -98,11 +110,47 @@ impl std::fmt::Display for IbcEvent { } } +/// IBC transfer message to send from a shielded address +#[derive(Debug, Clone)] +pub struct MsgShieldedTransfer { + /// IBC transfer message + pub message: MsgTransfer, + /// MASP tx with token transfer + pub shielded_transfer: IbcShieldedTransfer, +} + +impl BorshSerialize for MsgShieldedTransfer { + fn serialize( + &self, + writer: &mut W, + ) -> std::io::Result<()> { + let encoded_msg = self.message.clone().encode_vec(); + let members = (encoded_msg, self.shielded_transfer.clone()); + BorshSerialize::serialize(&members, writer) + } +} + +impl BorshDeserialize for MsgShieldedTransfer { + fn deserialize_reader( + reader: &mut R, + ) -> std::io::Result { + use std::io::{Error, ErrorKind}; + let (msg, shielded_transfer): (Vec, IbcShieldedTransfer) = + BorshDeserialize::deserialize_reader(reader)?; + let message = MsgTransfer::decode_vec(&msg) + .map_err(|err| Error::new(ErrorKind::InvalidData, err))?; + Ok(Self { + message, + shielded_transfer, + }) + } +} + /// IBC shielded transfer #[derive(Debug, Clone, BorshSerialize, BorshDeserialize)] pub struct IbcShieldedTransfer { /// The IBC event type - pub transfer: crate::types::token::Transfer, + pub transfer: Transfer, /// The attributes of the IBC event pub masp_tx: masp_primitives::transaction::Transaction, } diff --git a/core/src/types/keccak.rs b/core/src/types/keccak.rs index 333646160e..f6fc15724d 100644 --- a/core/src/types/keccak.rs +++ b/core/src/types/keccak.rs @@ -2,12 +2,12 @@ //! hash function in a way that is compatible with smart contracts //! on Ethereum. 
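The Bridge pool strategies above are intended to be composed in property tests. A minimal sketch, assuming the `namada_core::types::eth_bridge_pool::testing` module path and using the new `Display` impl for `TransferToEthereumKind`; the test body is illustrative only:

```
use proptest::prelude::*;

use namada_core::types::eth_bridge_pool::testing::arb_pending_transfer;

proptest! {
    #[test]
    fn pending_transfer_kind_displays(transfer in arb_pending_transfer()) {
        // The new `Display` impl renders the two kinds as "ERC20" or "NUT".
        let rendered = transfer.transfer.kind.to_string();
        prop_assert!(rendered == "ERC20" || rendered == "NUT");
    }
}
```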
use std::convert::{TryFrom, TryInto}; -use std::fmt::Display; +use std::fmt; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; use data_encoding::HEXUPPER; use ethabi::Token; -use serde::{Serialize, Serializer}; +use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; use thiserror::Error; pub use tiny_keccak::{Hasher, Keccak}; @@ -51,8 +51,8 @@ impl KeccakHash { } } -impl Display for KeccakHash { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { +impl fmt::Display for KeccakHash { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { for byte in &self.0 { write!(f, "{:02X}", byte)?; } @@ -118,6 +118,34 @@ impl Serialize for KeccakHash { } } +impl<'de> Deserialize<'de> for KeccakHash { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + struct KeccakVisitor; + + impl<'de> de::Visitor<'de> for KeccakVisitor { + type Value = KeccakHash; + + fn expecting(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "a string containing a keccak hash") + } + + fn visit_str(self, s: &str) -> Result + where + E: de::Error, + { + KeccakHash::try_from(s).map_err(|_| { + de::Error::invalid_value(de::Unexpected::Str(s), &self) + }) + } + } + + deserializer.deserialize_str(KeccakVisitor) + } +} + /// Hash bytes using Keccak pub fn keccak_hash>(bytes: T) -> KeccakHash { let mut output = [0; 32]; @@ -134,3 +162,23 @@ impl Encode<1> for KeccakHash { [Token::FixedBytes(self.0.to_vec())] } } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_keccak_serde_roundtrip() { + let mut hash = KeccakHash([0; 32]); + + for i in 0..32 { + hash.0[i] = i as u8; + } + + let serialized = serde_json::to_string(&hash).unwrap(); + let deserialized: KeccakHash = + serde_json::from_str(&serialized).unwrap(); + + assert_eq!(deserialized, hash); + } +} diff --git a/core/src/types/key/mod.rs b/core/src/types/key/mod.rs index e4a5e04672..0782d32a6d 100644 --- a/core/src/types/key/mod.rs +++ b/core/src/types/key/mod.rs @@ -532,6 +532,23 @@ pub mod testing { use super::SigScheme; use crate::types::key::*; + /// Generate an arbitrary public key + pub fn arb_pk() + -> impl Strategy::PublicKey> { + arb_keypair::().prop_map(|x| x.ref_to()) + } + + /// Generate an arbitrary common key + pub fn arb_common_pk() -> impl Strategy { + let ed25519 = arb_pk::() + .prop_map(common::PublicKey::Ed25519) + .sboxed(); + let secp256k1 = arb_pk::() + .prop_map(common::PublicKey::Secp256k1) + .sboxed(); + ed25519.prop_union(secp256k1) + } + /// A keypair for tests pub fn keypair_1() -> ::SecretKey { // generated from `cargo test gen_keypair -- --nocapture` diff --git a/core/src/types/storage.rs b/core/src/types/storage.rs index 16d931ba40..87d1b80e50 100644 --- a/core/src/types/storage.rs +++ b/core/src/types/storage.rs @@ -1434,7 +1434,8 @@ impl GetEventNonce for InnerEthEventsQueue { } #[cfg(test)] -mod tests { +/// Tests and strategies for storage +pub mod tests { use proptest::prelude::*; use super::*; @@ -1828,6 +1829,7 @@ mod tests { } } + #[cfg(test)] fn test_address_in_storage_key_order_aux(addr1: Address, addr2: Address) { println!("addr1 {addr1}"); println!("addr2 {addr2}"); @@ -1870,6 +1872,13 @@ pub mod testing { arb_address, arb_non_internal_address, }; + prop_compose! { + /// Generate an arbitrary epoch + pub fn arb_epoch()(epoch: u64) -> Epoch { + Epoch(epoch) + } + } + /// Generate an arbitrary [`Key`]. 
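`arb_common_pk` above unions the ed25519 and secp256k1 strategies, so a single property test exercises both variants of `common::PublicKey`. A sketch of a Borsh round-trip check built on it, with module paths assumed:

```
use borsh::BorshDeserialize;
use borsh_ext::BorshSerializeExt;
use proptest::prelude::*;

use namada_core::types::key::common;
use namada_core::types::key::testing::arb_common_pk;

proptest! {
    #[test]
    fn common_pk_borsh_roundtrip(pk in arb_common_pk()) {
        let bytes = pk.serialize_to_vec();
        // Both enum variants must survive serialization unchanged.
        prop_assert_eq!(common::PublicKey::try_from_slice(&bytes).unwrap(), pk);
    }
}
```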
pub fn arb_key() -> impl Strategy { prop_oneof![ diff --git a/core/src/types/token.rs b/core/src/types/token.rs index 5410b7c6a4..d738086e7b 100644 --- a/core/src/types/token.rs +++ b/core/src/types/token.rs @@ -169,7 +169,7 @@ impl Amount { .map(|result| Self { raw: result }) } - /// Checked division. Returns `None` on overflow. + /// Checked multiplication. Returns `None` on overflow. pub fn checked_mul(&self, amount: Amount) -> Option { self.raw .checked_mul(amount.raw) @@ -181,9 +181,7 @@ impl Amount { string: impl AsRef, denom: impl Into, ) -> Result { - DenominatedAmount::from_str(string.as_ref())? - .increase_precision(denom.into().into()) - .map(Into::into) + DenominatedAmount::from_str(string.as_ref())?.scale(denom) } /// Attempt to convert an unsigned integer to an `Amount` with the @@ -273,6 +271,12 @@ impl Amount { } } +impl Display for Amount { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(f, "{}", self.raw) + } +} + /// Given a number represented as `M*B^D`, then /// `M` is the matissa, `B` is the base and `D` /// is the denomination, represented by this struct. @@ -320,12 +324,17 @@ impl From for u8 { )] pub struct DenominatedAmount { /// The mantissa - pub amount: Amount, + amount: Amount, /// The number of decimal places in base ten. - pub denom: Denomination, + denom: Denomination, } impl DenominatedAmount { + /// Make a new denominated amount representing amount*10^(-denom) + pub const fn new(amount: Amount, denom: Denomination) -> Self { + Self { amount, denom } + } + /// Return a denominated native token amount. pub const fn native(amount: Amount) -> Self { Self { @@ -401,6 +410,79 @@ impl DenominatedAmount { }) .ok_or(AmountParseError::PrecisionOverflow) } + + /// Convert this denominated amount into a plain amount by increasing its + /// precision to the given token's denomination and then taking the + /// significand. + pub fn to_amount( + self, + token: &Address, + storage: &impl StorageRead, + ) -> storage_api::Result { + let denom = read_denom(storage, token)?.ok_or_else(|| { + storage_api::Error::SimpleMessage( + "No denomination found in storage for the given token", + ) + })?; + self.scale(denom).map_err(storage_api::Error::new) + } + + /// Multiply this number by 10^denom and return the computed integer if + /// possible. Otherwise error out. + pub fn scale( + self, + denom: impl Into, + ) -> Result { + self.increase_precision(Denomination(denom.into())) + .map(|x| x.amount) + } + + /// Checked multiplication. Returns `None` on overflow. + pub fn checked_mul(&self, rhs: DenominatedAmount) -> Option { + let amount = self.amount.checked_mul(rhs.amount)?; + let denom = self.denom.0.checked_add(rhs.denom.0)?.into(); + Some(Self { amount, denom }) + } + + /// Checked subtraction. Returns `None` on overflow. + pub fn checked_sub(&self, mut rhs: DenominatedAmount) -> Option { + let mut lhs = *self; + if lhs.denom < rhs.denom { + lhs = lhs.increase_precision(rhs.denom).ok()?; + } else { + rhs = rhs.increase_precision(lhs.denom).ok()?; + } + let amount = lhs.amount.checked_sub(rhs.amount)?; + Some(Self { + amount, + denom: lhs.denom, + }) + } + + /// Checked addition. Returns `None` on overflow. 
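The checked arithmetic on `DenominatedAmount` first aligns both operands to the larger denomination (multiplication instead adds the denominations), so no precision is lost. A small worked sketch, reusing the values from the unit test added further down in this diff:

```
#[test]
fn denominated_arithmetic_sketch() {
    use namada_core::types::token::DenominatedAmount;

    // 0.010 (10 * 10^-3) and 0.10 (10 * 10^-2): addition aligns both
    // operands to denomination 3 before adding.
    let a = DenominatedAmount::new(10.into(), 3.into());
    let b = DenominatedAmount::new(10.into(), 2.into());
    assert_eq!(
        a.checked_add(b).unwrap(),
        DenominatedAmount::new(110.into(), 3.into())
    );

    // Multiplication adds the denominations: 0.010 * 0.10 = 100 * 10^-5.
    assert_eq!(
        a.checked_mul(b).unwrap(),
        DenominatedAmount::new(100.into(), 5.into())
    );

    // Subtracting the larger operand from the smaller one underflows.
    assert!(a.checked_sub(b).is_none());
}
```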
+ pub fn checked_add(&self, mut rhs: DenominatedAmount) -> Option { + let mut lhs = *self; + if lhs.denom < rhs.denom { + lhs = lhs.increase_precision(rhs.denom).ok()?; + } else { + rhs = rhs.increase_precision(lhs.denom).ok()?; + } + let amount = lhs.amount.checked_add(rhs.amount)?; + Some(Self { + amount, + denom: lhs.denom, + }) + } + + /// Returns the significand of this number + pub const fn amount(&self) -> Amount { + self.amount + } + + /// Returns the denomination of this number + pub const fn denom(&self) -> Denomination { + self.denom + } } impl Display for DenominatedAmount { @@ -560,15 +642,9 @@ impl<'de> serde::Deserialize<'de> for DenominatedAmount { } } -impl<'a> From<&'a DenominatedAmount> for &'a Amount { - fn from(denom: &'a DenominatedAmount) -> Self { - &denom.amount - } -} - -impl From for Amount { - fn from(denom: DenominatedAmount) -> Self { - denom.amount +impl From for DenominatedAmount { + fn from(amount: Amount) -> Self { + DenominatedAmount::new(amount, 0.into()) } } @@ -906,6 +982,14 @@ pub const HEAD_TX_KEY: &str = "head-tx"; pub const TX_KEY_PREFIX: &str = "tx-"; /// Key segment prefix for pinned shielded transactions pub const PIN_KEY_PREFIX: &str = "pin-"; +/// Key segment prefix for the nullifiers +pub const MASP_NULLIFIERS_KEY: &str = "nullifiers"; +/// Key segment prefix for the note commitment merkle tree +pub const MASP_NOTE_COMMITMENT_TREE_KEY: &str = "commitment_tree"; +/// Key segment prefix for the note commitment anchor +pub const MASP_NOTE_COMMITMENT_ANCHOR_PREFIX: &str = "note_commitment_anchor"; +/// Key segment prefix for the convert anchor +pub const MASP_CONVERT_ANCHOR_KEY: &str = "convert_anchor"; /// Last calculated inflation value handed out pub const MASP_LAST_INFLATION_KEY: &str = "last_inflation"; /// The last locked ratio @@ -1129,11 +1213,55 @@ pub fn is_denom_key(token_addr: &Address, key: &Key) -> bool { /// Check if the given storage key is a masp key pub fn is_masp_key(key: &Key) -> bool { matches!(&key.segments[..], + [DbKeySeg::AddressSeg(addr), ..] if *addr == MASP + ) +} + +/// Check if the given storage key is allowed to be touched by a masp transfer +pub fn is_masp_allowed_key(key: &Key) -> bool { + match &key.segments[..] 
{ [DbKeySeg::AddressSeg(addr), DbKeySeg::StringSeg(key)] if *addr == MASP && (key == HEAD_TX_KEY || key.starts_with(TX_KEY_PREFIX) - || key.starts_with(PIN_KEY_PREFIX))) + || key.starts_with(PIN_KEY_PREFIX) + || key == MASP_NOTE_COMMITMENT_TREE_KEY) => + { + true + } + + [ + DbKeySeg::AddressSeg(addr), + DbKeySeg::StringSeg(key), + DbKeySeg::StringSeg(_nullifier), + ] if *addr == MASP && key == MASP_NULLIFIERS_KEY => true, + _ => false, + } +} + +/// Check if the given storage key is a masp tx prefix key +pub fn is_masp_tx_prefix_key(key: &Key) -> bool { + matches!(&key.segments[..], + [DbKeySeg::AddressSeg(addr), + DbKeySeg::StringSeg(prefix), + ] if *addr == MASP && prefix.starts_with(TX_KEY_PREFIX)) +} + +/// Check if the given storage key is a masp tx pin key +pub fn is_masp_tx_pin_key(key: &Key) -> bool { + matches!(&key.segments[..], + [DbKeySeg::AddressSeg(addr), + DbKeySeg::StringSeg(prefix), + ] if *addr == MASP && prefix.starts_with(PIN_KEY_PREFIX)) +} + +/// Check if the given storage key is a masp nullifier key +pub fn is_masp_nullifier_key(key: &Key) -> bool { + matches!(&key.segments[..], + [DbKeySeg::AddressSeg(addr), + DbKeySeg::StringSeg(prefix), + DbKeySeg::StringSeg(_nullifier) + ] if *addr == MASP && prefix == MASP_NULLIFIERS_KEY) } /// Obtain the storage key for the last locked ratio of a token @@ -1230,6 +1358,73 @@ pub enum TransferError { NoToken, } +#[cfg(any(test, feature = "testing"))] +/// Testing helpers and strategies for tokens +pub mod testing { + use proptest::option; + use proptest::prelude::*; + + use super::*; + use crate::types::address::testing::{ + arb_established_address, arb_non_internal_address, + }; + + prop_compose! { + /// Generate an arbitrary denomination + pub fn arb_denomination()(denom in 0u8..) -> Denomination { + Denomination(denom) + } + } + + prop_compose! { + /// Generate a denominated amount + pub fn arb_denominated_amount()( + amount in arb_amount(), + denom in arb_denomination(), + ) -> DenominatedAmount { + DenominatedAmount::new(amount, denom) + } + } + + prop_compose! 
{ + /// Generate a transfer + pub fn arb_transfer()( + source in arb_non_internal_address(), + target in arb_non_internal_address(), + token in arb_established_address().prop_map(Address::Established), + amount in arb_denominated_amount(), + key in option::of("[a-zA-Z0-9_]*"), + ) -> Transfer { + Transfer { + source, + target, + token, + amount, + key, + shielded: None, + } + } + } + + /// Generate an arbitrary token amount + pub fn arb_amount() -> impl Strategy { + any::().prop_map(|val| Amount::from_uint(val, 0).unwrap()) + } + + /// Generate an arbitrary token amount up to and including given `max` value + pub fn arb_amount_ceiled(max: u64) -> impl Strategy { + (0..=max).prop_map(|val| Amount::from_uint(val, 0).unwrap()) + } + + /// Generate an arbitrary non-zero token amount up to and including given + /// `max` value + pub fn arb_amount_non_zero_ceiled( + max: u64, + ) -> impl Strategy { + (1..=max).prop_map(|val| Amount::from_uint(val, 0).unwrap()) + } +} + #[cfg(test)] mod tests { use super::*; @@ -1418,6 +1613,23 @@ mod tests { assert_eq!(three.mul_ceil(dec), two); } + #[test] + fn test_denominateed_arithmetic() { + let a = DenominatedAmount::new(10.into(), 3.into()); + let b = DenominatedAmount::new(10.into(), 2.into()); + let c = DenominatedAmount::new(110.into(), 3.into()); + let d = DenominatedAmount::new(90.into(), 3.into()); + let e = DenominatedAmount::new(100.into(), 5.into()); + let f = DenominatedAmount::new(100.into(), 3.into()); + let g = DenominatedAmount::new(0.into(), 3.into()); + assert_eq!(a.checked_add(b).unwrap(), c); + assert_eq!(b.checked_sub(a).unwrap(), d); + assert_eq!(a.checked_mul(b).unwrap(), e); + assert!(a.checked_sub(b).is_none()); + assert_eq!(c.checked_sub(a).unwrap(), f); + assert_eq!(c.checked_sub(c).unwrap(), g); + } + #[test] fn test_denominated_amt_ord() { let denom_1 = DenominatedAmount { @@ -1474,29 +1686,3 @@ mod tests { ); } } - -/// Helpers for testing with addresses. -#[cfg(any(test, feature = "testing"))] -pub mod testing { - use proptest::prelude::*; - - use super::*; - - /// Generate an arbitrary token amount - pub fn arb_amount() -> impl Strategy { - any::().prop_map(|val| Amount::from_uint(val, 0).unwrap()) - } - - /// Generate an arbitrary token amount up to and including given `max` value - pub fn arb_amount_ceiled(max: u64) -> impl Strategy { - (0..=max).prop_map(|val| Amount::from_uint(val, 0).unwrap()) - } - - /// Generate an arbitrary non-zero token amount up to and including given - /// `max` value - pub fn arb_amount_non_zero_ceiled( - max: u64, - ) -> impl Strategy { - (1..=max).prop_map(|val| Amount::from_uint(val, 0).unwrap()) - } -} diff --git a/core/src/types/transaction/account.rs b/core/src/types/transaction/account.rs index f2eaafe7ef..3e4271f6f0 100644 --- a/core/src/types/transaction/account.rs +++ b/core/src/types/transaction/account.rs @@ -50,3 +50,51 @@ pub struct UpdateAccount { /// The account signature threshold pub threshold: Option, } + +#[cfg(any(test, feature = "testing"))] +/// Tests and strategies for accounts +pub mod tests { + use proptest::prelude::Just; + use proptest::{collection, option, prop_compose}; + + use super::*; + use crate::types::address::testing::arb_non_internal_address; + use crate::types::hash::testing::arb_hash; + use crate::types::key::testing::arb_common_pk; + + prop_compose! 
{ + /// Generate an account initialization + pub fn arb_init_account()( + public_keys in collection::vec(arb_common_pk(), 0..10), + )( + threshold in 0..=public_keys.len() as u8, + public_keys in Just(public_keys), + vp_code_hash in arb_hash(), + ) -> InitAccount { + InitAccount { + public_keys, + vp_code_hash, + threshold, + } + } + } + + prop_compose! { + /// Generate an arbitrary account update + pub fn arb_update_account()( + public_keys in collection::vec(arb_common_pk(), 0..10), + )( + addr in arb_non_internal_address(), + vp_code_hash in option::of(arb_hash()), + threshold in option::of(0..=public_keys.len() as u8), + public_keys in Just(public_keys), + ) -> UpdateAccount { + UpdateAccount { + addr, + vp_code_hash, + public_keys, + threshold, + } + } + } +} diff --git a/core/src/types/transaction/governance.rs b/core/src/types/transaction/governance.rs index dbc0b4ae5e..8e43b488e3 100644 --- a/core/src/types/transaction/governance.rs +++ b/core/src/types/transaction/governance.rs @@ -164,3 +164,56 @@ impl TryFrom for InitProposalData { }) } } + +#[cfg(any(test, feature = "testing"))] +/// Tests and strategies for governance +pub mod tests { + use proptest::{collection, prop_compose}; + + use super::*; + use crate::ledger::governance::storage::proposal::testing::arb_proposal_type; + use crate::ledger::governance::storage::vote::testing::arb_proposal_vote; + use crate::types::address::testing::arb_non_internal_address; + use crate::types::hash::testing::arb_hash; + use crate::types::storage::testing::arb_epoch; + + prop_compose! { + /// Generate a proposal initialization + pub fn arb_init_proposal()( + id: Option, + content in arb_hash(), + author in arb_non_internal_address(), + r#type in arb_proposal_type(), + voting_start_epoch in arb_epoch(), + voting_end_epoch in arb_epoch(), + grace_epoch in arb_epoch(), + ) -> InitProposalData { + InitProposalData { + id, + content, + author, + r#type, + voting_start_epoch, + voting_end_epoch, + grace_epoch, + } + } + } + + prop_compose! { + /// Generate an arbitrary vote proposal + pub fn arb_vote_proposal()( + id: u64, + vote in arb_proposal_vote(), + voter in arb_non_internal_address(), + delegations in collection::vec(arb_non_internal_address(), 0..10), + ) -> VoteProposalData { + VoteProposalData { + id, + vote, + voter, + delegations, + } + } + } +} diff --git a/core/src/types/transaction/mod.rs b/core/src/types/transaction/mod.rs index 1ab89ba61f..95b32b473f 100644 --- a/core/src/types/transaction/mod.rs +++ b/core/src/types/transaction/mod.rs @@ -17,22 +17,146 @@ pub mod protocol; pub mod wrapper; use std::collections::BTreeSet; -use std::fmt; +use std::fmt::{self, Display}; +use std::str::FromStr; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; use borsh_ext::BorshSerializeExt; pub use decrypted::*; +use num_derive::{FromPrimitive, ToPrimitive}; +use num_traits::{FromPrimitive, ToPrimitive}; use serde::{Deserialize, Serialize}; use sha2::{Digest, Sha256}; pub use wrapper::*; use crate::ledger::gas::{Gas, VpsGas}; use crate::types::address::Address; +use crate::types::ethereum_structs::EthBridgeEvent; use crate::types::hash::Hash; use crate::types::ibc::IbcEvent; use crate::types::storage; use crate::types::transaction::protocol::ProtocolTx; +/// The different result codes that the ledger may send back to a client +/// indicating the status of their submitted tx. +/// The codes must not change with versions, only need ones may be added. 
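`arb_init_account` and `arb_update_account` above use the two-stage form of `prop_compose!`, so the signature threshold is always drawn relative to the generated key set. A sketch of a property test that relies on that invariant, with the module path assumed:

```
use proptest::prelude::*;

use namada_core::types::transaction::account::tests::arb_init_account;

proptest! {
    #[test]
    fn init_account_threshold_never_exceeds_keys(init in arb_init_account()) {
        // The threshold is sampled from `0..=public_keys.len()`, so this
        // always holds for generated values.
        prop_assert!(usize::from(init.threshold) <= init.public_keys.len());
    }
}
```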
+#[derive( + Debug, + Copy, + Clone, + FromPrimitive, + ToPrimitive, + PartialEq, + Eq, + Serialize, + Deserialize, +)] +pub enum ResultCode { + // WARN: These codes shouldn't be changed between version! + // ========================================================================= + /// Success + Ok = 0, + /// Error in WASM tx execution + WasmRuntimeError = 1, + /// Invalid tx + InvalidTx = 2, + /// Invalid signature + InvalidSig = 3, + /// Tx is in invalid order + InvalidOrder = 4, + /// Tx wasn't expected + ExtraTxs = 5, + /// Undecryptable + Undecryptable = 6, + /// The block is full + AllocationError = 7, + /// Replayed tx + ReplayTx = 8, + /// Invalid chain ID + InvalidChainId = 9, + /// Expired tx + ExpiredTx = 10, + /// Exceeded gas limit + TxGasLimit = 11, + /// Error in paying tx fee + FeeError = 12, + /// Invalid vote extension + InvalidVoteExtension = 13, + /// Tx is too large + TooLarge = 14, + /// Decrypted tx is expired + ExpiredDecryptedTx = 15, + // ========================================================================= + // WARN: These codes shouldn't be changed between version! +} + +impl ResultCode { + /// Checks if the given [`ResultCode`] value is a protocol level error, + /// that can be recovered from at the finalize block stage. + pub const fn is_recoverable(&self) -> bool { + use ResultCode::*; + // NOTE: pattern match on all `ResultCode` variants, in order + // to catch potential bugs when adding new codes + match self { + Ok | WasmRuntimeError | ExpiredDecryptedTx => true, + InvalidTx | InvalidSig | InvalidOrder | ExtraTxs + | Undecryptable | AllocationError | ReplayTx | InvalidChainId + | ExpiredTx | TxGasLimit | FeeError | InvalidVoteExtension + | TooLarge => false, + } + } + + /// Convert to `u32`. + pub fn to_u32(&self) -> u32 { + ToPrimitive::to_u32(self).unwrap() + } + + /// Convert from `u32`. + pub fn from_u32(raw: u32) -> Option { + FromPrimitive::from_u32(raw) + } +} + +impl From for String { + fn from(code: ResultCode) -> String { + code.to_string() + } +} + +impl From for u32 { + fn from(code: ResultCode) -> u32 { + code.to_u32() + } +} + +impl Display for ResultCode { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.to_u32()) + } +} + +impl FromStr for ResultCode { + type Err = std::io::Error; + + fn from_str(s: &str) -> Result { + let raw = u32::from_str(s).map_err(|e| { + std::io::Error::new(std::io::ErrorKind::InvalidData, e) + })?; + Self::from_u32(raw).ok_or_else(|| { + std::io::Error::new( + std::io::ErrorKind::InvalidData, + "Unexpected error code", + ) + }) + } +} + +impl From for crate::tendermint::abci::Code { + fn from(value: ResultCode) -> Self { + Self::from(value.to_u32()) + } +} + /// Get the hash of a transaction pub fn hash_tx(tx_bytes: &[u8]) -> Hash { let digest = Sha256::digest(tx_bytes); @@ -41,7 +165,15 @@ pub fn hash_tx(tx_bytes: &[u8]) -> Hash { /// Transaction application result // TODO derive BorshSchema after -#[derive(Clone, Debug, Default, BorshSerialize, BorshDeserialize)] +#[derive( + Clone, + Debug, + Default, + BorshSerialize, + BorshDeserialize, + Serialize, + Deserialize, +)] pub struct TxResult { /// Total gas used by the transaction (includes the gas used by VPs) pub gas_used: Gas, @@ -53,6 +185,8 @@ pub struct TxResult { pub initialized_accounts: Vec
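These numeric codes are what a client sees on the wire (via the `u32` and `crate::tendermint::abci::Code` conversions), so decoding a response code and checking recoverability typically looks like the following sketch; the `namada_core::types::transaction` path is assumed:

```
#[test]
fn result_code_roundtrip_sketch() {
    use std::str::FromStr;

    use namada_core::types::transaction::ResultCode;

    // A raw ABCI code of 3 maps back to `InvalidSig`.
    let code = ResultCode::from_u32(3).expect("3 is a defined code");
    assert_eq!(code, ResultCode::InvalidSig);

    // `Display` and `FromStr` go through the same numeric representation.
    assert_eq!(code.to_string(), "3");
    assert_eq!(ResultCode::from_str("3").unwrap(), ResultCode::InvalidSig);

    // Only `Ok`, `WasmRuntimeError` and `ExpiredDecryptedTx` are recoverable
    // at the finalize-block stage.
    assert!(!code.is_recoverable());
    assert!(ResultCode::WasmRuntimeError.is_recoverable());
}
```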
, /// IBC events emitted by the transaction pub ibc_events: BTreeSet, + /// Ethereum bridge events emitted by the transaction + pub eth_bridge_events: BTreeSet, } impl TxResult { @@ -64,7 +198,15 @@ impl TxResult { /// Result of checking a transaction with validity predicates // TODO derive BorshSchema after -#[derive(Clone, Debug, Default, BorshSerialize, BorshDeserialize)] +#[derive( + Clone, + Debug, + Default, + BorshSerialize, + BorshDeserialize, + Serialize, + Deserialize, +)] pub struct VpsResult { /// The addresses whose VPs accepted the transaction pub accepted_vps: BTreeSet
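With the new `eth_bridge_events` field, protocol code can attach Bridge pool status updates directly to a transaction result. A minimal sketch using the `EthBridgeEvent` constructors introduced earlier in this diff; the Keccak hashes are placeholders for real transfer hashes:

```
#[test]
fn tx_result_with_bridge_events_sketch() {
    use std::collections::BTreeSet;

    use namada_core::types::ethereum_structs::EthBridgeEvent;
    use namada_core::types::keccak::KeccakHash;
    use namada_core::types::transaction::TxResult;

    let mut eth_bridge_events = BTreeSet::new();
    // One transfer was relayed to Ethereum, another expired in the pool.
    eth_bridge_events
        .insert(EthBridgeEvent::new_bridge_pool_relayed(KeccakHash([1; 32])));
    eth_bridge_events
        .insert(EthBridgeEvent::new_bridge_pool_expired(KeccakHash([2; 32])));

    let result = TxResult {
        eth_bridge_events,
        ..Default::default()
    };
    assert_eq!(result.eth_bridge_events.len(), 2);
}
```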
, @@ -80,18 +222,30 @@ pub struct VpsResult { impl fmt::Display for TxResult { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!( - f, - "Transaction is {}. Gas used: {};{} VPs result: {}", - if self.is_accepted() { - "valid" - } else { - "invalid" - }, - self.gas_used, - iterable_to_string("Changed keys", self.changed_keys.iter()), - self.vps_result, - ) + if f.alternate() { + write!( + f, + "Transaction is {}. Gas used: {};{} VPs result: {}", + if self.is_accepted() { + "valid" + } else { + "invalid" + }, + self.gas_used, + iterable_to_string("Changed keys", self.changed_keys.iter()), + self.vps_result, + ) + } else { + write!(f, "{}", serde_json::to_string(self).unwrap()) + } + } +} + +impl FromStr for TxResult { + type Err = serde_json::Error; + + fn from_str(s: &str) -> Result { + serde_json::from_str(s) } } @@ -193,7 +347,7 @@ mod test_process_tx { use crate::types::address::nam; use crate::types::key::*; use crate::types::storage::Epoch; - use crate::types::token::Amount; + use crate::types::token::{Amount, DenominatedAmount}; fn gen_keypair() -> common::SecretKey { use rand::prelude::ThreadRng; @@ -278,8 +432,9 @@ mod test_process_tx { // the signed tx let mut tx = Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { - amount_per_gas_unit: Amount::from_uint(10, 0) - .expect("Test failed"), + amount_per_gas_unit: DenominatedAmount::native( + Amount::from_uint(10, 0).expect("Test failed"), + ), token: nam(), }, keypair.ref_to(), @@ -306,8 +461,9 @@ mod test_process_tx { // the signed tx let mut tx = Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { - amount_per_gas_unit: Amount::from_uint(10, 0) - .expect("Test failed"), + amount_per_gas_unit: DenominatedAmount::native( + Amount::from_uint(10, 0).expect("Test failed"), + ), token: nam(), }, keypair.ref_to(), diff --git a/core/src/types/transaction/pgf.rs b/core/src/types/transaction/pgf.rs index 5935d04ef9..02ee320253 100644 --- a/core/src/types/transaction/pgf.rs +++ b/core/src/types/transaction/pgf.rs @@ -30,3 +30,26 @@ pub struct UpdateStewardCommission { /// The new commission distribution pub commission: HashMap, } + +#[cfg(any(test, feature = "testing"))] +/// Tests and strategies for PGF +pub mod tests { + use proptest::{collection, prop_compose}; + + use crate::types::address::testing::arb_non_internal_address; + use crate::types::dec::testing::arb_dec; + use crate::types::transaction::pgf::UpdateStewardCommission; + + prop_compose! { + /// Generate an arbitraary steward commission update + pub fn arb_update_steward_commission()( + steward in arb_non_internal_address(), + commission in collection::hash_map(arb_non_internal_address(), arb_dec(), 0..10), + ) -> UpdateStewardCommission { + UpdateStewardCommission { + steward, + commission, + } + } + } +} diff --git a/core/src/types/transaction/pos.rs b/core/src/types/transaction/pos.rs index f44be68d85..1ac5fc8a38 100644 --- a/core/src/types/transaction/pos.rs +++ b/core/src/types/transaction/pos.rs @@ -206,3 +206,138 @@ pub struct ConsensusKeyChange { /// The new consensus key pub consensus_key: common::PublicKey, } + +#[cfg(any(test, feature = "testing"))] +/// Tests and strategies for proof-of-stake +pub mod tests { + use proptest::{option, prop_compose}; + + use super::*; + use crate::types::address::testing::arb_non_internal_address; + use crate::types::dec::testing::arb_dec; + use crate::types::key::testing::{arb_common_pk, arb_pk}; + use crate::types::token::testing::arb_amount; + + prop_compose! 
{ + /// Generate a bond + pub fn arb_bond()( + validator in arb_non_internal_address(), + amount in arb_amount(), + source in option::of(arb_non_internal_address()), + ) -> Bond { + Bond { + validator, + amount, + source, + } + } + } + + prop_compose! { + /// Generate an arbitrary withdraw + pub fn arb_withdraw()( + validator in arb_non_internal_address(), + source in option::of(arb_non_internal_address()), + ) -> Withdraw { + Withdraw { + validator, + source, + } + } + } + + prop_compose! { + /// Generate an arbitrary commission change + pub fn arb_commission_change()( + validator in arb_non_internal_address(), + new_rate in arb_dec(), + ) -> CommissionChange { + CommissionChange { + validator, + new_rate, + } + } + } + + prop_compose! { + /// Generate an arbitrary metadata change + pub fn arb_metadata_change()( + validator in arb_non_internal_address(), + email in option::of("[a-zA-Z0-9_]*"), + description in option::of("[a-zA-Z0-9_]*"), + website in option::of("[a-zA-Z0-9_]*"), + discord_handle in option::of("[a-zA-Z0-9_]*"), + commission_rate in option::of(arb_dec()), + ) -> MetaDataChange { + MetaDataChange { + validator, + email, + description, + website, + discord_handle, + commission_rate, + } + } + } + + prop_compose! { + /// Generate an arbitrary consensus key change + pub fn arb_consensus_key_change()( + validator in arb_non_internal_address(), + consensus_key in arb_common_pk(), + ) -> ConsensusKeyChange { + ConsensusKeyChange { + validator, + consensus_key, + } + } + } + + prop_compose! { + /// Generate a validator initialization + pub fn arb_become_validator()( + address in arb_non_internal_address(), + consensus_key in arb_common_pk(), + eth_cold_key in arb_pk::(), + eth_hot_key in arb_pk::(), + protocol_key in arb_common_pk(), + commission_rate in arb_dec(), + max_commission_rate_change in arb_dec(), + email in "[a-zA-Z0-9_]*", + description in option::of("[a-zA-Z0-9_]*"), + website in option::of("[a-zA-Z0-9_]*"), + discord_handle in option::of("[a-zA-Z0-9_]*"), + ) -> BecomeValidator { + BecomeValidator { + address, + consensus_key, + eth_cold_key, + eth_hot_key, + protocol_key, + commission_rate, + max_commission_rate_change, + email, + description, + website, + discord_handle, + } + } + } + + prop_compose! { + /// Generate an arbitrary redelegation + pub fn arb_redelegation()( + src_validator in arb_non_internal_address(), + dest_validator in arb_non_internal_address(), + owner in arb_non_internal_address(), + amount in arb_amount(), + ) -> Redelegation { + Redelegation { + src_validator, + dest_validator, + owner, + amount, + } + } + } +} diff --git a/core/src/types/transaction/wrapper.rs b/core/src/types/transaction/wrapper.rs index 92b3059a3f..926185788a 100644 --- a/core/src/types/transaction/wrapper.rs +++ b/core/src/types/transaction/wrapper.rs @@ -65,7 +65,7 @@ pub mod wrapper_tx { )] pub struct Fee { /// amount of fee per gas unit - pub amount_per_gas_unit: Amount, + pub amount_per_gas_unit: DenominatedAmount, /// address of the token /// TODO: This should support multi-tokens pub token: Address, @@ -301,10 +301,7 @@ pub mod wrapper_tx { source: MASP, target: self.fee_payer(), token: self.fee.token.clone(), - amount: DenominatedAmount { - amount: self.get_tx_fee()?, - denom: 0.into(), - }, + amount: self.get_tx_fee()?, key: None, shielded: Some(masp_hash), }; @@ -317,10 +314,10 @@ pub mod wrapper_tx { /// Get the [`Amount`] of fees to be paid by the given wrapper. 
Returns /// an error if the amount overflows - pub fn get_tx_fee(&self) -> Result { + pub fn get_tx_fee(&self) -> Result { self.fee .amount_per_gas_unit - .checked_mul(self.gas_limit.into()) + .checked_mul(Amount::from(self.gas_limit).into()) .ok_or(WrapperTxErr::OverflowingFee) } } diff --git a/ethereum_bridge/src/lib.rs b/ethereum_bridge/src/lib.rs index f06c19d69f..028dead0fa 100644 --- a/ethereum_bridge/src/lib.rs +++ b/ethereum_bridge/src/lib.rs @@ -1,10 +1,7 @@ extern crate core; -pub mod bridge_pool_vp; pub mod oracle; -pub mod parameters; pub mod protocol; pub mod storage; #[cfg(any(test, feature = "testing"))] pub mod test_utils; -pub mod vp; diff --git a/ethereum_bridge/src/protocol/transactions/bridge_pool_roots.rs b/ethereum_bridge/src/protocol/transactions/bridge_pool_roots.rs index 42041d9666..bffcf013e1 100644 --- a/ethereum_bridge/src/protocol/transactions/bridge_pool_roots.rs +++ b/ethereum_bridge/src/protocol/transactions/bridge_pool_roots.rs @@ -235,13 +235,14 @@ mod test_apply_bp_roots_to_storage { use namada_core::types::vote_extensions::bridge_pool_roots; use namada_core::types::voting_power::FractionalVotingPower; use namada_proof_of_stake::parameters::OwnedPosParams; - use namada_proof_of_stake::write_pos_params; + use namada_proof_of_stake::storage::write_pos_params; use super::*; use crate::protocol::transactions::votes::{ EpochedVotingPower, EpochedVotingPowerExt, }; - use crate::{bridge_pool_vp, test_utils}; + use crate::storage::vp; + use crate::test_utils; /// The data needed to run a test. struct TestPackage { @@ -273,7 +274,7 @@ mod test_apply_bp_roots_to_storage { wl_storage.storage.block.height = 1.into(); wl_storage.commit_block().unwrap(); - bridge_pool_vp::init_storage(&mut wl_storage); + vp::bridge_pool::init_storage(&mut wl_storage); test_utils::commit_bridge_pool_root_at_height( &mut wl_storage, &KeccakHash([1; 32]), @@ -813,7 +814,7 @@ mod test_apply_bp_roots_to_storage { ); // set up the bridge pool's storage - bridge_pool_vp::init_storage(&mut wl_storage); + vp::bridge_pool::init_storage(&mut wl_storage); test_utils::commit_bridge_pool_root_at_height( &mut wl_storage, &KeccakHash([1; 32]), diff --git a/ethereum_bridge/src/protocol/transactions/ethereum_events/events.rs b/ethereum_bridge/src/protocol/transactions/ethereum_events/events.rs index fa2be67104..af4c96e904 100644 --- a/ethereum_bridge/src/protocol/transactions/ethereum_events/events.rs +++ b/ethereum_bridge/src/protocol/transactions/ethereum_events/events.rs @@ -18,6 +18,7 @@ use namada_core::ledger::storage::traits::StorageHasher; use namada_core::ledger::storage::{DBIter, WlStorage, DB}; use namada_core::ledger::storage_api::{StorageRead, StorageWrite}; use namada_core::types::address::Address; +use namada_core::types::eth_abi::Encode; use namada_core::types::eth_bridge_pool::{ PendingTransfer, TransferToEthereumKind, }; @@ -25,13 +26,14 @@ use namada_core::types::ethereum_events::{ EthAddress, EthereumEvent, TransferToEthereum, TransferToNamada, TransfersToNamada, }; +use namada_core::types::ethereum_structs::EthBridgeEvent; use namada_core::types::storage::{BlockHeight, Key, KeySeg}; use namada_core::types::token; use namada_core::types::token::{balance_key, minted_balance_key}; -use crate::parameters::read_native_erc20_address; use crate::protocol::transactions::update; use crate::storage::eth_bridge_queries::{EthAssetMint, EthBridgeQueries}; +use crate::storage::parameters::read_native_erc20_address; /// Updates storage based on the given confirmed `event`. 
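Since `Fee::amount_per_gas_unit` is now a `DenominatedAmount`, the total fee computed by `get_tx_fee` above is the per-unit price multiplied by the gas limit converted to a zero-denomination amount. A worked sketch with placeholder values (10 raw native units per gas unit, 20,000 gas):

```
#[test]
fn wrapper_fee_sketch() {
    use namada_core::types::token::{Amount, DenominatedAmount};

    // Price per gas unit in the native token (6 decimal places).
    let amount_per_gas_unit =
        DenominatedAmount::native(Amount::from_uint(10, 0).unwrap());
    // The gas limit enters the multiplication as an `Amount` with denom 0.
    let gas_limit: DenominatedAmount =
        Amount::from_uint(20_000, 0).unwrap().into();

    let total_fee = amount_per_gas_unit
        .checked_mul(gas_limit)
        .expect("no overflow for these values");
    // 10 * 20_000 = 200_000 raw units at denomination 6, i.e. 0.2 NAM.
    assert_eq!(
        total_fee,
        DenominatedAmount::new(Amount::from_uint(200_000, 0).unwrap(), 6.into())
    );
}
```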
For example, for a /// confirmed [`EthereumEvent::TransfersToNamada`], mint the corresponding @@ -39,7 +41,7 @@ use crate::storage::eth_bridge_queries::{EthAssetMint, EthBridgeQueries}; pub(super) fn act_on( wl_storage: &mut WlStorage, event: EthereumEvent, -) -> Result> +) -> Result<(BTreeSet, BTreeSet)> where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, @@ -58,7 +60,7 @@ where } => act_on_transfers_to_eth(wl_storage, transfers, relayer), _ => { tracing::debug!(?event, "No actions taken for Ethereum event"); - Ok(BTreeSet::default()) + Ok(Default::default()) } } } @@ -66,7 +68,7 @@ where fn act_on_transfers_to_namada<'tx, D, H>( wl_storage: &mut WlStorage, transfer_event: TransfersToNamada, -) -> Result> +) -> Result<(BTreeSet, BTreeSet)> where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, @@ -88,7 +90,11 @@ where transfers.iter(), )?; } - Ok(changed_keys) + Ok(( + changed_keys, + // no tx events when we get a transfer to namada + BTreeSet::new(), + )) } fn update_transfers_to_namada_state<'tx, D, H>( @@ -301,13 +307,14 @@ fn act_on_transfers_to_eth( wl_storage: &mut WlStorage, transfers: &[TransferToEthereum], relayer: &Address, -) -> Result> +) -> Result<(BTreeSet, BTreeSet)> where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { tracing::debug!(?transfers, "Acting on transfers to Ethereum"); let mut changed_keys = BTreeSet::default(); + let mut tx_events = BTreeSet::default(); // the BP nonce should always be incremented, even if no valid // transfers to Ethereum were relayed. failing to do this @@ -363,10 +370,13 @@ where _ = changed_keys.insert(key); _ = changed_keys.insert(pool_balance_key); _ = changed_keys.insert(relayer_rewards_key); + _ = tx_events.insert(EthBridgeEvent::new_bridge_pool_relayed( + pending_transfer.keccak256(), + )); } if pending_keys.is_empty() { - return Ok(changed_keys); + return Ok((changed_keys, tx_events)); } // TODO the timeout height is min_num_blocks of an epoch for now @@ -383,13 +393,15 @@ where ) .expect("BlockHeight should be decoded"); if inserted_height <= timeout_height { - let mut keys = refund_transfer(wl_storage, key)?; + let (mut keys, mut new_tx_events) = + refund_transfer(wl_storage, key)?; changed_keys.append(&mut keys); + tx_events.append(&mut new_tx_events); } } } - Ok(changed_keys) + Ok((changed_keys, tx_events)) } fn increment_bp_nonce( @@ -412,12 +424,13 @@ where fn refund_transfer( wl_storage: &mut WlStorage, key: Key, -) -> Result> +) -> Result<(BTreeSet, BTreeSet)> where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { let mut changed_keys = BTreeSet::default(); + let mut tx_events = BTreeSet::default(); let transfer = match wl_storage.read_bytes(&key)? 
{ Some(v) => PendingTransfer::try_from_slice(&v[..])?, @@ -430,7 +443,12 @@ where wl_storage.delete(&key)?; _ = changed_keys.insert(key); - Ok(changed_keys) + // Emit expiration event + _ = tx_events.insert(EthBridgeEvent::new_bridge_pool_expired( + transfer.keccak256(), + )); + + Ok((changed_keys, tx_events)) } fn refund_transfer_fees( @@ -1034,7 +1052,7 @@ mod tests { .expect("Test failed"), ) .expect("Test failed"); - let mut changed_keys = act_on(&mut wl_storage, event).unwrap(); + let (mut changed_keys, _) = act_on(&mut wl_storage, event).unwrap(); for erc20 in [ random_erc20_token, diff --git a/ethereum_bridge/src/protocol/transactions/ethereum_events/mod.rs b/ethereum_bridge/src/protocol/transactions/ethereum_events/mod.rs index e9ff768fa0..00f0bb2084 100644 --- a/ethereum_bridge/src/protocol/transactions/ethereum_events/mod.rs +++ b/ethereum_bridge/src/protocol/transactions/ethereum_events/mod.rs @@ -12,6 +12,7 @@ use namada_core::ledger::storage::traits::StorageHasher; use namada_core::ledger::storage::{DBIter, WlStorage, DB}; use namada_core::types::address::Address; use namada_core::types::ethereum_events::EthereumEvent; +use namada_core::types::ethereum_structs::EthBridgeEvent; use namada_core::types::internal::ExpiredTx; use namada_core::types::storage::{BlockHeight, Epoch, Key}; use namada_core::types::token::Amount; @@ -77,14 +78,13 @@ where let voting_powers = utils::get_voting_powers(wl_storage, &updates)?; - changed_keys.append(&mut apply_updates( - wl_storage, - updates, - voting_powers, - )?); + let (mut apply_updates_keys, eth_bridge_events) = + apply_updates(wl_storage, updates, voting_powers)?; + changed_keys.append(&mut apply_updates_keys); Ok(TxResult { changed_keys, + eth_bridge_events, ..Default::default() }) } @@ -98,7 +98,7 @@ pub(super) fn apply_updates( wl_storage: &mut WlStorage, updates: HashSet, voting_powers: HashMap<(Address, BlockHeight), Amount>, -) -> Result +) -> Result<(ChangedKeys, BTreeSet)> where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, @@ -110,6 +110,7 @@ where ); let mut changed_keys = BTreeSet::default(); + let mut tx_events = BTreeSet::default(); let mut confirmed = vec![]; for update in updates { // The order in which updates are applied to storage does not matter. @@ -123,17 +124,19 @@ where } if confirmed.is_empty() { tracing::debug!("No events were newly confirmed"); - return Ok(changed_keys); + return Ok((changed_keys, tx_events)); } tracing::debug!(n = confirmed.len(), "Events were newly confirmed",); // Right now, the order in which events are acted on does not matter. // For `TransfersToNamada` events, they can happen in any order. for event in confirmed { - let mut changed = events::act_on(wl_storage, event)?; + let (mut changed, mut new_tx_events) = + events::act_on(wl_storage, event)?; changed_keys.append(&mut changed); + tx_events.append(&mut new_tx_events); } - Ok(changed_keys) + Ok((changed_keys, tx_events)) } /// Apply an [`EthMsgUpdate`] to storage. 
Returns any keys changed and whether @@ -366,7 +369,7 @@ mod tests { )], ); - let changed_keys = + let (changed_keys, _) = apply_updates(&mut wl_storage, updates, voting_powers)?; let eth_msg_keys: vote_tallies::Keys = (&body).into(); @@ -708,10 +711,11 @@ mod tests { // commit then update the epoch wl_storage.storage.commit_block(MockDBWriteBatch).unwrap(); - let unbonding_len = namada_proof_of_stake::read_pos_params(&wl_storage) - .expect("Test failed") - .unbonding_len - + 1; + let unbonding_len = + namada_proof_of_stake::storage::read_pos_params(&wl_storage) + .expect("Test failed") + .unbonding_len + + 1; wl_storage.storage.last_epoch = wl_storage.storage.last_epoch + unbonding_len; wl_storage.storage.block.epoch = wl_storage.storage.last_epoch + 1_u64; @@ -844,10 +848,11 @@ mod tests { // commit then update the epoch wl_storage.storage.commit_block(MockDBWriteBatch).unwrap(); - let unbonding_len = namada_proof_of_stake::read_pos_params(&wl_storage) - .expect("Test failed") - .unbonding_len - + 1; + let unbonding_len = + namada_proof_of_stake::storage::read_pos_params(&wl_storage) + .expect("Test failed") + .unbonding_len + + 1; wl_storage.storage.last_epoch = wl_storage.storage.last_epoch + unbonding_len; wl_storage.storage.block.epoch = wl_storage.storage.last_epoch + 1_u64; diff --git a/ethereum_bridge/src/protocol/transactions/votes.rs b/ethereum_bridge/src/protocol/transactions/votes.rs index cc029e28f5..cdf7461047 100644 --- a/ethereum_bridge/src/protocol/transactions/votes.rs +++ b/ethereum_bridge/src/protocol/transactions/votes.rs @@ -190,7 +190,7 @@ mod tests { use namada_core::types::storage::BlockHeight; use namada_core::types::{address, token}; use namada_proof_of_stake::parameters::OwnedPosParams; - use namada_proof_of_stake::write_pos_params; + use namada_proof_of_stake::storage::write_pos_params; use super::*; use crate::test_utils; diff --git a/ethereum_bridge/src/protocol/transactions/votes/update.rs b/ethereum_bridge/src/protocol/transactions/votes/update.rs index a98be1859d..b6ac1d102d 100644 --- a/ethereum_bridge/src/protocol/transactions/votes/update.rs +++ b/ethereum_bridge/src/protocol/transactions/votes/update.rs @@ -220,7 +220,7 @@ mod tests { use crate::test_utils; mod helpers { - use namada_proof_of_stake::total_consensus_stake_key_handle; + use namada_proof_of_stake::storage::total_consensus_stake_handle; use super::*; @@ -279,7 +279,7 @@ mod tests { > FractionalVotingPower::TWO_THIRDS * total_stake, }; votes::storage::write(wl_storage, &keys, event, &tally, false)?; - total_consensus_stake_key_handle().set( + total_consensus_stake_handle().set( wl_storage, total_stake, 0u64.into(), diff --git a/ethereum_bridge/src/storage/eth_bridge_queries.rs b/ethereum_bridge/src/storage/eth_bridge_queries.rs index 06cb1ed024..5e132592de 100644 --- a/ethereum_bridge/src/storage/eth_bridge_queries.rs +++ b/ethereum_bridge/src/storage/eth_bridge_queries.rs @@ -22,7 +22,7 @@ use namada_core::types::voting_power::{ EthBridgeVotingPower, FractionalVotingPower, }; use namada_proof_of_stake::pos_queries::{ConsensusValidators, PosQueries}; -use namada_proof_of_stake::{ +use namada_proof_of_stake::storage::{ validator_eth_cold_key_handle, validator_eth_hot_key_handle, }; diff --git a/ethereum_bridge/src/storage/mod.rs b/ethereum_bridge/src/storage/mod.rs index 0c2be3048a..b6e62979de 100644 --- a/ethereum_bridge/src/storage/mod.rs +++ b/ethereum_bridge/src/storage/mod.rs @@ -1,7 +1,9 @@ //! 
Functionality for accessing the storage subspace pub mod eth_bridge_queries; +pub mod parameters; pub mod proof; pub mod vote_tallies; +pub mod vp; pub use namada_core::ledger::eth_bridge::storage::{ bridge_pool, wrapped_erc20s, *, }; diff --git a/ethereum_bridge/src/parameters.rs b/ethereum_bridge/src/storage/parameters.rs similarity index 96% rename from ethereum_bridge/src/parameters.rs rename to ethereum_bridge/src/storage/parameters.rs index 62d1f9a7a1..ada16e87d4 100644 --- a/ethereum_bridge/src/parameters.rs +++ b/ethereum_bridge/src/storage/parameters.rs @@ -14,10 +14,11 @@ use namada_core::types::storage::Key; use namada_core::types::token::{DenominatedAmount, NATIVE_MAX_DECIMAL_PLACES}; use serde::{Deserialize, Serialize}; +use crate::storage as bridge_storage; use crate::storage::eth_bridge_queries::{ EthBridgeEnabled, EthBridgeQueries, EthBridgeStatus, }; -use crate::{bridge_pool_vp, storage as bridge_storage, vp}; +use crate::storage::vp; /// An ERC20 token whitelist entry. #[derive( @@ -208,11 +209,12 @@ impl EthereumBridgeParams { .unwrap(); for Erc20WhitelistEntry { token_address: addr, - token_cap: DenominatedAmount { amount: cap, denom }, + token_cap, } in erc20_whitelist { - if addr == native_erc20 - && denom != &NATIVE_MAX_DECIMAL_PLACES.into() + let cap = token_cap.amount(); + let denom = token_cap.denom(); + if addr == native_erc20 && denom != NATIVE_MAX_DECIMAL_PLACES.into() { panic!( "Error writing Ethereum bridge config: The native token \ @@ -232,19 +234,19 @@ impl EthereumBridgeParams { suffix: whitelist::KeyType::Cap, } .into(); - wl_storage.write_bytes(&key, encode(cap)).unwrap(); + wl_storage.write_bytes(&key, encode(&cap)).unwrap(); let key = whitelist::Key { asset: *addr, suffix: whitelist::KeyType::Denomination, } .into(); - wl_storage.write_bytes(&key, encode(denom)).unwrap(); + wl_storage.write_bytes(&key, encode(&denom)).unwrap(); } // Initialize the storage for the Ethereum Bridge VP. - vp::init_storage(wl_storage); + vp::ethereum_bridge::init_storage(wl_storage); // Initialize the storage for the Bridge Pool VP. - bridge_pool_vp::init_storage(wl_storage); + vp::bridge_pool::init_storage(wl_storage); } } @@ -373,10 +375,6 @@ mod tests { use namada_core::types::ethereum_events::EthAddress; use super::*; - use crate::parameters::{ - ContractVersion, Contracts, EthereumBridgeParams, MinimumConfirmations, - UpgradeableContract, - }; /// Ensure we can serialize and deserialize a [`Config`] struct to and from /// TOML. This can fail if complex fields are ordered before simple fields diff --git a/ethereum_bridge/src/storage/vp.rs b/ethereum_bridge/src/storage/vp.rs new file mode 100644 index 0000000000..95abf8595a --- /dev/null +++ b/ethereum_bridge/src/storage/vp.rs @@ -0,0 +1,4 @@ +//! 
Validity predicate storage + +pub mod bridge_pool; +pub mod ethereum_bridge;
diff --git a/ethereum_bridge/src/bridge_pool_vp.rs b/ethereum_bridge/src/storage/vp/bridge_pool.rs similarity index 100% rename from ethereum_bridge/src/bridge_pool_vp.rs rename to ethereum_bridge/src/storage/vp/bridge_pool.rs
diff --git a/ethereum_bridge/src/vp.rs b/ethereum_bridge/src/storage/vp/ethereum_bridge.rs similarity index 100% rename from ethereum_bridge/src/vp.rs rename to ethereum_bridge/src/storage/vp/ethereum_bridge.rs
diff --git a/ethereum_bridge/src/test_utils.rs b/ethereum_bridge/src/test_utils.rs index b74eb14bc3..feeb76fb75 100644 --- a/ethereum_bridge/src/test_utils.rs +++ b/ethereum_bridge/src/test_utils.rs @@ -23,7 +23,7 @@ use namada_proof_of_stake::{ staking_token_address, BecomeValidator, }; -use crate::parameters::{ +use crate::storage::parameters::{ ContractVersion, Contracts, EthereumBridgeParams, MinimumConfirmations, UpgradeableContract, };
diff --git a/examples/Cargo.toml b/examples/Cargo.toml new file mode 100644 index 0000000000..fba6785cec --- /dev/null +++ b/examples/Cargo.toml @@ -0,0 +1,24 @@ +[package] +name = "namada_examples" +description = "Namada examples" +resolver = "2" +authors.workspace = true +edition.workspace = true +documentation.workspace = true +homepage.workspace = true +keywords.workspace = true +license.workspace = true +readme.workspace = true +repository.workspace = true +version.workspace = true + +[[example]] +name = "generate-txs" +path = "generate_txs.rs" + +[dev-dependencies] +masp_proofs = { workspace = true, default-features = false, features = ["local-prover", "download-params"] } +namada_sdk = { path = "../sdk", default-features = false, features = ["namada-sdk", "std", "testing"] } +proptest.workspace = true +serde_json.workspace = true +tokio = {workspace = true, default-features = false}
diff --git a/examples/README.md b/examples/README.md new file mode 100644 index 0000000000..7ff0b4fbc2 --- /dev/null +++ b/examples/README.md @@ -0,0 +1,17 @@ +# Examples +This directory contains examples and additional tooling to help in the +development of Namada. The currently provided examples are listed below: +## `generate-txs` +This utility serves to randomly generate Namada transaction test vectors +offline. These test vectors are useful for ensuring compatibility with hardware +wallets. This example is included in the Namada repository in order to ensure +that the test vector generation logic is maintained and remains up to date with +the latest changes in transaction formats. +### Usage +This example is run as follows: +``` +cargo run --example generate-txs -- <vector-path> <debug-path> +``` +where `<vector-path>` is the path where the JSON test vectors will be stored +and `<debug-path>` is where Rust `Debug` representations of this data will be +stored. 
diff --git a/examples/generate_txs.rs b/examples/generate_txs.rs new file mode 100644 index 0000000000..384f092900 --- /dev/null +++ b/examples/generate_txs.rs @@ -0,0 +1,35 @@ +use std::path::PathBuf; + +use namada_sdk::signing::to_ledger_vector; +use namada_sdk::testing::arb_tx; +use namada_sdk::wallet::fs::FsWalletUtils; +use proptest::strategy::{Strategy, ValueTree}; +use proptest::test_runner::{Reason, TestRunner}; + +#[tokio::main] +async fn main() -> Result<(), Reason> { + let mut runner = TestRunner::default(); + let wallet = FsWalletUtils::new(PathBuf::from("wallet.toml")); + let mut debug_vectors = vec![]; + let mut test_vectors = vec![]; + for i in 0..1000 { + let (tx, tx_data) = arb_tx().new_tree(&mut runner)?.current(); + let mut ledger_vector = to_ledger_vector(&wallet, &tx) + .await + .expect("unable to construct test vector"); + ledger_vector.name = format!("{}_{}", i, ledger_vector.name); + test_vectors.push(ledger_vector.clone()); + debug_vectors.push((ledger_vector, tx, tx_data)); + } + let args: Vec<_> = std::env::args().collect(); + if args.len() < 3 { + eprintln!("Usage: namada-generator "); + return Result::Err(Reason::from("Incorrect command line arguments.")); + } + let json = serde_json::to_string(&test_vectors) + .expect("unable to serialize test vectors"); + std::fs::write(&args[1], json).expect("unable to save test vectors"); + std::fs::write(&args[2], format!("{:?}", debug_vectors)) + .expect("unable to save test vectors"); + Ok(()) +} diff --git a/genesis/localnet/parameters.toml b/genesis/localnet/parameters.toml index 266fe803fa..7a99fa34b5 100644 --- a/genesis/localnet/parameters.toml +++ b/genesis/localnet/parameters.toml @@ -17,10 +17,6 @@ tx_whitelist = [] implicit_vp = "vp_implicit" # Expected number of epochs per year (also sets the min duration of an epoch in seconds) epochs_per_year = 31_536_000 -# The P gain factor in the Proof of Stake rewards controller -pos_gain_p = "0.1" -# The D gain factor in the Proof of Stake rewards controller -pos_gain_d = "0.1" # Maximum number of signature per transaction max_signatures_per_transaction = 15 # Max gas for block @@ -72,6 +68,10 @@ liveness_window_check = 100 # The minimum required activity of consensus validators, in percentage, over # the `liveness_window_check` liveness_threshold = "0.9" +# The P gain factor in the Proof of Stake rewards controller +rewards_gain_p = "0.25" +# The D gain factor in the Proof of Stake rewards controller +rewards_gain_d = "0.25" # Governance parameters. [gov_params] diff --git a/genesis/starter/parameters.toml b/genesis/starter/parameters.toml index 543f7dbc13..926e564755 100644 --- a/genesis/starter/parameters.toml +++ b/genesis/starter/parameters.toml @@ -17,10 +17,6 @@ tx_whitelist = [] implicit_vp = "vp_implicit" # Expected number of epochs per year (also sets the min duration of an epoch in seconds) epochs_per_year = 31_536_000 -# The P gain factor in the Proof of Stake rewards controller -pos_gain_p = "0.1" -# The D gain factor in the Proof of Stake rewards controller -pos_gain_d = "0.1" # Maximum number of signature per transaction max_signatures_per_transaction = 15 # Max gas for block @@ -72,6 +68,10 @@ liveness_window_check = 10_000 # The minimum required activity of consensus validators, in percentage, over # the `liveness_window_check` liveness_threshold = "0.9" +# The P gain factor in the Proof of Stake rewards controller +rewards_gain_p = "0.25" +# The D gain factor in the Proof of Stake rewards controller +rewards_gain_d = "0.25" # Governance parameters. 
[gov_params] diff --git a/light_sdk/Cargo.toml b/light_sdk/Cargo.toml new file mode 100644 index 0000000000..63669efc45 --- /dev/null +++ b/light_sdk/Cargo.toml @@ -0,0 +1,26 @@ +[package] +name = "namada_light_sdk" +description = "A more simple version of the Namada SDK" +resolver = "2" +authors.workspace = true +edition.workspace = true +documentation.workspace = true +homepage.workspace = true +keywords.workspace = true +license.workspace = true +readme.workspace = true +repository.workspace = true +version.workspace = true + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +borsh.workspace = true +borsh-ext.workspace = true +ibc = "0.47.0" +namada_core = {path = "../core"} +namada_sdk = {path = "../sdk"} +prost.workspace = true +tendermint-config.workspace = true +tendermint-rpc = { worskpace = true, features = ["http-client"] } +tokio = {workspace = true, features = ["rt"]} diff --git a/light_sdk/src/lib.rs b/light_sdk/src/lib.rs new file mode 100644 index 0000000000..317eaada4c --- /dev/null +++ b/light_sdk/src/lib.rs @@ -0,0 +1,21 @@ +//! The Namada light SDK is a simplified version of the SDK aimed at making +//! interaction with the protocol easier and faster. The library is developed +//! with ease-of-use and interoperability in mind so that it should be possible +//! to wrap it for usage in an FFI context. +//! +//! The [`namada_core`] crate of Namada is also re-exported to allow access to +//! its types. +//! +//! # Structure +//! +//! This SDK is divided into three modules: +//! +//! - transaction: contains functions to construct all the transactions +//! currently supported by the protocol +//! - reading: exposes queries to retrieve data from a Namada node +//! - writing: TO BE DONE + +pub mod reading; +pub mod transaction; +pub mod writing; +pub use namada_core; diff --git a/light_sdk/src/reading/account.rs b/light_sdk/src/reading/account.rs new file mode 100644 index 0000000000..920580dbbe --- /dev/null +++ b/light_sdk/src/reading/account.rs @@ -0,0 +1,79 @@ +use namada_core::types::account::Account; +use namada_core::types::address::Address; +use namada_core::types::key::common; + +use super::*; + +/// Query token amount of owner. +pub fn get_token_balance( + tendermint_addr: &str, + token: &Address, + owner: &Address, +) -> Result { + let client = HttpClient::new( + TendermintAddress::from_str(tendermint_addr) + .map_err(|e| Error::Other(e.to_string()))?, + ) + .map_err(|e| Error::Other(e.to_string()))?; + let rt = Runtime::new().unwrap(); + rt.block_on(rpc::get_token_balance(&client, token, owner)) +} + +/// Check if the address exists on chain. Established address exists if it +/// has a stored validity predicate. Implicit and internal addresses +/// always return true. 
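Every helper in the light SDK's `reading` module follows the same shape: build an `HttpClient` from the string RPC address and drive the async SDK query on a fresh Tokio runtime. A sketch of how a caller might combine two of these blocking helpers; the crate and module paths follow the files above, while the `token::Amount` return type of `get_token_balance` is an assumption based on the wrapped `rpc` call:

```
use namada_core::types::address::Address;
use namada_core::types::token;
use namada_light_sdk::reading;
use namada_sdk::error::Error;

/// Query `owner`'s transparent balance of the native token over RPC.
fn native_balance(rpc: &str, owner: &Address) -> Result<token::Amount, Error> {
    // Both helpers block on an internal Tokio runtime, so this must not be
    // called from inside an async context.
    let native_token = reading::query_native_token(rpc)?;
    reading::account::get_token_balance(rpc, &native_token, owner)
}
```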
+pub fn known_address( + tendermint_addr: &str, + address: &Address, +) -> Result { + let client = HttpClient::new( + TendermintAddress::from_str(tendermint_addr) + .map_err(|e| Error::Other(e.to_string()))?, + ) + .map_err(|e| Error::Other(e.to_string()))?; + let rt = Runtime::new().unwrap(); + rt.block_on(rpc::known_address(&client, address)) +} + +/// Query the accunt substorage space of an address +pub fn get_account_info( + tendermint_addr: &str, + owner: &Address, +) -> Result, Error> { + let client = HttpClient::new( + TendermintAddress::from_str(tendermint_addr) + .map_err(|e| Error::Other(e.to_string()))?, + ) + .map_err(|e| Error::Other(e.to_string()))?; + let rt = Runtime::new().unwrap(); + rt.block_on(rpc::get_account_info(&client, owner)) +} + +/// Query if the public_key is revealed +pub fn is_public_key_revealed( + tendermint_addr: &str, + owner: &Address, +) -> Result { + let client = HttpClient::new( + TendermintAddress::from_str(tendermint_addr) + .map_err(|e| Error::Other(e.to_string()))?, + ) + .map_err(|e| Error::Other(e.to_string()))?; + let rt = Runtime::new().unwrap(); + rt.block_on(rpc::is_public_key_revealed(&client, owner)) +} + +/// Query an account substorage at a specific index +pub fn get_public_key_at( + tendermint_addr: &str, + owner: &Address, + index: u8, +) -> Result, Error> { + let client = HttpClient::new( + TendermintAddress::from_str(tendermint_addr) + .map_err(|e| Error::Other(e.to_string()))?, + ) + .map_err(|e| Error::Other(e.to_string()))?; + let rt = Runtime::new().unwrap(); + rt.block_on(rpc::get_public_key_at(&client, owner, index)) +} diff --git a/light_sdk/src/reading/governance.rs b/light_sdk/src/reading/governance.rs new file mode 100644 index 0000000000..7f7399ef64 --- /dev/null +++ b/light_sdk/src/reading/governance.rs @@ -0,0 +1,46 @@ +use namada_core::ledger::governance::parameters::GovernanceParameters; +use namada_core::ledger::governance::storage::proposal::StorageProposal; +use namada_core::ledger::governance::utils::Vote; + +use super::*; + +/// Query proposal by Id +pub fn query_proposal_by_id( + tendermint_addr: &str, + proposal_id: u64, +) -> Result, Error> { + let client = HttpClient::new( + TendermintAddress::from_str(tendermint_addr) + .map_err(|e| Error::Other(e.to_string()))?, + ) + .map_err(|e| Error::Other(e.to_string()))?; + let rt = Runtime::new().unwrap(); + rt.block_on(rpc::query_proposal_by_id(&client, proposal_id)) +} + +/// Get the givernance parameters +pub fn query_governance_parameters( + tendermint_addr: &str, +) -> Result { + let client = HttpClient::new( + TendermintAddress::from_str(tendermint_addr) + .map_err(|e| Error::Other(e.to_string()))?, + ) + .map_err(|e| Error::Other(e.to_string()))?; + let rt = Runtime::new().unwrap(); + Ok(rt.block_on(rpc::query_governance_parameters(&client))) +} + +/// Get the givernance parameters +pub fn query_proposal_votes( + tendermint_addr: &str, + proposal_id: u64, +) -> Result, Error> { + let client = HttpClient::new( + TendermintAddress::from_str(tendermint_addr) + .map_err(|e| Error::Other(e.to_string()))?, + ) + .map_err(|e| Error::Other(e.to_string()))?; + let rt = Runtime::new().unwrap(); + rt.block_on(rpc::query_proposal_votes(&client, proposal_id)) +} diff --git a/light_sdk/src/reading/mod.rs b/light_sdk/src/reading/mod.rs new file mode 100644 index 0000000000..690ea074d4 --- /dev/null +++ b/light_sdk/src/reading/mod.rs @@ -0,0 +1,53 @@ +use std::str::FromStr; + +use namada_core::ledger::storage::LastBlock; +use namada_core::types::address::Address; +use 
namada_core::types::storage::BlockResults; +use namada_core::types::token; +use namada_sdk::error::Error; +use namada_sdk::queries::RPC; +use namada_sdk::rpc; +use tendermint_config::net::Address as TendermintAddress; +use tendermint_rpc::HttpClient; +use tokio::runtime::Runtime; + +pub mod account; +pub mod governance; +pub mod pgf; +pub mod pos; +pub mod tx; + +/// Query the address of the native token +pub fn query_native_token(tendermint_addr: &str) -> Result { + let client = HttpClient::new( + TendermintAddress::from_str(tendermint_addr) + .map_err(|e| Error::Other(e.to_string()))?, + ) + .map_err(|e| Error::Other(e.to_string()))?; + let rt = Runtime::new().unwrap(); + rt.block_on(rpc::query_native_token(&client)) +} + +/// Query the last committed block, if any. +pub fn query_block(tendermint_addr: &str) -> Result, Error> { + let client = HttpClient::new( + TendermintAddress::from_str(tendermint_addr) + .map_err(|e| Error::Other(e.to_string()))?, + ) + .map_err(|e| Error::Other(e.to_string()))?; + let rt = Runtime::new().unwrap(); + rt.block_on(rpc::query_block(&client)) +} + +/// Query the results of the last committed block +pub fn query_results( + tendermint_addr: &str, +) -> Result, Error> { + let client = HttpClient::new( + TendermintAddress::from_str(tendermint_addr) + .map_err(|e| Error::Other(e.to_string()))?, + ) + .map_err(|e| Error::Other(e.to_string()))?; + let rt = Runtime::new().unwrap(); + rt.block_on(rpc::query_results(&client)) +} diff --git a/light_sdk/src/reading/pgf.rs b/light_sdk/src/reading/pgf.rs new file mode 100644 index 0000000000..f6edfdaeac --- /dev/null +++ b/light_sdk/src/reading/pgf.rs @@ -0,0 +1,15 @@ +use super::*; + +/// Check if the given address is a pgf steward. +pub async fn is_steward( + tendermint_addr: &str, + address: &Address, +) -> Result { + let client = HttpClient::new( + TendermintAddress::from_str(tendermint_addr) + .map_err(|e| Error::Other(e.to_string()))?, + ) + .map_err(|e| Error::Other(e.to_string()))?; + let rt = Runtime::new().unwrap(); + Ok(rt.block_on(rpc::is_steward(&client, address))) +} diff --git a/light_sdk/src/reading/pos.rs b/light_sdk/src/reading/pos.rs new file mode 100644 index 0000000000..86aea2dc01 --- /dev/null +++ b/light_sdk/src/reading/pos.rs @@ -0,0 +1,366 @@ +use std::collections::{BTreeSet, HashMap, HashSet}; + +use namada_core::types::address::Address; +use namada_core::types::key::common; +use namada_core::types::storage::{BlockHeight, Epoch}; +use namada_sdk::proof_of_stake::types::{ + BondsAndUnbondsDetails, CommissionPair, ValidatorMetaData, ValidatorState, +}; +use namada_sdk::proof_of_stake::PosParams; +use namada_sdk::queries::vp::pos::EnrichedBondsAndUnbondsDetails; + +use super::*; + +/// Query the epoch of the last committed block +pub fn query_epoch(tendermint_addr: &str) -> Result { + let client = HttpClient::new( + TendermintAddress::from_str(tendermint_addr) + .map_err(|e| Error::Other(e.to_string()))?, + ) + .map_err(|e| Error::Other(e.to_string()))?; + let rt = Runtime::new().unwrap(); + rt.block_on(rpc::query_epoch(&client)) +} + +/// Query the epoch of the given block height, if it exists. +/// Will return none if the input block height is greater than +/// the latest committed block height. 
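+///
+/// Rough usage sketch (the endpoint is a placeholder; not run as a doc test):
+///
+/// ```ignore
+/// use namada_core::types::storage::BlockHeight;
+///
+/// let node = "tcp://127.0.0.1:26657"; // hypothetical local node
+/// match query_epoch_at_height(node, BlockHeight(100)).expect("query failed") {
+///     Some(epoch) => println!("block 100 belongs to epoch {epoch}"),
+///     None => println!("block 100 has not been committed yet"),
+/// }
+/// ```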
+pub fn query_epoch_at_height( + tendermint_addr: &str, + height: BlockHeight, +) -> Result, Error> { + let client = HttpClient::new( + TendermintAddress::from_str(tendermint_addr) + .map_err(|e| Error::Other(e.to_string()))?, + ) + .map_err(|e| Error::Other(e.to_string()))?; + let rt = Runtime::new().unwrap(); + rt.block_on(rpc::query_epoch_at_height(&client, height)) +} + +/// Check if the given address is a known validator. +pub fn is_validator( + tendermint_addr: &str, + address: &Address, +) -> Result { + let client = HttpClient::new( + TendermintAddress::from_str(tendermint_addr) + .map_err(|e| Error::Other(e.to_string()))?, + ) + .map_err(|e| Error::Other(e.to_string()))?; + let rt = Runtime::new().unwrap(); + rt.block_on(rpc::is_validator(&client, address)) +} + +/// Check if a given address is a known delegator +pub fn is_delegator( + tendermint_addr: &str, + address: &Address, +) -> Result { + let client = HttpClient::new( + TendermintAddress::from_str(tendermint_addr) + .map_err(|e| Error::Other(e.to_string()))?, + ) + .map_err(|e| Error::Other(e.to_string()))?; + let rt = Runtime::new().unwrap(); + rt.block_on(rpc::is_delegator(&client, address)) +} + +/// Check if a given address is a known delegator at the given epoch +pub fn is_delegator_at( + tendermint_addr: &str, + address: &Address, + epoch: Epoch, +) -> Result { + let client = HttpClient::new( + TendermintAddress::from_str(tendermint_addr) + .map_err(|e| Error::Other(e.to_string()))?, + ) + .map_err(|e| Error::Other(e.to_string()))?; + let rt = Runtime::new().unwrap(); + rt.block_on(rpc::is_delegator_at(&client, address, epoch)) +} + +/// Get the set of consensus keys registered in the network +pub fn get_consensus_keys( + tendermint_addr: &str, +) -> Result, Error> { + let client = HttpClient::new( + TendermintAddress::from_str(tendermint_addr) + .map_err(|e| Error::Other(e.to_string()))?, + ) + .map_err(|e| Error::Other(e.to_string()))?; + let rt = Runtime::new().unwrap(); + rt.block_on(rpc::get_consensus_keys(&client)) +} + +/// Get the PoS parameters +pub fn get_pos_params(tendermint_addr: &str) -> Result { + let client = HttpClient::new( + TendermintAddress::from_str(tendermint_addr) + .map_err(|e| Error::Other(e.to_string()))?, + ) + .map_err(|e| Error::Other(e.to_string()))?; + let rt = Runtime::new().unwrap(); + rt.block_on(rpc::get_pos_params(&client)) +} + +/// Get all validators in the given epoch +pub fn get_all_validators( + tendermint_addr: &str, + epoch: Epoch, +) -> Result, Error> { + let client = HttpClient::new( + TendermintAddress::from_str(tendermint_addr) + .map_err(|e| Error::Other(e.to_string()))?, + ) + .map_err(|e| Error::Other(e.to_string()))?; + let rt = Runtime::new().unwrap(); + rt.block_on(rpc::get_all_validators(&client, epoch)) +} + +/// Get the total staked tokens in the given epoch +pub fn get_total_staked_tokens( + tendermint_addr: &str, + epoch: Epoch, +) -> Result { + let client = HttpClient::new( + TendermintAddress::from_str(tendermint_addr) + .map_err(|e| Error::Other(e.to_string()))?, + ) + .map_err(|e| Error::Other(e.to_string()))?; + let rt = Runtime::new().unwrap(); + rt.block_on(rpc::get_total_staked_tokens(&client, epoch)) +} + +/// Get the given validator's stake at the given epoch +pub fn get_validator_stake( + tendermint_addr: &str, + epoch: Epoch, + validator: &Address, +) -> Result { + let client = HttpClient::new( + TendermintAddress::from_str(tendermint_addr) + .map_err(|e| Error::Other(e.to_string()))?, + ) + .map_err(|e| Error::Other(e.to_string()))?; + let rt = 
Runtime::new().unwrap(); + rt.block_on(rpc::get_validator_stake(&client, epoch, validator)) +} + +/// Query and return a validator's state +pub fn get_validator_state( + tendermint_addr: &str, + validator: &Address, + epoch: Option, +) -> Result, Error> { + let client = HttpClient::new( + TendermintAddress::from_str(tendermint_addr) + .map_err(|e| Error::Other(e.to_string()))?, + ) + .map_err(|e| Error::Other(e.to_string()))?; + let rt = Runtime::new().unwrap(); + rt.block_on(rpc::get_validator_state(&client, validator, epoch)) +} + +/// Get the delegator's delegation +pub fn get_delegators_delegation( + tendermint_addr: &str, + address: &Address, +) -> Result, Error> { + let client = HttpClient::new( + TendermintAddress::from_str(tendermint_addr) + .map_err(|e| Error::Other(e.to_string()))?, + ) + .map_err(|e| Error::Other(e.to_string()))?; + let rt = Runtime::new().unwrap(); + rt.block_on(rpc::get_delegators_delegation(&client, address)) +} + +/// Get the delegator's delegation at some epoh +pub fn get_delegators_delegation_at( + tendermint_addr: &str, + address: &Address, + epoch: Epoch, +) -> Result, Error> { + let client = HttpClient::new( + TendermintAddress::from_str(tendermint_addr) + .map_err(|e| Error::Other(e.to_string()))?, + ) + .map_err(|e| Error::Other(e.to_string()))?; + let rt = Runtime::new().unwrap(); + rt.block_on(rpc::get_delegators_delegation_at(&client, address, epoch)) +} + +/// Query and return validator's commission rate and max commission rate +/// change per epoch +pub fn query_commission_rate( + tendermint_addr: &str, + validator: &Address, + epoch: Option, +) -> Result, Error> { + let client = HttpClient::new( + TendermintAddress::from_str(tendermint_addr) + .map_err(|e| Error::Other(e.to_string()))?, + ) + .map_err(|e| Error::Other(e.to_string()))?; + let rt = Runtime::new().unwrap(); + rt.block_on(rpc::query_commission_rate(&client, validator, epoch)) +} + +/// Query and return validator's metadata, including the commission rate and +/// max commission rate change +pub fn query_metadata( + tendermint_addr: &str, + validator: &Address, + epoch: Option, +) -> Result<(Option, Option), Error> { + let client = HttpClient::new( + TendermintAddress::from_str(tendermint_addr) + .map_err(|e| Error::Other(e.to_string()))?, + ) + .map_err(|e| Error::Other(e.to_string()))?; + let rt = Runtime::new().unwrap(); + rt.block_on(rpc::query_metadata(&client, validator, epoch)) +} + +/// Query and return the incoming redelegation epoch for a given pair of +/// source validator and delegator, if there is any. 
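+///
+/// Call sketch (addresses and endpoint are placeholders; not a doc test):
+///
+/// ```ignore
+/// use std::str::FromStr;
+/// use namada_core::types::address::Address;
+///
+/// let node = "tcp://127.0.0.1:26657"; // hypothetical endpoint
+/// let src_validator = Address::from_str("tnam1v...").expect("valid address");
+/// let delegator = Address::from_str("tnam1d...").expect("valid address");
+/// if let Some(epoch) =
+///     query_incoming_redelegations(node, &src_validator, &delegator)
+///         .expect("query should succeed")
+/// {
+///     println!("incoming redelegation recorded at epoch {epoch}");
+/// }
+/// ```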
+pub fn query_incoming_redelegations( + tendermint_addr: &str, + src_validator: &Address, + delegator: &Address, +) -> Result, Error> { + let client = HttpClient::new( + TendermintAddress::from_str(tendermint_addr) + .map_err(|e| Error::Other(e.to_string()))?, + ) + .map_err(|e| Error::Other(e.to_string()))?; + let rt = Runtime::new().unwrap(); + rt.block_on(rpc::query_incoming_redelegations( + &client, + src_validator, + delegator, + )) +} + +/// Query a validator's bonds for a given epoch +pub fn query_bond( + tendermint_addr: &str, + source: &Address, + validator: &Address, + epoch: Option, +) -> Result { + let client = HttpClient::new( + TendermintAddress::from_str(tendermint_addr) + .map_err(|e| Error::Other(e.to_string()))?, + ) + .map_err(|e| Error::Other(e.to_string()))?; + let rt = Runtime::new().unwrap(); + rt.block_on(rpc::query_bond(&client, source, validator, epoch)) +} + +/// Query a validator's unbonds for a given epoch +pub fn query_and_print_unbonds( + tendermint_addr: &str, + source: &Address, + validator: &Address, +) -> Result, Error> { + let rt = Runtime::new().unwrap(); + rt.block_on(async { + query_unbond_with_slashing(tendermint_addr, source, validator) + }) +} + +/// Query withdrawable tokens in a validator account for a given epoch +pub fn query_withdrawable_tokens( + tendermint_addr: &str, + bond_source: &Address, + validator: &Address, + epoch: Option, +) -> Result { + let client = HttpClient::new( + TendermintAddress::from_str(tendermint_addr) + .map_err(|e| Error::Other(e.to_string()))?, + ) + .map_err(|e| Error::Other(e.to_string()))?; + let rt = Runtime::new().unwrap(); + rt.block_on(rpc::query_withdrawable_tokens( + &client, + bond_source, + validator, + epoch, + )) +} + +/// Query all unbonds for a validator, applying slashes +pub fn query_unbond_with_slashing( + tendermint_addr: &str, + source: &Address, + validator: &Address, +) -> Result, Error> { + let client = HttpClient::new( + TendermintAddress::from_str(tendermint_addr) + .map_err(|e| Error::Other(e.to_string()))?, + ) + .map_err(|e| Error::Other(e.to_string()))?; + let rt = Runtime::new().unwrap(); + rt.block_on(rpc::query_unbond_with_slashing(&client, source, validator)) +} + +/// Get the bond amount at the given epoch +pub fn get_bond_amount_at( + tendermint_addr: &str, + delegator: &Address, + validator: &Address, + epoch: Epoch, +) -> Result { + let client = HttpClient::new( + TendermintAddress::from_str(tendermint_addr) + .map_err(|e| Error::Other(e.to_string()))?, + ) + .map_err(|e| Error::Other(e.to_string()))?; + let rt = Runtime::new().unwrap(); + rt.block_on(rpc::get_bond_amount_at( + &client, delegator, validator, epoch, + )) +} + +/// Get bonds and unbonds with all details (slashes and rewards, if any) +/// grouped by their bond IDs. +pub fn bonds_and_unbonds( + tendermint_addr: &str, + source: &Option
<Address>, +    validator: &Option<Address>
, +) -> Result { + let client = HttpClient::new( + TendermintAddress::from_str(tendermint_addr) + .map_err(|e| Error::Other(e.to_string()))?, + ) + .map_err(|e| Error::Other(e.to_string()))?; + let rt = Runtime::new().unwrap(); + rt.block_on(rpc::bonds_and_unbonds(&client, source, validator)) +} + +/// Get bonds and unbonds with all details (slashes and rewards, if any) +/// grouped by their bond IDs, enriched with extra information calculated +/// from the data. +pub fn enriched_bonds_and_unbonds( + tendermint_addr: &str, + current_epoch: Epoch, + source: &Option
<Address>, +    validator: &Option<Address>
, +) -> Result { + let client = HttpClient::new( + TendermintAddress::from_str(tendermint_addr) + .map_err(|e| Error::Other(e.to_string()))?, + ) + .map_err(|e| Error::Other(e.to_string()))?; + let rt = Runtime::new().unwrap(); + rt.block_on(rpc::enriched_bonds_and_unbonds( + &client, + current_epoch, + source, + validator, + )) +} diff --git a/light_sdk/src/reading/tx.rs b/light_sdk/src/reading/tx.rs new file mode 100644 index 0000000000..036f38ee59 --- /dev/null +++ b/light_sdk/src/reading/tx.rs @@ -0,0 +1,73 @@ +use namada_sdk::events::Event; +use namada_sdk::rpc::{TxEventQuery, TxResponse}; + +use super::*; + +/// Call the corresponding `tx_event_query` RPC method, to fetch +/// the current status of a transation. +pub fn query_tx_events( + tendermint_addr: &str, + tx_hash: &str, +) -> Result, Error> { + let client = HttpClient::new( + TendermintAddress::from_str(tendermint_addr) + .map_err(|e| Error::Other(e.to_string()))?, + ) + .map_err(|e| Error::Other(e.to_string()))?; + let rt = Runtime::new().unwrap(); + let tx_event_query = TxEventQuery::Applied(tx_hash); + rt.block_on(rpc::query_tx_events(&client, tx_event_query)) + .map_err(|e| Error::Other(e.to_string())) +} + +/// Dry run a transaction +pub fn dry_run_tx( + tendermint_addr: &str, + tx_bytes: Vec, +) -> Result { + let client = HttpClient::new( + TendermintAddress::from_str(tendermint_addr) + .map_err(|e| Error::Other(e.to_string()))?, + ) + .map_err(|e| Error::Other(e.to_string()))?; + let (data, height, prove) = (Some(tx_bytes), None, false); + let rt = Runtime::new().unwrap(); + let result = rt + .block_on(RPC.shell().dry_run_tx(&client, data, height, prove)) + .map_err(|err| { + Error::from(namada_sdk::error::QueryError::NoResponse( + err.to_string(), + )) + })? + .data; + Ok(result) +} + +/// Lookup the full response accompanying the specified transaction event +pub fn query_tx_response( + tendermint_addr: &str, + tx_hash: &str, +) -> Result { + let client = HttpClient::new( + TendermintAddress::from_str(tendermint_addr) + .map_err(|e| Error::Other(e.to_string()))?, + ) + .map_err(|e| Error::Other(e.to_string()))?; + let tx_query = TxEventQuery::Applied(tx_hash); + let rt = Runtime::new().unwrap(); + rt.block_on(rpc::query_tx_response(&client, tx_query)) + .map_err(|e| Error::Other(e.to_string())) +} + +/// Query the status of a given transaction. 
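+///
+/// Unlike most helpers in this module, this function is `async` and must be
+/// awaited. Rough usage sketch (hash and endpoint are placeholders; not a
+/// doc test):
+///
+/// ```ignore
+/// let node = "tcp://127.0.0.1:26657"; // hypothetical endpoint
+/// let tx_hash = "DEADBEEF..."; // hex hash of a previously submitted tx
+/// let applied_event = query_tx_status(node, tx_hash)
+///     .await
+///     .expect("tx should have been applied");
+/// ```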
+pub async fn query_tx_status( + tendermint_addr: &str, + tx_hash: &str, +) -> Result { + let maybe_event = query_tx_events(tendermint_addr, tx_hash)?; + if let Some(e) = maybe_event { + Ok(e) + } else { + Err(Error::Tx(namada_sdk::error::TxError::AppliedTimeout)) + } +} diff --git a/light_sdk/src/transaction/account.rs b/light_sdk/src/transaction/account.rs new file mode 100644 index 0000000000..146915881a --- /dev/null +++ b/light_sdk/src/transaction/account.rs @@ -0,0 +1,143 @@ +use namada_core::proto::Tx; +use namada_core::types::address::Address; +use namada_core::types::hash::Hash; +use namada_core::types::key::common; + +use super::GlobalArgs; +use crate::transaction; + +const TX_INIT_ACCOUNT_WASM: &str = "tx_init_account.wasm"; +const TX_REVEAL_PK_WASM: &str = "tx_reveal_pk.wasm"; +const TX_UPDATE_ACCOUNT_WASM: &str = "tx_update_account.wasm"; + +/// Transaction to initialize an established account +pub struct InitAccount(Tx); + +impl InitAccount { + /// Build a raw InitAccount transaction from the given parameters + pub fn new( + public_keys: Vec, + vp_code_hash: Hash, + threshold: u8, + args: GlobalArgs, + ) -> Self { + let init_account = + namada_core::types::transaction::account::InitAccount { + public_keys, + vp_code_hash, + threshold, + }; + + Self(transaction::build_tx( + args, + init_account, + TX_INIT_ACCOUNT_WASM.to_string(), + )) + } + + /// Get the bytes to sign for the given transaction + pub fn get_sign_bytes(&self) -> Vec { + transaction::get_sign_bytes(&self.0) + } + + /// Attach the provided signatures to the tx + pub fn attach_signatures( + self, + signer: common::PublicKey, + signature: common::Signature, + ) -> Self { + Self(transaction::attach_raw_signatures( + self.0, signer, signature, + )) + } + + /// Generates the protobuf encoding of this transaction + pub fn to_bytes(&self) -> Vec { + self.0.to_bytes() + } +} + +/// Transaction to reveal a public key to the ledger to validate signatures of +/// an implicit account +pub struct RevealPk(Tx); + +impl RevealPk { + /// Build a raw Reveal Public Key transaction from the given parameters + pub fn new(public_key: common::PublicKey, args: GlobalArgs) -> Self { + Self(transaction::build_tx( + args, + public_key, + TX_REVEAL_PK_WASM.to_string(), + )) + } + + /// Get the bytes to sign for the given transaction + pub fn get_sign_bytes(&self) -> Vec { + transaction::get_sign_bytes(&self.0) + } + + /// Attach the provided signatures to the tx + pub fn attach_signatures( + self, + signer: common::PublicKey, + signature: common::Signature, + ) -> Self { + Self(transaction::attach_raw_signatures( + self.0, signer, signature, + )) + } + + /// Generates the protobuf encoding of this transaction + pub fn to_bytes(&self) -> Vec { + self.0.to_bytes() + } +} + +/// Transaction to update the parameters of an established account +pub struct UpdateAccount(Tx); + +impl UpdateAccount { + /// Build a raw UpdateAccount transaction from the given parameters + pub fn new( + addr: Address, + vp_code_hash: Option, + public_keys: Vec, + threshold: Option, + args: GlobalArgs, + ) -> Self { + let update_account = + namada_core::types::transaction::account::UpdateAccount { + addr, + vp_code_hash, + public_keys, + threshold, + }; + + Self(transaction::build_tx( + args, + update_account, + TX_UPDATE_ACCOUNT_WASM.to_string(), + )) + } + + /// Get the bytes to sign for the given transaction + pub fn get_sign_bytes(&self) -> Vec { + transaction::get_sign_bytes(&self.0) + } + + /// Attach the provided signatures to the tx + pub fn attach_signatures( 
+ self, + signer: common::PublicKey, + signature: common::Signature, + ) -> Self { + Self(transaction::attach_raw_signatures( + self.0, signer, signature, + )) + } + + /// Generates the protobuf encoding of this transaction + pub fn to_bytes(&self) -> Vec { + self.0.to_bytes() + } +} diff --git a/light_sdk/src/transaction/bridge.rs b/light_sdk/src/transaction/bridge.rs new file mode 100644 index 0000000000..c41407f31e --- /dev/null +++ b/light_sdk/src/transaction/bridge.rs @@ -0,0 +1,54 @@ +use namada_core::proto::Tx; +pub use namada_core::types::eth_bridge_pool::{GasFee, TransferToEthereum}; +use namada_core::types::hash::Hash; +use namada_core::types::key::common; + +use super::GlobalArgs; +use crate::transaction; + +const TX_BRIDGE_POOL_WASM: &str = "tx_bridge_pool.wasm"; + +/// A transfer over the Ethereum bridge +pub struct BridgeTransfer(Tx); + +impl BridgeTransfer { + /// Build a raw BridgeTransfer transaction from the given parameters + pub fn new( + transfer: TransferToEthereum, + gas_fee: GasFee, + args: GlobalArgs, + ) -> Self { + let pending_transfer = + namada_core::types::eth_bridge_pool::PendingTransfer { + transfer, + gas_fee, + }; + + Self(transaction::build_tx( + args, + pending_transfer, + TX_BRIDGE_POOL_WASM.to_string(), + )) + } + + /// Get the bytes to sign for the given transaction + pub fn get_sign_bytes(&self) -> Vec { + transaction::get_sign_bytes(&self.0) + } + + /// Attach the provided signatures to the tx + pub fn attach_signatures( + self, + signer: common::PublicKey, + signature: common::Signature, + ) -> Self { + Self(transaction::attach_raw_signatures( + self.0, signer, signature, + )) + } + + /// Generates the protobuf encoding of this transaction + pub fn to_bytes(&self) -> Vec { + self.0.to_bytes() + } +} diff --git a/light_sdk/src/transaction/governance.rs b/light_sdk/src/transaction/governance.rs new file mode 100644 index 0000000000..ca6183569e --- /dev/null +++ b/light_sdk/src/transaction/governance.rs @@ -0,0 +1,118 @@ +use namada_core::ledger::governance::storage::proposal::ProposalType; +use namada_core::ledger::governance::storage::vote::StorageProposalVote; +use namada_core::proto::Tx; +use namada_core::types::address::Address; +use namada_core::types::hash::Hash; +use namada_core::types::key::common; +use namada_core::types::storage::Epoch; + +use super::GlobalArgs; +use crate::transaction; + +const TX_INIT_PROPOSAL_WASM: &str = "tx_init_proposal.wasm"; +const TX_VOTE_PROPOSAL: &str = "tx_vote_proposal.wasm"; + +/// Transaction to initialize a governance proposal +pub struct InitProposal(Tx); + +impl InitProposal { + /// Build a raw InitProposal transaction from the given parameters + #[allow(clippy::too_many_arguments)] + pub fn new( + id: Option, + content: Hash, + author: Address, + r#type: ProposalType, + voting_start_epoch: Epoch, + voting_end_epoch: Epoch, + grace_epoch: Epoch, + args: GlobalArgs, + ) -> Self { + let init_proposal = + namada_core::types::transaction::governance::InitProposalData { + id, + content, + author, + r#type, + voting_start_epoch, + voting_end_epoch, + grace_epoch, + }; + + Self(transaction::build_tx( + args, + init_proposal, + TX_INIT_PROPOSAL_WASM.to_string(), + )) + } + + /// Get the bytes to sign for the given transaction + pub fn get_sign_bytes(&self) -> Vec { + transaction::get_sign_bytes(&self.0) + } + + /// Attach the provided signatures to the tx + pub fn attach_signatures( + self, + signer: common::PublicKey, + signature: common::Signature, + ) -> Self { + Self(transaction::attach_raw_signatures( + 
self.0, signer, signature, + )) + } + + /// Generates the protobuf encoding of this transaction + pub fn to_bytes(&self) -> Vec { + self.0.to_bytes() + } +} + +/// Transaction to vote on a governance proposal +pub struct VoteProposal(Tx); + +impl VoteProposal { + /// Build a raw VoteProposal transaction from the given parameters + pub fn new( + id: u64, + vote: StorageProposalVote, + voter: Address, + delegations: Vec
, + args: GlobalArgs, + ) -> Self { + let vote_proposal = + namada_core::types::transaction::governance::VoteProposalData { + id, + vote, + voter, + delegations, + }; + + Self(transaction::build_tx( + args, + vote_proposal, + TX_VOTE_PROPOSAL.to_string(), + )) + } + + /// Get the bytes to sign for the given transaction + pub fn get_sign_bytes(&self) -> Vec { + transaction::get_sign_bytes(&self.0) + } + + /// Attach the provided signatures to the tx + pub fn attach_signatures( + self, + signer: common::PublicKey, + signature: common::Signature, + ) -> Self { + Self(transaction::attach_raw_signatures( + self.0, signer, signature, + )) + } + + /// Generates the protobuf encoding of this transaction + pub fn to_bytes(&self) -> Vec { + self.0.to_bytes() + } +} diff --git a/light_sdk/src/transaction/ibc.rs b/light_sdk/src/transaction/ibc.rs new file mode 100644 index 0000000000..98a28aa226 --- /dev/null +++ b/light_sdk/src/transaction/ibc.rs @@ -0,0 +1,60 @@ +use std::str::FromStr; + +pub use namada_core::ibc::apps::transfer::types::msgs::transfer::MsgTransfer; +use namada_core::ibc::primitives::Msg; +use namada_core::proto::Tx; +use namada_core::types::hash::Hash; +use namada_core::types::key::common; +use namada_core::types::time::DateTimeUtc; + +use super::GlobalArgs; +use crate::transaction; + +const TX_IBC_WASM: &str = "tx_ibc.wasm"; + +/// An IBC transfer +pub struct IbcTransfer(Tx); + +impl IbcTransfer { + /// Build a raw IbcTransfer transaction from the given parameters + pub fn new( + packet_data: MsgTransfer, + GlobalArgs { + expiration, + code_hash, + chain_id, + }: GlobalArgs, + ) -> Self { + let mut tx = Tx::new(chain_id, expiration); + tx.header.timestamp = + DateTimeUtc::from_str("2000-01-01T00:00:00Z").unwrap(); + tx.add_code_from_hash(code_hash, Some(TX_IBC_WASM.to_string())); + + let mut data = vec![]; + prost::Message::encode(&packet_data.to_any(), &mut data).unwrap(); + tx.set_data(namada_core::proto::Data::new(data)); + + Self(tx) + } + + /// Get the bytes to sign for the given transaction + pub fn get_sign_bytes(&self) -> Vec { + transaction::get_sign_bytes(&self.0) + } + + /// Attach the provided signatures to the tx + pub fn attach_signatures( + self, + signer: common::PublicKey, + signature: common::Signature, + ) -> Self { + Self(transaction::attach_raw_signatures( + self.0, signer, signature, + )) + } + + /// Generates the protobuf encoding of this transaction + pub fn to_bytes(&self) -> Vec { + self.0.to_bytes() + } +} diff --git a/light_sdk/src/transaction/mod.rs b/light_sdk/src/transaction/mod.rs new file mode 100644 index 0000000000..2c755da39f --- /dev/null +++ b/light_sdk/src/transaction/mod.rs @@ -0,0 +1,61 @@ +use std::str::FromStr; + +use borsh::BorshSerialize; +use namada_core::proto::{Section, Signature, Signer, Tx}; +use namada_core::types::chain::ChainId; +use namada_core::types::hash::Hash; +use namada_core::types::key::common; +use namada_core::types::time::DateTimeUtc; + +pub mod account; +pub mod bridge; +pub mod governance; +pub mod ibc; +pub mod pgf; +pub mod pos; +pub mod transfer; +pub mod wrapper; + +/// Generic arguments required to construct a transaction +pub struct GlobalArgs { + pub expiration: Option, + pub code_hash: Hash, + pub chain_id: ChainId, +} + +pub(in crate::transaction) fn build_tx( + GlobalArgs { + expiration, + code_hash, + chain_id, + }: GlobalArgs, + data: impl BorshSerialize, + code_tag: String, +) -> Tx { + let mut inner_tx = Tx::new(chain_id, expiration); + + inner_tx.header.timestamp = + 
DateTimeUtc::from_str("2000-01-01T00:00:00Z").unwrap(); + inner_tx.add_code_from_hash(code_hash, Some(code_tag)); + inner_tx.add_data(data); + + inner_tx +} + +pub(in crate::transaction) fn get_sign_bytes(tx: &Tx) -> Vec { + vec![tx.raw_header_hash()] +} + +pub(in crate::transaction) fn attach_raw_signatures( + mut tx: Tx, + signer: common::PublicKey, + signature: common::Signature, +) -> Tx { + tx.protocol_filter(); + tx.add_section(Section::Signature(Signature { + targets: vec![tx.raw_header_hash()], + signer: Signer::PubKeys(vec![signer]), + signatures: [(0, signature)].into_iter().collect(), + })); + tx +} diff --git a/light_sdk/src/transaction/pgf.rs b/light_sdk/src/transaction/pgf.rs new file mode 100644 index 0000000000..7a53a7e4ca --- /dev/null +++ b/light_sdk/src/transaction/pgf.rs @@ -0,0 +1,94 @@ +use std::collections::HashMap; + +use namada_core::proto::Tx; +use namada_core::types::address::Address; +use namada_core::types::dec::Dec; +use namada_core::types::hash::Hash; +use namada_core::types::key::common; + +use super::GlobalArgs; +use crate::transaction; + +const TX_RESIGN_STEWARD: &str = "tx_resign_steward.wasm"; +const TX_UPDATE_STEWARD_COMMISSION: &str = "tx_update_steward_commission.wasm"; + +/// A transaction to resign from stewarding pgf +pub struct ResignSteward(Tx); + +impl ResignSteward { + /// Build a raw ResignSteward transaction from the given parameters + pub fn new(steward: Address, args: GlobalArgs) -> Self { + Self(transaction::build_tx( + args, + steward, + TX_RESIGN_STEWARD.to_string(), + )) + } + + /// Get the bytes to sign for the given transaction + pub fn get_sign_bytes(&self) -> Vec { + transaction::get_sign_bytes(&self.0) + } + + /// Attach the provided signatures to the tx + pub fn attach_signatures( + self, + signer: common::PublicKey, + signature: common::Signature, + ) -> Self { + Self(transaction::attach_raw_signatures( + self.0, signer, signature, + )) + } + + /// Generates the protobuf encoding of this transaction + pub fn to_bytes(&self) -> Vec { + self.0.to_bytes() + } +} + +/// Transaction to update a pgf steward's commission rate +pub struct UpdateStewardCommission(Tx); + +impl UpdateStewardCommission { + /// Build a raw UpdateStewardCommission transaction from the given + /// parameters + pub fn new( + steward: Address, + commission: HashMap, + args: GlobalArgs, + ) -> Self { + let update_commission = + namada_core::types::transaction::pgf::UpdateStewardCommission { + steward, + commission, + }; + + Self(transaction::build_tx( + args, + update_commission, + TX_UPDATE_STEWARD_COMMISSION.to_string(), + )) + } + + /// Get the bytes to sign for the given transaction + pub fn get_sign_bytes(&self) -> Vec { + transaction::get_sign_bytes(&self.0) + } + + /// Attach the provided signatures to the tx + pub fn attach_signatures( + self, + signer: common::PublicKey, + signature: common::Signature, + ) -> Self { + Self(transaction::attach_raw_signatures( + self.0, signer, signature, + )) + } + + /// Generates the protobuf encoding of this transaction + pub fn to_bytes(&self) -> Vec { + self.0.to_bytes() + } +} diff --git a/light_sdk/src/transaction/pos.rs b/light_sdk/src/transaction/pos.rs new file mode 100644 index 0000000000..a436e1588a --- /dev/null +++ b/light_sdk/src/transaction/pos.rs @@ -0,0 +1,560 @@ +use namada_core::proto::Tx; +use namada_core::types::address::Address; +use namada_core::types::dec::Dec; +use namada_core::types::hash::Hash; +use namada_core::types::key::{common, secp256k1}; +use namada_core::types::token; +use 
namada_core::types::token::Amount; +use namada_core::types::transaction::pos::Redelegation; + +use super::GlobalArgs; +use crate::transaction; + +const TX_BOND_WASM: &str = "tx_bond.wasm"; +const TX_UNBOND_WASM: &str = "tx_unbond.wasm"; +const TX_BECOME_VALIDATOR_WASM: &str = "tx_become_validator.wasm"; +const TX_UNJAIL_VALIDATOR_WASM: &str = "tx_unjail_validator.wasm"; +const TX_DEACTIVATE_VALIDATOR_WASM: &str = "tx_deactivate_validator.wasm"; +const TX_REACTIVATE_VALIDATOR_WASM: &str = "tx_reactivate_validator.wasm"; +const TX_CLAIM_REWARDS_WASM: &str = "tx_claim_rewards.wasm"; +const TX_REDELEGATE_WASM: &str = "tx_redelegate.wasm"; +const TX_CHANGE_METADATA_WASM: &str = "tx_change_validator_metadata.wasm"; +const TX_CHANGE_CONSENSUS_KEY_WASM: &str = "tx_change_consensus_key.wasm"; +const TX_CHANGE_COMMISSION_WASM: &str = "tx_change_validator_commission.wasm"; +const TX_WITHDRAW_WASM: &str = "tx_withdraw.wasm"; + +/// A bond transaction +pub struct Bond(Tx); + +impl Bond { + /// Build a raw Bond transaction from the given parameters + pub fn new( + validator: Address, + amount: token::Amount, + source: Option
, + args: GlobalArgs, + ) -> Self { + let unbond = namada_core::types::transaction::pos::Bond { + validator, + amount, + source, + }; + + Self(transaction::build_tx( + args, + unbond, + TX_BOND_WASM.to_string(), + )) + } + + /// Get the bytes to sign for the given transaction + pub fn get_sign_bytes(&self) -> Vec { + transaction::get_sign_bytes(&self.0) + } + + /// Attach the provided signatures to the tx + pub fn attach_signatures( + self, + signer: common::PublicKey, + signature: common::Signature, + ) -> Self { + Self(transaction::attach_raw_signatures( + self.0, signer, signature, + )) + } + + /// Generates the protobuf encoding of this transaction + pub fn to_bytes(&self) -> Vec { + self.0.to_bytes() + } +} + +/// An unbond transaction +pub struct Unbond(Tx); + +impl Unbond { + /// Build a raw Unbond transaction from the given parameters + pub fn new( + validator: Address, + amount: token::Amount, + source: Option
, + args: GlobalArgs, + ) -> Self { + let unbond = namada_core::types::transaction::pos::Unbond { + validator, + amount, + source, + }; + + Self(transaction::build_tx( + args, + unbond, + TX_UNBOND_WASM.to_string(), + )) + } + + /// Get the bytes to sign for the given transaction + pub fn get_sign_bytes(&self) -> Vec { + transaction::get_sign_bytes(&self.0) + } + + /// Attach the provided signatures to the tx + pub fn attach_signatures( + self, + signer: common::PublicKey, + signature: common::Signature, + ) -> Self { + Self(transaction::attach_raw_signatures( + self.0, signer, signature, + )) + } + + /// Generates the protobuf encoding of this transaction + pub fn to_bytes(&self) -> Vec { + self.0.to_bytes() + } +} + +/// Transaction to initialize a new PoS validator +pub struct BecomeValidator(Tx); + +impl BecomeValidator { + /// Build a raw Init validator transaction from the given parameters + #[allow(clippy::too_many_arguments)] + pub fn new( + address: Address, + consensus_key: common::PublicKey, + eth_cold_key: secp256k1::PublicKey, + eth_hot_key: secp256k1::PublicKey, + protocol_key: common::PublicKey, + commission_rate: Dec, + max_commission_rate_change: Dec, + email: String, + description: Option, + website: Option, + discord_handle: Option, + args: GlobalArgs, + ) -> Self { + let update_account = + namada_core::types::transaction::pos::BecomeValidator { + address, + consensus_key, + eth_cold_key, + eth_hot_key, + protocol_key, + commission_rate, + max_commission_rate_change, + email, + description, + website, + discord_handle, + }; + + Self(transaction::build_tx( + args, + update_account, + TX_BECOME_VALIDATOR_WASM.to_string(), + )) + } + + /// Get the bytes to sign for the given transaction + pub fn get_sign_bytes(&self) -> Vec { + transaction::get_sign_bytes(&self.0) + } + + /// Attach the provided signatures to the tx + pub fn attach_signatures( + self, + signer: common::PublicKey, + signature: common::Signature, + ) -> Self { + Self(transaction::attach_raw_signatures( + self.0, signer, signature, + )) + } + + /// Generates the protobuf encoding of this transaction + pub fn to_bytes(&self) -> Vec { + self.0.to_bytes() + } +} + +/// Transaction to unjail a PoS validator +pub struct UnjailValidator(Tx); + +impl UnjailValidator { + /// Build a raw Unjail validator transaction from the given parameters + pub fn new(address: Address, args: GlobalArgs) -> Self { + Self(transaction::build_tx( + args, + address, + TX_UNJAIL_VALIDATOR_WASM.to_string(), + )) + } + + /// Get the bytes to sign for the given transaction + pub fn get_sign_bytes(&self) -> Vec { + transaction::get_sign_bytes(&self.0) + } + + /// Attach the provided signatures to the tx + pub fn attach_signatures( + self, + signer: common::PublicKey, + signature: common::Signature, + ) -> Self { + Self(transaction::attach_raw_signatures( + self.0, signer, signature, + )) + } + + /// Generates the protobuf encoding of this transaction + pub fn to_bytes(&self) -> Vec { + self.0.to_bytes() + } +} + +/// Transaction to deactivate a validator +pub struct DeactivateValidator(Tx); + +impl DeactivateValidator { + /// Build a raw DeactivateValidator transaction from the given parameters + pub fn new(address: Address, args: GlobalArgs) -> Self { + Self(transaction::build_tx( + args, + address, + TX_DEACTIVATE_VALIDATOR_WASM.to_string(), + )) + } + + /// Get the bytes to sign for the given transaction + pub fn get_sign_bytes(&self) -> Vec { + transaction::get_sign_bytes(&self.0) + } + + /// Attach the provided signatures to the tx + pub 
fn attach_signatures( + self, + signer: common::PublicKey, + signature: common::Signature, + ) -> Self { + Self(transaction::attach_raw_signatures( + self.0, signer, signature, + )) + } + + /// Generates the protobuf encoding of this transaction + pub fn to_bytes(&self) -> Vec { + self.0.to_bytes() + } +} + +/// Transaction to reactivate a previously deactivated validator +pub struct ReactivateValidator(Tx); + +impl ReactivateValidator { + /// Build a raw ReactivateValidator transaction from the given parameters + pub fn new(address: Address, args: GlobalArgs) -> Self { + Self(transaction::build_tx( + args, + address, + TX_REACTIVATE_VALIDATOR_WASM.to_string(), + )) + } + + /// Get the bytes to sign for the given transaction + pub fn get_sign_bytes(&self) -> Vec { + transaction::get_sign_bytes(&self.0) + } + + /// Attach the provided signatures to the tx + pub fn attach_signatures( + self, + signer: common::PublicKey, + signature: common::Signature, + ) -> Self { + Self(transaction::attach_raw_signatures( + self.0, signer, signature, + )) + } + + /// Generates the protobuf encoding of this transaction + pub fn to_bytes(&self) -> Vec { + self.0.to_bytes() + } +} + +/// Transaction to claim PoS rewards +pub struct ClaimRewards(Tx); + +impl ClaimRewards { + /// Build a raw ClaimRewards transaction from the given parameters + pub fn new( + validator: Address, + source: Option
, + args: GlobalArgs, + ) -> Self { + let init_proposal = namada_core::types::transaction::pos::Withdraw { + validator, + source, + }; + + Self(transaction::build_tx( + args, + init_proposal, + TX_CLAIM_REWARDS_WASM.to_string(), + )) + } + + /// Get the bytes to sign for the given transaction + pub fn get_sign_bytes(&self) -> Vec { + transaction::get_sign_bytes(&self.0) + } + + /// Attach the provided signatures to the tx + pub fn attach_signatures( + self, + signer: common::PublicKey, + signature: common::Signature, + ) -> Self { + Self(transaction::attach_raw_signatures( + self.0, signer, signature, + )) + } + + /// Generates the protobuf encoding of this transaction + pub fn to_bytes(&self) -> Vec { + self.0.to_bytes() + } +} + +/// Transaction to change the validator's metadata +pub struct ChangeMetaData(Tx); + +impl ChangeMetaData { + /// Build a raw ChangeMetadata transaction from the given parameters + pub fn new( + validator: Address, + email: Option, + description: Option, + website: Option, + discord_handle: Option, + commission_rate: Option, + args: GlobalArgs, + ) -> Self { + let init_proposal = + namada_core::types::transaction::pos::MetaDataChange { + validator, + email, + description, + website, + discord_handle, + commission_rate, + }; + + Self(transaction::build_tx( + args, + init_proposal, + TX_CHANGE_METADATA_WASM.to_string(), + )) + } + + /// Get the bytes to sign for the given transaction + pub fn get_sign_bytes(&self) -> Vec { + transaction::get_sign_bytes(&self.0) + } + + /// Attach the provided signatures to the tx + pub fn attach_signatures( + self, + signer: common::PublicKey, + signature: common::Signature, + ) -> Self { + Self(transaction::attach_raw_signatures( + self.0, signer, signature, + )) + } + + /// Generates the protobuf encoding of this transaction + pub fn to_bytes(&self) -> Vec { + self.0.to_bytes() + } +} + +/// Transaction to modify the validator's consensus key +pub struct ChangeConsensusKey(Tx); + +impl ChangeConsensusKey { + /// Build a raw ChangeConsensusKey transaction from the given parameters + pub fn new( + validator: Address, + consensus_key: common::PublicKey, + args: GlobalArgs, + ) -> Self { + let init_proposal = + namada_core::types::transaction::pos::ConsensusKeyChange { + validator, + consensus_key, + }; + + Self(transaction::build_tx( + args, + init_proposal, + TX_CHANGE_CONSENSUS_KEY_WASM.to_string(), + )) + } + + /// Get the bytes to sign for the given transaction + pub fn get_sign_bytes(&self) -> Vec { + transaction::get_sign_bytes(&self.0) + } + + /// Attach the provided signatures to the tx + pub fn attach_signatures( + self, + signer: common::PublicKey, + signature: common::Signature, + ) -> Self { + Self(transaction::attach_raw_signatures( + self.0, signer, signature, + )) + } + + /// Generates the protobuf encoding of this transaction + pub fn to_bytes(&self) -> Vec { + self.0.to_bytes() + } +} + +/// Transaction to modify the validator's commission rate +pub struct ChangeCommission(Tx); + +impl ChangeCommission { + /// Build a raw ChangeCommission transaction from the given parameters + pub fn new(validator: Address, new_rate: Dec, args: GlobalArgs) -> Self { + let init_proposal = + namada_core::types::transaction::pos::CommissionChange { + validator, + new_rate, + }; + + Self(transaction::build_tx( + args, + init_proposal, + TX_CHANGE_COMMISSION_WASM.to_string(), + )) + } + + /// Get the bytes to sign for the given transaction + pub fn get_sign_bytes(&self) -> Vec { + transaction::get_sign_bytes(&self.0) + } + + /// Attach 
the provided signatures to the tx + pub fn attach_signatures( + self, + signer: common::PublicKey, + signature: common::Signature, + ) -> Self { + Self(transaction::attach_raw_signatures( + self.0, signer, signature, + )) + } + + /// Generates the protobuf encoding of this transaction + pub fn to_bytes(&self) -> Vec { + self.0.to_bytes() + } +} + +/// Transaction to withdraw previously unstaked funds +pub struct Withdraw(Tx); + +impl Withdraw { + /// Build a raw Withdraw transaction from the given parameters + pub fn new( + validator: Address, + source: Option
, + args: GlobalArgs, + ) -> Self { + let init_proposal = namada_core::types::transaction::pos::Withdraw { + validator, + source, + }; + + Self(transaction::build_tx( + args, + init_proposal, + TX_WITHDRAW_WASM.to_string(), + )) + } + + /// Get the bytes to sign for the given transaction + pub fn get_sign_bytes(&self) -> Vec { + transaction::get_sign_bytes(&self.0) + } + + /// Attach the provided signatures to the tx + pub fn attach_signatures( + self, + signer: common::PublicKey, + signature: common::Signature, + ) -> Self { + Self(transaction::attach_raw_signatures( + self.0, signer, signature, + )) + } + + /// Generates the protobuf encoding of this transaction + pub fn to_bytes(&self) -> Vec { + self.0.to_bytes() + } +} + +/// Transaction to redelegate +pub struct Redelegate(Tx); + +impl Redelegate { + /// Build a raw Redelegate transaction from the given parameters + pub fn new( + src_validator: Address, + dest_validator: Address, + owner: Address, + amount: Amount, + args: GlobalArgs, + ) -> Self { + let redelegation = Redelegation { + src_validator, + dest_validator, + owner, + amount, + }; + + Self(transaction::build_tx( + args, + redelegation, + TX_REDELEGATE_WASM.to_string(), + )) + } + + /// Get the bytes to sign for the given transaction + pub fn get_sign_bytes(&self) -> Vec { + transaction::get_sign_bytes(&self.0) + } + + /// Attach the provided signatures to the tx + pub fn attach_signatures( + self, + signer: common::PublicKey, + signature: common::Signature, + ) -> Self { + Self(transaction::attach_raw_signatures( + self.0, signer, signature, + )) + } + + /// Generates the protobuf encoding of this transaction + pub fn to_bytes(&self) -> Vec { + self.0.to_bytes() + } +} diff --git a/light_sdk/src/transaction/transfer.rs b/light_sdk/src/transaction/transfer.rs new file mode 100644 index 0000000000..cce45f1e91 --- /dev/null +++ b/light_sdk/src/transaction/transfer.rs @@ -0,0 +1,64 @@ +use borsh_ext::BorshSerializeExt; +use namada_core::proto::Tx; +use namada_core::types::address::Address; +use namada_core::types::hash::Hash; +use namada_core::types::key::common; +use namada_core::types::token::DenominatedAmount; + +use super::GlobalArgs; +use crate::transaction; + +const TX_TRANSFER_WASM: &str = "tx_transfer.wasm"; + +/// A transfer transaction +pub struct Transfer(Tx); + +impl Transfer { + /// Build a raw Transfer transaction from the given parameters + pub fn new( + source: Address, + target: Address, + token: Address, + amount: DenominatedAmount, + key: Option, + // FIXME: handle masp here + shielded: Option, + args: GlobalArgs, + ) -> Self { + let init_proposal = namada_core::types::token::Transfer { + source, + target, + token, + amount, + key, + shielded, + }; + + Self(transaction::build_tx( + args, + init_proposal.serialize_to_vec(), + TX_TRANSFER_WASM.to_string(), + )) + } + + /// Get the bytes to sign for the given transaction + pub fn get_sign_bytes(&self) -> Vec { + transaction::get_sign_bytes(&self.0) + } + + /// Attach the provided signatures to the tx + pub fn attach_signatures( + self, + signer: common::PublicKey, + signature: common::Signature, + ) -> Self { + Self(transaction::attach_raw_signatures( + self.0, signer, signature, + )) + } + + /// Generates the protobuf encoding of this transaction + pub fn to_bytes(&self) -> Vec { + self.0.to_bytes() + } +} diff --git a/light_sdk/src/transaction/wrapper.rs b/light_sdk/src/transaction/wrapper.rs new file mode 100644 index 0000000000..eb4373a0c2 --- /dev/null +++ b/light_sdk/src/transaction/wrapper.rs @@ -0,0 
+1,64 @@ +use namada_core::proto::{Section, Signature, Signer, Tx, TxError}; +use namada_core::types::hash::Hash; +use namada_core::types::key::common; +use namada_core::types::storage::Epoch; +use namada_core::types::transaction::{Fee, GasLimit}; + +#[allow(missing_docs)] +pub struct Wrapper(Tx); + +impl Wrapper { + /// Takes a transaction and a signature and wraps them in a wrapper + /// transaction ready for submission + pub fn new( + mut tx: Tx, + fee: Fee, + fee_payer: common::PublicKey, + gas_limit: GasLimit, + // FIXME: fix masp unshielding + unshield_hash: Option, + ) -> Self { + tx.add_wrapper( + fee, + fee_payer, + Epoch::default(), + gas_limit, + unshield_hash, + ); + + Self(tx) + } + + /// Returns the message to be signed for this transaction + pub fn get_sign_bytes(mut self) -> (Self, Vec) { + self.0.protocol_filter(); + let msg = self.0.sechashes(); + + (self, msg) + } + + /// Attach the given outer signature to the transaction + pub fn attach_signature( + mut self, + signer: common::PublicKey, + signature: common::Signature, + ) -> Self { + self.0.add_section(Section::Signature(Signature { + targets: self.0.sechashes(), + signer: Signer::PubKeys(vec![signer]), + signatures: [(0, signature)].into_iter().collect(), + })); + + self + } + + /// Validate this wrapper transaction + pub fn validate_tx(&self) -> Result, TxError> { + self.0.validate_tx() + } + + /// Generates the protobuf encoding of this transaction + pub fn to_bytes(&self) -> Vec { + self.0.to_bytes() + } +} diff --git a/light_sdk/src/writing/mod.rs b/light_sdk/src/writing/mod.rs new file mode 100644 index 0000000000..ac809ebf48 --- /dev/null +++ b/light_sdk/src/writing/mod.rs @@ -0,0 +1,3 @@ +pub mod blocking {} + +pub mod unblocking {} diff --git a/proof_of_stake/src/lib.rs b/proof_of_stake/src/lib.rs index 531ea5e31a..a390d6ec00 100644 --- a/proof_of_stake/src/lib.rs +++ b/proof_of_stake/src/lib.rs @@ -9,9 +9,13 @@ pub mod epoched; pub mod parameters; pub mod pos_queries; +pub mod queries; pub mod rewards; +pub mod slashing; pub mod storage; +pub mod storage_key; pub mod types; +pub mod validator_set_update; // pub mod validation; mod error; @@ -19,55 +23,68 @@ mod error; mod tests; use core::fmt::Debug; -use std::cmp::{self, Reverse}; -use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet}; +use std::cmp::{self}; +use std::collections::{BTreeMap, BTreeSet, HashSet}; -use borsh::BorshDeserialize; pub use error::*; use namada_core::ledger::storage_api::collections::lazy_map::{ - Collectable, LazyMap, NestedMap, NestedSubKey, SubKey, + Collectable, LazyMap, NestedSubKey, SubKey, }; -use namada_core::ledger::storage_api::collections::{LazyCollection, LazySet}; use namada_core::ledger::storage_api::{ - self, governance, token, ResultExt, StorageRead, StorageWrite, + self, token, StorageRead, StorageWrite, }; -use namada_core::types::address::{self, Address, InternalAddress}; +use namada_core::types::address::{Address, InternalAddress}; use namada_core::types::dec::Dec; -use namada_core::types::key::{ - common, protocol_pk_key, tm_consensus_key_raw_hash, PublicKeyTmRawHash, -}; +use namada_core::types::key::common; use namada_core::types::storage::BlockHeight; pub use namada_core::types::storage::{Epoch, Key, KeySeg}; -use once_cell::unsync::Lazy; pub use parameters::{OwnedPosParams, PosParams}; -use rewards::PosRewardsCalculator; -use storage::{ - bonds_for_source_prefix, bonds_prefix, consensus_keys_key, - get_validator_address_from_bond, is_bond_key, is_unbond_key, - is_validator_slashes_key, 
last_block_proposer_key, - last_pos_reward_claim_epoch_key, params_key, rewards_counter_key, - slashes_prefix, unbonds_for_source_prefix, unbonds_prefix, - validator_address_raw_hash_key, validator_description_key, - validator_discord_key, validator_email_key, validator_last_slash_key, - validator_max_commission_rate_change_key, validator_website_key, + +use crate::queries::{find_bonds, has_bonds}; +use crate::rewards::{ + add_rewards_to_counter, compute_current_rewards_from_bonds, + read_rewards_counter, take_rewards_from_counter, +}; +use crate::slashing::{ + apply_list_slashes, compute_amount_after_slashing_unbond, + compute_amount_after_slashing_withdraw, find_validator_slashes, +}; +use crate::storage::{ + below_capacity_validator_set_handle, bond_handle, + consensus_validator_set_handle, delegator_redelegated_bonds_handle, + delegator_redelegated_unbonds_handle, get_last_reward_claim_epoch, + liveness_missed_votes_handle, liveness_sum_missed_votes_handle, + read_consensus_validator_set_addresses, read_non_pos_owned_params, + read_pos_params, read_validator_last_slash_epoch, + read_validator_max_commission_rate_change, read_validator_stake, + total_bonded_handle, total_consensus_stake_handle, total_unbonded_handle, + try_insert_consensus_key, unbond_handle, update_total_deltas, + update_validator_deltas, validator_addresses_handle, + validator_commission_rate_handle, validator_consensus_key_handle, + validator_deltas_handle, validator_eth_cold_key_handle, + validator_eth_hot_key_handle, validator_incoming_redelegations_handle, + validator_outgoing_redelegations_handle, validator_protocol_key_handle, + validator_rewards_products_handle, validator_set_positions_handle, + validator_slashes_handle, validator_state_handle, + validator_total_redelegated_bonded_handle, + validator_total_redelegated_unbonded_handle, write_last_reward_claim_epoch, + write_pos_params, write_validator_address_raw_hash, + write_validator_description, write_validator_discord_handle, + write_validator_email, write_validator_max_commission_rate_change, + write_validator_metadata, write_validator_website, }; -use types::{ - into_tm_voting_power, BelowCapacityValidatorSet, - BelowCapacityValidatorSets, BondDetails, BondId, Bonds, - BondsAndUnbondsDetail, BondsAndUnbondsDetails, CommissionRates, - ConsensusValidator, ConsensusValidatorSet, ConsensusValidatorSets, - DelegatorRedelegatedBonded, DelegatorRedelegatedUnbonded, - EagerRedelegatedBondsMap, EpochedSlashes, IncomingRedelegations, - LivenessMissedVotes, LivenessSumMissedVotes, OutgoingRedelegations, - Position, RedelegatedBondsOrUnbonds, RedelegatedTokens, - ReverseOrdTokenAmount, RewardsAccumulator, RewardsProducts, Slash, - SlashType, SlashedAmount, Slashes, TotalConsensusStakes, TotalDeltas, - TotalRedelegatedBonded, TotalRedelegatedUnbonded, UnbondDetails, Unbonds, - ValidatorAddresses, ValidatorConsensusKeys, ValidatorDeltas, - ValidatorEthColdKeys, ValidatorEthHotKeys, ValidatorMetaData, - ValidatorPositionAddresses, ValidatorProtocolKeys, ValidatorSetPositions, - ValidatorSetUpdate, ValidatorState, ValidatorStates, - ValidatorTotalUnbonded, VoteInfo, WeightedValidator, +use crate::storage_key::{bonds_for_source_prefix, is_bond_key}; +use crate::types::{ + BondId, ConsensusValidator, ConsensusValidatorSet, + EagerRedelegatedBondsMap, RedelegatedBondsOrUnbonds, RedelegatedTokens, + ResultSlashing, Slash, Unbonds, ValidatorMetaData, ValidatorSetUpdate, + ValidatorState, VoteInfo, +}; +use crate::validator_set_update::{ + copy_validator_sets_and_positions, 
insert_validator_into_validator_set, + promote_next_below_capacity_validator_to_consensus, + remove_below_capacity_validator, remove_consensus_validator, + update_validator_set, }; /// Address of the PoS account implemented as a native VP @@ -84,217 +101,6 @@ pub fn staking_token_address(storage: &impl StorageRead) -> Address { .expect("Must be able to read native token address") } -/// Get the storage handle to the epoched consensus validator set -pub fn consensus_validator_set_handle() -> ConsensusValidatorSets { - let key = storage::consensus_validator_set_key(); - ConsensusValidatorSets::open(key) -} - -/// Get the storage handle to the epoched below-capacity validator set -pub fn below_capacity_validator_set_handle() -> BelowCapacityValidatorSets { - let key = storage::below_capacity_validator_set_key(); - BelowCapacityValidatorSets::open(key) -} - -/// Get the storage handle to a PoS validator's consensus key (used for -/// signing block votes). -pub fn validator_consensus_key_handle( - validator: &Address, -) -> ValidatorConsensusKeys { - let key = storage::validator_consensus_key_key(validator); - ValidatorConsensusKeys::open(key) -} - -/// Get the storage handle to a PoS validator's protocol key key. -pub fn validator_protocol_key_handle( - validator: &Address, -) -> ValidatorProtocolKeys { - let key = protocol_pk_key(validator); - ValidatorProtocolKeys::open(key) -} - -/// Get the storage handle to a PoS validator's eth hot key. -pub fn validator_eth_hot_key_handle( - validator: &Address, -) -> ValidatorEthHotKeys { - let key = storage::validator_eth_hot_key_key(validator); - ValidatorEthHotKeys::open(key) -} - -/// Get the storage handle to a PoS validator's eth cold key. -pub fn validator_eth_cold_key_handle( - validator: &Address, -) -> ValidatorEthColdKeys { - let key = storage::validator_eth_cold_key_key(validator); - ValidatorEthColdKeys::open(key) -} - -/// Get the storage handle to the total consensus validator stake -pub fn total_consensus_stake_key_handle() -> TotalConsensusStakes { - let key = storage::total_consensus_stake_key(); - TotalConsensusStakes::open(key) -} - -/// Get the storage handle to a PoS validator's state -pub fn validator_state_handle(validator: &Address) -> ValidatorStates { - let key = storage::validator_state_key(validator); - ValidatorStates::open(key) -} - -/// Get the storage handle to a PoS validator's deltas -pub fn validator_deltas_handle(validator: &Address) -> ValidatorDeltas { - let key = storage::validator_deltas_key(validator); - ValidatorDeltas::open(key) -} - -/// Get the storage handle to the total deltas -pub fn total_deltas_handle() -> TotalDeltas { - let key = storage::total_deltas_key(); - TotalDeltas::open(key) -} - -/// Get the storage handle to the set of all validators -pub fn validator_addresses_handle() -> ValidatorAddresses { - let key = storage::validator_addresses_key(); - ValidatorAddresses::open(key) -} - -/// Get the storage handle to a PoS validator's commission rate -pub fn validator_commission_rate_handle( - validator: &Address, -) -> CommissionRates { - let key = storage::validator_commission_rate_key(validator); - CommissionRates::open(key) -} - -/// Get the storage handle to a bond, which is dynamically updated with when -/// unbonding -pub fn bond_handle(source: &Address, validator: &Address) -> Bonds { - let bond_id = BondId { - source: source.clone(), - validator: validator.clone(), - }; - let key = storage::bond_key(&bond_id); - Bonds::open(key) -} - -/// Get the storage handle to a validator's total bonds, 
which are not updated -/// due to unbonding -pub fn total_bonded_handle(validator: &Address) -> Bonds { - let key = storage::validator_total_bonded_key(validator); - Bonds::open(key) -} - -/// Get the storage handle to an unbond -pub fn unbond_handle(source: &Address, validator: &Address) -> Unbonds { - let bond_id = BondId { - source: source.clone(), - validator: validator.clone(), - }; - let key = storage::unbond_key(&bond_id); - Unbonds::open(key) -} - -/// Get the storage handle to a validator's total-unbonded map -pub fn total_unbonded_handle(validator: &Address) -> ValidatorTotalUnbonded { - let key = storage::validator_total_unbonded_key(validator); - ValidatorTotalUnbonded::open(key) -} - -/// Get the storage handle to a PoS validator's deltas -pub fn validator_set_positions_handle() -> ValidatorSetPositions { - let key = storage::validator_set_positions_key(); - ValidatorSetPositions::open(key) -} - -/// Get the storage handle to a PoS validator's slashes -pub fn validator_slashes_handle(validator: &Address) -> Slashes { - let key = storage::validator_slashes_key(validator); - Slashes::open(key) -} - -/// Get the storage handle to list of all slashes to be processed and ultimately -/// placed in the `validator_slashes_handle` -pub fn enqueued_slashes_handle() -> EpochedSlashes { - let key = storage::enqueued_slashes_key(); - EpochedSlashes::open(key) -} - -/// Get the storage handle to the rewards accumulator for the consensus -/// validators in a given epoch -pub fn rewards_accumulator_handle() -> RewardsAccumulator { - let key = storage::consensus_validator_rewards_accumulator_key(); - RewardsAccumulator::open(key) -} - -/// Get the storage handle to a validator's rewards products -pub fn validator_rewards_products_handle( - validator: &Address, -) -> RewardsProducts { - let key = storage::validator_rewards_product_key(validator); - RewardsProducts::open(key) -} - -/// Get the storage handle to a validator's incoming redelegations -pub fn validator_incoming_redelegations_handle( - validator: &Address, -) -> IncomingRedelegations { - let key = storage::validator_incoming_redelegations_key(validator); - IncomingRedelegations::open(key) -} - -/// Get the storage handle to a validator's outgoing redelegations -pub fn validator_outgoing_redelegations_handle( - validator: &Address, -) -> OutgoingRedelegations { - let key: Key = storage::validator_outgoing_redelegations_key(validator); - OutgoingRedelegations::open(key) -} - -/// Get the storage handle to a validator's total redelegated bonds -pub fn validator_total_redelegated_bonded_handle( - validator: &Address, -) -> TotalRedelegatedBonded { - let key: Key = storage::validator_total_redelegated_bonded_key(validator); - TotalRedelegatedBonded::open(key) -} - -/// Get the storage handle to a validator's outgoing redelegations -pub fn validator_total_redelegated_unbonded_handle( - validator: &Address, -) -> TotalRedelegatedUnbonded { - let key: Key = storage::validator_total_redelegated_unbonded_key(validator); - TotalRedelegatedUnbonded::open(key) -} - -/// Get the storage handle to a delegator's redelegated bonds information -pub fn delegator_redelegated_bonds_handle( - delegator: &Address, -) -> DelegatorRedelegatedBonded { - let key: Key = storage::delegator_redelegated_bonds_key(delegator); - DelegatorRedelegatedBonded::open(key) -} - -/// Get the storage handle to a delegator's redelegated unbonds information -pub fn delegator_redelegated_unbonds_handle( - delegator: &Address, -) -> DelegatorRedelegatedUnbonded { - let key: Key 
= storage::delegator_redelegated_unbonds_key(delegator); - DelegatorRedelegatedUnbonded::open(key) -} - -/// Get the storage handle to the missed votes for liveness tracking -pub fn liveness_missed_votes_handle() -> LivenessMissedVotes { - let key = storage::liveness_missed_votes_key(); - LivenessMissedVotes::open(key) -} - -/// Get the storage handle to the sum of missed votes for liveness tracking -pub fn liveness_sum_missed_votes_handle() -> LivenessSumMissedVotes { - let key = storage::liveness_sum_missed_votes_key(); - LivenessSumMissedVotes::open(key) -} - /// Init genesis. Requires that the governance parameters are initialized. pub fn init_genesis( storage: &mut S, @@ -339,4810 +145,1686 @@ where Ok(()) } -/// Read PoS parameters -pub fn read_pos_params(storage: &S) -> storage_api::Result -where - S: StorageRead, -{ - let params = storage - .read(¶ms_key()) - .transpose() - .expect("PosParams should always exist in storage after genesis")?; - read_non_pos_owned_params(storage, params) -} - -/// Read non-PoS-owned parameters to add them to `OwnedPosParams` to construct -/// `PosParams`. -pub fn read_non_pos_owned_params( +/// Check if the provided address is a validator address +pub fn is_validator( storage: &S, - owned: OwnedPosParams, -) -> storage_api::Result + address: &Address, +) -> storage_api::Result where S: StorageRead, { - let max_proposal_period = governance::get_max_proposal_period(storage)?; - Ok(PosParams { - owned, - max_proposal_period, - }) -} - -/// Write PoS parameters -pub fn write_pos_params( - storage: &mut S, - params: &OwnedPosParams, -) -> storage_api::Result<()> -where - S: StorageRead + StorageWrite, -{ - let key = params_key(); - storage.write(&key, params) + // TODO: should this check be made different? I suppose it does work but + // feels weird... + let rate = read_validator_max_commission_rate_change(storage, address)?; + Ok(rate.is_some()) } -/// Get the validator address given the raw hash of the Tendermint consensus key -pub fn find_validator_by_raw_hash( +/// Check if the provided address is a delegator address, optionally at a +/// particular epoch +pub fn is_delegator( storage: &S, - raw_hash: impl AsRef, -) -> storage_api::Result> + address: &Address, + epoch: Option, +) -> storage_api::Result where S: StorageRead, { - let key = validator_address_raw_hash_key(raw_hash); - storage.read(&key) + let prefix = bonds_for_source_prefix(address); + match epoch { + Some(epoch) => { + let iter = storage_api::iter_prefix_bytes(storage, &prefix)?; + for res in iter { + let (key, _) = res?; + if let Some((bond_id, bond_epoch)) = is_bond_key(&key) { + if bond_id.source != bond_id.validator + && bond_epoch <= epoch + { + return Ok(true); + } + } + } + Ok(false) + } + None => { + let iter = storage_api::iter_prefix_bytes(storage, &prefix)?; + for res in iter { + let (key, _) = res?; + if let Some((bond_id, _epoch)) = is_bond_key(&key) { + if bond_id.source != bond_id.validator { + return Ok(true); + } + } + } + Ok(false) + } + } } -/// Write PoS validator's address raw hash. -pub fn write_validator_address_raw_hash( +/// Self-bond tokens to a validator when `source` is `None` or equal to +/// the `validator` address, or delegate tokens from the `source` to the +/// `validator`. 
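// Illustrative sketch, not part of this change-set: how a caller might drive a
// self-bond through the function defined just below. A self-bond passes `None`
// as the source, so `bond_tokens` treats the validator address itself as the
// source of the staked tokens; `is_validator` (added above) guards the call.
// The helper name `try_self_bond` is hypothetical.
fn try_self_bond<S>(
    storage: &mut S,
    validator: &Address,
    amount: token::Amount,
    current_epoch: Epoch,
) -> storage_api::Result<()>
where
    S: StorageRead + StorageWrite,
{
    if !is_validator(storage, validator)? {
        // Mirrors the error that `bond_tokens` itself raises when the target
        // has no validator metadata in storage.
        return Err(BondError::NotAValidator(validator.clone()).into());
    }
    // `None` for the offset means the default pipeline offset is used.
    bond_tokens(storage, None, validator, amount, current_epoch, None)
}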
+pub fn bond_tokens( storage: &mut S, + source: Option<&Address>, validator: &Address, - consensus_key: &common::PublicKey, + amount: token::Amount, + current_epoch: Epoch, + offset_opt: Option, ) -> storage_api::Result<()> where S: StorageRead + StorageWrite, { - let raw_hash = tm_consensus_key_raw_hash(consensus_key); - storage.write(&validator_address_raw_hash_key(raw_hash), validator) + tracing::debug!( + "Bonding token amount {} at epoch {current_epoch}", + amount.to_string_native() + ); + if amount.is_zero() { + return Ok(()); + } + + // Transfer the bonded tokens from the source to PoS + if let Some(source) = source { + if source != validator && is_validator(storage, source)? { + return Err( + BondError::SourceMustNotBeAValidator(source.clone()).into() + ); + } + } + let source = source.unwrap_or(validator); + tracing::debug!("Source {source} --> Validator {validator}"); + + let staking_token = staking_token_address(storage); + token::transfer(storage, &staking_token, source, &ADDRESS, amount)?; + + let params = read_pos_params(storage)?; + let offset = offset_opt.unwrap_or(params.pipeline_len); + let offset_epoch = current_epoch + offset; + + // Check that the validator is actually a validator + let validator_state_handle = validator_state_handle(validator); + let state = validator_state_handle.get(storage, offset_epoch, ¶ms)?; + if state.is_none() { + return Err(BondError::NotAValidator(validator.clone()).into()); + } + + let bond_handle = bond_handle(source, validator); + let total_bonded_handle = total_bonded_handle(validator); + + if tracing::level_enabled!(tracing::Level::DEBUG) { + let bonds = find_bonds(storage, source, validator)?; + tracing::debug!("\nBonds before incrementing: {bonds:#?}"); + } + + // Initialize or update the bond at the pipeline offset + bond_handle.add(storage, amount, current_epoch, offset)?; + total_bonded_handle.add(storage, amount, current_epoch, offset)?; + + if tracing::level_enabled!(tracing::Level::DEBUG) { + let bonds = find_bonds(storage, source, validator)?; + tracing::debug!("\nBonds after incrementing: {bonds:#?}"); + } + + // Update the validator set + // Allow bonding even if the validator is jailed. However, if jailed, there + // must be no changes to the validator set. Check at the pipeline epoch. + let is_jailed_or_inactive_at_pipeline = matches!( + validator_state_handle.get(storage, offset_epoch, ¶ms)?, + Some(ValidatorState::Jailed) | Some(ValidatorState::Inactive) + ); + if !is_jailed_or_inactive_at_pipeline { + update_validator_set( + storage, + ¶ms, + validator, + amount.change(), + current_epoch, + offset_opt, + )?; + } + + // Update the validator and total deltas + update_validator_deltas( + storage, + ¶ms, + validator, + amount.change(), + current_epoch, + offset_opt, + )?; + + update_total_deltas( + storage, + ¶ms, + amount.change(), + current_epoch, + offset_opt, + )?; + + Ok(()) } -/// Read PoS validator's max commission rate change. -pub fn read_validator_max_commission_rate_change( +/// Compute total validator stake for the current epoch +fn compute_total_consensus_stake( storage: &S, - validator: &Address, -) -> storage_api::Result> + epoch: Epoch, +) -> storage_api::Result where S: StorageRead, { - let key = validator_max_commission_rate_change_key(validator); - storage.read(&key) + consensus_validator_set_handle() + .at(&epoch) + .iter(storage)? 
+ .fold(Ok(token::Amount::zero()), |acc, entry| { + let acc = acc?; + let ( + NestedSubKey::Data { + key: amount, + nested_sub_key: _, + }, + _validator, + ) = entry?; + Ok(acc.checked_add(amount).expect( + "Total consensus stake computation should not overflow.", + )) + }) } -/// Write PoS validator's max commission rate change. -pub fn write_validator_max_commission_rate_change( +/// Compute and then store the total consensus stake +pub fn compute_and_store_total_consensus_stake( storage: &mut S, - validator: &Address, - change: Dec, + epoch: Epoch, ) -> storage_api::Result<()> where S: StorageRead + StorageWrite, { - let key = validator_max_commission_rate_change_key(validator); - storage.write(&key, change) -} + let total = compute_total_consensus_stake(storage, epoch)?; + tracing::debug!( + "Total consensus stake for epoch {}: {}", + epoch, + total.to_string_native() + ); + total_consensus_stake_handle().set(storage, total, epoch, 0) +} -/// Read the most recent slash epoch for the given epoch -pub fn read_validator_last_slash_epoch( - storage: &S, - validator: &Address, -) -> storage_api::Result> -where - S: StorageRead, -{ - let key = validator_last_slash_key(validator); - storage.read(&key) +/// Used below in `fn unbond_tokens` to update the bond and unbond amounts +#[derive(Eq, Hash, PartialEq)] +struct BondAndUnbondUpdates { + bond_start: Epoch, + new_bond_value: token::Change, + unbond_value: token::Change, } -/// Write the most recent slash epoch for the given epoch -pub fn write_validator_last_slash_epoch( +/// Unbond tokens that are bonded between a validator and a source (self or +/// delegator). +/// +/// This fn is also called during redelegation for a source validator, in +/// which case the `is_redelegation` param must be true. +pub fn unbond_tokens( storage: &mut S, + source: Option<&Address>, validator: &Address, - epoch: Epoch, -) -> storage_api::Result<()> + amount: token::Amount, + current_epoch: Epoch, + is_redelegation: bool, +) -> storage_api::Result where S: StorageRead + StorageWrite, { - let key = validator_last_slash_key(validator); - storage.write(&key, epoch) -} + if amount.is_zero() { + return Ok(ResultSlashing::default()); + } -/// Read last block proposer address. -pub fn read_last_block_proposer_address( - storage: &S, -) -> storage_api::Result> -where - S: StorageRead, -{ - let key = last_block_proposer_key(); - storage.read(&key) -} + let params = read_pos_params(storage)?; + let pipeline_epoch = current_epoch + params.pipeline_len; + let withdrawable_epoch = current_epoch + params.withdrawable_epoch_offset(); + tracing::debug!( + "Unbonding token amount {} at epoch {}, withdrawable at epoch {}", + amount.to_string_native(), + current_epoch, + withdrawable_epoch + ); -/// Write last block proposer address. -pub fn write_last_block_proposer_address( - storage: &mut S, - address: Address, -) -> storage_api::Result<()> -where - S: StorageRead + StorageWrite, -{ - let key = last_block_proposer_key(); - storage.write(&key, address) -} + // Make sure source is not some other validator + if let Some(source) = source { + if source != validator && is_validator(storage, source)? { + return Err( + BondError::SourceMustNotBeAValidator(source.clone()).into() + ); + } + } + // Make sure the target is actually a validator + if !is_validator(storage, validator)? { + return Err(BondError::NotAValidator(validator.clone()).into()); + } + // Make sure the validator is not currently frozen + if is_validator_frozen(storage, validator, current_epoch, ¶ms)? 
{ + return Err(UnbondError::ValidatorIsFrozen(validator.clone()).into()); + } -/// Read PoS validator's delta value. -pub fn read_validator_deltas_value( - storage: &S, - validator: &Address, - epoch: &namada_core::types::storage::Epoch, -) -> storage_api::Result> -where - S: StorageRead, -{ - let handle = validator_deltas_handle(validator); - handle.get_delta_val(storage, *epoch) -} + let source = source.unwrap_or(validator); + let bonds_handle = bond_handle(source, validator); -/// Read PoS validator's stake (sum of deltas). -/// For non-validators and validators with `0` stake, this returns the default - -/// `token::Amount::zero()`. -pub fn read_validator_stake( - storage: &S, - params: &PosParams, - validator: &Address, - epoch: namada_core::types::storage::Epoch, -) -> storage_api::Result -where - S: StorageRead, -{ - let handle = validator_deltas_handle(validator); - let amount = handle - .get_sum(storage, epoch, params)? - .map(|change| { - debug_assert!(change.non_negative()); - token::Amount::from_change(change) - }) + // Make sure there are enough tokens left in the bond at the pipeline offset + let remaining_at_pipeline = bonds_handle + .get_sum(storage, pipeline_epoch, ¶ms)? .unwrap_or_default(); - Ok(amount) -} + if amount > remaining_at_pipeline { + return Err(UnbondError::UnbondAmountGreaterThanBond( + amount.to_string_native(), + remaining_at_pipeline.to_string_native(), + ) + .into()); + } -/// Add or remove PoS validator's stake delta value -pub fn update_validator_deltas( - storage: &mut S, - params: &OwnedPosParams, - validator: &Address, - delta: token::Change, - current_epoch: namada_core::types::storage::Epoch, - offset_opt: Option, -) -> storage_api::Result<()> -where - S: StorageRead + StorageWrite, -{ - let handle = validator_deltas_handle(validator); - let offset = offset_opt.unwrap_or(params.pipeline_len); - let val = handle - .get_delta_val(storage, current_epoch + offset)? - .unwrap_or_default(); - handle.set( - storage, - val.checked_add(&delta) - .expect("Validator deltas updated amount should not overflow"), - current_epoch, - offset, - ) -} + if tracing::level_enabled!(tracing::Level::DEBUG) { + let bonds = find_bonds(storage, source, validator)?; + tracing::debug!("\nBonds before decrementing: {bonds:#?}"); + } -/// Read PoS total stake (sum of deltas). -pub fn read_total_stake( - storage: &S, - params: &PosParams, - epoch: namada_core::types::storage::Epoch, -) -> storage_api::Result -where - S: StorageRead, -{ - let handle = total_deltas_handle(); - let amnt = handle - .get_sum(storage, epoch, params)? - .map(|change| { - debug_assert!(change.non_negative()); - token::Amount::from_change(change) - }) - .unwrap_or_default(); - Ok(amnt) -} + let unbonds = unbond_handle(source, validator); -/// Read all addresses from consensus validator set. -pub fn read_consensus_validator_set_addresses( - storage: &S, - epoch: namada_core::types::storage::Epoch, -) -> storage_api::Result> -where - S: StorageRead, -{ - consensus_validator_set_handle() - .at(&epoch) - .iter(storage)? - .map(|res| res.map(|(_sub_key, address)| address)) - .collect() -} + let redelegated_bonds = + delegator_redelegated_bonds_handle(source).at(validator); -/// Read all addresses from below-capacity validator set. -pub fn read_below_capacity_validator_set_addresses( - storage: &S, - epoch: namada_core::types::storage::Epoch, -) -> storage_api::Result> -where - S: StorageRead, -{ - below_capacity_validator_set_handle() - .at(&epoch) - .iter(storage)? 
- .map(|res| res.map(|(_sub_key, address)| address)) - .collect() -} + #[cfg(debug_assertions)] + let redel_bonds_pre = redelegated_bonds.collect_map(storage)?; -/// Read all addresses from the below-threshold set -pub fn read_below_threshold_validator_set_addresses( - storage: &S, - epoch: namada_core::types::storage::Epoch, -) -> storage_api::Result> -where - S: StorageRead, -{ - let params = read_pos_params(storage)?; - Ok(validator_addresses_handle() - .at(&epoch) - .iter(storage)? - .map(Result::unwrap) - .filter(|address| { - matches!( - validator_state_handle(address).get(storage, epoch, ¶ms), - Ok(Some(ValidatorState::BelowThreshold)) - ) - }) - .collect()) -} + // `resultUnbonding` + // Find the bonds to fully unbond (remove) and one to partially unbond, if + // necessary + let bonds_to_unbond = find_bonds_to_remove( + storage, + &bonds_handle.get_data_handler(), + amount, + )?; -/// Read all addresses from consensus validator set with their stake. -pub fn read_consensus_validator_set_addresses_with_stake( - storage: &S, - epoch: namada_core::types::storage::Epoch, -) -> storage_api::Result> -where - S: StorageRead, -{ - consensus_validator_set_handle() - .at(&epoch) - .iter(storage)? - .map(|res| { - res.map( - |( - NestedSubKey::Data { - key: bonded_stake, - nested_sub_key: _, - }, - address, - )| { - WeightedValidator { - address, - bonded_stake, - } - }, - ) - }) - .collect() -} + // `modifiedRedelegation` + // A bond may have both redelegated and non-redelegated tokens in it. If + // this is the case, compute the modified state of the redelegation. + let modified_redelegation = match bonds_to_unbond.new_entry { + Some((bond_epoch, new_bond_amount)) => { + if redelegated_bonds.contains(storage, &bond_epoch)? { + let cur_bond_amount = bonds_handle + .get_delta_val(storage, bond_epoch)? + .unwrap_or_default(); + compute_modified_redelegation( + storage, + &redelegated_bonds.at(&bond_epoch), + bond_epoch, + cur_bond_amount - new_bond_amount, + )? + } else { + ModifiedRedelegation::default() + } + } + None => ModifiedRedelegation::default(), + }; -/// Count the number of consensus validators -pub fn get_num_consensus_validators( - storage: &S, - epoch: namada_core::types::storage::Epoch, -) -> storage_api::Result -where - S: StorageRead, -{ - Ok(consensus_validator_set_handle() - .at(&epoch) - .iter(storage)? - .count() as u64) -} + // Compute the new unbonds eagerly + // `keysUnbonds` + // Get a set of epochs from which we're unbonding (fully and partially). + let bond_epochs_to_unbond = + if let Some((start_epoch, _)) = bonds_to_unbond.new_entry { + let mut to_remove = bonds_to_unbond.epochs.clone(); + to_remove.insert(start_epoch); + to_remove + } else { + bonds_to_unbond.epochs.clone() + }; -/// Read all addresses from below-capacity validator set with their stake. -pub fn read_below_capacity_validator_set_addresses_with_stake( - storage: &S, - epoch: namada_core::types::storage::Epoch, -) -> storage_api::Result> -where - S: StorageRead, -{ - below_capacity_validator_set_handle() - .at(&epoch) - .iter(storage)? - .map(|res| { - res.map( - |( - NestedSubKey::Data { - key: ReverseOrdTokenAmount(bonded_stake), - nested_sub_key: _, - }, - address, - )| { - WeightedValidator { - address, - bonded_stake, - } - }, - ) + // `newUnbonds` + // For each epoch we're unbonding, find the amount that's being unbonded. + // For full unbonds, this is the current bond value. For partial unbonds + // it is a difference between the current and new bond amount. 
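// Self-contained sketch (not the on-chain code) of the per-epoch split
// described in the comment above and computed just below: given in-memory
// bonds keyed by start epoch and a requested unbond amount, produce the
// per-epoch unbond amounts. Fully consumed epochs contribute their whole bond
// value; the last epoch touched contributes only the difference between its
// current and remaining bond amount. The oldest-first order is an arbitrary
// choice for the sketch; in the real code the selection is made by
// `find_bonds_to_remove`.
use std::collections::BTreeMap;

fn split_unbond(
    bonds: &BTreeMap<u64, u64>,
    mut to_unbond: u64,
) -> BTreeMap<u64, u64> {
    let mut unbonds = BTreeMap::new();
    for (&start_epoch, &bonded) in bonds {
        if to_unbond == 0 {
            break;
        }
        let taken = bonded.min(to_unbond);
        unbonds.insert(start_epoch, taken);
        to_unbond -= taken;
    }
    unbonds
}

// E.g. bonds {2: 100, 5: 50} with an unbond of 120 yield {2: 100, 5: 20}.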
+ let new_unbonds_map = bond_epochs_to_unbond + .into_iter() + .map(|epoch| { + let cur_bond_value = bonds_handle + .get_delta_val(storage, epoch) + .unwrap() + .unwrap_or_default(); + let value = if let Some((start_epoch, new_bond_amount)) = + bonds_to_unbond.new_entry + { + if start_epoch == epoch { + cur_bond_value - new_bond_amount + } else { + cur_bond_value + } + } else { + cur_bond_value + }; + (epoch, value) }) - .collect() -} + .collect::>(); -/// Read all validator addresses. -pub fn read_all_validator_addresses( - storage: &S, - epoch: namada_core::types::storage::Epoch, -) -> storage_api::Result> -where - S: StorageRead, -{ - validator_addresses_handle() - .at(&epoch) - .iter(storage)? - .collect() -} - -/// Update PoS total deltas. -/// Note: for EpochedDelta, write the value to change storage by -pub fn update_total_deltas( - storage: &mut S, - params: &OwnedPosParams, - delta: token::Change, - current_epoch: namada_core::types::storage::Epoch, - offset_opt: Option, -) -> storage_api::Result<()> -where - S: StorageRead + StorageWrite, -{ - let handle = total_deltas_handle(); - let offset = offset_opt.unwrap_or(params.pipeline_len); - let val = handle - .get_delta_val(storage, current_epoch + offset)? - .unwrap_or_default(); - handle.set( - storage, - val.checked_add(&delta) - .expect("Total deltas updated amount should not overflow"), - current_epoch, - offset, - ) -} - -/// Check if the provided address is a validator address -pub fn is_validator( - storage: &S, - address: &Address, -) -> storage_api::Result -where - S: StorageRead, -{ - // TODO: should this check be made different? I suppose it does work but - // feels weird... - let rate = read_validator_max_commission_rate_change(storage, address)?; - Ok(rate.is_some()) -} + // `updatedBonded` + // Remove bonds for all the full unbonds. + for epoch in &bonds_to_unbond.epochs { + bonds_handle.get_data_handler().remove(storage, epoch)?; + } + // Replace bond amount for partial unbond, if any. + if let Some((bond_epoch, new_bond_amount)) = bonds_to_unbond.new_entry { + bonds_handle.set(storage, new_bond_amount, bond_epoch, 0)?; + } -/// Check if the provided address is a delegator address, optionally at a -/// particular epoch -pub fn is_delegator( - storage: &S, - address: &Address, - epoch: Option, -) -> storage_api::Result -where - S: StorageRead, -{ - let prefix = bonds_for_source_prefix(address); - match epoch { - Some(epoch) => { - let iter = storage_api::iter_prefix_bytes(storage, &prefix)?; - for res in iter { - let (key, _) = res?; - if let Some((bond_id, bond_epoch)) = is_bond_key(&key) { - if bond_id.source != bond_id.validator - && bond_epoch <= epoch - { - return Ok(true); - } - } - } - Ok(false) - } - None => { - let iter = storage_api::iter_prefix_bytes(storage, &prefix)?; - for res in iter { - let (key, _) = res?; - if let Some((bond_id, _epoch)) = is_bond_key(&key) { - if bond_id.source != bond_id.validator { - return Ok(true); - } - } - } - Ok(false) + // `updatedUnbonded` + // Update the unbonds in storage using the eager map computed above + if !is_redelegation { + for (start_epoch, &unbond_amount) in new_unbonds_map.iter() { + unbonds.at(start_epoch).update( + storage, + withdrawable_epoch, + |cur_val| cur_val.unwrap_or_default() + unbond_amount, + )?; } } -} -/// Self-bond tokens to a validator when `source` is `None` or equal to -/// the `validator` address, or delegate tokens from the `source` to the -/// `validator`. 
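// Small illustrative helper (hypothetical, not part of this change-set)
// spelling out the distinction used by `is_delegator` earlier in this hunk:
// a bond whose source equals its validator is a self-bond, anything else is a
// delegation. The `BondId` fields are the same ones decoded from storage keys
// by `is_bond_key` in the code above.
fn is_delegation(bond_id: &BondId) -> bool {
    bond_id.source != bond_id.validator
}

// Usage against a decoded storage key, following the same pattern as above:
// if let Some((bond_id, start_epoch)) = is_bond_key(&key) {
//     if is_delegation(&bond_id) && start_epoch <= epoch { /* delegator */ }
// }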
-pub fn bond_tokens( - storage: &mut S, - source: Option<&Address>, - validator: &Address, - amount: token::Amount, - current_epoch: Epoch, - offset_opt: Option, -) -> storage_api::Result<()> -where - S: StorageRead + StorageWrite, -{ - tracing::debug!( - "Bonding token amount {} at epoch {current_epoch}", - amount.to_string_native() - ); - if amount.is_zero() { - return Ok(()); - } + // `newRedelegatedUnbonds` + // This is what the delegator's redelegated unbonds would look like if this + // was the only unbond in the PoS system. We need to add these redelegated + // unbonds to the existing redelegated unbonds + let new_redelegated_unbonds = compute_new_redelegated_unbonds( + storage, + &redelegated_bonds, + &bonds_to_unbond.epochs, + &modified_redelegation, + )?; - // Transfer the bonded tokens from the source to PoS - if let Some(source) = source { - if source != validator && is_validator(storage, source)? { - return Err( - BondError::SourceMustNotBeAValidator(source.clone()).into() - ); + // `updatedRedelegatedBonded` + // NOTE: for now put this here after redelegated unbonds calc bc that one + // uses the pre-modified redelegated bonds from storage! + // First remove redelegation entries in epochs with full unbonds. + for epoch_to_remove in &bonds_to_unbond.epochs { + redelegated_bonds.remove_all(storage, epoch_to_remove)?; + } + if let Some(epoch) = modified_redelegation.epoch { + tracing::debug!("\nIs modified redelegation"); + if modified_redelegation.validators_to_remove.is_empty() { + redelegated_bonds.remove_all(storage, &epoch)?; + } else { + // Then update the redelegated bonds at this epoch + let rbonds = redelegated_bonds.at(&epoch); + update_redelegated_bonds(storage, &rbonds, &modified_redelegation)?; } } - let source = source.unwrap_or(validator); - tracing::debug!("Source {source} --> Validator {validator}"); - let staking_token = staking_token_address(storage); - token::transfer(storage, &staking_token, source, &ADDRESS, amount)?; + if !is_redelegation { + // `val updatedRedelegatedUnbonded` with updates applied below + // Delegator's redelegated unbonds to this validator. 
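// Orientation note (illustrative, not part of this change-set): the handle
// chained together in the next few statements addresses a deeply nested map.
// Read from the visible `.at(...)` calls below, the key path is roughly:
//
//   delegator -> destination validator -> redelegation bond start epoch
//             -> withdrawable epoch -> source validator
//             -> bond start epoch in the source validator -> amount
//
// An in-memory analogue of the innermost layers, with epochs and amounts
// simplified to u64, might look like this (hypothetical alias):
// type RedelegatedUnbondsSketch =
//     BTreeMap<u64 /* withdrawable epoch */,
//         BTreeMap<Address /* source validator */,
//             BTreeMap<u64 /* bond start epoch */, u64 /* amount */>>>;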
+ let delegator_redelegated_unbonded = + delegator_redelegated_unbonds_handle(source).at(validator); - let params = read_pos_params(storage)?; - let offset = offset_opt.unwrap_or(params.pipeline_len); - let offset_epoch = current_epoch + offset; + // Quint `def updateRedelegatedUnbonded` with `val + // updatedRedelegatedUnbonded` together with last statement + // in `updatedDelegator.with("redelegatedUnbonded", ...` updated + // directly in storage + for (start, unbonds) in &new_redelegated_unbonds { + let this_redelegated_unbonded = delegator_redelegated_unbonded + .at(start) + .at(&withdrawable_epoch); - // Check that the validator is actually a validator - let validator_state_handle = validator_state_handle(validator); - let state = validator_state_handle.get(storage, offset_epoch, ¶ms)?; - if state.is_none() { - return Err(BondError::NotAValidator(validator.clone()).into()); + // Update the delegator's redelegated unbonds with the change + for (src_validator, redelegated_unbonds) in unbonds { + let redelegated_unbonded = + this_redelegated_unbonded.at(src_validator); + for (&redelegation_epoch, &change) in redelegated_unbonds { + redelegated_unbonded.update( + storage, + redelegation_epoch, + |current| current.unwrap_or_default() + change, + )?; + } + } + } } + // all `val updatedDelegator` changes are applied at this point - let bond_handle = bond_handle(source, validator); - let total_bonded_handle = total_bonded_handle(validator); - - if tracing::level_enabled!(tracing::Level::DEBUG) { - let bonds = find_bonds(storage, source, validator)?; - tracing::debug!("\nBonds before incrementing: {bonds:#?}"); + // `val updatedTotalBonded` and `val updatedTotalUnbonded` with updates + // Update the validator's total bonded and unbonded amounts + let total_bonded = total_bonded_handle(validator).get_data_handler(); + let total_unbonded = total_unbonded_handle(validator).at(&pipeline_epoch); + for (&start_epoch, &amount) in &new_unbonds_map { + total_bonded.update(storage, start_epoch, |current| { + current.unwrap_or_default() - amount + })?; + total_unbonded.update(storage, start_epoch, |current| { + current.unwrap_or_default() + amount + })?; } - // Initialize or update the bond at the pipeline offset - bond_handle.add(storage, amount, current_epoch, offset)?; - total_bonded_handle.add(storage, amount, current_epoch, offset)?; - - if tracing::level_enabled!(tracing::Level::DEBUG) { - let bonds = find_bonds(storage, source, validator)?; - tracing::debug!("\nBonds after incrementing: {bonds:#?}"); - } + let total_redelegated_bonded = + validator_total_redelegated_bonded_handle(validator); + let total_redelegated_unbonded = + validator_total_redelegated_unbonded_handle(validator); + for (redelegation_start_epoch, unbonds) in &new_redelegated_unbonds { + for (src_validator, changes) in unbonds { + for (bond_start_epoch, change) in changes { + // total redelegated bonded + let bonded_sub_map = total_redelegated_bonded + .at(redelegation_start_epoch) + .at(src_validator); + bonded_sub_map.update( + storage, + *bond_start_epoch, + |current| current.unwrap_or_default() - *change, + )?; - // Update the validator set - // Allow bonding even if the validator is jailed. However, if jailed, there - // must be no changes to the validator set. Check at the pipeline epoch. 
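// Minimal sketch (hypothetical helper, not part of this change-set) of the
// guard spelled out in the comment above and in the `matches!` expression that
// follows: bonding is allowed while jailed or inactive, but in that case the
// validator-set structures are left untouched.
fn skips_validator_set_update(state: Option<ValidatorState>) -> bool {
    matches!(
        state,
        Some(ValidatorState::Jailed) | Some(ValidatorState::Inactive)
    )
}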
- let is_jailed_or_inactive_at_pipeline = matches!( - validator_state_handle.get(storage, offset_epoch, ¶ms)?, - Some(ValidatorState::Jailed) | Some(ValidatorState::Inactive) - ); - if !is_jailed_or_inactive_at_pipeline { - update_validator_set( - storage, - ¶ms, - validator, - amount.change(), - current_epoch, - offset_opt, - )?; + // total redelegated unbonded + let unbonded_sub_map = total_redelegated_unbonded + .at(&pipeline_epoch) + .at(redelegation_start_epoch) + .at(src_validator); + unbonded_sub_map.update( + storage, + *bond_start_epoch, + |current| current.unwrap_or_default() + *change, + )?; + } + } } - // Update the validator and total deltas - update_validator_deltas( + let slashes = find_validator_slashes(storage, validator)?; + // `val resultSlashing` + let result_slashing = compute_amount_after_slashing_unbond( storage, ¶ms, - validator, - amount.change(), - current_epoch, - offset_opt, - )?; - - update_total_deltas( - storage, - ¶ms, - amount.change(), - current_epoch, - offset_opt, + &new_unbonds_map, + &new_redelegated_unbonds, + slashes, )?; + #[cfg(debug_assertions)] + let redel_bonds_post = redelegated_bonds.collect_map(storage)?; + debug_assert!( + result_slashing.sum <= amount, + "Amount after slashing ({}) must be <= requested amount to unbond \ + ({}).", + result_slashing.sum.to_string_native(), + amount.to_string_native(), + ); - Ok(()) -} - -/// Insert the new validator into the right validator set (depending on its -/// stake) -fn insert_validator_into_validator_set( - storage: &mut S, - params: &PosParams, - address: &Address, - stake: token::Amount, - current_epoch: Epoch, - offset: u64, -) -> storage_api::Result<()> -where - S: StorageRead + StorageWrite, -{ - let target_epoch = current_epoch + offset; - let consensus_set = consensus_validator_set_handle().at(&target_epoch); - let below_cap_set = below_capacity_validator_set_handle().at(&target_epoch); - - let num_consensus_validators = - get_num_consensus_validators(storage, target_epoch)?; - - if stake < params.validator_stake_threshold { - validator_state_handle(address).set( - storage, - ValidatorState::BelowThreshold, - current_epoch, - offset, - )?; - } else if num_consensus_validators < params.max_validator_slots { - insert_validator_into_set( - &consensus_set.at(&stake), + let change_after_slashing = -result_slashing.sum.change(); + // Update the validator set at the pipeline offset. 
Since unbonding from a + // jailed validator who is no longer frozen is allowed, only update the + // validator set if the validator is not jailed + let is_jailed_or_inactive_at_pipeline = matches!( + validator_state_handle(validator).get( storage, - &target_epoch, - address, - )?; - validator_state_handle(address).set( + pipeline_epoch, + ¶ms + )?, + Some(ValidatorState::Jailed) | Some(ValidatorState::Inactive) + ); + if !is_jailed_or_inactive_at_pipeline { + update_validator_set( storage, - ValidatorState::Consensus, + ¶ms, + validator, + change_after_slashing, current_epoch, - offset, + None, )?; - } else { - // Check to see if the current genesis validator should replace one - // already in the consensus set - let min_consensus_amount = - get_min_consensus_validator_amount(&consensus_set, storage)?; - if stake > min_consensus_amount { - // Swap this genesis validator in and demote the last min consensus - // validator - let min_consensus_handle = consensus_set.at(&min_consensus_amount); - // Remove last min consensus validator - let last_min_consensus_position = - find_last_position(&min_consensus_handle, storage)?.expect( - "There must be always be at least 1 consensus validator", - ); - let removed = min_consensus_handle - .remove(storage, &last_min_consensus_position)? - .expect( - "There must be always be at least 1 consensus validator", - ); - // Insert last min consensus validator into the below-capacity set - insert_validator_into_set( - &below_cap_set.at(&min_consensus_amount.into()), - storage, - &target_epoch, - &removed, - )?; - validator_state_handle(&removed).set( - storage, - ValidatorState::BelowCapacity, - current_epoch, - offset, - )?; - // Insert the current genesis validator into the consensus set - insert_validator_into_set( - &consensus_set.at(&stake), - storage, - &target_epoch, - address, - )?; - // Update and set the validator states - validator_state_handle(address).set( - storage, - ValidatorState::Consensus, - current_epoch, - offset, - )?; - } else { - // Insert the current genesis validator into the below-capacity set - insert_validator_into_set( - &below_cap_set.at(&stake.into()), - storage, - &target_epoch, - address, - )?; - validator_state_handle(address).set( - storage, - ValidatorState::BelowCapacity, - current_epoch, - offset, - )?; - } - } - Ok(()) -} - -/// Update validator set at the pipeline epoch when a validator receives a new -/// bond and when its bond is unbonded (self-bond or delegation). 
-fn update_validator_set( - storage: &mut S, - params: &PosParams, - validator: &Address, - token_change: token::Change, - current_epoch: Epoch, - offset: Option, -) -> storage_api::Result<()> -where - S: StorageRead + StorageWrite, -{ - if token_change.is_zero() { - return Ok(()); - } - let offset = offset.unwrap_or(params.pipeline_len); - let epoch = current_epoch + offset; - tracing::debug!( - "Update epoch for validator set: {epoch}, validator: {validator}" - ); - let consensus_validator_set = consensus_validator_set_handle(); - let below_capacity_validator_set = below_capacity_validator_set_handle(); - - // Validator sets at the pipeline offset - let consensus_val_handle = consensus_validator_set.at(&epoch); - let below_capacity_val_handle = below_capacity_validator_set.at(&epoch); - - let tokens_pre = read_validator_stake(storage, params, validator, epoch)?; - - let tokens_post = tokens_pre - .change() - .checked_add(&token_change) - .expect("Post-validator set update token amount has overflowed"); - debug_assert!(tokens_post.non_negative()); - let tokens_post = token::Amount::from_change(tokens_post); - - // If token amounts both before and after the action are below the threshold - // stake, do nothing - if tokens_pre < params.validator_stake_threshold - && tokens_post < params.validator_stake_threshold - { - return Ok(()); - } - - // The position is only set when the validator is in consensus or - // below_capacity set (not in below_threshold set) - let position = - read_validator_set_position(storage, validator, epoch, params)?; - if let Some(position) = position { - let consensus_vals_pre = consensus_val_handle.at(&tokens_pre); - - let in_consensus = if consensus_vals_pre.contains(storage, &position)? { - let val_address = consensus_vals_pre.get(storage, &position)?; - debug_assert!(val_address.is_some()); - val_address == Some(validator.clone()) - } else { - false - }; - - if in_consensus { - // It's initially consensus - tracing::debug!("Target validator is consensus"); - - // First remove the consensus validator - consensus_vals_pre.remove(storage, &position)?; - - let max_below_capacity_validator_amount = - get_max_below_capacity_validator_amount( - &below_capacity_val_handle, - storage, - )? - .unwrap_or_default(); - - if tokens_post < params.validator_stake_threshold { - tracing::debug!( - "Demoting this validator to the below-threshold set" - ); - // Set the validator state as below-threshold - validator_state_handle(validator).set( - storage, - ValidatorState::BelowThreshold, - current_epoch, - offset, - )?; - - // Remove the validator's position from storage - validator_set_positions_handle() - .at(&epoch) - .remove(storage, validator)?; - - // Promote the next below-cap validator if there is one - if let Some(max_bc_amount) = - get_max_below_capacity_validator_amount( - &below_capacity_val_handle, - storage, - )? - { - // Remove the max below-capacity validator first - let below_capacity_vals_max = - below_capacity_val_handle.at(&max_bc_amount.into()); - let lowest_position = - find_first_position(&below_capacity_vals_max, storage)? - .unwrap(); - let removed_max_below_capacity = below_capacity_vals_max - .remove(storage, &lowest_position)? 
- .expect("Must have been removed"); - - // Insert the previous max below-capacity validator into the - // consensus set - insert_validator_into_set( - &consensus_val_handle.at(&max_bc_amount), - storage, - &epoch, - &removed_max_below_capacity, - )?; - validator_state_handle(&removed_max_below_capacity).set( - storage, - ValidatorState::Consensus, - current_epoch, - offset, - )?; - } - } else if tokens_post < max_below_capacity_validator_amount { - tracing::debug!( - "Demoting this validator to the below-capacity set and \ - promoting another to the consensus set" - ); - // Place the validator into the below-capacity set and promote - // the lowest position max below-capacity - // validator. - - // Remove the max below-capacity validator first - let below_capacity_vals_max = below_capacity_val_handle - .at(&max_below_capacity_validator_amount.into()); - let lowest_position = - find_first_position(&below_capacity_vals_max, storage)? - .unwrap(); - let removed_max_below_capacity = below_capacity_vals_max - .remove(storage, &lowest_position)? - .expect("Must have been removed"); - - // Insert the previous max below-capacity validator into the - // consensus set - insert_validator_into_set( - &consensus_val_handle - .at(&max_below_capacity_validator_amount), - storage, - &epoch, - &removed_max_below_capacity, - )?; - validator_state_handle(&removed_max_below_capacity).set( - storage, - ValidatorState::Consensus, - current_epoch, - offset, - )?; - - // Insert the current validator into the below-capacity set - insert_validator_into_set( - &below_capacity_val_handle.at(&tokens_post.into()), - storage, - &epoch, - validator, - )?; - validator_state_handle(validator).set( - storage, - ValidatorState::BelowCapacity, - current_epoch, - offset, - )?; - } else { - tracing::debug!("Validator remains in consensus set"); - // The current validator should remain in the consensus set - - // place it into a new position - insert_validator_into_set( - &consensus_val_handle.at(&tokens_post), - storage, - &epoch, - validator, - )?; - } - } else { - // It's initially below-capacity - tracing::debug!("Target validator is below-capacity"); - - let below_capacity_vals_pre = - below_capacity_val_handle.at(&tokens_pre.into()); - let removed = below_capacity_vals_pre.remove(storage, &position)?; - debug_assert!(removed.is_some()); - debug_assert_eq!(&removed.unwrap(), validator); - - let min_consensus_validator_amount = - get_min_consensus_validator_amount( - &consensus_val_handle, - storage, - )?; - - if tokens_post > min_consensus_validator_amount { - // Place the validator into the consensus set and demote the - // last position min consensus validator to the - // below-capacity set - tracing::debug!( - "Inserting validator into the consensus set and demoting \ - a consensus validator to the below-capacity set" - ); - - insert_into_consensus_and_demote_to_below_cap( - storage, - validator, - tokens_post, - min_consensus_validator_amount, - current_epoch, - offset, - &consensus_val_handle, - &below_capacity_val_handle, - )?; - } else if tokens_post >= params.validator_stake_threshold { - tracing::debug!("Validator remains in below-capacity set"); - // The current validator should remain in the below-capacity set - insert_validator_into_set( - &below_capacity_val_handle.at(&tokens_post.into()), - storage, - &epoch, - validator, - )?; - validator_state_handle(validator).set( - storage, - ValidatorState::BelowCapacity, - current_epoch, - offset, - )?; - } else { - // The current validator is demoted to the 
below-threshold set - tracing::debug!( - "Demoting this validator to the below-threshold set" - ); - - validator_state_handle(validator).set( - storage, - ValidatorState::BelowThreshold, - current_epoch, - offset, - )?; - - // Remove the validator's position from storage - validator_set_positions_handle() - .at(&epoch) - .remove(storage, validator)?; - } - } - } else { - // At non-zero offset (0 is genesis only) - if offset > 0 { - // If there is no position at pipeline offset, then the validator - // must be in the below-threshold set - debug_assert!(tokens_pre < params.validator_stake_threshold); - } - tracing::debug!("Target validator is below-threshold"); - - // Move the validator into the appropriate set - let num_consensus_validators = - get_num_consensus_validators(storage, epoch)?; - if num_consensus_validators < params.max_validator_slots { - // Just insert into the consensus set - tracing::debug!("Inserting validator into the consensus set"); - - insert_validator_into_set( - &consensus_val_handle.at(&tokens_post), - storage, - &epoch, - validator, - )?; - validator_state_handle(validator).set( - storage, - ValidatorState::Consensus, - current_epoch, - offset, - )?; - } else { - let min_consensus_validator_amount = - get_min_consensus_validator_amount( - &consensus_val_handle, - storage, - )?; - if tokens_post > min_consensus_validator_amount { - // Insert this validator into consensus and demote one into the - // below-capacity - tracing::debug!( - "Inserting validator into the consensus set and demoting \ - a consensus validator to the below-capacity set" - ); - - insert_into_consensus_and_demote_to_below_cap( - storage, - validator, - tokens_post, - min_consensus_validator_amount, - current_epoch, - offset, - &consensus_val_handle, - &below_capacity_val_handle, - )?; - } else { - // Insert this validator into below-capacity - tracing::debug!( - "Inserting validator into the below-capacity set" - ); - - insert_validator_into_set( - &below_capacity_val_handle.at(&tokens_post.into()), - storage, - &epoch, - validator, - )?; - validator_state_handle(validator).set( - storage, - ValidatorState::BelowCapacity, - current_epoch, - offset, - )?; - } - } } - Ok(()) -} - -#[allow(clippy::too_many_arguments)] -fn insert_into_consensus_and_demote_to_below_cap( - storage: &mut S, - validator: &Address, - tokens_post: token::Amount, - min_consensus_amount: token::Amount, - current_epoch: Epoch, - offset: u64, - consensus_set: &ConsensusValidatorSet, - below_capacity_set: &BelowCapacityValidatorSet, -) -> storage_api::Result<()> -where - S: StorageRead + StorageWrite, -{ - // First, remove the last position min consensus validator - let consensus_vals_min = consensus_set.at(&min_consensus_amount); - let last_position_of_min_consensus_vals = - find_last_position(&consensus_vals_min, storage)? - .expect("There must be always be at least 1 consensus validator"); - let removed_min_consensus = consensus_vals_min - .remove(storage, &last_position_of_min_consensus_vals)? 
- .expect("There must be always be at least 1 consensus validator"); - - let offset_epoch = current_epoch + offset; - - // Insert the min consensus validator into the below-capacity - // set - insert_validator_into_set( - &below_capacity_set.at(&min_consensus_amount.into()), - storage, - &offset_epoch, - &removed_min_consensus, - )?; - validator_state_handle(&removed_min_consensus).set( - storage, - ValidatorState::BelowCapacity, - current_epoch, - offset, - )?; - - // Insert the current validator into the consensus set - insert_validator_into_set( - &consensus_set.at(&tokens_post), + // Update the validator and total deltas at the pipeline offset + update_validator_deltas( storage, - &offset_epoch, + ¶ms, validator, + change_after_slashing, + current_epoch, + None, )?; - validator_state_handle(validator).set( + update_total_deltas( storage, - ValidatorState::Consensus, + ¶ms, + change_after_slashing, current_epoch, - offset, + None, )?; - Ok(()) -} - -/// Copy the consensus and below-capacity validator sets and positions into a -/// future epoch. Also copies the epoched set of all known validators in the -/// network. -pub fn copy_validator_sets_and_positions( - storage: &mut S, - params: &PosParams, - current_epoch: Epoch, - target_epoch: Epoch, -) -> storage_api::Result<()> -where - S: StorageRead + StorageWrite, -{ - let prev_epoch = target_epoch.prev(); - - let consensus_validator_set = consensus_validator_set_handle(); - let below_capacity_validator_set = below_capacity_validator_set_handle(); - - let (consensus, below_capacity) = ( - consensus_validator_set.at(&prev_epoch), - below_capacity_validator_set.at(&prev_epoch), - ); - debug_assert!(!consensus.is_empty(storage)?); - - // Need to copy into memory here to avoid borrowing a ref - // simultaneously as immutable and mutable - let mut consensus_in_mem: HashMap<(token::Amount, Position), Address> = - HashMap::new(); - let mut below_cap_in_mem: HashMap< - (ReverseOrdTokenAmount, Position), - Address, - > = HashMap::new(); - - for val in consensus.iter(storage)? { - let ( - NestedSubKey::Data { - key: stake, - nested_sub_key: SubKey::Data(position), - }, - address, - ) = val?; - consensus_in_mem.insert((stake, position), address); - } - for val in below_capacity.iter(storage)? 
{ - let ( - NestedSubKey::Data { - key: stake, - nested_sub_key: SubKey::Data(position), - }, - address, - ) = val?; - below_cap_in_mem.insert((stake, position), address); - } - for ((val_stake, val_position), val_address) in consensus_in_mem.into_iter() - { - consensus_validator_set - .at(&target_epoch) - .at(&val_stake) - .insert(storage, val_position, val_address)?; + if tracing::level_enabled!(tracing::Level::DEBUG) { + let bonds = find_bonds(storage, source, validator)?; + tracing::debug!("\nBonds after decrementing: {bonds:#?}"); } - for ((val_stake, val_position), val_address) in below_cap_in_mem.into_iter() + // Invariant: in the affected epochs, the delta of bonds must be >= delta of + // redelegated bonds deltas sum + #[cfg(debug_assertions)] { - below_capacity_validator_set - .at(&target_epoch) - .at(&val_stake) - .insert(storage, val_position, val_address)?; - } - // Purge consensus and below-capacity validator sets - consensus_validator_set.update_data(storage, params, current_epoch)?; - below_capacity_validator_set.update_data(storage, params, current_epoch)?; - - // Copy validator positions - let mut positions = HashMap::::default(); - let validator_set_positions_handle = validator_set_positions_handle(); - let positions_handle = validator_set_positions_handle.at(&prev_epoch); - - for result in positions_handle.iter(storage)? { - let (validator, position) = result?; - positions.insert(validator, position); - } - - let new_positions_handle = validator_set_positions_handle.at(&target_epoch); - for (validator, position) in positions { - let prev = new_positions_handle.insert(storage, validator, position)?; - debug_assert!(prev.is_none()); - } - validator_set_positions_handle.set_last_update(storage, current_epoch)?; - - // Purge old epochs of validator positions - validator_set_positions_handle.update_data( - storage, - params, - current_epoch, - )?; - - // Copy set of all validator addresses - let mut all_validators = HashSet::
::default(); - let validator_addresses_handle = validator_addresses_handle(); - let all_validators_handle = validator_addresses_handle.at(&prev_epoch); - for result in all_validators_handle.iter(storage)? { - let validator = result?; - all_validators.insert(validator); - } - let new_all_validators_handle = - validator_addresses_handle.at(&target_epoch); - for validator in all_validators { - let was_in = new_all_validators_handle.insert(storage, validator)?; - debug_assert!(!was_in); - } - - // Purge old epochs of all validator addresses - validator_addresses_handle.update_data(storage, params, current_epoch)?; - - Ok(()) -} - -/// Compute total validator stake for the current epoch -fn compute_total_consensus_stake( - storage: &S, - epoch: Epoch, -) -> storage_api::Result -where - S: StorageRead, -{ - consensus_validator_set_handle() - .at(&epoch) - .iter(storage)? - .fold(Ok(token::Amount::zero()), |acc, entry| { - let acc = acc?; - let ( - NestedSubKey::Data { - key: amount, - nested_sub_key: _, - }, - _validator, - ) = entry?; - Ok(acc.checked_add(amount).expect( - "Total consensus stake computation should not overflow.", - )) - }) -} - -/// Compute and then store the total consensus stake -pub fn compute_and_store_total_consensus_stake( - storage: &mut S, - epoch: Epoch, -) -> storage_api::Result<()> -where - S: StorageRead + StorageWrite, -{ - let total = compute_total_consensus_stake(storage, epoch)?; - tracing::debug!( - "Total consensus stake for epoch {}: {}", - epoch, - total.to_string_native() - ); - total_consensus_stake_key_handle().set(storage, total, epoch, 0) -} - -/// Read the position of the validator in the subset of validators that have the -/// same bonded stake. This information is held in its own epoched structure in -/// addition to being inside the validator sets. -fn read_validator_set_position( - storage: &S, - validator: &Address, - epoch: Epoch, - _params: &PosParams, -) -> storage_api::Result> -where - S: StorageRead, -{ - let handle = validator_set_positions_handle(); - handle.get_data_handler().at(&epoch).get(storage, validator) -} - -/// Find the first (lowest) position in a validator set if it is not empty -fn find_first_position( - handle: &ValidatorPositionAddresses, - storage: &S, -) -> storage_api::Result> -where - S: StorageRead, -{ - let lowest_position = handle - .iter(storage)? - .next() - .transpose()? - .map(|(position, _addr)| position); - Ok(lowest_position) -} - -/// Find the last (greatest) position in a validator set if it is not empty -fn find_last_position( - handle: &ValidatorPositionAddresses, - storage: &S, -) -> storage_api::Result> -where - S: StorageRead, -{ - let position = handle - .iter(storage)? - .last() - .transpose()? - .map(|(position, _addr)| position); - Ok(position) -} - -/// Find next position in a validator set or 0 if empty -fn find_next_position( - handle: &ValidatorPositionAddresses, - storage: &S, -) -> storage_api::Result -where - S: StorageRead, -{ - let position_iter = handle.iter(storage)?; - let next = position_iter - .last() - .transpose()? - .map(|(position, _address)| position.next()) - .unwrap_or_default(); - Ok(next) -} - -fn get_min_consensus_validator_amount( - handle: &ConsensusValidatorSet, - storage: &S, -) -> storage_api::Result -where - S: StorageRead, -{ - Ok(handle - .iter(storage)? - .next() - .transpose()? 
- .map(|(subkey, _address)| match subkey { - NestedSubKey::Data { - key, - nested_sub_key: _, - } => key, - }) - .unwrap_or_default()) -} - -/// Returns `Ok(None)` when the below capacity set is empty. -fn get_max_below_capacity_validator_amount( - handle: &BelowCapacityValidatorSet, - storage: &S, -) -> storage_api::Result> -where - S: StorageRead, -{ - Ok(handle - .iter(storage)? - .next() - .transpose()? - .map(|(subkey, _address)| match subkey { - NestedSubKey::Data { - key, - nested_sub_key: _, - } => token::Amount::from(key), - })) -} - -fn insert_validator_into_set( - handle: &ValidatorPositionAddresses, - storage: &mut S, - epoch: &Epoch, - address: &Address, -) -> storage_api::Result<()> -where - S: StorageRead + StorageWrite, -{ - let next_position = find_next_position(handle, storage)?; - tracing::debug!( - "Inserting validator {} into position {:?} at epoch {}", - address.clone(), - next_position.clone(), - epoch.clone() - ); - handle.insert(storage, next_position, address.clone())?; - validator_set_positions_handle().at(epoch).insert( - storage, - address.clone(), - next_position, - )?; - Ok(()) -} - -/// Used below in `fn unbond_tokens` to update the bond and unbond amounts -#[derive(Eq, Hash, PartialEq)] -struct BondAndUnbondUpdates { - bond_start: Epoch, - new_bond_value: token::Change, - unbond_value: token::Change, -} - -/// Temp: In quint this is from `ResultUnbondTx` field `resultSlashing: {sum: -/// int, epochMap: Epoch -> int}` -#[derive(Debug, Default)] -pub struct ResultSlashing { - /// The token amount unbonded from the validator stake after accounting for - /// slashes - pub sum: token::Amount, - /// Map from bond start epoch to token amount after slashing - pub epoch_map: BTreeMap, -} - -/// Unbond tokens that are bonded between a validator and a source (self or -/// delegator). -/// -/// This fn is also called during redelegation for a source validator, in -/// which case the `is_redelegation` param must be true. -pub fn unbond_tokens( - storage: &mut S, - source: Option<&Address>, - validator: &Address, - amount: token::Amount, - current_epoch: Epoch, - is_redelegation: bool, -) -> storage_api::Result -where - S: StorageRead + StorageWrite, -{ - if amount.is_zero() { - return Ok(ResultSlashing::default()); - } - - let params = read_pos_params(storage)?; - let pipeline_epoch = current_epoch + params.pipeline_len; - let withdrawable_epoch = current_epoch + params.withdrawable_epoch_offset(); - tracing::debug!( - "Unbonding token amount {} at epoch {}, withdrawable at epoch {}", - amount.to_string_native(), - current_epoch, - withdrawable_epoch - ); - - // Make sure source is not some other validator - if let Some(source) = source { - if source != validator && is_validator(storage, source)? { - return Err( - BondError::SourceMustNotBeAValidator(source.clone()).into() - ); - } - } - // Make sure the target is actually a validator - if !is_validator(storage, validator)? { - return Err(BondError::NotAValidator(validator.clone()).into()); - } - // Make sure the validator is not currently frozen - if is_validator_frozen(storage, validator, current_epoch, ¶ms)? { - return Err(UnbondError::ValidatorIsFrozen(validator.clone()).into()); - } - - let source = source.unwrap_or(validator); - let bonds_handle = bond_handle(source, validator); - - // Make sure there are enough tokens left in the bond at the pipeline offset - let remaining_at_pipeline = bonds_handle - .get_sum(storage, pipeline_epoch, ¶ms)? 
- .unwrap_or_default(); - if amount > remaining_at_pipeline { - return Err(UnbondError::UnbondAmountGreaterThanBond( - amount.to_string_native(), - remaining_at_pipeline.to_string_native(), - ) - .into()); - } - - if tracing::level_enabled!(tracing::Level::DEBUG) { - let bonds = find_bonds(storage, source, validator)?; - tracing::debug!("\nBonds before decrementing: {bonds:#?}"); - } - - let unbonds = unbond_handle(source, validator); - - let redelegated_bonds = - delegator_redelegated_bonds_handle(source).at(validator); - - #[cfg(debug_assertions)] - let redel_bonds_pre = redelegated_bonds.collect_map(storage)?; - - // `resultUnbonding` - // Find the bonds to fully unbond (remove) and one to partially unbond, if - // necessary - let bonds_to_unbond = find_bonds_to_remove( - storage, - &bonds_handle.get_data_handler(), - amount, - )?; - - // `modifiedRedelegation` - // A bond may have both redelegated and non-redelegated tokens in it. If - // this is the case, compute the modified state of the redelegation. - let modified_redelegation = match bonds_to_unbond.new_entry { - Some((bond_epoch, new_bond_amount)) => { - if redelegated_bonds.contains(storage, &bond_epoch)? { - let cur_bond_amount = bonds_handle - .get_delta_val(storage, bond_epoch)? - .unwrap_or_default(); - compute_modified_redelegation( - storage, - &redelegated_bonds.at(&bond_epoch), - bond_epoch, - cur_bond_amount - new_bond_amount, - )? - } else { - ModifiedRedelegation::default() - } - } - None => ModifiedRedelegation::default(), - }; - - // Compute the new unbonds eagerly - // `keysUnbonds` - // Get a set of epochs from which we're unbonding (fully and partially). - let bond_epochs_to_unbond = - if let Some((start_epoch, _)) = bonds_to_unbond.new_entry { - let mut to_remove = bonds_to_unbond.epochs.clone(); - to_remove.insert(start_epoch); - to_remove - } else { - bonds_to_unbond.epochs.clone() - }; - - // `newUnbonds` - // For each epoch we're unbonding, find the amount that's being unbonded. - // For full unbonds, this is the current bond value. For partial unbonds - // it is a difference between the current and new bond amount. - let new_unbonds_map = bond_epochs_to_unbond - .into_iter() - .map(|epoch| { - let cur_bond_value = bonds_handle - .get_delta_val(storage, epoch) - .unwrap() - .unwrap_or_default(); - let value = if let Some((start_epoch, new_bond_amount)) = - bonds_to_unbond.new_entry - { - if start_epoch == epoch { - cur_bond_value - new_bond_amount - } else { - cur_bond_value - } - } else { - cur_bond_value - }; - (epoch, value) - }) - .collect::>(); - - // `updatedBonded` - // Remove bonds for all the full unbonds. - for epoch in &bonds_to_unbond.epochs { - bonds_handle.get_data_handler().remove(storage, epoch)?; - } - // Replace bond amount for partial unbond, if any. - if let Some((bond_epoch, new_bond_amount)) = bonds_to_unbond.new_entry { - bonds_handle.set(storage, new_bond_amount, bond_epoch, 0)?; - } - - // `updatedUnbonded` - // Update the unbonds in storage using the eager map computed above - if !is_redelegation { - for (start_epoch, &unbond_amount) in new_unbonds_map.iter() { - unbonds.at(start_epoch).update( - storage, - withdrawable_epoch, - |cur_val| cur_val.unwrap_or_default() + unbond_amount, - )?; - } - } - - // `newRedelegatedUnbonds` - // This is what the delegator's redelegated unbonds would look like if this - // was the only unbond in the PoS system. 
We need to add these redelegated - // unbonds to the existing redelegated unbonds - let new_redelegated_unbonds = compute_new_redelegated_unbonds( - storage, - &redelegated_bonds, - &bonds_to_unbond.epochs, - &modified_redelegation, - )?; - - // `updatedRedelegatedBonded` - // NOTE: for now put this here after redelegated unbonds calc bc that one - // uses the pre-modified redelegated bonds from storage! - // First remove redelegation entries in epochs with full unbonds. - for epoch_to_remove in &bonds_to_unbond.epochs { - redelegated_bonds.remove_all(storage, epoch_to_remove)?; - } - if let Some(epoch) = modified_redelegation.epoch { - tracing::debug!("\nIs modified redelegation"); - if modified_redelegation.validators_to_remove.is_empty() { - redelegated_bonds.remove_all(storage, &epoch)?; - } else { - // Then update the redelegated bonds at this epoch - let rbonds = redelegated_bonds.at(&epoch); - update_redelegated_bonds(storage, &rbonds, &modified_redelegation)?; - } - } - - if !is_redelegation { - // `val updatedRedelegatedUnbonded` with updates applied below - // Delegator's redelegated unbonds to this validator. - let delegator_redelegated_unbonded = - delegator_redelegated_unbonds_handle(source).at(validator); - - // Quint `def updateRedelegatedUnbonded` with `val - // updatedRedelegatedUnbonded` together with last statement - // in `updatedDelegator.with("redelegatedUnbonded", ...` updated - // directly in storage - for (start, unbonds) in &new_redelegated_unbonds { - let this_redelegated_unbonded = delegator_redelegated_unbonded - .at(start) - .at(&withdrawable_epoch); - - // Update the delegator's redelegated unbonds with the change - for (src_validator, redelegated_unbonds) in unbonds { - let redelegated_unbonded = - this_redelegated_unbonded.at(src_validator); - for (&redelegation_epoch, &change) in redelegated_unbonds { - redelegated_unbonded.update( - storage, - redelegation_epoch, - |current| current.unwrap_or_default() + change, - )?; - } - } - } - } - // all `val updatedDelegator` changes are applied at this point - - // `val updatedTotalBonded` and `val updatedTotalUnbonded` with updates - // Update the validator's total bonded and unbonded amounts - let total_bonded = total_bonded_handle(validator).get_data_handler(); - let total_unbonded = total_unbonded_handle(validator).at(&pipeline_epoch); - for (&start_epoch, &amount) in &new_unbonds_map { - total_bonded.update(storage, start_epoch, |current| { - current.unwrap_or_default() - amount - })?; - total_unbonded.update(storage, start_epoch, |current| { - current.unwrap_or_default() + amount - })?; - } - - let total_redelegated_bonded = - validator_total_redelegated_bonded_handle(validator); - let total_redelegated_unbonded = - validator_total_redelegated_unbonded_handle(validator); - for (redelegation_start_epoch, unbonds) in &new_redelegated_unbonds { - for (src_validator, changes) in unbonds { - for (bond_start_epoch, change) in changes { - // total redelegated bonded - let bonded_sub_map = total_redelegated_bonded - .at(redelegation_start_epoch) - .at(src_validator); - bonded_sub_map.update( - storage, - *bond_start_epoch, - |current| current.unwrap_or_default() - *change, - )?; - - // total redelegated unbonded - let unbonded_sub_map = total_redelegated_unbonded - .at(&pipeline_epoch) - .at(redelegation_start_epoch) - .at(src_validator); - unbonded_sub_map.update( - storage, - *bond_start_epoch, - |current| current.unwrap_or_default() + *change, - )?; - } - } - } - - let slashes = find_validator_slashes(storage, 
validator)?; - // `val resultSlashing` - let result_slashing = compute_amount_after_slashing_unbond( - storage, - ¶ms, - &new_unbonds_map, - &new_redelegated_unbonds, - slashes, - )?; - #[cfg(debug_assertions)] - let redel_bonds_post = redelegated_bonds.collect_map(storage)?; - debug_assert!( - result_slashing.sum <= amount, - "Amount after slashing ({}) must be <= requested amount to unbond \ - ({}).", - result_slashing.sum.to_string_native(), - amount.to_string_native(), - ); - - let change_after_slashing = -result_slashing.sum.change(); - // Update the validator set at the pipeline offset. Since unbonding from a - // jailed validator who is no longer frozen is allowed, only update the - // validator set if the validator is not jailed - let is_jailed_or_inactive_at_pipeline = matches!( - validator_state_handle(validator).get( - storage, - pipeline_epoch, - ¶ms - )?, - Some(ValidatorState::Jailed) | Some(ValidatorState::Inactive) - ); - if !is_jailed_or_inactive_at_pipeline { - update_validator_set( - storage, - ¶ms, - validator, - change_after_slashing, - current_epoch, - None, - )?; - } - - // Update the validator and total deltas at the pipeline offset - update_validator_deltas( - storage, - ¶ms, - validator, - change_after_slashing, - current_epoch, - None, - )?; - update_total_deltas( - storage, - ¶ms, - change_after_slashing, - current_epoch, - None, - )?; - - if tracing::level_enabled!(tracing::Level::DEBUG) { - let bonds = find_bonds(storage, source, validator)?; - tracing::debug!("\nBonds after decrementing: {bonds:#?}"); - } - - // Invariant: in the affected epochs, the delta of bonds must be >= delta of - // redelegated bonds deltas sum - #[cfg(debug_assertions)] - { - let mut epochs = bonds_to_unbond.epochs.clone(); - if let Some((epoch, _)) = bonds_to_unbond.new_entry { - epochs.insert(epoch); - } - for epoch in epochs { - let cur_bond = bonds_handle - .get_delta_val(storage, epoch)? - .unwrap_or_default(); - let redelegated_deltas = redelegated_bonds - .at(&epoch) - // Sum of redelegations from any src validator - .collect_map(storage)? - .into_values() - .map(|redeleg| redeleg.into_values().sum()) - .sum(); - debug_assert!( - cur_bond >= redelegated_deltas, - "After unbonding, in epoch {epoch} the bond amount {} must be \ - >= redelegated deltas at pipeline {}.\n\nredelegated_bonds \ - pre: {redel_bonds_pre:#?}\nredelegated_bonds post: \ - {redel_bonds_post:#?},\nmodified_redelegation: \ - {modified_redelegation:#?},\nbonds_to_unbond: \ - {bonds_to_unbond:#?}", - cur_bond.to_string_native(), - redelegated_deltas.to_string_native() - ); - } - } - - // Tally rewards (only call if this is not the first epoch) - if current_epoch > Epoch::default() { - let mut rewards = token::Amount::zero(); - - let last_claim_epoch = - get_last_reward_claim_epoch(storage, source, validator)? 
- .unwrap_or_default(); - let rewards_products = validator_rewards_products_handle(validator); - - for (start_epoch, slashed_amount) in &result_slashing.epoch_map { - // Stop collecting rewards at the moment the unbond is initiated - // (right now) - for ep in - Epoch::iter_bounds_inclusive(*start_epoch, current_epoch.prev()) - { - // Consider the last epoch when rewards were claimed - if ep < last_claim_epoch { - continue; - } - let rp = - rewards_products.get(storage, &ep)?.unwrap_or_default(); - rewards += rp * (*slashed_amount); - } - } - - // Update the rewards from the current unbonds first - add_rewards_to_counter(storage, source, validator, rewards)?; - } - - Ok(result_slashing) -} - -#[derive(Debug, Default, Eq, PartialEq)] -struct FoldRedelegatedBondsResult { - total_redelegated: token::Amount, - total_after_slashing: token::Amount, -} - -/// Iterates over a `redelegated_unbonds` and computes the both the sum of all -/// redelegated tokens and how much is left after applying all relevant slashes. -// `def foldAndSlashRedelegatedBondsMap` -fn fold_and_slash_redelegated_bonds( - storage: &S, - params: &OwnedPosParams, - redelegated_unbonds: &EagerRedelegatedBondsMap, - start_epoch: Epoch, - list_slashes: &[Slash], - slash_epoch_filter: impl Fn(Epoch) -> bool, -) -> FoldRedelegatedBondsResult -where - S: StorageRead, -{ - let mut result = FoldRedelegatedBondsResult::default(); - for (src_validator, bonds_map) in redelegated_unbonds { - for (bond_start, &change) in bonds_map { - // Merge the two lists of slashes - let mut merged: Vec = - // Look-up slashes for this validator ... - validator_slashes_handle(src_validator) - .iter(storage) - .unwrap() - .map(Result::unwrap) - .filter(|slash| { - params.in_redelegation_slashing_window( - slash.epoch, - params.redelegation_start_epoch_from_end( - start_epoch, - ), - start_epoch, - ) && *bond_start <= slash.epoch - && slash_epoch_filter(slash.epoch) - }) - // ... and add `list_slashes` - .chain(list_slashes.iter().cloned()) - .collect(); - - // Sort slashes by epoch - merged.sort_by(|s1, s2| s1.epoch.partial_cmp(&s2.epoch).unwrap()); - - result.total_redelegated += change; - result.total_after_slashing += - apply_list_slashes(params, &merged, change); - } - } - result -} - -/// Computes how much remains from an amount of tokens after applying a list of -/// slashes. -/// -/// - `slashes` - a list of slashes ordered by misbehaving epoch. -/// - `amount` - the amount of slashable tokens. -// `def applyListSlashes` -fn apply_list_slashes( - params: &OwnedPosParams, - slashes: &[Slash], - amount: token::Amount, -) -> token::Amount { - let mut final_amount = amount; - let mut computed_slashes = BTreeMap::::new(); - for slash in slashes { - let slashed_amount = - compute_slashable_amount(params, slash, amount, &computed_slashes); - final_amount = - final_amount.checked_sub(slashed_amount).unwrap_or_default(); - computed_slashes.insert(slash.epoch, slashed_amount); - } - final_amount -} - -/// Computes how much is left from a bond or unbond after applying a slash given -/// that a set of slashes may have been previously applied. -// `def computeSlashableAmount` -fn compute_slashable_amount( - params: &OwnedPosParams, - slash: &Slash, - amount: token::Amount, - computed_slashes: &BTreeMap, -) -> token::Amount { - let updated_amount = computed_slashes - .iter() - .filter(|(&epoch, _)| { - // Keep slashes that have been applied and processed before the - // current slash occurred. 
We use `<=` because slashes processed at - // `slash.epoch` (at the start of the epoch) are also processed - // before this slash occurred. - epoch + params.slash_processing_epoch_offset() <= slash.epoch - }) - .fold(amount, |acc, (_, &amnt)| { - acc.checked_sub(amnt).unwrap_or_default() - }); - updated_amount.mul_ceil(slash.rate) -} - -/// Epochs for full and partial unbonds. -#[derive(Debug, Default)] -struct BondsForRemovalRes { - /// Full unbond epochs - pub epochs: BTreeSet, - /// Partial unbond epoch associated with the new bond amount - pub new_entry: Option<(Epoch, token::Amount)>, -} - -/// In decreasing epoch order, decrement the non-zero bond amount entries until -/// the full `amount` has been removed. Returns a `BondsForRemovalRes` object -/// that contains the epochs for which the full bond amount is removed and -/// additionally information for the one epoch whose bond amount is partially -/// removed, if any. -fn find_bonds_to_remove( - storage: &S, - bonds_handle: &LazyMap, - amount: token::Amount, -) -> storage_api::Result -where - S: StorageRead, -{ - #[allow(clippy::needless_collect)] - let bonds: Vec> = bonds_handle.iter(storage)?.collect(); - - let mut bonds_for_removal = BondsForRemovalRes::default(); - let mut remaining = amount; - - for bond in bonds.into_iter().rev() { - let (bond_epoch, bond_amount) = bond?; - let to_unbond = cmp::min(bond_amount, remaining); - if to_unbond == bond_amount { - bonds_for_removal.epochs.insert(bond_epoch); - } else { - bonds_for_removal.new_entry = - Some((bond_epoch, bond_amount - to_unbond)); - } - remaining -= to_unbond; - if remaining.is_zero() { - break; - } - } - Ok(bonds_for_removal) -} - -#[derive(Debug, Default, PartialEq, Eq)] -struct ModifiedRedelegation { - epoch: Option, - validators_to_remove: BTreeSet
<Address>, - validator_to_modify: Option<Address>
, - epochs_to_remove: BTreeSet, - epoch_to_modify: Option, - new_amount: Option, -} - -/// Used in `fn unbond_tokens` to compute the modified state of a redelegation -/// if redelegated tokens are being unbonded. -fn compute_modified_redelegation( - storage: &S, - redelegated_bonds: &RedelegatedTokens, - start_epoch: Epoch, - amount_to_unbond: token::Amount, -) -> storage_api::Result -where - S: StorageRead, -{ - let mut modified_redelegation = ModifiedRedelegation::default(); - - let mut src_validators = BTreeSet::
::new(); - let mut total_redelegated = token::Amount::zero(); - for rb in redelegated_bonds.iter(storage)? { - let ( - NestedSubKey::Data { - key: src_validator, - nested_sub_key: _, - }, - amount, - ) = rb?; - total_redelegated += amount; - src_validators.insert(src_validator); - } - - modified_redelegation.epoch = Some(start_epoch); - - // If the total amount of redelegated bonds is less than the target amount, - // then all redelegated bonds must be unbonded. - if total_redelegated <= amount_to_unbond { - return Ok(modified_redelegation); - } - - let mut remaining = amount_to_unbond; - for src_validator in src_validators.into_iter() { - if remaining.is_zero() { - break; - } - let rbonds = redelegated_bonds.at(&src_validator); - let total_src_val_amount = rbonds - .iter(storage)? - .map(|res| { - let (_, amount) = res?; - Ok(amount) - }) - .sum::>()?; - - // TODO: move this into the `if total_redelegated <= remaining` branch - // below, then we don't have to remove it in `fn - // update_redelegated_bonds` when `validator_to_modify` is Some (and - // avoid `modified_redelegation.validators_to_remove.clone()`). - // It affects assumption 2. in `fn compute_new_redelegated_unbonds`, but - // that looks trivial to change. - // NOTE: not sure if this TODO is still relevant... - modified_redelegation - .validators_to_remove - .insert(src_validator.clone()); - if total_src_val_amount <= remaining { - remaining -= total_src_val_amount; - } else { - let bonds_to_remove = - find_bonds_to_remove(storage, &rbonds, remaining)?; - - remaining = token::Amount::zero(); - - // NOTE: When there are multiple `src_validators` from which we're - // unbonding, `validator_to_modify` cannot get overridden, because - // only one of them can be a partial unbond (`new_entry` - // is partial unbond) - if let Some((bond_epoch, new_bond_amount)) = - bonds_to_remove.new_entry - { - modified_redelegation.validator_to_modify = Some(src_validator); - modified_redelegation.epochs_to_remove = { - let mut epochs = bonds_to_remove.epochs; - // TODO: remove this insertion then we don't have to remove - // it again in `fn update_redelegated_bonds` - // when `epoch_to_modify` is Some (and avoid - // `modified_redelegation.epochs_to_remove.clone`) - // It affects assumption 3. in `fn - // compute_new_redelegated_unbonds`, but that also looks - // trivial to change. 
- epochs.insert(bond_epoch); - epochs - }; - modified_redelegation.epoch_to_modify = Some(bond_epoch); - modified_redelegation.new_amount = Some(new_bond_amount); - } else { - modified_redelegation.validator_to_modify = Some(src_validator); - modified_redelegation.epochs_to_remove = bonds_to_remove.epochs; - } - } - } - Ok(modified_redelegation) -} - -fn update_redelegated_bonds( - storage: &mut S, - redelegated_bonds: &RedelegatedTokens, - modified_redelegation: &ModifiedRedelegation, -) -> storage_api::Result<()> -where - S: StorageRead + StorageWrite, -{ - if let Some(val_to_modify) = &modified_redelegation.validator_to_modify { - let mut updated_vals_to_remove = - modified_redelegation.validators_to_remove.clone(); - updated_vals_to_remove.remove(val_to_modify); - - // Remove the updated_vals_to_remove keys from the - // redelegated_bonds map - for val in &updated_vals_to_remove { - redelegated_bonds.remove_all(storage, val)?; - } - - if let Some(epoch_to_modify) = modified_redelegation.epoch_to_modify { - let mut updated_epochs_to_remove = - modified_redelegation.epochs_to_remove.clone(); - updated_epochs_to_remove.remove(&epoch_to_modify); - let val_bonds_to_modify = redelegated_bonds.at(val_to_modify); - for epoch in updated_epochs_to_remove { - val_bonds_to_modify.remove(storage, &epoch)?; - } - val_bonds_to_modify.insert( - storage, - epoch_to_modify, - modified_redelegation.new_amount.unwrap(), - )?; - } else { - // Then remove to epochs_to_remove from the redelegated bonds of the - // val_to_modify - let val_bonds_to_modify = redelegated_bonds.at(val_to_modify); - for epoch in &modified_redelegation.epochs_to_remove { - val_bonds_to_modify.remove(storage, epoch)?; - } - } - } else { - // Remove all validators in modified_redelegation.validators_to_remove - // from redelegated_bonds - for val in &modified_redelegation.validators_to_remove { - redelegated_bonds.remove_all(storage, val)?; - } - } - Ok(()) -} - -/// Temp helper type to match quint model. -/// Result of `compute_new_redelegated_unbonds` that contains a map of -/// redelegated unbonds. -/// The map keys from outside in are: -/// -/// - redelegation end epoch where redeleg stops contributing to src validator -/// - src validator address -/// - src bond start epoch where it started contributing to src validator -type EagerRedelegatedUnbonds = BTreeMap; - -/// Computes a map of redelegated unbonds from a set of redelegated bonds. -/// -/// - `redelegated_bonds` - a map of redelegated bonds from epoch to -/// `RedelegatedTokens`. -/// - `epochs_to_remove` - a set of epochs that indicate the set of epochs -/// unbonded. -/// - `modified` record that represents a redelegated bond that it is only -/// partially unbonded. -/// -/// The function assumes that: -/// -/// 1. `modified.epoch` is not in the `epochs_to_remove` set. -/// 2. `modified.validator_to_modify` is in `modified.vals_to_remove`. -/// 3. `modified.epoch_to_modify` is in in `modified.epochs_to_remove`. -// `def computeNewRedelegatedUnbonds` from Quint -fn compute_new_redelegated_unbonds( - storage: &S, - redelegated_bonds: &RedelegatedBondsOrUnbonds, - epochs_to_remove: &BTreeSet, - modified: &ModifiedRedelegation, -) -> storage_api::Result -where - S: StorageRead + StorageWrite, -{ - let unbonded_epochs = if let Some(epoch) = modified.epoch { - debug_assert!( - !epochs_to_remove.contains(&epoch), - "1. 
assumption in `fn compute_new_redelegated_unbonds` doesn't \ - hold" - ); - let mut epochs = epochs_to_remove.clone(); - epochs.insert(epoch); - epochs - .iter() - .cloned() - .filter(|e| redelegated_bonds.contains(storage, e).unwrap()) - .collect::>() - } else { - epochs_to_remove - .iter() - .cloned() - .filter(|e| redelegated_bonds.contains(storage, e).unwrap()) - .collect::>() - }; - debug_assert!( - modified - .validator_to_modify - .as_ref() - .map(|validator| modified.validators_to_remove.contains(validator)) - .unwrap_or(true), - "2. assumption in `fn compute_new_redelegated_unbonds` doesn't hold" - ); - debug_assert!( - modified - .epoch_to_modify - .as_ref() - .map(|epoch| modified.epochs_to_remove.contains(epoch)) - .unwrap_or(true), - "3. assumption in `fn compute_new_redelegated_unbonds` doesn't hold" - ); - - // quint `newRedelegatedUnbonds` returned from - // `computeNewRedelegatedUnbonds` - let new_redelegated_unbonds: EagerRedelegatedUnbonds = unbonded_epochs - .into_iter() - .map(|start| { - let mut rbonds = EagerRedelegatedBondsMap::default(); - if modified - .epoch - .map(|redelegation_epoch| start != redelegation_epoch) - .unwrap_or(true) - || modified.validators_to_remove.is_empty() - { - for res in redelegated_bonds.at(&start).iter(storage).unwrap() { - let ( - NestedSubKey::Data { - key: validator, - nested_sub_key: SubKey::Data(epoch), - }, - amount, - ) = res.unwrap(); - rbonds - .entry(validator.clone()) - .or_default() - .insert(epoch, amount); - } - (start, rbonds) - } else { - for src_validator in &modified.validators_to_remove { - if modified - .validator_to_modify - .as_ref() - .map(|validator| src_validator != validator) - .unwrap_or(true) - { - let raw_bonds = - redelegated_bonds.at(&start).at(src_validator); - for res in raw_bonds.iter(storage).unwrap() { - let (bond_epoch, bond_amount) = res.unwrap(); - rbonds - .entry(src_validator.clone()) - .or_default() - .insert(bond_epoch, bond_amount); - } - } else { - for bond_start in &modified.epochs_to_remove { - let cur_redel_bond_amount = redelegated_bonds - .at(&start) - .at(src_validator) - .get(storage, bond_start) - .unwrap() - .unwrap_or_default(); - let raw_bonds = rbonds - .entry(src_validator.clone()) - .or_default(); - if modified - .epoch_to_modify - .as_ref() - .map(|epoch| bond_start != epoch) - .unwrap_or(true) - { - raw_bonds - .insert(*bond_start, cur_redel_bond_amount); - } else { - raw_bonds.insert( - *bond_start, - cur_redel_bond_amount - - modified - .new_amount - // Safe unwrap - it shouldn't - // get to - // this if it's None - .unwrap(), - ); - } - } - } - } - (start, rbonds) - } - }) - .collect(); - - Ok(new_redelegated_unbonds) -} - -/// Compute a token amount after slashing, given the initial amount and a set of -/// slashes. It is assumed that the input `slashes` are those committed while -/// the `amount` was contributing to voting power. 
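As a rough, self-contained illustration of the cumulative-slashing idea described above (and implemented by `get_slashed_amount` below), the following sketch applies an ordered list of slash rates to an amount. It uses plain `u128`/`f64` stand-ins for `token::Amount` and `Dec`, and it deliberately ignores the slash-processing-epoch offset that the real function uses to decide whether an earlier slash reduces the base for a later one.

```rust
/// Minimal sketch (assumed simplification): apply an ordered list of slash
/// rates to an initial amount, charging each slash against what remains.
fn slashed_remainder(initial: u128, ordered_rates: &[f64]) -> u128 {
    let mut remaining = initial;
    for rate in ordered_rates {
        // Each slash burns `rate` of the tokens still standing at that point,
        // rounding the slashed portion up as the protocol does.
        let slashed = (remaining as f64 * rate).ceil() as u128;
        remaining = remaining.saturating_sub(slashed);
    }
    remaining
}

fn main() {
    // Two successive 10% slashes on 1_000 tokens leave 810, not 800, because
    // the second slash only applies to the remaining 900.
    assert_eq!(slashed_remainder(1_000, &[0.10, 0.10]), 810);
}
```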
-fn get_slashed_amount( - params: &PosParams, - amount: token::Amount, - slashes: &BTreeMap, -) -> storage_api::Result { - let mut updated_amount = amount; - let mut computed_amounts = Vec::::new(); - - for (&infraction_epoch, &slash_rate) in slashes { - let mut computed_to_remove = BTreeSet::>::new(); - for (ix, slashed_amount) in computed_amounts.iter().enumerate() { - // Update amount with slashes that happened more than unbonding_len - // epochs before this current slash - if slashed_amount.epoch + params.slash_processing_epoch_offset() - <= infraction_epoch - { - updated_amount = updated_amount - .checked_sub(slashed_amount.amount) - .unwrap_or_default(); - computed_to_remove.insert(Reverse(ix)); - } - } - // Invariant: `computed_to_remove` must be in reverse ord to avoid - // left-shift of the `computed_amounts` after call to `remove` - // invalidating the rest of the indices. - for item in computed_to_remove { - computed_amounts.remove(item.0); - } - computed_amounts.push(SlashedAmount { - amount: updated_amount.mul_ceil(slash_rate), - epoch: infraction_epoch, - }); - } - - let total_computed_amounts = computed_amounts - .into_iter() - .map(|slashed| slashed.amount) - .sum(); - - let final_amount = updated_amount - .checked_sub(total_computed_amounts) - .unwrap_or_default(); - - Ok(final_amount) -} - -// `def computeAmountAfterSlashingUnbond` -fn compute_amount_after_slashing_unbond( - storage: &S, - params: &OwnedPosParams, - unbonds: &BTreeMap, - redelegated_unbonds: &EagerRedelegatedUnbonds, - slashes: Vec, -) -> storage_api::Result -where - S: StorageRead, -{ - let mut result_slashing = ResultSlashing::default(); - for (&start_epoch, amount) in unbonds { - // `val listSlashes` - let list_slashes: Vec = slashes - .iter() - .filter(|slash| slash.epoch >= start_epoch) - .cloned() - .collect(); - // `val resultFold` - let result_fold = if let Some(redelegated_unbonds) = - redelegated_unbonds.get(&start_epoch) - { - fold_and_slash_redelegated_bonds( - storage, - params, - redelegated_unbonds, - start_epoch, - &list_slashes, - |_| true, - ) - } else { - FoldRedelegatedBondsResult::default() - }; - // `val totalNoRedelegated` - let total_not_redelegated = amount - .checked_sub(result_fold.total_redelegated) - .unwrap_or_default(); - // `val afterNoRedelegated` - let after_not_redelegated = - apply_list_slashes(params, &list_slashes, total_not_redelegated); - // `val amountAfterSlashing` - let amount_after_slashing = - after_not_redelegated + result_fold.total_after_slashing; - // Accumulation step - result_slashing.sum += amount_after_slashing; - result_slashing - .epoch_map - .insert(start_epoch, amount_after_slashing); - } - Ok(result_slashing) -} - -/// Compute from a set of unbonds (both redelegated and not) how much is left -/// after applying all relevant slashes. 
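A key step in the withdrawal computation below is selecting which slashes actually overlap the window in which the unbonded tokens were still contributing to stake (`start_epoch <= slash.epoch < end_epoch`). A minimal sketch of that selection, with plain `u64` epochs and a hypothetical `SlashLite` stand-in for `Slash`:

```rust
/// Hypothetical stand-in for the crate's `Slash` type.
#[derive(Clone, Debug)]
struct SlashLite {
    epoch: u64,
    rate: f64,
}

/// Keep only the slashes whose misbehaviour epoch falls inside the window in
/// which the tokens were still contributing to the validator's stake.
fn relevant_slashes(
    all: &[SlashLite],
    bond_start: u64,
    contribution_end: u64,
) -> Vec<SlashLite> {
    all.iter()
        .filter(|s| bond_start <= s.epoch && s.epoch < contribution_end)
        .cloned()
        .collect()
}

fn main() {
    let slashes = vec![
        SlashLite { epoch: 3, rate: 0.01 },
        SlashLite { epoch: 8, rate: 0.05 },
    ];
    // Tokens bonded at epoch 2 that stopped contributing at epoch 6 are only
    // affected by the epoch-3 slash; the epoch-8 one happened too late.
    assert_eq!(relevant_slashes(&slashes, 2, 6).len(), 1);
}
```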
-// `def computeAmountAfterSlashingWithdraw` -fn compute_amount_after_slashing_withdraw( - storage: &S, - params: &OwnedPosParams, - unbonds_and_redelegated_unbonds: &BTreeMap< - (Epoch, Epoch), - (token::Amount, EagerRedelegatedBondsMap), - >, - slashes: Vec, -) -> storage_api::Result -where - S: StorageRead, -{ - let mut result_slashing = ResultSlashing::default(); - - for ((start_epoch, withdraw_epoch), (amount, redelegated_unbonds)) in - unbonds_and_redelegated_unbonds.iter() - { - // TODO: check if slashes in the same epoch can be - // folded into one effective slash - let end_epoch = *withdraw_epoch - - params.unbonding_len - - params.cubic_slashing_window_length; - // Find slashes that apply to `start_epoch..end_epoch` - let list_slashes = slashes - .iter() - .filter(|slash| { - // Started before the slash occurred - start_epoch <= &slash.epoch - // Ends after the slash - && end_epoch > slash.epoch - }) - .cloned() - .collect::>(); - - // Find the sum and the sum after slashing of the redelegated unbonds - let result_fold = fold_and_slash_redelegated_bonds( - storage, - params, - redelegated_unbonds, - *start_epoch, - &list_slashes, - |_| true, - ); - - // Unbond amount that didn't come from a redelegation - let total_not_redelegated = *amount - result_fold.total_redelegated; - // Find how much remains after slashing non-redelegated amount - let after_not_redelegated = - apply_list_slashes(params, &list_slashes, total_not_redelegated); - - // Add back the unbond and redelegated unbond amount after slashing - let amount_after_slashing = - after_not_redelegated + result_fold.total_after_slashing; - - result_slashing.sum += amount_after_slashing; - result_slashing - .epoch_map - .insert(*start_epoch, amount_after_slashing); - } - - Ok(result_slashing) -} - -/// Arguments to [`become_validator`]. -pub struct BecomeValidator<'a> { - /// Proof-of-stake parameters. - pub params: &'a PosParams, - /// The validator's address. - pub address: &'a Address, - /// The validator's consensus key, used by Tendermint. - pub consensus_key: &'a common::PublicKey, - /// The validator's protocol key. - pub protocol_key: &'a common::PublicKey, - /// The validator's Ethereum bridge cold key. - pub eth_cold_key: &'a common::PublicKey, - /// The validator's Ethereum bridge hot key. - pub eth_hot_key: &'a common::PublicKey, - /// The numeric value of the current epoch. - pub current_epoch: Epoch, - /// Commission rate. - pub commission_rate: Dec, - /// Max commission rate change. - pub max_commission_rate_change: Dec, - /// Validator metadata - pub metadata: ValidatorMetaData, - /// Optional offset to use instead of pipeline offset - pub offset_opt: Option, -} - -/// Initialize data for a new validator. -pub fn become_validator( - storage: &mut S, - args: BecomeValidator<'_>, -) -> storage_api::Result<()> -where - S: StorageRead + StorageWrite, -{ - let BecomeValidator { - params, - address, - consensus_key, - protocol_key, - eth_cold_key, - eth_hot_key, - current_epoch, - commission_rate, - max_commission_rate_change, - metadata, - offset_opt, - } = args; - let offset = offset_opt.unwrap_or(params.pipeline_len); - - if !address.is_established() { - return Err(storage_api::Error::new_const( - "The given address {address} is not established. Only an \ - established address can become a validator.", - )); - } - - if is_validator(storage, address)? 
{ - return Err(storage_api::Error::new_const( - "The given address is already a validator", - )); - } - - // If the address is not yet a validator, it cannot have self-bonds, but it - // may have delegations. - if has_bonds(storage, address)? { - return Err(storage_api::Error::new_const( - "The given address has delegations and therefore cannot become a \ - validator. Unbond first.", - )); - } - - // This will fail if the key is already being used - try_insert_consensus_key(storage, consensus_key)?; - - let pipeline_epoch = current_epoch + offset; - validator_addresses_handle() - .at(&pipeline_epoch) - .insert(storage, address.clone())?; - - // Non-epoched validator data - write_validator_address_raw_hash(storage, address, consensus_key)?; - write_validator_max_commission_rate_change( - storage, - address, - max_commission_rate_change, - )?; - write_validator_metadata(storage, address, &metadata)?; - - // Epoched validator data - validator_consensus_key_handle(address).set( - storage, - consensus_key.clone(), - current_epoch, - offset, - )?; - validator_protocol_key_handle(address).set( - storage, - protocol_key.clone(), - current_epoch, - offset, - )?; - validator_eth_hot_key_handle(address).set( - storage, - eth_hot_key.clone(), - current_epoch, - offset, - )?; - validator_eth_cold_key_handle(address).set( - storage, - eth_cold_key.clone(), - current_epoch, - offset, - )?; - validator_commission_rate_handle(address).set( - storage, - commission_rate, - current_epoch, - offset, - )?; - validator_deltas_handle(address).set( - storage, - token::Change::zero(), - current_epoch, - offset, - )?; - - // The validator's stake at initialization is 0, so its state is immediately - // below-threshold - validator_state_handle(address).set( - storage, - ValidatorState::BelowThreshold, - current_epoch, - offset, - )?; - - insert_validator_into_validator_set( - storage, - params, - address, - token::Amount::zero(), - current_epoch, - offset, - )?; - - Ok(()) -} - -/// Consensus key change for a validator -pub fn change_consensus_key( - storage: &mut S, - validator: &Address, - consensus_key: &common::PublicKey, - current_epoch: Epoch, -) -> storage_api::Result<()> -where - S: StorageRead + StorageWrite, -{ - tracing::debug!("Changing consensus key for validator {}", validator); - - // Require that the new consensus key is an Ed25519 key - match consensus_key { - common::PublicKey::Ed25519(_) => {} - common::PublicKey::Secp256k1(_) => { - return Err(ConsensusKeyChangeError::MustBeEd25519.into()); - } - } - - // Check for uniqueness of the consensus key - try_insert_consensus_key(storage, consensus_key)?; - - // Set the new consensus key at the pipeline epoch - let params = read_pos_params(storage)?; - validator_consensus_key_handle(validator).set( - storage, - consensus_key.clone(), - current_epoch, - params.pipeline_len, - )?; - - // Write validator's new raw hash - write_validator_address_raw_hash(storage, validator, consensus_key)?; - - Ok(()) -} - -/// Withdraw tokens from those that have been unbonded from proof-of-stake -pub fn withdraw_tokens( - storage: &mut S, - source: Option<&Address>, - validator: &Address, - current_epoch: Epoch, -) -> storage_api::Result -where - S: StorageRead + StorageWrite, -{ - let params = read_pos_params(storage)?; - let source = source.unwrap_or(validator); - - tracing::debug!("Withdrawing tokens in epoch {current_epoch}"); - tracing::debug!("Source {} --> Validator {}", source, validator); - - let unbond_handle: Unbonds = unbond_handle(source, validator); - let 
redelegated_unbonds = - delegator_redelegated_unbonds_handle(source).at(validator); - - // Check that there are unbonded tokens available for withdrawal - if unbond_handle.is_empty(storage)? { - return Err(WithdrawError::NoUnbondFound(BondId { - source: source.clone(), - validator: validator.clone(), - }) - .into()); - } - - let mut unbonds_and_redelegated_unbonds: BTreeMap< - (Epoch, Epoch), - (token::Amount, EagerRedelegatedBondsMap), - > = BTreeMap::new(); - - for unbond in unbond_handle.iter(storage)? { - let ( - NestedSubKey::Data { - key: start_epoch, - nested_sub_key: SubKey::Data(withdraw_epoch), - }, - amount, - ) = unbond?; - - // Logging - tracing::debug!( - "Unbond delta ({start_epoch}..{withdraw_epoch}), amount {}", - amount.to_string_native() - ); - // Consider only unbonds that are eligible to be withdrawn - if withdraw_epoch > current_epoch { - tracing::debug!( - "Not yet withdrawable until epoch {withdraw_epoch}" - ); - continue; - } - - let mut eager_redelegated_unbonds = EagerRedelegatedBondsMap::default(); - let matching_redelegated_unbonds = - redelegated_unbonds.at(&start_epoch).at(&withdraw_epoch); - for ub in matching_redelegated_unbonds.iter(storage)? { - let ( - NestedSubKey::Data { - key: address, - nested_sub_key: SubKey::Data(epoch), - }, - amount, - ) = ub?; - eager_redelegated_unbonds - .entry(address) - .or_default() - .entry(epoch) - .or_insert(amount); - } - - unbonds_and_redelegated_unbonds.insert( - (start_epoch, withdraw_epoch), - (amount, eager_redelegated_unbonds), - ); - } - - let slashes = find_validator_slashes(storage, validator)?; - - // `val resultSlashing` - let result_slashing = compute_amount_after_slashing_withdraw( - storage, - ¶ms, - &unbonds_and_redelegated_unbonds, - slashes, - )?; - - let withdrawable_amount = result_slashing.sum; - tracing::debug!( - "Withdrawing total {}", - withdrawable_amount.to_string_native() - ); - - // `updateDelegator` with `unbonded` and `redelegeatedUnbonded` - for ((start_epoch, withdraw_epoch), _unbond_and_redelegations) in - unbonds_and_redelegated_unbonds - { - tracing::debug!("Remove ({start_epoch}..{withdraw_epoch}) from unbond"); - unbond_handle - .at(&start_epoch) - .remove(storage, &withdraw_epoch)?; - redelegated_unbonds - .at(&start_epoch) - .remove_all(storage, &withdraw_epoch)?; - - if unbond_handle.at(&start_epoch).is_empty(storage)? { - unbond_handle.remove_all(storage, &start_epoch)?; - } - if redelegated_unbonds.at(&start_epoch).is_empty(storage)? 
{ - redelegated_unbonds.remove_all(storage, &start_epoch)?; - } - } - - // Transfer the withdrawable tokens from the PoS address back to the source - let staking_token = staking_token_address(storage); - token::transfer( - storage, - &staking_token, - &ADDRESS, - source, - withdrawable_amount, - )?; - - // TODO: Transfer the slashed tokens from the PoS address to the Slash Pool - // address - // token::transfer( - // storage, - // &staking_token, - // &ADDRESS, - // &SLASH_POOL_ADDRESS, - // total_slashed, - // )?; - - Ok(withdrawable_amount) -} - -/// Change the commission rate of a validator -pub fn change_validator_commission_rate( - storage: &mut S, - validator: &Address, - new_rate: Dec, - current_epoch: Epoch, -) -> storage_api::Result<()> -where - S: StorageRead + StorageWrite, -{ - if new_rate.is_negative() { - return Err(CommissionRateChangeError::NegativeRate( - new_rate, - validator.clone(), - ) - .into()); - } - - if new_rate > Dec::one() { - return Err(CommissionRateChangeError::LargerThanOne( - new_rate, - validator.clone(), - ) - .into()); - } - - let max_change = - read_validator_max_commission_rate_change(storage, validator)?; - if max_change.is_none() { - return Err(CommissionRateChangeError::NoMaxSetInStorage( - validator.clone(), - ) - .into()); - } - - let params = read_pos_params(storage)?; - let commission_handle = validator_commission_rate_handle(validator); - let pipeline_epoch = current_epoch + params.pipeline_len; - - let rate_at_pipeline = commission_handle - .get(storage, pipeline_epoch, ¶ms)? - .expect("Could not find a rate in given epoch"); - if new_rate == rate_at_pipeline { - return Ok(()); - } - let rate_before_pipeline = commission_handle - .get(storage, pipeline_epoch.prev(), ¶ms)? - .expect("Could not find a rate in given epoch"); - - let change_from_prev = new_rate.abs_diff(&rate_before_pipeline); - if change_from_prev > max_change.unwrap() { - return Err(CommissionRateChangeError::RateChangeTooLarge( - change_from_prev, - validator.clone(), - ) - .into()); - } - - commission_handle.set(storage, new_rate, current_epoch, params.pipeline_len) -} - -/// Check if the given consensus key is already being used to ensure uniqueness. -/// -/// If it's not being used, it will be inserted into the set that's being used -/// for this. If it's already used, this will return an Error. -pub fn try_insert_consensus_key( - storage: &mut S, - consensus_key: &common::PublicKey, -) -> storage_api::Result<()> -where - S: StorageRead + StorageWrite, -{ - let key = consensus_keys_key(); - LazySet::open(key).try_insert(storage, consensus_key.clone()) -} - -/// Get the unique set of consensus keys in storage -pub fn get_consensus_key_set( - storage: &S, -) -> storage_api::Result> -where - S: StorageRead, -{ - let key = consensus_keys_key(); - let lazy_set = LazySet::::open(key); - Ok(lazy_set.iter(storage)?.map(Result::unwrap).collect()) -} - -/// Check if the given consensus key is already being used to ensure uniqueness. -pub fn is_consensus_key_used( - storage: &S, - consensus_key: &common::PublicKey, -) -> storage_api::Result -where - S: StorageRead, -{ - let key = consensus_keys_key(); - let handle = LazySet::open(key); - handle.contains(storage, consensus_key) -} - -/// Get the total bond amount, including slashes, for a given bond ID and epoch. -/// Returns the bond amount after slashing. For future epochs the value is -/// subject to change. 
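Before any slashes are applied, `bond_amount` below first folds the raw bond deltas into a per-start-epoch map, counting only entries whose start epoch is at or before the queried epoch. A minimal sketch of that aggregation step, with plain `u64`/`u128` stand-ins for `Epoch` and `token::Amount`:

```rust
use std::collections::BTreeMap;

/// Sum bond deltas by their start epoch, ignoring bonds that start after the
/// queried epoch (they do not yet contribute to stake at that epoch).
fn bonds_at_epoch(deltas: &[(u64, u128)], epoch: u64) -> BTreeMap<u64, u128> {
    let mut amounts: BTreeMap<u64, u128> = BTreeMap::new();
    for &(start, delta) in deltas {
        if start <= epoch {
            *amounts.entry(start).or_default() += delta;
        }
    }
    amounts
}

fn main() {
    let deltas = [(1, 100), (3, 50), (7, 25)];
    // At epoch 5 only the bonds started at epochs 1 and 3 count; the start
    // epochs are kept as keys because slashes are applied per start epoch.
    assert_eq!(bonds_at_epoch(&deltas, 5).values().sum::<u128>(), 150);
}
```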
-pub fn bond_amount( - storage: &S, - bond_id: &BondId, - epoch: Epoch, -) -> storage_api::Result -where - S: StorageRead, -{ - let params = read_pos_params(storage)?; - // Outer key is the start epoch used to calculate slashes. The inner - // keys are discarded after applying slashes. - let mut amounts: BTreeMap = BTreeMap::default(); - - // Bonds - let bonds = - bond_handle(&bond_id.source, &bond_id.validator).get_data_handler(); - for next in bonds.iter(storage)? { - let (start, delta) = next?; - if start <= epoch { - let amount = amounts.entry(start).or_default(); - *amount += delta; - } - } - - // Add unbonds that are still contributing to stake - let unbonds = unbond_handle(&bond_id.source, &bond_id.validator); - for next in unbonds.iter(storage)? { - let ( - NestedSubKey::Data { - key: start, - nested_sub_key: SubKey::Data(withdrawable_epoch), - }, - delta, - ) = next?; - // This is the first epoch in which the unbond stops contributing to - // voting power - let end = withdrawable_epoch - params.withdrawable_epoch_offset() - + params.pipeline_len; - - if start <= epoch && end > epoch { - let amount = amounts.entry(start).or_default(); - *amount += delta; - } - } - - if bond_id.validator != bond_id.source { - // Add outgoing redelegations that are still contributing to the source - // validator's stake - let redelegated_bonds = - delegator_redelegated_bonds_handle(&bond_id.source); - for res in redelegated_bonds.iter(storage)? { - let ( - NestedSubKey::Data { - key: _dest_validator, - nested_sub_key: - NestedSubKey::Data { - key: end, - nested_sub_key: - NestedSubKey::Data { - key: src_validator, - nested_sub_key: SubKey::Data(start), - }, - }, - }, - delta, - ) = res?; - if src_validator == bond_id.validator - && start <= epoch - && end > epoch - { - let amount = amounts.entry(start).or_default(); - *amount += delta; - } - } - - // Add outgoing redelegation unbonds that are still contributing to - // the source validator's stake - let redelegated_unbonds = - delegator_redelegated_unbonds_handle(&bond_id.source); - for res in redelegated_unbonds.iter(storage)? { - let ( - NestedSubKey::Data { - key: _dest_validator, - nested_sub_key: - NestedSubKey::Data { - key: redelegation_epoch, - nested_sub_key: - NestedSubKey::Data { - key: _withdraw_epoch, - nested_sub_key: - NestedSubKey::Data { - key: src_validator, - nested_sub_key: SubKey::Data(start), - }, - }, - }, - }, - delta, - ) = res?; - if src_validator == bond_id.validator - // If the unbonded bond was redelegated after this epoch ... - && redelegation_epoch > epoch - // ... the start was before or at this epoch - && start <= epoch - { - let amount = amounts.entry(start).or_default(); - *amount += delta; - } - } - } - - if !amounts.is_empty() { - let slashes = find_validator_slashes(storage, &bond_id.validator)?; - - // Apply slashes - for (&start, amount) in amounts.iter_mut() { - let list_slashes = slashes - .iter() - .filter(|slash| { - let processing_epoch = - slash.epoch + params.slash_processing_epoch_offset(); - // Only use slashes that were processed before or at the - // epoch associated with the bond amount. This assumes - // that slashes are applied before inflation. - processing_epoch <= epoch && start <= slash.epoch - }) - .cloned() - .collect::>(); - - *amount = apply_list_slashes(¶ms, &list_slashes, *amount); - } - } - - Ok(amounts.values().cloned().sum()) -} - -/// Get bond amounts within the `claim_start..=claim_end` epoch range for -/// claiming rewards for a given bond ID. 
Returns a map of bond amounts -/// associated with every epoch within the given epoch range (accumulative) in -/// which an amount contributed to the validator's stake. -/// This function will only consider slashes that were processed before or at -/// the epoch in which we're calculating the bond amount to correspond to the -/// validator stake that was used to calculate reward products (slashes do *not* -/// retrospectively affect the rewards calculated before slash processing). -pub fn bond_amounts_for_rewards( - storage: &S, - bond_id: &BondId, - claim_start: Epoch, - claim_end: Epoch, -) -> storage_api::Result> -where - S: StorageRead, -{ - let params = read_pos_params(storage)?; - // Outer key is every epoch in which the a bond amount contributed to stake - // and the inner key is the start epoch used to calculate slashes. The inner - // keys are discarded after applying slashes. - let mut amounts: BTreeMap> = - BTreeMap::default(); - - // Only need to do bonds since rewwards are accumulated during - // `unbond_tokens` - let bonds = - bond_handle(&bond_id.source, &bond_id.validator).get_data_handler(); - for next in bonds.iter(storage)? { - let (start, delta) = next?; - - for ep in Epoch::iter_bounds_inclusive(claim_start, claim_end) { - // A bond that wasn't unbonded is added to all epochs up to - // `claim_end` - if start <= ep { - let amount = - amounts.entry(ep).or_default().entry(start).or_default(); - *amount += delta; - } - } - } - - if !amounts.is_empty() { - let slashes = find_validator_slashes(storage, &bond_id.validator)?; - let redelegated_bonded = - delegator_redelegated_bonds_handle(&bond_id.source) - .at(&bond_id.validator); - - // Apply slashes - for (&ep, amounts) in amounts.iter_mut() { - for (&start, amount) in amounts.iter_mut() { - let list_slashes = slashes - .iter() - .filter(|slash| { - let processing_epoch = slash.epoch - + params.slash_processing_epoch_offset(); - // Only use slashes that were processed before or at the - // epoch associated with the bond amount. This assumes - // that slashes are applied before inflation. - processing_epoch <= ep && start <= slash.epoch - }) - .cloned() - .collect::>(); - - let slash_epoch_filter = - |e: Epoch| e + params.slash_processing_epoch_offset() <= ep; - - let redelegated_bonds = - redelegated_bonded.at(&start).collect_map(storage)?; - - let result_fold = fold_and_slash_redelegated_bonds( - storage, - ¶ms, - &redelegated_bonds, - start, - &list_slashes, - slash_epoch_filter, - ); - - let total_not_redelegated = - *amount - result_fold.total_redelegated; - - let after_not_redelegated = apply_list_slashes( - ¶ms, - &list_slashes, - total_not_redelegated, - ); - - *amount = - after_not_redelegated + result_fold.total_after_slashing; - } + let mut epochs = bonds_to_unbond.epochs.clone(); + if let Some((epoch, _)) = bonds_to_unbond.new_entry { + epochs.insert(epoch); } - } - - Ok(amounts - .into_iter() - // Flatten the inner maps to discard bond start epochs - .map(|(ep, amounts)| (ep, amounts.values().cloned().sum())) - .collect()) -} - -/// Get the genesis consensus validators stake and consensus key for Tendermint, -/// converted from [`ValidatorSetUpdate`]s using the given function. 
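The validator-set update code further below skips notifying Tendermint about a validator that stays in the consensus set with an unchanged voting power and consensus key. A hedged, stand-alone illustration of that decision rule; `KeyLite` and `needs_update` are hypothetical stand-ins, not crate APIs:

```rust
/// Hypothetical stand-in for a consensus public key.
#[derive(PartialEq)]
struct KeyLite(u8);

fn needs_update(
    was_consensus: bool,
    old_power: i64,
    new_power: i64,
    old_key: &KeyLite,
    new_key: &KeyLite,
) -> bool {
    // No update is needed only when nothing Tendermint cares about changed.
    !(was_consensus && old_power == new_power && old_key == new_key)
}

fn main() {
    assert!(!needs_update(true, 100, 100, &KeyLite(1), &KeyLite(1)));
    // A changed consensus key still requires an update (plus a deactivation
    // of the old key), even though the voting power is unchanged.
    assert!(needs_update(true, 100, 100, &KeyLite(1), &KeyLite(2)));
}
```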
-pub fn genesis_validator_set_tendermint( - storage: &S, - params: &PosParams, - current_epoch: Epoch, - mut f: impl FnMut(ValidatorSetUpdate) -> T, -) -> storage_api::Result> -where - S: StorageRead, -{ - let consensus_validator_handle = - consensus_validator_set_handle().at(¤t_epoch); - let iter = consensus_validator_handle.iter(storage)?; - - iter.map(|validator| { - let ( - NestedSubKey::Data { - key: new_stake, - nested_sub_key: _, - }, - address, - ) = validator?; - let consensus_key = validator_consensus_key_handle(&address) - .get(storage, current_epoch, params)? - .unwrap(); - let converted = f(ValidatorSetUpdate::Consensus(ConsensusValidator { - consensus_key, - bonded_stake: new_stake, - })); - Ok(converted) - }) - .collect() -} - -/// Communicate imminent validator set updates to Tendermint. This function is -/// called two blocks before the start of a new epoch because Tendermint -/// validator updates become active two blocks after the updates are submitted. -pub fn validator_set_update_tendermint( - storage: &S, - params: &PosParams, - current_epoch: Epoch, - f: impl FnMut(ValidatorSetUpdate) -> T, -) -> storage_api::Result> -where - S: StorageRead, -{ - tracing::debug!("Communicating validator set updates to Tendermint."); - // Because this is called 2 blocks before a start on an epoch, we're gonna - // give Tendermint updates for the next epoch - let next_epoch = current_epoch.next(); - - let new_consensus_validator_handle = - consensus_validator_set_handle().at(&next_epoch); - let prev_consensus_validator_handle = - consensus_validator_set_handle().at(¤t_epoch); - - let new_consensus_validators = new_consensus_validator_handle - .iter(storage)? - .map(|validator| { - let ( - NestedSubKey::Data { - key: new_stake, - nested_sub_key: _, - }, - address, - ) = validator.unwrap(); - - tracing::debug!( - "Consensus validator address {address}, stake {}", - new_stake.to_string_native() - ); - - let new_consensus_key = validator_consensus_key_handle(&address) - .get(storage, next_epoch, params) - .unwrap() - .unwrap(); - - let old_consensus_key = validator_consensus_key_handle(&address) - .get(storage, current_epoch, params) - .unwrap(); - - // Check if the validator was consensus in the previous epoch with - // the same stake. If so, no updated is needed. 
- // Look up previous state and prev and current voting powers - if !prev_consensus_validator_handle.is_empty(storage).unwrap() { - let prev_state = validator_state_handle(&address) - .get(storage, current_epoch, params) - .unwrap(); - let prev_tm_voting_power = Lazy::new(|| { - let prev_validator_stake = read_validator_stake( - storage, - params, - &address, - current_epoch, - ) - .unwrap(); - into_tm_voting_power( - params.tm_votes_per_token, - prev_validator_stake, - ) - }); - let new_tm_voting_power = Lazy::new(|| { - into_tm_voting_power(params.tm_votes_per_token, new_stake) - }); - - // If it was in `Consensus` before and voting power has not - // changed, skip the update - if matches!(prev_state, Some(ValidatorState::Consensus)) - && *prev_tm_voting_power == *new_tm_voting_power - { - if old_consensus_key.as_ref().unwrap() == &new_consensus_key - { - tracing::debug!( - "skipping validator update, {address} is in \ - consensus set but voting power hasn't changed" - ); - return vec![]; - } else { - return vec![ - ValidatorSetUpdate::Consensus(ConsensusValidator { - consensus_key: new_consensus_key, - bonded_stake: new_stake, - }), - ValidatorSetUpdate::Deactivated( - old_consensus_key.unwrap(), - ), - ]; - } - } - // If both previous and current voting powers are 0, and the - // validator_stake_threshold is 0, skip update - if params.validator_stake_threshold.is_zero() - && *prev_tm_voting_power == 0 - && *new_tm_voting_power == 0 - { - tracing::info!( - "skipping validator update, {address} is in consensus \ - set but without voting power" - ); - return vec![]; - } - } - - tracing::debug!( - "{address} consensus key {}", - new_consensus_key.tm_raw_hash() + for epoch in epochs { + let cur_bond = bonds_handle + .get_delta_val(storage, epoch)? + .unwrap_or_default(); + let redelegated_deltas = redelegated_bonds + .at(&epoch) + // Sum of redelegations from any src validator + .collect_map(storage)? + .into_values() + .map(|redeleg| redeleg.into_values().sum()) + .sum(); + debug_assert!( + cur_bond >= redelegated_deltas, + "After unbonding, in epoch {epoch} the bond amount {} must be \ + >= redelegated deltas at pipeline {}.\n\nredelegated_bonds \ + pre: {redel_bonds_pre:#?}\nredelegated_bonds post: \ + {redel_bonds_post:#?},\nmodified_redelegation: \ + {modified_redelegation:#?},\nbonds_to_unbond: \ + {bonds_to_unbond:#?}", + cur_bond.to_string_native(), + redelegated_deltas.to_string_native() ); + } + } - if old_consensus_key.as_ref() == Some(&new_consensus_key) - || old_consensus_key.is_none() - { - vec![ValidatorSetUpdate::Consensus(ConsensusValidator { - consensus_key: new_consensus_key, - bonded_stake: new_stake, - })] - } else { - vec![ - ValidatorSetUpdate::Consensus(ConsensusValidator { - consensus_key: new_consensus_key, - bonded_stake: new_stake, - }), - ValidatorSetUpdate::Deactivated(old_consensus_key.unwrap()), - ] - } - }); - - let prev_consensus_validators = prev_consensus_validator_handle - .iter(storage)? 
- .map(|validator| { - let ( - NestedSubKey::Data { - key: _prev_stake, - nested_sub_key: _, - }, - address, - ) = validator.unwrap(); - - let new_state = validator_state_handle(&address) - .get(storage, next_epoch, params) - .unwrap(); + // Tally rewards (only call if this is not the first epoch) + if current_epoch > Epoch::default() { + let mut rewards = token::Amount::zero(); - let prev_tm_voting_power = Lazy::new(|| { - let prev_validator_stake = read_validator_stake( - storage, - params, - &address, - current_epoch, - ) - .unwrap(); - into_tm_voting_power( - params.tm_votes_per_token, - prev_validator_stake, - ) - }); + let last_claim_epoch = + get_last_reward_claim_epoch(storage, source, validator)? + .unwrap_or_default(); + let rewards_products = validator_rewards_products_handle(validator); - let old_consensus_key = validator_consensus_key_handle(&address) - .get(storage, current_epoch, params) - .unwrap() - .unwrap(); - - // If the validator is still in the Consensus set, we accounted for - // it in the `new_consensus_validators` iterator above - if matches!(new_state, Some(ValidatorState::Consensus)) { - return vec![]; - } else if params.validator_stake_threshold.is_zero() - && *prev_tm_voting_power == 0 + for (start_epoch, slashed_amount) in &result_slashing.epoch_map { + // Stop collecting rewards at the moment the unbond is initiated + // (right now) + for ep in + Epoch::iter_bounds_inclusive(*start_epoch, current_epoch.prev()) { - // If the new state is not Consensus but its prev voting power - // was 0 and the stake threshold is 0, we can also skip the - // update - tracing::info!( - "skipping validator update, {address} is in consensus set \ - but without voting power" - ); - return vec![]; + // Consider the last epoch when rewards were claimed + if ep < last_claim_epoch { + continue; + } + let rp = + rewards_products.get(storage, &ep)?.unwrap_or_default(); + rewards += rp * (*slashed_amount); } + } - // The remaining validators were previously Consensus but no longer - // are, so they must be deactivated - let consensus_key = validator_consensus_key_handle(&address) - .get(storage, next_epoch, params) - .unwrap() - .unwrap(); - tracing::debug!( - "{address} consensus key {}", - consensus_key.tm_raw_hash() - ); - vec![ValidatorSetUpdate::Deactivated(old_consensus_key)] - }); + // Update the rewards from the current unbonds first + add_rewards_to_counter(storage, source, validator, rewards)?; + } - Ok(new_consensus_validators - .chain(prev_consensus_validators) - .flatten() - .map(f) - .collect()) + Ok(result_slashing) } -/// Find all validators to which a given bond `owner` (or source) has a -/// delegation -pub fn find_delegation_validators( - storage: &S, - owner: &Address, -) -> storage_api::Result> -where - S: StorageRead, -{ - let bonds_prefix = bonds_for_source_prefix(owner); - let mut delegations: HashSet
= HashSet::new(); - - for iter_result in storage_api::iter_prefix_bytes(storage, &bonds_prefix)? { - let (key, _bond_bytes) = iter_result?; - let validator_address = get_validator_address_from_bond(&key) - .ok_or_else(|| { - storage_api::Error::new_const( - "Delegation key should contain validator address.", - ) - })?; - delegations.insert(validator_address); - } - Ok(delegations) +#[derive(Debug, Default, Eq, PartialEq)] +struct FoldRedelegatedBondsResult { + total_redelegated: token::Amount, + total_after_slashing: token::Amount, } -/// Find all validators to which a given bond `owner` (or source) has a -/// delegation with the amount -pub fn find_delegations( +/// Iterates over a `redelegated_unbonds` and computes the both the sum of all +/// redelegated tokens and how much is left after applying all relevant slashes. +// `def foldAndSlashRedelegatedBondsMap` +fn fold_and_slash_redelegated_bonds( storage: &S, - owner: &Address, - epoch: &Epoch, -) -> storage_api::Result> + params: &OwnedPosParams, + redelegated_unbonds: &EagerRedelegatedBondsMap, + start_epoch: Epoch, + list_slashes: &[Slash], + slash_epoch_filter: impl Fn(Epoch) -> bool, +) -> FoldRedelegatedBondsResult where S: StorageRead, { - let bonds_prefix = bonds_for_source_prefix(owner); - let params = read_pos_params(storage)?; - let mut delegations: HashMap = HashMap::new(); - - for iter_result in storage_api::iter_prefix_bytes(storage, &bonds_prefix)? { - let (key, _bond_bytes) = iter_result?; - let validator_address = get_validator_address_from_bond(&key) - .ok_or_else(|| { - storage_api::Error::new_const( - "Delegation key should contain validator address.", - ) - })?; - let deltas_sum = bond_handle(owner, &validator_address) - .get_sum(storage, *epoch, ¶ms)? - .unwrap_or_default(); - delegations.insert(validator_address, deltas_sum); + let mut result = FoldRedelegatedBondsResult::default(); + for (src_validator, bonds_map) in redelegated_unbonds { + for (bond_start, &change) in bonds_map { + // Merge the two lists of slashes + let mut merged: Vec = + // Look-up slashes for this validator ... + validator_slashes_handle(src_validator) + .iter(storage) + .unwrap() + .map(Result::unwrap) + .filter(|slash| { + params.in_redelegation_slashing_window( + slash.epoch, + params.redelegation_start_epoch_from_end( + start_epoch, + ), + start_epoch, + ) && *bond_start <= slash.epoch + && slash_epoch_filter(slash.epoch) + }) + // ... and add `list_slashes` + .chain(list_slashes.iter().cloned()) + .collect(); + + // Sort slashes by epoch + merged.sort_by(|s1, s2| s1.epoch.partial_cmp(&s2.epoch).unwrap()); + + result.total_redelegated += change; + result.total_after_slashing += + apply_list_slashes(params, &merged, change); + } } - Ok(delegations) + result } -/// Find if the given source address has any bonds. -pub fn has_bonds(storage: &S, source: &Address) -> storage_api::Result -where - S: StorageRead, -{ - let max_epoch = Epoch(u64::MAX); - let delegations = find_delegations(storage, source, &max_epoch)?; - Ok(!delegations - .values() - .cloned() - .sum::() - .is_zero()) +/// Epochs for full and partial unbonds. +#[derive(Debug, Default)] +struct BondsForRemovalRes { + /// Full unbond epochs + pub epochs: BTreeSet, + /// Partial unbond epoch associated with the new bond amount + pub new_entry: Option<(Epoch, token::Amount)>, } -/// Find PoS slashes applied to a validator, if any -pub fn find_validator_slashes( +/// In decreasing epoch order, decrement the non-zero bond amount entries until +/// the full `amount` has been removed. 
Returns a `BondsForRemovalRes` object +/// that contains the epochs for which the full bond amount is removed and +/// additionally information for the one epoch whose bond amount is partially +/// removed, if any. +fn find_bonds_to_remove( storage: &S, - validator: &Address, -) -> storage_api::Result> + bonds_handle: &LazyMap, + amount: token::Amount, +) -> storage_api::Result where S: StorageRead, { - validator_slashes_handle(validator).iter(storage)?.collect() -} + #[allow(clippy::needless_collect)] + let bonds: Vec> = bonds_handle.iter(storage)?.collect(); -/// Find raw bond deltas for the given source and validator address. -pub fn find_bonds( - storage: &S, - source: &Address, - validator: &Address, -) -> storage_api::Result> -where - S: StorageRead, -{ - bond_handle(source, validator) - .get_data_handler() - .iter(storage)? - .collect() + let mut bonds_for_removal = BondsForRemovalRes::default(); + let mut remaining = amount; + + for bond in bonds.into_iter().rev() { + let (bond_epoch, bond_amount) = bond?; + let to_unbond = cmp::min(bond_amount, remaining); + if to_unbond == bond_amount { + bonds_for_removal.epochs.insert(bond_epoch); + } else { + bonds_for_removal.new_entry = + Some((bond_epoch, bond_amount - to_unbond)); + } + remaining -= to_unbond; + if remaining.is_zero() { + break; + } + } + Ok(bonds_for_removal) } -/// Find raw unbond deltas for the given source and validator address. -pub fn find_unbonds( - storage: &S, - source: &Address, - validator: &Address, -) -> storage_api::Result> -where - S: StorageRead, -{ - unbond_handle(source, validator) - .iter(storage)? - .map(|next_result| { - let ( - NestedSubKey::Data { - key: start_epoch, - nested_sub_key: SubKey::Data(withdraw_epoch), - }, - amount, - ) = next_result?; - Ok(((start_epoch, withdraw_epoch), amount)) - }) - .collect() +#[derive(Debug, Default, PartialEq, Eq)] +struct ModifiedRedelegation { + epoch: Option, + validators_to_remove: BTreeSet
<Address>, + validator_to_modify: Option<Address>
, + epochs_to_remove: BTreeSet, + epoch_to_modify: Option, + new_amount: Option, } -/// Collect the details of all bonds and unbonds that match the source and -/// validator arguments. If either source or validator is `None`, then grab the -/// information for all sources or validators, respectively. -pub fn bonds_and_unbonds( +/// Used in `fn unbond_tokens` to compute the modified state of a redelegation +/// if redelegated tokens are being unbonded. +fn compute_modified_redelegation( storage: &S, - source: Option
<Address>, - validator: Option<Address>
, -) -> storage_api::Result + redelegated_bonds: &RedelegatedTokens, + start_epoch: Epoch, + amount_to_unbond: token::Amount, +) -> storage_api::Result where S: StorageRead, { - let params = read_pos_params(storage)?; + let mut modified_redelegation = ModifiedRedelegation::default(); + + let mut src_validators = BTreeSet::
::new(); + let mut total_redelegated = token::Amount::zero(); + for rb in redelegated_bonds.iter(storage)? { + let ( + NestedSubKey::Data { + key: src_validator, + nested_sub_key: _, + }, + amount, + ) = rb?; + total_redelegated += amount; + src_validators.insert(src_validator); + } + + modified_redelegation.epoch = Some(start_epoch); + + // If the total amount of redelegated bonds is less than the target amount, + // then all redelegated bonds must be unbonded. + if total_redelegated <= amount_to_unbond { + return Ok(modified_redelegation); + } - match (source.clone(), validator.clone()) { - (Some(source), Some(validator)) => { - find_bonds_and_unbonds_details(storage, ¶ms, source, validator) + let mut remaining = amount_to_unbond; + for src_validator in src_validators.into_iter() { + if remaining.is_zero() { + break; } - _ => { - get_multiple_bonds_and_unbonds(storage, ¶ms, source, validator) + let rbonds = redelegated_bonds.at(&src_validator); + let total_src_val_amount = rbonds + .iter(storage)? + .map(|res| { + let (_, amount) = res?; + Ok(amount) + }) + .sum::>()?; + + // TODO: move this into the `if total_redelegated <= remaining` branch + // below, then we don't have to remove it in `fn + // update_redelegated_bonds` when `validator_to_modify` is Some (and + // avoid `modified_redelegation.validators_to_remove.clone()`). + // It affects assumption 2. in `fn compute_new_redelegated_unbonds`, but + // that looks trivial to change. + // NOTE: not sure if this TODO is still relevant... + modified_redelegation + .validators_to_remove + .insert(src_validator.clone()); + if total_src_val_amount <= remaining { + remaining -= total_src_val_amount; + } else { + let bonds_to_remove = + find_bonds_to_remove(storage, &rbonds, remaining)?; + + remaining = token::Amount::zero(); + + // NOTE: When there are multiple `src_validators` from which we're + // unbonding, `validator_to_modify` cannot get overriden, because + // only one of them can be a partial unbond (`new_entry` + // is partial unbond) + if let Some((bond_epoch, new_bond_amount)) = + bonds_to_remove.new_entry + { + modified_redelegation.validator_to_modify = Some(src_validator); + modified_redelegation.epochs_to_remove = { + let mut epochs = bonds_to_remove.epochs; + // TODO: remove this insertion then we don't have to remove + // it again in `fn update_redelegated_bonds` + // when `epoch_to_modify` is Some (and avoid + // `modified_redelegation.epochs_to_remove.clone`) + // It affects assumption 3. in `fn + // compute_new_redelegated_unbonds`, but that also looks + // trivial to change. + epochs.insert(bond_epoch); + epochs + }; + modified_redelegation.epoch_to_modify = Some(bond_epoch); + modified_redelegation.new_amount = Some(new_bond_amount); + } else { + modified_redelegation.validator_to_modify = Some(src_validator); + modified_redelegation.epochs_to_remove = bonds_to_remove.epochs; + } } } + Ok(modified_redelegation) } -/// Collect the details of all of the enqueued slashes to be processed in future -/// epochs into a nested map -pub fn find_all_enqueued_slashes( - storage: &S, - epoch: Epoch, -) -> storage_api::Result>>> +fn update_redelegated_bonds( + storage: &mut S, + redelegated_bonds: &RedelegatedTokens, + modified_redelegation: &ModifiedRedelegation, +) -> storage_api::Result<()> where - S: StorageRead, + S: StorageRead + StorageWrite, { - let mut enqueued = HashMap::>>::new(); - for res in enqueued_slashes_handle().get_data_handler().iter(storage)? 
{ - let ( - NestedSubKey::Data { - key: processing_epoch, - nested_sub_key: - NestedSubKey::Data { - key: address, - nested_sub_key: _, - }, - }, - slash, - ) = res?; - if processing_epoch <= epoch { - continue; - } + if let Some(val_to_modify) = &modified_redelegation.validator_to_modify { + let mut updated_vals_to_remove = + modified_redelegation.validators_to_remove.clone(); + updated_vals_to_remove.remove(val_to_modify); - let slashes = enqueued - .entry(address) - .or_default() - .entry(processing_epoch) - .or_default(); - slashes.push(slash); - } - Ok(enqueued) -} + // Remove the updated_vals_to_remove keys from the + // redelegated_bonds map + for val in &updated_vals_to_remove { + redelegated_bonds.remove_all(storage, val)?; + } -/// Find all slashes and the associated validators in the PoS system -pub fn find_all_slashes( - storage: &S, -) -> storage_api::Result>> -where - S: StorageRead, -{ - let mut slashes: HashMap> = HashMap::new(); - let slashes_iter = storage_api::iter_prefix_bytes( - storage, - &slashes_prefix(), - )? - .filter_map(|result| { - if let Ok((key, val_bytes)) = result { - if let Some(validator) = is_validator_slashes_key(&key) { - let slash: Slash = - BorshDeserialize::try_from_slice(&val_bytes).ok()?; - return Some((validator, slash)); + if let Some(epoch_to_modify) = modified_redelegation.epoch_to_modify { + let mut updated_epochs_to_remove = + modified_redelegation.epochs_to_remove.clone(); + updated_epochs_to_remove.remove(&epoch_to_modify); + let val_bonds_to_modify = redelegated_bonds.at(val_to_modify); + for epoch in updated_epochs_to_remove { + val_bonds_to_modify.remove(storage, &epoch)?; + } + val_bonds_to_modify.insert( + storage, + epoch_to_modify, + modified_redelegation.new_amount.unwrap(), + )?; + } else { + // Then remove to epochs_to_remove from the redelegated bonds of the + // val_to_modify + let val_bonds_to_modify = redelegated_bonds.at(val_to_modify); + for epoch in &modified_redelegation.epochs_to_remove { + val_bonds_to_modify.remove(storage, epoch)?; } } - None - }); - - slashes_iter.for_each(|(address, slash)| match slashes.get(&address) { - Some(vec) => { - let mut vec = vec.clone(); - vec.push(slash); - slashes.insert(address, vec); - } - None => { - slashes.insert(address, vec![slash]); + } else { + // Remove all validators in modified_redelegation.validators_to_remove + // from redelegated_bonds + for val in &modified_redelegation.validators_to_remove { + redelegated_bonds.remove_all(storage, val)?; } - }); - Ok(slashes) + } + Ok(()) } -fn get_multiple_bonds_and_unbonds( +/// Temp helper type to match quint model. +/// Result of `compute_new_redelegated_unbonds` that contains a map of +/// redelegated unbonds. +/// The map keys from outside in are: +/// +/// - redelegation end epoch where redeleg stops contributing to src validator +/// - src validator address +/// - src bond start epoch where it started contributing to src validator +type EagerRedelegatedUnbonds = BTreeMap; + +/// Computes a map of redelegated unbonds from a set of redelegated bonds. +/// +/// - `redelegated_bonds` - a map of redelegated bonds from epoch to +/// `RedelegatedTokens`. +/// - `epochs_to_remove` - a set of epochs that indicate the set of epochs +/// unbonded. +/// - `modified` record that represents a redelegated bond that it is only +/// partially unbonded. +/// +/// The function assumes that: +/// +/// 1. `modified.epoch` is not in the `epochs_to_remove` set. +/// 2. `modified.validator_to_modify` is in `modified.vals_to_remove`. +/// 3. 
`modified.epoch_to_modify` is in `modified.epochs_to_remove`. +// `def computeNewRedelegatedUnbonds` from Quint +fn compute_new_redelegated_unbonds( storage: &S, - params: &PosParams, - source: Option<Address>
, - validator: Option<Address>
, -) -> storage_api::Result + redelegated_bonds: &RedelegatedBondsOrUnbonds, + epochs_to_remove: &BTreeSet, + modified: &ModifiedRedelegation, +) -> storage_api::Result where - S: StorageRead, + S: StorageRead + StorageWrite, { + let unbonded_epochs = if let Some(epoch) = modified.epoch { + debug_assert!( + !epochs_to_remove.contains(&epoch), + "1. assumption in `fn compute_new_redelegated_unbonds` doesn't \ + hold" + ); + let mut epochs = epochs_to_remove.clone(); + epochs.insert(epoch); + epochs + .iter() + .cloned() + .filter(|e| redelegated_bonds.contains(storage, e).unwrap()) + .collect::>() + } else { + epochs_to_remove + .iter() + .cloned() + .filter(|e| redelegated_bonds.contains(storage, e).unwrap()) + .collect::>() + }; + debug_assert!( + modified + .validator_to_modify + .as_ref() + .map(|validator| modified.validators_to_remove.contains(validator)) + .unwrap_or(true), + "2. assumption in `fn compute_new_redelegated_unbonds` doesn't hold" + ); debug_assert!( - source.is_none() || validator.is_none(), - "Use `find_bonds_and_unbonds_details` when full bond ID is known" + modified + .epoch_to_modify + .as_ref() + .map(|epoch| modified.epochs_to_remove.contains(epoch)) + .unwrap_or(true), + "3. assumption in `fn compute_new_redelegated_unbonds` doesn't hold" ); - let mut slashes_cache = HashMap::>::new(); - // Applied slashes grouped by validator address - let mut applied_slashes = HashMap::>::new(); - - // TODO: if validator is `Some`, look-up all its bond owners (including - // self-bond, if any) first - let prefix = match source.as_ref() { - Some(source) => bonds_for_source_prefix(source), - None => bonds_prefix(), - }; - // We have to iterate raw bytes, cause the epoched data `last_update` field - // gets matched here too - let mut raw_bonds = storage_api::iter_prefix_bytes(storage, &prefix)? - .filter_map(|result| { - if let Ok((key, val_bytes)) = result { - if let Some((bond_id, start)) = is_bond_key(&key) { - if source.is_some() - && source.as_ref().unwrap() != &bond_id.source - { - return None; - } - if validator.is_some() - && validator.as_ref().unwrap() != &bond_id.validator - { - return None; - } - let change: token::Amount = - BorshDeserialize::try_from_slice(&val_bytes).ok()?; - if change.is_zero() { - return None; - } - return Some((bond_id, start, change)); + // quint `newRedelegatedUnbonds` returned from + // `computeNewRedelegatedUnbonds` + let new_redelegated_unbonds: EagerRedelegatedUnbonds = unbonded_epochs + .into_iter() + .map(|start| { + let mut rbonds = EagerRedelegatedBondsMap::default(); + if modified + .epoch + .map(|redelegation_epoch| start != redelegation_epoch) + .unwrap_or(true) + || modified.validators_to_remove.is_empty() + { + for res in redelegated_bonds.at(&start).iter(storage).unwrap() { + let ( + NestedSubKey::Data { + key: validator, + nested_sub_key: SubKey::Data(epoch), + }, + amount, + ) = res.unwrap(); + rbonds + .entry(validator.clone()) + .or_default() + .insert(epoch, amount); } - } - None - }); - - let prefix = match source.as_ref() { - Some(source) => unbonds_for_source_prefix(source), - None => unbonds_prefix(), - }; - let mut raw_unbonds = storage_api::iter_prefix_bytes(storage, &prefix)? 
- .filter_map(|result| { - if let Ok((key, val_bytes)) = result { - if let Some((bond_id, start, withdraw)) = is_unbond_key(&key) { - if source.is_some() - && source.as_ref().unwrap() != &bond_id.source - { - return None; - } - if validator.is_some() - && validator.as_ref().unwrap() != &bond_id.validator + (start, rbonds) + } else { + for src_validator in &modified.validators_to_remove { + if modified + .validator_to_modify + .as_ref() + .map(|validator| src_validator != validator) + .unwrap_or(true) { - return None; - } - match (source.clone(), validator.clone()) { - (None, Some(validator)) => { - if bond_id.validator != validator { - return None; - } - } - (Some(owner), None) => { - if owner != bond_id.source { - return None; - } + let raw_bonds = + redelegated_bonds.at(&start).at(src_validator); + for res in raw_bonds.iter(storage).unwrap() { + let (bond_epoch, bond_amount) = res.unwrap(); + rbonds + .entry(src_validator.clone()) + .or_default() + .insert(bond_epoch, bond_amount); } - _ => {} - } - let amount: token::Amount = - BorshDeserialize::try_from_slice(&val_bytes).ok()?; - return Some((bond_id, start, withdraw, amount)); - } - } - None - }); - - let mut bonds_and_unbonds = - HashMap::, Vec)>::new(); - - raw_bonds.try_for_each(|(bond_id, start, change)| { - if !slashes_cache.contains_key(&bond_id.validator) { - let slashes = find_validator_slashes(storage, &bond_id.validator)?; - slashes_cache.insert(bond_id.validator.clone(), slashes); - } - let slashes = slashes_cache - .get(&bond_id.validator) - .expect("We must have inserted it if it's not cached already"); - let validator = bond_id.validator.clone(); - let (bonds, _unbonds) = bonds_and_unbonds.entry(bond_id).or_default(); - bonds.push(make_bond_details( - params, - &validator, - change, - start, - slashes, - &mut applied_slashes, - )); - Ok::<_, storage_api::Error>(()) - })?; - - raw_unbonds.try_for_each(|(bond_id, start, withdraw, amount)| { - if !slashes_cache.contains_key(&bond_id.validator) { - let slashes = find_validator_slashes(storage, &bond_id.validator)?; - slashes_cache.insert(bond_id.validator.clone(), slashes); - } - let slashes = slashes_cache - .get(&bond_id.validator) - .expect("We must have inserted it if it's not cached already"); - let validator = bond_id.validator.clone(); - let (_bonds, unbonds) = bonds_and_unbonds.entry(bond_id).or_default(); - unbonds.push(make_unbond_details( - params, - &validator, - amount, - (start, withdraw), - slashes, - &mut applied_slashes, - )); - Ok::<_, storage_api::Error>(()) - })?; - - Ok(bonds_and_unbonds - .into_iter() - .map(|(bond_id, (bonds, unbonds))| { - let details = BondsAndUnbondsDetail { - bonds, - unbonds, - slashes: applied_slashes - .get(&bond_id.validator) - .cloned() - .unwrap_or_default(), - }; - (bond_id, details) - }) - .collect()) -} - -fn find_bonds_and_unbonds_details( - storage: &S, - params: &PosParams, - source: Address, - validator: Address, -) -> storage_api::Result -where - S: StorageRead, -{ - let slashes = find_validator_slashes(storage, &validator)?; - let mut applied_slashes = HashMap::>::new(); - - let bonds = find_bonds(storage, &source, &validator)? - .into_iter() - .filter(|(_start, amount)| *amount > token::Amount::zero()) - .map(|(start, amount)| { - make_bond_details( - params, - &validator, - amount, - start, - &slashes, - &mut applied_slashes, - ) - }) - .collect(); - - let unbonds = find_unbonds(storage, &source, &validator)? 
- .into_iter() - .map(|(epoch_range, change)| { - make_unbond_details( - params, - &validator, - change, - epoch_range, - &slashes, - &mut applied_slashes, - ) + } else { + for bond_start in &modified.epochs_to_remove { + let cur_redel_bond_amount = redelegated_bonds + .at(&start) + .at(src_validator) + .get(storage, bond_start) + .unwrap() + .unwrap_or_default(); + let raw_bonds = rbonds + .entry(src_validator.clone()) + .or_default(); + if modified + .epoch_to_modify + .as_ref() + .map(|epoch| bond_start != epoch) + .unwrap_or(true) + { + raw_bonds + .insert(*bond_start, cur_redel_bond_amount); + } else { + raw_bonds.insert( + *bond_start, + cur_redel_bond_amount + - modified + .new_amount + // Safe unwrap - it shouldn't + // get to + // this if it's None + .unwrap(), + ); + } + } + } + } + (start, rbonds) + } }) .collect(); - let details = BondsAndUnbondsDetail { - bonds, - unbonds, - slashes: applied_slashes.get(&validator).cloned().unwrap_or_default(), - }; - let bond_id = BondId { source, validator }; - Ok(HashMap::from_iter([(bond_id, details)])) -} - -fn make_bond_details( - params: &PosParams, - validator: &Address, - deltas_sum: token::Amount, - start: Epoch, - slashes: &[Slash], - applied_slashes: &mut HashMap>, -) -> BondDetails { - let prev_applied_slashes = applied_slashes - .clone() - .get(validator) - .cloned() - .unwrap_or_default(); - - let mut slash_rates_by_epoch = BTreeMap::::new(); - - let validator_slashes = - applied_slashes.entry(validator.clone()).or_default(); - for slash in slashes { - if slash.epoch >= start { - let cur_rate = slash_rates_by_epoch.entry(slash.epoch).or_default(); - *cur_rate = cmp::min(Dec::one(), *cur_rate + slash.rate); - - if !prev_applied_slashes.iter().any(|s| s == slash) { - validator_slashes.push(slash.clone()); - } - } - } - - let slashed_amount = if slash_rates_by_epoch.is_empty() { - None - } else { - let amount_after_slashing = - get_slashed_amount(params, deltas_sum, &slash_rates_by_epoch) - .unwrap(); - Some(deltas_sum - amount_after_slashing) - }; - - BondDetails { - start, - amount: deltas_sum, - slashed_amount, - } + Ok(new_redelegated_unbonds) } -fn make_unbond_details( - params: &PosParams, - validator: &Address, - amount: token::Amount, - (start, withdraw): (Epoch, Epoch), - slashes: &[Slash], - applied_slashes: &mut HashMap>, -) -> UnbondDetails { - let prev_applied_slashes = applied_slashes - .clone() - .get(validator) - .cloned() - .unwrap_or_default(); - let mut slash_rates_by_epoch = BTreeMap::::new(); - - let validator_slashes = - applied_slashes.entry(validator.clone()).or_default(); - for slash in slashes { - if slash.epoch >= start - && slash.epoch - < withdraw - .checked_sub( - params.unbonding_len - + params.cubic_slashing_window_length, - ) - .unwrap_or_default() - { - let cur_rate = slash_rates_by_epoch.entry(slash.epoch).or_default(); - *cur_rate = cmp::min(Dec::one(), *cur_rate + slash.rate); - - if !prev_applied_slashes.iter().any(|s| s == slash) { - validator_slashes.push(slash.clone()); - } - } - } - - let slashed_amount = if slash_rates_by_epoch.is_empty() { - None - } else { - let amount_after_slashing = - get_slashed_amount(params, amount, &slash_rates_by_epoch).unwrap(); - Some(amount - amount_after_slashing) - }; - - UnbondDetails { - start, - withdraw, - amount, - slashed_amount, - } +/// Arguments to [`become_validator`]. +pub struct BecomeValidator<'a> { + /// Proof-of-stake parameters. + pub params: &'a PosParams, + /// The validator's address. 
+ pub address: &'a Address, + /// The validator's consensus key, used by Tendermint. + pub consensus_key: &'a common::PublicKey, + /// The validator's protocol key. + pub protocol_key: &'a common::PublicKey, + /// The validator's Ethereum bridge cold key. + pub eth_cold_key: &'a common::PublicKey, + /// The validator's Ethereum bridge hot key. + pub eth_hot_key: &'a common::PublicKey, + /// The numeric value of the current epoch. + pub current_epoch: Epoch, + /// Commission rate. + pub commission_rate: Dec, + /// Max commission rate change. + pub max_commission_rate_change: Dec, + /// Validator metadata + pub metadata: ValidatorMetaData, + /// Optional offset to use instead of pipeline offset + pub offset_opt: Option<u64>, } -/// Tally a running sum of the fraction of rewards owed to each validator in -/// the consensus set. This is used to keep track of the rewards due to each -/// consensus validator over the lifetime of an epoch. -pub fn log_block_rewards( +/// Initialize data for a new validator. +pub fn become_validator( storage: &mut S, - epoch: impl Into<Epoch>, - proposer_address: &Address, - votes: Vec<VoteInfo>, + args: BecomeValidator<'_>, ) -> storage_api::Result<()> where S: StorageRead + StorageWrite, { - // The votes correspond to the last committed block (n-1 if we are - // finalizing block n) - - let epoch: Epoch = epoch.into(); - let params = read_pos_params(storage)?; - let consensus_validators = consensus_validator_set_handle().at(&epoch); - - // Get total stake of the consensus validator set - let total_consensus_stake = - get_total_consensus_stake(storage, epoch, &params)?; - - // Get set of signing validator addresses and the combined stake of - // these signers - let mut signer_set: HashSet<Address>
= HashSet::new(); - let mut total_signing_stake = token::Amount::zero(); - for VoteInfo { - validator_address, - validator_vp, - } in votes - { - if validator_vp == 0 { - continue; - } - // Ensure that the validator is not currently jailed or other - let state = validator_state_handle(&validator_address) - .get(storage, epoch, ¶ms)?; - if state != Some(ValidatorState::Consensus) { - return Err(InflationError::ExpectedValidatorInConsensus( - validator_address, - state, - )) - .into_storage_result(); - } - - let stake_from_deltas = - read_validator_stake(storage, ¶ms, &validator_address, epoch)?; - - // Ensure TM stake updates properly with a debug_assert - if cfg!(debug_assertions) { - debug_assert_eq!( - into_tm_voting_power( - params.tm_votes_per_token, - stake_from_deltas, - ), - i64::try_from(validator_vp).unwrap_or_default(), - ); - } + let BecomeValidator { + params, + address, + consensus_key, + protocol_key, + eth_cold_key, + eth_hot_key, + current_epoch, + commission_rate, + max_commission_rate_change, + metadata, + offset_opt, + } = args; + let offset = offset_opt.unwrap_or(params.pipeline_len); - signer_set.insert(validator_address); - total_signing_stake += stake_from_deltas; + if !address.is_established() { + return Err(storage_api::Error::new_const( + "The given address {address} is not established. Only an \ + established address can become a validator.", + )); } - // Get the block rewards coefficients (proposing, signing/voting, - // consensus set status) - let rewards_calculator = PosRewardsCalculator { - proposer_reward: params.block_proposer_reward, - signer_reward: params.block_vote_reward, - signing_stake: total_signing_stake, - total_stake: total_consensus_stake, - }; - let coeffs = rewards_calculator - .get_reward_coeffs() - .map_err(InflationError::Rewards) - .into_storage_result()?; - tracing::debug!( - "PoS rewards coefficients {coeffs:?}, inputs: {rewards_calculator:?}." - ); - - // tracing::debug!( - // "TOTAL SIGNING STAKE (LOGGING BLOCK REWARDS) = {}", - // signing_stake - // ); - - // Compute the fractional block rewards for each consensus validator and - // update the reward accumulators - let consensus_stake_unscaled: Dec = total_consensus_stake.into(); - let signing_stake_unscaled: Dec = total_signing_stake.into(); - let mut values: HashMap = HashMap::new(); - for validator in consensus_validators.iter(storage)? { - let ( - NestedSubKey::Data { - key: stake, - nested_sub_key: _, - }, - address, - ) = validator?; - - if stake.is_zero() { - continue; - } - - let mut rewards_frac = Dec::zero(); - let stake_unscaled: Dec = stake.into(); - // tracing::debug!( - // "NAMADA VALIDATOR STAKE (LOGGING BLOCK REWARDS) OF EPOCH {} = - // {}", epoch, stake - // ); - - // Proposer reward - if address == *proposer_address { - rewards_frac += coeffs.proposer_coeff; - } - // Signer reward - if signer_set.contains(&address) { - let signing_frac = stake_unscaled / signing_stake_unscaled; - rewards_frac += coeffs.signer_coeff * signing_frac; - } - // Consensus validator reward - rewards_frac += coeffs.active_val_coeff - * (stake_unscaled / consensus_stake_unscaled); - - // To be added to the rewards accumulator - values.insert(address, rewards_frac); - } - for (address, value) in values.into_iter() { - // Update the rewards accumulator - rewards_accumulator_handle().update(storage, address, |prev| { - prev.unwrap_or_default() + value - })?; + if is_validator(storage, address)? 
{ + return Err(storage_api::Error::new_const( + "The given address is already a validator", + )); } - Ok(()) -} + // If the address is not yet a validator, it cannot have self-bonds, but it + // may have delegations. + if has_bonds(storage, address)? { + return Err(storage_api::Error::new_const( + "The given address has delegations and therefore cannot become a \ + validator. Unbond first.", + )); + } -#[derive(Clone, Debug)] -struct Rewards { - product: Dec, - commissions: token::Amount, -} + // This will fail if the key is already being used + try_insert_consensus_key(storage, consensus_key)?; -/// Update validator and delegators rewards products and mint the inflation -/// tokens into the PoS account. -/// Any left-over inflation tokens from rounding error of the sum of the -/// rewards is given to the governance address. -pub fn update_rewards_products_and_mint_inflation( - storage: &mut S, - params: &PosParams, - last_epoch: Epoch, - num_blocks_in_last_epoch: u64, - inflation: token::Amount, - staking_token: &Address, -) -> storage_api::Result<()> -where - S: StorageRead + StorageWrite, -{ - // Read the rewards accumulator and calculate the new rewards products - // for the previous epoch - let mut reward_tokens_remaining = inflation; - let mut new_rewards_products: HashMap = HashMap::new(); - let mut accumulators_sum = Dec::zero(); - for acc in rewards_accumulator_handle().iter(storage)? { - let (validator, value) = acc?; - accumulators_sum += value; - - // Get reward token amount for this validator - let fractional_claim = value / num_blocks_in_last_epoch; - let reward_tokens = fractional_claim * inflation; - - // Get validator stake at the last epoch - let stake = Dec::from(read_validator_stake( - storage, params, &validator, last_epoch, - )?); - - let commission_rate = validator_commission_rate_handle(&validator) - .get(storage, last_epoch, params)? - .expect("Should be able to find validator commission rate"); - - // Calculate the reward product from the whole validator stake and take - // out the commissions. Because we're using the whole stake to work with - // a single product, we're also taking out commission on validator's - // self-bonds, but it is then included in the rewards claimable by the - // validator so they get it back. - let product = - (Dec::one() - commission_rate) * Dec::from(reward_tokens) / stake; - - // Tally the commission tokens earned by the validator. 
- // TODO: think abt Dec rounding and if `new_product` should be used - // instead of `reward_tokens` - let commissions = commission_rate * reward_tokens; - - new_rewards_products.insert( - validator, - Rewards { - product, - commissions, - }, - ); + let pipeline_epoch = current_epoch + offset; + validator_addresses_handle() + .at(&pipeline_epoch) + .insert(storage, address.clone())?; - reward_tokens_remaining -= reward_tokens; - } - for ( - validator, - Rewards { - product, - commissions, - }, - ) in new_rewards_products - { - validator_rewards_products_handle(&validator) - .insert(storage, last_epoch, product)?; - // The commissions belong to the validator - add_rewards_to_counter(storage, &validator, &validator, commissions)?; - } + // Non-epoched validator data + write_validator_address_raw_hash(storage, address, consensus_key)?; + write_validator_max_commission_rate_change( + storage, + address, + max_commission_rate_change, + )?; + write_validator_metadata(storage, address, &metadata)?; - // Mint tokens to the PoS account for the last epoch's inflation - let pos_reward_tokens = inflation - reward_tokens_remaining; - tracing::info!( - "Minting tokens for PoS rewards distribution into the PoS account. \ - Amount: {}. Total inflation: {}, number of blocks in the last epoch: \ - {num_blocks_in_last_epoch}, reward accumulators sum: \ - {accumulators_sum}.", - pos_reward_tokens.to_string_native(), - inflation.to_string_native(), - ); - token::credit_tokens( + // Epoched validator data + validator_consensus_key_handle(address).set( + storage, + consensus_key.clone(), + current_epoch, + offset, + )?; + validator_protocol_key_handle(address).set( + storage, + protocol_key.clone(), + current_epoch, + offset, + )?; + validator_eth_hot_key_handle(address).set( + storage, + eth_hot_key.clone(), + current_epoch, + offset, + )?; + validator_eth_cold_key_handle(address).set( + storage, + eth_cold_key.clone(), + current_epoch, + offset, + )?; + validator_commission_rate_handle(address).set( + storage, + commission_rate, + current_epoch, + offset, + )?; + validator_deltas_handle(address).set( storage, - staking_token, - &address::POS, - pos_reward_tokens, + token::Change::zero(), + current_epoch, + offset, )?; - if reward_tokens_remaining > token::Amount::zero() { - tracing::info!( - "Minting tokens remaining from PoS rewards distribution into the \ - Governance account. Amount: {}.", - reward_tokens_remaining.to_string_native() - ); - token::credit_tokens( - storage, - staking_token, - &address::GOV, - reward_tokens_remaining, - )?; - } + // The validator's stake at initialization is 0, so its state is immediately + // below-threshold + validator_state_handle(address).set( + storage, + ValidatorState::BelowThreshold, + current_epoch, + offset, + )?; - // Clear validator rewards accumulators - storage.delete_prefix( - // The prefix of `rewards_accumulator_handle` - &storage::consensus_validator_rewards_accumulator_key(), + insert_validator_into_validator_set( + storage, + params, + address, + token::Amount::zero(), + current_epoch, + offset, )?; Ok(()) } -/// Calculate the cubic slashing rate using all slashes within a window around -/// the given infraction epoch. There is no cap on the rate applied within this -/// function. -pub fn compute_cubic_slash_rate( - storage: &S, - params: &PosParams, - infraction_epoch: Epoch, -) -> storage_api::Result -where - S: StorageRead, -{ - tracing::debug!( - "Computing the cubic slash rate for infraction epoch \ - {infraction_epoch}." 
- ); - let mut sum_vp_fraction = Dec::zero(); - let (start_epoch, end_epoch) = - params.cubic_slash_epoch_window(infraction_epoch); - - for epoch in Epoch::iter_bounds_inclusive(start_epoch, end_epoch) { - let consensus_stake = - Dec::from(get_total_consensus_stake(storage, epoch, params)?); - tracing::debug!( - "Total consensus stake in epoch {}: {}", - epoch, - consensus_stake - ); - let processing_epoch = epoch + params.slash_processing_epoch_offset(); - let slashes = enqueued_slashes_handle().at(&processing_epoch); - let infracting_stake = slashes.iter(storage)?.fold( - Ok(Dec::zero()), - |acc: storage_api::Result, res| { - let acc = acc?; - let ( - NestedSubKey::Data { - key: validator, - nested_sub_key: _, - }, - _slash, - ) = res?; - - let validator_stake = - read_validator_stake(storage, params, &validator, epoch)?; - // tracing::debug!("Val {} stake: {}", &validator, - // validator_stake); - - Ok(acc + Dec::from(validator_stake)) - }, - )?; - sum_vp_fraction += infracting_stake / consensus_stake; - } - let cubic_rate = - Dec::new(9, 0).unwrap() * sum_vp_fraction * sum_vp_fraction; - tracing::debug!("Cubic slash rate: {}", cubic_rate); - Ok(cubic_rate) -} - -/// Record a slash for a misbehavior that has been received from Tendermint and -/// then jail the validator, removing it from the validator set. The slash rate -/// will be computed at a later epoch. -#[allow(clippy::too_many_arguments)] -pub fn slash( +/// Consensus key change for a validator +pub fn change_consensus_key( storage: &mut S, - params: &PosParams, - current_epoch: Epoch, - evidence_epoch: Epoch, - evidence_block_height: impl Into, - slash_type: SlashType, validator: &Address, - validator_set_update_epoch: Epoch, + consensus_key: &common::PublicKey, + current_epoch: Epoch, ) -> storage_api::Result<()> where S: StorageRead + StorageWrite, { - let evidence_block_height: u64 = evidence_block_height.into(); - let slash = Slash { - epoch: evidence_epoch, - block_height: evidence_block_height, - r#type: slash_type, - rate: Dec::zero(), // Let the rate be 0 initially before processing - }; - // Need `+1` because we process at the beginning of a new epoch - let processing_epoch = - evidence_epoch + params.slash_processing_epoch_offset(); - - // Add the slash to the list of enqueued slashes to be processed at a later - // epoch - enqueued_slashes_handle() - .get_data_handler() - .at(&processing_epoch) - .at(validator) - .push(storage, slash)?; - - // Update the most recent slash (infraction) epoch for the validator - let last_slash_epoch = read_validator_last_slash_epoch(storage, validator)?; - if last_slash_epoch.is_none() - || evidence_epoch.0 > last_slash_epoch.unwrap_or_default().0 - { - write_validator_last_slash_epoch(storage, validator, evidence_epoch)?; + tracing::debug!("Changing consensus key for validator {}", validator); + + // Require that the new consensus key is an Ed25519 key + match consensus_key { + common::PublicKey::Ed25519(_) => {} + common::PublicKey::Secp256k1(_) => { + return Err(ConsensusKeyChangeError::MustBeEd25519.into()); + } } - // Jail the validator and update validator sets - jail_validator( + // Check for uniqueness of the consensus key + try_insert_consensus_key(storage, consensus_key)?; + + // Set the new consensus key at the pipeline epoch + let params = read_pos_params(storage)?; + validator_consensus_key_handle(validator).set( storage, - params, - validator, + consensus_key.clone(), current_epoch, - validator_set_update_epoch, + params.pipeline_len, )?; - // No other actions are 
performed here until the epoch in which the slash is - // processed. + // Write validator's new raw hash + write_validator_address_raw_hash(storage, validator, consensus_key)?; Ok(()) } -/// Process enqueued slashes that were discovered earlier. This function is -/// called upon a new epoch. The final slash rate considering according to the -/// cubic slashing rate is computed. Then, each slash is recorded in storage -/// along with its computed rate, and stake is deducted from the affected -/// validators. -pub fn process_slashes( +/// Withdraw tokens from those that have been unbonded from proof-of-stake +pub fn withdraw_tokens( storage: &mut S, + source: Option<&Address>, + validator: &Address, current_epoch: Epoch, -) -> storage_api::Result<()> +) -> storage_api::Result where S: StorageRead + StorageWrite, { let params = read_pos_params(storage)?; + let source = source.unwrap_or(validator); - if current_epoch.0 < params.slash_processing_epoch_offset() { - return Ok(()); - } - let infraction_epoch = - current_epoch - params.slash_processing_epoch_offset(); + tracing::debug!("Withdrawing tokens in epoch {current_epoch}"); + tracing::debug!("Source {} --> Validator {}", source, validator); - // Slashes to be processed in the current epoch - let enqueued_slashes = enqueued_slashes_handle().at(¤t_epoch); - if enqueued_slashes.is_empty(storage)? { - return Ok(()); - } - tracing::debug!( - "Processing slashes at the beginning of epoch {} (committed in epoch \ - {})", - current_epoch, - infraction_epoch - ); + let unbond_handle: Unbonds = unbond_handle(source, validator); + let redelegated_unbonds = + delegator_redelegated_unbonds_handle(source).at(validator); - // Compute the cubic slash rate - let cubic_slash_rate = - compute_cubic_slash_rate(storage, ¶ms, infraction_epoch)?; + // Check that there are unbonded tokens available for withdrawal + if unbond_handle.is_empty(storage)? { + return Err(WithdrawError::NoUnbondFound(BondId { + source: source.clone(), + validator: validator.clone(), + }) + .into()); + } - // Collect the enqueued slashes and update their rates - let mut eager_validator_slashes: BTreeMap> = - BTreeMap::new(); - let mut eager_validator_slash_rates: HashMap = HashMap::new(); + let mut unbonds_and_redelegated_unbonds: BTreeMap< + (Epoch, Epoch), + (token::Amount, EagerRedelegatedBondsMap), + > = BTreeMap::new(); - // `slashPerValidator` and `slashesMap` while also updating in storage - for enqueued_slash in enqueued_slashes.iter(storage)? { + for unbond in unbond_handle.iter(storage)? 
{ let ( NestedSubKey::Data { - key: validator, - nested_sub_key: _, + key: start_epoch, + nested_sub_key: SubKey::Data(withdraw_epoch), }, - enqueued_slash, - ) = enqueued_slash?; - debug_assert_eq!(enqueued_slash.epoch, infraction_epoch); - - let slash_rate = cmp::min( - Dec::one(), - cmp::max( - enqueued_slash.r#type.get_slash_rate(¶ms), - cubic_slash_rate, - ), - ); - let updated_slash = Slash { - epoch: enqueued_slash.epoch, - block_height: enqueued_slash.block_height, - r#type: enqueued_slash.r#type, - rate: slash_rate, - }; + amount, + ) = unbond?; - let cur_slashes = eager_validator_slashes - .entry(validator.clone()) - .or_default(); - cur_slashes.push(updated_slash); - let cur_rate = - eager_validator_slash_rates.entry(validator).or_default(); - *cur_rate = cmp::min(Dec::one(), *cur_rate + slash_rate); - } + // Logging + tracing::debug!( + "Unbond delta ({start_epoch}..{withdraw_epoch}), amount {}", + amount.to_string_native() + ); + // Consider only unbonds that are eligible to be withdrawn + if withdraw_epoch > current_epoch { + tracing::debug!( + "Not yet withdrawable until epoch {withdraw_epoch}" + ); + continue; + } - // Update the epochs of enqueued slashes in storage - enqueued_slashes_handle().update_data(storage, ¶ms, current_epoch)?; + let mut eager_redelegated_unbonds = EagerRedelegatedBondsMap::default(); + let matching_redelegated_unbonds = + redelegated_unbonds.at(&start_epoch).at(&withdraw_epoch); + for ub in matching_redelegated_unbonds.iter(storage)? { + let ( + NestedSubKey::Data { + key: address, + nested_sub_key: SubKey::Data(epoch), + }, + amount, + ) = ub?; + eager_redelegated_unbonds + .entry(address) + .or_default() + .entry(epoch) + .or_insert(amount); + } - // `resultSlashing` - let mut map_validator_slash: EagerRedelegatedBondsMap = BTreeMap::new(); - for (validator, slash_rate) in eager_validator_slash_rates { - process_validator_slash( - storage, - ¶ms, - &validator, - slash_rate, - current_epoch, - &mut map_validator_slash, - )?; + unbonds_and_redelegated_unbonds.insert( + (start_epoch, withdraw_epoch), + (amount, eager_redelegated_unbonds), + ); } - tracing::debug!("Slashed amounts for validators: {map_validator_slash:#?}"); - // Now update the remaining parts of storage + let slashes = find_validator_slashes(storage, validator)?; - // Write slashes themselves into storage - for (validator, slashes) in eager_validator_slashes { - let validator_slashes = validator_slashes_handle(&validator); - for slash in slashes { - validator_slashes.push(storage, slash)?; - } - } + // `val resultSlashing` + let result_slashing = compute_amount_after_slashing_withdraw( + storage, + ¶ms, + &unbonds_and_redelegated_unbonds, + slashes, + )?; - // Update the validator stakes - for (validator, slash_amounts) in map_validator_slash { - let mut slash_acc = token::Amount::zero(); + let withdrawable_amount = result_slashing.sum; + tracing::debug!( + "Withdrawing total {}", + withdrawable_amount.to_string_native() + ); - // Update validator sets first because it needs to be able to read - // validator stake before we make any changes to it - for (&epoch, &slash_amount) in &slash_amounts { - let state = validator_state_handle(&validator) - .get(storage, epoch, ¶ms)? 
- .unwrap(); - if state != ValidatorState::Jailed { - update_validator_set( - storage, - ¶ms, - &validator, - -slash_amount.change(), - epoch, - Some(0), - )?; - } - } - // Then update validator and total deltas - for (epoch, slash_amount) in slash_amounts { - let slash_delta = slash_amount - slash_acc; - slash_acc += slash_delta; + // `updateDelegator` with `unbonded` and `redelegeatedUnbonded` + for ((start_epoch, withdraw_epoch), _unbond_and_redelegations) in + unbonds_and_redelegated_unbonds + { + tracing::debug!("Remove ({start_epoch}..{withdraw_epoch}) from unbond"); + unbond_handle + .at(&start_epoch) + .remove(storage, &withdraw_epoch)?; + redelegated_unbonds + .at(&start_epoch) + .remove_all(storage, &withdraw_epoch)?; - update_validator_deltas( - storage, - ¶ms, - &validator, - -slash_delta.change(), - epoch, - Some(0), - )?; - update_total_deltas( - storage, - ¶ms, - -slash_delta.change(), - epoch, - Some(0), - )?; + if unbond_handle.at(&start_epoch).is_empty(storage)? { + unbond_handle.remove_all(storage, &start_epoch)?; + } + if redelegated_unbonds.at(&start_epoch).is_empty(storage)? { + redelegated_unbonds.remove_all(storage, &start_epoch)?; } - - // TODO: should we clear some storage here as is done in Quint?? - // Possibly make the `unbonded` LazyMaps epoched so that it is done - // automatically? } - Ok(()) + // Transfer the withdrawable tokens from the PoS address back to the source + let staking_token = staking_token_address(storage); + token::transfer( + storage, + &staking_token, + &ADDRESS, + source, + withdrawable_amount, + )?; + + // TODO: Transfer the slashed tokens from the PoS address to the Slash Pool + // address + // token::transfer( + // storage, + // &staking_token, + // &ADDRESS, + // &SLASH_POOL_ADDRESS, + // total_slashed, + // )?; + + Ok(withdrawable_amount) } -/// Process a slash by (i) slashing the misbehaving validator; and (ii) any -/// validator to which it has redelegated some tokens and the slash misbehaving -/// epoch is within the redelegation slashing window. -/// -/// `validator` - the misbehaving validator. -/// `slash_rate` - the slash rate. -/// `slashed_amounts_map` - a map from validator address to a map from epoch to -/// already processed slash amounts. -/// -/// Adds any newly processed slash amount of any involved validator to -/// `slashed_amounts_map`. -// Quint `processSlash` -fn process_validator_slash( +/// Change the commission rate of a validator +pub fn change_validator_commission_rate( storage: &mut S, - params: &PosParams, validator: &Address, - slash_rate: Dec, + new_rate: Dec, current_epoch: Epoch, - slashed_amount_map: &mut EagerRedelegatedBondsMap, ) -> storage_api::Result<()> where S: StorageRead + StorageWrite, { - // `resultSlashValidator - let result_slash = slash_validator( - storage, - params, - validator, - slash_rate, - current_epoch, - &slashed_amount_map - .get(validator) - .cloned() - .unwrap_or_default(), - )?; - - // `updatedSlashedAmountMap` - let validator_slashes = - slashed_amount_map.entry(validator.clone()).or_default(); - *validator_slashes = result_slash; - - // `outgoingRedelegation` - let outgoing_redelegations = - validator_outgoing_redelegations_handle(validator); - - // Final loop in `processSlash` - let dest_validators = outgoing_redelegations - .iter(storage)? 
- .map(|res| { - let ( - NestedSubKey::Data { - key: dest_validator, - nested_sub_key: _, - }, - _redelegation, - ) = res?; - Ok(dest_validator) - }) - .collect::>>()?; - - for dest_validator in dest_validators { - let to_modify = slashed_amount_map - .entry(dest_validator.clone()) - .or_default(); - - tracing::debug!( - "Slashing {} redelegation to {}", - validator, - &dest_validator - ); + if new_rate.is_negative() { + return Err(CommissionRateChangeError::NegativeRate( + new_rate, + validator.clone(), + ) + .into()); + } - // `slashValidatorRedelegation` - slash_validator_redelegation( - storage, - params, - validator, - current_epoch, - &outgoing_redelegations.at(&dest_validator), - &validator_slashes_handle(validator), - &validator_total_redelegated_unbonded_handle(&dest_validator), - slash_rate, - to_modify, - )?; + if new_rate > Dec::one() { + return Err(CommissionRateChangeError::LargerThanOne( + new_rate, + validator.clone(), + ) + .into()); } - Ok(()) -} + let max_change = + read_validator_max_commission_rate_change(storage, validator)?; + if max_change.is_none() { + return Err(CommissionRateChangeError::NoMaxSetInStorage( + validator.clone(), + ) + .into()); + } -/// In the context of a redelegation, the function computes how much a validator -/// (the destination validator of the redelegation) should be slashed due to the -/// misbehaving of a second validator (the source validator of the -/// redelegation). The function computes how much the validator would be -/// slashed at all epochs between the current epoch (curEpoch) + 1 and the -/// current epoch + 1 + PIPELINE_OFFSET, accounting for any tokens of the -/// redelegation already unbonded. -/// -/// - `src_validator` - the source validator -/// - `outgoing_redelegations` - a map from pair of epochs to int that includes -/// all the redelegations from the source validator to the destination -/// validator. -/// - The outer key is epoch at which the bond started at the source -/// validator. -/// - The inner key is epoch at which the redelegation started (the epoch at -/// which was issued). -/// - `slashes` a list of slashes of the source validator. -/// - `dest_total_redelegated_unbonded` - a map of unbonded redelegated tokens -/// at the destination validator. -/// - `slash_rate` - the rate of the slash being processed. -/// - `dest_slashed_amounts` - a map from epoch to already processed slash -/// amounts. -/// -/// Adds any newly processed slash amount to `dest_slashed_amounts`. -#[allow(clippy::too_many_arguments)] -fn slash_validator_redelegation( - storage: &S, - params: &OwnedPosParams, - src_validator: &Address, - current_epoch: Epoch, - outgoing_redelegations: &NestedMap>, - slashes: &Slashes, - dest_total_redelegated_unbonded: &TotalRedelegatedUnbonded, - slash_rate: Dec, - dest_slashed_amounts: &mut BTreeMap, -) -> storage_api::Result<()> -where - S: StorageRead, -{ - let infraction_epoch = - current_epoch - params.slash_processing_epoch_offset(); + let params = read_pos_params(storage)?; + let commission_handle = validator_commission_rate_handle(validator); + let pipeline_epoch = current_epoch + params.pipeline_len; - for res in outgoing_redelegations.iter(storage)? { - let ( - NestedSubKey::Data { - key: bond_start, - nested_sub_key: SubKey::Data(redel_start), - }, - amount, - ) = res?; + let rate_at_pipeline = commission_handle + .get(storage, pipeline_epoch, ¶ms)? 
+ .expect("Could not find a rate in given epoch"); + if new_rate == rate_at_pipeline { + return Ok(()); + } + let rate_before_pipeline = commission_handle + .get(storage, pipeline_epoch.prev(), ¶ms)? + .expect("Could not find a rate in given epoch"); - if params.in_redelegation_slashing_window( - infraction_epoch, - redel_start, - params.redelegation_end_epoch_from_start(redel_start), - ) && bond_start <= infraction_epoch - { - slash_redelegation( - storage, - params, - amount, - bond_start, - params.redelegation_end_epoch_from_start(redel_start), - src_validator, - current_epoch, - slashes, - dest_total_redelegated_unbonded, - slash_rate, - dest_slashed_amounts, - )?; - } + let change_from_prev = new_rate.abs_diff(&rate_before_pipeline); + if change_from_prev > max_change.unwrap() { + return Err(CommissionRateChangeError::RateChangeTooLarge( + change_from_prev, + validator.clone(), + ) + .into()); } - Ok(()) + commission_handle.set(storage, new_rate, current_epoch, params.pipeline_len) } -#[allow(clippy::too_many_arguments)] -fn slash_redelegation( +/// Get the total bond amount, including slashes, for a given bond ID and epoch. +/// Returns the bond amount after slashing. For future epochs the value is +/// subject to change. +pub fn bond_amount( storage: &S, - params: &OwnedPosParams, - amount: token::Amount, - bond_start: Epoch, - redel_bond_start: Epoch, - src_validator: &Address, - current_epoch: Epoch, - slashes: &Slashes, - total_redelegated_unbonded: &TotalRedelegatedUnbonded, - slash_rate: Dec, - slashed_amounts: &mut BTreeMap, -) -> storage_api::Result<()> + bond_id: &BondId, + epoch: Epoch, +) -> storage_api::Result where S: StorageRead, { - tracing::debug!( - "\nSlashing redelegation amount {} - bond start {} and \ - redel_bond_start {} - at rate {}\n", - amount.to_string_native(), - bond_start, - redel_bond_start, - slash_rate - ); + let params = read_pos_params(storage)?; + // Outer key is the start epoch used to calculate slashes. The inner + // keys are discarded after applying slashes. + let mut amounts: BTreeMap = BTreeMap::default(); - let infraction_epoch = - current_epoch - params.slash_processing_epoch_offset(); - - // Slash redelegation destination validator from the next epoch only - // as they won't be jailed - let set_update_epoch = current_epoch.next(); - - let mut init_tot_unbonded = - Epoch::iter_bounds_inclusive(infraction_epoch.next(), set_update_epoch) - .map(|epoch| { - let redelegated_unbonded = total_redelegated_unbonded - .at(&epoch) - .at(&redel_bond_start) - .at(src_validator) - .get(storage, &bond_start)? - .unwrap_or_default(); - Ok(redelegated_unbonded) - }) - .sum::>()?; + // Bonds + let bonds = + bond_handle(&bond_id.source, &bond_id.validator).get_data_handler(); + for next in bonds.iter(storage)? { + let (start, delta) = next?; + if start <= epoch { + let amount = amounts.entry(start).or_default(); + *amount += delta; + } + } - for epoch in Epoch::iter_range(set_update_epoch, params.pipeline_len) { - let updated_total_unbonded = { - let redelegated_unbonded = total_redelegated_unbonded - .at(&epoch) - .at(&redel_bond_start) - .at(src_validator) - .get(storage, &bond_start)? - .unwrap_or_default(); - init_tot_unbonded + redelegated_unbonded - }; + // Add unbonds that are still contributing to stake + let unbonds = unbond_handle(&bond_id.source, &bond_id.validator); + for next in unbonds.iter(storage)? 
{ + let ( + NestedSubKey::Data { + key: start, + nested_sub_key: SubKey::Data(withdrawable_epoch), + }, + delta, + ) = next?; + // This is the first epoch in which the unbond stops contributing to + // voting power + let end = withdrawable_epoch - params.withdrawable_epoch_offset() + + params.pipeline_len; - let list_slashes = slashes - .iter(storage)? - .map(Result::unwrap) - .filter(|slash| { - params.in_redelegation_slashing_window( - slash.epoch, - params.redelegation_start_epoch_from_end(redel_bond_start), - redel_bond_start, - ) && bond_start <= slash.epoch - && slash.epoch + params.slash_processing_epoch_offset() - // We're looking for slashes that were processed before or in the epoch - // in which slashes that are currently being processed - // occurred. Because we're slashing in the beginning of an - // epoch, we're also taking slashes that were processed in - // the infraction epoch as they would still be processed - // before any infraction occurred. - <= infraction_epoch - }) - .collect::>(); + if start <= epoch && end > epoch { + let amount = amounts.entry(start).or_default(); + *amount += delta; + } + } - let slashable_amount = amount - .checked_sub(updated_total_unbonded) - .unwrap_or_default(); + if bond_id.validator != bond_id.source { + // Add outgoing redelegations that are still contributing to the source + // validator's stake + let redelegated_bonds = + delegator_redelegated_bonds_handle(&bond_id.source); + for res in redelegated_bonds.iter(storage)? { + let ( + NestedSubKey::Data { + key: _dest_validator, + nested_sub_key: + NestedSubKey::Data { + key: end, + nested_sub_key: + NestedSubKey::Data { + key: src_validator, + nested_sub_key: SubKey::Data(start), + }, + }, + }, + delta, + ) = res?; + if src_validator == bond_id.validator + && start <= epoch + && end > epoch + { + let amount = amounts.entry(start).or_default(); + *amount += delta; + } + } - let slashed = - apply_list_slashes(params, &list_slashes, slashable_amount) - .mul_ceil(slash_rate); + // Add outgoing redelegation unbonds that are still contributing to + // the source validator's stake + let redelegated_unbonds = + delegator_redelegated_unbonds_handle(&bond_id.source); + for res in redelegated_unbonds.iter(storage)? { + let ( + NestedSubKey::Data { + key: _dest_validator, + nested_sub_key: + NestedSubKey::Data { + key: redelegation_epoch, + nested_sub_key: + NestedSubKey::Data { + key: _withdraw_epoch, + nested_sub_key: + NestedSubKey::Data { + key: src_validator, + nested_sub_key: SubKey::Data(start), + }, + }, + }, + }, + delta, + ) = res?; + if src_validator == bond_id.validator + // If the unbonded bond was redelegated after this epoch ... + && redelegation_epoch > epoch + // ... the start was before or at this epoch + && start <= epoch + { + let amount = amounts.entry(start).or_default(); + *amount += delta; + } + } + } - let list_slashes = slashes - .iter(storage)? 
- .map(Result::unwrap) - .filter(|slash| { - params.in_redelegation_slashing_window( - slash.epoch, - params.redelegation_start_epoch_from_end(redel_bond_start), - redel_bond_start, - ) && bond_start <= slash.epoch - }) - .collect::>(); + if !amounts.is_empty() { + let slashes = find_validator_slashes(storage, &bond_id.validator)?; - let slashable_stake = - apply_list_slashes(params, &list_slashes, slashable_amount) - .mul_ceil(slash_rate); + // Apply slashes + for (&start, amount) in amounts.iter_mut() { + let list_slashes = slashes + .iter() + .filter(|slash| { + let processing_epoch = + slash.epoch + params.slash_processing_epoch_offset(); + // Only use slashes that were processed before or at the + // epoch associated with the bond amount. This assumes + // that slashes are applied before inflation. + processing_epoch <= epoch && start <= slash.epoch + }) + .cloned() + .collect::>(); - init_tot_unbonded = updated_total_unbonded; - let to_slash = cmp::min(slashed, slashable_stake); - if !to_slash.is_zero() { - let map_value = slashed_amounts.entry(epoch).or_default(); - *map_value += to_slash; + *amount = apply_list_slashes(¶ms, &list_slashes, *amount); } } - Ok(()) + Ok(amounts.values().cloned().sum()) } -/// Computes for a given validator and a slash how much should be slashed at all -/// epochs between the currentÃ¥ epoch (curEpoch) + 1 and the current epoch + 1 + -/// PIPELINE_OFFSET, accounting for any tokens already unbonded. -/// -/// - `validator` - the misbehaving validator. -/// - `slash_rate` - the rate of the slash being processed. -/// - `slashed_amounts_map` - a map from epoch to already processed slash -/// amounts. -/// -/// Returns a map that adds any newly processed slash amount to -/// `slashed_amounts_map`. -// `def slashValidator` -fn slash_validator( +/// Get bond amounts within the `claim_start..=claim_end` epoch range for +/// claiming rewards for a given bond ID. Returns a map of bond amounts +/// associated with every epoch within the given epoch range (accumulative) in +/// which an amount contributed to the validator's stake. +/// This function will only consider slashes that were processed before or at +/// the epoch in which we're calculating the bond amount to correspond to the +/// validator stake that was used to calculate reward products (slashes do *not* +/// retrospectively affect the rewards calculated before slash processing). +pub fn bond_amounts_for_rewards( storage: &S, - params: &OwnedPosParams, - validator: &Address, - slash_rate: Dec, - current_epoch: Epoch, - slashed_amounts_map: &BTreeMap, + bond_id: &BondId, + claim_start: Epoch, + claim_end: Epoch, ) -> storage_api::Result> where S: StorageRead, { - tracing::debug!("Slashing validator {} at rate {}", validator, slash_rate); - let infraction_epoch = - current_epoch - params.slash_processing_epoch_offset(); - - let total_unbonded = total_unbonded_handle(validator); - let total_redelegated_unbonded = - validator_total_redelegated_unbonded_handle(validator); - let total_bonded = total_bonded_handle(validator); - let total_redelegated_bonded = - validator_total_redelegated_bonded_handle(validator); - - let mut slashed_amounts = slashed_amounts_map.clone(); + let params = read_pos_params(storage)?; + // Outer key is every epoch in which the a bond amount contributed to stake + // and the inner key is the start epoch used to calculate slashes. The inner + // keys are discarded after applying slashes. 
+ let mut amounts: BTreeMap> = + BTreeMap::default(); - let mut tot_bonds = total_bonded - .get_data_handler() - .iter(storage)? - .map(Result::unwrap) - .filter(|&(epoch, bonded)| { - epoch <= infraction_epoch && bonded > 0.into() - }) - .collect::>(); - - let mut redelegated_bonds = tot_bonds - .keys() - .filter(|&epoch| { - !total_redelegated_bonded - .at(epoch) - .is_empty(storage) - .unwrap() - }) - .map(|epoch| { - let tot_redel_bonded = total_redelegated_bonded - .at(epoch) - .collect_map(storage) - .unwrap(); - (*epoch, tot_redel_bonded) - }) - .collect::>(); - - let mut sum = token::Amount::zero(); - - let eps = current_epoch - .iter_range(params.pipeline_len) - .collect::>(); - for epoch in eps.into_iter().rev() { - let amount = tot_bonds.iter().fold( - token::Amount::zero(), - |acc, (bond_start, bond_amount)| { - acc + compute_slash_bond_at_epoch( - storage, - params, - validator, - epoch, - infraction_epoch, - *bond_start, - *bond_amount, - redelegated_bonds.get(bond_start), - slash_rate, - ) - .unwrap() - }, - ); + // Only need to do bonds since rewwards are accumulated during + // `unbond_tokens` + let bonds = + bond_handle(&bond_id.source, &bond_id.validator).get_data_handler(); + for next in bonds.iter(storage)? { + let (start, delta) = next?; - let new_bonds = total_unbonded.at(&epoch); - tot_bonds = new_bonds - .collect_map(storage) - .unwrap() - .into_iter() - .filter(|(ep, _)| *ep <= infraction_epoch) - .collect::>(); - - let new_redelegated_bonds = tot_bonds - .keys() - .filter(|&ep| { - !total_redelegated_unbonded.at(ep).is_empty(storage).unwrap() - }) - .map(|ep| { - ( - *ep, - total_redelegated_unbonded - .at(&epoch) - .at(ep) - .collect_map(storage) - .unwrap(), - ) - }) - .collect::>(); + for ep in Epoch::iter_bounds_inclusive(claim_start, claim_end) { + // A bond that wasn't unbonded is added to all epochs up to + // `claim_end` + if start <= ep { + let amount = + amounts.entry(ep).or_default().entry(start).or_default(); + *amount += delta; + } + } + } - redelegated_bonds = new_redelegated_bonds; + if !amounts.is_empty() { + let slashes = find_validator_slashes(storage, &bond_id.validator)?; + let redelegated_bonded = + delegator_redelegated_bonds_handle(&bond_id.source) + .at(&bond_id.validator); - // `newSum` - sum += amount; + // Apply slashes + for (&ep, amounts) in amounts.iter_mut() { + for (&start, amount) in amounts.iter_mut() { + let list_slashes = slashes + .iter() + .filter(|slash| { + let processing_epoch = slash.epoch + + params.slash_processing_epoch_offset(); + // Only use slashes that were processed before or at the + // epoch associated with the bond amount. This assumes + // that slashes are applied before inflation. + processing_epoch <= ep && start <= slash.epoch + }) + .cloned() + .collect::>(); - // `newSlashesMap` - let cur = slashed_amounts.entry(epoch).or_default(); - *cur += sum; - } - // Hack - should this be done differently? (think this is safe) - let pipeline_epoch = current_epoch + params.pipeline_len; - let last_amt = slashed_amounts - .get(&pipeline_epoch.prev()) - .cloned() - .unwrap(); - slashed_amounts.insert(pipeline_epoch, last_amt); + let slash_epoch_filter = + |e: Epoch| e + params.slash_processing_epoch_offset() <= ep; - Ok(slashed_amounts) -} + let redelegated_bonds = + redelegated_bonded.at(&start).collect_map(storage)?; -/// Get the remaining token amount in a bond after applying a set of slashes. -/// -/// - `validator` - the bond's validator -/// - `epoch` - the latest slash epoch to consider. 
-/// - `start` - the start epoch of the bond -/// - `redelegated_bonds` -fn compute_bond_at_epoch( - storage: &S, - params: &OwnedPosParams, - validator: &Address, - epoch: Epoch, - start: Epoch, - amount: token::Amount, - redelegated_bonds: Option<&EagerRedelegatedBondsMap>, -) -> storage_api::Result -where - S: StorageRead, -{ - let list_slashes = validator_slashes_handle(validator) - .iter(storage)? - .map(Result::unwrap) - .filter(|slash| { - start <= slash.epoch - && slash.epoch + params.slash_processing_epoch_offset() <= epoch - }) - .collect::>(); + let result_fold = fold_and_slash_redelegated_bonds( + storage, + ¶ms, + &redelegated_bonds, + start, + &list_slashes, + slash_epoch_filter, + ); - let slash_epoch_filter = - |e: Epoch| e + params.slash_processing_epoch_offset() <= epoch; + let total_not_redelegated = + *amount - result_fold.total_redelegated; - let result_fold = redelegated_bonds - .map(|redelegated_bonds| { - fold_and_slash_redelegated_bonds( - storage, - params, - redelegated_bonds, - start, - &list_slashes, - slash_epoch_filter, - ) - }) - .unwrap_or_default(); + let after_not_redelegated = apply_list_slashes( + ¶ms, + &list_slashes, + total_not_redelegated, + ); - let total_not_redelegated = amount - result_fold.total_redelegated; - let after_not_redelegated = - apply_list_slashes(params, &list_slashes, total_not_redelegated); + *amount = + after_not_redelegated + result_fold.total_after_slashing; + } + } + } - Ok(after_not_redelegated + result_fold.total_after_slashing) + Ok(amounts + .into_iter() + // Flatten the inner maps to discard bond start epochs + .map(|(ep, amounts)| (ep, amounts.values().cloned().sum())) + .collect()) } -/// Uses `fn compute_bond_at_epoch` to compute the token amount to slash in -/// order to prevent overslashing. -#[allow(clippy::too_many_arguments)] -fn compute_slash_bond_at_epoch( +/// Get the genesis consensus validators stake and consensus key for Tendermint, +/// converted from [`ValidatorSetUpdate`]s using the given function. +pub fn genesis_validator_set_tendermint( storage: &S, - params: &OwnedPosParams, - validator: &Address, - epoch: Epoch, - infraction_epoch: Epoch, - bond_start: Epoch, - bond_amount: token::Amount, - redelegated_bonds: Option<&EagerRedelegatedBondsMap>, - slash_rate: Dec, -) -> storage_api::Result + params: &PosParams, + current_epoch: Epoch, + mut f: impl FnMut(ValidatorSetUpdate) -> T, +) -> storage_api::Result> where S: StorageRead, { - let amount_due = compute_bond_at_epoch( - storage, - params, - validator, - infraction_epoch, - bond_start, - bond_amount, - redelegated_bonds, - )? - .mul_ceil(slash_rate); - let slashable_amount = compute_bond_at_epoch( - storage, - params, - validator, - epoch, - bond_start, - bond_amount, - redelegated_bonds, - )?; - Ok(cmp::min(amount_due, slashable_amount)) + let consensus_validator_handle = + consensus_validator_set_handle().at(¤t_epoch); + let iter = consensus_validator_handle.iter(storage)?; + + iter.map(|validator| { + let ( + NestedSubKey::Data { + key: new_stake, + nested_sub_key: _, + }, + address, + ) = validator?; + let consensus_key = validator_consensus_key_handle(&address) + .get(storage, current_epoch, params)? + .unwrap(); + let converted = f(ValidatorSetUpdate::Consensus(ConsensusValidator { + consensus_key, + bonded_stake: new_stake, + })); + Ok(converted) + }) + .collect() } /// Unjail a validator that is currently jailed. 
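The `bond_amount` and `bond_amounts_for_rewards` additions above both reduce to the same pattern: collect per-start-epoch bond deltas, then apply only those slashes whose infraction epoch is at or after the bond start and whose processing epoch is at or before the queried epoch. A minimal standalone sketch of that filtering follows; it uses plain `u64`/`f64` stand-ins for `Epoch`, `token::Amount` and `Dec` and is illustrative only, not the crate's API (the real code additionally folds redelegated bonds via `fold_and_slash_redelegated_bonds`).

// Illustrative sketch with simplified types (not the crate's API).
#[derive(Clone, Copy)]
struct SlashLite {
    infraction_epoch: u64,
    processing_epoch: u64,
    rate: f64,
}

// Apply, in order, every slash whose infraction is at or after the bond start
// and that was processed at or before the queried epoch, mirroring the filter
// used in `bond_amount` above.
fn slashed_bond_amount(
    bond_start: u64,
    query_epoch: u64,
    mut amount: f64,
    slashes: &[SlashLite],
) -> f64 {
    for s in slashes {
        if s.infraction_epoch >= bond_start && s.processing_epoch <= query_epoch {
            amount -= amount * s.rate.min(1.0);
        }
    }
    amount
}

fn main() {
    let slashes = [
        SlashLite { infraction_epoch: 5, processing_epoch: 7, rate: 0.01 },
        SlashLite { infraction_epoch: 9, processing_epoch: 11, rate: 0.05 },
    ];
    // A bond that started in epoch 6 is only affected by the second slash.
    let remaining = slashed_bond_amount(6, 12, 1_000.0, &slashes);
    assert!((remaining - 950.0).abs() < 1e-9);
}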
@@ -5240,36 +1922,11 @@ pub fn get_total_consensus_stake( where S: StorageRead, { - total_consensus_stake_key_handle() + total_consensus_stake_handle() .get(storage, epoch, params) .map(|o| o.expect("Total consensus stake could not be retrieved.")) } -/// Find slashes applicable to a validator with inclusive `start` and exclusive -/// `end` epoch. -#[allow(dead_code)] -fn find_slashes_in_range( - storage: &S, - start: Epoch, - end: Option, - validator: &Address, -) -> storage_api::Result> -where - S: StorageRead, -{ - let mut slashes = BTreeMap::::new(); - for slash in validator_slashes_handle(validator).iter(storage)? { - let slash = slash?; - if start <= slash.epoch - && end.map(|end| slash.epoch < end).unwrap_or(true) - { - let cur_rate = slashes.entry(slash.epoch).or_default(); - *cur_rate = cmp::min(*cur_rate + slash.rate, Dec::one()); - } - } - Ok(slashes) -} - /// Redelegate bonded tokens from a source validator to a destination validator pub fn redelegate_tokens( storage: &mut S, @@ -5504,35 +2161,32 @@ where } }; - let pipeline_stake = - read_validator_stake(storage, ¶ms, validator, pipeline_epoch)?; - // Remove the validator from the validator set. If it is in the consensus // set, promote the next validator. match pipeline_state { - ValidatorState::Consensus => deactivate_consensus_validator( - storage, - validator, - pipeline_epoch, - pipeline_stake, - )?, + ValidatorState::Consensus => { + // Remove from the consensus set first + remove_consensus_validator( + storage, + ¶ms, + pipeline_epoch, + validator, + )?; + + // Promote the next below-capacity validator to consensus + promote_next_below_capacity_validator_to_consensus( + storage, + pipeline_epoch, + )?; + } ValidatorState::BelowCapacity => { - let below_capacity_set = below_capacity_validator_set_handle() - .at(&pipeline_epoch) - .at(&pipeline_stake.into()); - // TODO: handle the unwrap better here - let val_position = validator_set_positions_handle() - .at(&pipeline_epoch) - .get(storage, validator)? - .unwrap(); - let removed = below_capacity_set.remove(storage, &val_position)?; - debug_assert_eq!(removed, Some(validator.clone())); - - // Remove position - validator_set_positions_handle() - .at(&pipeline_epoch) - .remove(storage, validator)?; + remove_below_capacity_validator( + storage, + ¶ms, + pipeline_epoch, + validator, + )?; } ValidatorState::BelowThreshold => {} ValidatorState::Inactive => { @@ -5562,65 +2216,6 @@ where Ok(()) } -fn deactivate_consensus_validator( - storage: &mut S, - - validator: &Address, - target_epoch: Epoch, - stake: token::Amount, -) -> storage_api::Result<()> -where - S: StorageRead + StorageWrite, -{ - let consensus_set = consensus_validator_set_handle() - .at(&target_epoch) - .at(&stake); - // TODO: handle the unwrap better here - let val_position = validator_set_positions_handle() - .at(&target_epoch) - .get(storage, validator)? 
- .unwrap(); - let removed = consensus_set.remove(storage, &val_position)?; - debug_assert_eq!(removed, Some(validator.clone())); - - // Remove position - validator_set_positions_handle() - .at(&target_epoch) - .remove(storage, validator)?; - - // Now promote the next below-capacity validator to the consensus - // set - let below_cap_set = below_capacity_validator_set_handle().at(&target_epoch); - let max_below_capacity_validator_amount = - get_max_below_capacity_validator_amount(&below_cap_set, storage)?; - - if let Some(max_bc_amount) = max_below_capacity_validator_amount { - let below_cap_vals_max = below_cap_set.at(&max_bc_amount.into()); - let lowest_position = - find_first_position(&below_cap_vals_max, storage)?.unwrap(); - let removed_max_below_capacity = below_cap_vals_max - .remove(storage, &lowest_position)? - .expect("Must have been removed"); - - insert_validator_into_set( - &consensus_validator_set_handle() - .at(&target_epoch) - .at(&max_bc_amount), - storage, - &target_epoch, - &removed_max_below_capacity, - )?; - validator_state_handle(&removed_max_below_capacity).set( - storage, - ValidatorState::Consensus, - target_epoch, - 0, - )?; - } - - Ok(()) -} - /// Re-activate an inactive validator pub fn reactivate_validator( storage: &mut S, @@ -5867,6 +2462,7 @@ pub mod test_utils { use super::*; use crate::parameters::PosParams; + use crate::storage::read_non_pos_owned_params; use crate::types::GenesisValidator; /// Helper function to initialize storage with PoS data @@ -5945,152 +2541,12 @@ pub mod test_utils { { let gov_params = namada_core::ledger::governance::parameters::GovernanceParameters::default(); gov_params.init_storage(storage)?; - let params = crate::read_non_pos_owned_params(storage, owned)?; + let params = read_non_pos_owned_params(storage, owned)?; init_genesis_helper(storage, ¶ms, validators, current_epoch)?; Ok(params) } } -/// Read PoS validator's email. -pub fn read_validator_email( - storage: &S, - validator: &Address, -) -> storage_api::Result> -where - S: StorageRead, -{ - storage.read(&validator_email_key(validator)) -} - -/// Write PoS validator's email. The email cannot be removed, so an empty string -/// will result in an error. -pub fn write_validator_email( - storage: &mut S, - validator: &Address, - email: &String, -) -> storage_api::Result<()> -where - S: StorageRead + StorageWrite, -{ - let key = validator_email_key(validator); - if email.is_empty() { - Err(MetadataError::CannotRemoveEmail.into()) - } else { - storage.write(&key, email) - } -} - -/// Read PoS validator's description. -pub fn read_validator_description( - storage: &S, - validator: &Address, -) -> storage_api::Result> -where - S: StorageRead, -{ - storage.read(&validator_description_key(validator)) -} - -/// Write PoS validator's description. If the provided arg is an empty string, -/// remove the data. -pub fn write_validator_description( - storage: &mut S, - validator: &Address, - description: &String, -) -> storage_api::Result<()> -where - S: StorageRead + StorageWrite, -{ - let key = validator_description_key(validator); - if description.is_empty() { - storage.delete(&key) - } else { - storage.write(&key, description) - } -} - -/// Read PoS validator's website. -pub fn read_validator_website( - storage: &S, - validator: &Address, -) -> storage_api::Result> -where - S: StorageRead, -{ - storage.read(&validator_website_key(validator)) -} - -/// Write PoS validator's website. If the provided arg is an empty string, -/// remove the data. 
-pub fn write_validator_website( - storage: &mut S, - validator: &Address, - website: &String, -) -> storage_api::Result<()> -where - S: StorageRead + StorageWrite, -{ - let key = validator_website_key(validator); - if website.is_empty() { - storage.delete(&key) - } else { - storage.write(&key, website) - } -} - -/// Read PoS validator's discord handle. -pub fn read_validator_discord_handle( - storage: &S, - validator: &Address, -) -> storage_api::Result> -where - S: StorageRead, -{ - storage.read(&validator_discord_key(validator)) -} - -/// Write PoS validator's discord handle. If the provided arg is an empty -/// string, remove the data. -pub fn write_validator_discord_handle( - storage: &mut S, - validator: &Address, - discord_handle: &String, -) -> storage_api::Result<()> -where - S: StorageRead + StorageWrite, -{ - let key = validator_discord_key(validator); - if discord_handle.is_empty() { - storage.delete(&key) - } else { - storage.write(&key, discord_handle) - } -} - -/// Write validator's metadata. -pub fn write_validator_metadata( - storage: &mut S, - validator: &Address, - metadata: &ValidatorMetaData, -) -> storage_api::Result<()> -where - S: StorageRead + StorageWrite, -{ - // Email is the only required field in the metadata - write_validator_email(storage, validator, &metadata.email)?; - - if let Some(description) = metadata.description.as_ref() { - write_validator_description(storage, validator, description)?; - } - if let Some(website) = metadata.website.as_ref() { - write_validator_website(storage, validator, website)?; - } - if let Some(discord) = metadata.discord_handle.as_ref() { - write_validator_discord_handle(storage, validator, discord)?; - } - Ok(()) -} - /// Change validator's metadata. In addition to changing any of the data from /// [`ValidatorMetaData`], the validator's commission rate can be changed within /// here as well. @@ -6131,63 +2587,6 @@ where Ok(()) } -/// Compute the current available rewards amount due only to existing bonds. -/// This does not include pending rewards held in the rewards counter due to -/// unbonds and redelegations. 
-pub fn compute_current_rewards_from_bonds( - storage: &S, - source: &Address, - validator: &Address, - current_epoch: Epoch, -) -> storage_api::Result -where - S: StorageRead, -{ - if current_epoch == Epoch::default() { - // Nothing to claim in the first epoch - return Ok(token::Amount::zero()); - } - - let last_claim_epoch = - get_last_reward_claim_epoch(storage, source, validator)?; - if let Some(last_epoch) = last_claim_epoch { - if last_epoch == current_epoch { - // Already claimed in this epoch - return Ok(token::Amount::zero()); - } - } - - let mut reward_tokens = token::Amount::zero(); - - // Want to claim from `last_claim_epoch` to `current_epoch.prev()` since - // rewards are computed at the end of an epoch - let (claim_start, claim_end) = ( - last_claim_epoch.unwrap_or_default(), - // Safe because of the check above - current_epoch.prev(), - ); - let bond_amounts = bond_amounts_for_rewards( - storage, - &BondId { - source: source.clone(), - validator: validator.clone(), - }, - claim_start, - claim_end, - )?; - - let rewards_products = validator_rewards_products_handle(validator); - for (ep, bond_amount) in bond_amounts { - debug_assert!(ep >= claim_start); - debug_assert!(ep <= claim_end); - let rp = rewards_products.get(storage, &ep)?.unwrap_or_default(); - let reward = rp * bond_amount; - reward_tokens += reward; - } - - Ok(reward_tokens) -} - /// Claim available rewards, triggering an immediate transfer of tokens from the /// PoS account to the source address. pub fn claim_reward_tokens( @@ -6248,77 +2647,6 @@ where Ok(rewards_from_bonds + rewards_from_counter) } -/// Get the last epoch in which rewards were claimed from storage, if any -pub fn get_last_reward_claim_epoch( - storage: &S, - delegator: &Address, - validator: &Address, -) -> storage_api::Result> -where - S: StorageRead, -{ - let key = last_pos_reward_claim_epoch_key(delegator, validator); - storage.read(&key) -} - -fn write_last_reward_claim_epoch( - storage: &mut S, - delegator: &Address, - validator: &Address, - epoch: Epoch, -) -> storage_api::Result<()> -where - S: StorageRead + StorageWrite, -{ - let key = last_pos_reward_claim_epoch_key(delegator, validator); - storage.write(&key, epoch) -} - -/// Read the current token value in the rewards counter. -fn read_rewards_counter( - storage: &S, - source: &Address, - validator: &Address, -) -> storage_api::Result -where - S: StorageRead, -{ - let key = rewards_counter_key(source, validator); - Ok(storage.read::(&key)?.unwrap_or_default()) -} - -/// Add tokens to a rewards counter. -fn add_rewards_to_counter( - storage: &mut S, - source: &Address, - validator: &Address, - new_rewards: token::Amount, -) -> storage_api::Result<()> -where - S: StorageRead + StorageWrite, -{ - let key = rewards_counter_key(source, validator); - let current_rewards = - storage.read::(&key)?.unwrap_or_default(); - storage.write(&key, current_rewards + new_rewards) -} - -/// Take tokens from a rewards counter. Deletes the record after reading. -fn take_rewards_from_counter( - storage: &mut S, - source: &Address, - validator: &Address, -) -> storage_api::Result -where - S: StorageRead + StorageWrite, -{ - let key = rewards_counter_key(source, validator); - let current_rewards = - storage.read::(&key)?.unwrap_or_default(); - storage.delete(&key)?; - Ok(current_rewards) -} - /// Jail a validator by removing it from and updating the validator sets and /// changing a its state to `Jailed`. Validators are jailed for liveness and for /// misbehaving. 
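Both the deactivation path above and the jailing logic in the next hunk now rely on shared helpers: `remove_consensus_validator` / `remove_below_capacity_validator` take a validator out of the amount-keyed set it currently occupies, and `promote_next_below_capacity_validator_to_consensus` moves the below-capacity validator with the largest stake into the consensus set. A simplified in-memory sketch of that promotion step, using a plain `BTreeMap` keyed by stake rather than the storage-backed lazy collections in this diff (all names below are illustrative assumptions):

use std::collections::BTreeMap;

// Stake -> validators holding that stake, in insertion order.
type ValidatorSet = BTreeMap<u64, Vec<String>>;

// Promote the below-capacity validator with the largest stake, if any,
// into the consensus set and return it.
fn promote_next_below_capacity(
    below_capacity: &mut ValidatorSet,
    consensus: &mut ValidatorSet,
) -> Option<String> {
    // The largest stake is the last key of the ordered map
    let (&max_stake, _) = below_capacity.iter().next_back()?;
    let bucket = below_capacity.get_mut(&max_stake)?;
    // Take the validator that entered this stake bucket first
    let promoted = (!bucket.is_empty()).then(|| bucket.remove(0))?;
    if bucket.is_empty() {
        below_capacity.remove(&max_stake);
    }
    consensus.entry(max_stake).or_default().push(promoted.clone());
    Some(promoted)
}

fn main() {
    let mut below: ValidatorSet = BTreeMap::from([
        (50, vec![String::from("val-b")]),
        (80, vec![String::from("val-a")]),
    ]);
    let mut consensus: ValidatorSet = BTreeMap::new();
    let promoted = promote_next_below_capacity(&mut below, &mut consensus);
    assert_eq!(promoted, Some(String::from("val-a")));
}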
@@ -6353,61 +2681,15 @@ where "Removing validator from the consensus set in epoch {}", epoch ); - let amount_pre = - read_validator_stake(storage, params, validator, epoch)?; - let val_position = validator_set_positions_handle() - .at(&epoch) - .get(storage, validator)? - .expect("Could not find validator's position in storage."); - let _ = consensus_validator_set_handle() - .at(&epoch) - .at(&amount_pre) - .remove(storage, &val_position)?; - validator_set_positions_handle() - .at(&epoch) - .remove(storage, validator)?; + remove_consensus_validator(storage, params, epoch, validator)?; // For the pipeline epoch only: // promote the next max inactive validator to the active // validator set at the pipeline offset if epoch == pipeline_epoch { - let below_capacity_handle = - below_capacity_validator_set_handle().at(&epoch); - let max_below_capacity_amount = - get_max_below_capacity_validator_amount( - &below_capacity_handle, - storage, - )?; - if let Some(max_below_capacity_amount) = - max_below_capacity_amount - { - let position_to_promote = find_first_position( - &below_capacity_handle - .at(&max_below_capacity_amount.into()), - storage, - )? - .expect("Should return a position."); - let max_bc_validator = below_capacity_handle - .at(&max_below_capacity_amount.into()) - .remove(storage, &position_to_promote)? - .expect( - "Should have returned a removed validator.", - ); - insert_validator_into_set( - &consensus_validator_set_handle() - .at(&epoch) - .at(&max_below_capacity_amount), - storage, - &epoch, - &max_bc_validator, - )?; - validator_state_handle(&max_bc_validator).set( - storage, - ValidatorState::Consensus, - current_epoch, - params.pipeline_len, - )?; - } + promote_next_below_capacity_validator_to_consensus( + storage, epoch, + )?; } } ValidatorState::BelowCapacity => { @@ -6416,22 +2698,9 @@ where {}", epoch ); - - let amount_pre = validator_deltas_handle(validator) - .get_sum(storage, epoch, params)? - .unwrap_or_default(); - debug_assert!(amount_pre.non_negative()); - let val_position = validator_set_positions_handle() - .at(&epoch) - .get(storage, validator)? - .expect("Could not find validator's position in storage."); - let _ = below_capacity_validator_set_handle() - .at(&epoch) - .at(&token::Amount::from_change(amount_pre).into()) - .remove(storage, &val_position)?; - validator_set_positions_handle() - .at(&epoch) - .remove(storage, validator)?; + remove_below_capacity_validator( + storage, params, epoch, validator, + )?; } ValidatorState::BelowThreshold => { tracing::debug!( diff --git a/proof_of_stake/src/parameters.rs b/proof_of_stake/src/parameters.rs index ecacdde206..cb963d9e36 100644 --- a/proof_of_stake/src/parameters.rs +++ b/proof_of_stake/src/parameters.rs @@ -1,5 +1,7 @@ //! 
Proof-of-Stake system parameters +use std::str::FromStr; + use borsh::{BorshDeserialize, BorshSerialize}; use namada_core::ledger::governance::parameters::GovernanceParameters; use namada_core::types::dec::Dec; @@ -64,6 +66,10 @@ pub struct OwnedPosParams { /// The minimum required activity of consensus validators, in percentage, /// over the `liveness_window_check` pub liveness_threshold: Dec, + /// PoS gain p (read only) + pub rewards_gain_p: Dec, + /// PoS gain d (read only) + pub rewards_gain_d: Dec, } impl Default for PosParams { @@ -101,6 +107,8 @@ impl Default for OwnedPosParams { validator_stake_threshold: token::Amount::native_whole(1_u64), liveness_window_check: 10_000, liveness_threshold: Dec::new(9, 1).expect("Test failed"), + rewards_gain_p: Dec::from_str("0.25").expect("Test failed"), + rewards_gain_d: Dec::from_str("0.25").expect("Test failed"), } } } diff --git a/proof_of_stake/src/pos_queries.rs b/proof_of_stake/src/pos_queries.rs index c0d0fbaf28..6effbcfc51 100644 --- a/proof_of_stake/src/pos_queries.rs +++ b/proof_of_stake/src/pos_queries.rs @@ -11,11 +11,12 @@ use namada_core::types::storage::{BlockHeight, Epoch}; use namada_core::types::{key, token}; use thiserror::Error; +use crate::storage::find_validator_by_raw_hash; use crate::types::WeightedValidator; use crate::{ - consensus_validator_set_handle, find_validator_by_raw_hash, - get_total_consensus_stake, read_pos_params, validator_eth_cold_key_handle, - validator_eth_hot_key_handle, ConsensusValidatorSet, PosParams, + consensus_validator_set_handle, get_total_consensus_stake, read_pos_params, + validator_eth_cold_key_handle, validator_eth_hot_key_handle, + ConsensusValidatorSet, PosParams, }; /// Errors returned by [`PosQueries`] operations. diff --git a/proof_of_stake/src/queries.rs b/proof_of_stake/src/queries.rs new file mode 100644 index 0000000000..137d5fdf7a --- /dev/null +++ b/proof_of_stake/src/queries.rs @@ -0,0 +1,457 @@ +//! PoS system queries + +use std::cmp; +use std::collections::{BTreeMap, HashMap, HashSet}; + +use borsh::BorshDeserialize; +use namada_core::ledger::storage_api::collections::lazy_map::{ + NestedSubKey, SubKey, +}; +use namada_core::ledger::storage_api::{self, StorageRead}; +use namada_core::types::address::Address; +use namada_core::types::dec::Dec; +use namada_core::types::storage::Epoch; +use namada_core::types::token; + +use crate::slashing::{find_validator_slashes, get_slashed_amount}; +use crate::storage::{bond_handle, read_pos_params, unbond_handle}; +use crate::types::{ + BondDetails, BondId, BondsAndUnbondsDetail, BondsAndUnbondsDetails, Slash, + UnbondDetails, +}; +use crate::{storage_key, PosParams}; + +/// Find all validators to which a given bond `owner` (or source) has a +/// delegation +pub fn find_delegation_validators<S>( + storage: &S, + owner: &Address, +) -> storage_api::Result<HashSet<Address>> +where + S: StorageRead, +{ + let bonds_prefix = storage_key::bonds_for_source_prefix(owner); + let mut delegations: HashSet<Address>
= HashSet::new(); + + for iter_result in storage_api::iter_prefix_bytes(storage, &bonds_prefix)? { + let (key, _bond_bytes) = iter_result?; + let validator_address = storage_key::get_validator_address_from_bond( + &key, + ) + .ok_or_else(|| { + storage_api::Error::new_const( + "Delegation key should contain validator address.", + ) + })?; + delegations.insert(validator_address); + } + Ok(delegations) +} + +/// Find all validators to which a given bond `owner` (or source) has a +/// delegation with the amount +pub fn find_delegations( + storage: &S, + owner: &Address, + epoch: &Epoch, +) -> storage_api::Result> +where + S: StorageRead, +{ + let bonds_prefix = storage_key::bonds_for_source_prefix(owner); + let params = read_pos_params(storage)?; + let mut delegations: HashMap = HashMap::new(); + + for iter_result in storage_api::iter_prefix_bytes(storage, &bonds_prefix)? { + let (key, _bond_bytes) = iter_result?; + let validator_address = storage_key::get_validator_address_from_bond( + &key, + ) + .ok_or_else(|| { + storage_api::Error::new_const( + "Delegation key should contain validator address.", + ) + })?; + let deltas_sum = bond_handle(owner, &validator_address) + .get_sum(storage, *epoch, ¶ms)? + .unwrap_or_default(); + delegations.insert(validator_address, deltas_sum); + } + Ok(delegations) +} + +/// Find if the given source address has any bonds. +pub fn has_bonds(storage: &S, source: &Address) -> storage_api::Result +where + S: StorageRead, +{ + let max_epoch = Epoch(u64::MAX); + let delegations = find_delegations(storage, source, &max_epoch)?; + Ok(!delegations + .values() + .cloned() + .sum::() + .is_zero()) +} + +/// Find raw bond deltas for the given source and validator address. +pub fn find_bonds( + storage: &S, + source: &Address, + validator: &Address, +) -> storage_api::Result> +where + S: StorageRead, +{ + bond_handle(source, validator) + .get_data_handler() + .iter(storage)? + .collect() +} + +/// Find raw unbond deltas for the given source and validator address. +pub fn find_unbonds( + storage: &S, + source: &Address, + validator: &Address, +) -> storage_api::Result> +where + S: StorageRead, +{ + unbond_handle(source, validator) + .iter(storage)? + .map(|next_result| { + let ( + NestedSubKey::Data { + key: start_epoch, + nested_sub_key: SubKey::Data(withdraw_epoch), + }, + amount, + ) = next_result?; + Ok(((start_epoch, withdraw_epoch), amount)) + }) + .collect() +} + +/// Collect the details of all bonds and unbonds that match the source and +/// validator arguments. If either source or validator is `None`, then grab the +/// information for all sources or validators, respectively. +pub fn bonds_and_unbonds( + storage: &S, + source: Option
<Address>, + validator: Option<Address>
, +) -> storage_api::Result +where + S: StorageRead, +{ + let params = read_pos_params(storage)?; + + match (source.clone(), validator.clone()) { + (Some(source), Some(validator)) => { + find_bonds_and_unbonds_details(storage, ¶ms, source, validator) + } + _ => { + get_multiple_bonds_and_unbonds(storage, ¶ms, source, validator) + } + } +} + +fn get_multiple_bonds_and_unbonds( + storage: &S, + params: &PosParams, + source: Option
<Address>, + validator: Option<Address>
, +) -> storage_api::Result +where + S: StorageRead, +{ + debug_assert!( + source.is_none() || validator.is_none(), + "Use `find_bonds_and_unbonds_details` when full bond ID is known" + ); + let mut slashes_cache = HashMap::>::new(); + // Applied slashes grouped by validator address + let mut applied_slashes = HashMap::>::new(); + + // TODO: if validator is `Some`, look-up all its bond owners (including + // self-bond, if any) first + + let prefix = match source.as_ref() { + Some(source) => storage_key::bonds_for_source_prefix(source), + None => storage_key::bonds_prefix(), + }; + // We have to iterate raw bytes, cause the epoched data `last_update` field + // gets matched here too + let mut raw_bonds = storage_api::iter_prefix_bytes(storage, &prefix)? + .filter_map(|result| { + if let Ok((key, val_bytes)) = result { + if let Some((bond_id, start)) = storage_key::is_bond_key(&key) { + if source.is_some() + && source.as_ref().unwrap() != &bond_id.source + { + return None; + } + if validator.is_some() + && validator.as_ref().unwrap() != &bond_id.validator + { + return None; + } + let change: token::Amount = + BorshDeserialize::try_from_slice(&val_bytes).ok()?; + if change.is_zero() { + return None; + } + return Some((bond_id, start, change)); + } + } + None + }); + + let prefix = match source.as_ref() { + Some(source) => storage_key::unbonds_for_source_prefix(source), + None => storage_key::unbonds_prefix(), + }; + let mut raw_unbonds = storage_api::iter_prefix_bytes(storage, &prefix)? + .filter_map(|result| { + if let Ok((key, val_bytes)) = result { + if let Some((bond_id, start, withdraw)) = + storage_key::is_unbond_key(&key) + { + if source.is_some() + && source.as_ref().unwrap() != &bond_id.source + { + return None; + } + if validator.is_some() + && validator.as_ref().unwrap() != &bond_id.validator + { + return None; + } + match (source.clone(), validator.clone()) { + (None, Some(validator)) => { + if bond_id.validator != validator { + return None; + } + } + (Some(owner), None) => { + if owner != bond_id.source { + return None; + } + } + _ => {} + } + let amount: token::Amount = + BorshDeserialize::try_from_slice(&val_bytes).ok()?; + return Some((bond_id, start, withdraw, amount)); + } + } + None + }); + + let mut bonds_and_unbonds = + HashMap::, Vec)>::new(); + + raw_bonds.try_for_each(|(bond_id, start, change)| { + if !slashes_cache.contains_key(&bond_id.validator) { + let slashes = find_validator_slashes(storage, &bond_id.validator)?; + slashes_cache.insert(bond_id.validator.clone(), slashes); + } + let slashes = slashes_cache + .get(&bond_id.validator) + .expect("We must have inserted it if it's not cached already"); + let validator = bond_id.validator.clone(); + let (bonds, _unbonds) = bonds_and_unbonds.entry(bond_id).or_default(); + bonds.push(make_bond_details( + params, + &validator, + change, + start, + slashes, + &mut applied_slashes, + )); + Ok::<_, storage_api::Error>(()) + })?; + + raw_unbonds.try_for_each(|(bond_id, start, withdraw, amount)| { + if !slashes_cache.contains_key(&bond_id.validator) { + let slashes = find_validator_slashes(storage, &bond_id.validator)?; + slashes_cache.insert(bond_id.validator.clone(), slashes); + } + let slashes = slashes_cache + .get(&bond_id.validator) + .expect("We must have inserted it if it's not cached already"); + let validator = bond_id.validator.clone(); + let (_bonds, unbonds) = bonds_and_unbonds.entry(bond_id).or_default(); + unbonds.push(make_unbond_details( + params, + &validator, + amount, + (start, withdraw), + slashes, + &mut 
applied_slashes, + )); + Ok::<_, storage_api::Error>(()) + })?; + + Ok(bonds_and_unbonds + .into_iter() + .map(|(bond_id, (bonds, unbonds))| { + let details = BondsAndUnbondsDetail { + bonds, + unbonds, + slashes: applied_slashes + .get(&bond_id.validator) + .cloned() + .unwrap_or_default(), + }; + (bond_id, details) + }) + .collect()) +} + +fn find_bonds_and_unbonds_details( + storage: &S, + params: &PosParams, + source: Address, + validator: Address, +) -> storage_api::Result +where + S: StorageRead, +{ + let slashes = find_validator_slashes(storage, &validator)?; + let mut applied_slashes = HashMap::>::new(); + + let bonds = find_bonds(storage, &source, &validator)? + .into_iter() + .filter(|(_start, amount)| *amount > token::Amount::zero()) + .map(|(start, amount)| { + make_bond_details( + params, + &validator, + amount, + start, + &slashes, + &mut applied_slashes, + ) + }) + .collect(); + + let unbonds = find_unbonds(storage, &source, &validator)? + .into_iter() + .map(|(epoch_range, change)| { + make_unbond_details( + params, + &validator, + change, + epoch_range, + &slashes, + &mut applied_slashes, + ) + }) + .collect(); + + let details = BondsAndUnbondsDetail { + bonds, + unbonds, + slashes: applied_slashes.get(&validator).cloned().unwrap_or_default(), + }; + let bond_id = BondId { source, validator }; + Ok(HashMap::from_iter([(bond_id, details)])) +} + +fn make_bond_details( + params: &PosParams, + validator: &Address, + deltas_sum: token::Amount, + start: Epoch, + slashes: &[Slash], + applied_slashes: &mut HashMap>, +) -> BondDetails { + let prev_applied_slashes = applied_slashes + .clone() + .get(validator) + .cloned() + .unwrap_or_default(); + + let mut slash_rates_by_epoch = BTreeMap::::new(); + + let validator_slashes = + applied_slashes.entry(validator.clone()).or_default(); + for slash in slashes { + if slash.epoch >= start { + let cur_rate = slash_rates_by_epoch.entry(slash.epoch).or_default(); + *cur_rate = cmp::min(Dec::one(), *cur_rate + slash.rate); + + if !prev_applied_slashes.iter().any(|s| s == slash) { + validator_slashes.push(slash.clone()); + } + } + } + + let slashed_amount = if slash_rates_by_epoch.is_empty() { + None + } else { + let amount_after_slashing = + get_slashed_amount(params, deltas_sum, &slash_rates_by_epoch) + .unwrap(); + Some(deltas_sum - amount_after_slashing) + }; + + BondDetails { + start, + amount: deltas_sum, + slashed_amount, + } +} + +fn make_unbond_details( + params: &PosParams, + validator: &Address, + amount: token::Amount, + (start, withdraw): (Epoch, Epoch), + slashes: &[Slash], + applied_slashes: &mut HashMap>, +) -> UnbondDetails { + let prev_applied_slashes = applied_slashes + .clone() + .get(validator) + .cloned() + .unwrap_or_default(); + let mut slash_rates_by_epoch = BTreeMap::::new(); + + let validator_slashes = + applied_slashes.entry(validator.clone()).or_default(); + for slash in slashes { + if slash.epoch >= start + && slash.epoch + < withdraw + .checked_sub( + params.unbonding_len + + params.cubic_slashing_window_length, + ) + .unwrap_or_default() + { + let cur_rate = slash_rates_by_epoch.entry(slash.epoch).or_default(); + *cur_rate = cmp::min(Dec::one(), *cur_rate + slash.rate); + + if !prev_applied_slashes.iter().any(|s| s == slash) { + validator_slashes.push(slash.clone()); + } + } + } + + let slashed_amount = if slash_rates_by_epoch.is_empty() { + None + } else { + let amount_after_slashing = + get_slashed_amount(params, amount, &slash_rates_by_epoch).unwrap(); + Some(amount - amount_after_slashing) + }; + + 
UnbondDetails { + start, + withdraw, + amount, + slashed_amount, + } +} diff --git a/proof_of_stake/src/rewards.rs b/proof_of_stake/src/rewards.rs index 449c8a9867..880e7404c4 100644 --- a/proof_of_stake/src/rewards.rs +++ b/proof_of_stake/src/rewards.rs @@ -1,10 +1,33 @@ //! PoS rewards distribution. +use std::collections::{HashMap, HashSet}; + +use namada_core::ledger::inflation; +use namada_core::ledger::parameters::storage as params_storage; +use namada_core::ledger::storage_api::collections::lazy_map::NestedSubKey; +use namada_core::ledger::storage_api::token::credit_tokens; +use namada_core::ledger::storage_api::{ + self, ResultExt, StorageRead, StorageWrite, +}; +use namada_core::types::address::{self, Address}; use namada_core::types::dec::Dec; -use namada_core::types::token::Amount; +use namada_core::types::storage::Epoch; +use namada_core::types::token::{self, Amount}; use namada_core::types::uint::{Uint, I256}; use thiserror::Error; +use crate::storage::{ + consensus_validator_set_handle, get_last_reward_claim_epoch, + read_pos_params, read_total_stake, read_validator_stake, + rewards_accumulator_handle, validator_commission_rate_handle, + validator_rewards_products_handle, validator_state_handle, +}; +use crate::types::{into_tm_voting_power, BondId, ValidatorState, VoteInfo}; +use crate::{ + bond_amounts_for_rewards, get_total_consensus_stake, staking_token_address, + storage_key, InflationError, PosParams, +}; + /// This is equal to 0.01. const MIN_PROPOSER_REWARD: Dec = Dec(I256(Uint([10000000000u64, 0u64, 0u64, 0u64]))); @@ -99,3 +122,429 @@ impl PosRewardsCalculator { / 3u64 } } + +/// Tally a running sum of the fraction of rewards owed to each validator in +/// the consensus set. This is used to keep track of the rewards due to each +/// consensus validator over the lifetime of an epoch. +pub fn log_block_rewards( + storage: &mut S, + epoch: impl Into, + proposer_address: &Address, + votes: Vec, +) -> storage_api::Result<()> +where + S: StorageRead + StorageWrite, +{ + // The votes correspond to the last committed block (n-1 if we are + // finalizing block n) + + let epoch: Epoch = epoch.into(); + let params = read_pos_params(storage)?; + let consensus_validators = consensus_validator_set_handle().at(&epoch); + + // Get total stake of the consensus validator set + let total_consensus_stake = + get_total_consensus_stake(storage, epoch, ¶ms)?; + + // Get set of signing validator addresses and the combined stake of + // these signers + let mut signer_set: HashSet
= HashSet::new(); + let mut total_signing_stake = token::Amount::zero(); + for VoteInfo { + validator_address, + validator_vp, + } in votes + { + if validator_vp == 0 { + continue; + } + // Ensure that the validator is not currently jailed or other + let state = validator_state_handle(&validator_address) + .get(storage, epoch, ¶ms)?; + if state != Some(ValidatorState::Consensus) { + return Err(InflationError::ExpectedValidatorInConsensus( + validator_address, + state, + )) + .into_storage_result(); + } + + let stake_from_deltas = + read_validator_stake(storage, ¶ms, &validator_address, epoch)?; + + // Ensure TM stake updates properly with a debug_assert + if cfg!(debug_assertions) { + debug_assert_eq!( + into_tm_voting_power( + params.tm_votes_per_token, + stake_from_deltas, + ), + i64::try_from(validator_vp).unwrap_or_default(), + ); + } + + signer_set.insert(validator_address); + total_signing_stake += stake_from_deltas; + } + + // Get the block rewards coefficients (proposing, signing/voting, + // consensus set status) + let rewards_calculator = PosRewardsCalculator { + proposer_reward: params.block_proposer_reward, + signer_reward: params.block_vote_reward, + signing_stake: total_signing_stake, + total_stake: total_consensus_stake, + }; + let coeffs = rewards_calculator + .get_reward_coeffs() + .map_err(InflationError::Rewards) + .into_storage_result()?; + tracing::debug!( + "PoS rewards coefficients {coeffs:?}, inputs: {rewards_calculator:?}." + ); + + // tracing::debug!( + // "TOTAL SIGNING STAKE (LOGGING BLOCK REWARDS) = {}", + // signing_stake + // ); + + // Compute the fractional block rewards for each consensus validator and + // update the reward accumulators + let consensus_stake_unscaled: Dec = total_consensus_stake.into(); + let signing_stake_unscaled: Dec = total_signing_stake.into(); + let mut values: HashMap = HashMap::new(); + for validator in consensus_validators.iter(storage)? { + let ( + NestedSubKey::Data { + key: stake, + nested_sub_key: _, + }, + address, + ) = validator?; + + if stake.is_zero() { + continue; + } + + let mut rewards_frac = Dec::zero(); + let stake_unscaled: Dec = stake.into(); + // tracing::debug!( + // "NAMADA VALIDATOR STAKE (LOGGING BLOCK REWARDS) OF EPOCH {} = + // {}", epoch, stake + // ); + + // Proposer reward + if address == *proposer_address { + rewards_frac += coeffs.proposer_coeff; + } + // Signer reward + if signer_set.contains(&address) { + let signing_frac = stake_unscaled / signing_stake_unscaled; + rewards_frac += coeffs.signer_coeff * signing_frac; + } + // Consensus validator reward + rewards_frac += coeffs.active_val_coeff + * (stake_unscaled / consensus_stake_unscaled); + + // To be added to the rewards accumulator + values.insert(address, rewards_frac); + } + for (address, value) in values.into_iter() { + // Update the rewards accumulator + rewards_accumulator_handle().update(storage, address, |prev| { + prev.unwrap_or_default() + value + })?; + } + + Ok(()) +} + +/// Apply inflation to the Proof of Stake system. +pub fn apply_inflation( + storage: &mut S, + last_epoch: Epoch, + num_blocks_in_last_epoch: u64, +) -> storage_api::Result<()> +where + S: StorageRead + StorageWrite, +{ + // Read from Parameters storage + let epochs_per_year: u64 = storage + .read(¶ms_storage::get_epochs_per_year_key())? + .expect("Epochs per year should exist in storage"); + let pos_last_staked_ratio: Dec = storage + .read(¶ms_storage::get_staked_ratio_key())? 
+ .expect("PoS staked ratio should exist in storage"); + let pos_last_inflation_amount: token::Amount = storage + .read(¶ms_storage::get_pos_inflation_amount_key())? + .expect("PoS inflation amount should exist in storage"); + + // Read from PoS storage + let params = read_pos_params(storage)?; + let staking_token = staking_token_address(storage); + let pos_p_gain_nom = params.rewards_gain_p; + let pos_d_gain_nom = params.rewards_gain_d; + + let total_tokens: token::Amount = storage + .read(&token::minted_balance_key(&staking_token))? + .expect("Total NAM balance should exist in storage"); + let pos_locked_supply = read_total_stake(storage, ¶ms, last_epoch)?; + let pos_locked_ratio_target = params.target_staked_ratio; + let pos_max_inflation_rate = params.max_inflation_rate; + + // Run rewards PD controller + let pos_controller = inflation::RewardsController { + locked_tokens: pos_locked_supply.raw_amount(), + total_tokens: total_tokens.raw_amount(), + total_native_tokens: total_tokens.raw_amount(), + locked_ratio_target: pos_locked_ratio_target, + locked_ratio_last: pos_last_staked_ratio, + max_reward_rate: pos_max_inflation_rate, + last_inflation_amount: pos_last_inflation_amount.raw_amount(), + p_gain_nom: pos_p_gain_nom, + d_gain_nom: pos_d_gain_nom, + epochs_per_year, + }; + // Run the rewards controllers + let inflation::ValsToUpdate { + locked_ratio, + inflation, + } = pos_controller.run(); + + let inflation = + token::Amount::from_uint(inflation, 0).into_storage_result()?; + + update_rewards_products_and_mint_inflation( + storage, + ¶ms, + last_epoch, + num_blocks_in_last_epoch, + inflation, + &staking_token, + )?; + + // Write new rewards parameters that will be used for the inflation of + // the current new epoch + storage + .write(¶ms_storage::get_pos_inflation_amount_key(), inflation)?; + storage.write(¶ms_storage::get_staked_ratio_key(), locked_ratio)?; + + Ok(()) +} + +#[derive(Clone, Debug)] +struct Rewards { + product: Dec, + commissions: token::Amount, +} + +/// Update validator and delegators rewards products and mint the inflation +/// tokens into the PoS account. +/// Any left-over inflation tokens from rounding error of the sum of the +/// rewards is given to the governance address. +pub fn update_rewards_products_and_mint_inflation( + storage: &mut S, + params: &PosParams, + last_epoch: Epoch, + num_blocks_in_last_epoch: u64, + inflation: token::Amount, + staking_token: &Address, +) -> storage_api::Result<()> +where + S: StorageRead + StorageWrite, +{ + // Read the rewards accumulator and calculate the new rewards products + // for the previous epoch + let mut reward_tokens_remaining = inflation; + let mut new_rewards_products: HashMap = HashMap::new(); + let mut accumulators_sum = Dec::zero(); + for acc in rewards_accumulator_handle().iter(storage)? { + let (validator, value) = acc?; + accumulators_sum += value; + + // Get reward token amount for this validator + let fractional_claim = value / num_blocks_in_last_epoch; + let reward_tokens = fractional_claim * inflation; + + // Get validator stake at the last epoch + let stake = Dec::from(read_validator_stake( + storage, params, &validator, last_epoch, + )?); + + let commission_rate = validator_commission_rate_handle(&validator) + .get(storage, last_epoch, params)? + .expect("Should be able to find validator commission rate"); + + // Calculate the reward product from the whole validator stake and take + // out the commissions. 
Because we're using the whole stake to work with + // a single product, we're also taking out commission on validator's + // self-bonds, but it is then included in the rewards claimable by the + // validator so they get it back. + let product = + (Dec::one() - commission_rate) * Dec::from(reward_tokens) / stake; + + // Tally the commission tokens earned by the validator. + // TODO: think abt Dec rounding and if `new_product` should be used + // instead of `reward_tokens` + let commissions = commission_rate * reward_tokens; + + new_rewards_products.insert( + validator, + Rewards { + product, + commissions, + }, + ); + + reward_tokens_remaining -= reward_tokens; + } + for ( + validator, + Rewards { + product, + commissions, + }, + ) in new_rewards_products + { + validator_rewards_products_handle(&validator) + .insert(storage, last_epoch, product)?; + // The commissions belong to the validator + add_rewards_to_counter(storage, &validator, &validator, commissions)?; + } + + // Mint tokens to the PoS account for the last epoch's inflation + let pos_reward_tokens = inflation - reward_tokens_remaining; + tracing::info!( + "Minting tokens for PoS rewards distribution into the PoS account. \ + Amount: {}. Total inflation: {}, number of blocks in the last epoch: \ + {num_blocks_in_last_epoch}, reward accumulators sum: \ + {accumulators_sum}.", + pos_reward_tokens.to_string_native(), + inflation.to_string_native(), + ); + credit_tokens(storage, staking_token, &address::POS, pos_reward_tokens)?; + + if reward_tokens_remaining > token::Amount::zero() { + tracing::info!( + "Minting tokens remaining from PoS rewards distribution into the \ + Governance account. Amount: {}.", + reward_tokens_remaining.to_string_native() + ); + credit_tokens( + storage, + staking_token, + &address::GOV, + reward_tokens_remaining, + )?; + } + + // Clear validator rewards accumulators + storage.delete_prefix( + // The prefix of `rewards_accumulator_handle` + &storage_key::consensus_validator_rewards_accumulator_key(), + )?; + + Ok(()) +} + +/// Compute the current available rewards amount due only to existing bonds. +/// This does not include pending rewards held in the rewards counter due to +/// unbonds and redelegations. 
+pub fn compute_current_rewards_from_bonds( + storage: &S, + source: &Address, + validator: &Address, + current_epoch: Epoch, +) -> storage_api::Result +where + S: StorageRead, +{ + if current_epoch == Epoch::default() { + // Nothing to claim in the first epoch + return Ok(token::Amount::zero()); + } + + let last_claim_epoch = + get_last_reward_claim_epoch(storage, source, validator)?; + if let Some(last_epoch) = last_claim_epoch { + if last_epoch == current_epoch { + // Already claimed in this epoch + return Ok(token::Amount::zero()); + } + } + + let mut reward_tokens = token::Amount::zero(); + + // Want to claim from `last_claim_epoch` to `current_epoch.prev()` since + // rewards are computed at the end of an epoch + let (claim_start, claim_end) = ( + last_claim_epoch.unwrap_or_default(), + // Safe because of the check above + current_epoch.prev(), + ); + let bond_amounts = bond_amounts_for_rewards( + storage, + &BondId { + source: source.clone(), + validator: validator.clone(), + }, + claim_start, + claim_end, + )?; + + let rewards_products = validator_rewards_products_handle(validator); + for (ep, bond_amount) in bond_amounts { + debug_assert!(ep >= claim_start); + debug_assert!(ep <= claim_end); + let rp = rewards_products.get(storage, &ep)?.unwrap_or_default(); + let reward = rp * bond_amount; + reward_tokens += reward; + } + + Ok(reward_tokens) +} + +/// Add tokens to a rewards counter. +pub fn add_rewards_to_counter( + storage: &mut S, + source: &Address, + validator: &Address, + new_rewards: token::Amount, +) -> storage_api::Result<()> +where + S: StorageRead + StorageWrite, +{ + let key = storage_key::rewards_counter_key(source, validator); + let current_rewards = + storage.read::(&key)?.unwrap_or_default(); + storage.write(&key, current_rewards + new_rewards) +} + +/// Take tokens from a rewards counter. Deletes the record after reading. +pub fn take_rewards_from_counter( + storage: &mut S, + source: &Address, + validator: &Address, +) -> storage_api::Result +where + S: StorageRead + StorageWrite, +{ + let key = storage_key::rewards_counter_key(source, validator); + let current_rewards = + storage.read::(&key)?.unwrap_or_default(); + storage.delete(&key)?; + Ok(current_rewards) +} + +/// Read the current token value in the rewards counter. +pub fn read_rewards_counter( + storage: &S, + source: &Address, + validator: &Address, +) -> storage_api::Result +where + S: StorageRead, +{ + let key = storage_key::rewards_counter_key(source, validator); + Ok(storage.read::(&key)?.unwrap_or_default()) +} diff --git a/proof_of_stake/src/slashing.rs b/proof_of_stake/src/slashing.rs new file mode 100644 index 0000000000..4b91a84527 --- /dev/null +++ b/proof_of_stake/src/slashing.rs @@ -0,0 +1,1130 @@ +//! 
Slashing + +use std::cmp::{self, Reverse}; +use std::collections::{BTreeMap, BTreeSet, HashMap}; + +use borsh::BorshDeserialize; +use namada_core::ledger::storage_api::collections::lazy_map::{ + Collectable, NestedMap, NestedSubKey, SubKey, +}; +use namada_core::ledger::storage_api::collections::LazyMap; +use namada_core::ledger::storage_api::{self, StorageRead, StorageWrite}; +use namada_core::types::address::Address; +use namada_core::types::dec::Dec; +use namada_core::types::storage::Epoch; +use namada_core::types::token; + +use crate::storage::{ + enqueued_slashes_handle, read_pos_params, read_validator_last_slash_epoch, + read_validator_stake, total_bonded_handle, total_unbonded_handle, + update_total_deltas, update_validator_deltas, + validator_outgoing_redelegations_handle, validator_slashes_handle, + validator_state_handle, validator_total_redelegated_bonded_handle, + validator_total_redelegated_unbonded_handle, + write_validator_last_slash_epoch, +}; +use crate::types::{ + EagerRedelegatedBondsMap, ResultSlashing, Slash, SlashType, SlashedAmount, + Slashes, TotalRedelegatedUnbonded, ValidatorState, +}; +use crate::validator_set_update::update_validator_set; +use crate::{ + fold_and_slash_redelegated_bonds, get_total_consensus_stake, + jail_validator, storage_key, EagerRedelegatedUnbonds, + FoldRedelegatedBondsResult, OwnedPosParams, PosParams, +}; + +/// Record a slash for a misbehavior that has been received from Tendermint and +/// then jail the validator, removing it from the validator set. The slash rate +/// will be computed at a later epoch. +#[allow(clippy::too_many_arguments)] +pub fn slash<S>( + storage: &mut S, + params: &PosParams, + current_epoch: Epoch, + evidence_epoch: Epoch, + evidence_block_height: impl Into<u64>, + slash_type: SlashType, + validator: &Address, + validator_set_update_epoch: Epoch, +) -> storage_api::Result<()> +where + S: StorageRead + StorageWrite, +{ + let evidence_block_height: u64 = evidence_block_height.into(); + let slash = Slash { + epoch: evidence_epoch, + block_height: evidence_block_height, + r#type: slash_type, + rate: Dec::zero(), // Let the rate be 0 initially before processing + }; + // Need `+1` because we process at the beginning of a new epoch + let processing_epoch = + evidence_epoch + params.slash_processing_epoch_offset(); + + // Add the slash to the list of enqueued slashes to be processed at a later + // epoch + enqueued_slashes_handle() + .get_data_handler() + .at(&processing_epoch) + .at(validator) + .push(storage, slash)?; + + // Update the most recent slash (infraction) epoch for the validator + let last_slash_epoch = read_validator_last_slash_epoch(storage, validator)?; + if last_slash_epoch.is_none() + || evidence_epoch.0 > last_slash_epoch.unwrap_or_default().0 + { + write_validator_last_slash_epoch(storage, validator, evidence_epoch)?; + } + + // Jail the validator and update validator sets + jail_validator( + storage, + params, + validator, + current_epoch, + validator_set_update_epoch, + )?; + + // No other actions are performed here until the epoch in which the slash is + // processed. + + Ok(()) +} + +/// Process enqueued slashes that were discovered earlier. This function is +/// called upon a new epoch. The final slash rate, taking the cubic slashing +/// rate into account, is computed. Then, each slash is recorded in storage +/// along with its computed rate, and stake is deducted from the affected +/// validators.
+pub fn process_slashes( + storage: &mut S, + current_epoch: Epoch, +) -> storage_api::Result<()> +where + S: StorageRead + StorageWrite, +{ + let params = read_pos_params(storage)?; + + if current_epoch.0 < params.slash_processing_epoch_offset() { + return Ok(()); + } + let infraction_epoch = + current_epoch - params.slash_processing_epoch_offset(); + + // Slashes to be processed in the current epoch + let enqueued_slashes = enqueued_slashes_handle().at(¤t_epoch); + if enqueued_slashes.is_empty(storage)? { + return Ok(()); + } + tracing::debug!( + "Processing slashes at the beginning of epoch {} (committed in epoch \ + {})", + current_epoch, + infraction_epoch + ); + + // Compute the cubic slash rate + let cubic_slash_rate = + compute_cubic_slash_rate(storage, ¶ms, infraction_epoch)?; + + // Collect the enqueued slashes and update their rates + let mut eager_validator_slashes: BTreeMap> = + BTreeMap::new(); + let mut eager_validator_slash_rates: HashMap = HashMap::new(); + + // `slashPerValidator` and `slashesMap` while also updating in storage + for enqueued_slash in enqueued_slashes.iter(storage)? { + let ( + NestedSubKey::Data { + key: validator, + nested_sub_key: _, + }, + enqueued_slash, + ) = enqueued_slash?; + debug_assert_eq!(enqueued_slash.epoch, infraction_epoch); + + let slash_rate = cmp::min( + Dec::one(), + cmp::max( + enqueued_slash.r#type.get_slash_rate(¶ms), + cubic_slash_rate, + ), + ); + let updated_slash = Slash { + epoch: enqueued_slash.epoch, + block_height: enqueued_slash.block_height, + r#type: enqueued_slash.r#type, + rate: slash_rate, + }; + + let cur_slashes = eager_validator_slashes + .entry(validator.clone()) + .or_default(); + cur_slashes.push(updated_slash); + let cur_rate = + eager_validator_slash_rates.entry(validator).or_default(); + *cur_rate = cmp::min(Dec::one(), *cur_rate + slash_rate); + } + + // Update the epochs of enqueued slashes in storage + enqueued_slashes_handle().update_data(storage, ¶ms, current_epoch)?; + + // `resultSlashing` + let mut map_validator_slash: EagerRedelegatedBondsMap = BTreeMap::new(); + for (validator, slash_rate) in eager_validator_slash_rates { + process_validator_slash( + storage, + ¶ms, + &validator, + slash_rate, + current_epoch, + &mut map_validator_slash, + )?; + } + tracing::debug!("Slashed amounts for validators: {map_validator_slash:#?}"); + + // Now update the remaining parts of storage + + // Write slashes themselves into storage + for (validator, slashes) in eager_validator_slashes { + let validator_slashes = validator_slashes_handle(&validator); + for slash in slashes { + validator_slashes.push(storage, slash)?; + } + } + + // Update the validator stakes + for (validator, slash_amounts) in map_validator_slash { + let mut slash_acc = token::Amount::zero(); + + // Update validator sets first because it needs to be able to read + // validator stake before we make any changes to it + for (&epoch, &slash_amount) in &slash_amounts { + let state = validator_state_handle(&validator) + .get(storage, epoch, ¶ms)? 
+ .unwrap(); + if state != ValidatorState::Jailed { + update_validator_set( + storage, + ¶ms, + &validator, + -slash_amount.change(), + epoch, + Some(0), + )?; + } + } + // Then update validator and total deltas + for (epoch, slash_amount) in slash_amounts { + let slash_delta = slash_amount - slash_acc; + slash_acc += slash_delta; + + update_validator_deltas( + storage, + ¶ms, + &validator, + -slash_delta.change(), + epoch, + Some(0), + )?; + update_total_deltas( + storage, + ¶ms, + -slash_delta.change(), + epoch, + Some(0), + )?; + } + + // TODO: should we clear some storage here as is done in Quint?? + // Possibly make the `unbonded` LazyMaps epoched so that it is done + // automatically? + } + + Ok(()) +} + +/// In the context of a redelegation, the function computes how much a validator +/// (the destination validator of the redelegation) should be slashed due to the +/// misbehaving of a second validator (the source validator of the +/// redelegation). The function computes how much the validator whould be +/// slashed at all epochs between the current epoch (curEpoch) + 1 and the +/// current epoch + 1 + PIPELINE_OFFSET, accounting for any tokens of the +/// redelegation already unbonded. +/// +/// - `src_validator` - the source validator +/// - `outgoing_redelegations` - a map from pair of epochs to int that includes +/// all the redelegations from the source validator to the destination +/// validator. +/// - The outer key is epoch at which the bond started at the source +/// validator. +/// - The inner key is epoch at which the redelegation started (the epoch at +/// which was issued). +/// - `slashes` a list of slashes of the source validator. +/// - `dest_total_redelegated_unbonded` - a map of unbonded redelegated tokens +/// at the destination validator. +/// - `slash_rate` - the rate of the slash being processed. +/// - `dest_slashed_amounts` - a map from epoch to already processed slash +/// amounts. +/// +/// Adds any newly processed slash amount to `dest_slashed_amounts`. +#[allow(clippy::too_many_arguments)] +pub fn slash_validator_redelegation( + storage: &S, + params: &OwnedPosParams, + src_validator: &Address, + current_epoch: Epoch, + outgoing_redelegations: &NestedMap>, + slashes: &Slashes, + dest_total_redelegated_unbonded: &TotalRedelegatedUnbonded, + slash_rate: Dec, + dest_slashed_amounts: &mut BTreeMap, +) -> storage_api::Result<()> +where + S: StorageRead, +{ + let infraction_epoch = + current_epoch - params.slash_processing_epoch_offset(); + + for res in outgoing_redelegations.iter(storage)? { + let ( + NestedSubKey::Data { + key: bond_start, + nested_sub_key: SubKey::Data(redel_start), + }, + amount, + ) = res?; + + if params.in_redelegation_slashing_window( + infraction_epoch, + redel_start, + params.redelegation_end_epoch_from_start(redel_start), + ) && bond_start <= infraction_epoch + { + slash_redelegation( + storage, + params, + amount, + bond_start, + params.redelegation_end_epoch_from_start(redel_start), + src_validator, + current_epoch, + slashes, + dest_total_redelegated_unbonded, + slash_rate, + dest_slashed_amounts, + )?; + } + } + + Ok(()) +} + +/// Computes how many tokens will be slashed from a redelegated bond, +/// considering that the bond may have been completely or partially unbonded and +/// that the source validator may have misbehaved within the redelegation +/// slashing window. 
+#[allow(clippy::too_many_arguments)] +pub fn slash_redelegation( + storage: &S, + params: &OwnedPosParams, + amount: token::Amount, + bond_start: Epoch, + redel_bond_start: Epoch, + src_validator: &Address, + current_epoch: Epoch, + slashes: &Slashes, + total_redelegated_unbonded: &TotalRedelegatedUnbonded, + slash_rate: Dec, + slashed_amounts: &mut BTreeMap, +) -> storage_api::Result<()> +where + S: StorageRead, +{ + tracing::debug!( + "\nSlashing redelegation amount {} - bond start {} and \ + redel_bond_start {} - at rate {}\n", + amount.to_string_native(), + bond_start, + redel_bond_start, + slash_rate + ); + + let infraction_epoch = + current_epoch - params.slash_processing_epoch_offset(); + + // Slash redelegation destination validator from the next epoch only + // as they won't be jailed + let set_update_epoch = current_epoch.next(); + + let mut init_tot_unbonded = + Epoch::iter_bounds_inclusive(infraction_epoch.next(), set_update_epoch) + .map(|epoch| { + let redelegated_unbonded = total_redelegated_unbonded + .at(&epoch) + .at(&redel_bond_start) + .at(src_validator) + .get(storage, &bond_start)? + .unwrap_or_default(); + Ok(redelegated_unbonded) + }) + .sum::>()?; + + for epoch in Epoch::iter_range(set_update_epoch, params.pipeline_len) { + let updated_total_unbonded = { + let redelegated_unbonded = total_redelegated_unbonded + .at(&epoch) + .at(&redel_bond_start) + .at(src_validator) + .get(storage, &bond_start)? + .unwrap_or_default(); + init_tot_unbonded + redelegated_unbonded + }; + + let list_slashes = slashes + .iter(storage)? + .map(Result::unwrap) + .filter(|slash| { + params.in_redelegation_slashing_window( + slash.epoch, + params.redelegation_start_epoch_from_end(redel_bond_start), + redel_bond_start, + ) && bond_start <= slash.epoch + && slash.epoch + params.slash_processing_epoch_offset() + // We're looking for slashes that were processed before or in the epoch + // in which slashes that are currently being processed + // occurred. Because we're slashing in the beginning of an + // epoch, we're also taking slashes that were processed in + // the infraction epoch as they would still be processed + // before any infraction occurred. + <= infraction_epoch + }) + .collect::>(); + + let slashable_amount = amount + .checked_sub(updated_total_unbonded) + .unwrap_or_default(); + + let slashed = + apply_list_slashes(params, &list_slashes, slashable_amount) + .mul_ceil(slash_rate); + + let list_slashes = slashes + .iter(storage)? + .map(Result::unwrap) + .filter(|slash| { + params.in_redelegation_slashing_window( + slash.epoch, + params.redelegation_start_epoch_from_end(redel_bond_start), + redel_bond_start, + ) && bond_start <= slash.epoch + }) + .collect::>(); + + let slashable_stake = + apply_list_slashes(params, &list_slashes, slashable_amount) + .mul_ceil(slash_rate); + + init_tot_unbonded = updated_total_unbonded; + let to_slash = cmp::min(slashed, slashable_stake); + if !to_slash.is_zero() { + let map_value = slashed_amounts.entry(epoch).or_default(); + *map_value += to_slash; + } + } + + Ok(()) +} + +/// Computes for a given validator and a slash how much should be slashed at all +/// epochs between the currentÃ¥ epoch (curEpoch) + 1 and the current epoch + 1 + +/// PIPELINE_OFFSET, accounting for any tokens already unbonded. +/// +/// - `validator` - the misbehaving validator. +/// - `slash_rate` - the rate of the slash being processed. +/// - `slashed_amounts_map` - a map from epoch to already processed slash +/// amounts. 
+/// +/// Returns a map that adds any newly processed slash amount to +/// `slashed_amounts_map`. +// `def slashValidator` +pub fn slash_validator( + storage: &S, + params: &OwnedPosParams, + validator: &Address, + slash_rate: Dec, + current_epoch: Epoch, + slashed_amounts_map: &BTreeMap, +) -> storage_api::Result> +where + S: StorageRead, +{ + tracing::debug!("Slashing validator {} at rate {}", validator, slash_rate); + let infraction_epoch = + current_epoch - params.slash_processing_epoch_offset(); + + let total_unbonded = total_unbonded_handle(validator); + let total_redelegated_unbonded = + validator_total_redelegated_unbonded_handle(validator); + let total_bonded = total_bonded_handle(validator); + let total_redelegated_bonded = + validator_total_redelegated_bonded_handle(validator); + + let mut slashed_amounts = slashed_amounts_map.clone(); + + let mut tot_bonds = total_bonded + .get_data_handler() + .iter(storage)? + .map(Result::unwrap) + .filter(|&(epoch, bonded)| { + epoch <= infraction_epoch && bonded > 0.into() + }) + .collect::>(); + + let mut redelegated_bonds = tot_bonds + .keys() + .filter(|&epoch| { + !total_redelegated_bonded + .at(epoch) + .is_empty(storage) + .unwrap() + }) + .map(|epoch| { + let tot_redel_bonded = total_redelegated_bonded + .at(epoch) + .collect_map(storage) + .unwrap(); + (*epoch, tot_redel_bonded) + }) + .collect::>(); + + let mut sum = token::Amount::zero(); + + let eps = current_epoch + .iter_range(params.pipeline_len) + .collect::>(); + for epoch in eps.into_iter().rev() { + let amount = tot_bonds.iter().fold( + token::Amount::zero(), + |acc, (bond_start, bond_amount)| { + acc + compute_slash_bond_at_epoch( + storage, + params, + validator, + epoch, + infraction_epoch, + *bond_start, + *bond_amount, + redelegated_bonds.get(bond_start), + slash_rate, + ) + .unwrap() + }, + ); + + let new_bonds = total_unbonded.at(&epoch); + tot_bonds = new_bonds + .collect_map(storage) + .unwrap() + .into_iter() + .filter(|(ep, _)| *ep <= infraction_epoch) + .collect::>(); + + let new_redelegated_bonds = tot_bonds + .keys() + .filter(|&ep| { + !total_redelegated_unbonded.at(ep).is_empty(storage).unwrap() + }) + .map(|ep| { + ( + *ep, + total_redelegated_unbonded + .at(&epoch) + .at(ep) + .collect_map(storage) + .unwrap(), + ) + }) + .collect::>(); + + redelegated_bonds = new_redelegated_bonds; + + // `newSum` + sum += amount; + + // `newSlashesMap` + let cur = slashed_amounts.entry(epoch).or_default(); + *cur += sum; + } + // Hack - should this be done differently? (think this is safe) + let pipeline_epoch = current_epoch + params.pipeline_len; + let last_amt = slashed_amounts + .get(&pipeline_epoch.prev()) + .cloned() + .unwrap(); + slashed_amounts.insert(pipeline_epoch, last_amt); + + Ok(slashed_amounts) +} + +/// Get the remaining token amount in a bond after applying a set of slashes. +/// +/// - `validator` - the bond's validator +/// - `epoch` - the latest slash epoch to consider. +/// - `start` - the start epoch of the bond +/// - `redelegated_bonds` +pub fn compute_bond_at_epoch( + storage: &S, + params: &OwnedPosParams, + validator: &Address, + epoch: Epoch, + start: Epoch, + amount: token::Amount, + redelegated_bonds: Option<&EagerRedelegatedBondsMap>, +) -> storage_api::Result +where + S: StorageRead, +{ + let list_slashes = validator_slashes_handle(validator) + .iter(storage)? 
+ .map(Result::unwrap) + .filter(|slash| { + start <= slash.epoch + && slash.epoch + params.slash_processing_epoch_offset() <= epoch + }) + .collect::>(); + + let slash_epoch_filter = + |e: Epoch| e + params.slash_processing_epoch_offset() <= epoch; + + let result_fold = redelegated_bonds + .map(|redelegated_bonds| { + fold_and_slash_redelegated_bonds( + storage, + params, + redelegated_bonds, + start, + &list_slashes, + slash_epoch_filter, + ) + }) + .unwrap_or_default(); + + let total_not_redelegated = amount - result_fold.total_redelegated; + let after_not_redelegated = + apply_list_slashes(params, &list_slashes, total_not_redelegated); + + Ok(after_not_redelegated + result_fold.total_after_slashing) +} + +/// Uses `fn compute_bond_at_epoch` to compute the token amount to slash in +/// order to prevent overslashing. +#[allow(clippy::too_many_arguments)] +pub fn compute_slash_bond_at_epoch( + storage: &S, + params: &OwnedPosParams, + validator: &Address, + epoch: Epoch, + infraction_epoch: Epoch, + bond_start: Epoch, + bond_amount: token::Amount, + redelegated_bonds: Option<&EagerRedelegatedBondsMap>, + slash_rate: Dec, +) -> storage_api::Result +where + S: StorageRead, +{ + let amount_due = compute_bond_at_epoch( + storage, + params, + validator, + infraction_epoch, + bond_start, + bond_amount, + redelegated_bonds, + )? + .mul_ceil(slash_rate); + let slashable_amount = compute_bond_at_epoch( + storage, + params, + validator, + epoch, + bond_start, + bond_amount, + redelegated_bonds, + )?; + Ok(cmp::min(amount_due, slashable_amount)) +} + +/// Find slashes applicable to a validator with inclusive `start` and exclusive +/// `end` epoch. +#[allow(dead_code)] +pub fn find_slashes_in_range( + storage: &S, + start: Epoch, + end: Option, + validator: &Address, +) -> storage_api::Result> +where + S: StorageRead, +{ + let mut slashes = BTreeMap::::new(); + for slash in validator_slashes_handle(validator).iter(storage)? { + let slash = slash?; + if start <= slash.epoch + && end.map(|end| slash.epoch < end).unwrap_or(true) + { + let cur_rate = slashes.entry(slash.epoch).or_default(); + *cur_rate = cmp::min(*cur_rate + slash.rate, Dec::one()); + } + } + Ok(slashes) +} + +/// Computes how much remains from an amount of tokens after applying a list of +/// slashes. +/// +/// - `slashes` - a list of slashes ordered by misbehaving epoch. +/// - `amount` - the amount of slashable tokens. +// `def applyListSlashes` +pub fn apply_list_slashes( + params: &OwnedPosParams, + slashes: &[Slash], + amount: token::Amount, +) -> token::Amount { + let mut final_amount = amount; + let mut computed_slashes = BTreeMap::::new(); + for slash in slashes { + let slashed_amount = + compute_slashable_amount(params, slash, amount, &computed_slashes); + final_amount = + final_amount.checked_sub(slashed_amount).unwrap_or_default(); + computed_slashes.insert(slash.epoch, slashed_amount); + } + final_amount +} + +/// Computes how much is left from a bond or unbond after applying a slash given +/// that a set of slashes may have been previously applied. +// `def computeSlashableAmount` +pub fn compute_slashable_amount( + params: &OwnedPosParams, + slash: &Slash, + amount: token::Amount, + computed_slashes: &BTreeMap, +) -> token::Amount { + let updated_amount = computed_slashes + .iter() + .filter(|(&epoch, _)| { + // Keep slashes that have been applied and processed before the + // current slash occurred. 
We use `<=` because slashes processed at + // `slash.epoch` (at the start of the epoch) are also processed + // before this slash occurred. + epoch + params.slash_processing_epoch_offset() <= slash.epoch + }) + .fold(amount, |acc, (_, &amnt)| { + acc.checked_sub(amnt).unwrap_or_default() + }); + updated_amount.mul_ceil(slash.rate) +} + +/// Find all slashes and the associated validators in the PoS system +pub fn find_all_slashes<S>( + storage: &S, +) -> storage_api::Result<HashMap<Address, Vec<Slash>>> +where + S: StorageRead, +{ + let mut slashes: HashMap<Address, Vec<Slash>> = HashMap::new(); + let slashes_iter = storage_api::iter_prefix_bytes( + storage, + &storage_key::slashes_prefix(), + )? + .filter_map(|result| { + if let Ok((key, val_bytes)) = result { + if let Some(validator) = storage_key::is_validator_slashes_key(&key) + { + let slash: Slash = + BorshDeserialize::try_from_slice(&val_bytes).ok()?; + return Some((validator, slash)); + } + } + None + }); + + slashes_iter.for_each(|(address, slash)| match slashes.get(&address) { + Some(vec) => { + let mut vec = vec.clone(); + vec.push(slash); + slashes.insert(address, vec); + } + None => { + slashes.insert(address, vec![slash]); + } + }); + Ok(slashes) +} + +/// Collect the details of all of the enqueued slashes to be processed in future +/// epochs into a nested map +pub fn find_all_enqueued_slashes<S>( + storage: &S, + epoch: Epoch, +) -> storage_api::Result<HashMap<Address, BTreeMap<Epoch, Vec<Slash>>>> +where + S: StorageRead, +{ + let mut enqueued = HashMap::<Address, BTreeMap<Epoch, Vec<Slash>>>::new(); + for res in enqueued_slashes_handle().get_data_handler().iter(storage)? { + let ( + NestedSubKey::Data { + key: processing_epoch, + nested_sub_key: + NestedSubKey::Data { + key: address, + nested_sub_key: _, + }, + }, + slash, + ) = res?; + if processing_epoch <= epoch { + continue; + } + + let slashes = enqueued + .entry(address) + .or_default() + .entry(processing_epoch) + .or_default(); + slashes.push(slash); + } + Ok(enqueued) +} + +/// Find PoS slashes applied to a validator, if any +pub fn find_validator_slashes<S>( + storage: &S, + validator: &Address, +) -> storage_api::Result<Vec<Slash>> +where + S: StorageRead, +{ + validator_slashes_handle(validator).iter(storage)?.collect() +} + +/// Compute a token amount after slashing, given the initial amount and a set of +/// slashes. It is assumed that the input `slashes` are those committed while the +/// `amount` was contributing to voting power. +pub fn get_slashed_amount( + params: &PosParams, + amount: token::Amount, + slashes: &BTreeMap<Epoch, Dec>, +) -> storage_api::Result<token::Amount> { + let mut updated_amount = amount; + let mut computed_amounts = Vec::<SlashedAmount>::new(); + + for (&infraction_epoch, &slash_rate) in slashes { + let mut computed_to_remove = BTreeSet::<Reverse<usize>>::new(); + for (ix, slashed_amount) in computed_amounts.iter().enumerate() { + // Update amount with slashes that happened more than unbonding_len + // epochs before this current slash + if slashed_amount.epoch + params.slash_processing_epoch_offset() + <= infraction_epoch + { + updated_amount = updated_amount + .checked_sub(slashed_amount.amount) + .unwrap_or_default(); + computed_to_remove.insert(Reverse(ix)); + } + } + // Invariant: `computed_to_remove` must be in reverse order to avoid + // the left-shift of `computed_amounts` after a call to `remove` + // invalidating the rest of the indices.
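+ // e.g. (illustrative aside, values not from the source): if `computed_to_remove` + // held indices 1 and 3, removing index 1 first would shift the element at + // index 3 down to index 2, so the later `remove(3)` would hit the wrong entry + // or panic out of bounds; iterating the `Reverse(ix)` keys removes index 3 + // before index 1.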
+ for item in computed_to_remove { + computed_amounts.remove(item.0); + } + computed_amounts.push(SlashedAmount { + amount: updated_amount.mul_ceil(slash_rate), + epoch: infraction_epoch, + }); + } + + let total_computed_amounts = computed_amounts + .into_iter() + .map(|slashed| slashed.amount) + .sum(); + + let final_amount = updated_amount + .checked_sub(total_computed_amounts) + .unwrap_or_default(); + + Ok(final_amount) +} + +/// Compute the total amount of tokens from a set of unbonds, both redelegated +/// and not, after applying slashes. Used in `unbond_tokens`. +// `def computeAmountAfterSlashingUnbond` +pub fn compute_amount_after_slashing_unbond( + storage: &S, + params: &OwnedPosParams, + unbonds: &BTreeMap, + redelegated_unbonds: &EagerRedelegatedUnbonds, + slashes: Vec, +) -> storage_api::Result +where + S: StorageRead, +{ + let mut result_slashing = ResultSlashing::default(); + for (&start_epoch, amount) in unbonds { + // `val listSlashes` + let list_slashes: Vec = slashes + .iter() + .filter(|slash| slash.epoch >= start_epoch) + .cloned() + .collect(); + // `val resultFold` + let result_fold = if let Some(redelegated_unbonds) = + redelegated_unbonds.get(&start_epoch) + { + fold_and_slash_redelegated_bonds( + storage, + params, + redelegated_unbonds, + start_epoch, + &list_slashes, + |_| true, + ) + } else { + FoldRedelegatedBondsResult::default() + }; + // `val totalNoRedelegated` + let total_not_redelegated = amount + .checked_sub(result_fold.total_redelegated) + .unwrap_or_default(); + // `val afterNoRedelegated` + let after_not_redelegated = + apply_list_slashes(params, &list_slashes, total_not_redelegated); + // `val amountAfterSlashing` + let amount_after_slashing = + after_not_redelegated + result_fold.total_after_slashing; + // Accumulation step + result_slashing.sum += amount_after_slashing; + result_slashing + .epoch_map + .insert(start_epoch, amount_after_slashing); + } + Ok(result_slashing) +} + +/// Compute the total amount of tokens from a set of unbonds, both redelegated +/// and not, after applying slashes. Used in `withdraw_tokens`. 
+// `def computeAmountAfterSlashingWithdraw` +pub fn compute_amount_after_slashing_withdraw<S>( + storage: &S, + params: &OwnedPosParams, + unbonds_and_redelegated_unbonds: &BTreeMap< + (Epoch, Epoch), + (token::Amount, EagerRedelegatedBondsMap), + >, + slashes: Vec<Slash>, +) -> storage_api::Result<ResultSlashing> +where + S: StorageRead, +{ + let mut result_slashing = ResultSlashing::default(); + + for ((start_epoch, withdraw_epoch), (amount, redelegated_unbonds)) in + unbonds_and_redelegated_unbonds.iter() + { + // TODO: check if slashes in the same epoch can be + // folded into one effective slash + let end_epoch = *withdraw_epoch + - params.unbonding_len + - params.cubic_slashing_window_length; + // Find slashes that apply to `start_epoch..end_epoch` + let list_slashes = slashes + .iter() + .filter(|slash| { + // Started before the slash occurred + start_epoch <= &slash.epoch + // Ends after the slash + && end_epoch > slash.epoch + }) + .cloned() + .collect::<Vec<Slash>>(); + + // Find the sum and the sum after slashing of the redelegated unbonds + let result_fold = fold_and_slash_redelegated_bonds( + storage, + params, + redelegated_unbonds, + *start_epoch, + &list_slashes, + |_| true, + ); + + // Unbond amount that didn't come from a redelegation + let total_not_redelegated = *amount - result_fold.total_redelegated; + // Find how much remains after slashing non-redelegated amount + let after_not_redelegated = + apply_list_slashes(params, &list_slashes, total_not_redelegated); + + // Add back the unbond and redelegated unbond amount after slashing + let amount_after_slashing = + after_not_redelegated + result_fold.total_after_slashing; + + result_slashing.sum += amount_after_slashing; + result_slashing + .epoch_map + .insert(*start_epoch, amount_after_slashing); + } + + Ok(result_slashing) +} + +/// Process a slash by (i) slashing the misbehaving validator; and (ii) any +/// validator to which it has redelegated some tokens, if the misbehaving epoch +/// is within the redelegation slashing window. +/// +/// `validator` - the misbehaving validator. +/// `slash_rate` - the slash rate. +/// `slashed_amounts_map` - a map from validator address to a map from epoch to +/// already processed slash amounts. +/// +/// Adds any newly processed slash amount of any involved validator to +/// `slashed_amounts_map`. +// Quint `processSlash` +fn process_validator_slash<S>( + storage: &mut S, + params: &PosParams, + validator: &Address, + slash_rate: Dec, + current_epoch: Epoch, + slashed_amount_map: &mut EagerRedelegatedBondsMap, +) -> storage_api::Result<()> +where + S: StorageRead + StorageWrite, +{ + // `resultSlashValidator` + let result_slash = slash_validator( + storage, + params, + validator, + slash_rate, + current_epoch, + &slashed_amount_map + .get(validator) + .cloned() + .unwrap_or_default(), + )?; + + // `updatedSlashedAmountMap` + let validator_slashes = + slashed_amount_map.entry(validator.clone()).or_default(); + *validator_slashes = result_slash; + + // `outgoingRedelegation` + let outgoing_redelegations = + validator_outgoing_redelegations_handle(validator); + + // Final loop in `processSlash` + let dest_validators = outgoing_redelegations + .iter(storage)?
+ .map(|res| { + let ( + NestedSubKey::Data { + key: dest_validator, + nested_sub_key: _, + }, + _redelegation, + ) = res?; + Ok(dest_validator) + }) + .collect::>>()?; + + for dest_validator in dest_validators { + let to_modify = slashed_amount_map + .entry(dest_validator.clone()) + .or_default(); + + tracing::debug!( + "Slashing {} redelegation to {}", + validator, + &dest_validator + ); + + // `slashValidatorRedelegation` + slash_validator_redelegation( + storage, + params, + validator, + current_epoch, + &outgoing_redelegations.at(&dest_validator), + &validator_slashes_handle(validator), + &validator_total_redelegated_unbonded_handle(&dest_validator), + slash_rate, + to_modify, + )?; + } + + Ok(()) +} + +/// Calculate the cubic slashing rate using all slashes within a window around +/// the given infraction epoch. There is no cap on the rate applied within this +/// function. +fn compute_cubic_slash_rate( + storage: &S, + params: &PosParams, + infraction_epoch: Epoch, +) -> storage_api::Result +where + S: StorageRead, +{ + tracing::debug!( + "Computing the cubic slash rate for infraction epoch \ + {infraction_epoch}." + ); + let mut sum_vp_fraction = Dec::zero(); + let (start_epoch, end_epoch) = + params.cubic_slash_epoch_window(infraction_epoch); + + for epoch in Epoch::iter_bounds_inclusive(start_epoch, end_epoch) { + let consensus_stake = + Dec::from(get_total_consensus_stake(storage, epoch, params)?); + tracing::debug!( + "Total consensus stake in epoch {}: {}", + epoch, + consensus_stake + ); + let processing_epoch = epoch + params.slash_processing_epoch_offset(); + let slashes = enqueued_slashes_handle().at(&processing_epoch); + let infracting_stake = slashes.iter(storage)?.fold( + Ok(Dec::zero()), + |acc: storage_api::Result, res| { + let acc = acc?; + let ( + NestedSubKey::Data { + key: validator, + nested_sub_key: _, + }, + _slash, + ) = res?; + + let validator_stake = + read_validator_stake(storage, params, &validator, epoch)?; + // tracing::debug!("Val {} stake: {}", &validator, + // validator_stake); + + Ok(acc + Dec::from(validator_stake)) + }, + )?; + sum_vp_fraction += infracting_stake / consensus_stake; + } + let cubic_rate = + Dec::new(9, 0).unwrap() * sum_vp_fraction * sum_vp_fraction; + tracing::debug!("Cubic slash rate: {}", cubic_rate); + Ok(cubic_rate) +} diff --git a/proof_of_stake/src/storage.rs b/proof_of_stake/src/storage.rs index b76760650b..bab677b3a0 100644 --- a/proof_of_stake/src/storage.rs +++ b/proof_of_stake/src/storage.rs @@ -1,852 +1,848 @@ -//! Proof-of-Stake storage keys and storage integration. +//! PoS functions for reading and writing to storage and lazy collection handles +//! associated with given `storage_key`s. 
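The functions in this module follow two access patterns: plain typed reads and writes through a storage key, and lazy-collection handles opened over a key. The sketch below is illustrative only and not part of this change; the helper name `example_validator_slash_info` is hypothetical, and it only combines calls that appear elsewhere in this file.

/// Hypothetical sketch: count a validator's recorded slashes via a lazy
/// handle and read its last slash epoch directly from storage.
fn example_validator_slash_info<S: StorageRead>(
    storage: &S,
    validator: &Address,
) -> storage_api::Result<(usize, Option<Epoch>)> {
    // Lazy-collection handle opened over a storage key, then iterated
    let slashes = Slashes::open(storage_key::validator_slashes_key(validator));
    let num_slashes = slashes.iter(storage)?.count();
    // Direct typed read of a single value stored under a storage key
    let last_slash_epoch =
        storage.read(&storage_key::validator_last_slash_key(validator))?;
    Ok((num_slashes, last_slash_epoch))
}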
-use namada_core::ledger::storage_api::collections::{lazy_map, lazy_vec}; -use namada_core::types::address::Address; -use namada_core::types::storage::{DbKeySeg, Epoch, Key, KeySeg}; - -use super::ADDRESS; -use crate::epoched::LAZY_MAP_SUB_KEY; -use crate::types::BondId; - -const PARAMS_STORAGE_KEY: &str = "params"; -const VALIDATOR_ADDRESSES_KEY: &str = "validator_addresses"; -#[allow(missing_docs)] -pub const VALIDATOR_STORAGE_PREFIX: &str = "validator"; -const VALIDATOR_ADDRESS_RAW_HASH: &str = "address_raw_hash"; -const VALIDATOR_CONSENSUS_KEY_STORAGE_KEY: &str = "consensus_key"; -const VALIDATOR_ETH_COLD_KEY_STORAGE_KEY: &str = "eth_cold_key"; -const VALIDATOR_ETH_HOT_KEY_STORAGE_KEY: &str = "eth_hot_key"; -const VALIDATOR_STATE_STORAGE_KEY: &str = "state"; -const VALIDATOR_DELTAS_STORAGE_KEY: &str = "deltas"; -const VALIDATOR_COMMISSION_RATE_STORAGE_KEY: &str = "commission_rate"; -const VALIDATOR_MAX_COMMISSION_CHANGE_STORAGE_KEY: &str = - "max_commission_rate_change"; -const VALIDATOR_REWARDS_PRODUCT_KEY: &str = "validator_rewards_product"; -const VALIDATOR_LAST_KNOWN_PRODUCT_EPOCH_KEY: &str = - "last_known_rewards_product_epoch"; -const SLASHES_PREFIX: &str = "slash"; -const ENQUEUED_SLASHES_KEY: &str = "enqueued_slashes"; -const VALIDATOR_LAST_SLASH_EPOCH: &str = "last_slash_epoch"; -const BOND_STORAGE_KEY: &str = "bond"; -const UNBOND_STORAGE_KEY: &str = "unbond"; -const VALIDATOR_TOTAL_BONDED_STORAGE_KEY: &str = "total_bonded"; -const VALIDATOR_TOTAL_UNBONDED_STORAGE_KEY: &str = "total_unbonded"; -const VALIDATOR_SETS_STORAGE_PREFIX: &str = "validator_sets"; -const CONSENSUS_VALIDATOR_SET_STORAGE_KEY: &str = "consensus"; -const BELOW_CAPACITY_VALIDATOR_SET_STORAGE_KEY: &str = "below_capacity"; -const TOTAL_CONSENSUS_STAKE_STORAGE_KEY: &str = "total_consensus_stake"; -const TOTAL_DELTAS_STORAGE_KEY: &str = "total_deltas"; -const VALIDATOR_SET_POSITIONS_KEY: &str = "validator_set_positions"; -const CONSENSUS_KEYS: &str = "consensus_keys"; -const LAST_BLOCK_PROPOSER_STORAGE_KEY: &str = "last_block_proposer"; -const CONSENSUS_VALIDATOR_SET_ACCUMULATOR_STORAGE_KEY: &str = - "validator_rewards_accumulator"; -const LAST_REWARD_CLAIM_EPOCH: &str = "last_reward_claim_epoch"; -const REWARDS_COUNTER_KEY: &str = "validator_rewards_commissions"; -const VALIDATOR_INCOMING_REDELEGATIONS_KEY: &str = "incoming_redelegations"; -const VALIDATOR_OUTGOING_REDELEGATIONS_KEY: &str = "outgoing_redelegations"; -const VALIDATOR_TOTAL_REDELEGATED_BONDED_KEY: &str = "total_redelegated_bonded"; -const VALIDATOR_TOTAL_REDELEGATED_UNBONDED_KEY: &str = - "total_redelegated_unbonded"; -const DELEGATOR_REDELEGATED_BONDS_KEY: &str = "delegator_redelegated_bonds"; -const DELEGATOR_REDELEGATED_UNBONDS_KEY: &str = "delegator_redelegated_unbonds"; -const VALIDATOR_EMAIL_KEY: &str = "email"; -const VALIDATOR_DESCRIPTION_KEY: &str = "description"; -const VALIDATOR_WEBSITE_KEY: &str = "website"; -const VALIDATOR_DISCORD_KEY: &str = "discord_handle"; -const LIVENESS_PREFIX: &str = "liveness"; -const LIVENESS_MISSED_VOTES: &str = "missed_votes"; -const LIVENESS_MISSED_VOTES_SUM: &str = "sum_missed_votes"; - -/// Is the given key a PoS storage key? -pub fn is_pos_key(key: &Key) -> bool { - match &key.segments.get(0) { - Some(DbKeySeg::AddressSeg(addr)) => addr == &ADDRESS, - _ => false, - } -} - -/// Storage key for PoS parameters. -pub fn params_key() -> Key { - Key::from(ADDRESS.to_db_key()) - .push(&PARAMS_STORAGE_KEY.to_owned()) - .expect("Cannot obtain a storage key") -} - -/// Is storage key for PoS parameters? 
-pub fn is_params_key(key: &Key) -> bool { - matches!(&key.segments[..], [DbKeySeg::AddressSeg(addr), DbKeySeg::StringSeg(key)] if addr == &ADDRESS && key == PARAMS_STORAGE_KEY) -} - -/// Storage key prefix for validator data. -fn validator_prefix(validator: &Address) -> Key { - Key::from(ADDRESS.to_db_key()) - .push(&VALIDATOR_STORAGE_PREFIX.to_owned()) - .expect("Cannot obtain a storage key") - .push(&validator.to_db_key()) - .expect("Cannot obtain a storage key") -} - -/// Storage key for validator's address raw hash for look-up from raw hash of an -/// address to address. -pub fn validator_address_raw_hash_key(raw_hash: impl AsRef) -> Key { - let raw_hash = raw_hash.as_ref().to_owned(); - Key::from(ADDRESS.to_db_key()) - .push(&VALIDATOR_ADDRESS_RAW_HASH.to_owned()) - .expect("Cannot obtain a storage key") - .push(&raw_hash) - .expect("Cannot obtain a storage key") -} - -/// Is storage key for validator's address raw hash? -pub fn is_validator_address_raw_hash_key(key: &Key) -> Option<&str> { - match &key.segments[..] { - [ - DbKeySeg::AddressSeg(addr), - DbKeySeg::StringSeg(prefix), - DbKeySeg::StringSeg(raw_hash), - ] if addr == &ADDRESS && prefix == VALIDATOR_ADDRESS_RAW_HASH => { - Some(raw_hash) - } - _ => None, - } -} +use std::collections::{BTreeSet, HashSet}; -/// Storage key for validator's consensus key. -pub fn validator_consensus_key_key(validator: &Address) -> Key { - validator_prefix(validator) - .push(&VALIDATOR_CONSENSUS_KEY_STORAGE_KEY.to_owned()) - .expect("Cannot obtain a storage key") -} - -/// Is storage key for validator's consensus key? -pub fn is_validator_consensus_key_key(key: &Key) -> Option<&Address> { - match &key.segments[..] { - [ - DbKeySeg::AddressSeg(addr), - DbKeySeg::StringSeg(prefix), - DbKeySeg::AddressSeg(validator), - DbKeySeg::StringSeg(key), - ] if addr == &ADDRESS - && prefix == VALIDATOR_STORAGE_PREFIX - && key == VALIDATOR_CONSENSUS_KEY_STORAGE_KEY => - { - Some(validator) - } - _ => None, - } +use namada_core::ledger::storage_api::collections::lazy_map::NestedSubKey; +use namada_core::ledger::storage_api::collections::{LazyCollection, LazySet}; +use namada_core::ledger::storage_api::governance::get_max_proposal_period; +use namada_core::ledger::storage_api::{ + self, Result, StorageRead, StorageWrite, +}; +use namada_core::types::address::Address; +use namada_core::types::dec::Dec; +use namada_core::types::key::{ + common, protocol_pk_key, tm_consensus_key_raw_hash, +}; +use namada_core::types::storage::Epoch; +use namada_core::types::token; + +use crate::storage_key::consensus_keys_key; +use crate::types::{ + BelowCapacityValidatorSets, BondId, Bonds, CommissionRates, + ConsensusValidatorSets, DelegatorRedelegatedBonded, + DelegatorRedelegatedUnbonded, EpochedSlashes, IncomingRedelegations, + LivenessMissedVotes, LivenessSumMissedVotes, OutgoingRedelegations, + ReverseOrdTokenAmount, RewardsAccumulator, RewardsProducts, Slashes, + TotalConsensusStakes, TotalDeltas, TotalRedelegatedBonded, + TotalRedelegatedUnbonded, Unbonds, ValidatorAddresses, + ValidatorConsensusKeys, ValidatorDeltas, ValidatorEthColdKeys, + ValidatorEthHotKeys, ValidatorMetaData, ValidatorProtocolKeys, + ValidatorSetPositions, ValidatorState, ValidatorStates, + ValidatorTotalUnbonded, WeightedValidator, +}; +use crate::{storage_key, MetadataError, OwnedPosParams, PosParams}; + +// ---- Storage handles ---- + +/// Get the storage handle to the epoched consensus validator set +pub fn consensus_validator_set_handle() -> ConsensusValidatorSets { + let key = 
storage_key::consensus_validator_set_key(); + ConsensusValidatorSets::open(key) +} + +/// Get the storage handle to the epoched below-capacity validator set +pub fn below_capacity_validator_set_handle() -> BelowCapacityValidatorSets { + let key = storage_key::below_capacity_validator_set_key(); + BelowCapacityValidatorSets::open(key) +} + +/// Get the storage handle to a PoS validator's consensus key (used for +/// signing block votes). +pub fn validator_consensus_key_handle( + validator: &Address, +) -> ValidatorConsensusKeys { + let key = storage_key::validator_consensus_key_key(validator); + ValidatorConsensusKeys::open(key) } -/// Storage key for validator's eth cold key. -pub fn validator_eth_cold_key_key(validator: &Address) -> Key { - validator_prefix(validator) - .push(&VALIDATOR_ETH_COLD_KEY_STORAGE_KEY.to_owned()) - .expect("Cannot obtain a storage key") -} - -/// Is storage key for validator's eth cold key? -pub fn is_validator_eth_cold_key_key(key: &Key) -> Option<&Address> { - match &key.segments[..] { - [ - DbKeySeg::AddressSeg(addr), - DbKeySeg::StringSeg(prefix), - DbKeySeg::AddressSeg(validator), - DbKeySeg::StringSeg(key), - ] if addr == &ADDRESS - && prefix == VALIDATOR_STORAGE_PREFIX - && key == VALIDATOR_ETH_COLD_KEY_STORAGE_KEY => - { - Some(validator) - } - _ => None, - } +/// Get the storage handle to a PoS validator's protocol key key. +pub fn validator_protocol_key_handle( + validator: &Address, +) -> ValidatorProtocolKeys { + let key = protocol_pk_key(validator); + ValidatorProtocolKeys::open(key) } -/// Storage key for validator's eth hot key. -pub fn validator_eth_hot_key_key(validator: &Address) -> Key { - validator_prefix(validator) - .push(&VALIDATOR_ETH_HOT_KEY_STORAGE_KEY.to_owned()) - .expect("Cannot obtain a storage key") -} - -/// Is storage key for validator's eth hot key? -pub fn is_validator_eth_hot_key_key(key: &Key) -> Option<&Address> { - match &key.segments[..] { - [ - DbKeySeg::AddressSeg(addr), - DbKeySeg::StringSeg(prefix), - DbKeySeg::AddressSeg(validator), - DbKeySeg::StringSeg(key), - ] if addr == &ADDRESS - && prefix == VALIDATOR_STORAGE_PREFIX - && key == VALIDATOR_ETH_HOT_KEY_STORAGE_KEY => - { - Some(validator) - } - _ => None, - } +/// Get the storage handle to a PoS validator's eth hot key. +pub fn validator_eth_hot_key_handle( + validator: &Address, +) -> ValidatorEthHotKeys { + let key = storage_key::validator_eth_hot_key_key(validator); + ValidatorEthHotKeys::open(key) } -/// Storage key for validator's commission rate. -pub fn validator_commission_rate_key(validator: &Address) -> Key { - validator_prefix(validator) - .push(&VALIDATOR_COMMISSION_RATE_STORAGE_KEY.to_owned()) - .expect("Cannot obtain a storage key") -} - -/// Is storage key for validator's commission rate? -pub fn is_validator_commission_rate_key( - key: &Key, -) -> Option<(&Address, Epoch)> { - match &key.segments[..] { - [ - DbKeySeg::AddressSeg(addr), - DbKeySeg::StringSeg(prefix), - DbKeySeg::AddressSeg(validator), - DbKeySeg::StringSeg(key), - DbKeySeg::StringSeg(lazy_map), - DbKeySeg::StringSeg(data), - DbKeySeg::StringSeg(epoch), - ] if addr == &ADDRESS - && prefix == VALIDATOR_STORAGE_PREFIX - && key == VALIDATOR_COMMISSION_RATE_STORAGE_KEY - && lazy_map == LAZY_MAP_SUB_KEY - && data == lazy_map::DATA_SUBKEY => - { - let epoch = Epoch::parse(epoch.clone()) - .expect("Should be able to parse the epoch"); - Some((validator, epoch)) - } - _ => None, - } +/// Get the storage handle to a PoS validator's eth cold key. 
+pub fn validator_eth_cold_key_handle( + validator: &Address, +) -> ValidatorEthColdKeys { + let key = storage_key::validator_eth_cold_key_key(validator); + ValidatorEthColdKeys::open(key) } -/// Storage key for validator's maximum commission rate change per epoch. -pub fn validator_max_commission_rate_change_key(validator: &Address) -> Key { - validator_prefix(validator) - .push(&VALIDATOR_MAX_COMMISSION_CHANGE_STORAGE_KEY.to_owned()) - .expect("Cannot obtain a storage key") -} - -/// Is storage key for validator's maximum commission rate change per epoch? -pub fn is_validator_max_commission_rate_change_key( - key: &Key, -) -> Option<&Address> { - match &key.segments[..] { - [ - DbKeySeg::AddressSeg(addr), - DbKeySeg::StringSeg(prefix), - DbKeySeg::AddressSeg(validator), - DbKeySeg::StringSeg(key), - ] if addr == &ADDRESS - && prefix == VALIDATOR_STORAGE_PREFIX - && key == VALIDATOR_MAX_COMMISSION_CHANGE_STORAGE_KEY => - { - Some(validator) - } - _ => None, - } +/// Get the storage handle to the total consensus validator stake +pub fn total_consensus_stake_handle() -> TotalConsensusStakes { + let key = storage_key::total_consensus_stake_key(); + TotalConsensusStakes::open(key) } -/// Is storage key for some piece of validator metadata? -pub fn is_validator_metadata_key(key: &Key) -> Option<&Address> { - match &key.segments[..] { - [ - DbKeySeg::AddressSeg(addr), - DbKeySeg::StringSeg(prefix), - DbKeySeg::AddressSeg(validator), - DbKeySeg::StringSeg(metadata), - ] if addr == &ADDRESS - && prefix == VALIDATOR_STORAGE_PREFIX - && matches!( - metadata.as_str(), - VALIDATOR_EMAIL_KEY - | VALIDATOR_DESCRIPTION_KEY - | VALIDATOR_WEBSITE_KEY - | VALIDATOR_DISCORD_KEY - ) => - { - Some(validator) - } - _ => None, - } +/// Get the storage handle to a PoS validator's state +pub fn validator_state_handle(validator: &Address) -> ValidatorStates { + let key = storage_key::validator_state_key(validator); + ValidatorStates::open(key) } -/// Storage key for validator's rewards products. -pub fn validator_rewards_product_key(validator: &Address) -> Key { - validator_prefix(validator) - .push(&VALIDATOR_REWARDS_PRODUCT_KEY.to_owned()) - .expect("Cannot obtain a storage key") -} - -/// Is storage key for validator's rewards products? -pub fn is_validator_rewards_product_key(key: &Key) -> Option<&Address> { - match &key.segments[..] { - [ - DbKeySeg::AddressSeg(addr), - DbKeySeg::StringSeg(prefix), - DbKeySeg::AddressSeg(validator), - DbKeySeg::StringSeg(key), - ] if addr == &ADDRESS - && prefix == VALIDATOR_STORAGE_PREFIX - && key == VALIDATOR_REWARDS_PRODUCT_KEY => - { - Some(validator) - } - _ => None, - } +/// Get the storage handle to a PoS validator's deltas +pub fn validator_deltas_handle(validator: &Address) -> ValidatorDeltas { + let key = storage_key::validator_deltas_key(validator); + ValidatorDeltas::open(key) } -/// Storage prefix for rewards counter. -pub fn rewards_counter_prefix() -> Key { - Key::from(ADDRESS.to_db_key()) - .push(&REWARDS_COUNTER_KEY.to_owned()) - .expect("Cannot obtain a storage key") -} - -/// Storage key for rewards counter. -pub fn rewards_counter_key(source: &Address, validator: &Address) -> Key { - rewards_counter_prefix() - .push(&source.to_db_key()) - .expect("Cannot obtain a storage key") - .push(&validator.to_db_key()) - .expect("Cannot obtain a storage key") -} - -/// Storage key for a validator's incoming redelegations, where the prefixed -/// validator is the destination validator. 
-pub fn validator_incoming_redelegations_key(validator: &Address) -> Key { - validator_prefix(validator) - .push(&VALIDATOR_INCOMING_REDELEGATIONS_KEY.to_owned()) - .expect("Cannot obtain a storage key") -} - -/// Storage key for a validator's outgoing redelegations, where the prefixed -/// validator is the source validator. -pub fn validator_outgoing_redelegations_key(validator: &Address) -> Key { - validator_prefix(validator) - .push(&VALIDATOR_OUTGOING_REDELEGATIONS_KEY.to_owned()) - .expect("Cannot obtain a storage key") -} - -/// Storage key for validator's total-redelegated-bonded amount to track for -/// slashing -pub fn validator_total_redelegated_bonded_key(validator: &Address) -> Key { - validator_prefix(validator) - .push(&VALIDATOR_TOTAL_REDELEGATED_BONDED_KEY.to_owned()) - .expect("Cannot obtain a storage key") -} - -/// Storage key for validator's total-redelegated-unbonded amount to track for -/// slashing -pub fn validator_total_redelegated_unbonded_key(validator: &Address) -> Key { - validator_prefix(validator) - .push(&VALIDATOR_TOTAL_REDELEGATED_UNBONDED_KEY.to_owned()) - .expect("Cannot obtain a storage key") -} - -/// Storage key prefix for all delegators' redelegated bonds. -pub fn delegator_redelegated_bonds_prefix() -> Key { - Key::from(ADDRESS.to_db_key()) - .push(&DELEGATOR_REDELEGATED_BONDS_KEY.to_owned()) - .expect("Cannot obtain a storage key") -} - -/// Storage key for a particular delegator's redelegated bond information. -pub fn delegator_redelegated_bonds_key(delegator: &Address) -> Key { - delegator_redelegated_bonds_prefix() - .push(&delegator.to_db_key()) - .expect("Cannot obtain a storage key") -} - -/// Storage key prefix for all delegators' redelegated unbonds. -pub fn delegator_redelegated_unbonds_prefix() -> Key { - Key::from(ADDRESS.to_db_key()) - .push(&DELEGATOR_REDELEGATED_UNBONDS_KEY.to_owned()) - .expect("Cannot obtain a storage key") -} - -/// Storage key for a particular delegator's redelegated unbond information. -pub fn delegator_redelegated_unbonds_key(delegator: &Address) -> Key { - delegator_redelegated_unbonds_prefix() - .push(&delegator.to_db_key()) - .expect("Cannot obtain a storage key") -} - -/// Storage key for validator's last known rewards product epoch. -pub fn validator_last_known_product_epoch_key(validator: &Address) -> Key { - validator_prefix(validator) - .push(&VALIDATOR_LAST_KNOWN_PRODUCT_EPOCH_KEY.to_owned()) - .expect("Cannot obtain a storage key") -} - -/// Is storage key for validator's last known rewards product epoch? -pub fn is_validator_last_known_product_epoch_key( - key: &Key, -) -> Option<&Address> { - match &key.segments[..] { - [ - DbKeySeg::AddressSeg(addr), - DbKeySeg::StringSeg(prefix), - DbKeySeg::AddressSeg(validator), - DbKeySeg::StringSeg(key), - ] if addr == &ADDRESS - && prefix == VALIDATOR_STORAGE_PREFIX - && key == VALIDATOR_LAST_KNOWN_PRODUCT_EPOCH_KEY => - { - Some(validator) - } - _ => None, - } +/// Get the storage handle to the total deltas +pub fn total_deltas_handle() -> TotalDeltas { + let key = storage_key::total_deltas_key(); + TotalDeltas::open(key) } -/// Storage key for validator's consensus key. -pub fn validator_state_key(validator: &Address) -> Key { - validator_prefix(validator) - .push(&VALIDATOR_STATE_STORAGE_KEY.to_owned()) - .expect("Cannot obtain a storage key") -} - -/// Is storage key for validator's state? -pub fn is_validator_state_key(key: &Key) -> Option<(&Address, Epoch)> { - match &key.segments[..] 
{ - [ - DbKeySeg::AddressSeg(addr), - DbKeySeg::StringSeg(prefix), - DbKeySeg::AddressSeg(validator), - DbKeySeg::StringSeg(key), - DbKeySeg::StringSeg(lazy_map), - DbKeySeg::StringSeg(data), - DbKeySeg::StringSeg(epoch), - ] if addr == &ADDRESS - && prefix == VALIDATOR_STORAGE_PREFIX - && key == VALIDATOR_STATE_STORAGE_KEY - && lazy_map == LAZY_MAP_SUB_KEY - && data == lazy_map::DATA_SUBKEY => - { - let epoch = Epoch::parse(epoch.clone()) - .expect("Should be able to parse the epoch"); - Some((validator, epoch)) - } - _ => None, - } +/// Get the storage handle to the set of all validators +pub fn validator_addresses_handle() -> ValidatorAddresses { + let key = storage_key::validator_addresses_key(); + ValidatorAddresses::open(key) } -/// Storage key for validator's deltas. -pub fn validator_deltas_key(validator: &Address) -> Key { - validator_prefix(validator) - .push(&VALIDATOR_DELTAS_STORAGE_KEY.to_owned()) - .expect("Cannot obtain a storage key") -} - -/// Is storage key for validator's total deltas? -pub fn is_validator_deltas_key(key: &Key) -> Option<&Address> { - match &key.segments[..] { - [ - DbKeySeg::AddressSeg(addr), - DbKeySeg::StringSeg(prefix), - DbKeySeg::AddressSeg(validator), - DbKeySeg::StringSeg(key), - DbKeySeg::StringSeg(lazy_map), - DbKeySeg::StringSeg(data), - DbKeySeg::StringSeg(_epoch), - ] if addr == &ADDRESS - && prefix == VALIDATOR_STORAGE_PREFIX - && key == VALIDATOR_DELTAS_STORAGE_KEY - && lazy_map == LAZY_MAP_SUB_KEY - && data == lazy_map::DATA_SUBKEY => - { - Some(validator) - } - _ => None, - } +/// Get the storage handle to a PoS validator's commission rate +pub fn validator_commission_rate_handle( + validator: &Address, +) -> CommissionRates { + let key = storage_key::validator_commission_rate_key(validator); + CommissionRates::open(key) } -/// Storage prefix for all active validators (consensus, below-capacity, jailed) -pub fn validator_addresses_key() -> Key { - Key::from(ADDRESS.to_db_key()) - .push(&VALIDATOR_ADDRESSES_KEY.to_owned()) - .expect("Cannot obtain a storage key") -} - -/// Storage prefix for slashes. -pub fn slashes_prefix() -> Key { - Key::from(ADDRESS.to_db_key()) - .push(&SLASHES_PREFIX.to_owned()) - .expect("Cannot obtain a storage key") -} - -/// Storage key for all slashes. -pub fn enqueued_slashes_key() -> Key { - // slashes_prefix() - Key::from(ADDRESS.to_db_key()) - .push(&ENQUEUED_SLASHES_KEY.to_owned()) - .expect("Cannot obtain a storage key") -} - -/// Storage key for validator's slashes. -pub fn validator_slashes_key(validator: &Address) -> Key { - slashes_prefix() - .push(&validator.to_db_key()) - .expect("Cannot obtain a storage key") -} - -/// Is storage key for a validator's slashes -pub fn is_validator_slashes_key(key: &Key) -> Option
{ - if key.segments.len() >= 5 { - match &key.segments[..] { - [ - DbKeySeg::AddressSeg(addr), - DbKeySeg::StringSeg(prefix), - DbKeySeg::AddressSeg(validator), - DbKeySeg::StringSeg(data), - DbKeySeg::StringSeg(_index), - ] if addr == &ADDRESS - && prefix == SLASHES_PREFIX - && data == lazy_vec::DATA_SUBKEY => - { - Some(validator.clone()) - } - _ => None, - } - } else { - None - } +/// Get the storage handle to a bond, which is dynamically updated with when +/// unbonding +pub fn bond_handle(source: &Address, validator: &Address) -> Bonds { + let bond_id = BondId { + source: source.clone(), + validator: validator.clone(), + }; + let key = storage_key::bond_key(&bond_id); + Bonds::open(key) } -/// Storage key for the last (most recent) epoch in which a slashable offense -/// was detected for a given validator -pub fn validator_last_slash_key(validator: &Address) -> Key { - validator_prefix(validator) - .push(&VALIDATOR_LAST_SLASH_EPOCH.to_owned()) - .expect("Cannot obtain a storage key") -} - -/// Storage key prefix for all bonds. -pub fn bonds_prefix() -> Key { - Key::from(ADDRESS.to_db_key()) - .push(&BOND_STORAGE_KEY.to_owned()) - .expect("Cannot obtain a storage key") -} - -/// Storage key prefix for all bonds of the given source address. -pub fn bonds_for_source_prefix(source: &Address) -> Key { - bonds_prefix() - .push(&source.to_db_key()) - .expect("Cannot obtain a storage key") -} - -/// Storage key for a bond with the given ID (source and validator). -pub fn bond_key(bond_id: &BondId) -> Key { - bonds_for_source_prefix(&bond_id.source) - .push(&bond_id.validator.to_db_key()) - .expect("Cannot obtain a storage key") -} - -/// Is storage key for a bond? Returns the bond ID and bond start epoch if so. -pub fn is_bond_key(key: &Key) -> Option<(BondId, Epoch)> { - if key.segments.len() >= 7 { - match &key.segments[..7] { - [ - DbKeySeg::AddressSeg(addr), - DbKeySeg::StringSeg(prefix), - DbKeySeg::AddressSeg(source), - DbKeySeg::AddressSeg(validator), - DbKeySeg::StringSeg(lazy_map), - DbKeySeg::StringSeg(data), - DbKeySeg::StringSeg(epoch_str), - ] if addr == &ADDRESS - && prefix == BOND_STORAGE_KEY - && lazy_map == crate::epoched::LAZY_MAP_SUB_KEY - && data == lazy_map::DATA_SUBKEY => - { - let start = Epoch::parse(epoch_str.clone()).ok()?; - Some(( - BondId { - source: source.clone(), - validator: validator.clone(), - }, - start, - )) - } - _ => None, - } - } else { - None - } +/// Get the storage handle to a validator's total bonds, which are not updated +/// due to unbonding +pub fn total_bonded_handle(validator: &Address) -> Bonds { + let key = storage_key::validator_total_bonded_key(validator); + Bonds::open(key) } -/// Storage key for the total bonds for a given validator. -pub fn validator_total_bonded_key(validator: &Address) -> Key { - Key::from(ADDRESS.to_db_key()) - .push(&VALIDATOR_TOTAL_BONDED_STORAGE_KEY.to_owned()) - .expect("Cannot obtain a storage key") - .push(&validator.to_db_key()) - .expect("Cannot obtain a storage key") -} - -/// Storage key prefix for all unbonds. -pub fn unbonds_prefix() -> Key { - Key::from(ADDRESS.to_db_key()) - .push(&UNBOND_STORAGE_KEY.to_owned()) - .expect("Cannot obtain a storage key") -} - -/// Storage key prefix for all unbonds of the given source address. -pub fn unbonds_for_source_prefix(source: &Address) -> Key { - unbonds_prefix() - .push(&source.to_db_key()) - .expect("Cannot obtain a storage key") -} - -/// Storage key for an unbond with the given ID (source and validator). 
-pub fn unbond_key(bond_id: &BondId) -> Key { - unbonds_for_source_prefix(&bond_id.source) - .push(&bond_id.validator.to_db_key()) - .expect("Cannot obtain a storage key") -} - -/// Is storage key for an unbond? Returns the bond ID and unbond start and -/// withdraw epoch if it is. -pub fn is_unbond_key(key: &Key) -> Option<(BondId, Epoch, Epoch)> { - if key.segments.len() >= 8 { - match &key.segments[..8] { - [ - DbKeySeg::AddressSeg(addr), - DbKeySeg::StringSeg(prefix), - DbKeySeg::AddressSeg(source), - DbKeySeg::AddressSeg(validator), - DbKeySeg::StringSeg(data_1), - DbKeySeg::StringSeg(start_epoch_str), - DbKeySeg::StringSeg(data_2), - DbKeySeg::StringSeg(withdraw_epoch_str), - ] if addr == &ADDRESS - && prefix == UNBOND_STORAGE_KEY - && data_1 == lazy_map::DATA_SUBKEY - && data_2 == lazy_map::DATA_SUBKEY => - { - let withdraw = Epoch::parse(withdraw_epoch_str.clone()).ok()?; - let start = Epoch::parse(start_epoch_str.clone()).ok()?; - Some(( - BondId { - source: source.clone(), - validator: validator.clone(), - }, - start, - withdraw, - )) - } - _ => None, - } - } else { - None - } +/// Get the storage handle to an unbond +pub fn unbond_handle(source: &Address, validator: &Address) -> Unbonds { + let bond_id = BondId { + source: source.clone(), + validator: validator.clone(), + }; + let key = storage_key::unbond_key(&bond_id); + Unbonds::open(key) } -/// Storage key for validator's total-unbonded amount to track for slashing -pub fn validator_total_unbonded_key(validator: &Address) -> Key { - validator_prefix(validator) - .push(&VALIDATOR_TOTAL_UNBONDED_STORAGE_KEY.to_owned()) - .expect("Cannot obtain a storage key") +/// Get the storage handle to a validator's total-unbonded map +pub fn total_unbonded_handle(validator: &Address) -> ValidatorTotalUnbonded { + let key = storage_key::validator_total_unbonded_key(validator); + ValidatorTotalUnbonded::open(key) } -/// Storage prefix for validator sets. -pub fn validator_sets_prefix() -> Key { - Key::from(ADDRESS.to_db_key()) - .push(&VALIDATOR_SETS_STORAGE_PREFIX.to_owned()) - .expect("Cannot obtain a storage key") +/// Get the storage handle to a PoS validator's deltas +pub fn validator_set_positions_handle() -> ValidatorSetPositions { + let key = storage_key::validator_set_positions_key(); + ValidatorSetPositions::open(key) } -/// Storage key for consensus validator set -pub fn consensus_validator_set_key() -> Key { - validator_sets_prefix() - .push(&CONSENSUS_VALIDATOR_SET_STORAGE_KEY.to_owned()) - .expect("Cannot obtain a storage key") +/// Get the storage handle to a PoS validator's slashes +pub fn validator_slashes_handle(validator: &Address) -> Slashes { + let key = storage_key::validator_slashes_key(validator); + Slashes::open(key) } -/// Storage key for below-capacity validator set -pub fn below_capacity_validator_set_key() -> Key { - validator_sets_prefix() - .push(&BELOW_CAPACITY_VALIDATOR_SET_STORAGE_KEY.to_owned()) - .expect("Cannot obtain a storage key") +/// Get the storage handle to list of all slashes to be processed and ultimately +/// placed in the `validator_slashes_handle` +pub fn enqueued_slashes_handle() -> EpochedSlashes { + let key = storage_key::enqueued_slashes_key(); + EpochedSlashes::open(key) } -/// Is storage key for the consensus validator set? 
-pub fn is_consensus_validator_set_key(key: &Key) -> bool { - matches!(&key.segments[..], [DbKeySeg::AddressSeg(addr), DbKeySeg::StringSeg(key), DbKeySeg::StringSeg(set_type), DbKeySeg::StringSeg(lazy_map), DbKeySeg::StringSeg(data), DbKeySeg::StringSeg(_epoch), DbKeySeg::StringSeg(_), DbKeySeg::StringSeg(_amount), DbKeySeg::StringSeg(_), DbKeySeg::StringSeg(_position)] if addr == &ADDRESS && key == VALIDATOR_SETS_STORAGE_PREFIX && set_type == CONSENSUS_VALIDATOR_SET_STORAGE_KEY && lazy_map == LAZY_MAP_SUB_KEY && data == lazy_map::DATA_SUBKEY) +/// Get the storage handle to the rewards accumulator for the consensus +/// validators in a given epoch +pub fn rewards_accumulator_handle() -> RewardsAccumulator { + let key = storage_key::consensus_validator_rewards_accumulator_key(); + RewardsAccumulator::open(key) } -/// Is storage key for the below-capacity validator set? -pub fn is_below_capacity_validator_set_key(key: &Key) -> bool { - matches!(&key.segments[..], [DbKeySeg::AddressSeg(addr), DbKeySeg::StringSeg(key), DbKeySeg::StringSeg(set_type), DbKeySeg::StringSeg(lazy_map), DbKeySeg::StringSeg(data), DbKeySeg::StringSeg(_epoch), DbKeySeg::StringSeg(_), DbKeySeg::StringSeg(_amount), DbKeySeg::StringSeg(_), DbKeySeg::StringSeg(_position)] if addr == &ADDRESS && key == VALIDATOR_SETS_STORAGE_PREFIX && set_type == BELOW_CAPACITY_VALIDATOR_SET_STORAGE_KEY && lazy_map == LAZY_MAP_SUB_KEY && data == lazy_map::DATA_SUBKEY) -} - -/// Storage key for total consensus stake -pub fn total_consensus_stake_key() -> Key { - Key::from(ADDRESS.to_db_key()) - .push(&TOTAL_CONSENSUS_STAKE_STORAGE_KEY.to_owned()) - .expect("Cannot obtain a total consensus stake key") +/// Get the storage handle to a validator's rewards products +pub fn validator_rewards_products_handle( + validator: &Address, +) -> RewardsProducts { + let key = storage_key::validator_rewards_product_key(validator); + RewardsProducts::open(key) } -/// Is storage key for the total consensus stake? -pub fn is_total_consensus_stake_key(key: &Key) -> bool { - matches!(&key.segments[..], [ - DbKeySeg::AddressSeg(addr), - DbKeySeg::StringSeg(key) - ] if addr == &ADDRESS && key == TOTAL_CONSENSUS_STAKE_STORAGE_KEY) +/// Get the storage handle to a validator's incoming redelegations +pub fn validator_incoming_redelegations_handle( + validator: &Address, +) -> IncomingRedelegations { + let key = storage_key::validator_incoming_redelegations_key(validator); + IncomingRedelegations::open(key) } -/// Storage key for total deltas of all validators. -pub fn total_deltas_key() -> Key { - Key::from(ADDRESS.to_db_key()) - .push(&TOTAL_DELTAS_STORAGE_KEY.to_owned()) - .expect("Cannot obtain a storage key") +/// Get the storage handle to a validator's outgoing redelegations +pub fn validator_outgoing_redelegations_handle( + validator: &Address, +) -> OutgoingRedelegations { + let key = storage_key::validator_outgoing_redelegations_key(validator); + OutgoingRedelegations::open(key) } -/// Is storage key for total deltas of all validators? -pub fn is_total_deltas_key(key: &Key) -> Option<&String> { - match &key.segments[..] 
{ - [ - DbKeySeg::AddressSeg(addr), - DbKeySeg::StringSeg(key), - DbKeySeg::StringSeg(lazy_map), - DbKeySeg::StringSeg(data), - DbKeySeg::StringSeg(epoch), - ] if addr == &ADDRESS - && key == TOTAL_DELTAS_STORAGE_KEY - && lazy_map == LAZY_MAP_SUB_KEY - && data == lazy_map::DATA_SUBKEY => - { - Some(epoch) - } - _ => None, - } +/// Get the storage handle to a validator's total redelegated bonds +pub fn validator_total_redelegated_bonded_handle( + validator: &Address, +) -> TotalRedelegatedBonded { + let key = storage_key::validator_total_redelegated_bonded_key(validator); + TotalRedelegatedBonded::open(key) } -/// Storage key for block proposer address of the previous block. -pub fn last_block_proposer_key() -> Key { - Key::from(ADDRESS.to_db_key()) - .push(&LAST_BLOCK_PROPOSER_STORAGE_KEY.to_owned()) - .expect("Cannot obtain a storage key") +/// Get the storage handle to a validator's outgoing redelegations +pub fn validator_total_redelegated_unbonded_handle( + validator: &Address, +) -> TotalRedelegatedUnbonded { + let key = storage_key::validator_total_redelegated_unbonded_key(validator); + TotalRedelegatedUnbonded::open(key) } -/// Is storage key for block proposer address of the previous block? -pub fn is_last_block_proposer_key(key: &Key) -> bool { - matches!(&key.segments[..], [DbKeySeg::AddressSeg(addr), DbKeySeg::StringSeg(key)] if addr == &ADDRESS && key == LAST_BLOCK_PROPOSER_STORAGE_KEY) +/// Get the storage handle to a delegator's redelegated bonds information +pub fn delegator_redelegated_bonds_handle( + delegator: &Address, +) -> DelegatorRedelegatedBonded { + let key = storage_key::delegator_redelegated_bonds_key(delegator); + DelegatorRedelegatedBonded::open(key) } -/// Storage key for the consensus validator set rewards accumulator. -pub fn consensus_validator_rewards_accumulator_key() -> Key { - Key::from(ADDRESS.to_db_key()) - .push(&CONSENSUS_VALIDATOR_SET_ACCUMULATOR_STORAGE_KEY.to_owned()) - .expect("Cannot obtain a storage key") +/// Get the storage handle to a delegator's redelegated unbonds information +pub fn delegator_redelegated_unbonds_handle( + delegator: &Address, +) -> DelegatorRedelegatedUnbonded { + let key = storage_key::delegator_redelegated_unbonds_key(delegator); + DelegatorRedelegatedUnbonded::open(key) +} + +/// Get the storage handle to the missed votes for liveness tracking +pub fn liveness_missed_votes_handle() -> LivenessMissedVotes { + let key = storage_key::liveness_missed_votes_key(); + LivenessMissedVotes::open(key) +} + +/// Get the storage handle to the sum of missed votes for liveness tracking +pub fn liveness_sum_missed_votes_handle() -> LivenessSumMissedVotes { + let key = storage_key::liveness_sum_missed_votes_key(); + LivenessSumMissedVotes::open(key) +} + +// ---- Storage read + write ---- + +/// Read PoS parameters +pub fn read_pos_params(storage: &S) -> storage_api::Result +where + S: StorageRead, +{ + let params = storage + .read(&storage_key::params_key()) + .transpose() + .expect("PosParams should always exist in storage after genesis")?; + read_non_pos_owned_params(storage, params) +} + +/// Read non-PoS-owned parameters to add them to `OwnedPosParams` to construct +/// `PosParams`. 
+pub fn read_non_pos_owned_params( + storage: &S, + owned: OwnedPosParams, +) -> storage_api::Result +where + S: StorageRead, +{ + let max_proposal_period = get_max_proposal_period(storage)?; + Ok(PosParams { + owned, + max_proposal_period, + }) +} + +/// Write PoS parameters +pub fn write_pos_params( + storage: &mut S, + params: &OwnedPosParams, +) -> storage_api::Result<()> +where + S: StorageRead + StorageWrite, +{ + let key = storage_key::params_key(); + storage.write(&key, params) +} + +/// Get the validator address given the raw hash of the Tendermint consensus key +pub fn find_validator_by_raw_hash( + storage: &S, + raw_hash: impl AsRef, +) -> storage_api::Result> +where + S: StorageRead, +{ + let key = storage_key::validator_address_raw_hash_key(raw_hash); + storage.read(&key) +} + +/// Write PoS validator's address raw hash. +pub fn write_validator_address_raw_hash( + storage: &mut S, + validator: &Address, + consensus_key: &common::PublicKey, +) -> storage_api::Result<()> +where + S: StorageRead + StorageWrite, +{ + let raw_hash = tm_consensus_key_raw_hash(consensus_key); + storage.write( + &storage_key::validator_address_raw_hash_key(raw_hash), + validator, + ) +} + +/// Read PoS validator's max commission rate change. +pub fn read_validator_max_commission_rate_change( + storage: &S, + validator: &Address, +) -> storage_api::Result> +where + S: StorageRead, +{ + let key = storage_key::validator_max_commission_rate_change_key(validator); + storage.read(&key) } -/// Is storage key for the consensus validator set? -pub fn is_consensus_validator_set_accumulator_key(key: &Key) -> bool { - matches!(&key.segments[..], [ - DbKeySeg::AddressSeg(addr), - DbKeySeg::StringSeg(key), - ] if addr == &ADDRESS - && key == CONSENSUS_VALIDATOR_SET_ACCUMULATOR_STORAGE_KEY) +/// Write PoS validator's max commission rate change. +pub fn write_validator_max_commission_rate_change( + storage: &mut S, + validator: &Address, + change: Dec, +) -> storage_api::Result<()> +where + S: StorageRead + StorageWrite, +{ + let key = storage_key::validator_max_commission_rate_change_key(validator); + storage.write(&key, change) +} + +/// Read the most recent slash epoch for the given epoch +pub fn read_validator_last_slash_epoch( + storage: &S, + validator: &Address, +) -> storage_api::Result> +where + S: StorageRead, +{ + let key = storage_key::validator_last_slash_key(validator); + storage.read(&key) } -/// Storage prefix for epoch at which an account last claimed PoS inflationary -/// rewards. -pub fn last_pos_reward_claim_epoch_prefix() -> Key { - Key::from(ADDRESS.to_db_key()) - .push(&LAST_REWARD_CLAIM_EPOCH.to_owned()) - .expect("Cannot obtain a storage key") +/// Write the most recent slash epoch for the given epoch +pub fn write_validator_last_slash_epoch( + storage: &mut S, + validator: &Address, + epoch: Epoch, +) -> storage_api::Result<()> +where + S: StorageRead + StorageWrite, +{ + let key = storage_key::validator_last_slash_key(validator); + storage.write(&key, epoch) +} + +/// Read last block proposer address. +pub fn read_last_block_proposer_address( + storage: &S, +) -> storage_api::Result> +where + S: StorageRead, +{ + let key = storage_key::last_block_proposer_key(); + storage.read(&key) +} + +/// Write last block proposer address. 
+pub fn write_last_block_proposer_address( + storage: &mut S, + address: Address, +) -> storage_api::Result<()> +where + S: StorageRead + StorageWrite, +{ + let key = storage_key::last_block_proposer_key(); + storage.write(&key, address) +} + +/// Read PoS validator's delta value. +pub fn read_validator_deltas_value( + storage: &S, + validator: &Address, + epoch: &namada_core::types::storage::Epoch, +) -> storage_api::Result> +where + S: StorageRead, +{ + let handle = validator_deltas_handle(validator); + handle.get_delta_val(storage, *epoch) +} + +/// Read PoS validator's stake (sum of deltas). +/// For non-validators and validators with `0` stake, this returns the default - +/// `token::Amount::zero()`. +pub fn read_validator_stake( + storage: &S, + params: &PosParams, + validator: &Address, + epoch: namada_core::types::storage::Epoch, +) -> storage_api::Result +where + S: StorageRead, +{ + let handle = validator_deltas_handle(validator); + let amount = handle + .get_sum(storage, epoch, params)? + .map(|change| { + debug_assert!(change.non_negative()); + token::Amount::from_change(change) + }) + .unwrap_or_default(); + Ok(amount) +} + +/// Add or remove PoS validator's stake delta value +pub fn update_validator_deltas( + storage: &mut S, + params: &OwnedPosParams, + validator: &Address, + delta: token::Change, + current_epoch: namada_core::types::storage::Epoch, + offset_opt: Option, +) -> storage_api::Result<()> +where + S: StorageRead + StorageWrite, +{ + let handle = validator_deltas_handle(validator); + let offset = offset_opt.unwrap_or(params.pipeline_len); + let val = handle + .get_delta_val(storage, current_epoch + offset)? + .unwrap_or_default(); + handle.set( + storage, + val.checked_add(&delta) + .expect("Validator deltas updated amount should not overflow"), + current_epoch, + offset, + ) +} + +/// Read PoS total stake (sum of deltas). +pub fn read_total_stake( + storage: &S, + params: &PosParams, + epoch: namada_core::types::storage::Epoch, +) -> storage_api::Result +where + S: StorageRead, +{ + let handle = total_deltas_handle(); + let amnt = handle + .get_sum(storage, epoch, params)? + .map(|change| { + debug_assert!(change.non_negative()); + token::Amount::from_change(change) + }) + .unwrap_or_default(); + Ok(amnt) +} + +/// Read all addresses from consensus validator set. +pub fn read_consensus_validator_set_addresses( + storage: &S, + epoch: namada_core::types::storage::Epoch, +) -> storage_api::Result> +where + S: StorageRead, +{ + consensus_validator_set_handle() + .at(&epoch) + .iter(storage)? + .map(|res| res.map(|(_sub_key, address)| address)) + .collect() +} + +/// Read all addresses from below-capacity validator set. +pub fn read_below_capacity_validator_set_addresses( + storage: &S, + epoch: namada_core::types::storage::Epoch, +) -> storage_api::Result> +where + S: StorageRead, +{ + below_capacity_validator_set_handle() + .at(&epoch) + .iter(storage)? + .map(|res| res.map(|(_sub_key, address)| address)) + .collect() +} + +/// Read all addresses from the below-threshold set +pub fn read_below_threshold_validator_set_addresses( + storage: &S, + epoch: namada_core::types::storage::Epoch, +) -> storage_api::Result> +where + S: StorageRead, +{ + let params = read_pos_params(storage)?; + Ok(validator_addresses_handle() + .at(&epoch) + .iter(storage)? 
+ .map(Result::unwrap) + .filter(|address| { + matches!( + validator_state_handle(address).get(storage, epoch, ¶ms), + Ok(Some(ValidatorState::BelowThreshold)) + ) + }) + .collect()) +} + +/// Read all addresses from consensus validator set with their stake. +pub fn read_consensus_validator_set_addresses_with_stake( + storage: &S, + epoch: namada_core::types::storage::Epoch, +) -> storage_api::Result> +where + S: StorageRead, +{ + consensus_validator_set_handle() + .at(&epoch) + .iter(storage)? + .map(|res| { + res.map( + |( + NestedSubKey::Data { + key: bonded_stake, + nested_sub_key: _, + }, + address, + )| { + WeightedValidator { + address, + bonded_stake, + } + }, + ) + }) + .collect() +} + +/// Count the number of consensus validators +pub fn get_num_consensus_validators( + storage: &S, + epoch: namada_core::types::storage::Epoch, +) -> storage_api::Result +where + S: StorageRead, +{ + Ok(consensus_validator_set_handle() + .at(&epoch) + .iter(storage)? + .count() as u64) +} + +/// Read all addresses from below-capacity validator set with their stake. +pub fn read_below_capacity_validator_set_addresses_with_stake( + storage: &S, + epoch: namada_core::types::storage::Epoch, +) -> storage_api::Result> +where + S: StorageRead, +{ + below_capacity_validator_set_handle() + .at(&epoch) + .iter(storage)? + .map(|res| { + res.map( + |( + NestedSubKey::Data { + key: ReverseOrdTokenAmount(bonded_stake), + nested_sub_key: _, + }, + address, + )| { + WeightedValidator { + address, + bonded_stake, + } + }, + ) + }) + .collect() +} + +/// Read all validator addresses. +pub fn read_all_validator_addresses( + storage: &S, + epoch: namada_core::types::storage::Epoch, +) -> storage_api::Result> +where + S: StorageRead, +{ + validator_addresses_handle() + .at(&epoch) + .iter(storage)? + .collect() +} + +/// Update PoS total deltas. +/// Note: for EpochedDelta, write the value to change storage by +pub fn update_total_deltas( + storage: &mut S, + params: &OwnedPosParams, + delta: token::Change, + current_epoch: namada_core::types::storage::Epoch, + offset_opt: Option, +) -> storage_api::Result<()> +where + S: StorageRead + StorageWrite, +{ + let handle = total_deltas_handle(); + let offset = offset_opt.unwrap_or(params.pipeline_len); + let val = handle + .get_delta_val(storage, current_epoch + offset)? + .unwrap_or_default(); + handle.set( + storage, + val.checked_add(&delta) + .expect("Total deltas updated amount should not overflow"), + current_epoch, + offset, + ) +} + +/// Read PoS validator's email. +pub fn read_validator_email( + storage: &S, + validator: &Address, +) -> storage_api::Result> +where + S: StorageRead, +{ + storage.read(&storage_key::validator_email_key(validator)) } -/// Storage key for epoch at which an account last claimed PoS inflationary -/// rewards. -pub fn last_pos_reward_claim_epoch_key( - delegator: &Address, +/// Write PoS validator's email. The email cannot be removed, so an empty string +/// will result in an error. +pub fn write_validator_email( + storage: &mut S, validator: &Address, -) -> Key { - last_pos_reward_claim_epoch_prefix() - .push(&delegator.to_db_key()) - .expect("Cannot obtain a storage key") - .push(&validator.to_db_key()) - .expect("Cannot obtain a storage key") -} - -/// Get validator address from bond key -pub fn get_validator_address_from_bond(key: &Key) -> Option
{ - match key.get_at(3) { - Some(segment) => match segment { - DbKeySeg::AddressSeg(addr) => Some(addr.clone()), - DbKeySeg::StringSeg(_) => None, - }, - None => None, + email: &String, +) -> storage_api::Result<()> +where + S: StorageRead + StorageWrite, +{ + let key = storage_key::validator_email_key(validator); + if email.is_empty() { + Err(MetadataError::CannotRemoveEmail.into()) + } else { + storage.write(&key, email) } } -/// Storage key for validator set positions -pub fn validator_set_positions_key() -> Key { - Key::from(ADDRESS.to_db_key()) - .push(&VALIDATOR_SET_POSITIONS_KEY.to_owned()) - .expect("Cannot obtain a storage key") -} - -/// Storage key for consensus keys set. -pub fn consensus_keys_key() -> Key { - Key::from(ADDRESS.to_db_key()) - .push(&CONSENSUS_KEYS.to_owned()) - .expect("Cannot obtain a storage key") -} - -/// Is storage key for consensus keys set? -pub fn is_consensus_keys_key(key: &Key) -> bool { - matches!(&key.segments[..], [DbKeySeg::AddressSeg(addr), DbKeySeg::StringSeg(key)] if addr == &ADDRESS && key == CONSENSUS_KEYS) +/// Read PoS validator's description. +pub fn read_validator_description( + storage: &S, + validator: &Address, +) -> storage_api::Result> +where + S: StorageRead, +{ + storage.read(&storage_key::validator_description_key(validator)) } -/// Storage key for a validator's email -pub fn validator_email_key(validator: &Address) -> Key { - validator_prefix(validator) - .push(&VALIDATOR_EMAIL_KEY.to_owned()) - .expect("Cannot obtain a storage key") +/// Write PoS validator's description. If the provided arg is an empty string, +/// remove the data. +pub fn write_validator_description( + storage: &mut S, + validator: &Address, + description: &String, +) -> storage_api::Result<()> +where + S: StorageRead + StorageWrite, +{ + let key = storage_key::validator_description_key(validator); + if description.is_empty() { + storage.delete(&key) + } else { + storage.write(&key, description) + } } -/// Storage key for a validator's description -pub fn validator_description_key(validator: &Address) -> Key { - validator_prefix(validator) - .push(&VALIDATOR_DESCRIPTION_KEY.to_owned()) - .expect("Cannot obtain a storage key") +/// Read PoS validator's website. +pub fn read_validator_website( + storage: &S, + validator: &Address, +) -> storage_api::Result> +where + S: StorageRead, +{ + storage.read(&storage_key::validator_website_key(validator)) } -/// Storage key for a validator's website -pub fn validator_website_key(validator: &Address) -> Key { - validator_prefix(validator) - .push(&VALIDATOR_WEBSITE_KEY.to_owned()) - .expect("Cannot obtain a storage key") +/// Write PoS validator's website. If the provided arg is an empty string, +/// remove the data. +pub fn write_validator_website( + storage: &mut S, + validator: &Address, + website: &String, +) -> storage_api::Result<()> +where + S: StorageRead + StorageWrite, +{ + let key = storage_key::validator_website_key(validator); + if website.is_empty() { + storage.delete(&key) + } else { + storage.write(&key, website) + } } -/// Storage key for a validator's discord handle -pub fn validator_discord_key(validator: &Address) -> Key { - validator_prefix(validator) - .push(&VALIDATOR_DISCORD_KEY.to_owned()) - .expect("Cannot obtain a storage key") +/// Read PoS validator's discord handle. 
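// Editorial aside, not part of the patch: a minimal sketch of querying stake with
// the readers introduced above. The `example_*` wrapper is hypothetical; it assumes
// the surrounding module's imports (`Epoch` is `namada_core::types::storage::Epoch`).
fn example_query_stake<S: StorageRead>(
    storage: &S,
    params: &PosParams,
    validator: &Address,
    epoch: Epoch,
) -> storage_api::Result<()> {
    // Stake of a single validator; defaults to zero for non-validators.
    let stake = read_validator_stake(storage, params, validator, epoch)?;
    // Total stake (sum of deltas) across all validators at the same epoch.
    let total = read_total_stake(storage, params, epoch)?;
    // Consensus-set members together with their bonded stake.
    let consensus =
        read_consensus_validator_set_addresses_with_stake(storage, epoch)?;
    debug_assert!(stake <= total);
    let _num_consensus = consensus.len();
    Ok(())
}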
+pub fn read_validator_discord_handle( + storage: &S, + validator: &Address, +) -> storage_api::Result> +where + S: StorageRead, +{ + storage.read(&storage_key::validator_discord_key(validator)) } -/// Storage prefix for the liveness data of the cosnensus validator set. -pub fn liveness_data_prefix() -> Key { - Key::from(ADDRESS.to_db_key()) - .push(&LIVENESS_PREFIX.to_owned()) - .expect("Cannot obtain a storage key") +/// Write PoS validator's discord handle. If the provided arg is an empty +/// string, remove the data. +pub fn write_validator_discord_handle( + storage: &mut S, + validator: &Address, + discord_handle: &String, +) -> storage_api::Result<()> +where + S: StorageRead + StorageWrite, +{ + let key = storage_key::validator_discord_key(validator); + if discord_handle.is_empty() { + storage.delete(&key) + } else { + storage.write(&key, discord_handle) + } } -/// Storage key for the liveness records. -pub fn liveness_missed_votes_key() -> Key { - liveness_data_prefix() - .push(&LIVENESS_MISSED_VOTES.to_owned()) - .expect("Cannot obtain a storage key") +/// Write validator's metadata. +pub fn write_validator_metadata( + storage: &mut S, + validator: &Address, + metadata: &ValidatorMetaData, +) -> storage_api::Result<()> +where + S: StorageRead + StorageWrite, +{ + // Email is the only required field in the metadata + write_validator_email(storage, validator, &metadata.email)?; + + if let Some(description) = metadata.description.as_ref() { + write_validator_description(storage, validator, description)?; + } + if let Some(website) = metadata.website.as_ref() { + write_validator_website(storage, validator, website)?; + } + if let Some(discord) = metadata.discord_handle.as_ref() { + write_validator_discord_handle(storage, validator, discord)?; + } + Ok(()) } -/// Storage key for the liveness data. -pub fn liveness_sum_missed_votes_key() -> Key { - liveness_data_prefix() - .push(&LIVENESS_MISSED_VOTES_SUM.to_owned()) - .expect("Cannot obtain a storage key") +/// Get the last epoch in which rewards were claimed from storage, if any +pub fn get_last_reward_claim_epoch( + storage: &S, + delegator: &Address, + validator: &Address, +) -> storage_api::Result> +where + S: StorageRead, +{ + let key = + storage_key::last_pos_reward_claim_epoch_key(delegator, validator); + storage.read(&key) +} + +/// Write the last epoch in which rewards were claimed for the +/// delegator-validator pair +pub fn write_last_reward_claim_epoch( + storage: &mut S, + delegator: &Address, + validator: &Address, + epoch: Epoch, +) -> storage_api::Result<()> +where + S: StorageRead + StorageWrite, +{ + let key = + storage_key::last_pos_reward_claim_epoch_key(delegator, validator); + storage.write(&key, epoch) +} + +/// Check if the given consensus key is already being used to ensure uniqueness. +/// +/// If it's not being used, it will be inserted into the set that's being used +/// for this. If it's already used, this will return an Error. 
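// Editorial aside, not part of the patch: a sketch of updating validator metadata
// with the writers above. The `example_*` wrapper and the sample strings are
// illustrative only; the surrounding module's imports are assumed.
fn example_update_metadata<S: StorageRead + StorageWrite>(
    storage: &mut S,
    validator: &Address,
) -> storage_api::Result<()> {
    // Email is mandatory: writing an empty string fails with
    // `MetadataError::CannotRemoveEmail`.
    write_validator_email(storage, validator, &"validator@example.com".to_string())?;
    // Optional fields can be removed again by writing an empty string.
    write_validator_website(storage, validator, &"https://example.com".to_string())?;
    write_validator_description(storage, validator, &String::new())?;
    // `write_validator_metadata` bundles the same writes behind a single
    // `ValidatorMetaData` value, in which only `email` is required.
    Ok(())
}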
+pub fn try_insert_consensus_key( + storage: &mut S, + consensus_key: &common::PublicKey, +) -> storage_api::Result<()> +where + S: StorageRead + StorageWrite, +{ + let key = consensus_keys_key(); + LazySet::open(key).try_insert(storage, consensus_key.clone()) +} + +/// Get the unique set of consensus keys in storage +pub fn get_consensus_key_set( + storage: &S, +) -> storage_api::Result> +where + S: StorageRead, +{ + let key = consensus_keys_key(); + let lazy_set = LazySet::::open(key); + Ok(lazy_set.iter(storage)?.map(Result::unwrap).collect()) +} + +/// Check if the given consensus key is already being used to ensure uniqueness. +pub fn is_consensus_key_used( + storage: &S, + consensus_key: &common::PublicKey, +) -> storage_api::Result +where + S: StorageRead, +{ + let key = consensus_keys_key(); + let handle = LazySet::open(key); + handle.contains(storage, consensus_key) } diff --git a/proof_of_stake/src/storage_key.rs b/proof_of_stake/src/storage_key.rs new file mode 100644 index 0000000000..2991526760 --- /dev/null +++ b/proof_of_stake/src/storage_key.rs @@ -0,0 +1,1037 @@ +//! Proof-of-Stake storage keys and storage integration. + +use namada_core::ledger::storage_api::collections::{lazy_map, lazy_vec}; +use namada_core::types::address::Address; +use namada_core::types::storage::{DbKeySeg, Epoch, Key, KeySeg}; + +use super::ADDRESS; +use crate::epoched; +use crate::types::BondId; + +const PARAMS_STORAGE_KEY: &str = "params"; +const VALIDATOR_ADDRESSES_KEY: &str = "validator_addresses"; +#[allow(missing_docs)] +pub const VALIDATOR_STORAGE_PREFIX: &str = "validator"; +const VALIDATOR_ADDRESS_RAW_HASH: &str = "address_raw_hash"; +const VALIDATOR_CONSENSUS_KEY_STORAGE_KEY: &str = "consensus_key"; +const VALIDATOR_ETH_COLD_KEY_STORAGE_KEY: &str = "eth_cold_key"; +const VALIDATOR_ETH_HOT_KEY_STORAGE_KEY: &str = "eth_hot_key"; +const VALIDATOR_STATE_STORAGE_KEY: &str = "state"; +const VALIDATOR_DELTAS_STORAGE_KEY: &str = "deltas"; +const VALIDATOR_COMMISSION_RATE_STORAGE_KEY: &str = "commission_rate"; +const VALIDATOR_MAX_COMMISSION_CHANGE_STORAGE_KEY: &str = + "max_commission_rate_change"; +const VALIDATOR_REWARDS_PRODUCT_KEY: &str = "validator_rewards_product"; +const VALIDATOR_LAST_KNOWN_PRODUCT_EPOCH_KEY: &str = + "last_known_rewards_product_epoch"; +const SLASHES_PREFIX: &str = "slash"; +const ENQUEUED_SLASHES_KEY: &str = "enqueued_slashes"; +const VALIDATOR_LAST_SLASH_EPOCH: &str = "last_slash_epoch"; +const BOND_STORAGE_KEY: &str = "bond"; +const UNBOND_STORAGE_KEY: &str = "unbond"; +const VALIDATOR_TOTAL_BONDED_STORAGE_KEY: &str = "total_bonded"; +const VALIDATOR_TOTAL_UNBONDED_STORAGE_KEY: &str = "total_unbonded"; +const VALIDATOR_SETS_STORAGE_PREFIX: &str = "validator_sets"; +const CONSENSUS_VALIDATOR_SET_STORAGE_KEY: &str = "consensus"; +const BELOW_CAPACITY_VALIDATOR_SET_STORAGE_KEY: &str = "below_capacity"; +const TOTAL_CONSENSUS_STAKE_STORAGE_KEY: &str = "total_consensus_stake"; +const TOTAL_DELTAS_STORAGE_KEY: &str = "total_deltas"; +const VALIDATOR_SET_POSITIONS_KEY: &str = "validator_set_positions"; +const CONSENSUS_KEYS: &str = "consensus_keys"; +const LAST_BLOCK_PROPOSER_STORAGE_KEY: &str = "last_block_proposer"; +const CONSENSUS_VALIDATOR_SET_ACCUMULATOR_STORAGE_KEY: &str = + "validator_rewards_accumulator"; +const LAST_REWARD_CLAIM_EPOCH: &str = "last_reward_claim_epoch"; +const REWARDS_COUNTER_KEY: &str = "validator_rewards_commissions"; +const VALIDATOR_INCOMING_REDELEGATIONS_KEY: &str = "incoming_redelegations"; +const VALIDATOR_OUTGOING_REDELEGATIONS_KEY: &str = 
"outgoing_redelegations"; +const VALIDATOR_TOTAL_REDELEGATED_BONDED_KEY: &str = "total_redelegated_bonded"; +const VALIDATOR_TOTAL_REDELEGATED_UNBONDED_KEY: &str = + "total_redelegated_unbonded"; +const DELEGATOR_REDELEGATED_BONDS_KEY: &str = "delegator_redelegated_bonds"; +const DELEGATOR_REDELEGATED_UNBONDS_KEY: &str = "delegator_redelegated_unbonds"; +const VALIDATOR_EMAIL_KEY: &str = "email"; +const VALIDATOR_DESCRIPTION_KEY: &str = "description"; +const VALIDATOR_WEBSITE_KEY: &str = "website"; +const VALIDATOR_DISCORD_KEY: &str = "discord_handle"; +const LIVENESS_PREFIX: &str = "liveness"; +const LIVENESS_MISSED_VOTES: &str = "missed_votes"; +const LIVENESS_MISSED_VOTES_SUM: &str = "sum_missed_votes"; + +/// Is the given key a PoS storage key? +pub fn is_pos_key(key: &Key) -> bool { + match &key.segments.get(0) { + Some(DbKeySeg::AddressSeg(addr)) => addr == &ADDRESS, + _ => false, + } +} + +/// Storage key for PoS parameters. +pub fn params_key() -> Key { + Key::from(ADDRESS.to_db_key()) + .push(&PARAMS_STORAGE_KEY.to_owned()) + .expect("Cannot obtain a storage key") +} + +/// Is storage key for PoS parameters? +pub fn is_params_key(key: &Key) -> bool { + matches!(&key.segments[..], [DbKeySeg::AddressSeg(addr), DbKeySeg::StringSeg(key)] if addr == &ADDRESS && key == PARAMS_STORAGE_KEY) +} + +/// Storage key prefix for validator data. +fn validator_prefix(validator: &Address) -> Key { + Key::from(ADDRESS.to_db_key()) + .push(&VALIDATOR_STORAGE_PREFIX.to_owned()) + .expect("Cannot obtain a storage key") + .push(&validator.to_db_key()) + .expect("Cannot obtain a storage key") +} + +/// Storage key for validator's address raw hash for look-up from raw hash of an +/// address to address. +pub fn validator_address_raw_hash_key(raw_hash: impl AsRef) -> Key { + let raw_hash = raw_hash.as_ref().to_owned(); + Key::from(ADDRESS.to_db_key()) + .push(&VALIDATOR_ADDRESS_RAW_HASH.to_owned()) + .expect("Cannot obtain a storage key") + .push(&raw_hash) + .expect("Cannot obtain a storage key") +} + +/// Is storage key for validator's address raw hash? +pub fn is_validator_address_raw_hash_key(key: &Key) -> Option<&str> { + match &key.segments[..] { + [ + DbKeySeg::AddressSeg(addr), + DbKeySeg::StringSeg(prefix), + DbKeySeg::StringSeg(raw_hash), + ] if addr == &ADDRESS && prefix == VALIDATOR_ADDRESS_RAW_HASH => { + Some(raw_hash) + } + _ => None, + } +} + +/// Storage key for validator's consensus key. +pub fn validator_consensus_key_key(validator: &Address) -> Key { + validator_prefix(validator) + .push(&VALIDATOR_CONSENSUS_KEY_STORAGE_KEY.to_owned()) + .expect("Cannot obtain a storage key") +} + +/// Is storage key for validator's consensus key? +pub fn is_validator_consensus_key_key(key: &Key) -> Option<&Address> { + if key.segments.len() >= 4 { + match &key.segments[..4] { + [ + DbKeySeg::AddressSeg(addr), + DbKeySeg::StringSeg(prefix), + DbKeySeg::AddressSeg(validator), + DbKeySeg::StringSeg(key), + ] if addr == &ADDRESS + && prefix == VALIDATOR_STORAGE_PREFIX + && key == VALIDATOR_CONSENSUS_KEY_STORAGE_KEY => + { + Some(validator) + } + _ => None, + } + } else { + None + } +} + +/// Storage key for validator's eth cold key. +pub fn validator_eth_cold_key_key(validator: &Address) -> Key { + validator_prefix(validator) + .push(&VALIDATOR_ETH_COLD_KEY_STORAGE_KEY.to_owned()) + .expect("Cannot obtain a storage key") +} + +/// Is storage key for validator's eth cold key? 
+pub fn is_validator_eth_cold_key_key(key: &Key) -> Option<&Address> { + if key.segments.len() >= 4 { + match &key.segments[..4] { + [ + DbKeySeg::AddressSeg(addr), + DbKeySeg::StringSeg(prefix), + DbKeySeg::AddressSeg(validator), + DbKeySeg::StringSeg(key), + ] if addr == &ADDRESS + && prefix == VALIDATOR_STORAGE_PREFIX + && key == VALIDATOR_ETH_COLD_KEY_STORAGE_KEY => + { + Some(validator) + } + _ => None, + } + } else { + None + } +} + +/// Storage key for validator's eth hot key. +pub fn validator_eth_hot_key_key(validator: &Address) -> Key { + validator_prefix(validator) + .push(&VALIDATOR_ETH_HOT_KEY_STORAGE_KEY.to_owned()) + .expect("Cannot obtain a storage key") +} + +/// Is storage key for validator's eth hot key? +pub fn is_validator_eth_hot_key_key(key: &Key) -> Option<&Address> { + if key.segments.len() >= 4 { + match &key.segments[..4] { + [ + DbKeySeg::AddressSeg(addr), + DbKeySeg::StringSeg(prefix), + DbKeySeg::AddressSeg(validator), + DbKeySeg::StringSeg(key), + ] if addr == &ADDRESS + && prefix == VALIDATOR_STORAGE_PREFIX + && key == VALIDATOR_ETH_HOT_KEY_STORAGE_KEY => + { + Some(validator) + } + _ => None, + } + } else { + None + } +} + +/// Storage key for validator's commission rate. +pub fn validator_commission_rate_key(validator: &Address) -> Key { + validator_prefix(validator) + .push(&VALIDATOR_COMMISSION_RATE_STORAGE_KEY.to_owned()) + .expect("Cannot obtain a storage key") +} + +/// Is storage key for validator's commission rate? +pub fn is_validator_commission_rate_key(key: &Key) -> Option<&Address> { + if key.segments.len() >= 4 { + match &key.segments[..4] { + [ + DbKeySeg::AddressSeg(addr), + DbKeySeg::StringSeg(prefix), + DbKeySeg::AddressSeg(validator), + DbKeySeg::StringSeg(key), + ] if addr == &ADDRESS + && prefix == VALIDATOR_STORAGE_PREFIX + && key == VALIDATOR_COMMISSION_RATE_STORAGE_KEY => + { + Some(validator) + } + _ => None, + } + } else { + None + } +} + +/// Storage key for validator's maximum commission rate change per epoch. +pub fn validator_max_commission_rate_change_key(validator: &Address) -> Key { + validator_prefix(validator) + .push(&VALIDATOR_MAX_COMMISSION_CHANGE_STORAGE_KEY.to_owned()) + .expect("Cannot obtain a storage key") +} + +/// Is storage key for validator's maximum commission rate change per epoch? +pub fn is_validator_max_commission_rate_change_key( + key: &Key, +) -> Option<&Address> { + match &key.segments[..] { + [ + DbKeySeg::AddressSeg(addr), + DbKeySeg::StringSeg(prefix), + DbKeySeg::AddressSeg(validator), + DbKeySeg::StringSeg(key), + ] if addr == &ADDRESS + && prefix == VALIDATOR_STORAGE_PREFIX + && key == VALIDATOR_MAX_COMMISSION_CHANGE_STORAGE_KEY => + { + Some(validator) + } + _ => None, + } +} + +/// Is storage key for some piece of validator metadata? +pub fn is_validator_metadata_key(key: &Key) -> Option<&Address> { + match &key.segments[..] { + [ + DbKeySeg::AddressSeg(addr), + DbKeySeg::StringSeg(prefix), + DbKeySeg::AddressSeg(validator), + DbKeySeg::StringSeg(metadata), + ] if addr == &ADDRESS + && prefix == VALIDATOR_STORAGE_PREFIX + && matches!( + metadata.as_str(), + VALIDATOR_EMAIL_KEY + | VALIDATOR_DESCRIPTION_KEY + | VALIDATOR_WEBSITE_KEY + | VALIDATOR_DISCORD_KEY + ) => + { + Some(validator) + } + _ => None, + } +} + +/// Storage key for validator's rewards products. 
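// Editorial aside, not part of the patch: a sketch combining the consensus-key
// uniqueness helpers defined earlier in this diff (in the storage module). The
// `example_*` wrapper is hypothetical and the relevant imports are assumed.
fn example_register_consensus_key<S: StorageRead + StorageWrite>(
    storage: &mut S,
    consensus_key: &common::PublicKey,
) -> storage_api::Result<bool> {
    // Informational pre-check only; `try_insert_consensus_key` is what actually
    // enforces uniqueness and returns an error on a duplicate key.
    let already_used = is_consensus_key_used(storage, consensus_key)?;
    if !already_used {
        try_insert_consensus_key(storage, consensus_key)?;
    }
    Ok(already_used)
}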
+pub fn validator_rewards_product_key(validator: &Address) -> Key { + validator_prefix(validator) + .push(&VALIDATOR_REWARDS_PRODUCT_KEY.to_owned()) + .expect("Cannot obtain a storage key") +} + +/// Is storage key for validator's rewards products? +pub fn is_validator_rewards_product_key(key: &Key) -> Option<&Address> { + match &key.segments[..] { + [ + DbKeySeg::AddressSeg(addr), + DbKeySeg::StringSeg(prefix), + DbKeySeg::AddressSeg(validator), + DbKeySeg::StringSeg(key), + ] if addr == &ADDRESS + && prefix == VALIDATOR_STORAGE_PREFIX + && key == VALIDATOR_REWARDS_PRODUCT_KEY => + { + Some(validator) + } + _ => None, + } +} + +/// Storage prefix for rewards counter. +pub fn rewards_counter_prefix() -> Key { + Key::from(ADDRESS.to_db_key()) + .push(&REWARDS_COUNTER_KEY.to_owned()) + .expect("Cannot obtain a storage key") +} + +/// Storage key for rewards counter. +pub fn rewards_counter_key(source: &Address, validator: &Address) -> Key { + rewards_counter_prefix() + .push(&source.to_db_key()) + .expect("Cannot obtain a storage key") + .push(&validator.to_db_key()) + .expect("Cannot obtain a storage key") +} + +/// Is the storage key for rewards counter? +pub fn is_rewards_counter_key(key: &Key) -> Option { + match &key.segments[..] { + [ + DbKeySeg::AddressSeg(addr), + DbKeySeg::StringSeg(key), + DbKeySeg::AddressSeg(source), + DbKeySeg::AddressSeg(validator), + ] if addr == &ADDRESS && key == REWARDS_COUNTER_KEY => Some(BondId { + source: source.clone(), + validator: validator.clone(), + }), + _ => None, + } +} + +/// Storage key for a validator's incoming redelegations, where the prefixed +/// validator is the destination validator. +pub fn validator_incoming_redelegations_key(validator: &Address) -> Key { + validator_prefix(validator) + .push(&VALIDATOR_INCOMING_REDELEGATIONS_KEY.to_owned()) + .expect("Cannot obtain a storage key") +} + +/// Storage key for a validator's outgoing redelegations, where the prefixed +/// validator is the source validator. 
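// Editorial aside, not part of the patch: `is_rewards_counter_key` recovers the
// (source, validator) pair that `rewards_counter_key` encodes. A sketch; the
// `example_*` wrapper is hypothetical and the module's imports are assumed.
fn example_rewards_counter_round_trip(source: &Address, validator: &Address) {
    let key = rewards_counter_key(source, validator);
    assert_eq!(
        is_rewards_counter_key(&key),
        Some(BondId {
            source: source.clone(),
            validator: validator.clone(),
        })
    );
}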
+pub fn validator_outgoing_redelegations_key(validator: &Address) -> Key { + validator_prefix(validator) + .push(&VALIDATOR_OUTGOING_REDELEGATIONS_KEY.to_owned()) + .expect("Cannot obtain a storage key") +} + +/// Storage key for validator's total-redelegated-bonded amount to track for +/// slashing +pub fn validator_total_redelegated_bonded_key(validator: &Address) -> Key { + validator_prefix(validator) + .push(&VALIDATOR_TOTAL_REDELEGATED_BONDED_KEY.to_owned()) + .expect("Cannot obtain a storage key") +} + +/// Storage key for validator's total-redelegated-unbonded amount to track for +/// slashing +pub fn validator_total_redelegated_unbonded_key(validator: &Address) -> Key { + validator_prefix(validator) + .push(&VALIDATOR_TOTAL_REDELEGATED_UNBONDED_KEY.to_owned()) + .expect("Cannot obtain a storage key") +} + +/// Is the storage key's prefix matching one of validator's: +/// +/// - incoming or outgoing redelegations +/// - total redelegated bonded or unbond amounts +pub fn is_validator_redelegations_key(key: &Key) -> bool { + if key.segments.len() >= 4 { + match &key.segments[..4] { + [ + DbKeySeg::AddressSeg(addr), + DbKeySeg::StringSeg(val_prefix), + DbKeySeg::AddressSeg(_validator), + DbKeySeg::StringSeg(prefix), + ] => { + addr == &ADDRESS + && val_prefix == VALIDATOR_STORAGE_PREFIX + && (prefix == VALIDATOR_INCOMING_REDELEGATIONS_KEY + || prefix == VALIDATOR_OUTGOING_REDELEGATIONS_KEY + || prefix == VALIDATOR_TOTAL_REDELEGATED_BONDED_KEY + || prefix == VALIDATOR_TOTAL_REDELEGATED_UNBONDED_KEY) + } + _ => false, + } + } else { + false + } +} + +/// Storage key prefix for all delegators' redelegated bonds. +pub fn delegator_redelegated_bonds_prefix() -> Key { + Key::from(ADDRESS.to_db_key()) + .push(&DELEGATOR_REDELEGATED_BONDS_KEY.to_owned()) + .expect("Cannot obtain a storage key") +} + +/// Storage key for a particular delegator's redelegated bond information. +pub fn delegator_redelegated_bonds_key(delegator: &Address) -> Key { + delegator_redelegated_bonds_prefix() + .push(&delegator.to_db_key()) + .expect("Cannot obtain a storage key") +} + +/// Storage key prefix for all delegators' redelegated unbonds. +pub fn delegator_redelegated_unbonds_prefix() -> Key { + Key::from(ADDRESS.to_db_key()) + .push(&DELEGATOR_REDELEGATED_UNBONDS_KEY.to_owned()) + .expect("Cannot obtain a storage key") +} + +/// Storage key for a particular delegator's redelegated unbond information. +pub fn delegator_redelegated_unbonds_key(delegator: &Address) -> Key { + delegator_redelegated_unbonds_prefix() + .push(&delegator.to_db_key()) + .expect("Cannot obtain a storage key") +} + +/// Is the storage key's prefix matching delegator's total redelegated bonded or +/// unbond amounts? If so, returns the delegator's address. +pub fn is_delegator_redelegations_key(key: &Key) -> Option<&Address> { + if key.segments.len() >= 3 { + match &key.segments[..3] { + [ + DbKeySeg::AddressSeg(addr), + DbKeySeg::StringSeg(prefix), + DbKeySeg::AddressSeg(delegator), + ] if addr == &ADDRESS + && (prefix == DELEGATOR_REDELEGATED_BONDS_KEY + || prefix == DELEGATOR_REDELEGATED_UNBONDS_KEY) => + { + Some(delegator) + } + + _ => None, + } + } else { + None + } +} + +/// Storage key for validator's last known rewards product epoch. +pub fn validator_last_known_product_epoch_key(validator: &Address) -> Key { + validator_prefix(validator) + .push(&VALIDATOR_LAST_KNOWN_PRODUCT_EPOCH_KEY.to_owned()) + .expect("Cannot obtain a storage key") +} + +/// Is storage key for validator's last known rewards product epoch? 
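// Editorial aside, not part of the patch: the redelegation prefixes come in a
// validator-indexed and a delegator-indexed flavour, each with a matching
// predicate. A sketch; the module's imports are assumed.
fn example_redelegation_keys(validator: &Address, delegator: &Address) {
    // Validator-side bookkeeping (incoming/outgoing redelegations, totals).
    assert!(is_validator_redelegations_key(
        &validator_incoming_redelegations_key(validator)
    ));
    assert!(is_validator_redelegations_key(
        &validator_outgoing_redelegations_key(validator)
    ));
    // Delegator-side bookkeeping; the predicate returns the delegator address.
    let key = delegator_redelegated_bonds_key(delegator);
    assert_eq!(is_delegator_redelegations_key(&key), Some(delegator));
}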
+pub fn is_validator_last_known_product_epoch_key( + key: &Key, +) -> Option<&Address> { + match &key.segments[..] { + [ + DbKeySeg::AddressSeg(addr), + DbKeySeg::StringSeg(prefix), + DbKeySeg::AddressSeg(validator), + DbKeySeg::StringSeg(key), + ] if addr == &ADDRESS + && prefix == VALIDATOR_STORAGE_PREFIX + && key == VALIDATOR_LAST_KNOWN_PRODUCT_EPOCH_KEY => + { + Some(validator) + } + _ => None, + } +} + +/// Storage key for validator's consensus key. +pub fn validator_state_key(validator: &Address) -> Key { + validator_prefix(validator) + .push(&VALIDATOR_STATE_STORAGE_KEY.to_owned()) + .expect("Cannot obtain a storage key") +} + +/// Is storage key for validator's state? +pub fn is_validator_state_key(key: &Key) -> Option<(&Address, Epoch)> { + match &key.segments[..] { + [ + DbKeySeg::AddressSeg(addr), + DbKeySeg::StringSeg(prefix), + DbKeySeg::AddressSeg(validator), + DbKeySeg::StringSeg(key), + DbKeySeg::StringSeg(lazy_map), + DbKeySeg::StringSeg(data), + DbKeySeg::StringSeg(epoch), + ] if addr == &ADDRESS + && prefix == VALIDATOR_STORAGE_PREFIX + && key == VALIDATOR_STATE_STORAGE_KEY + && lazy_map == epoched::LAZY_MAP_SUB_KEY + && data == lazy_map::DATA_SUBKEY => + { + let epoch = Epoch::parse(epoch.clone()) + .expect("Should be able to parse the epoch"); + Some((validator, epoch)) + } + _ => None, + } +} + +/// Is storage key for a validator state's last update or oldest epoch? +pub fn is_validator_state_epoched_meta_key(key: &Key) -> bool { + if key.segments.len() >= 5 { + match &key.segments[..5] { + [ + DbKeySeg::AddressSeg(addr), + DbKeySeg::StringSeg(prefix), + DbKeySeg::AddressSeg(_validator), + DbKeySeg::StringSeg(key), + DbKeySeg::StringSeg(data), + ] => { + addr == &ADDRESS + && prefix == VALIDATOR_STORAGE_PREFIX + && key == VALIDATOR_STATE_STORAGE_KEY + && (data == epoched::LAST_UPDATE_SUB_KEY + || data == epoched::OLDEST_EPOCH_SUB_KEY) + } + _ => false, + } + } else { + false + } +} + +/// Storage key for validator's deltas. +pub fn validator_deltas_key(validator: &Address) -> Key { + validator_prefix(validator) + .push(&VALIDATOR_DELTAS_STORAGE_KEY.to_owned()) + .expect("Cannot obtain a storage key") +} + +/// Is storage key for validator's total deltas? +pub fn is_validator_deltas_key(key: &Key) -> bool { + if key.segments.len() >= 4 { + match &key.segments[..4] { + [ + DbKeySeg::AddressSeg(addr), + DbKeySeg::StringSeg(prefix), + DbKeySeg::AddressSeg(_validator), + DbKeySeg::StringSeg(key), + ] => { + addr == &ADDRESS + && prefix == VALIDATOR_STORAGE_PREFIX + && key == VALIDATOR_DELTAS_STORAGE_KEY + } + _ => false, + } + } else { + false + } +} + +/// Storage prefix for all active validators (consensus, below-capacity, +/// below-threshold, inactive, jailed) +pub fn validator_addresses_key() -> Key { + Key::from(ADDRESS.to_db_key()) + .push(&VALIDATOR_ADDRESSES_KEY.to_owned()) + .expect("Cannot obtain a storage key") +} + +/// Is the storage key a prefix for all active validators? +pub fn is_validator_addresses_key(key: &Key) -> bool { + if key.segments.len() >= 2 { + match &key.segments[..2] { + [DbKeySeg::AddressSeg(addr), DbKeySeg::StringSeg(prefix)] => { + addr == &ADDRESS && prefix == VALIDATOR_ADDRESSES_KEY + } + _ => false, + } + } else { + false + } +} + +/// Storage prefix for slashes. +pub fn slashes_prefix() -> Key { + Key::from(ADDRESS.to_db_key()) + .push(&SLASHES_PREFIX.to_owned()) + .expect("Cannot obtain a storage key") +} + +/// Storage key for all slashes. 
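// Editorial aside, not part of the patch: quick sanity checks for the deltas and
// validator-addresses keys above. Note that `is_validator_state_key`, by contrast,
// only matches the full epoched lazy-map data keys (and parses the epoch out of
// them), so it returns `None` for the bare `validator_state_key` prefix.
// A sketch; the module's imports are assumed.
fn example_deltas_and_addresses_keys(validator: &Address) {
    assert!(is_validator_deltas_key(&validator_deltas_key(validator)));
    assert!(is_validator_addresses_key(&validator_addresses_key()));
}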
+pub fn enqueued_slashes_key() -> Key { + // slashes_prefix() + Key::from(ADDRESS.to_db_key()) + .push(&ENQUEUED_SLASHES_KEY.to_owned()) + .expect("Cannot obtain a storage key") +} + +/// Storage key for validator's slashes. +pub fn validator_slashes_key(validator: &Address) -> Key { + slashes_prefix() + .push(&validator.to_db_key()) + .expect("Cannot obtain a storage key") +} + +/// Is storage key for a validator's slashes? +pub fn is_validator_slashes_key(key: &Key) -> Option<Address>
{ + if key.segments.len() >= 5 { + match &key.segments[..5] { + [ + DbKeySeg::AddressSeg(addr), + DbKeySeg::StringSeg(prefix), + DbKeySeg::AddressSeg(validator), + DbKeySeg::StringSeg(data), + DbKeySeg::StringSeg(_index), + ] if addr == &ADDRESS + && prefix == SLASHES_PREFIX + && data == lazy_vec::DATA_SUBKEY => + { + Some(validator.clone()) + } + _ => None, + } + } else { + None + } +} + +/// Storage key for the last (most recent) epoch in which a slashable offense +/// was detected for a given validator +pub fn validator_last_slash_key(validator: &Address) -> Key { + validator_prefix(validator) + .push(&VALIDATOR_LAST_SLASH_EPOCH.to_owned()) + .expect("Cannot obtain a storage key") +} + +/// Storage key prefix for all bonds. +pub fn bonds_prefix() -> Key { + Key::from(ADDRESS.to_db_key()) + .push(&BOND_STORAGE_KEY.to_owned()) + .expect("Cannot obtain a storage key") +} + +/// Storage key prefix for all bonds of the given source address. +pub fn bonds_for_source_prefix(source: &Address) -> Key { + bonds_prefix() + .push(&source.to_db_key()) + .expect("Cannot obtain a storage key") +} + +/// Storage key for a bond with the given ID (source and validator). +pub fn bond_key(bond_id: &BondId) -> Key { + bonds_for_source_prefix(&bond_id.source) + .push(&bond_id.validator.to_db_key()) + .expect("Cannot obtain a storage key") +} + +/// Is storage key for a bond? Returns the bond ID and bond start epoch if so. +pub fn is_bond_key(key: &Key) -> Option<(BondId, Epoch)> { + if key.segments.len() >= 7 { + match &key.segments[..7] { + [ + DbKeySeg::AddressSeg(addr), + DbKeySeg::StringSeg(prefix), + DbKeySeg::AddressSeg(source), + DbKeySeg::AddressSeg(validator), + DbKeySeg::StringSeg(lazy_map), + DbKeySeg::StringSeg(data), + DbKeySeg::StringSeg(epoch_str), + ] if addr == &ADDRESS + && prefix == BOND_STORAGE_KEY + && lazy_map == epoched::LAZY_MAP_SUB_KEY + && data == lazy_map::DATA_SUBKEY => + { + let start = Epoch::parse(epoch_str.clone()).ok()?; + Some(( + BondId { + source: source.clone(), + validator: validator.clone(), + }, + start, + )) + } + _ => None, + } + } else { + None + } +} + +/// Is storage key for a bond last update or oldest epoch? Returns the bond ID +/// if so. +pub fn is_bond_epoched_meta_key(key: &Key) -> Option { + if key.segments.len() >= 5 { + match &key.segments[..5] { + [ + DbKeySeg::AddressSeg(addr), + DbKeySeg::StringSeg(prefix), + DbKeySeg::AddressSeg(source), + DbKeySeg::AddressSeg(validator), + DbKeySeg::StringSeg(data), + ] if addr == &ADDRESS + && prefix == BOND_STORAGE_KEY + && (data == epoched::LAST_UPDATE_SUB_KEY + || data == epoched::OLDEST_EPOCH_SUB_KEY) => + { + Some(BondId { + source: source.clone(), + validator: validator.clone(), + }) + } + _ => None, + } + } else { + None + } +} + +/// Storage key for the total bonds for a given validator. +pub fn validator_total_bonded_key(validator: &Address) -> Key { + validator_prefix(validator) + .push(&VALIDATOR_TOTAL_BONDED_STORAGE_KEY.to_owned()) + .expect("Cannot obtain a storage key") +} + +/// Is the storage key for the total bonds or unbonds for a validator? 
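// Editorial aside, not part of the patch: bond and unbond storage is indexed by
// `BondId` (source, validator); for self-bonds the source is the validator itself.
// The constructors below build the prefix keys, while `is_bond_key`/`is_unbond_key`
// match the nested lazy-map data keys beneath them and additionally decode the
// start (and, for unbonds, withdraw) epoch. A sketch; imports assumed.
fn example_bond_keys(source: &Address, validator: &Address) -> (Key, Key) {
    let bond_id = BondId {
        source: source.clone(),
        validator: validator.clone(),
    };
    (bond_key(&bond_id), unbond_key(&bond_id))
}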
+pub fn is_validator_total_bond_or_unbond_key(key: &Key) -> bool { + if key.segments.len() >= 4 { + match &key.segments[..4] { + [ + DbKeySeg::AddressSeg(addr), + DbKeySeg::StringSeg(val_prefix), + DbKeySeg::AddressSeg(_validator), + DbKeySeg::StringSeg(prefix), + ] => { + addr == &ADDRESS + && val_prefix == VALIDATOR_STORAGE_PREFIX + && (prefix == VALIDATOR_TOTAL_BONDED_STORAGE_KEY + || prefix == VALIDATOR_TOTAL_UNBONDED_STORAGE_KEY) + } + _ => false, + } + } else { + false + } +} + +/// Storage key prefix for all unbonds. +pub fn unbonds_prefix() -> Key { + Key::from(ADDRESS.to_db_key()) + .push(&UNBOND_STORAGE_KEY.to_owned()) + .expect("Cannot obtain a storage key") +} + +/// Storage key prefix for all unbonds of the given source address. +pub fn unbonds_for_source_prefix(source: &Address) -> Key { + unbonds_prefix() + .push(&source.to_db_key()) + .expect("Cannot obtain a storage key") +} + +/// Storage key for an unbond with the given ID (source and validator). +pub fn unbond_key(bond_id: &BondId) -> Key { + unbonds_for_source_prefix(&bond_id.source) + .push(&bond_id.validator.to_db_key()) + .expect("Cannot obtain a storage key") +} + +/// Is storage key for an unbond? Returns the bond ID and unbond start and +/// withdraw epoch if it is. +pub fn is_unbond_key(key: &Key) -> Option<(BondId, Epoch, Epoch)> { + if key.segments.len() >= 8 { + match &key.segments[..8] { + [ + DbKeySeg::AddressSeg(addr), + DbKeySeg::StringSeg(prefix), + DbKeySeg::AddressSeg(source), + DbKeySeg::AddressSeg(validator), + DbKeySeg::StringSeg(data_1), + DbKeySeg::StringSeg(start_epoch_str), + DbKeySeg::StringSeg(data_2), + DbKeySeg::StringSeg(withdraw_epoch_str), + ] if addr == &ADDRESS + && prefix == UNBOND_STORAGE_KEY + && data_1 == lazy_map::DATA_SUBKEY + && data_2 == lazy_map::DATA_SUBKEY => + { + let withdraw = Epoch::parse(withdraw_epoch_str.clone()).ok()?; + let start = Epoch::parse(start_epoch_str.clone()).ok()?; + Some(( + BondId { + source: source.clone(), + validator: validator.clone(), + }, + start, + withdraw, + )) + } + _ => None, + } + } else { + None + } +} + +/// Storage key for validator's total-unbonded amount to track for slashing +pub fn validator_total_unbonded_key(validator: &Address) -> Key { + validator_prefix(validator) + .push(&VALIDATOR_TOTAL_UNBONDED_STORAGE_KEY.to_owned()) + .expect("Cannot obtain a storage key") +} + +/// Storage prefix for validator sets. +pub fn validator_sets_prefix() -> Key { + Key::from(ADDRESS.to_db_key()) + .push(&VALIDATOR_SETS_STORAGE_PREFIX.to_owned()) + .expect("Cannot obtain a storage key") +} + +/// Storage key for consensus validator set +pub fn consensus_validator_set_key() -> Key { + validator_sets_prefix() + .push(&CONSENSUS_VALIDATOR_SET_STORAGE_KEY.to_owned()) + .expect("Cannot obtain a storage key") +} + +/// Storage key for below-capacity validator set +pub fn below_capacity_validator_set_key() -> Key { + validator_sets_prefix() + .push(&BELOW_CAPACITY_VALIDATOR_SET_STORAGE_KEY.to_owned()) + .expect("Cannot obtain a storage key") +} + +/// Is storage key for the consensus validator set? 
+pub fn is_consensus_validator_set_key(key: &Key) -> bool { + matches!(&key.segments[..], [DbKeySeg::AddressSeg(addr), DbKeySeg::StringSeg(key), DbKeySeg::StringSeg(set_type), DbKeySeg::StringSeg(lazy_map), DbKeySeg::StringSeg(data), DbKeySeg::StringSeg(_epoch), DbKeySeg::StringSeg(_), DbKeySeg::StringSeg(_amount), DbKeySeg::StringSeg(_), DbKeySeg::StringSeg(_position)] if addr == &ADDRESS && key == VALIDATOR_SETS_STORAGE_PREFIX && set_type == CONSENSUS_VALIDATOR_SET_STORAGE_KEY && lazy_map == epoched::LAZY_MAP_SUB_KEY && data == lazy_map::DATA_SUBKEY) +} + +/// Is storage key for the below-capacity validator set? +pub fn is_below_capacity_validator_set_key(key: &Key) -> bool { + matches!(&key.segments[..], [DbKeySeg::AddressSeg(addr), DbKeySeg::StringSeg(key), DbKeySeg::StringSeg(set_type), DbKeySeg::StringSeg(lazy_map), DbKeySeg::StringSeg(data), DbKeySeg::StringSeg(_epoch), DbKeySeg::StringSeg(_), DbKeySeg::StringSeg(_amount), DbKeySeg::StringSeg(_), DbKeySeg::StringSeg(_position)] if addr == &ADDRESS && key == VALIDATOR_SETS_STORAGE_PREFIX && set_type == BELOW_CAPACITY_VALIDATOR_SET_STORAGE_KEY && lazy_map == epoched::LAZY_MAP_SUB_KEY && data == lazy_map::DATA_SUBKEY) +} + +/// Storage key for total consensus stake +pub fn total_consensus_stake_key() -> Key { + Key::from(ADDRESS.to_db_key()) + .push(&TOTAL_CONSENSUS_STAKE_STORAGE_KEY.to_owned()) + .expect("Cannot obtain a total consensus stake key") +} + +/// Is storage key for the total consensus stake? +pub fn is_total_consensus_stake_key(key: &Key) -> bool { + matches!(&key.segments[..], [ + DbKeySeg::AddressSeg(addr), + DbKeySeg::StringSeg(key) + ] if addr == &ADDRESS && key == TOTAL_CONSENSUS_STAKE_STORAGE_KEY) +} + +/// Storage key for total deltas of all validators. +pub fn total_deltas_key() -> Key { + Key::from(ADDRESS.to_db_key()) + .push(&TOTAL_DELTAS_STORAGE_KEY.to_owned()) + .expect("Cannot obtain a storage key") +} + +/// Is storage key for total deltas of all validators? +pub fn is_total_deltas_key(key: &Key) -> bool { + if key.segments.len() >= 2 { + match &key.segments[..2] { + [DbKeySeg::AddressSeg(addr), DbKeySeg::StringSeg(prefix)] => { + addr == &ADDRESS && prefix == TOTAL_DELTAS_STORAGE_KEY + } + _ => false, + } + } else { + false + } +} + +/// Storage key for block proposer address of the previous block. +pub fn last_block_proposer_key() -> Key { + Key::from(ADDRESS.to_db_key()) + .push(&LAST_BLOCK_PROPOSER_STORAGE_KEY.to_owned()) + .expect("Cannot obtain a storage key") +} + +/// Is storage key for block proposer address of the previous block? +pub fn is_last_block_proposer_key(key: &Key) -> bool { + matches!(&key.segments[..], [DbKeySeg::AddressSeg(addr), DbKeySeg::StringSeg(key)] if addr == &ADDRESS && key == LAST_BLOCK_PROPOSER_STORAGE_KEY) +} + +/// Storage key for the consensus validator set rewards accumulator. +pub fn consensus_validator_rewards_accumulator_key() -> Key { + Key::from(ADDRESS.to_db_key()) + .push(&CONSENSUS_VALIDATOR_SET_ACCUMULATOR_STORAGE_KEY.to_owned()) + .expect("Cannot obtain a storage key") +} + +/// Is storage key for the consensus validator set? +pub fn is_consensus_validator_set_accumulator_key(key: &Key) -> bool { + matches!(&key.segments[..], [ + DbKeySeg::AddressSeg(addr), + DbKeySeg::StringSeg(key), + ] if addr == &ADDRESS + && key == CONSENSUS_VALIDATOR_SET_ACCUMULATOR_STORAGE_KEY) +} + +/// Storage prefix for epoch at which an account last claimed PoS inflationary +/// rewards. 
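// Editorial aside, not part of the patch: the last block proposer is written via
// `write_last_block_proposer_address` (see the top of this diff) under the key
// built here, and the predicate recognises exactly that key. A sketch; it assumes
// `StorageRead`/`StorageWrite` and both modules' helpers are in scope.
fn example_record_block_proposer<S: StorageRead + StorageWrite>(
    storage: &mut S,
    proposer: Address,
) -> storage_api::Result<()> {
    debug_assert!(is_last_block_proposer_key(&last_block_proposer_key()));
    write_last_block_proposer_address(storage, proposer)
}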
+pub fn last_pos_reward_claim_epoch_prefix() -> Key { + Key::from(ADDRESS.to_db_key()) + .push(&LAST_REWARD_CLAIM_EPOCH.to_owned()) + .expect("Cannot obtain a storage key") +} + +/// Storage key for epoch at which an account last claimed PoS inflationary +/// rewards. +pub fn last_pos_reward_claim_epoch_key( + delegator: &Address, + validator: &Address, +) -> Key { + last_pos_reward_claim_epoch_prefix() + .push(&delegator.to_db_key()) + .expect("Cannot obtain a storage key") + .push(&validator.to_db_key()) + .expect("Cannot obtain a storage key") +} + +/// Is the storage key for epoch at which an account last claimed PoS +/// inflationary rewards? Return the bond ID if so. +pub fn is_last_pos_reward_claim_epoch_key(key: &Key) -> Option<BondId> { + match &key.segments[..] { + [ + DbKeySeg::AddressSeg(addr), + DbKeySeg::StringSeg(key), + DbKeySeg::AddressSeg(source), + DbKeySeg::AddressSeg(validator), + ] if addr == &ADDRESS && key == LAST_REWARD_CLAIM_EPOCH => { + Some(BondId { + source: source.clone(), + validator: validator.clone(), + }) + } + _ => None, + } +} + +/// Get validator address from bond key +pub fn get_validator_address_from_bond(key: &Key) -> Option<Address>
{ + match key.get_at(3) { + Some(segment) => match segment { + DbKeySeg::AddressSeg(addr) => Some(addr.clone()), + DbKeySeg::StringSeg(_) => None, + }, + None => None, + } +} + +/// Storage key for validator set positions +pub fn validator_set_positions_key() -> Key { + Key::from(ADDRESS.to_db_key()) + .push(&VALIDATOR_SET_POSITIONS_KEY.to_owned()) + .expect("Cannot obtain a storage key") +} + +/// Is the storage key for validator set positions? +pub fn is_validator_set_positions_key(key: &Key) -> bool { + if key.segments.len() >= 2 { + match &key.segments[..2] { + [DbKeySeg::AddressSeg(addr), DbKeySeg::StringSeg(prefix)] => { + addr == &ADDRESS && prefix == VALIDATOR_SET_POSITIONS_KEY + } + _ => false, + } + } else { + false + } +} + +/// Storage key for consensus keys set. +pub fn consensus_keys_key() -> Key { + Key::from(ADDRESS.to_db_key()) + .push(&CONSENSUS_KEYS.to_owned()) + .expect("Cannot obtain a storage key") +} + +/// Is storage key for consensus keys set? +pub fn is_consensus_keys_key(key: &Key) -> bool { + matches!(&key.segments[..], [DbKeySeg::AddressSeg(addr), DbKeySeg::StringSeg(key)] if addr == &ADDRESS && key == CONSENSUS_KEYS) +} + +/// Storage key for a validator's email +pub fn validator_email_key(validator: &Address) -> Key { + validator_prefix(validator) + .push(&VALIDATOR_EMAIL_KEY.to_owned()) + .expect("Cannot obtain a storage key") +} + +/// Storage key for a validator's description +pub fn validator_description_key(validator: &Address) -> Key { + validator_prefix(validator) + .push(&VALIDATOR_DESCRIPTION_KEY.to_owned()) + .expect("Cannot obtain a storage key") +} + +/// Storage key for a validator's website +pub fn validator_website_key(validator: &Address) -> Key { + validator_prefix(validator) + .push(&VALIDATOR_WEBSITE_KEY.to_owned()) + .expect("Cannot obtain a storage key") +} + +/// Storage key for a validator's discord handle +pub fn validator_discord_key(validator: &Address) -> Key { + validator_prefix(validator) + .push(&VALIDATOR_DISCORD_KEY.to_owned()) + .expect("Cannot obtain a storage key") +} + +/// Storage prefix for the liveness data of the cosnensus validator set. +pub fn liveness_data_prefix() -> Key { + Key::from(ADDRESS.to_db_key()) + .push(&LIVENESS_PREFIX.to_owned()) + .expect("Cannot obtain a storage key") +} + +/// Storage key for the liveness records. +pub fn liveness_missed_votes_key() -> Key { + liveness_data_prefix() + .push(&LIVENESS_MISSED_VOTES.to_owned()) + .expect("Cannot obtain a storage key") +} + +/// Storage key for the liveness data. +pub fn liveness_sum_missed_votes_key() -> Key { + liveness_data_prefix() + .push(&LIVENESS_MISSED_VOTES_SUM.to_owned()) + .expect("Cannot obtain a storage key") +} diff --git a/proof_of_stake/src/tests.rs b/proof_of_stake/src/tests.rs deleted file mode 100644 index bc6c71c841..0000000000 --- a/proof_of_stake/src/tests.rs +++ /dev/null @@ -1,6883 +0,0 @@ -//! 
PoS system tests - -mod state_machine; -mod state_machine_v2; -mod utils; - -use std::cmp::{max, min}; -use std::collections::{BTreeMap, BTreeSet, HashSet}; -use std::ops::{Deref, Range}; -use std::str::FromStr; - -use assert_matches::assert_matches; -use namada_core::ledger::storage::testing::TestWlStorage; -use namada_core::ledger::storage_api::collections::lazy_map::{ - self, Collectable, NestedMap, -}; -use namada_core::ledger::storage_api::collections::LazyCollection; -use namada_core::ledger::storage_api::token::{credit_tokens, read_balance}; -use namada_core::ledger::storage_api::StorageRead; -use namada_core::types::address::testing::{ - address_from_simple_seed, arb_established_address, established_address_1, - established_address_2, established_address_3, -}; -use namada_core::types::address::{Address, EstablishedAddressGen}; -use namada_core::types::dec::Dec; -use namada_core::types::key::common::{PublicKey, SecretKey}; -use namada_core::types::key::testing::{ - arb_common_keypair, common_sk_from_simple_seed, gen_keypair, -}; -use namada_core::types::key::RefTo; -use namada_core::types::storage::{BlockHeight, Epoch, Key}; -use namada_core::types::token::testing::arb_amount_non_zero_ceiled; -use namada_core::types::token::NATIVE_MAX_DECIMAL_PLACES; -use namada_core::types::{address, key, token}; -use proptest::prelude::*; -use proptest::test_runner::Config; -// Use `RUST_LOG=info` (or another tracing level) and `--nocapture` to see -// `tracing` logs from tests -use test_log::test; - -use crate::epoched::DEFAULT_NUM_PAST_EPOCHS; -use crate::parameters::testing::arb_pos_params; -use crate::parameters::{OwnedPosParams, PosParams}; -use crate::rewards::PosRewardsCalculator; -use crate::test_utils::test_init_genesis; -use crate::types::{ - into_tm_voting_power, BondDetails, BondId, BondsAndUnbondsDetails, - ConsensusValidator, EagerRedelegatedBondsMap, GenesisValidator, Position, - RedelegatedTokens, ReverseOrdTokenAmount, Slash, SlashType, UnbondDetails, - ValidatorSetUpdate, ValidatorState, VoteInfo, WeightedValidator, -}; -use crate::{ - apply_list_slashes, become_validator, below_capacity_validator_set_handle, - bond_handle, bond_tokens, bonds_and_unbonds, change_consensus_key, - compute_amount_after_slashing_unbond, - compute_amount_after_slashing_withdraw, - compute_and_store_total_consensus_stake, compute_bond_at_epoch, - compute_modified_redelegation, compute_new_redelegated_unbonds, - compute_slash_bond_at_epoch, compute_slashable_amount, - consensus_validator_set_handle, copy_validator_sets_and_positions, - delegator_redelegated_bonds_handle, delegator_redelegated_unbonds_handle, - find_bonds_to_remove, find_validator_by_raw_hash, - fold_and_slash_redelegated_bonds, get_consensus_key_set, - get_num_consensus_validators, insert_validator_into_validator_set, - is_validator, process_slashes, - read_below_capacity_validator_set_addresses_with_stake, - read_below_threshold_validator_set_addresses, - read_consensus_validator_set_addresses_with_stake, read_total_stake, - read_validator_deltas_value, read_validator_stake, slash, - slash_redelegation, slash_validator, slash_validator_redelegation, - staking_token_address, total_bonded_handle, total_deltas_handle, - total_unbonded_handle, unbond_handle, unbond_tokens, unjail_validator, - update_validator_deltas, update_validator_set, - validator_consensus_key_handle, validator_incoming_redelegations_handle, - validator_outgoing_redelegations_handle, validator_set_positions_handle, - validator_set_update_tendermint, 
validator_slashes_handle, - validator_state_handle, validator_total_redelegated_bonded_handle, - validator_total_redelegated_unbonded_handle, withdraw_tokens, - write_pos_params, write_validator_address_raw_hash, BecomeValidator, - EagerRedelegatedUnbonds, FoldRedelegatedBondsResult, ModifiedRedelegation, - RedelegationError, -}; - -proptest! { - // Generate arb valid input for `test_test_init_genesis_aux` - #![proptest_config(Config { - cases: 100, - .. Config::default() - })] - #[test] - fn test_test_init_genesis( - - (pos_params, genesis_validators) in arb_params_and_genesis_validators(Some(5), 1..10), - start_epoch in (0_u64..1000).prop_map(Epoch), - - ) { - test_test_init_genesis_aux(pos_params, start_epoch, genesis_validators) - } -} - -proptest! { - // Generate arb valid input for `test_bonds_aux` - #![proptest_config(Config { - cases: 100, - .. Config::default() - })] - #[test] - fn test_bonds( - - (pos_params, genesis_validators) in arb_params_and_genesis_validators(Some(5), 1..3), - - ) { - test_bonds_aux(pos_params, genesis_validators) - } -} - -proptest! { - // Generate arb valid input for `test_become_validator_aux` - #![proptest_config(Config { - cases: 100, - .. Config::default() - })] - #[test] - fn test_become_validator( - - (pos_params, genesis_validators) in arb_params_and_genesis_validators(Some(5), 1..3), - new_validator in arb_established_address().prop_map(Address::Established), - new_validator_consensus_key in arb_common_keypair(), - - ) { - test_become_validator_aux(pos_params, new_validator, - new_validator_consensus_key, genesis_validators) - } -} - -proptest! { - // Generate arb valid input for `test_slashes_with_unbonding_aux` - #![proptest_config(Config { - cases: 100, - .. Config::default() - })] - #[test] - fn test_slashes_with_unbonding( - (params, genesis_validators, unbond_delay) - in test_slashes_with_unbonding_params() - ) { - test_slashes_with_unbonding_aux( - params, genesis_validators, unbond_delay) - } -} - -proptest! { - // Generate arb valid input for `test_unjail_validator_aux` - #![proptest_config(Config { - cases: 100, - .. Config::default() - })] - #[test] - fn test_unjail_validator( - (pos_params, genesis_validators) - in arb_params_and_genesis_validators(Some(4),6..9) - ) { - test_unjail_validator_aux(pos_params, - genesis_validators) - } -} - -proptest! { - // Generate arb valid input for `test_simple_redelegation_aux` - #![proptest_config(Config { - cases: 100, - .. Config::default() - })] - #[test] - fn test_simple_redelegation( - - genesis_validators in arb_genesis_validators(2..4, None), - (amount_delegate, amount_redelegate, amount_unbond) in arb_redelegation_amounts(20) - - ) { - test_simple_redelegation_aux(genesis_validators, amount_delegate, amount_redelegate, amount_unbond) - } -} - -proptest! { - // Generate arb valid input for `test_simple_redelegation_aux` - #![proptest_config(Config { - cases: 100, - .. Config::default() - })] - #[test] - fn test_redelegation_with_slashing( - - genesis_validators in arb_genesis_validators(2..4, None), - (amount_delegate, amount_redelegate, amount_unbond) in arb_redelegation_amounts(20) - - ) { - test_redelegation_with_slashing_aux(genesis_validators, amount_delegate, amount_redelegate, amount_unbond) - } -} - -proptest! { - // Generate arb valid input for `test_chain_redelegations_aux` - #![proptest_config(Config { - cases: 100, - .. 
Config::default() - })] - #[test] - fn test_chain_redelegations( - - genesis_validators in arb_genesis_validators(3..4, None), - - ) { - test_chain_redelegations_aux(genesis_validators) - } -} - -proptest! { - // Generate arb valid input for `test_overslashing_aux` - #![proptest_config(Config { - cases: 1, - .. Config::default() - })] - #[test] - fn test_overslashing( - - genesis_validators in arb_genesis_validators(4..5, None), - - ) { - test_overslashing_aux(genesis_validators) - } -} - -proptest! { - // Generate arb valid input for `test_unslashed_bond_amount_aux` - #![proptest_config(Config { - cases: 1, - .. Config::default() - })] - #[test] - fn test_unslashed_bond_amount( - - genesis_validators in arb_genesis_validators(4..5, None), - - ) { - test_unslashed_bond_amount_aux(genesis_validators) - } -} - -proptest! { - // Generate arb valid input for `test_slashed_bond_amount_aux` - #![proptest_config(Config { - cases: 1, - .. Config::default() - })] - #[test] - fn test_slashed_bond_amount( - - genesis_validators in arb_genesis_validators(4..5, None), - - ) { - test_slashed_bond_amount_aux(genesis_validators) - } -} - -proptest! { - // Generate arb valid input for `test_log_block_rewards_aux` - #![proptest_config(Config { - cases: 1, - .. Config::default() - })] - #[test] - fn test_log_block_rewards( - genesis_validators in arb_genesis_validators(4..10, None), - params in arb_pos_params(Some(5)) - - ) { - test_log_block_rewards_aux(genesis_validators, params) - } -} - -proptest! { - // Generate arb valid input for `test_update_rewards_products_aux` - #![proptest_config(Config { - cases: 1, - .. Config::default() - })] - #[test] - fn test_update_rewards_products( - genesis_validators in arb_genesis_validators(4..10, None), - - ) { - test_update_rewards_products_aux(genesis_validators) - } -} - -proptest! { - // Generate arb valid input for `test_consensus_key_change` - #![proptest_config(Config { - cases: 1, - .. Config::default() - })] - #[test] - fn test_consensus_key_change( - - genesis_validators in arb_genesis_validators(1..2, None), - - ) { - test_consensus_key_change_aux(genesis_validators) - } -} - -proptest! { - // Generate arb valid input for `test_is_delegator` - #![proptest_config(Config { - cases: 100, - .. 
Config::default() - })] - #[test] - fn test_is_delegator( - - genesis_validators in arb_genesis_validators(2..3, None), - - ) { - test_is_delegator_aux(genesis_validators) - } -} - -fn arb_params_and_genesis_validators( - num_max_validator_slots: Option, - val_size: Range, -) -> impl Strategy)> { - let params = arb_pos_params(num_max_validator_slots); - params.prop_flat_map(move |params| { - let validators = arb_genesis_validators( - val_size.clone(), - Some(params.validator_stake_threshold), - ); - (Just(params), validators) - }) -} - -fn test_slashes_with_unbonding_params() --> impl Strategy, u64)> { - let params = arb_pos_params(Some(5)); - params.prop_flat_map(|params| { - let unbond_delay = 0..(params.slash_processing_epoch_offset() * 2); - // Must have at least 4 validators so we can slash one and the cubic - // slash rate will be less than 100% - let validators = arb_genesis_validators(4..10, None); - (Just(params), validators, unbond_delay) - }) -} - -/// Test genesis initialization -fn test_test_init_genesis_aux( - params: OwnedPosParams, - start_epoch: Epoch, - mut validators: Vec, -) { - println!( - "Test inputs: {params:?}, {start_epoch}, genesis validators: \ - {validators:#?}" - ); - let mut s = TestWlStorage::default(); - s.storage.block.epoch = start_epoch; - - validators.sort_by(|a, b| b.tokens.cmp(&a.tokens)); - let params = test_init_genesis( - &mut s, - params, - validators.clone().into_iter(), - start_epoch, - ) - .unwrap(); - - let mut bond_details = bonds_and_unbonds(&s, None, None).unwrap(); - assert!(bond_details.iter().all(|(_id, details)| { - details.unbonds.is_empty() && details.slashes.is_empty() - })); - - for (i, validator) in validators.into_iter().enumerate() { - let addr = &validator.address; - let self_bonds = bond_details - .remove(&BondId { - source: addr.clone(), - validator: addr.clone(), - }) - .unwrap(); - assert_eq!(self_bonds.bonds.len(), 1); - assert_eq!( - self_bonds.bonds[0], - BondDetails { - start: start_epoch, - amount: validator.tokens, - slashed_amount: None, - } - ); - - let state = validator_state_handle(&validator.address) - .get(&s, start_epoch, ¶ms) - .unwrap(); - if (i as u64) < params.max_validator_slots - && validator.tokens >= params.validator_stake_threshold - { - // should be in consensus set - let handle = consensus_validator_set_handle().at(&start_epoch); - assert!(handle.at(&validator.tokens).iter(&s).unwrap().any( - |result| { - let (_pos, addr) = result.unwrap(); - addr == validator.address - } - )); - assert_eq!(state, Some(ValidatorState::Consensus)); - } else if validator.tokens >= params.validator_stake_threshold { - // Should be in below-capacity set if its tokens are greater than - // `validator_stake_threshold` - let handle = below_capacity_validator_set_handle().at(&start_epoch); - assert!(handle.at(&validator.tokens.into()).iter(&s).unwrap().any( - |result| { - let (_pos, addr) = result.unwrap(); - addr == validator.address - } - )); - assert_eq!(state, Some(ValidatorState::BelowCapacity)); - } else { - // Should be in below-threshold - let bt_addresses = - read_below_threshold_validator_set_addresses(&s, start_epoch) - .unwrap(); - assert!( - bt_addresses - .into_iter() - .any(|addr| { addr == validator.address }) - ); - assert_eq!(state, Some(ValidatorState::BelowThreshold)); - } - } -} - -/// Test bonding -/// NOTE: copy validator sets each time we advance the epoch -fn test_bonds_aux(params: OwnedPosParams, validators: Vec) { - // This can be useful for debugging: - // params.pipeline_len = 2; - // 
params.unbonding_len = 4; - println!("\nTest inputs: {params:?}, genesis validators: {validators:#?}"); - let mut s = TestWlStorage::default(); - - // Genesis - let start_epoch = s.storage.block.epoch; - let mut current_epoch = s.storage.block.epoch; - let params = test_init_genesis( - &mut s, - params, - validators.clone().into_iter(), - current_epoch, - ) - .unwrap(); - s.commit_block().unwrap(); - - // Advance to epoch 1 - current_epoch = advance_epoch(&mut s, ¶ms); - let self_bond_epoch = current_epoch; - - let validator = validators.first().unwrap(); - - // Read some data before submitting bond - let pipeline_epoch = current_epoch + params.pipeline_len; - let staking_token = staking_token_address(&s); - let pos_balance_pre = s - .read::(&token::balance_key( - &staking_token, - &super::ADDRESS, - )) - .unwrap() - .unwrap_or_default(); - let total_stake_before = - read_total_stake(&s, ¶ms, pipeline_epoch).unwrap(); - - // Self-bond - let amount_self_bond = token::Amount::from_uint(100_500_000, 0).unwrap(); - credit_tokens(&mut s, &staking_token, &validator.address, amount_self_bond) - .unwrap(); - bond_tokens( - &mut s, - None, - &validator.address, - amount_self_bond, - current_epoch, - None, - ) - .unwrap(); - - // Check the bond delta - let self_bond = bond_handle(&validator.address, &validator.address); - let delta = self_bond.get_delta_val(&s, pipeline_epoch).unwrap(); - assert_eq!(delta, Some(amount_self_bond)); - - // Check the validator in the validator set - let set = - read_consensus_validator_set_addresses_with_stake(&s, pipeline_epoch) - .unwrap(); - assert!(set.into_iter().any( - |WeightedValidator { - bonded_stake, - address, - }| { - address == validator.address - && bonded_stake == validator.tokens + amount_self_bond - } - )); - - let val_deltas = - read_validator_deltas_value(&s, &validator.address, &pipeline_epoch) - .unwrap(); - assert_eq!(val_deltas, Some(amount_self_bond.change())); - - let total_deltas_handle = total_deltas_handle(); - assert_eq!( - current_epoch, - total_deltas_handle.get_last_update(&s).unwrap().unwrap() - ); - let total_stake_after = - read_total_stake(&s, ¶ms, pipeline_epoch).unwrap(); - assert_eq!(total_stake_before + amount_self_bond, total_stake_after); - - // Check bond details after self-bond - let self_bond_id = BondId { - source: validator.address.clone(), - validator: validator.address.clone(), - }; - let check_bond_details = |ix, bond_details: BondsAndUnbondsDetails| { - println!("Check index {ix}"); - let details = bond_details.get(&self_bond_id).unwrap(); - assert_eq!( - details.bonds.len(), - 2, - "Contains genesis and newly added self-bond" - ); - dbg!(&details.bonds); - assert_eq!( - details.bonds[0], - BondDetails { - start: start_epoch, - amount: validator.tokens, - slashed_amount: None - }, - ); - assert_eq!( - details.bonds[1], - BondDetails { - start: pipeline_epoch, - amount: amount_self_bond, - slashed_amount: None - }, - ); - }; - // Try to call it with different combinations of owner/validator args - check_bond_details(0, bonds_and_unbonds(&s, None, None).unwrap()); - check_bond_details( - 1, - bonds_and_unbonds(&s, Some(validator.address.clone()), None).unwrap(), - ); - check_bond_details( - 2, - bonds_and_unbonds(&s, None, Some(validator.address.clone())).unwrap(), - ); - check_bond_details( - 3, - bonds_and_unbonds( - &s, - Some(validator.address.clone()), - Some(validator.address.clone()), - ) - .unwrap(), - ); - - // Get a non-validating account with tokens - let delegator = address::testing::gen_implicit_address(); 
- let amount_del = token::Amount::from_uint(201_000_000, 0).unwrap(); - credit_tokens(&mut s, &staking_token, &delegator, amount_del).unwrap(); - let balance_key = token::balance_key(&staking_token, &delegator); - let balance = s - .read::(&balance_key) - .unwrap() - .unwrap_or_default(); - assert_eq!(balance, amount_del); - - // Advance to epoch 3 - advance_epoch(&mut s, ¶ms); - current_epoch = advance_epoch(&mut s, ¶ms); - let pipeline_epoch = current_epoch + params.pipeline_len; - - // Delegation - let delegation_epoch = current_epoch; - bond_tokens( - &mut s, - Some(&delegator), - &validator.address, - amount_del, - current_epoch, - None, - ) - .unwrap(); - let val_stake_pre = read_validator_stake( - &s, - ¶ms, - &validator.address, - pipeline_epoch.prev(), - ) - .unwrap(); - let val_stake_post = - read_validator_stake(&s, ¶ms, &validator.address, pipeline_epoch) - .unwrap(); - assert_eq!(validator.tokens + amount_self_bond, val_stake_pre); - assert_eq!( - validator.tokens + amount_self_bond + amount_del, - val_stake_post - ); - let delegation = bond_handle(&delegator, &validator.address); - assert_eq!( - delegation - .get_sum(&s, pipeline_epoch.prev(), ¶ms) - .unwrap() - .unwrap_or_default(), - token::Amount::zero() - ); - assert_eq!( - delegation - .get_sum(&s, pipeline_epoch, ¶ms) - .unwrap() - .unwrap_or_default(), - amount_del - ); - - // Check delegation bonds details after delegation - let delegation_bond_id = BondId { - source: delegator.clone(), - validator: validator.address.clone(), - }; - let check_bond_details = |ix, bond_details: BondsAndUnbondsDetails| { - println!("Check index {ix}"); - assert_eq!(bond_details.len(), 1); - let details = bond_details.get(&delegation_bond_id).unwrap(); - assert_eq!(details.bonds.len(), 1,); - dbg!(&details.bonds); - assert_eq!( - details.bonds[0], - BondDetails { - start: pipeline_epoch, - amount: amount_del, - slashed_amount: None - }, - ); - }; - // Try to call it with different combinations of owner/validator args - check_bond_details( - 0, - bonds_and_unbonds(&s, Some(delegator.clone()), None).unwrap(), - ); - check_bond_details( - 1, - bonds_and_unbonds( - &s, - Some(delegator.clone()), - Some(validator.address.clone()), - ) - .unwrap(), - ); - - // Check all bond details (self-bonds and delegation) - let check_bond_details = |ix, bond_details: BondsAndUnbondsDetails| { - println!("Check index {ix}"); - let self_bond_details = bond_details.get(&self_bond_id).unwrap(); - let delegation_details = bond_details.get(&delegation_bond_id).unwrap(); - assert_eq!( - self_bond_details.bonds.len(), - 2, - "Contains genesis and newly added self-bond" - ); - assert_eq!( - self_bond_details.bonds[0], - BondDetails { - start: start_epoch, - amount: validator.tokens, - slashed_amount: None - }, - ); - assert_eq!(self_bond_details.bonds[1].amount, amount_self_bond); - assert_eq!( - delegation_details.bonds[0], - BondDetails { - start: pipeline_epoch, - amount: amount_del, - slashed_amount: None - }, - ); - }; - // Try to call it with different combinations of owner/validator args - check_bond_details(0, bonds_and_unbonds(&s, None, None).unwrap()); - check_bond_details( - 1, - bonds_and_unbonds(&s, None, Some(validator.address.clone())).unwrap(), - ); - - // Advance to epoch 5 - for _ in 0..2 { - current_epoch = advance_epoch(&mut s, ¶ms); - } - let pipeline_epoch = current_epoch + params.pipeline_len; - - // Unbond the self-bond with an amount that will remove all of the self-bond - // executed after genesis and some of the genesis bond - let 
amount_self_unbond: token::Amount = - amount_self_bond + (validator.tokens / 2); - // When the difference is 0, only the non-genesis self-bond is unbonded - let unbonded_genesis_self_bond = - amount_self_unbond - amount_self_bond != token::Amount::zero(); - dbg!( - amount_self_unbond, - amount_self_bond, - unbonded_genesis_self_bond - ); - let self_unbond_epoch = s.storage.block.epoch; - - unbond_tokens( - &mut s, - None, - &validator.address, - amount_self_unbond, - current_epoch, - false, - ) - .unwrap(); - - let val_stake_pre = read_validator_stake( - &s, - ¶ms, - &validator.address, - pipeline_epoch.prev(), - ) - .unwrap(); - - let val_stake_post = - read_validator_stake(&s, ¶ms, &validator.address, pipeline_epoch) - .unwrap(); - - let val_delta = - read_validator_deltas_value(&s, &validator.address, &pipeline_epoch) - .unwrap(); - let unbond = unbond_handle(&validator.address, &validator.address); - - assert_eq!(val_delta, Some(-amount_self_unbond.change())); - assert_eq!( - unbond - .at(&Epoch::default()) - .get( - &s, - &(pipeline_epoch - + params.unbonding_len - + params.cubic_slashing_window_length) - ) - .unwrap(), - if unbonded_genesis_self_bond { - Some(amount_self_unbond - amount_self_bond) - } else { - None - } - ); - assert_eq!( - unbond - .at(&(self_bond_epoch + params.pipeline_len)) - .get( - &s, - &(pipeline_epoch - + params.unbonding_len - + params.cubic_slashing_window_length) - ) - .unwrap(), - Some(amount_self_bond) - ); - assert_eq!( - val_stake_pre, - validator.tokens + amount_self_bond + amount_del - ); - assert_eq!( - val_stake_post, - validator.tokens + amount_self_bond + amount_del - amount_self_unbond - ); - - // Check all bond and unbond details (self-bonds and delegation) - let check_bond_details = |ix, bond_details: BondsAndUnbondsDetails| { - println!("Check index {ix}"); - dbg!(&bond_details); - assert_eq!(bond_details.len(), 2); - let self_bond_details = bond_details.get(&self_bond_id).unwrap(); - let delegation_details = bond_details.get(&delegation_bond_id).unwrap(); - assert_eq!( - self_bond_details.bonds.len(), - 1, - "Contains only part of the genesis bond now" - ); - assert_eq!( - self_bond_details.bonds[0], - BondDetails { - start: start_epoch, - amount: validator.tokens + amount_self_bond - - amount_self_unbond, - slashed_amount: None - }, - ); - assert_eq!( - delegation_details.bonds[0], - BondDetails { - start: delegation_epoch + params.pipeline_len, - amount: amount_del, - slashed_amount: None - }, - ); - assert_eq!( - self_bond_details.unbonds.len(), - if unbonded_genesis_self_bond { 2 } else { 1 }, - "Contains a full unbond of the last self-bond and an unbond from \ - the genesis bond" - ); - if unbonded_genesis_self_bond { - assert_eq!( - self_bond_details.unbonds[0], - UnbondDetails { - start: start_epoch, - withdraw: self_unbond_epoch - + params.pipeline_len - + params.unbonding_len - + params.cubic_slashing_window_length, - amount: amount_self_unbond - amount_self_bond, - slashed_amount: None - } - ); - } - assert_eq!( - self_bond_details.unbonds[usize::from(unbonded_genesis_self_bond)], - UnbondDetails { - start: self_bond_epoch + params.pipeline_len, - withdraw: self_unbond_epoch - + params.pipeline_len - + params.unbonding_len - + params.cubic_slashing_window_length, - amount: amount_self_bond, - slashed_amount: None - } - ); - }; - check_bond_details( - 0, - bonds_and_unbonds(&s, None, Some(validator.address.clone())).unwrap(), - ); - - // Unbond delegation - let amount_undel = token::Amount::from_uint(1_000_000, 0).unwrap(); - 
unbond_tokens( - &mut s, - Some(&delegator), - &validator.address, - amount_undel, - current_epoch, - false, - ) - .unwrap(); - - let val_stake_pre = read_validator_stake( - &s, - ¶ms, - &validator.address, - pipeline_epoch.prev(), - ) - .unwrap(); - let val_stake_post = - read_validator_stake(&s, ¶ms, &validator.address, pipeline_epoch) - .unwrap(); - let val_delta = - read_validator_deltas_value(&s, &validator.address, &pipeline_epoch) - .unwrap(); - let unbond = unbond_handle(&delegator, &validator.address); - - assert_eq!( - val_delta, - Some(-(amount_self_unbond + amount_undel).change()) - ); - assert_eq!( - unbond - .at(&(delegation_epoch + params.pipeline_len)) - .get( - &s, - &(pipeline_epoch - + params.unbonding_len - + params.cubic_slashing_window_length) - ) - .unwrap(), - Some(amount_undel) - ); - assert_eq!( - val_stake_pre, - validator.tokens + amount_self_bond + amount_del - ); - assert_eq!( - val_stake_post, - validator.tokens + amount_self_bond - amount_self_unbond + amount_del - - amount_undel - ); - - let withdrawable_offset = params.unbonding_len - + params.pipeline_len - + params.cubic_slashing_window_length; - - // Advance to withdrawable epoch - for _ in 0..withdrawable_offset { - current_epoch = advance_epoch(&mut s, ¶ms); - } - - dbg!(current_epoch); - - let pos_balance = s - .read::(&token::balance_key( - &staking_token, - &super::ADDRESS, - )) - .unwrap(); - - assert_eq!( - Some(pos_balance_pre + amount_self_bond + amount_del), - pos_balance - ); - - // Withdraw the self-unbond - withdraw_tokens(&mut s, None, &validator.address, current_epoch).unwrap(); - let unbond = unbond_handle(&validator.address, &validator.address); - let unbond_iter = unbond.iter(&s).unwrap().next(); - assert!(unbond_iter.is_none()); - - let pos_balance = s - .read::(&token::balance_key( - &staking_token, - &super::ADDRESS, - )) - .unwrap(); - assert_eq!( - Some( - pos_balance_pre + amount_self_bond - amount_self_unbond - + amount_del - ), - pos_balance - ); - - // Withdraw the delegation unbond - withdraw_tokens( - &mut s, - Some(&delegator), - &validator.address, - current_epoch, - ) - .unwrap(); - let unbond = unbond_handle(&delegator, &validator.address); - let unbond_iter = unbond.iter(&s).unwrap().next(); - assert!(unbond_iter.is_none()); - - let pos_balance = s - .read::(&token::balance_key( - &staking_token, - &super::ADDRESS, - )) - .unwrap(); - assert_eq!( - Some( - pos_balance_pre + amount_self_bond - amount_self_unbond - + amount_del - - amount_undel - ), - pos_balance - ); -} - -/// Test validator initialization. 
-fn test_become_validator_aux( - params: OwnedPosParams, - new_validator: Address, - new_validator_consensus_key: SecretKey, - validators: Vec, -) { - println!( - "Test inputs: {params:?}, new validator: {new_validator}, genesis \ - validators: {validators:#?}" - ); - - let mut s = TestWlStorage::default(); - - // Genesis - let mut current_epoch = s.storage.block.epoch; - let params = test_init_genesis( - &mut s, - params, - validators.clone().into_iter(), - current_epoch, - ) - .unwrap(); - s.commit_block().unwrap(); - - // Advance to epoch 1 - current_epoch = advance_epoch(&mut s, ¶ms); - - let num_consensus_before = - get_num_consensus_validators(&s, current_epoch + params.pipeline_len) - .unwrap(); - let num_validators_over_thresh = validators - .iter() - .filter(|validator| { - validator.tokens >= params.validator_stake_threshold - }) - .count(); - - assert_eq!( - min( - num_validators_over_thresh as u64, - params.max_validator_slots - ), - num_consensus_before - ); - assert!(!is_validator(&s, &new_validator).unwrap()); - - // Credit the `new_validator` account - let staking_token = staking_token_address(&s); - let amount = token::Amount::from_uint(100_500_000, 0).unwrap(); - // Credit twice the amount as we're gonna bond it in delegation first, then - // self-bond - credit_tokens(&mut s, &staking_token, &new_validator, amount * 2).unwrap(); - - // Add a delegation from `new_validator` to `genesis_validator` - let genesis_validator = &validators.first().unwrap().address; - bond_tokens( - &mut s, - Some(&new_validator), - genesis_validator, - amount, - current_epoch, - None, - ) - .unwrap(); - - let consensus_key = new_validator_consensus_key.to_public(); - let protocol_sk = common_sk_from_simple_seed(0); - let protocol_key = protocol_sk.to_public(); - let eth_hot_key = key::common::PublicKey::Secp256k1( - key::testing::gen_keypair::().ref_to(), - ); - let eth_cold_key = key::common::PublicKey::Secp256k1( - key::testing::gen_keypair::().ref_to(), - ); - - // Try to become a validator - it should fail as there is a delegation - let result = become_validator( - &mut s, - BecomeValidator { - params: ¶ms, - address: &new_validator, - consensus_key: &consensus_key, - protocol_key: &protocol_key, - eth_cold_key: ð_cold_key, - eth_hot_key: ð_hot_key, - current_epoch, - commission_rate: Dec::new(5, 2).expect("Dec creation failed"), - max_commission_rate_change: Dec::new(5, 2) - .expect("Dec creation failed"), - metadata: Default::default(), - offset_opt: None, - }, - ); - assert!(result.is_err()); - assert!(!is_validator(&s, &new_validator).unwrap()); - - // Unbond the delegation - unbond_tokens( - &mut s, - Some(&new_validator), - genesis_validator, - amount, - current_epoch, - false, - ) - .unwrap(); - - // Try to become a validator account again - it should pass now - become_validator( - &mut s, - BecomeValidator { - params: ¶ms, - address: &new_validator, - consensus_key: &consensus_key, - protocol_key: &protocol_key, - eth_cold_key: ð_cold_key, - eth_hot_key: ð_hot_key, - current_epoch, - commission_rate: Dec::new(5, 2).expect("Dec creation failed"), - max_commission_rate_change: Dec::new(5, 2) - .expect("Dec creation failed"), - metadata: Default::default(), - offset_opt: None, - }, - ) - .unwrap(); - assert!(is_validator(&s, &new_validator).unwrap()); - - let num_consensus_after = - get_num_consensus_validators(&s, current_epoch + params.pipeline_len) - .unwrap(); - // The new validator is initialized with no stake and thus is in the - // below-threshold set - 
assert_eq!(num_consensus_before, num_consensus_after); - - // Advance to epoch 2 - current_epoch = advance_epoch(&mut s, ¶ms); - - // Self-bond to the new validator - bond_tokens(&mut s, None, &new_validator, amount, current_epoch, None) - .unwrap(); - - // Check the bond delta - let bond_handle = bond_handle(&new_validator, &new_validator); - let pipeline_epoch = current_epoch + params.pipeline_len; - let delta = bond_handle.get_delta_val(&s, pipeline_epoch).unwrap(); - assert_eq!(delta, Some(amount)); - - // Check the validator in the validator set - - // If the consensus validator slots are full and all the genesis validators - // have stake GTE the new validator's self-bond amount, the validator should - // be added to the below-capacity set, or the consensus otherwise - if params.max_validator_slots <= validators.len() as u64 - && validators - .iter() - .all(|validator| validator.tokens >= amount) - { - let set = read_below_capacity_validator_set_addresses_with_stake( - &s, - pipeline_epoch, - ) - .unwrap(); - assert!(set.into_iter().any( - |WeightedValidator { - bonded_stake, - address, - }| { - address == new_validator && bonded_stake == amount - } - )); - } else { - let set = read_consensus_validator_set_addresses_with_stake( - &s, - pipeline_epoch, - ) - .unwrap(); - assert!(set.into_iter().any( - |WeightedValidator { - bonded_stake, - address, - }| { - address == new_validator && bonded_stake == amount - } - )); - } - - // Advance to epoch 3 - current_epoch = advance_epoch(&mut s, ¶ms); - - // Unbond the self-bond - unbond_tokens(&mut s, None, &new_validator, amount, current_epoch, false) - .unwrap(); - - let withdrawable_offset = params.unbonding_len + params.pipeline_len; - - // Advance to withdrawable epoch - for _ in 0..withdrawable_offset { - current_epoch = advance_epoch(&mut s, ¶ms); - } - - // Withdraw the self-bond - withdraw_tokens(&mut s, None, &new_validator, current_epoch).unwrap(); -} - -fn test_slashes_with_unbonding_aux( - mut params: OwnedPosParams, - validators: Vec, - unbond_delay: u64, -) { - // This can be useful for debugging: - params.pipeline_len = 2; - params.unbonding_len = 4; - println!("\nTest inputs: {params:?}, genesis validators: {validators:#?}"); - let mut s = TestWlStorage::default(); - - // Find the validator with the least stake to avoid the cubic slash rate - // going to 100% - let validator = - itertools::Itertools::sorted_by_key(validators.iter(), |v| v.tokens) - .next() - .unwrap(); - let val_addr = &validator.address; - let val_tokens = validator.tokens; - println!( - "Validator that will misbehave addr {val_addr}, tokens {}", - val_tokens.to_string_native() - ); - - // Genesis - // let start_epoch = s.storage.block.epoch; - let mut current_epoch = s.storage.block.epoch; - let params = test_init_genesis( - &mut s, - params, - validators.clone().into_iter(), - current_epoch, - ) - .unwrap(); - s.commit_block().unwrap(); - - current_epoch = advance_epoch(&mut s, ¶ms); - super::process_slashes(&mut s, current_epoch).unwrap(); - - // Discover first slash - let slash_0_evidence_epoch = current_epoch; - // let slash_0_processing_epoch = - // slash_0_evidence_epoch + params.slash_processing_epoch_offset(); - let evidence_block_height = BlockHeight(0); // doesn't matter for slashing logic - let slash_0_type = SlashType::DuplicateVote; - slash( - &mut s, - ¶ms, - current_epoch, - slash_0_evidence_epoch, - evidence_block_height, - slash_0_type, - val_addr, - current_epoch.next(), - ) - .unwrap(); - - // Advance to an epoch in which we can unbond - 
let unfreeze_epoch = - slash_0_evidence_epoch + params.slash_processing_epoch_offset(); - while current_epoch < unfreeze_epoch { - current_epoch = advance_epoch(&mut s, ¶ms); - super::process_slashes(&mut s, current_epoch).unwrap(); - } - - // Advance more epochs randomly from the generated delay - for _ in 0..unbond_delay { - current_epoch = advance_epoch(&mut s, ¶ms); - } - - // Unbond half of the tokens - let unbond_amount = Dec::new(5, 1).unwrap() * val_tokens; - println!("Going to unbond {}", unbond_amount.to_string_native()); - let unbond_epoch = current_epoch; - unbond_tokens(&mut s, None, val_addr, unbond_amount, unbond_epoch, false) - .unwrap(); - - // Discover second slash - let slash_1_evidence_epoch = current_epoch; - // Ensure that both slashes happen before `unbond_epoch + pipeline` - let _slash_1_processing_epoch = - slash_1_evidence_epoch + params.slash_processing_epoch_offset(); - let evidence_block_height = BlockHeight(0); // doesn't matter for slashing logic - let slash_1_type = SlashType::DuplicateVote; - slash( - &mut s, - ¶ms, - current_epoch, - slash_1_evidence_epoch, - evidence_block_height, - slash_1_type, - val_addr, - current_epoch.next(), - ) - .unwrap(); - - // Advance to an epoch in which we can withdraw - let withdraw_epoch = unbond_epoch + params.withdrawable_epoch_offset(); - while current_epoch < withdraw_epoch { - current_epoch = advance_epoch(&mut s, ¶ms); - super::process_slashes(&mut s, current_epoch).unwrap(); - } - let token = staking_token_address(&s); - let val_balance_pre = read_balance(&s, &token, val_addr).unwrap(); - - let bond_id = BondId { - source: val_addr.clone(), - validator: val_addr.clone(), - }; - let binding = - super::bonds_and_unbonds(&s, None, Some(val_addr.clone())).unwrap(); - let details = binding.get(&bond_id).unwrap(); - let exp_withdraw_from_details = details.unbonds[0].amount - - details.unbonds[0].slashed_amount.unwrap_or_default(); - - withdraw_tokens(&mut s, None, val_addr, current_epoch).unwrap(); - - let val_balance_post = read_balance(&s, &token, val_addr).unwrap(); - let withdrawn_tokens = val_balance_post - val_balance_pre; - println!("Withdrew {} tokens", withdrawn_tokens.to_string_native()); - - assert_eq!(exp_withdraw_from_details, withdrawn_tokens); - - let slash_rate_0 = validator_slashes_handle(val_addr) - .get(&s, 0) - .unwrap() - .unwrap() - .rate; - let slash_rate_1 = validator_slashes_handle(val_addr) - .get(&s, 1) - .unwrap() - .unwrap() - .rate; - println!("Slash 0 rate {slash_rate_0}, slash 1 rate {slash_rate_1}"); - - let expected_withdrawn_amount = Dec::from( - (Dec::one() - slash_rate_1) - * (Dec::one() - slash_rate_0) - * unbond_amount, - ); - // Allow some rounding error, 1 NAMNAM per each slash - let rounding_error_tolerance = - Dec::new(2, NATIVE_MAX_DECIMAL_PLACES).unwrap(); - assert!( - dbg!(expected_withdrawn_amount.abs_diff(&Dec::from(withdrawn_tokens))) - <= rounding_error_tolerance - ); - - // TODO: finish once implemented - // let slash_0 = decimal_mult_amount(slash_rate_0, val_tokens); - // let slash_1 = decimal_mult_amount(slash_rate_1, val_tokens - slash_0); - // let expected_slash_pool = slash_0 + slash_1; - // let slash_pool_balance = - // read_balance(&s, &token, &SLASH_POOL_ADDRESS).unwrap(); - // assert_eq!(expected_slash_pool, slash_pool_balance); -} - -#[test] -fn test_validator_raw_hash() { - let mut storage = TestWlStorage::default(); - let address = address::testing::established_address_1(); - let consensus_sk = key::testing::keypair_1(); - let consensus_pk = 
consensus_sk.to_public(); - let expected_raw_hash = key::tm_consensus_key_raw_hash(&consensus_pk); - - assert!( - find_validator_by_raw_hash(&storage, &expected_raw_hash) - .unwrap() - .is_none() - ); - write_validator_address_raw_hash(&mut storage, &address, &consensus_pk) - .unwrap(); - let found = - find_validator_by_raw_hash(&storage, &expected_raw_hash).unwrap(); - assert_eq!(found, Some(address)); -} - -#[test] -fn test_validator_sets() { - let mut s = TestWlStorage::default(); - // Only 3 consensus validator slots - let params = OwnedPosParams { - max_validator_slots: 3, - ..Default::default() - }; - let addr_seed = "seed"; - let mut address_gen = EstablishedAddressGen::new(addr_seed); - let mut sk_seed = 0; - let mut gen_validator = || { - let res = ( - address_gen.generate_address(addr_seed), - key::testing::common_sk_from_simple_seed(sk_seed).to_public(), - ); - // bump the sk seed - sk_seed += 1; - res - }; - - // Create genesis validators - let ((val1, pk1), stake1) = - (gen_validator(), token::Amount::native_whole(1)); - let ((val2, pk2), stake2) = - (gen_validator(), token::Amount::native_whole(1)); - let ((val3, pk3), stake3) = - (gen_validator(), token::Amount::native_whole(10)); - let ((val4, pk4), stake4) = - (gen_validator(), token::Amount::native_whole(1)); - let ((val5, pk5), stake5) = - (gen_validator(), token::Amount::native_whole(100)); - let ((val6, pk6), stake6) = - (gen_validator(), token::Amount::native_whole(1)); - let ((val7, pk7), stake7) = - (gen_validator(), token::Amount::native_whole(1)); - println!("\nval1: {val1}, {pk1}, {}", stake1.to_string_native()); - println!("val2: {val2}, {pk2}, {}", stake2.to_string_native()); - println!("val3: {val3}, {pk3}, {}", stake3.to_string_native()); - println!("val4: {val4}, {pk4}, {}", stake4.to_string_native()); - println!("val5: {val5}, {pk5}, {}", stake5.to_string_native()); - println!("val6: {val6}, {pk6}, {}", stake6.to_string_native()); - println!("val7: {val7}, {pk7}, {}", stake7.to_string_native()); - - let start_epoch = Epoch::default(); - let epoch = start_epoch; - - let protocol_sk_1 = common_sk_from_simple_seed(0); - let protocol_sk_2 = common_sk_from_simple_seed(1); - - let params = test_init_genesis( - &mut s, - params, - [ - GenesisValidator { - address: val1.clone(), - tokens: stake1, - consensus_key: pk1.clone(), - protocol_key: protocol_sk_1.to_public(), - eth_hot_key: key::common::PublicKey::Secp256k1( - key::testing::gen_keypair::() - .ref_to(), - ), - eth_cold_key: key::common::PublicKey::Secp256k1( - key::testing::gen_keypair::() - .ref_to(), - ), - commission_rate: Dec::new(1, 1).expect("Dec creation failed"), - max_commission_rate_change: Dec::new(1, 1) - .expect("Dec creation failed"), - metadata: Default::default(), - }, - GenesisValidator { - address: val2.clone(), - tokens: stake2, - consensus_key: pk2.clone(), - protocol_key: protocol_sk_2.to_public(), - eth_hot_key: key::common::PublicKey::Secp256k1( - key::testing::gen_keypair::() - .ref_to(), - ), - eth_cold_key: key::common::PublicKey::Secp256k1( - key::testing::gen_keypair::() - .ref_to(), - ), - commission_rate: Dec::new(1, 1).expect("Dec creation failed"), - max_commission_rate_change: Dec::new(1, 1) - .expect("Dec creation failed"), - metadata: Default::default(), - }, - ] - .into_iter(), - epoch, - ) - .unwrap(); - - // A helper to insert a non-genesis validator - let insert_validator = |s: &mut TestWlStorage, - addr, - pk: &PublicKey, - stake: token::Amount, - epoch: Epoch| { - insert_validator_into_validator_set( - s, - ¶ms, - 
addr, - stake, - epoch, - params.pipeline_len, - ) - .unwrap(); - - update_validator_deltas(s, ¶ms, addr, stake.change(), epoch, None) - .unwrap(); - - // Set their consensus key (needed for - // `validator_set_update_tendermint` fn) - validator_consensus_key_handle(addr) - .set(s, pk.clone(), epoch, params.pipeline_len) - .unwrap(); - }; - - // Advance to EPOCH 1 - // - // We cannot call `get_tendermint_set_updates` for the genesis state as - // `validator_set_update_tendermint` is only called 2 blocks before the - // start of an epoch and so we need to give it a predecessor epoch (see - // `get_tendermint_set_updates`), which we cannot have on the first - // epoch. In any way, the initial validator set is given to Tendermint - // from InitChain, so `validator_set_update_tendermint` is - // not being used for it. - let epoch = advance_epoch(&mut s, ¶ms); - let pipeline_epoch = epoch + params.pipeline_len; - - // Insert another validator with the greater stake 10 NAM - insert_validator(&mut s, &val3, &pk3, stake3, epoch); - // Insert validator with stake 1 NAM - insert_validator(&mut s, &val4, &pk4, stake4, epoch); - - // Validator `val3` and `val4` will be added at pipeline offset (2) - epoch - // 3 - let val3_and_4_epoch = pipeline_epoch; - - let consensus_vals: Vec<_> = consensus_validator_set_handle() - .at(&pipeline_epoch) - .iter(&s) - .unwrap() - .map(Result::unwrap) - .collect(); - - assert_eq!(consensus_vals.len(), 3); - assert!(matches!( - &consensus_vals[0], - (lazy_map::NestedSubKey::Data { - key: stake, - nested_sub_key: lazy_map::SubKey::Data(position), - }, address) - if address == &val1 && stake == &stake1 && *position == Position(0) - )); - assert!(matches!( - &consensus_vals[1], - (lazy_map::NestedSubKey::Data { - key: stake, - nested_sub_key: lazy_map::SubKey::Data(position), - }, address) - if address == &val2 && stake == &stake2 && *position == Position(1) - )); - assert!(matches!( - &consensus_vals[2], - (lazy_map::NestedSubKey::Data { - key: stake, - nested_sub_key: lazy_map::SubKey::Data(position), - }, address) - if address == &val3 && stake == &stake3 && *position == Position(0) - )); - - // Check tendermint validator set updates - there should be none - let tm_updates = get_tendermint_set_updates(&s, ¶ms, epoch); - assert!(tm_updates.is_empty()); - - // Advance to EPOCH 2 - let epoch = advance_epoch(&mut s, ¶ms); - let pipeline_epoch = epoch + params.pipeline_len; - - // Insert another validator with a greater stake still 1000 NAM. 
It should - // replace 2nd consensus validator with stake 1, which should become - // below-capacity - insert_validator(&mut s, &val5, &pk5, stake5, epoch); - // Validator `val5` will be added at pipeline offset (2) - epoch 4 - let val5_epoch = pipeline_epoch; - - let consensus_vals: Vec<_> = consensus_validator_set_handle() - .at(&pipeline_epoch) - .iter(&s) - .unwrap() - .map(Result::unwrap) - .collect(); - - assert_eq!(consensus_vals.len(), 3); - assert!(matches!( - &consensus_vals[0], - (lazy_map::NestedSubKey::Data { - key: stake, - nested_sub_key: lazy_map::SubKey::Data(position), - }, address) - if address == &val1 && stake == &stake1 && *position == Position(0) - )); - assert!(matches!( - &consensus_vals[1], - (lazy_map::NestedSubKey::Data { - key: stake, - nested_sub_key: lazy_map::SubKey::Data(position), - }, address) - if address == &val3 && stake == &stake3 && *position == Position(0) - )); - assert!(matches!( - &consensus_vals[2], - (lazy_map::NestedSubKey::Data { - key: stake, - nested_sub_key: lazy_map::SubKey::Data(position), - }, address) - if address == &val5 && stake == &stake5 && *position == Position(0) - )); - - let below_capacity_vals: Vec<_> = below_capacity_validator_set_handle() - .at(&pipeline_epoch) - .iter(&s) - .unwrap() - .map(Result::unwrap) - .collect(); - - assert_eq!(below_capacity_vals.len(), 2); - assert!(matches!( - &below_capacity_vals[0], - (lazy_map::NestedSubKey::Data { - key: ReverseOrdTokenAmount(stake), - nested_sub_key: lazy_map::SubKey::Data(position), - }, address) - if address == &val4 && stake == &stake4 && *position == Position(0) - )); - assert!(matches!( - &below_capacity_vals[1], - (lazy_map::NestedSubKey::Data { - key: ReverseOrdTokenAmount(stake), - nested_sub_key: lazy_map::SubKey::Data(position), - }, address) - if address == &val2 && stake == &stake2 && *position == Position(1) - )); - - // Advance to EPOCH 3 - let epoch = advance_epoch(&mut s, &params); - let pipeline_epoch = epoch + params.pipeline_len; - - // Check tendermint validator set updates - assert_eq!( - val3_and_4_epoch, epoch, - "val3 and val4 are in the validator sets now" - ); - let tm_updates = get_tendermint_set_updates(&s, &params, epoch); - // `val4` is newly added below-capacity, so it must be skipped in the update to TM - assert_eq!(tm_updates.len(), 1); - assert_eq!( - tm_updates[0], - ValidatorSetUpdate::Consensus(ConsensusValidator { - consensus_key: pk3, - bonded_stake: stake3, - }) - ); - - // Insert another validator with a stake 1 NAM.
It should be added to the - // below-capacity set - insert_validator(&mut s, &val6, &pk6, stake6, epoch); - // Validator `val6` will be added at pipeline offset (2) - epoch 5 - let val6_epoch = pipeline_epoch; - - let below_capacity_vals: Vec<_> = below_capacity_validator_set_handle() - .at(&pipeline_epoch) - .iter(&s) - .unwrap() - .map(Result::unwrap) - .collect(); - - assert_eq!(below_capacity_vals.len(), 3); - assert!(matches!( - &below_capacity_vals[0], - (lazy_map::NestedSubKey::Data { - key: ReverseOrdTokenAmount(stake), - nested_sub_key: lazy_map::SubKey::Data(position), - }, address) - if address == &val4 && stake == &stake4 && *position == Position(0) - )); - assert!(matches!( - &below_capacity_vals[1], - (lazy_map::NestedSubKey::Data { - key: ReverseOrdTokenAmount(stake), - nested_sub_key: lazy_map::SubKey::Data(position), - }, address) - if address == &val2 && stake == &stake2 && *position == Position(1) - )); - assert!(matches!( - &below_capacity_vals[2], - (lazy_map::NestedSubKey::Data { - key: ReverseOrdTokenAmount(stake), - nested_sub_key: lazy_map::SubKey::Data(position), - }, address) - if address == &val6 && stake == &stake6 && *position == Position(2) - )); - - // Advance to EPOCH 4 - let epoch = advance_epoch(&mut s, &params); - let pipeline_epoch = epoch + params.pipeline_len; - - // Check tendermint validator set updates - assert_eq!(val5_epoch, epoch, "val5 is in the validator sets now"); - let tm_updates = get_tendermint_set_updates(&s, &params, epoch); - assert_eq!(tm_updates.len(), 2); - assert_eq!( - tm_updates[0], - ValidatorSetUpdate::Consensus(ConsensusValidator { - consensus_key: pk5, - bonded_stake: stake5, - }) - ); - assert_eq!(tm_updates[1], ValidatorSetUpdate::Deactivated(pk2)); - - // Unbond some stake from val1, it should be swapped with the greatest - // below-capacity validator val2 into the below-capacity set. The stake of - // val1 will go below 1 NAM, which is the validator_stake_threshold, so it - // will enter the below-threshold validator set.
- let unbond = token::Amount::from_uint(500_000, 0).unwrap(); - let stake1 = stake1 - unbond; - println!("val1 {val1} new stake {}", stake1.to_string_native()); - // Because `update_validator_set` and `update_validator_deltas` are - // effective from pipeline offset, we use pipeline epoch for the rest of the - // checks - update_validator_set(&mut s, ¶ms, &val1, -unbond.change(), epoch, None) - .unwrap(); - update_validator_deltas( - &mut s, - ¶ms, - &val1, - -unbond.change(), - epoch, - None, - ) - .unwrap(); - // Epoch 6 - let val1_unbond_epoch = pipeline_epoch; - - let consensus_vals: Vec<_> = consensus_validator_set_handle() - .at(&pipeline_epoch) - .iter(&s) - .unwrap() - .map(Result::unwrap) - .collect(); - - assert_eq!(consensus_vals.len(), 3); - assert!(matches!( - &consensus_vals[0], - (lazy_map::NestedSubKey::Data { - key: stake, - nested_sub_key: lazy_map::SubKey::Data(position), - }, address) - if address == &val4 && stake == &stake4 && *position == Position(0) - )); - assert!(matches!( - &consensus_vals[1], - (lazy_map::NestedSubKey::Data { - key: stake, - nested_sub_key: lazy_map::SubKey::Data(position), - }, address) - if address == &val3 && stake == &stake3 && *position == Position(0) - )); - assert!(matches!( - &consensus_vals[2], - (lazy_map::NestedSubKey::Data { - key: stake, - nested_sub_key: lazy_map::SubKey::Data(position), - }, address) - if address == &val5 && stake == &stake5 && *position == Position(0) - )); - - let below_capacity_vals: Vec<_> = below_capacity_validator_set_handle() - .at(&pipeline_epoch) - .iter(&s) - .unwrap() - .map(Result::unwrap) - .collect(); - - assert_eq!(below_capacity_vals.len(), 2); - assert!(matches!( - &below_capacity_vals[0], - (lazy_map::NestedSubKey::Data { - key: ReverseOrdTokenAmount(stake), - nested_sub_key: lazy_map::SubKey::Data(position), - }, address) - if address == &val2 && stake == &stake2 && *position == Position(1) - )); - assert!(matches!( - &below_capacity_vals[1], - (lazy_map::NestedSubKey::Data { - key: ReverseOrdTokenAmount(stake), - nested_sub_key: lazy_map::SubKey::Data(position), - }, address) - if address == &val6 && stake == &stake6 && *position == Position(2) - )); - - let below_threshold_vals = - read_below_threshold_validator_set_addresses(&s, pipeline_epoch) - .unwrap() - .into_iter() - .collect::>(); - - assert_eq!(below_threshold_vals.len(), 1); - assert_eq!(&below_threshold_vals[0], &val1); - - // Advance to EPOCH 5 - let epoch = advance_epoch(&mut s, ¶ms); - let pipeline_epoch = epoch + params.pipeline_len; - - // Check tendermint validator set updates - assert_eq!(val6_epoch, epoch, "val6 is in the validator sets now"); - let tm_updates = get_tendermint_set_updates(&s, ¶ms, epoch); - assert!(tm_updates.is_empty()); - - // Insert another validator with stake 1 - it should be added to below - // capacity set - insert_validator(&mut s, &val7, &pk7, stake7, epoch); - // Epoch 7 - let val7_epoch = pipeline_epoch; - - let consensus_vals: Vec<_> = consensus_validator_set_handle() - .at(&pipeline_epoch) - .iter(&s) - .unwrap() - .map(Result::unwrap) - .collect(); - - assert_eq!(consensus_vals.len(), 3); - assert!(matches!( - &consensus_vals[0], - (lazy_map::NestedSubKey::Data { - key: stake, - nested_sub_key: lazy_map::SubKey::Data(position), - }, address) - if address == &val4 && stake == &stake4 && *position == Position(0) - )); - assert!(matches!( - &consensus_vals[1], - (lazy_map::NestedSubKey::Data { - key: stake, - nested_sub_key: lazy_map::SubKey::Data(position), - }, address) - if address == &val3 && 
stake == &stake3 && *position == Position(0) - )); - assert!(matches!( - &consensus_vals[2], - (lazy_map::NestedSubKey::Data { - key: stake, - nested_sub_key: lazy_map::SubKey::Data(position), - }, address) - if address == &val5 && stake == &stake5 && *position == Position(0) - )); - - let below_capacity_vals: Vec<_> = below_capacity_validator_set_handle() - .at(&pipeline_epoch) - .iter(&s) - .unwrap() - .map(Result::unwrap) - .collect(); - - assert_eq!(below_capacity_vals.len(), 3); - assert!(matches!( - &below_capacity_vals[0], - (lazy_map::NestedSubKey::Data { - key: ReverseOrdTokenAmount(stake), - nested_sub_key: lazy_map::SubKey::Data(position), - }, address) - if address == &val2 && stake == &stake2 && *position == Position(1) - )); - assert!(matches!( - &below_capacity_vals[1], - (lazy_map::NestedSubKey::Data { - key: ReverseOrdTokenAmount(stake), - nested_sub_key: lazy_map::SubKey::Data(position), - }, address) - if address == &val6 && stake == &stake6 && *position == Position(2) - )); - assert!(matches!( - &below_capacity_vals[2], - ( - lazy_map::NestedSubKey::Data { - key: ReverseOrdTokenAmount(stake), - nested_sub_key: lazy_map::SubKey::Data(position), - }, - address - ) - if address == &val7 && stake == &stake7 && *position == Position(3) - )); - - let below_threshold_vals = - read_below_threshold_validator_set_addresses(&s, pipeline_epoch) - .unwrap() - .into_iter() - .collect::>(); - - assert_eq!(below_threshold_vals.len(), 1); - assert_eq!(&below_threshold_vals[0], &val1); - - // Advance to EPOCH 6 - let epoch = advance_epoch(&mut s, ¶ms); - let pipeline_epoch = epoch + params.pipeline_len; - - // Check tendermint validator set updates - assert_eq!(val1_unbond_epoch, epoch, "val1's unbond is applied now"); - let tm_updates = get_tendermint_set_updates(&s, ¶ms, epoch); - assert_eq!(tm_updates.len(), 2); - assert_eq!( - tm_updates[0], - ValidatorSetUpdate::Consensus(ConsensusValidator { - consensus_key: pk4.clone(), - bonded_stake: stake4, - }) - ); - assert_eq!(tm_updates[1], ValidatorSetUpdate::Deactivated(pk1)); - - // Bond some stake to val6, it should be be swapped with the lowest - // consensus validator val2 into the consensus set - let bond = token::Amount::from_uint(500_000, 0).unwrap(); - let stake6 = stake6 + bond; - println!("val6 {val6} new stake {}", stake6.to_string_native()); - update_validator_set(&mut s, ¶ms, &val6, bond.change(), epoch, None) - .unwrap(); - update_validator_deltas(&mut s, ¶ms, &val6, bond.change(), epoch, None) - .unwrap(); - let val6_bond_epoch = pipeline_epoch; - - let consensus_vals: Vec<_> = consensus_validator_set_handle() - .at(&pipeline_epoch) - .iter(&s) - .unwrap() - .map(Result::unwrap) - .collect(); - - assert_eq!(consensus_vals.len(), 3); - assert!(matches!( - &consensus_vals[0], - (lazy_map::NestedSubKey::Data { - key: stake, - nested_sub_key: lazy_map::SubKey::Data(position), - }, address) - if address == &val6 && stake == &stake6 && *position == Position(0) - )); - assert!(matches!( - &consensus_vals[1], - (lazy_map::NestedSubKey::Data { - key: stake, - nested_sub_key: lazy_map::SubKey::Data(position), - }, address) - if address == &val3 && stake == &stake3 && *position == Position(0) - )); - assert!(matches!( - &consensus_vals[2], - (lazy_map::NestedSubKey::Data { - key: stake, - nested_sub_key: lazy_map::SubKey::Data(position), - }, address) - if address == &val5 && stake == &stake5 && *position == Position(0) - )); - - let below_capacity_vals: Vec<_> = below_capacity_validator_set_handle() - .at(&pipeline_epoch) - .iter(&s) 
- .unwrap() - .map(Result::unwrap) - .collect(); - - assert_eq!(below_capacity_vals.len(), 3); - dbg!(&below_capacity_vals); - assert!(matches!( - &below_capacity_vals[0], - (lazy_map::NestedSubKey::Data { - key: ReverseOrdTokenAmount(stake), - nested_sub_key: lazy_map::SubKey::Data(position), - }, address) - if address == &val2 && stake == &stake2 && *position == Position(1) - )); - assert!(matches!( - &below_capacity_vals[1], - (lazy_map::NestedSubKey::Data { - key: ReverseOrdTokenAmount(stake), - nested_sub_key: lazy_map::SubKey::Data(position), - }, address) - if address == &val7 && stake == &stake7 && *position == Position(3) - )); - assert!(matches!( - &below_capacity_vals[2], - ( - lazy_map::NestedSubKey::Data { - key: ReverseOrdTokenAmount(stake), - nested_sub_key: lazy_map::SubKey::Data(position), - }, - address - ) - if address == &val4 && stake == &stake4 && *position == Position(4) - )); - - let below_threshold_vals = - read_below_threshold_validator_set_addresses(&s, pipeline_epoch) - .unwrap() - .into_iter() - .collect::>(); - - assert_eq!(below_threshold_vals.len(), 1); - assert_eq!(&below_threshold_vals[0], &val1); - - // Advance to EPOCH 7 - let epoch = advance_epoch(&mut s, ¶ms); - assert_eq!(val7_epoch, epoch, "val6 is in the validator sets now"); - - // Check tendermint validator set updates - let tm_updates = get_tendermint_set_updates(&s, ¶ms, epoch); - assert!(tm_updates.is_empty()); - - // Advance to EPOCH 8 - let epoch = advance_epoch(&mut s, ¶ms); - - // Check tendermint validator set updates - assert_eq!(val6_bond_epoch, epoch, "val5's bond is applied now"); - let tm_updates = get_tendermint_set_updates(&s, ¶ms, epoch); - dbg!(&tm_updates); - assert_eq!(tm_updates.len(), 2); - assert_eq!( - tm_updates[0], - ValidatorSetUpdate::Consensus(ConsensusValidator { - consensus_key: pk6, - bonded_stake: stake6, - }) - ); - assert_eq!(tm_updates[1], ValidatorSetUpdate::Deactivated(pk4)); - - // Check that the below-capacity validator set was purged for the old epochs - // but that the consensus_validator_set was not - let last_epoch = epoch; - for e in Epoch::iter_bounds_inclusive( - start_epoch, - last_epoch - .sub_or_default(Epoch(DEFAULT_NUM_PAST_EPOCHS)) - .sub_or_default(Epoch(1)), - ) { - assert!( - !consensus_validator_set_handle() - .at(&e) - .is_empty(&s) - .unwrap() - ); - assert!( - below_capacity_validator_set_handle() - .at(&e) - .is_empty(&s) - .unwrap() - ); - } -} - -/// When a consensus set validator with 0 voting power adds a bond in the same -/// epoch as another below-capacity set validator with 0 power, but who adds -/// more bonds than the validator who is in the consensus set, they get swapped -/// in the sets. 
But if both of their new voting powers are still 0 after -/// bonding, the newly below-capacity validator must not be given to tendermint -/// with 0 voting power, because it wasn't it its set before -#[test] -fn test_validator_sets_swap() { - let mut s = TestWlStorage::default(); - // Only 2 consensus validator slots - let params = OwnedPosParams { - max_validator_slots: 2, - // Set the stake threshold to 0 so no validators are in the - // below-threshold set - validator_stake_threshold: token::Amount::zero(), - // Set 0.1 votes per token - tm_votes_per_token: Dec::new(1, 1).expect("Dec creation failed"), - ..Default::default() - }; - - let addr_seed = "seed"; - let mut address_gen = EstablishedAddressGen::new(addr_seed); - let mut sk_seed = 0; - let mut gen_validator = || { - let res = ( - address_gen.generate_address(addr_seed), - key::testing::common_sk_from_simple_seed(sk_seed).to_public(), - ); - // bump the sk seed - sk_seed += 1; - res - }; - - // Start with two genesis validators, one with 1 voting power and other 0 - let epoch = Epoch::default(); - // 1M voting power - let ((val1, pk1), stake1) = - (gen_validator(), token::Amount::native_whole(10)); - // 0 voting power - let ((val2, pk2), stake2) = - (gen_validator(), token::Amount::from_uint(5, 0).unwrap()); - // 0 voting power - let ((val3, pk3), stake3) = - (gen_validator(), token::Amount::from_uint(5, 0).unwrap()); - println!("val1: {val1}, {pk1}, {}", stake1.to_string_native()); - println!("val2: {val2}, {pk2}, {}", stake2.to_string_native()); - println!("val3: {val3}, {pk3}, {}", stake3.to_string_native()); - - let protocol_sk_1 = common_sk_from_simple_seed(0); - let protocol_sk_2 = common_sk_from_simple_seed(1); - - let params = test_init_genesis( - &mut s, - params, - [ - GenesisValidator { - address: val1, - tokens: stake1, - consensus_key: pk1, - protocol_key: protocol_sk_1.to_public(), - eth_hot_key: key::common::PublicKey::Secp256k1( - key::testing::gen_keypair::() - .ref_to(), - ), - eth_cold_key: key::common::PublicKey::Secp256k1( - key::testing::gen_keypair::() - .ref_to(), - ), - commission_rate: Dec::new(1, 1).expect("Dec creation failed"), - max_commission_rate_change: Dec::new(1, 1) - .expect("Dec creation failed"), - metadata: Default::default(), - }, - GenesisValidator { - address: val2.clone(), - tokens: stake2, - consensus_key: pk2, - protocol_key: protocol_sk_2.to_public(), - eth_hot_key: key::common::PublicKey::Secp256k1( - key::testing::gen_keypair::() - .ref_to(), - ), - eth_cold_key: key::common::PublicKey::Secp256k1( - key::testing::gen_keypair::() - .ref_to(), - ), - commission_rate: Dec::new(1, 1).expect("Dec creation failed"), - max_commission_rate_change: Dec::new(1, 1) - .expect("Dec creation failed"), - metadata: Default::default(), - }, - ] - .into_iter(), - epoch, - ) - .unwrap(); - - // A helper to insert a non-genesis validator - let insert_validator = |s: &mut TestWlStorage, - addr, - pk: &PublicKey, - stake: token::Amount, - epoch: Epoch| { - insert_validator_into_validator_set( - s, - ¶ms, - addr, - stake, - epoch, - params.pipeline_len, - ) - .unwrap(); - - update_validator_deltas(s, ¶ms, addr, stake.change(), epoch, None) - .unwrap(); - - // Set their consensus key (needed for - // `validator_set_update_tendermint` fn) - validator_consensus_key_handle(addr) - .set(s, pk.clone(), epoch, params.pipeline_len) - .unwrap(); - }; - - // Advance to EPOCH 1 - let epoch = advance_epoch(&mut s, ¶ms); - let pipeline_epoch = epoch + params.pipeline_len; - - // Insert another validator with 0 voting 
power - insert_validator(&mut s, &val3, &pk3, stake3, epoch); - - assert_eq!(stake2, stake3); - - // Add 2 bonds, one for val2 and greater one for val3 - let bonds_epoch_1 = pipeline_epoch; - let bond2 = token::Amount::from_uint(1, 0).unwrap(); - let stake2 = stake2 + bond2; - let bond3 = token::Amount::from_uint(4, 0).unwrap(); - let stake3 = stake3 + bond3; - - assert!(stake2 < stake3); - assert_eq!(into_tm_voting_power(params.tm_votes_per_token, stake2), 0); - assert_eq!(into_tm_voting_power(params.tm_votes_per_token, stake3), 0); - - update_validator_set(&mut s, ¶ms, &val2, bond2.change(), epoch, None) - .unwrap(); - update_validator_deltas( - &mut s, - ¶ms, - &val2, - bond2.change(), - epoch, - None, - ) - .unwrap(); - - update_validator_set(&mut s, ¶ms, &val3, bond3.change(), epoch, None) - .unwrap(); - update_validator_deltas( - &mut s, - ¶ms, - &val3, - bond3.change(), - epoch, - None, - ) - .unwrap(); - - // Advance to EPOCH 2 - let epoch = advance_epoch(&mut s, ¶ms); - let pipeline_epoch = epoch + params.pipeline_len; - - // Add 2 more bonds, same amount for `val2` and val3` - let bonds_epoch_2 = pipeline_epoch; - let bonds = token::Amount::native_whole(1); - let stake2 = stake2 + bonds; - let stake3 = stake3 + bonds; - assert!(stake2 < stake3); - assert_eq!( - into_tm_voting_power(params.tm_votes_per_token, stake2), - into_tm_voting_power(params.tm_votes_per_token, stake3) - ); - - update_validator_set(&mut s, ¶ms, &val2, bonds.change(), epoch, None) - .unwrap(); - update_validator_deltas( - &mut s, - ¶ms, - &val2, - bonds.change(), - epoch, - None, - ) - .unwrap(); - - update_validator_set(&mut s, ¶ms, &val3, bonds.change(), epoch, None) - .unwrap(); - update_validator_deltas( - &mut s, - ¶ms, - &val3, - bonds.change(), - epoch, - None, - ) - .unwrap(); - - // Advance to EPOCH 3 - let epoch = advance_epoch(&mut s, ¶ms); - - // Check tendermint validator set updates - assert_eq!(bonds_epoch_1, epoch); - let tm_updates = get_tendermint_set_updates(&s, ¶ms, epoch); - // `val2` must not be given to tendermint - even though it was in the - // consensus set, its voting power was 0, so it wasn't in TM set before the - // bond - assert!(tm_updates.is_empty()); - - // Advance to EPOCH 4 - let epoch = advance_epoch(&mut s, ¶ms); - - // Check tendermint validator set updates - assert_eq!(bonds_epoch_2, epoch); - let tm_updates = get_tendermint_set_updates(&s, ¶ms, epoch); - dbg!(&tm_updates); - assert_eq!(tm_updates.len(), 1); - // `val2` must not be given to tendermint as it was and still is below - // capacity - assert_eq!( - tm_updates[0], - ValidatorSetUpdate::Consensus(ConsensusValidator { - consensus_key: pk3, - bonded_stake: stake3, - }) - ); -} - -fn get_tendermint_set_updates( - s: &TestWlStorage, - params: &PosParams, - Epoch(epoch): Epoch, -) -> Vec { - // Because the `validator_set_update_tendermint` is called 2 blocks before - // the start of a new epoch, it expects to receive the epoch that is before - // the start of a new one too and so we give it the predecessor of the - // current epoch here to actually get the update for the current epoch. - let epoch = Epoch(epoch - 1); - validator_set_update_tendermint(s, params, epoch, |update| update).unwrap() -} - -/// Advance to the next epoch. Returns the new epoch. 
-fn advance_epoch(s: &mut TestWlStorage, params: &PosParams) -> Epoch { - s.storage.block.epoch = s.storage.block.epoch.next(); - let current_epoch = s.storage.block.epoch; - compute_and_store_total_consensus_stake(s, current_epoch).unwrap(); - copy_validator_sets_and_positions( - s, - params, - current_epoch, - current_epoch + params.pipeline_len, - ) - .unwrap(); - // purge_validator_sets_for_old_epoch(s, current_epoch).unwrap(); - // process_slashes(s, current_epoch).unwrap(); - // dbg!(current_epoch); - current_epoch -} - -fn arb_genesis_validators( - size: Range<usize>, - threshold: Option<token::Amount>, -) -> impl Strategy<Value = Vec<GenesisValidator>> { - let threshold = threshold - .unwrap_or_else(|| PosParams::default().validator_stake_threshold); - let tokens: Vec<_> = (0..size.end) - .map(|ix| { - if ix == 0 { - // Make sure that at least one validator has at least a stake - // greater or equal to the threshold to avoid having an empty - // consensus set. - threshold.raw_amount().as_u64()..=10_000_000_u64 - } else { - 1..=10_000_000_u64 - } - .prop_map(token::Amount::from) - }) - .collect(); - (size, tokens) - .prop_map(|(size, token_amounts)| { - // use unique seeds to generate validators' address and consensus - // key - let seeds = (0_u64..).take(size); - seeds - .zip(token_amounts) - .map(|(seed, tokens)| { - let address = address_from_simple_seed(seed); - let consensus_sk = common_sk_from_simple_seed(seed); - let consensus_key = consensus_sk.to_public(); - - let protocol_sk = common_sk_from_simple_seed(seed); - let protocol_key = protocol_sk.to_public(); - - let eth_hot_key = key::common::PublicKey::Secp256k1( - key::testing::gen_keypair::<key::secp256k1::SigScheme>( - ) - .ref_to(), - ); - let eth_cold_key = key::common::PublicKey::Secp256k1( - key::testing::gen_keypair::<key::secp256k1::SigScheme>( - ) - .ref_to(), - ); - - let commission_rate = Dec::new(5, 2).expect("Test failed"); - let max_commission_rate_change = - Dec::new(1, 2).expect("Test failed"); - GenesisValidator { - address, - tokens, - consensus_key, - protocol_key, - eth_hot_key, - eth_cold_key, - commission_rate, - max_commission_rate_change, - metadata: Default::default(), - } - }) - .collect() - }) - .prop_filter( - "Must have at least one genesis validator with stake above the \ - provided threshold, if any.", - move |gen_vals: &Vec<GenesisValidator>| { - gen_vals.iter().any(|val| val.tokens >= threshold) - }, - ) -} - -fn test_unjail_validator_aux( - params: OwnedPosParams, - mut validators: Vec<GenesisValidator>, -) { - println!("\nTest inputs: {params:?}, genesis validators: {validators:#?}"); - let mut s = TestWlStorage::default(); - - // Find the validator with the most stake and 100x its stake to keep the - // cubic slash rate small - let num_vals = validators.len(); - validators.sort_by_key(|a| a.tokens); - validators[num_vals - 1].tokens = 100 * validators[num_vals - 1].tokens; - - // Get the second-highest-stake validator to misbehave - let val_addr = &validators[num_vals - 2].address; - let val_tokens = validators[num_vals - 2].tokens; - println!( - "Validator that will misbehave addr {val_addr}, tokens {}", - val_tokens.to_string_native() - ); - - // Genesis - let mut current_epoch = s.storage.block.epoch; - let params = test_init_genesis( - &mut s, - params, - validators.clone().into_iter(), - current_epoch, - ) - .unwrap(); - s.commit_block().unwrap(); - - current_epoch = advance_epoch(&mut s, &params); - super::process_slashes(&mut s, current_epoch).unwrap(); - - // Discover first slash - let slash_0_evidence_epoch = current_epoch; - let evidence_block_height = BlockHeight(0); // doesn't matter for slashing logic - let slash_0_type = 
SlashType::DuplicateVote; - slash( - &mut s, - ¶ms, - current_epoch, - slash_0_evidence_epoch, - evidence_block_height, - slash_0_type, - val_addr, - current_epoch.next(), - ) - .unwrap(); - - assert_eq!( - validator_state_handle(val_addr) - .get(&s, current_epoch, ¶ms) - .unwrap(), - Some(ValidatorState::Consensus) - ); - - for epoch in Epoch::iter_bounds_inclusive( - current_epoch.next(), - current_epoch + params.pipeline_len, - ) { - // Check the validator state - assert_eq!( - validator_state_handle(val_addr) - .get(&s, epoch, ¶ms) - .unwrap(), - Some(ValidatorState::Jailed) - ); - // Check the validator set positions - assert!( - validator_set_positions_handle() - .at(&epoch) - .get(&s, val_addr) - .unwrap() - .is_none(), - ); - } - - // Advance past an epoch in which we can unbond - let unfreeze_epoch = - slash_0_evidence_epoch + params.slash_processing_epoch_offset(); - while current_epoch < unfreeze_epoch + 4u64 { - current_epoch = advance_epoch(&mut s, ¶ms); - super::process_slashes(&mut s, current_epoch).unwrap(); - } - - // Unjail the validator - unjail_validator(&mut s, val_addr, current_epoch).unwrap(); - - // Check the validator state - for epoch in - Epoch::iter_bounds_inclusive(current_epoch, current_epoch.next()) - { - assert_eq!( - validator_state_handle(val_addr) - .get(&s, epoch, ¶ms) - .unwrap(), - Some(ValidatorState::Jailed) - ); - } - - assert_eq!( - validator_state_handle(val_addr) - .get(&s, current_epoch + params.pipeline_len, ¶ms) - .unwrap(), - Some(ValidatorState::Consensus) - ); - assert!( - validator_set_positions_handle() - .at(&(current_epoch + params.pipeline_len)) - .get(&s, val_addr) - .unwrap() - .is_some(), - ); - - // Advance another epoch - current_epoch = advance_epoch(&mut s, ¶ms); - super::process_slashes(&mut s, current_epoch).unwrap(); - - let second_att = unjail_validator(&mut s, val_addr, current_epoch); - assert!(second_att.is_err()); -} - -/// `iterateBondsUpToAmountTest` -#[test] -fn test_find_bonds_to_remove() { - let mut storage = TestWlStorage::default(); - let gov_params = namada_core::ledger::governance::parameters::GovernanceParameters::default(); - gov_params.init_storage(&mut storage).unwrap(); - write_pos_params(&mut storage, &OwnedPosParams::default()).unwrap(); - - let source = established_address_1(); - let validator = established_address_2(); - let bond_handle = bond_handle(&source, &validator); - - let (e1, e2, e6) = (Epoch(1), Epoch(2), Epoch(6)); - - bond_handle - .set(&mut storage, token::Amount::from(5), e1, 0) - .unwrap(); - bond_handle - .set(&mut storage, token::Amount::from(3), e2, 0) - .unwrap(); - bond_handle - .set(&mut storage, token::Amount::from(8), e6, 0) - .unwrap(); - - // Test 1 - let bonds_for_removal = find_bonds_to_remove( - &storage, - &bond_handle.get_data_handler(), - token::Amount::from(8), - ) - .unwrap(); - assert_eq!( - bonds_for_removal.epochs, - vec![e6].into_iter().collect::>() - ); - assert!(bonds_for_removal.new_entry.is_none()); - - // Test 2 - let bonds_for_removal = find_bonds_to_remove( - &storage, - &bond_handle.get_data_handler(), - token::Amount::from(10), - ) - .unwrap(); - assert_eq!( - bonds_for_removal.epochs, - vec![e6].into_iter().collect::>() - ); - assert_eq!( - bonds_for_removal.new_entry, - Some((Epoch(2), token::Amount::from(1))) - ); - - // Test 3 - let bonds_for_removal = find_bonds_to_remove( - &storage, - &bond_handle.get_data_handler(), - token::Amount::from(11), - ) - .unwrap(); - assert_eq!( - bonds_for_removal.epochs, - vec![e6, e2].into_iter().collect::>() - ); - 
assert!(bonds_for_removal.new_entry.is_none()); - - // Test 4 - let bonds_for_removal = find_bonds_to_remove( - &storage, - &bond_handle.get_data_handler(), - token::Amount::from(12), - ) - .unwrap(); - assert_eq!( - bonds_for_removal.epochs, - vec![e6, e2].into_iter().collect::>() - ); - assert_eq!( - bonds_for_removal.new_entry, - Some((Epoch(1), token::Amount::from(4))) - ); -} - -/// `computeModifiedRedelegationTest` -#[test] -fn test_compute_modified_redelegation() { - let mut storage = TestWlStorage::default(); - let validator1 = established_address_1(); - let validator2 = established_address_2(); - let owner = established_address_3(); - let outer_epoch = Epoch(0); - - let mut alice = validator1.clone(); - let mut bob = validator2.clone(); - - // Ensure a ranking order of alice > bob - if bob > alice { - alice = validator2; - bob = validator1; - } - println!("\n\nalice = {}\nbob = {}\n", &alice, &bob); - - // Fill redelegated bonds in storage - let redelegated_bonds_map = delegator_redelegated_bonds_handle(&owner) - .at(&alice) - .at(&outer_epoch); - redelegated_bonds_map - .at(&alice) - .insert(&mut storage, Epoch(2), token::Amount::from(6)) - .unwrap(); - redelegated_bonds_map - .at(&alice) - .insert(&mut storage, Epoch(4), token::Amount::from(7)) - .unwrap(); - redelegated_bonds_map - .at(&bob) - .insert(&mut storage, Epoch(1), token::Amount::from(5)) - .unwrap(); - redelegated_bonds_map - .at(&bob) - .insert(&mut storage, Epoch(4), token::Amount::from(7)) - .unwrap(); - - // Test cases 1 and 2 - let mr1 = compute_modified_redelegation( - &storage, - &redelegated_bonds_map, - Epoch(5), - token::Amount::from(25), - ) - .unwrap(); - let mr2 = compute_modified_redelegation( - &storage, - &redelegated_bonds_map, - Epoch(5), - token::Amount::from(30), - ) - .unwrap(); - - let exp_mr = ModifiedRedelegation { - epoch: Some(Epoch(5)), - ..Default::default() - }; - - assert_eq!(mr1, exp_mr); - assert_eq!(mr2, exp_mr); - - // Test case 3 - let mr3 = compute_modified_redelegation( - &storage, - &redelegated_bonds_map, - Epoch(5), - token::Amount::from(7), - ) - .unwrap(); - - let exp_mr = ModifiedRedelegation { - epoch: Some(Epoch(5)), - validators_to_remove: BTreeSet::from_iter([bob.clone()]), - validator_to_modify: Some(bob.clone()), - epochs_to_remove: BTreeSet::from_iter([Epoch(4)]), - ..Default::default() - }; - assert_eq!(mr3, exp_mr); - - // Test case 4 - let mr4 = compute_modified_redelegation( - &storage, - &redelegated_bonds_map, - Epoch(5), - token::Amount::from(8), - ) - .unwrap(); - - let exp_mr = ModifiedRedelegation { - epoch: Some(Epoch(5)), - validators_to_remove: BTreeSet::from_iter([bob.clone()]), - validator_to_modify: Some(bob.clone()), - epochs_to_remove: BTreeSet::from_iter([Epoch(1), Epoch(4)]), - epoch_to_modify: Some(Epoch(1)), - new_amount: Some(4.into()), - }; - assert_eq!(mr4, exp_mr); - - // Test case 5 - let mr5 = compute_modified_redelegation( - &storage, - &redelegated_bonds_map, - Epoch(5), - 12.into(), - ) - .unwrap(); - - let exp_mr = ModifiedRedelegation { - epoch: Some(Epoch(5)), - validators_to_remove: BTreeSet::from_iter([bob.clone()]), - ..Default::default() - }; - assert_eq!(mr5, exp_mr); - - // Test case 6 - let mr6 = compute_modified_redelegation( - &storage, - &redelegated_bonds_map, - Epoch(5), - 14.into(), - ) - .unwrap(); - - let exp_mr = ModifiedRedelegation { - epoch: Some(Epoch(5)), - validators_to_remove: BTreeSet::from_iter([alice.clone(), bob.clone()]), - validator_to_modify: Some(alice.clone()), - epochs_to_remove: 
BTreeSet::from_iter([Epoch(4)]), - epoch_to_modify: Some(Epoch(4)), - new_amount: Some(5.into()), - }; - assert_eq!(mr6, exp_mr); - - // Test case 7 - let mr7 = compute_modified_redelegation( - &storage, - &redelegated_bonds_map, - Epoch(5), - 19.into(), - ) - .unwrap(); - - let exp_mr = ModifiedRedelegation { - epoch: Some(Epoch(5)), - validators_to_remove: BTreeSet::from_iter([alice.clone(), bob.clone()]), - validator_to_modify: Some(alice.clone()), - epochs_to_remove: BTreeSet::from_iter([Epoch(4)]), - ..Default::default() - }; - assert_eq!(mr7, exp_mr); - - // Test case 8 - let mr8 = compute_modified_redelegation( - &storage, - &redelegated_bonds_map, - Epoch(5), - 21.into(), - ) - .unwrap(); - - let exp_mr = ModifiedRedelegation { - epoch: Some(Epoch(5)), - validators_to_remove: BTreeSet::from_iter([alice.clone(), bob]), - validator_to_modify: Some(alice), - epochs_to_remove: BTreeSet::from_iter([Epoch(2), Epoch(4)]), - epoch_to_modify: Some(Epoch(2)), - new_amount: Some(4.into()), - }; - assert_eq!(mr8, exp_mr); -} - -/// `computeBondAtEpochTest` -#[test] -fn test_compute_bond_at_epoch() { - let mut storage = TestWlStorage::default(); - let params = OwnedPosParams { - pipeline_len: 2, - unbonding_len: 4, - cubic_slashing_window_length: 1, - ..Default::default() - }; - let alice = established_address_1(); - let bob = established_address_2(); - - // Test 1 - let res = compute_bond_at_epoch( - &storage, - ¶ms, - &bob, - 12.into(), - 3.into(), - 23.into(), - Some(&Default::default()), - ) - .unwrap(); - - pretty_assertions::assert_eq!(res, 23.into()); - - // Test 2 - validator_slashes_handle(&bob) - .push( - &mut storage, - Slash { - epoch: 4.into(), - block_height: 0, - r#type: SlashType::DuplicateVote, - rate: Dec::one(), - }, - ) - .unwrap(); - let res = compute_bond_at_epoch( - &storage, - ¶ms, - &bob, - 12.into(), - 3.into(), - 23.into(), - Some(&Default::default()), - ) - .unwrap(); - - pretty_assertions::assert_eq!(res, 0.into()); - - // Test 3 - validator_slashes_handle(&bob).pop(&mut storage).unwrap(); - let mut redel_bonds = EagerRedelegatedBondsMap::default(); - redel_bonds.insert( - alice.clone(), - BTreeMap::from_iter([(Epoch(1), token::Amount::from(5))]), - ); - let res = compute_bond_at_epoch( - &storage, - ¶ms, - &bob, - 12.into(), - 3.into(), - 23.into(), - Some(&redel_bonds), - ) - .unwrap(); - - pretty_assertions::assert_eq!(res, 23.into()); - - // Test 4 - validator_slashes_handle(&bob) - .push( - &mut storage, - Slash { - epoch: 4.into(), - block_height: 0, - r#type: SlashType::DuplicateVote, - rate: Dec::one(), - }, - ) - .unwrap(); - let res = compute_bond_at_epoch( - &storage, - ¶ms, - &bob, - 12.into(), - 3.into(), - 23.into(), - Some(&redel_bonds), - ) - .unwrap(); - - pretty_assertions::assert_eq!(res, 0.into()); - - // Test 5 - validator_slashes_handle(&bob).pop(&mut storage).unwrap(); - validator_slashes_handle(&alice) - .push( - &mut storage, - Slash { - epoch: 6.into(), - block_height: 0, - r#type: SlashType::DuplicateVote, - rate: Dec::one(), - }, - ) - .unwrap(); - let res = compute_bond_at_epoch( - &storage, - ¶ms, - &bob, - 12.into(), - 3.into(), - 23.into(), - Some(&redel_bonds), - ) - .unwrap(); - - pretty_assertions::assert_eq!(res, 23.into()); - - // Test 6 - validator_slashes_handle(&alice).pop(&mut storage).unwrap(); - validator_slashes_handle(&alice) - .push( - &mut storage, - Slash { - epoch: 4.into(), - block_height: 0, - r#type: SlashType::DuplicateVote, - rate: Dec::one(), - }, - ) - .unwrap(); - let res = compute_bond_at_epoch( - &storage, - 
¶ms, - &bob, - 18.into(), - 9.into(), - 23.into(), - Some(&redel_bonds), - ) - .unwrap(); - - pretty_assertions::assert_eq!(res, 18.into()); -} - -/// `computeSlashBondAtEpochTest` -#[test] -fn test_compute_slash_bond_at_epoch() { - let mut storage = TestWlStorage::default(); - let params = OwnedPosParams { - pipeline_len: 2, - unbonding_len: 4, - cubic_slashing_window_length: 1, - ..Default::default() - }; - let alice = established_address_1(); - let bob = established_address_2(); - - let current_epoch = Epoch(20); - let infraction_epoch = - current_epoch - params.slash_processing_epoch_offset(); - - let redelegated_bond = BTreeMap::from_iter([( - alice, - BTreeMap::from_iter([(infraction_epoch - 4, token::Amount::from(10))]), - )]); - - // Test 1 - let res = compute_slash_bond_at_epoch( - &storage, - ¶ms, - &bob, - current_epoch.next(), - infraction_epoch, - infraction_epoch - 2, - 30.into(), - Some(&Default::default()), - Dec::one(), - ) - .unwrap(); - - pretty_assertions::assert_eq!(res, 30.into()); - - // Test 2 - let res = compute_slash_bond_at_epoch( - &storage, - ¶ms, - &bob, - current_epoch.next(), - infraction_epoch, - infraction_epoch - 2, - 30.into(), - Some(&redelegated_bond), - Dec::one(), - ) - .unwrap(); - - pretty_assertions::assert_eq!(res, 30.into()); - - // Test 3 - validator_slashes_handle(&bob) - .push( - &mut storage, - Slash { - epoch: infraction_epoch.prev(), - block_height: 0, - r#type: SlashType::DuplicateVote, - rate: Dec::one(), - }, - ) - .unwrap(); - let res = compute_slash_bond_at_epoch( - &storage, - ¶ms, - &bob, - current_epoch.next(), - infraction_epoch, - infraction_epoch - 2, - 30.into(), - Some(&Default::default()), - Dec::one(), - ) - .unwrap(); - - pretty_assertions::assert_eq!(res, 0.into()); - - // Test 4 - let res = compute_slash_bond_at_epoch( - &storage, - ¶ms, - &bob, - current_epoch.next(), - infraction_epoch, - infraction_epoch - 2, - 30.into(), - Some(&redelegated_bond), - Dec::one(), - ) - .unwrap(); - - pretty_assertions::assert_eq!(res, 0.into()); -} - -/// `computeNewRedelegatedUnbondsTest` -#[test] -fn test_compute_new_redelegated_unbonds() { - let mut storage = TestWlStorage::default(); - let alice = established_address_1(); - let bob = established_address_2(); - - let key = Key::parse("testing").unwrap(); - let redelegated_bonds = NestedMap::::open(key); - - // Populate the lazy and eager maps - let (ep1, ep2, ep4, ep5, ep6, ep7) = - (Epoch(1), Epoch(2), Epoch(4), Epoch(5), Epoch(6), Epoch(7)); - let keys_and_values = vec![ - (ep5, alice.clone(), ep2, 1), - (ep5, alice.clone(), ep4, 1), - (ep7, alice.clone(), ep2, 1), - (ep7, alice.clone(), ep4, 1), - (ep5, bob.clone(), ep1, 1), - (ep5, bob.clone(), ep4, 2), - (ep7, bob.clone(), ep1, 1), - (ep7, bob.clone(), ep4, 2), - ]; - let mut eager_map = BTreeMap::::new(); - for (outer_ep, address, inner_ep, amount) in keys_and_values { - redelegated_bonds - .at(&outer_ep) - .at(&address) - .insert(&mut storage, inner_ep, token::Amount::from(amount)) - .unwrap(); - eager_map - .entry(outer_ep) - .or_default() - .entry(address.clone()) - .or_default() - .insert(inner_ep, token::Amount::from(amount)); - } - - // Different ModifiedRedelegation objects for testing - let empty_mr = ModifiedRedelegation::default(); - let all_mr = ModifiedRedelegation { - epoch: Some(ep7), - validators_to_remove: BTreeSet::from_iter([alice.clone(), bob.clone()]), - validator_to_modify: None, - epochs_to_remove: Default::default(), - epoch_to_modify: None, - new_amount: None, - }; - let mod_val_mr = ModifiedRedelegation 
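// Note: the `ModifiedRedelegation` fixtures that follow describe increasingly
// fine-grained removals for epoch 7: remove only alice; remove alice plus
// bob's epoch-1 entry; or additionally trim bob's epoch-4 entry down to 1
// token. They drive how much of the redelegated bonds the test expects
// `compute_new_redelegated_unbonds` to turn into redelegated unbonds.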
{ - epoch: Some(ep7), - validators_to_remove: BTreeSet::from_iter([alice.clone()]), - validator_to_modify: None, - epochs_to_remove: Default::default(), - epoch_to_modify: None, - new_amount: None, - }; - let mod_val_partial_mr = ModifiedRedelegation { - epoch: Some(ep7), - validators_to_remove: BTreeSet::from_iter([alice.clone(), bob.clone()]), - validator_to_modify: Some(bob.clone()), - epochs_to_remove: BTreeSet::from_iter([ep1]), - epoch_to_modify: None, - new_amount: None, - }; - let mod_epoch_partial_mr = ModifiedRedelegation { - epoch: Some(ep7), - validators_to_remove: BTreeSet::from_iter([alice, bob.clone()]), - validator_to_modify: Some(bob.clone()), - epochs_to_remove: BTreeSet::from_iter([ep1, ep4]), - epoch_to_modify: Some(ep4), - new_amount: Some(token::Amount::from(1)), - }; - - // Test case 1 - let res = compute_new_redelegated_unbonds( - &storage, - &redelegated_bonds, - &Default::default(), - &empty_mr, - ) - .unwrap(); - assert_eq!(res, Default::default()); - - let set5 = BTreeSet::::from_iter([ep5]); - let set56 = BTreeSet::::from_iter([ep5, ep6]); - - // Test case 2 - let res = compute_new_redelegated_unbonds( - &storage, - &redelegated_bonds, - &set5, - &empty_mr, - ) - .unwrap(); - let mut exp_res = eager_map.clone(); - exp_res.remove(&ep7); - assert_eq!(res, exp_res); - - // Test case 3 - let res = compute_new_redelegated_unbonds( - &storage, - &redelegated_bonds, - &set56, - &empty_mr, - ) - .unwrap(); - assert_eq!(res, exp_res); - - // Test case 4 - println!("\nTEST CASE 4\n"); - let res = compute_new_redelegated_unbonds( - &storage, - &redelegated_bonds, - &set56, - &all_mr, - ) - .unwrap(); - assert_eq!(res, eager_map); - - // Test case 5 - let res = compute_new_redelegated_unbonds( - &storage, - &redelegated_bonds, - &set56, - &mod_val_mr, - ) - .unwrap(); - exp_res = eager_map.clone(); - exp_res.entry(ep7).or_default().remove(&bob); - assert_eq!(res, exp_res); - - // Test case 6 - let res = compute_new_redelegated_unbonds( - &storage, - &redelegated_bonds, - &set56, - &mod_val_partial_mr, - ) - .unwrap(); - exp_res = eager_map.clone(); - exp_res - .entry(ep7) - .or_default() - .entry(bob.clone()) - .or_default() - .remove(&ep4); - assert_eq!(res, exp_res); - - // Test case 7 - let res = compute_new_redelegated_unbonds( - &storage, - &redelegated_bonds, - &set56, - &mod_epoch_partial_mr, - ) - .unwrap(); - exp_res - .entry(ep7) - .or_default() - .entry(bob) - .or_default() - .insert(ep4, token::Amount::from(1)); - assert_eq!(res, exp_res); -} - -/// `applyListSlashesTest` -#[test] -fn test_apply_list_slashes() { - let init_epoch = Epoch(2); - let params = OwnedPosParams { - unbonding_len: 4, - ..Default::default() - }; - // let unbonding_len = 4u64; - // let cubic_offset = 1u64; - - let slash1 = Slash { - epoch: init_epoch, - block_height: Default::default(), - r#type: SlashType::DuplicateVote, - rate: Dec::one(), - }; - let slash2 = Slash { - epoch: init_epoch - + params.unbonding_len - + params.cubic_slashing_window_length - + 1u64, - block_height: Default::default(), - r#type: SlashType::DuplicateVote, - rate: Dec::one(), - }; - - let list1 = vec![slash1.clone()]; - let list2 = vec![slash1.clone(), slash2.clone()]; - let list3 = vec![slash1.clone(), slash1.clone()]; - let list4 = vec![slash1.clone(), slash1, slash2]; - - let res = apply_list_slashes(¶ms, &[], token::Amount::from(100)); - assert_eq!(res, token::Amount::from(100)); - - let res = apply_list_slashes(¶ms, &list1, token::Amount::from(100)); - assert_eq!(res, token::Amount::zero()); - - let res = 
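// Note: every slash in these lists uses rate Dec::one() (100%), so any list
// containing at least one slash is expected to wipe the full 100 tokens;
// only the empty slice above leaves the amount unchanged.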
apply_list_slashes(¶ms, &list2, token::Amount::from(100)); - assert_eq!(res, token::Amount::zero()); - - let res = apply_list_slashes(¶ms, &list3, token::Amount::from(100)); - assert_eq!(res, token::Amount::zero()); - - let res = apply_list_slashes(¶ms, &list4, token::Amount::from(100)); - assert_eq!(res, token::Amount::zero()); -} - -/// `computeSlashableAmountTest` -#[test] -fn test_compute_slashable_amount() { - let init_epoch = Epoch(2); - let params = OwnedPosParams { - unbonding_len: 4, - ..Default::default() - }; - - let slash1 = Slash { - epoch: init_epoch - + params.unbonding_len - + params.cubic_slashing_window_length, - block_height: Default::default(), - r#type: SlashType::DuplicateVote, - rate: Dec::one(), - }; - - let slash2 = Slash { - epoch: init_epoch - + params.unbonding_len - + params.cubic_slashing_window_length - + 1u64, - block_height: Default::default(), - r#type: SlashType::DuplicateVote, - rate: Dec::one(), - }; - - let test_map = vec![(init_epoch, token::Amount::from(50))] - .into_iter() - .collect::>(); - - let res = compute_slashable_amount( - ¶ms, - &slash1, - token::Amount::from(100), - &BTreeMap::new(), - ); - assert_eq!(res, token::Amount::from(100)); - - let res = compute_slashable_amount( - ¶ms, - &slash2, - token::Amount::from(100), - &test_map, - ); - assert_eq!(res, token::Amount::from(50)); - - let res = compute_slashable_amount( - ¶ms, - &slash1, - token::Amount::from(100), - &test_map, - ); - assert_eq!(res, token::Amount::from(100)); -} - -/// `foldAndSlashRedelegatedBondsMapTest` -#[test] -fn test_fold_and_slash_redelegated_bonds() { - let mut storage = TestWlStorage::default(); - let params = OwnedPosParams { - unbonding_len: 4, - ..Default::default() - }; - let start_epoch = Epoch(7); - - let alice = established_address_1(); - let bob = established_address_2(); - - println!("\n\nAlice: {}", alice); - println!("Bob: {}\n", bob); - - let test_slash = Slash { - epoch: Default::default(), - block_height: Default::default(), - r#type: SlashType::DuplicateVote, - rate: Dec::one(), - }; - - let test_data = vec![ - (alice.clone(), vec![(2, 1), (4, 1)]), - (bob, vec![(1, 1), (4, 2)]), - ]; - let mut eager_redel_bonds = EagerRedelegatedBondsMap::default(); - for (address, pair) in test_data { - for (epoch, amount) in pair { - eager_redel_bonds - .entry(address.clone()) - .or_default() - .insert(Epoch(epoch), token::Amount::from(amount)); - } - } - - // Test case 1 - let res = fold_and_slash_redelegated_bonds( - &storage, - ¶ms, - &eager_redel_bonds, - start_epoch, - &[], - |_| true, - ); - assert_eq!( - res, - FoldRedelegatedBondsResult { - total_redelegated: token::Amount::from(5), - total_after_slashing: token::Amount::from(5), - } - ); - - // Test case 2 - let res = fold_and_slash_redelegated_bonds( - &storage, - ¶ms, - &eager_redel_bonds, - start_epoch, - &[test_slash], - |_| true, - ); - assert_eq!( - res, - FoldRedelegatedBondsResult { - total_redelegated: token::Amount::from(5), - total_after_slashing: token::Amount::zero(), - } - ); - - // Test case 3 - let alice_slash = Slash { - epoch: Epoch(6), - block_height: Default::default(), - r#type: SlashType::DuplicateVote, - rate: Dec::one(), - }; - validator_slashes_handle(&alice) - .push(&mut storage, alice_slash) - .unwrap(); - - let res = fold_and_slash_redelegated_bonds( - &storage, - ¶ms, - &eager_redel_bonds, - start_epoch, - &[], - |_| true, - ); - assert_eq!( - res, - FoldRedelegatedBondsResult { - total_redelegated: token::Amount::from(5), - total_after_slashing: token::Amount::from(3), - } - 
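// Note: in this third case only alice has a slash (epoch 6, 100% rate), so
// just her redelegated entries (1 + 1 = 2 tokens) are removed, leaving bob's
// 3 tokens intact: total_redelegated = 5, total_after_slashing = 3.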
); -} - -/// `slashRedelegationTest` -#[test] -fn test_slash_redelegation() { - let mut storage = TestWlStorage::default(); - let params = OwnedPosParams { - unbonding_len: 4, - ..Default::default() - }; - let alice = established_address_1(); - - let total_redelegated_unbonded = - validator_total_redelegated_unbonded_handle(&alice); - total_redelegated_unbonded - .at(&Epoch(13)) - .at(&Epoch(10)) - .at(&alice) - .insert(&mut storage, Epoch(7), token::Amount::from(2)) - .unwrap(); - - let slashes = validator_slashes_handle(&alice); - - let mut slashed_amounts_map = BTreeMap::from_iter([ - (Epoch(15), token::Amount::zero()), - (Epoch(16), token::Amount::zero()), - ]); - let empty_slash_amounts = slashed_amounts_map.clone(); - - // Test case 1 - slash_redelegation( - &storage, - ¶ms, - token::Amount::from(7), - Epoch(7), - Epoch(10), - &alice, - Epoch(14), - &slashes, - &total_redelegated_unbonded, - Dec::one(), - &mut slashed_amounts_map, - ) - .unwrap(); - assert_eq!( - slashed_amounts_map, - BTreeMap::from_iter([ - (Epoch(15), token::Amount::from(5)), - (Epoch(16), token::Amount::from(5)), - ]) - ); - - // Test case 2 - slashed_amounts_map = empty_slash_amounts.clone(); - slash_redelegation( - &storage, - ¶ms, - token::Amount::from(7), - Epoch(7), - Epoch(11), - &alice, - Epoch(14), - &slashes, - &total_redelegated_unbonded, - Dec::one(), - &mut slashed_amounts_map, - ) - .unwrap(); - assert_eq!( - slashed_amounts_map, - BTreeMap::from_iter([ - (Epoch(15), token::Amount::from(7)), - (Epoch(16), token::Amount::from(7)), - ]) - ); - - // Test case 3 - slashed_amounts_map = BTreeMap::from_iter([ - (Epoch(15), token::Amount::from(2)), - (Epoch(16), token::Amount::from(3)), - ]); - slash_redelegation( - &storage, - ¶ms, - token::Amount::from(7), - Epoch(7), - Epoch(10), - &alice, - Epoch(14), - &slashes, - &total_redelegated_unbonded, - Dec::one(), - &mut slashed_amounts_map, - ) - .unwrap(); - assert_eq!( - slashed_amounts_map, - BTreeMap::from_iter([ - (Epoch(15), token::Amount::from(7)), - (Epoch(16), token::Amount::from(8)), - ]) - ); - - // Test case 4 - slashes - .push( - &mut storage, - Slash { - epoch: Epoch(8), - block_height: Default::default(), - r#type: SlashType::DuplicateVote, - rate: Dec::one(), - }, - ) - .unwrap(); - slashed_amounts_map = empty_slash_amounts.clone(); - slash_redelegation( - &storage, - ¶ms, - token::Amount::from(7), - Epoch(7), - Epoch(10), - &alice, - Epoch(14), - &slashes, - &total_redelegated_unbonded, - Dec::one(), - &mut slashed_amounts_map, - ) - .unwrap(); - assert_eq!(slashed_amounts_map, empty_slash_amounts); - - // Test case 5 - slashes.pop(&mut storage).unwrap(); - slashes - .push( - &mut storage, - Slash { - epoch: Epoch(9), - block_height: Default::default(), - r#type: SlashType::DuplicateVote, - rate: Dec::one(), - }, - ) - .unwrap(); - slash_redelegation( - &storage, - ¶ms, - token::Amount::from(7), - Epoch(7), - Epoch(10), - &alice, - Epoch(14), - &slashes, - &total_redelegated_unbonded, - Dec::one(), - &mut slashed_amounts_map, - ) - .unwrap(); - assert_eq!(slashed_amounts_map, empty_slash_amounts); - - // Test case 6 - slashes - .push( - &mut storage, - Slash { - epoch: Epoch(8), - block_height: Default::default(), - r#type: SlashType::DuplicateVote, - rate: Dec::one(), - }, - ) - .unwrap(); - slash_redelegation( - &storage, - ¶ms, - token::Amount::from(7), - Epoch(7), - Epoch(10), - &alice, - Epoch(14), - &slashes, - &total_redelegated_unbonded, - Dec::one(), - &mut slashed_amounts_map, - ) - .unwrap(); - assert_eq!(slashed_amounts_map, 
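// Note (informal): in cases 4-6 the validator already records a 100% slash at
// epoch 8 and/or 9, after the epoch-7 start of the redelegated bond, so the
// tokens presumably count as already slashed and the map is expected to
// stay empty.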
empty_slash_amounts); -} - -/// `slashValidatorRedelegationTest` -#[test] -fn test_slash_validator_redelegation() { - let mut storage = TestWlStorage::default(); - let params = OwnedPosParams { - unbonding_len: 4, - ..Default::default() - }; - let gov_params = namada_core::ledger::governance::parameters::GovernanceParameters::default(); - gov_params.init_storage(&mut storage).unwrap(); - write_pos_params(&mut storage, ¶ms).unwrap(); - - let alice = established_address_1(); - let bob = established_address_2(); - - let total_redelegated_unbonded = - validator_total_redelegated_unbonded_handle(&alice); - total_redelegated_unbonded - .at(&Epoch(13)) - .at(&Epoch(10)) - .at(&alice) - .insert(&mut storage, Epoch(7), token::Amount::from(2)) - .unwrap(); - - let outgoing_redelegations = - validator_outgoing_redelegations_handle(&alice).at(&bob); - - let slashes = validator_slashes_handle(&alice); - - let mut slashed_amounts_map = BTreeMap::from_iter([ - (Epoch(15), token::Amount::zero()), - (Epoch(16), token::Amount::zero()), - ]); - let empty_slash_amounts = slashed_amounts_map.clone(); - - // Test case 1 - slash_validator_redelegation( - &storage, - ¶ms, - &alice, - Epoch(14), - &outgoing_redelegations, - &slashes, - &total_redelegated_unbonded, - Dec::one(), - &mut slashed_amounts_map, - ) - .unwrap(); - assert_eq!(slashed_amounts_map, empty_slash_amounts); - - // Test case 2 - total_redelegated_unbonded - .remove_all(&mut storage, &Epoch(13)) - .unwrap(); - slash_validator_redelegation( - &storage, - ¶ms, - &alice, - Epoch(14), - &outgoing_redelegations, - &slashes, - &total_redelegated_unbonded, - Dec::one(), - &mut slashed_amounts_map, - ) - .unwrap(); - assert_eq!(slashed_amounts_map, empty_slash_amounts); - - // Test case 3 - total_redelegated_unbonded - .at(&Epoch(13)) - .at(&Epoch(10)) - .at(&alice) - .insert(&mut storage, Epoch(7), token::Amount::from(2)) - .unwrap(); - outgoing_redelegations - .at(&Epoch(6)) - .insert(&mut storage, Epoch(8), token::Amount::from(7)) - .unwrap(); - slash_validator_redelegation( - &storage, - ¶ms, - &alice, - Epoch(14), - &outgoing_redelegations, - &slashes, - &total_redelegated_unbonded, - Dec::one(), - &mut slashed_amounts_map, - ) - .unwrap(); - assert_eq!( - slashed_amounts_map, - BTreeMap::from_iter([ - (Epoch(15), token::Amount::from(7)), - (Epoch(16), token::Amount::from(7)), - ]) - ); - - // Test case 4 - slashed_amounts_map = empty_slash_amounts.clone(); - outgoing_redelegations - .remove_all(&mut storage, &Epoch(6)) - .unwrap(); - outgoing_redelegations - .at(&Epoch(7)) - .insert(&mut storage, Epoch(8), token::Amount::from(7)) - .unwrap(); - slash_validator_redelegation( - &storage, - ¶ms, - &alice, - Epoch(14), - &outgoing_redelegations, - &slashes, - &total_redelegated_unbonded, - Dec::one(), - &mut slashed_amounts_map, - ) - .unwrap(); - assert_eq!( - slashed_amounts_map, - BTreeMap::from_iter([ - (Epoch(15), token::Amount::from(5)), - (Epoch(16), token::Amount::from(5)), - ]) - ); - - // Test case 5 - slashed_amounts_map = BTreeMap::from_iter([ - (Epoch(15), token::Amount::from(2)), - (Epoch(16), token::Amount::from(3)), - ]); - slash_validator_redelegation( - &storage, - ¶ms, - &alice, - Epoch(14), - &outgoing_redelegations, - &slashes, - &total_redelegated_unbonded, - Dec::one(), - &mut slashed_amounts_map, - ) - .unwrap(); - assert_eq!( - slashed_amounts_map, - BTreeMap::from_iter([ - (Epoch(15), token::Amount::from(7)), - (Epoch(16), token::Amount::from(8)), - ]) - ); - - // Test case 6 - slashed_amounts_map = 
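// Note (informal): case 6 mirrors the previous test: once the source validator
// itself has a 100% slash recorded at epoch 8, its outgoing redelegation is
// treated as already slashed and nothing further is accumulated into the map.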
empty_slash_amounts.clone(); - slashes - .push( - &mut storage, - Slash { - epoch: Epoch(8), - block_height: Default::default(), - r#type: SlashType::DuplicateVote, - rate: Dec::one(), - }, - ) - .unwrap(); - slash_validator_redelegation( - &storage, - ¶ms, - &alice, - Epoch(14), - &outgoing_redelegations, - &slashes, - &total_redelegated_unbonded, - Dec::one(), - &mut slashed_amounts_map, - ) - .unwrap(); - assert_eq!(slashed_amounts_map, empty_slash_amounts); -} - -/// `slashValidatorTest` -#[test] -fn test_slash_validator() { - let mut storage = TestWlStorage::default(); - let params = OwnedPosParams { - unbonding_len: 4, - ..Default::default() - }; - let gov_params = namada_core::ledger::governance::parameters::GovernanceParameters::default(); - gov_params.init_storage(&mut storage).unwrap(); - write_pos_params(&mut storage, ¶ms).unwrap(); - - let alice = established_address_1(); - let bob = established_address_2(); - - let total_bonded = total_bonded_handle(&bob); - let total_unbonded = total_unbonded_handle(&bob); - let total_redelegated_bonded = - validator_total_redelegated_bonded_handle(&bob); - let total_redelegated_unbonded = - validator_total_redelegated_unbonded_handle(&bob); - - let infraction_stake = token::Amount::from(23); - - let initial_stakes = BTreeMap::from_iter([ - (Epoch(11), infraction_stake), - (Epoch(12), infraction_stake), - (Epoch(13), infraction_stake), - ]); - let mut exp_res = initial_stakes.clone(); - - let current_epoch = Epoch(10); - let infraction_epoch = - current_epoch - params.slash_processing_epoch_offset(); - let processing_epoch = current_epoch.next(); - let slash_rate = Dec::one(); - - // Test case 1 - println!("\nTEST 1:"); - - total_bonded - .set(&mut storage, 23.into(), infraction_epoch - 2, 0) - .unwrap(); - let res = slash_validator( - &storage, - ¶ms, - &bob, - slash_rate, - processing_epoch, - &Default::default(), - ) - .unwrap(); - assert_eq!(res, exp_res); - - // Test case 2 - println!("\nTEST 2:"); - total_bonded - .set(&mut storage, 17.into(), infraction_epoch - 2, 0) - .unwrap(); - total_unbonded - .at(&(current_epoch + params.pipeline_len)) - .insert(&mut storage, infraction_epoch - 2, 6.into()) - .unwrap(); - let res = slash_validator( - &storage, - ¶ms, - &bob, - slash_rate, - processing_epoch, - &Default::default(), - ) - .unwrap(); - exp_res.insert(Epoch(12), 17.into()); - exp_res.insert(Epoch(13), 17.into()); - assert_eq!(res, exp_res); - - // Test case 3 - println!("\nTEST 3:"); - total_redelegated_bonded - .at(&infraction_epoch.prev()) - .at(&alice) - .insert(&mut storage, Epoch(2), 5.into()) - .unwrap(); - total_redelegated_bonded - .at(&infraction_epoch.prev()) - .at(&alice) - .insert(&mut storage, Epoch(3), 1.into()) - .unwrap(); - - let res = slash_validator( - &storage, - ¶ms, - &bob, - slash_rate, - processing_epoch, - &Default::default(), - ) - .unwrap(); - assert_eq!(res, exp_res); - - // Test case 4 - println!("\nTEST 4:"); - total_unbonded_handle(&bob) - .at(&(current_epoch + params.pipeline_len)) - .remove(&mut storage, &(infraction_epoch - 2)) - .unwrap(); - total_unbonded_handle(&bob) - .at(&(current_epoch + params.pipeline_len)) - .insert(&mut storage, infraction_epoch - 1, 6.into()) - .unwrap(); - total_redelegated_unbonded - .at(&(current_epoch + params.pipeline_len)) - .at(&infraction_epoch.prev()) - .at(&alice) - .insert(&mut storage, Epoch(2), 5.into()) - .unwrap(); - total_redelegated_unbonded - .at(&(current_epoch + params.pipeline_len)) - .at(&infraction_epoch.prev()) - .at(&alice) - .insert(&mut storage, 
Epoch(3), 1.into()) - .unwrap(); - let res = slash_validator( - &storage, - ¶ms, - &bob, - slash_rate, - processing_epoch, - &Default::default(), - ) - .unwrap(); - assert_eq!(res, exp_res); - - // Test case 5 - println!("\nTEST 5:"); - total_bonded_handle(&bob) - .set(&mut storage, 19.into(), infraction_epoch - 2, 0) - .unwrap(); - total_unbonded_handle(&bob) - .at(&(current_epoch + params.pipeline_len)) - .insert(&mut storage, infraction_epoch - 1, 4.into()) - .unwrap(); - total_redelegated_bonded - .at(¤t_epoch) - .at(&alice) - .insert(&mut storage, Epoch(2), token::Amount::from(1)) - .unwrap(); - total_redelegated_unbonded - .at(&(current_epoch + params.pipeline_len)) - .at(&infraction_epoch.prev()) - .at(&alice) - .remove(&mut storage, &Epoch(3)) - .unwrap(); - total_redelegated_unbonded - .at(&(current_epoch + params.pipeline_len)) - .at(&infraction_epoch.prev()) - .at(&alice) - .insert(&mut storage, Epoch(2), 4.into()) - .unwrap(); - let res = slash_validator( - &storage, - ¶ms, - &bob, - slash_rate, - processing_epoch, - &Default::default(), - ) - .unwrap(); - exp_res.insert(Epoch(12), 19.into()); - exp_res.insert(Epoch(13), 19.into()); - assert_eq!(res, exp_res); - - // Test case 6 - println!("\nTEST 6:"); - total_unbonded_handle(&bob) - .remove_all(&mut storage, &(current_epoch + params.pipeline_len)) - .unwrap(); - total_redelegated_unbonded - .remove_all(&mut storage, &(current_epoch + params.pipeline_len)) - .unwrap(); - total_redelegated_bonded - .remove_all(&mut storage, ¤t_epoch) - .unwrap(); - total_bonded_handle(&bob) - .set(&mut storage, 23.into(), infraction_epoch - 2, 0) - .unwrap(); - total_bonded_handle(&bob) - .set(&mut storage, 6.into(), current_epoch, 0) - .unwrap(); - - let res = slash_validator( - &storage, - ¶ms, - &bob, - slash_rate, - processing_epoch, - &Default::default(), - ) - .unwrap(); - exp_res = initial_stakes; - assert_eq!(res, exp_res); - - // Test case 7 - println!("\nTEST 7:"); - total_bonded - .get_data_handler() - .remove(&mut storage, ¤t_epoch) - .unwrap(); - total_unbonded - .at(¤t_epoch.next()) - .insert(&mut storage, current_epoch, 6.into()) - .unwrap(); - let res = slash_validator( - &storage, - ¶ms, - &bob, - slash_rate, - processing_epoch, - &Default::default(), - ) - .unwrap(); - assert_eq!(res, exp_res); - - // Test case 8 - println!("\nTEST 8:"); - total_bonded - .get_data_handler() - .insert(&mut storage, current_epoch, 3.into()) - .unwrap(); - total_unbonded - .at(¤t_epoch.next()) - .insert(&mut storage, current_epoch, 3.into()) - .unwrap(); - let res = slash_validator( - &storage, - ¶ms, - &bob, - slash_rate, - processing_epoch, - &Default::default(), - ) - .unwrap(); - assert_eq!(res, exp_res); - - // Test case 9 - println!("\nTEST 9:"); - total_unbonded - .remove_all(&mut storage, ¤t_epoch.next()) - .unwrap(); - total_bonded - .set(&mut storage, 6.into(), current_epoch, 0) - .unwrap(); - total_redelegated_bonded - .at(¤t_epoch) - .at(&alice) - .insert(&mut storage, 2.into(), 5.into()) - .unwrap(); - total_redelegated_bonded - .at(¤t_epoch) - .at(&alice) - .insert(&mut storage, 3.into(), 1.into()) - .unwrap(); - let res = slash_validator( - &storage, - ¶ms, - &bob, - slash_rate, - processing_epoch, - &Default::default(), - ) - .unwrap(); - assert_eq!(res, exp_res); - - // Test case 10 - println!("\nTEST 10:"); - total_redelegated_bonded - .remove_all(&mut storage, ¤t_epoch) - .unwrap(); - total_bonded - .get_data_handler() - .remove(&mut storage, ¤t_epoch) - .unwrap(); - total_redelegated_unbonded - .at(¤t_epoch.next()) - 
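// Note (informal reading): `slash_validator` appears to return, per epoch, the
// amount of stake to slash; with rate Dec::one() the baseline is the full
// 23-token infraction stake, reduced wherever tokens were unbonded or
// redelegated away before the infraction, and it drops to zero in case 13
// where an earlier slash already covered the bond.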
.at(¤t_epoch) - .at(&alice) - .insert(&mut storage, 2.into(), 5.into()) - .unwrap(); - total_redelegated_unbonded - .at(¤t_epoch.next()) - .at(¤t_epoch) - .at(&alice) - .insert(&mut storage, 3.into(), 1.into()) - .unwrap(); - let res = slash_validator( - &storage, - ¶ms, - &bob, - slash_rate, - processing_epoch, - &Default::default(), - ) - .unwrap(); - assert_eq!(res, exp_res); - - // Test case 11 - println!("\nTEST 11:"); - total_bonded - .set(&mut storage, 2.into(), current_epoch, 0) - .unwrap(); - total_redelegated_unbonded - .at(¤t_epoch.next()) - .at(¤t_epoch) - .at(&alice) - .insert(&mut storage, 2.into(), 4.into()) - .unwrap(); - total_redelegated_unbonded - .at(¤t_epoch.next()) - .at(¤t_epoch) - .at(&alice) - .remove(&mut storage, &3.into()) - .unwrap(); - total_redelegated_bonded - .at(¤t_epoch) - .at(&alice) - .insert(&mut storage, 2.into(), 1.into()) - .unwrap(); - total_redelegated_bonded - .at(¤t_epoch) - .at(&alice) - .insert(&mut storage, 3.into(), 1.into()) - .unwrap(); - let res = slash_validator( - &storage, - ¶ms, - &bob, - slash_rate, - processing_epoch, - &Default::default(), - ) - .unwrap(); - assert_eq!(res, exp_res); - - // Test case 12 - println!("\nTEST 12:"); - total_bonded - .set(&mut storage, 6.into(), current_epoch, 0) - .unwrap(); - total_bonded - .set(&mut storage, 2.into(), current_epoch.next(), 0) - .unwrap(); - total_redelegated_bonded - .remove_all(&mut storage, ¤t_epoch) - .unwrap(); - total_redelegated_bonded - .at(¤t_epoch.next()) - .at(&alice) - .insert(&mut storage, 2.into(), 1.into()) - .unwrap(); - total_redelegated_bonded - .at(¤t_epoch.next()) - .at(&alice) - .insert(&mut storage, 3.into(), 1.into()) - .unwrap(); - let res = slash_validator( - &storage, - ¶ms, - &bob, - slash_rate, - processing_epoch, - &Default::default(), - ) - .unwrap(); - assert_eq!(res, exp_res); - - // Test case 13 - println!("\nTEST 13:"); - validator_slashes_handle(&bob) - .push( - &mut storage, - Slash { - epoch: infraction_epoch.prev(), - block_height: 0, - r#type: SlashType::DuplicateVote, - rate: Dec::one(), - }, - ) - .unwrap(); - total_redelegated_unbonded - .remove_all(&mut storage, ¤t_epoch.next()) - .unwrap(); - total_bonded - .get_data_handler() - .remove(&mut storage, ¤t_epoch.next()) - .unwrap(); - total_redelegated_bonded - .remove_all(&mut storage, ¤t_epoch.next()) - .unwrap(); - let res = slash_validator( - &storage, - ¶ms, - &bob, - slash_rate, - processing_epoch, - &Default::default(), - ) - .unwrap(); - exp_res.insert(Epoch(11), 0.into()); - exp_res.insert(Epoch(12), 0.into()); - exp_res.insert(Epoch(13), 0.into()); - assert_eq!(res, exp_res); -} - -/// `computeAmountAfterSlashingUnbondTest` -#[test] -fn compute_amount_after_slashing_unbond_test() { - let mut storage = TestWlStorage::default(); - let params = OwnedPosParams { - unbonding_len: 4, - ..Default::default() - }; - - // Test data - let alice = established_address_1(); - let bob = established_address_2(); - let unbonds: BTreeMap = BTreeMap::from_iter([ - ((Epoch(2)), token::Amount::from(5)), - ((Epoch(4)), token::Amount::from(6)), - ]); - let redelegated_unbonds: EagerRedelegatedUnbonds = BTreeMap::from_iter([( - Epoch(2), - BTreeMap::from_iter([( - alice.clone(), - BTreeMap::from_iter([(Epoch(1), token::Amount::from(1))]), - )]), - )]); - - // Test case 1 - let slashes = vec![]; - let result = compute_amount_after_slashing_unbond( - &storage, - ¶ms, - &unbonds, - &redelegated_unbonds, - slashes, - ) - .unwrap(); - assert_eq!(result.sum, 11.into()); - itertools::assert_equal( - 
result.epoch_map, - [(2.into(), 5.into()), (4.into(), 6.into())], - ); - - // Test case 2 - let bob_slash = Slash { - epoch: Epoch(5), - block_height: Default::default(), - r#type: SlashType::DuplicateVote, - rate: Dec::one(), - }; - let slashes = vec![bob_slash.clone()]; - validator_slashes_handle(&bob) - .push(&mut storage, bob_slash) - .unwrap(); - let result = compute_amount_after_slashing_unbond( - &storage, - ¶ms, - &unbonds, - &redelegated_unbonds, - slashes, - ) - .unwrap(); - assert_eq!(result.sum, 0.into()); - itertools::assert_equal( - result.epoch_map, - [(2.into(), 0.into()), (4.into(), 0.into())], - ); - - // Test case 3 - let alice_slash = Slash { - epoch: Epoch(0), - block_height: Default::default(), - r#type: SlashType::DuplicateVote, - rate: Dec::one(), - }; - let slashes = vec![alice_slash.clone()]; - validator_slashes_handle(&alice) - .push(&mut storage, alice_slash) - .unwrap(); - validator_slashes_handle(&bob).pop(&mut storage).unwrap(); - let result = compute_amount_after_slashing_unbond( - &storage, - ¶ms, - &unbonds, - &redelegated_unbonds, - slashes, - ) - .unwrap(); - assert_eq!(result.sum, 11.into()); - itertools::assert_equal( - result.epoch_map, - [(2.into(), 5.into()), (4.into(), 6.into())], - ); - - // Test case 4 - let alice_slash = Slash { - epoch: Epoch(1), - block_height: Default::default(), - r#type: SlashType::DuplicateVote, - rate: Dec::one(), - }; - let slashes = vec![alice_slash.clone()]; - validator_slashes_handle(&alice).pop(&mut storage).unwrap(); - validator_slashes_handle(&alice) - .push(&mut storage, alice_slash) - .unwrap(); - let result = compute_amount_after_slashing_unbond( - &storage, - ¶ms, - &unbonds, - &redelegated_unbonds, - slashes, - ) - .unwrap(); - assert_eq!(result.sum, 10.into()); - itertools::assert_equal( - result.epoch_map, - [(2.into(), 4.into()), (4.into(), 6.into())], - ); -} - -/// `computeAmountAfterSlashingWithdrawTest` -#[test] -fn compute_amount_after_slashing_withdraw_test() { - let mut storage = TestWlStorage::default(); - let params = OwnedPosParams { - unbonding_len: 4, - ..Default::default() - }; - - // Test data - let alice = established_address_1(); - let bob = established_address_2(); - let unbonds_and_redelegated_unbonds: BTreeMap< - (Epoch, Epoch), - (token::Amount, EagerRedelegatedBondsMap), - > = BTreeMap::from_iter([ - ( - (Epoch(2), Epoch(20)), - ( - // unbond - token::Amount::from(5), - // redelegations - BTreeMap::from_iter([( - alice.clone(), - BTreeMap::from_iter([(Epoch(1), token::Amount::from(1))]), - )]), - ), - ), - ( - (Epoch(4), Epoch(20)), - ( - // unbond - token::Amount::from(6), - // redelegations - BTreeMap::default(), - ), - ), - ]); - - // Test case 1 - let slashes = vec![]; - let result = compute_amount_after_slashing_withdraw( - &storage, - ¶ms, - &unbonds_and_redelegated_unbonds, - slashes, - ) - .unwrap(); - assert_eq!(result.sum, 11.into()); - itertools::assert_equal( - result.epoch_map, - [(2.into(), 5.into()), (4.into(), 6.into())], - ); - - // Test case 2 - let bob_slash = Slash { - epoch: Epoch(5), - block_height: Default::default(), - r#type: SlashType::DuplicateVote, - rate: Dec::one(), - }; - let slashes = vec![bob_slash.clone()]; - validator_slashes_handle(&bob) - .push(&mut storage, bob_slash) - .unwrap(); - let result = compute_amount_after_slashing_withdraw( - &storage, - ¶ms, - &unbonds_and_redelegated_unbonds, - slashes, - ) - .unwrap(); - assert_eq!(result.sum, 0.into()); - itertools::assert_equal( - result.epoch_map, - [(2.into(), 0.into()), (4.into(), 0.into())], - ); 
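// Note: cases 3 and 4 below move the slash to the redelegation source, alice.
// A slash at epoch 0 predates her epoch-1 redelegated token and changes
// nothing (sum stays 11), while a slash at epoch 1 catches that token and
// trims the epoch-2 entry from 5 to 4 (sum 10).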
- - // Test case 3 - let alice_slash = Slash { - epoch: Epoch(0), - block_height: Default::default(), - r#type: SlashType::DuplicateVote, - rate: Dec::one(), - }; - let slashes = vec![alice_slash.clone()]; - validator_slashes_handle(&alice) - .push(&mut storage, alice_slash) - .unwrap(); - validator_slashes_handle(&bob).pop(&mut storage).unwrap(); - let result = compute_amount_after_slashing_withdraw( - &storage, - ¶ms, - &unbonds_and_redelegated_unbonds, - slashes, - ) - .unwrap(); - assert_eq!(result.sum, 11.into()); - itertools::assert_equal( - result.epoch_map, - [(2.into(), 5.into()), (4.into(), 6.into())], - ); - - // Test case 4 - let alice_slash = Slash { - epoch: Epoch(1), - block_height: Default::default(), - r#type: SlashType::DuplicateVote, - rate: Dec::one(), - }; - let slashes = vec![alice_slash.clone()]; - validator_slashes_handle(&alice).pop(&mut storage).unwrap(); - validator_slashes_handle(&alice) - .push(&mut storage, alice_slash) - .unwrap(); - let result = compute_amount_after_slashing_withdraw( - &storage, - ¶ms, - &unbonds_and_redelegated_unbonds, - slashes, - ) - .unwrap(); - assert_eq!(result.sum, 10.into()); - itertools::assert_equal( - result.epoch_map, - [(2.into(), 4.into()), (4.into(), 6.into())], - ); -} - -fn arb_redelegation_amounts( - max_delegation: u64, -) -> impl Strategy { - let arb_delegation = arb_amount_non_zero_ceiled(max_delegation); - let amounts = arb_delegation.prop_flat_map(move |amount_delegate| { - let amount_redelegate = arb_amount_non_zero_ceiled(max( - 1, - u64::try_from(amount_delegate.raw_amount()).unwrap() - 1, - )); - (Just(amount_delegate), amount_redelegate) - }); - amounts.prop_flat_map(move |(amount_delegate, amount_redelegate)| { - let amount_unbond = arb_amount_non_zero_ceiled(max( - 1, - u64::try_from(amount_redelegate.raw_amount()).unwrap() - 1, - )); - ( - Just(amount_delegate), - Just(amount_redelegate), - amount_unbond, - ) - }) -} - -fn test_simple_redelegation_aux( - mut validators: Vec, - amount_delegate: token::Amount, - amount_redelegate: token::Amount, - amount_unbond: token::Amount, -) { - validators.sort_by(|a, b| b.tokens.cmp(&a.tokens)); - - let src_validator = validators[0].address.clone(); - let dest_validator = validators[1].address.clone(); - - let mut storage = TestWlStorage::default(); - let params = OwnedPosParams { - unbonding_len: 4, - ..Default::default() - }; - - // Genesis - let mut current_epoch = storage.storage.block.epoch; - let params = test_init_genesis( - &mut storage, - params, - validators.clone().into_iter(), - current_epoch, - ) - .unwrap(); - storage.commit_block().unwrap(); - - // Get a delegator with some tokens - let staking_token = staking_token_address(&storage); - let delegator = address::testing::gen_implicit_address(); - let del_balance = token::Amount::from_uint(1_000_000, 0).unwrap(); - credit_tokens(&mut storage, &staking_token, &delegator, del_balance) - .unwrap(); - - // Ensure that we cannot redelegate with the same src and dest validator - let err = super::redelegate_tokens( - &mut storage, - &delegator, - &src_validator, - &src_validator, - current_epoch, - amount_redelegate, - ) - .unwrap_err(); - let err_str = err.to_string(); - assert_matches!( - err.downcast::().unwrap().deref(), - RedelegationError::RedelegationSrcEqDest, - "Redelegation with the same src and dest validator must be rejected, \ - got {err_str}", - ); - - for _ in 0..5 { - current_epoch = advance_epoch(&mut storage, ¶ms); - super::process_slashes(&mut storage, current_epoch).unwrap(); - } - - let 
init_epoch = current_epoch; - - // Delegate in epoch 1 to src_validator - println!( - "\nBONDING {} TOKENS TO {}\n", - amount_delegate.to_string_native(), - &src_validator - ); - super::bond_tokens( - &mut storage, - Some(&delegator), - &src_validator, - amount_delegate, - current_epoch, - None, - ) - .unwrap(); - - println!("\nAFTER DELEGATION\n"); - let bonds = bond_handle(&delegator, &src_validator) - .get_data_handler() - .collect_map(&storage) - .unwrap(); - let bonds_dest = bond_handle(&delegator, &dest_validator) - .get_data_handler() - .collect_map(&storage) - .unwrap(); - let unbonds = unbond_handle(&delegator, &src_validator) - .collect_map(&storage) - .unwrap(); - let tot_bonds = total_bonded_handle(&src_validator) - .get_data_handler() - .collect_map(&storage) - .unwrap(); - let tot_unbonds = total_unbonded_handle(&src_validator) - .collect_map(&storage) - .unwrap(); - dbg!(&bonds, &bonds_dest, &unbonds, &tot_bonds, &tot_unbonds); - - // Advance three epochs - current_epoch = advance_epoch(&mut storage, ¶ms); - super::process_slashes(&mut storage, current_epoch).unwrap(); - current_epoch = advance_epoch(&mut storage, ¶ms); - super::process_slashes(&mut storage, current_epoch).unwrap(); - current_epoch = advance_epoch(&mut storage, ¶ms); - super::process_slashes(&mut storage, current_epoch).unwrap(); - - // Redelegate in epoch 3 - println!( - "\nREDELEGATING {} TOKENS TO {}\n", - amount_redelegate.to_string_native(), - &dest_validator - ); - - super::redelegate_tokens( - &mut storage, - &delegator, - &src_validator, - &dest_validator, - current_epoch, - amount_redelegate, - ) - .unwrap(); - - println!("\nAFTER REDELEGATION\n"); - println!("\nDELEGATOR\n"); - let bonds_src = bond_handle(&delegator, &src_validator) - .get_data_handler() - .collect_map(&storage) - .unwrap(); - let bonds_dest = bond_handle(&delegator, &dest_validator) - .get_data_handler() - .collect_map(&storage) - .unwrap(); - let unbonds_src = unbond_handle(&delegator, &src_validator) - .collect_map(&storage) - .unwrap(); - let unbonds_dest = unbond_handle(&delegator, &dest_validator) - .collect_map(&storage) - .unwrap(); - let redel_bonds = delegator_redelegated_bonds_handle(&delegator) - .collect_map(&storage) - .unwrap(); - let redel_unbonds = delegator_redelegated_unbonds_handle(&delegator) - .collect_map(&storage) - .unwrap(); - - dbg!( - &bonds_src, - &bonds_dest, - &unbonds_src, - &unbonds_dest, - &redel_bonds, - &redel_unbonds - ); - - // Dest val - println!("\nDEST VALIDATOR\n"); - - let incoming_redels_dest = - validator_incoming_redelegations_handle(&dest_validator) - .collect_map(&storage) - .unwrap(); - let outgoing_redels_dest = - validator_outgoing_redelegations_handle(&dest_validator) - .collect_map(&storage) - .unwrap(); - let tot_bonds_dest = total_bonded_handle(&dest_validator) - .get_data_handler() - .collect_map(&storage) - .unwrap(); - let tot_unbonds_dest = total_unbonded_handle(&dest_validator) - .collect_map(&storage) - .unwrap(); - let tot_redel_bonds_dest = - validator_total_redelegated_bonded_handle(&dest_validator) - .collect_map(&storage) - .unwrap(); - let tot_redel_unbonds_dest = - validator_total_redelegated_unbonded_handle(&dest_validator) - .collect_map(&storage) - .unwrap(); - dbg!( - &incoming_redels_dest, - &outgoing_redels_dest, - &tot_bonds_dest, - &tot_unbonds_dest, - &tot_redel_bonds_dest, - &tot_redel_unbonds_dest - ); - - // Src val - println!("\nSRC VALIDATOR\n"); - - let incoming_redels_src = - validator_incoming_redelegations_handle(&src_validator) - 
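// Note (informal): the checks that follow read the redelegated amount under
// dest_validator -> (current_epoch + pipeline_len) -> src_validator ->
// (init_epoch + pipeline_len), i.e. entries appear to be keyed by the epoch at
// which the redelegation takes effect and by the start epoch of the original
// bond; the incoming-redelegation record for the delegator stores the same
// pipeline epoch.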
.collect_map(&storage) - .unwrap(); - let outgoing_redels_src = - validator_outgoing_redelegations_handle(&src_validator) - .collect_map(&storage) - .unwrap(); - let tot_bonds_src = total_bonded_handle(&src_validator) - .get_data_handler() - .collect_map(&storage) - .unwrap(); - let tot_unbonds_src = total_unbonded_handle(&src_validator) - .collect_map(&storage) - .unwrap(); - let tot_redel_bonds_src = - validator_total_redelegated_bonded_handle(&src_validator) - .collect_map(&storage) - .unwrap(); - let tot_redel_unbonds_src = - validator_total_redelegated_unbonded_handle(&src_validator) - .collect_map(&storage) - .unwrap(); - dbg!( - &incoming_redels_src, - &outgoing_redels_src, - &tot_bonds_src, - &tot_unbonds_src, - &tot_redel_bonds_src, - &tot_redel_unbonds_src - ); - - // Checks - let redelegated = delegator_redelegated_bonds_handle(&delegator) - .at(&dest_validator) - .at(&(current_epoch + params.pipeline_len)) - .at(&src_validator) - .get(&storage, &(init_epoch + params.pipeline_len)) - .unwrap() - .unwrap(); - assert_eq!(redelegated, amount_redelegate); - - let redel_start_epoch = - validator_incoming_redelegations_handle(&dest_validator) - .get(&storage, &delegator) - .unwrap() - .unwrap(); - assert_eq!(redel_start_epoch, current_epoch + params.pipeline_len); - - let redelegated = validator_outgoing_redelegations_handle(&src_validator) - .at(&dest_validator) - .at(¤t_epoch.prev()) - .get(&storage, ¤t_epoch) - .unwrap() - .unwrap(); - assert_eq!(redelegated, amount_redelegate); - - // Advance three epochs - current_epoch = advance_epoch(&mut storage, ¶ms); - super::process_slashes(&mut storage, current_epoch).unwrap(); - current_epoch = advance_epoch(&mut storage, ¶ms); - super::process_slashes(&mut storage, current_epoch).unwrap(); - current_epoch = advance_epoch(&mut storage, ¶ms); - super::process_slashes(&mut storage, current_epoch).unwrap(); - - // Unbond in epoch 5 from dest_validator - println!( - "\nUNBONDING {} TOKENS FROM {}\n", - amount_unbond.to_string_native(), - &dest_validator - ); - let _ = unbond_tokens( - &mut storage, - Some(&delegator), - &dest_validator, - amount_unbond, - current_epoch, - false, - ) - .unwrap(); - - println!("\nAFTER UNBONDING\n"); - println!("\nDELEGATOR\n"); - - let bonds_src = bond_handle(&delegator, &src_validator) - .get_data_handler() - .collect_map(&storage) - .unwrap(); - let bonds_dest = bond_handle(&delegator, &dest_validator) - .get_data_handler() - .collect_map(&storage) - .unwrap(); - let unbonds_src = unbond_handle(&delegator, &src_validator) - .collect_map(&storage) - .unwrap(); - let unbonds_dest = unbond_handle(&delegator, &dest_validator) - .collect_map(&storage) - .unwrap(); - let redel_bonds = delegator_redelegated_bonds_handle(&delegator) - .collect_map(&storage) - .unwrap(); - let redel_unbonds = delegator_redelegated_unbonds_handle(&delegator) - .collect_map(&storage) - .unwrap(); - - dbg!( - &bonds_src, - &bonds_dest, - &unbonds_src, - &unbonds_dest, - &redel_bonds, - &redel_unbonds - ); - - println!("\nDEST VALIDATOR\n"); - - let incoming_redels_dest = - validator_incoming_redelegations_handle(&dest_validator) - .collect_map(&storage) - .unwrap(); - let outgoing_redels_dest = - validator_outgoing_redelegations_handle(&dest_validator) - .collect_map(&storage) - .unwrap(); - let tot_bonds_dest = total_bonded_handle(&dest_validator) - .get_data_handler() - .collect_map(&storage) - .unwrap(); - let tot_unbonds_dest = total_unbonded_handle(&dest_validator) - .collect_map(&storage) - .unwrap(); - let tot_redel_bonds_dest = 
- validator_total_redelegated_bonded_handle(&dest_validator) - .collect_map(&storage) - .unwrap(); - let tot_redel_unbonds_dest = - validator_total_redelegated_unbonded_handle(&dest_validator) - .collect_map(&storage) - .unwrap(); - dbg!( - &incoming_redels_dest, - &outgoing_redels_dest, - &tot_bonds_dest, - &tot_unbonds_dest, - &tot_redel_bonds_dest, - &tot_redel_unbonds_dest - ); - - let bond_start = init_epoch + params.pipeline_len; - let redelegation_end = bond_start + params.pipeline_len + 1u64; - let unbond_end = - redelegation_end + params.withdrawable_epoch_offset() + 1u64; - let unbond_materialized = redelegation_end + params.pipeline_len + 1u64; - - // Checks - let redelegated_remaining = delegator_redelegated_bonds_handle(&delegator) - .at(&dest_validator) - .at(&redelegation_end) - .at(&src_validator) - .get(&storage, &bond_start) - .unwrap() - .unwrap_or_default(); - assert_eq!(redelegated_remaining, amount_redelegate - amount_unbond); - - let redel_unbonded = delegator_redelegated_unbonds_handle(&delegator) - .at(&dest_validator) - .at(&redelegation_end) - .at(&unbond_end) - .at(&src_validator) - .get(&storage, &bond_start) - .unwrap() - .unwrap(); - assert_eq!(redel_unbonded, amount_unbond); - - dbg!(unbond_materialized, redelegation_end, bond_start); - let total_redel_unbonded = - validator_total_redelegated_unbonded_handle(&dest_validator) - .at(&unbond_materialized) - .at(&redelegation_end) - .at(&src_validator) - .get(&storage, &bond_start) - .unwrap() - .unwrap(); - assert_eq!(total_redel_unbonded, amount_unbond); - - // Advance to withdrawal epoch - loop { - current_epoch = advance_epoch(&mut storage, ¶ms); - super::process_slashes(&mut storage, current_epoch).unwrap(); - if current_epoch == unbond_end { - break; - } - } - - // Withdraw - withdraw_tokens( - &mut storage, - Some(&delegator), - &dest_validator, - current_epoch, - ) - .unwrap(); - - assert!( - delegator_redelegated_unbonds_handle(&delegator) - .at(&dest_validator) - .is_empty(&storage) - .unwrap() - ); - - let delegator_balance = storage - .read::(&token::balance_key(&staking_token, &delegator)) - .unwrap() - .unwrap_or_default(); - assert_eq!( - delegator_balance, - del_balance - amount_delegate + amount_unbond - ); -} - -fn test_redelegation_with_slashing_aux( - mut validators: Vec, - amount_delegate: token::Amount, - amount_redelegate: token::Amount, - amount_unbond: token::Amount, -) { - validators.sort_by(|a, b| b.tokens.cmp(&a.tokens)); - - let src_validator = validators[0].address.clone(); - let dest_validator = validators[1].address.clone(); - - let mut storage = TestWlStorage::default(); - let params = OwnedPosParams { - unbonding_len: 4, - // Avoid empty consensus set by removing the threshold - validator_stake_threshold: token::Amount::zero(), - ..Default::default() - }; - - // Genesis - let mut current_epoch = storage.storage.block.epoch; - let params = test_init_genesis( - &mut storage, - params, - validators.clone().into_iter(), - current_epoch, - ) - .unwrap(); - storage.commit_block().unwrap(); - - // Get a delegator with some tokens - let staking_token = staking_token_address(&storage); - let delegator = address::testing::gen_implicit_address(); - let del_balance = token::Amount::from_uint(1_000_000, 0).unwrap(); - credit_tokens(&mut storage, &staking_token, &delegator, del_balance) - .unwrap(); - - for _ in 0..5 { - current_epoch = advance_epoch(&mut storage, ¶ms); - super::process_slashes(&mut storage, current_epoch).unwrap(); - } - - let init_epoch = current_epoch; - - // Delegate in 
epoch 5 to src_validator - println!( - "\nBONDING {} TOKENS TO {}\n", - amount_delegate.to_string_native(), - &src_validator - ); - super::bond_tokens( - &mut storage, - Some(&delegator), - &src_validator, - amount_delegate, - current_epoch, - None, - ) - .unwrap(); - - println!("\nAFTER DELEGATION\n"); - let bonds = bond_handle(&delegator, &src_validator) - .get_data_handler() - .collect_map(&storage) - .unwrap(); - let bonds_dest = bond_handle(&delegator, &dest_validator) - .get_data_handler() - .collect_map(&storage) - .unwrap(); - let unbonds = unbond_handle(&delegator, &src_validator) - .collect_map(&storage) - .unwrap(); - let tot_bonds = total_bonded_handle(&src_validator) - .get_data_handler() - .collect_map(&storage) - .unwrap(); - let tot_unbonds = total_unbonded_handle(&src_validator) - .collect_map(&storage) - .unwrap(); - dbg!(&bonds, &bonds_dest, &unbonds, &tot_bonds, &tot_unbonds); - - // Advance three epochs - current_epoch = advance_epoch(&mut storage, ¶ms); - super::process_slashes(&mut storage, current_epoch).unwrap(); - current_epoch = advance_epoch(&mut storage, ¶ms); - super::process_slashes(&mut storage, current_epoch).unwrap(); - current_epoch = advance_epoch(&mut storage, ¶ms); - super::process_slashes(&mut storage, current_epoch).unwrap(); - - // Redelegate in epoch 8 - println!( - "\nREDELEGATING {} TOKENS TO {}\n", - amount_redelegate.to_string_native(), - &dest_validator - ); - - super::redelegate_tokens( - &mut storage, - &delegator, - &src_validator, - &dest_validator, - current_epoch, - amount_redelegate, - ) - .unwrap(); - - println!("\nAFTER REDELEGATION\n"); - println!("\nDELEGATOR\n"); - let bonds_src = bond_handle(&delegator, &src_validator) - .get_data_handler() - .collect_map(&storage) - .unwrap(); - let bonds_dest = bond_handle(&delegator, &dest_validator) - .get_data_handler() - .collect_map(&storage) - .unwrap(); - let unbonds_src = unbond_handle(&delegator, &src_validator) - .collect_map(&storage) - .unwrap(); - let unbonds_dest = unbond_handle(&delegator, &dest_validator) - .collect_map(&storage) - .unwrap(); - let redel_bonds = delegator_redelegated_bonds_handle(&delegator) - .collect_map(&storage) - .unwrap(); - let redel_unbonds = delegator_redelegated_unbonds_handle(&delegator) - .collect_map(&storage) - .unwrap(); - - dbg!( - &bonds_src, - &bonds_dest, - &unbonds_src, - &unbonds_dest, - &redel_bonds, - &redel_unbonds - ); - - // Dest val - println!("\nDEST VALIDATOR\n"); - - let incoming_redels_dest = - validator_incoming_redelegations_handle(&dest_validator) - .collect_map(&storage) - .unwrap(); - let outgoing_redels_dest = - validator_outgoing_redelegations_handle(&dest_validator) - .collect_map(&storage) - .unwrap(); - let tot_bonds_dest = total_bonded_handle(&dest_validator) - .get_data_handler() - .collect_map(&storage) - .unwrap(); - let tot_unbonds_dest = total_unbonded_handle(&dest_validator) - .collect_map(&storage) - .unwrap(); - let tot_redel_bonds_dest = - validator_total_redelegated_bonded_handle(&dest_validator) - .collect_map(&storage) - .unwrap(); - let tot_redel_unbonds_dest = - validator_total_redelegated_unbonded_handle(&dest_validator) - .collect_map(&storage) - .unwrap(); - dbg!( - &incoming_redels_dest, - &outgoing_redels_dest, - &tot_bonds_dest, - &tot_unbonds_dest, - &tot_redel_bonds_dest, - &tot_redel_unbonds_dest - ); - - // Src val - println!("\nSRC VALIDATOR\n"); - - let incoming_redels_src = - validator_incoming_redelegations_handle(&src_validator) - .collect_map(&storage) - .unwrap(); - let outgoing_redels_src = 
- validator_outgoing_redelegations_handle(&src_validator) - .collect_map(&storage) - .unwrap(); - let tot_bonds_src = total_bonded_handle(&src_validator) - .get_data_handler() - .collect_map(&storage) - .unwrap(); - let tot_unbonds_src = total_unbonded_handle(&src_validator) - .collect_map(&storage) - .unwrap(); - let tot_redel_bonds_src = - validator_total_redelegated_bonded_handle(&src_validator) - .collect_map(&storage) - .unwrap(); - let tot_redel_unbonds_src = - validator_total_redelegated_unbonded_handle(&src_validator) - .collect_map(&storage) - .unwrap(); - dbg!( - &incoming_redels_src, - &outgoing_redels_src, - &tot_bonds_src, - &tot_unbonds_src, - &tot_redel_bonds_src, - &tot_redel_unbonds_src - ); - - // Checks - let redelegated = delegator_redelegated_bonds_handle(&delegator) - .at(&dest_validator) - .at(&(current_epoch + params.pipeline_len)) - .at(&src_validator) - .get(&storage, &(init_epoch + params.pipeline_len)) - .unwrap() - .unwrap(); - assert_eq!(redelegated, amount_redelegate); - - let redel_start_epoch = - validator_incoming_redelegations_handle(&dest_validator) - .get(&storage, &delegator) - .unwrap() - .unwrap(); - assert_eq!(redel_start_epoch, current_epoch + params.pipeline_len); - - let redelegated = validator_outgoing_redelegations_handle(&src_validator) - .at(&dest_validator) - .at(¤t_epoch.prev()) - .get(&storage, ¤t_epoch) - .unwrap() - .unwrap(); - assert_eq!(redelegated, amount_redelegate); - - // Advance three epochs - current_epoch = advance_epoch(&mut storage, ¶ms); - super::process_slashes(&mut storage, current_epoch).unwrap(); - current_epoch = advance_epoch(&mut storage, ¶ms); - super::process_slashes(&mut storage, current_epoch).unwrap(); - current_epoch = advance_epoch(&mut storage, ¶ms); - super::process_slashes(&mut storage, current_epoch).unwrap(); - - // Unbond in epoch 11 from dest_validator - println!( - "\nUNBONDING {} TOKENS FROM {}\n", - amount_unbond.to_string_native(), - &dest_validator - ); - let _ = unbond_tokens( - &mut storage, - Some(&delegator), - &dest_validator, - amount_unbond, - current_epoch, - false, - ) - .unwrap(); - - println!("\nAFTER UNBONDING\n"); - println!("\nDELEGATOR\n"); - - let bonds_src = bond_handle(&delegator, &src_validator) - .get_data_handler() - .collect_map(&storage) - .unwrap(); - let bonds_dest = bond_handle(&delegator, &dest_validator) - .get_data_handler() - .collect_map(&storage) - .unwrap(); - let unbonds_src = unbond_handle(&delegator, &src_validator) - .collect_map(&storage) - .unwrap(); - let unbonds_dest = unbond_handle(&delegator, &dest_validator) - .collect_map(&storage) - .unwrap(); - let redel_bonds = delegator_redelegated_bonds_handle(&delegator) - .collect_map(&storage) - .unwrap(); - let redel_unbonds = delegator_redelegated_unbonds_handle(&delegator) - .collect_map(&storage) - .unwrap(); - - dbg!( - &bonds_src, - &bonds_dest, - &unbonds_src, - &unbonds_dest, - &redel_bonds, - &redel_unbonds - ); - - println!("\nDEST VALIDATOR\n"); - - let incoming_redels_dest = - validator_incoming_redelegations_handle(&dest_validator) - .collect_map(&storage) - .unwrap(); - let outgoing_redels_dest = - validator_outgoing_redelegations_handle(&dest_validator) - .collect_map(&storage) - .unwrap(); - let tot_bonds_dest = total_bonded_handle(&dest_validator) - .get_data_handler() - .collect_map(&storage) - .unwrap(); - let tot_unbonds_dest = total_unbonded_handle(&dest_validator) - .collect_map(&storage) - .unwrap(); - let tot_redel_bonds_dest = - validator_total_redelegated_bonded_handle(&dest_validator) - 
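// Note: unlike the simple redelegation test, this variant goes on to report
// evidence against src_validator at epoch init_epoch + 2 * pipeline_len. The
// final assertions then expect the slashed unbond to withdraw nothing, leaving
// the delegator with del_balance - amount_delegate rather than
// del_balance - amount_delegate + amount_unbond as in the unslashed case.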
.collect_map(&storage) - .unwrap(); - let tot_redel_unbonds_dest = - validator_total_redelegated_unbonded_handle(&dest_validator) - .collect_map(&storage) - .unwrap(); - dbg!( - &incoming_redels_dest, - &outgoing_redels_dest, - &tot_bonds_dest, - &tot_unbonds_dest, - &tot_redel_bonds_dest, - &tot_redel_unbonds_dest - ); - - // Advance one epoch - current_epoch = advance_epoch(&mut storage, ¶ms); - super::process_slashes(&mut storage, current_epoch).unwrap(); - - // Discover evidence - slash( - &mut storage, - ¶ms, - current_epoch, - init_epoch + 2 * params.pipeline_len, - 0u64, - SlashType::DuplicateVote, - &src_validator, - current_epoch.next(), - ) - .unwrap(); - - let bond_start = init_epoch + params.pipeline_len; - let redelegation_end = bond_start + params.pipeline_len + 1u64; - let unbond_end = - redelegation_end + params.withdrawable_epoch_offset() + 1u64; - let unbond_materialized = redelegation_end + params.pipeline_len + 1u64; - - // Checks - let redelegated_remaining = delegator_redelegated_bonds_handle(&delegator) - .at(&dest_validator) - .at(&redelegation_end) - .at(&src_validator) - .get(&storage, &bond_start) - .unwrap() - .unwrap_or_default(); - assert_eq!(redelegated_remaining, amount_redelegate - amount_unbond); - - let redel_unbonded = delegator_redelegated_unbonds_handle(&delegator) - .at(&dest_validator) - .at(&redelegation_end) - .at(&unbond_end) - .at(&src_validator) - .get(&storage, &bond_start) - .unwrap() - .unwrap(); - assert_eq!(redel_unbonded, amount_unbond); - - dbg!(unbond_materialized, redelegation_end, bond_start); - let total_redel_unbonded = - validator_total_redelegated_unbonded_handle(&dest_validator) - .at(&unbond_materialized) - .at(&redelegation_end) - .at(&src_validator) - .get(&storage, &bond_start) - .unwrap() - .unwrap(); - assert_eq!(total_redel_unbonded, amount_unbond); - - // Advance to withdrawal epoch - loop { - current_epoch = advance_epoch(&mut storage, ¶ms); - super::process_slashes(&mut storage, current_epoch).unwrap(); - if current_epoch == unbond_end { - break; - } - } - - // Withdraw - withdraw_tokens( - &mut storage, - Some(&delegator), - &dest_validator, - current_epoch, - ) - .unwrap(); - - assert!( - delegator_redelegated_unbonds_handle(&delegator) - .at(&dest_validator) - .is_empty(&storage) - .unwrap() - ); - - let delegator_balance = storage - .read::(&token::balance_key(&staking_token, &delegator)) - .unwrap() - .unwrap_or_default(); - assert_eq!(delegator_balance, del_balance - amount_delegate); -} - -fn test_chain_redelegations_aux(mut validators: Vec) { - validators.sort_by(|a, b| b.tokens.cmp(&a.tokens)); - - let src_validator = validators[0].address.clone(); - let _init_stake_src = validators[0].tokens; - let dest_validator = validators[1].address.clone(); - let _init_stake_dest = validators[1].tokens; - let dest_validator_2 = validators[2].address.clone(); - let _init_stake_dest_2 = validators[2].tokens; - - let mut storage = TestWlStorage::default(); - let params = OwnedPosParams { - unbonding_len: 4, - ..Default::default() - }; - - // Genesis - let mut current_epoch = storage.storage.block.epoch; - let params = test_init_genesis( - &mut storage, - params, - validators.clone().into_iter(), - current_epoch, - ) - .unwrap(); - storage.commit_block().unwrap(); - - // Get a delegator with some tokens - let staking_token = staking_token_address(&storage); - let delegator = address::testing::gen_implicit_address(); - let del_balance = token::Amount::from_uint(1_000_000, 0).unwrap(); - credit_tokens(&mut storage, 
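// Note (informal): the chain-redelegation test below checks that tokens just
// redelegated from src_validator to dest_validator cannot be redelegated
// onward to dest_validator_2 until the first redelegation's slashing window
// has passed: attempts are expected to fail before
// redel_end.prev() + slash_processing_epoch_offset() and to succeed once that
// epoch is reached.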
&staking_token, &delegator, del_balance) - .unwrap(); - - // Delegate in epoch 0 to src_validator - let bond_amount: token::Amount = 100.into(); - super::bond_tokens( - &mut storage, - Some(&delegator), - &src_validator, - bond_amount, - current_epoch, - None, - ) - .unwrap(); - - let bond_start = current_epoch + params.pipeline_len; - - // Advance one epoch - current_epoch = advance_epoch(&mut storage, ¶ms); - super::process_slashes(&mut storage, current_epoch).unwrap(); - - // Redelegate in epoch 1 to dest_validator - let redel_amount_1: token::Amount = 58.into(); - super::redelegate_tokens( - &mut storage, - &delegator, - &src_validator, - &dest_validator, - current_epoch, - redel_amount_1, - ) - .unwrap(); - - let redel_start = current_epoch; - let redel_end = current_epoch + params.pipeline_len; - - // Checks ---------------- - - // Dest validator should have an incoming redelegation - let incoming_redelegation = - validator_incoming_redelegations_handle(&dest_validator) - .get(&storage, &delegator) - .unwrap(); - assert_eq!(incoming_redelegation, Some(redel_end)); - - // Src validator should have an outoging redelegation - let outgoing_redelegation = - validator_outgoing_redelegations_handle(&src_validator) - .at(&dest_validator) - .at(&bond_start) - .get(&storage, &redel_start) - .unwrap(); - assert_eq!(outgoing_redelegation, Some(redel_amount_1)); - - // Delegator should have redelegated bonds - let del_total_redelegated_bonded = - delegator_redelegated_bonds_handle(&delegator) - .at(&dest_validator) - .at(&redel_end) - .at(&src_validator) - .get(&storage, &bond_start) - .unwrap() - .unwrap_or_default(); - assert_eq!(del_total_redelegated_bonded, redel_amount_1); - - // There should be delegator bonds for both src and dest validators - let bonded_src = bond_handle(&delegator, &src_validator); - let bonded_dest = bond_handle(&delegator, &dest_validator); - assert_eq!( - bonded_src - .get_delta_val(&storage, bond_start) - .unwrap() - .unwrap_or_default(), - bond_amount - redel_amount_1 - ); - assert_eq!( - bonded_dest - .get_delta_val(&storage, redel_end) - .unwrap() - .unwrap_or_default(), - redel_amount_1 - ); - - // The dest validator should have total redelegated bonded tokens - let dest_total_redelegated_bonded = - validator_total_redelegated_bonded_handle(&dest_validator) - .at(&redel_end) - .at(&src_validator) - .get(&storage, &bond_start) - .unwrap() - .unwrap_or_default(); - assert_eq!(dest_total_redelegated_bonded, redel_amount_1); - - // The dest validator's total bonded should have an entry for the genesis - // bond and the redelegation - let dest_total_bonded = total_bonded_handle(&dest_validator) - .get_data_handler() - .collect_map(&storage) - .unwrap(); - assert!( - dest_total_bonded.len() == 2 - && dest_total_bonded.contains_key(&Epoch::default()) - ); - assert_eq!( - dest_total_bonded - .get(&redel_end) - .cloned() - .unwrap_or_default(), - redel_amount_1 - ); - - // The src validator should have a total bonded entry for the original bond - // accounting for the redelegation - assert_eq!( - total_bonded_handle(&src_validator) - .get_delta_val(&storage, bond_start) - .unwrap() - .unwrap_or_default(), - bond_amount - redel_amount_1 - ); - - // The src validator should have a total unbonded entry due to the - // redelegation - let src_total_unbonded = total_unbonded_handle(&src_validator) - .at(&redel_end) - .get(&storage, &bond_start) - .unwrap() - .unwrap_or_default(); - assert_eq!(src_total_unbonded, redel_amount_1); - - // Attempt to redelegate in epoch 3 to 
dest_validator - current_epoch = advance_epoch(&mut storage, ¶ms); - super::process_slashes(&mut storage, current_epoch).unwrap(); - current_epoch = advance_epoch(&mut storage, ¶ms); - super::process_slashes(&mut storage, current_epoch).unwrap(); - - let redel_amount_2: token::Amount = 23.into(); - let redel_att = super::redelegate_tokens( - &mut storage, - &delegator, - &dest_validator, - &dest_validator_2, - current_epoch, - redel_amount_2, - ); - assert!(redel_att.is_err()); - - // Advance to right before the redelegation can be redelegated again - assert_eq!(redel_end, current_epoch); - let epoch_can_redel = - redel_end.prev() + params.slash_processing_epoch_offset(); - loop { - current_epoch = advance_epoch(&mut storage, ¶ms); - super::process_slashes(&mut storage, current_epoch).unwrap(); - if current_epoch == epoch_can_redel.prev() { - break; - } - } - - // Attempt to redelegate in epoch before we actually are able to - let redel_att = super::redelegate_tokens( - &mut storage, - &delegator, - &dest_validator, - &dest_validator_2, - current_epoch, - redel_amount_2, - ); - assert!(redel_att.is_err()); - - // Advance one more epoch - current_epoch = advance_epoch(&mut storage, ¶ms); - super::process_slashes(&mut storage, current_epoch).unwrap(); - - // Redelegate from dest_validator to dest_validator_2 now - super::redelegate_tokens( - &mut storage, - &delegator, - &dest_validator, - &dest_validator_2, - current_epoch, - redel_amount_2, - ) - .unwrap(); - - let redel_2_start = current_epoch; - let redel_2_end = current_epoch + params.pipeline_len; - - // Checks ----------------------------------- - - // Both the dest validator and dest validator 2 should have incoming - // redelegations - let incoming_redelegation_1 = - validator_incoming_redelegations_handle(&dest_validator) - .get(&storage, &delegator) - .unwrap(); - assert_eq!(incoming_redelegation_1, Some(redel_end)); - let incoming_redelegation_2 = - validator_incoming_redelegations_handle(&dest_validator_2) - .get(&storage, &delegator) - .unwrap(); - assert_eq!(incoming_redelegation_2, Some(redel_2_end)); - - // Both the src validator and dest validator should have outgoing - // redelegations - let outgoing_redelegation_1 = - validator_outgoing_redelegations_handle(&src_validator) - .at(&dest_validator) - .at(&bond_start) - .get(&storage, &redel_start) - .unwrap(); - assert_eq!(outgoing_redelegation_1, Some(redel_amount_1)); - - let outgoing_redelegation_2 = - validator_outgoing_redelegations_handle(&dest_validator) - .at(&dest_validator_2) - .at(&redel_end) - .get(&storage, &redel_2_start) - .unwrap(); - assert_eq!(outgoing_redelegation_2, Some(redel_amount_2)); - - // All three validators should have bonds - let bonded_dest2 = bond_handle(&delegator, &dest_validator_2); - assert_eq!( - bonded_src - .get_delta_val(&storage, bond_start) - .unwrap() - .unwrap_or_default(), - bond_amount - redel_amount_1 - ); - assert_eq!( - bonded_dest - .get_delta_val(&storage, redel_end) - .unwrap() - .unwrap_or_default(), - redel_amount_1 - redel_amount_2 - ); - assert_eq!( - bonded_dest2 - .get_delta_val(&storage, redel_2_end) - .unwrap() - .unwrap_or_default(), - redel_amount_2 - ); - - // There should be no unbond entries - let unbond_src = unbond_handle(&delegator, &src_validator); - let unbond_dest = unbond_handle(&delegator, &dest_validator); - assert!(unbond_src.is_empty(&storage).unwrap()); - assert!(unbond_dest.is_empty(&storage).unwrap()); - - // The dest validator should have some total unbonded due to the second - // redelegation - 
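// Illustrative sketch (not part of the original test): the chained-redelegation
// guard exercised above, in plain epoch arithmetic. `slash_processing_offset` is
// kept as an opaque parameter because its exact definition lives in
// `PosParams::slash_processing_epoch_offset`; only the shape
// `redel_end.prev() + offset` is taken from the test itself.
fn first_epoch_allowing_re_redelegation(
    redel_end: u64,
    slash_processing_offset: u64,
) -> u64 {
    // Tokens redelegated once stay "tainted" by their source validator until any
    // slash that could still affect the first redelegation has been processed.
    (redel_end - 1) + slash_processing_offset
}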
let dest_total_unbonded = total_unbonded_handle(&dest_validator) - .at(&redel_2_end) - .get(&storage, &redel_end) - .unwrap(); - assert_eq!(dest_total_unbonded, Some(redel_amount_2)); - - // Delegator should have redelegated bonds due to both redelegations - let del_redelegated_bonds = delegator_redelegated_bonds_handle(&delegator); - assert_eq!( - Some(redel_amount_1 - redel_amount_2), - del_redelegated_bonds - .at(&dest_validator) - .at(&redel_end) - .at(&src_validator) - .get(&storage, &bond_start) - .unwrap() - ); - assert_eq!( - Some(redel_amount_2), - del_redelegated_bonds - .at(&dest_validator_2) - .at(&redel_2_end) - .at(&dest_validator) - .get(&storage, &redel_end) - .unwrap() - ); - - // Delegator redelegated unbonds should be empty - assert!( - delegator_redelegated_unbonds_handle(&delegator) - .is_empty(&storage) - .unwrap() - ); - - // Both the dest validator and dest validator 2 should have total - // redelegated bonds - let dest_redelegated_bonded = - validator_total_redelegated_bonded_handle(&dest_validator) - .at(&redel_end) - .at(&src_validator) - .get(&storage, &bond_start) - .unwrap() - .unwrap_or_default(); - let dest2_redelegated_bonded = - validator_total_redelegated_bonded_handle(&dest_validator_2) - .at(&redel_2_end) - .at(&dest_validator) - .get(&storage, &redel_end) - .unwrap() - .unwrap_or_default(); - assert_eq!(dest_redelegated_bonded, redel_amount_1 - redel_amount_2); - assert_eq!(dest2_redelegated_bonded, redel_amount_2); - - // Total redelegated unbonded should be empty for src_validator and - // dest_validator_2 - assert!( - validator_total_redelegated_unbonded_handle(&dest_validator_2) - .is_empty(&storage) - .unwrap() - ); - assert!( - validator_total_redelegated_unbonded_handle(&src_validator) - .is_empty(&storage) - .unwrap() - ); - - // The dest_validator should have total_redelegated unbonded - let tot_redel_unbonded = - validator_total_redelegated_unbonded_handle(&dest_validator) - .at(&redel_2_end) - .at(&redel_end) - .at(&src_validator) - .get(&storage, &bond_start) - .unwrap() - .unwrap_or_default(); - assert_eq!(tot_redel_unbonded, redel_amount_2); -} - -/// SM test case 1 from Brent -#[test] -fn test_from_sm_case_1() { - use namada_core::types::address::testing::established_address_4; - - let mut storage = TestWlStorage::default(); - let gov_params = namada_core::ledger::governance::parameters::GovernanceParameters::default(); - gov_params.init_storage(&mut storage).unwrap(); - write_pos_params(&mut storage, &OwnedPosParams::default()).unwrap(); - - let validator = established_address_1(); - let redeleg_src_1 = established_address_2(); - let redeleg_src_2 = established_address_3(); - let owner = established_address_4(); - let unbond_amount = token::Amount::from(3130688); - println!( - "Owner: {owner}\nValidator: {validator}\nRedeleg src 1: \ - {redeleg_src_1}\nRedeleg src 2: {redeleg_src_2}" - ); - - // Validator's incoming redelegations - let outer_epoch_1 = Epoch(27); - // from redeleg_src_1 - let epoch_1_redeleg_1 = token::Amount::from(8516); - // from redeleg_src_2 - let epoch_1_redeleg_2 = token::Amount::from(5704386); - let outer_epoch_2 = Epoch(30); - // from redeleg_src_2 - let epoch_2_redeleg_2 = token::Amount::from(1035191); - - // Insert the data - bonds and redelegated bonds - let bonds_handle = bond_handle(&owner, &validator); - bonds_handle - .add( - &mut storage, - epoch_1_redeleg_1 + epoch_1_redeleg_2, - outer_epoch_1, - 0, - ) - .unwrap(); - bonds_handle - .add(&mut storage, epoch_2_redeleg_2, outer_epoch_2, 0) - .unwrap(); - - 
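// Plain-arithmetic sketch (illustrative only, assuming bonds are consumed
// newest-epoch first, as the assertions later in this test suggest): how the
// unbond of 3_130_688 splits across the two bond entries created above.
fn split_unbond_sketch() {
    let bond_epoch_27: u64 = 8_516 + 5_704_386; // 5_712_902 bonded at epoch 27
    let bond_epoch_30: u64 = 1_035_191;
    let unbond: u64 = 3_130_688;

    // The newer (epoch 30) bond is removed in full first.
    let left_for_epoch_27 = unbond - bond_epoch_30; // 2_095_497
    // The epoch-27 bond is only partially consumed, leaving a modified entry.
    let modified_epoch_27_bond = bond_epoch_27 - left_for_epoch_27;
    assert_eq!(modified_epoch_27_bond, 3_617_405);
}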
let redelegated_bonds_map_1 = delegator_redelegated_bonds_handle(&owner) - .at(&validator) - .at(&outer_epoch_1); - redelegated_bonds_map_1 - .at(&redeleg_src_1) - .insert(&mut storage, Epoch(14), epoch_1_redeleg_1) - .unwrap(); - redelegated_bonds_map_1 - .at(&redeleg_src_2) - .insert(&mut storage, Epoch(18), epoch_1_redeleg_2) - .unwrap(); - let redelegated_bonds_map_1 = delegator_redelegated_bonds_handle(&owner) - .at(&validator) - .at(&outer_epoch_1); - - let redelegated_bonds_map_2 = delegator_redelegated_bonds_handle(&owner) - .at(&validator) - .at(&outer_epoch_2); - redelegated_bonds_map_2 - .at(&redeleg_src_2) - .insert(&mut storage, Epoch(18), epoch_2_redeleg_2) - .unwrap(); - - // Find the modified redelegation the same way as `unbond_tokens` - let bonds_to_unbond = find_bonds_to_remove( - &storage, - &bonds_handle.get_data_handler(), - unbond_amount, - ) - .unwrap(); - dbg!(&bonds_to_unbond); - - let (new_entry_epoch, new_bond_amount) = bonds_to_unbond.new_entry.unwrap(); - assert_eq!(outer_epoch_1, new_entry_epoch); - // The modified bond should be sum of all redelegations less the unbonded - // amouunt - assert_eq!( - epoch_1_redeleg_1 + epoch_1_redeleg_2 + epoch_2_redeleg_2 - - unbond_amount, - new_bond_amount - ); - // The current bond should be sum of redelegations from the modified epoch - let cur_bond_amount = bonds_handle - .get_delta_val(&storage, new_entry_epoch) - .unwrap() - .unwrap_or_default(); - assert_eq!(epoch_1_redeleg_1 + epoch_1_redeleg_2, cur_bond_amount); - - let mr = compute_modified_redelegation( - &storage, - &redelegated_bonds_map_1, - new_entry_epoch, - cur_bond_amount - new_bond_amount, - ) - .unwrap(); - - let exp_mr = ModifiedRedelegation { - epoch: Some(Epoch(27)), - validators_to_remove: BTreeSet::from_iter([redeleg_src_2.clone()]), - validator_to_modify: Some(redeleg_src_2), - epochs_to_remove: BTreeSet::from_iter([Epoch(18)]), - epoch_to_modify: Some(Epoch(18)), - new_amount: Some(token::Amount::from(3608889)), - }; - - pretty_assertions::assert_eq!(mr, exp_mr); -} - -/// Test precisely that we are not overslashing, as originally discovered by Tomas in this issue: https://github.com/informalsystems/partnership-heliax/issues/74 -fn test_overslashing_aux(mut validators: Vec) { - assert_eq!(validators.len(), 4); - - let params = OwnedPosParams { - unbonding_len: 4, - ..Default::default() - }; - - let offending_stake = token::Amount::native_whole(110); - let other_stake = token::Amount::native_whole(100); - - // Set stakes so we know we will get a slashing rate between 0.5 -1.0 - validators[0].tokens = offending_stake; - validators[1].tokens = other_stake; - validators[2].tokens = other_stake; - validators[3].tokens = other_stake; - - // Get the offending validator - let validator = validators[0].address.clone(); - - println!("\nTest inputs: {params:?}, genesis validators: {validators:#?}"); - let mut storage = TestWlStorage::default(); - - // Genesis - let mut current_epoch = storage.storage.block.epoch; - let params = test_init_genesis( - &mut storage, - params, - validators.clone().into_iter(), - current_epoch, - ) - .unwrap(); - storage.commit_block().unwrap(); - - // Get a delegator with some tokens - let staking_token = storage.storage.native_token.clone(); - let delegator = address::testing::gen_implicit_address(); - let amount_del = token::Amount::native_whole(5); - credit_tokens(&mut storage, &staking_token, &delegator, amount_del) - .unwrap(); - - // Delegate tokens in epoch 0 to validator - bond_tokens( - &mut storage, - Some(&delegator), 
- &validator, - amount_del, - current_epoch, - None, - ) - .unwrap(); - - let self_bond_epoch = current_epoch; - let delegation_epoch = current_epoch + params.pipeline_len; - - // Advance to pipeline epoch - for _ in 0..params.pipeline_len { - current_epoch = advance_epoch(&mut storage, ¶ms); - } - assert_eq!(delegation_epoch, current_epoch); - - // Find a misbehavior committed in epoch 0 - slash( - &mut storage, - ¶ms, - current_epoch, - self_bond_epoch, - 0_u64, - SlashType::DuplicateVote, - &validator, - current_epoch.next(), - ) - .unwrap(); - - // Find a misbehavior committed in current epoch - slash( - &mut storage, - ¶ms, - current_epoch, - delegation_epoch, - 0_u64, - SlashType::DuplicateVote, - &validator, - current_epoch.next(), - ) - .unwrap(); - - let processing_epoch_1 = - self_bond_epoch + params.slash_processing_epoch_offset(); - let processing_epoch_2 = - delegation_epoch + params.slash_processing_epoch_offset(); - - // Advance to processing epoch 1 - loop { - current_epoch = advance_epoch(&mut storage, ¶ms); - super::process_slashes(&mut storage, current_epoch).unwrap(); - if current_epoch == processing_epoch_1 { - break; - } - } - - let total_stake_1 = offending_stake + 3 * other_stake; - let stake_frac = Dec::from(offending_stake) / Dec::from(total_stake_1); - let slash_rate_1 = Dec::from_str("9.0").unwrap() * stake_frac * stake_frac; - dbg!(&slash_rate_1); - - let exp_slashed_1 = offending_stake.mul_ceil(slash_rate_1); - - // Check that the proper amount was slashed - let epoch = current_epoch.next(); - let validator_stake = - read_validator_stake(&storage, ¶ms, &validator, epoch).unwrap(); - let exp_validator_stake = offending_stake - exp_slashed_1 + amount_del; - assert_eq!(validator_stake, exp_validator_stake); - - let total_stake = read_total_stake(&storage, ¶ms, epoch).unwrap(); - let exp_total_stake = - offending_stake - exp_slashed_1 + amount_del + 3 * other_stake; - assert_eq!(total_stake, exp_total_stake); - - let self_bond_id = BondId { - source: validator.clone(), - validator: validator.clone(), - }; - let bond_amount = - crate::bond_amount(&storage, &self_bond_id, epoch).unwrap(); - let exp_bond_amount = offending_stake - exp_slashed_1; - assert_eq!(bond_amount, exp_bond_amount); - - // Advance to processing epoch 2 - loop { - current_epoch = advance_epoch(&mut storage, ¶ms); - super::process_slashes(&mut storage, current_epoch).unwrap(); - if current_epoch == processing_epoch_2 { - break; - } - } - - let total_stake_2 = offending_stake + amount_del + 3 * other_stake; - let stake_frac = - Dec::from(offending_stake + amount_del) / Dec::from(total_stake_2); - let slash_rate_2 = Dec::from_str("9.0").unwrap() * stake_frac * stake_frac; - dbg!(&slash_rate_2); - - let exp_slashed_from_delegation = amount_del.mul_ceil(slash_rate_2); - - // Check that the proper amount was slashed. We expect that all of the - // validator self-bond has been slashed and some of the delegation has been - // slashed due to the second infraction. 
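// Minimal sketch of the slashing-rate formula used for `slash_rate_1` and
// `slash_rate_2` above: nine times the square of the misbehaving stake's fraction
// of total stake. The real protocol additionally clamps the rate (the stakes in
// this test are deliberately chosen so the clamp is not hit), so treat this as
// illustrative only.
fn cubic_rate_sketch(offending_stake: f64, total_stake: f64) -> f64 {
    let frac = offending_stake / total_stake;
    9.0 * frac * frac
}
// For the first infraction above: cubic_rate_sketch(110.0, 410.0) ≈ 0.648.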
- let epoch = current_epoch.next(); - - let validator_stake = - read_validator_stake(&storage, ¶ms, &validator, epoch).unwrap(); - let exp_validator_stake = amount_del - exp_slashed_from_delegation; - assert_eq!(validator_stake, exp_validator_stake); - - let total_stake = read_total_stake(&storage, ¶ms, epoch).unwrap(); - let exp_total_stake = - amount_del - exp_slashed_from_delegation + 3 * other_stake; - assert_eq!(total_stake, exp_total_stake); - - let delegation_id = BondId { - source: delegator.clone(), - validator: validator.clone(), - }; - let delegation_amount = - crate::bond_amount(&storage, &delegation_id, epoch).unwrap(); - let exp_del_amount = amount_del - exp_slashed_from_delegation; - assert_eq!(delegation_amount, exp_del_amount); - - let self_bond_amount = - crate::bond_amount(&storage, &self_bond_id, epoch).unwrap(); - let exp_bond_amount = token::Amount::zero(); - assert_eq!(self_bond_amount, exp_bond_amount); -} - -fn test_unslashed_bond_amount_aux(validators: Vec) { - let mut storage = TestWlStorage::default(); - let params = OwnedPosParams { - unbonding_len: 4, - ..Default::default() - }; - - // Genesis - let mut current_epoch = storage.storage.block.epoch; - let params = test_init_genesis( - &mut storage, - params, - validators.clone().into_iter(), - current_epoch, - ) - .unwrap(); - storage.commit_block().unwrap(); - - let validator1 = validators[0].address.clone(); - let validator2 = validators[1].address.clone(); - - // Get a delegator with some tokens - let staking_token = staking_token_address(&storage); - let delegator = address::testing::gen_implicit_address(); - let del_balance = token::Amount::from_uint(1_000_000, 0).unwrap(); - credit_tokens(&mut storage, &staking_token, &delegator, del_balance) - .unwrap(); - - // Bond to validator 1 - super::bond_tokens( - &mut storage, - Some(&delegator), - &validator1, - 10_000.into(), - current_epoch, - None, - ) - .unwrap(); - - // Unbond some from validator 1 - super::unbond_tokens( - &mut storage, - Some(&delegator), - &validator1, - 1_342.into(), - current_epoch, - false, - ) - .unwrap(); - - // Redelegate some from validator 1 -> 2 - super::redelegate_tokens( - &mut storage, - &delegator, - &validator1, - &validator2, - current_epoch, - 1_875.into(), - ) - .unwrap(); - - // Unbond some from validator 2 - super::unbond_tokens( - &mut storage, - Some(&delegator), - &validator2, - 584.into(), - current_epoch, - false, - ) - .unwrap(); - - // Advance an epoch - current_epoch = advance_epoch(&mut storage, ¶ms); - super::process_slashes(&mut storage, current_epoch).unwrap(); - - // Bond to validator 1 - super::bond_tokens( - &mut storage, - Some(&delegator), - &validator1, - 384.into(), - current_epoch, - None, - ) - .unwrap(); - - // Unbond some from validator 1 - super::unbond_tokens( - &mut storage, - Some(&delegator), - &validator1, - 144.into(), - current_epoch, - false, - ) - .unwrap(); - - // Redelegate some from validator 1 -> 2 - super::redelegate_tokens( - &mut storage, - &delegator, - &validator1, - &validator2, - current_epoch, - 3_448.into(), - ) - .unwrap(); - - // Unbond some from validator 2 - super::unbond_tokens( - &mut storage, - Some(&delegator), - &validator2, - 699.into(), - current_epoch, - false, - ) - .unwrap(); - - // Advance an epoch - current_epoch = advance_epoch(&mut storage, ¶ms); - super::process_slashes(&mut storage, current_epoch).unwrap(); - - // Bond to validator 1 - super::bond_tokens( - &mut storage, - Some(&delegator), - &validator1, - 4_384.into(), - current_epoch, - None, - ) - 
.unwrap(); - - // Redelegate some from validator 1 -> 2 - super::redelegate_tokens( - &mut storage, - &delegator, - &validator1, - &validator2, - current_epoch, - 1_008.into(), - ) - .unwrap(); - - // Unbond some from validator 2 - super::unbond_tokens( - &mut storage, - Some(&delegator), - &validator2, - 3_500.into(), - current_epoch, - false, - ) - .unwrap(); - - // Checks - let val1_init_stake = validators[0].tokens; - - for epoch in Epoch::iter_bounds_inclusive( - Epoch(0), - current_epoch + params.pipeline_len, - ) { - let bond_amount = crate::bond_amount( - &storage, - &BondId { - source: delegator.clone(), - validator: validator1.clone(), - }, - epoch, - ) - .unwrap_or_default(); - - let val_stake = - crate::read_validator_stake(&storage, ¶ms, &validator1, epoch) - .unwrap(); - // dbg!(&bond_amount); - assert_eq!(val_stake - val1_init_stake, bond_amount); - } -} - -fn test_log_block_rewards_aux( - validators: Vec, - params: OwnedPosParams, -) { - tracing::info!( - "New case with {} validators: {:#?}", - validators.len(), - validators - .iter() - .map(|v| (&v.address, v.tokens.to_string_native())) - .collect::>() - ); - let mut s = TestWlStorage::default(); - // Init genesis - let current_epoch = s.storage.block.epoch; - let params = test_init_genesis( - &mut s, - params, - validators.clone().into_iter(), - current_epoch, - ) - .unwrap(); - s.commit_block().unwrap(); - let total_stake = - crate::get_total_consensus_stake(&s, current_epoch, ¶ms).unwrap(); - let consensus_set = - crate::read_consensus_validator_set_addresses(&s, current_epoch) - .unwrap(); - let proposer_address = consensus_set.iter().next().unwrap().clone(); - - tracing::info!( - ?params.block_proposer_reward, - ?params.block_vote_reward, - ); - tracing::info!(?proposer_address,); - - // Rewards accumulator should be empty at first - let rewards_handle = crate::rewards_accumulator_handle(); - assert!(rewards_handle.is_empty(&s).unwrap()); - - let mut last_rewards = BTreeMap::default(); - - let num_blocks = 100; - // Loop through `num_blocks`, log rewards & check results - for i in 0..num_blocks { - tracing::info!(""); - tracing::info!("Block {}", i + 1); - - // A helper closure to prepare minimum required votes - let prep_votes = |epoch| { - // Ceil of 2/3 of total stake - let min_required_votes = total_stake.mul_ceil(Dec::two() / 3); - - let mut total_votes = token::Amount::zero(); - let mut non_voters = HashSet::
::default(); - let mut prep_vote = |validator| { - // Add validator vote if it's in consensus set and if we don't - // yet have min required votes - if consensus_set.contains(validator) - && total_votes < min_required_votes - { - let stake = - read_validator_stake(&s, ¶ms, validator, epoch) - .unwrap(); - total_votes += stake; - let validator_vp = - into_tm_voting_power(params.tm_votes_per_token, stake) - as u64; - tracing::info!("Validator {validator} signed"); - Some(VoteInfo { - validator_address: validator.clone(), - validator_vp, - }) - } else { - non_voters.insert(validator.clone()); - None - } - }; - - let votes: Vec = validators - .iter() - .rev() - .filter_map(|validator| prep_vote(&validator.address)) - .collect(); - (votes, total_votes, non_voters) - }; - - let (votes, signing_stake, non_voters) = prep_votes(current_epoch); - crate::log_block_rewards( - &mut s, - current_epoch, - &proposer_address, - votes.clone(), - ) - .unwrap(); - - assert!(!rewards_handle.is_empty(&s).unwrap()); - - let rewards_calculator = PosRewardsCalculator { - proposer_reward: params.block_proposer_reward, - signer_reward: params.block_vote_reward, - signing_stake, - total_stake, - }; - let coeffs = rewards_calculator.get_reward_coeffs().unwrap(); - tracing::info!(?coeffs); - - // Check proposer reward - let stake = - read_validator_stake(&s, ¶ms, &proposer_address, current_epoch) - .unwrap(); - let proposer_signing_reward = votes.iter().find_map(|vote| { - if vote.validator_address == proposer_address { - let signing_fraction = - Dec::from(stake) / Dec::from(signing_stake); - Some(coeffs.signer_coeff * signing_fraction) - } else { - None - } - }); - let expected_proposer_rewards = last_rewards.get(&proposer_address).copied().unwrap_or_default() + - // Proposer reward - coeffs.proposer_coeff - // Consensus validator reward - + (coeffs.active_val_coeff - * (Dec::from(stake) / Dec::from(total_stake))) - // Signing reward (if proposer voted) - + proposer_signing_reward - .unwrap_or_default(); - tracing::info!( - "Expected proposer rewards: {expected_proposer_rewards}. Signed \ - block: {}", - proposer_signing_reward.is_some() - ); - assert_eq!( - rewards_handle.get(&s, &proposer_address).unwrap(), - Some(expected_proposer_rewards) - ); - - // Check voters rewards - for VoteInfo { - validator_address, .. 
- } in votes.iter() - { - // Skip proposer, in case voted - already checked - if validator_address == &proposer_address { - continue; - } - - let stake = read_validator_stake( - &s, - ¶ms, - validator_address, - current_epoch, - ) - .unwrap(); - let signing_fraction = Dec::from(stake) / Dec::from(signing_stake); - let expected_signer_rewards = last_rewards - .get(validator_address) - .copied() - .unwrap_or_default() - + coeffs.signer_coeff * signing_fraction - + (coeffs.active_val_coeff - * (Dec::from(stake) / Dec::from(total_stake))); - tracing::info!( - "Expected signer {validator_address} rewards: \ - {expected_signer_rewards}" - ); - assert_eq!( - rewards_handle.get(&s, validator_address).unwrap(), - Some(expected_signer_rewards) - ); - } - - // Check non-voters rewards, if any - for address in non_voters { - // Skip proposer, in case it didn't vote - already checked - if address == proposer_address { - continue; - } - - if consensus_set.contains(&address) { - let stake = - read_validator_stake(&s, ¶ms, &address, current_epoch) - .unwrap(); - let expected_non_signer_rewards = - last_rewards.get(&address).copied().unwrap_or_default() - + coeffs.active_val_coeff - * (Dec::from(stake) / Dec::from(total_stake)); - tracing::info!( - "Expected non-signer {address} rewards: \ - {expected_non_signer_rewards}" - ); - assert_eq!( - rewards_handle.get(&s, &address).unwrap(), - Some(expected_non_signer_rewards) - ); - } else { - let last_reward = last_rewards.get(&address).copied(); - assert_eq!( - rewards_handle.get(&s, &address).unwrap(), - last_reward - ); - } - } - s.commit_block().unwrap(); - - last_rewards = - crate::rewards_accumulator_handle().collect_map(&s).unwrap(); - - let rewards_sum: Dec = last_rewards.values().copied().sum(); - let expected_sum = Dec::one() * (i as u64 + 1); - let err_tolerance = Dec::new(1, 9).unwrap(); - let fail_msg = format!( - "Expected rewards sum at block {} to be {expected_sum}, got \ - {rewards_sum}. 
Error tolerance {err_tolerance}.", - i + 1 - ); - assert!(expected_sum <= rewards_sum + err_tolerance, "{fail_msg}"); - assert!(rewards_sum <= expected_sum, "{fail_msg}"); - } -} - -fn test_update_rewards_products_aux(validators: Vec) { - tracing::info!( - "New case with {} validators: {:#?}", - validators.len(), - validators - .iter() - .map(|v| (&v.address, v.tokens.to_string_native())) - .collect::>() - ); - let mut s = TestWlStorage::default(); - // Init genesis - let current_epoch = s.storage.block.epoch; - let params = OwnedPosParams::default(); - let params = test_init_genesis( - &mut s, - params, - validators.into_iter(), - current_epoch, - ) - .unwrap(); - s.commit_block().unwrap(); - - let staking_token = staking_token_address(&s); - let consensus_set = - crate::read_consensus_validator_set_addresses(&s, current_epoch) - .unwrap(); - - // Start a new epoch - let current_epoch = advance_epoch(&mut s, ¶ms); - - // Read some data before applying rewards - let pos_balance_pre = - read_balance(&s, &staking_token, &address::POS).unwrap(); - let gov_balance_pre = - read_balance(&s, &staking_token, &address::GOV).unwrap(); - - let num_consensus_validators = consensus_set.len() as u64; - let accum_val = Dec::one() / num_consensus_validators; - let num_blocks_in_last_epoch = 1000; - - // Assign some reward accumulator values to consensus validator - for validator in &consensus_set { - crate::rewards_accumulator_handle() - .insert( - &mut s, - validator.clone(), - accum_val * num_blocks_in_last_epoch, - ) - .unwrap(); - } - - // Distribute inflation into rewards - let last_epoch = current_epoch.prev(); - let inflation = token::Amount::native_whole(10_000_000); - crate::update_rewards_products_and_mint_inflation( - &mut s, - ¶ms, - last_epoch, - num_blocks_in_last_epoch, - inflation, - &staking_token, - ) - .unwrap(); - - let pos_balance_post = - read_balance(&s, &staking_token, &address::POS).unwrap(); - let gov_balance_post = - read_balance(&s, &staking_token, &address::GOV).unwrap(); - - assert_eq!( - pos_balance_pre + gov_balance_pre + inflation, - pos_balance_post + gov_balance_post, - "Expected inflation to be minted to PoS and left-over amount to Gov" - ); - - let pos_credit = pos_balance_post - pos_balance_pre; - let gov_credit = gov_balance_post - gov_balance_pre; - assert!( - pos_credit > gov_credit, - "PoS must receive more tokens than Gov, but got {} in PoS and {} in \ - Gov", - pos_credit.to_string_native(), - gov_credit.to_string_native() - ); - - // Rewards accumulator must be cleared out - let rewards_handle = crate::rewards_accumulator_handle(); - assert!(rewards_handle.is_empty(&s).unwrap()); -} - -fn test_slashed_bond_amount_aux(validators: Vec) { - let mut storage = TestWlStorage::default(); - let params = OwnedPosParams { - unbonding_len: 4, - ..Default::default() - }; - - let init_tot_stake = validators - .clone() - .into_iter() - .fold(token::Amount::zero(), |acc, v| acc + v.tokens); - let val1_init_stake = validators[0].tokens; - - let mut validators = validators; - validators[0].tokens = (init_tot_stake - val1_init_stake) / 30; - - // Genesis - let mut current_epoch = storage.storage.block.epoch; - let params = test_init_genesis( - &mut storage, - params, - validators.clone().into_iter(), - current_epoch, - ) - .unwrap(); - storage.commit_block().unwrap(); - - let validator1 = validators[0].address.clone(); - let validator2 = validators[1].address.clone(); - - // Get a delegator with some tokens - let staking_token = staking_token_address(&storage); - let delegator = 
address::testing::gen_implicit_address(); - let del_balance = token::Amount::from_uint(1_000_000, 0).unwrap(); - credit_tokens(&mut storage, &staking_token, &delegator, del_balance) - .unwrap(); - - // Bond to validator 1 - super::bond_tokens( - &mut storage, - Some(&delegator), - &validator1, - 10_000.into(), - current_epoch, - None, - ) - .unwrap(); - - // Unbond some from validator 1 - super::unbond_tokens( - &mut storage, - Some(&delegator), - &validator1, - 1_342.into(), - current_epoch, - false, - ) - .unwrap(); - - // Redelegate some from validator 1 -> 2 - super::redelegate_tokens( - &mut storage, - &delegator, - &validator1, - &validator2, - current_epoch, - 1_875.into(), - ) - .unwrap(); - - // Unbond some from validator 2 - super::unbond_tokens( - &mut storage, - Some(&delegator), - &validator2, - 584.into(), - current_epoch, - false, - ) - .unwrap(); - - // Advance an epoch to 1 - current_epoch = advance_epoch(&mut storage, ¶ms); - super::process_slashes(&mut storage, current_epoch).unwrap(); - - // Bond to validator 1 - super::bond_tokens( - &mut storage, - Some(&delegator), - &validator1, - 384.into(), - current_epoch, - None, - ) - .unwrap(); - - // Unbond some from validator 1 - super::unbond_tokens( - &mut storage, - Some(&delegator), - &validator1, - 144.into(), - current_epoch, - false, - ) - .unwrap(); - - // Redelegate some from validator 1 -> 2 - super::redelegate_tokens( - &mut storage, - &delegator, - &validator1, - &validator2, - current_epoch, - 3_448.into(), - ) - .unwrap(); - - // Unbond some from validator 2 - super::unbond_tokens( - &mut storage, - Some(&delegator), - &validator2, - 699.into(), - current_epoch, - false, - ) - .unwrap(); - - // Advance an epoch to ep 2 - current_epoch = advance_epoch(&mut storage, ¶ms); - super::process_slashes(&mut storage, current_epoch).unwrap(); - - // Bond to validator 1 - super::bond_tokens( - &mut storage, - Some(&delegator), - &validator1, - 4_384.into(), - current_epoch, - None, - ) - .unwrap(); - - // Redelegate some from validator 1 -> 2 - super::redelegate_tokens( - &mut storage, - &delegator, - &validator1, - &validator2, - current_epoch, - 1_008.into(), - ) - .unwrap(); - - // Unbond some from validator 2 - super::unbond_tokens( - &mut storage, - Some(&delegator), - &validator2, - 3_500.into(), - current_epoch, - false, - ) - .unwrap(); - - // Advance two epochs to ep 4 - for _ in 0..2 { - current_epoch = advance_epoch(&mut storage, ¶ms); - super::process_slashes(&mut storage, current_epoch).unwrap(); - } - - // Find some slashes committed in various epochs - super::slash( - &mut storage, - ¶ms, - current_epoch, - Epoch(1), - 1_u64, - SlashType::DuplicateVote, - &validator1, - current_epoch, - ) - .unwrap(); - super::slash( - &mut storage, - ¶ms, - current_epoch, - Epoch(2), - 1_u64, - SlashType::DuplicateVote, - &validator1, - current_epoch, - ) - .unwrap(); - super::slash( - &mut storage, - ¶ms, - current_epoch, - Epoch(2), - 1_u64, - SlashType::DuplicateVote, - &validator1, - current_epoch, - ) - .unwrap(); - super::slash( - &mut storage, - ¶ms, - current_epoch, - Epoch(3), - 1_u64, - SlashType::DuplicateVote, - &validator1, - current_epoch, - ) - .unwrap(); - - // Advance such that these slashes are all processed - for _ in 0..params.slash_processing_epoch_offset() { - current_epoch = advance_epoch(&mut storage, ¶ms); - super::process_slashes(&mut storage, current_epoch).unwrap(); - } - - let pipeline_epoch = current_epoch + params.pipeline_len; - - let del_bond_amount = crate::bond_amount( - &storage, - 
&BondId { - source: delegator.clone(), - validator: validator1.clone(), - }, - pipeline_epoch, - ) - .unwrap_or_default(); - - let self_bond_amount = crate::bond_amount( - &storage, - &BondId { - source: validator1.clone(), - validator: validator1.clone(), - }, - pipeline_epoch, - ) - .unwrap_or_default(); - - let val_stake = crate::read_validator_stake( - &storage, - ¶ms, - &validator1, - pipeline_epoch, - ) - .unwrap(); - // dbg!(&val_stake); - // dbg!(&del_bond_amount); - // dbg!(&self_bond_amount); - - let diff = val_stake - self_bond_amount - del_bond_amount; - assert!(diff <= 2.into()); -} - -fn test_consensus_key_change_aux(validators: Vec) { - assert_eq!(validators.len(), 1); - - let params = OwnedPosParams { - unbonding_len: 4, - ..Default::default() - }; - let validator = validators[0].address.clone(); - - println!("\nTest inputs: {params:?}, genesis validators: {validators:#?}"); - let mut storage = TestWlStorage::default(); - - // Genesis - let mut current_epoch = storage.storage.block.epoch; - let params = test_init_genesis( - &mut storage, - params, - validators.into_iter(), - current_epoch, - ) - .unwrap(); - storage.commit_block().unwrap(); - - // Check that there is one consensus key in the network - let consensus_keys = get_consensus_key_set(&storage).unwrap(); - assert_eq!(consensus_keys.len(), 1); - let ck = consensus_keys.first().cloned().unwrap(); - let og_ck = validator_consensus_key_handle(&validator) - .get(&storage, current_epoch, ¶ms) - .unwrap() - .unwrap(); - assert_eq!(ck, og_ck); - - // Attempt to change to a new secp256k1 consensus key (disallowed) - let secp_ck = gen_keypair::(); - let secp_ck = key::common::SecretKey::Secp256k1(secp_ck).ref_to(); - let res = - change_consensus_key(&mut storage, &validator, &secp_ck, current_epoch); - assert!(res.is_err()); - - // Change consensus keys - let ck_2 = common_sk_from_simple_seed(1).ref_to(); - change_consensus_key(&mut storage, &validator, &ck_2, current_epoch) - .unwrap(); - - // Check that there is a new consensus key - let consensus_keys = get_consensus_key_set(&storage).unwrap(); - assert_eq!(consensus_keys.len(), 2); - - for epoch in current_epoch.iter_range(params.pipeline_len) { - let ck = validator_consensus_key_handle(&validator) - .get(&storage, epoch, ¶ms) - .unwrap() - .unwrap(); - assert_eq!(ck, og_ck); - } - let pipeline_epoch = current_epoch + params.pipeline_len; - let ck = validator_consensus_key_handle(&validator) - .get(&storage, pipeline_epoch, ¶ms) - .unwrap() - .unwrap(); - assert_eq!(ck, ck_2); - - // Advance to the pipeline epoch - loop { - current_epoch = advance_epoch(&mut storage, ¶ms); - if current_epoch == pipeline_epoch { - break; - } - } - - // Check the consensus keys again - let consensus_keys = get_consensus_key_set(&storage).unwrap(); - assert_eq!(consensus_keys.len(), 2); - - for epoch in current_epoch.iter_range(params.pipeline_len + 1) { - let ck = validator_consensus_key_handle(&validator) - .get(&storage, epoch, ¶ms) - .unwrap() - .unwrap(); - assert_eq!(ck, ck_2); - } - - // Now change the consensus key again and bond in the same epoch - let ck_3 = common_sk_from_simple_seed(3).ref_to(); - change_consensus_key(&mut storage, &validator, &ck_3, current_epoch) - .unwrap(); - - let staking_token = storage.storage.native_token.clone(); - let amount_del = token::Amount::native_whole(5); - credit_tokens(&mut storage, &staking_token, &validator, amount_del) - .unwrap(); - bond_tokens( - &mut storage, - None, - &validator, - token::Amount::native_whole(1), - current_epoch, - 
None, - ) - .unwrap(); - - // Check consensus keys again - let consensus_keys = get_consensus_key_set(&storage).unwrap(); - assert_eq!(consensus_keys.len(), 3); - - for epoch in current_epoch.iter_range(params.pipeline_len) { - let ck = validator_consensus_key_handle(&validator) - .get(&storage, epoch, ¶ms) - .unwrap() - .unwrap(); - assert_eq!(ck, ck_2); - } - let pipeline_epoch = current_epoch + params.pipeline_len; - let ck = validator_consensus_key_handle(&validator) - .get(&storage, pipeline_epoch, ¶ms) - .unwrap() - .unwrap(); - assert_eq!(ck, ck_3); - - // Advance to the pipeline epoch to ensure that the validator set updates to - // tendermint will work - loop { - current_epoch = advance_epoch(&mut storage, ¶ms); - if current_epoch == pipeline_epoch { - break; - } - } - assert_eq!(current_epoch.0, 2 * params.pipeline_len); -} - -fn test_is_delegator_aux(mut validators: Vec) { - validators.sort_by(|a, b| b.tokens.cmp(&a.tokens)); - - let validator1 = validators[0].address.clone(); - let validator2 = validators[1].address.clone(); - - let mut storage = TestWlStorage::default(); - let params = OwnedPosParams { - unbonding_len: 4, - ..Default::default() - }; - - // Genesis - let mut current_epoch = storage.storage.block.epoch; - let params = test_init_genesis( - &mut storage, - params, - validators.clone().into_iter(), - current_epoch, - ) - .unwrap(); - storage.commit_block().unwrap(); - - // Get delegators with some tokens - let staking_token = staking_token_address(&storage); - let delegator1 = address::testing::gen_implicit_address(); - let delegator2 = address::testing::gen_implicit_address(); - let del_balance = token::Amount::native_whole(1000); - credit_tokens(&mut storage, &staking_token, &delegator1, del_balance) - .unwrap(); - credit_tokens(&mut storage, &staking_token, &delegator2, del_balance) - .unwrap(); - - // Advance to epoch 1 - current_epoch = advance_epoch(&mut storage, ¶ms); - super::process_slashes(&mut storage, current_epoch).unwrap(); - - // Delegate in epoch 1 to validator1 - let del1_epoch = current_epoch; - super::bond_tokens( - &mut storage, - Some(&delegator1), - &validator1, - 1000.into(), - current_epoch, - None, - ) - .unwrap(); - - // Advance to epoch 2 - current_epoch = advance_epoch(&mut storage, ¶ms); - super::process_slashes(&mut storage, current_epoch).unwrap(); - - // Delegate in epoch 2 to validator2 - let del2_epoch = current_epoch; - super::bond_tokens( - &mut storage, - Some(&delegator2), - &validator2, - 1000.into(), - current_epoch, - None, - ) - .unwrap(); - - // Checks - assert!(super::is_validator(&storage, &validator1).unwrap()); - assert!(super::is_validator(&storage, &validator2).unwrap()); - assert!(!super::is_delegator(&storage, &validator1, None).unwrap()); - assert!(!super::is_delegator(&storage, &validator2, None).unwrap()); - - assert!(!super::is_validator(&storage, &delegator1).unwrap()); - assert!(!super::is_validator(&storage, &delegator2).unwrap()); - assert!(super::is_delegator(&storage, &delegator1, None).unwrap()); - assert!(super::is_delegator(&storage, &delegator2, None).unwrap()); - - for epoch in Epoch::default().iter_range(del1_epoch.0 + params.pipeline_len) - { - assert!( - !super::is_delegator(&storage, &delegator1, Some(epoch)).unwrap() - ); - } - assert!( - super::is_delegator( - &storage, - &delegator1, - Some(del1_epoch + params.pipeline_len) - ) - .unwrap() - ); - for epoch in Epoch::default().iter_range(del2_epoch.0 + params.pipeline_len) - { - assert!( - !super::is_delegator(&storage, &delegator2, 
Some(epoch)).unwrap() - ); - } - assert!( - super::is_delegator( - &storage, - &delegator2, - Some(del2_epoch + params.pipeline_len) - ) - .unwrap() - ); -} diff --git a/proof_of_stake/src/tests/helpers.rs b/proof_of_stake/src/tests/helpers.rs new file mode 100644 index 0000000000..ddf8bba4c6 --- /dev/null +++ b/proof_of_stake/src/tests/helpers.rs @@ -0,0 +1,173 @@ +use std::cmp::max; +use std::ops::Range; + +use namada_core::ledger::storage::testing::TestWlStorage; +use namada_core::types::address::testing::address_from_simple_seed; +use namada_core::types::dec::Dec; +use namada_core::types::key::testing::common_sk_from_simple_seed; +use namada_core::types::key::{self, RefTo}; +use namada_core::types::storage::Epoch; +use namada_core::types::token; +use namada_core::types::token::testing::arb_amount_non_zero_ceiled; +use proptest::strategy::{Just, Strategy}; + +use crate::parameters::testing::arb_pos_params; +use crate::types::{GenesisValidator, ValidatorSetUpdate}; +use crate::validator_set_update::{ + copy_validator_sets_and_positions, validator_set_update_tendermint, +}; +use crate::{ + compute_and_store_total_consensus_stake, OwnedPosParams, PosParams, +}; + +pub fn arb_params_and_genesis_validators( + num_max_validator_slots: Option, + val_size: Range, +) -> impl Strategy)> { + let params = arb_pos_params(num_max_validator_slots); + params.prop_flat_map(move |params| { + let validators = arb_genesis_validators( + val_size.clone(), + Some(params.validator_stake_threshold), + ); + (Just(params), validators) + }) +} + +pub fn test_slashes_with_unbonding_params() +-> impl Strategy, u64)> { + let params = arb_pos_params(Some(5)); + params.prop_flat_map(|params| { + let unbond_delay = 0..(params.slash_processing_epoch_offset() * 2); + // Must have at least 4 validators so we can slash one and the cubic + // slash rate will be less than 100% + let validators = arb_genesis_validators(4..10, None); + (Just(params), validators, unbond_delay) + }) +} + +pub fn get_tendermint_set_updates( + s: &TestWlStorage, + params: &PosParams, + Epoch(epoch): Epoch, +) -> Vec { + // Because the `validator_set_update_tendermint` is called 2 blocks before + // the start of a new epoch, it expects to receive the epoch that is before + // the start of a new one too and so we give it the predecessor of the + // current epoch here to actually get the update for the current epoch. + let epoch = Epoch(epoch - 1); + validator_set_update_tendermint(s, params, epoch, |update| update).unwrap() +} + +/// Advance to the next epoch. Returns the new epoch. +pub fn advance_epoch(s: &mut TestWlStorage, params: &PosParams) -> Epoch { + s.storage.block.epoch = s.storage.block.epoch.next(); + let current_epoch = s.storage.block.epoch; + compute_and_store_total_consensus_stake(s, current_epoch).unwrap(); + copy_validator_sets_and_positions( + s, + params, + current_epoch, + current_epoch + params.pipeline_len, + ) + .unwrap(); + // purge_validator_sets_for_old_epoch(s, current_epoch).unwrap(); + // process_slashes(s, current_epoch).unwrap(); + // dbg!(current_epoch); + current_epoch +} + +pub fn arb_genesis_validators( + size: Range, + threshold: Option, +) -> impl Strategy> { + let threshold = threshold + .unwrap_or_else(|| PosParams::default().validator_stake_threshold); + let tokens: Vec<_> = (0..size.end) + .map(|ix| { + if ix == 0 { + // Make sure that at least one validator has at least a stake + // greater or equal to the threshold to avoid having an empty + // consensus set. 
+ threshold.raw_amount().as_u64()..=10_000_000_u64 + } else { + 1..=10_000_000_u64 + } + .prop_map(token::Amount::from) + }) + .collect(); + (size, tokens) + .prop_map(|(size, token_amounts)| { + // use unique seeds to generate validators' address and consensus + // key + let seeds = (0_u64..).take(size); + seeds + .zip(token_amounts) + .map(|(seed, tokens)| { + let address = address_from_simple_seed(seed); + let consensus_sk = common_sk_from_simple_seed(seed); + let consensus_key = consensus_sk.to_public(); + + let protocol_sk = common_sk_from_simple_seed(seed); + let protocol_key = protocol_sk.to_public(); + + let eth_hot_key = key::common::PublicKey::Secp256k1( + key::testing::gen_keypair::( + ) + .ref_to(), + ); + let eth_cold_key = key::common::PublicKey::Secp256k1( + key::testing::gen_keypair::( + ) + .ref_to(), + ); + + let commission_rate = Dec::new(5, 2).expect("Test failed"); + let max_commission_rate_change = + Dec::new(1, 2).expect("Test failed"); + GenesisValidator { + address, + tokens, + consensus_key, + protocol_key, + eth_hot_key, + eth_cold_key, + commission_rate, + max_commission_rate_change, + metadata: Default::default(), + } + }) + .collect() + }) + .prop_filter( + "Must have at least one genesis validator with stake above the \ + provided threshold, if any.", + move |gen_vals: &Vec| { + gen_vals.iter().any(|val| val.tokens >= threshold) + }, + ) +} + +pub fn arb_redelegation_amounts( + max_delegation: u64, +) -> impl Strategy { + let arb_delegation = arb_amount_non_zero_ceiled(max_delegation); + let amounts = arb_delegation.prop_flat_map(move |amount_delegate| { + let amount_redelegate = arb_amount_non_zero_ceiled(max( + 1, + u64::try_from(amount_delegate.raw_amount()).unwrap() - 1, + )); + (Just(amount_delegate), amount_redelegate) + }); + amounts.prop_flat_map(move |(amount_delegate, amount_redelegate)| { + let amount_unbond = arb_amount_non_zero_ceiled(max( + 1, + u64::try_from(amount_redelegate.raw_amount()).unwrap() - 1, + )); + ( + Just(amount_delegate), + Just(amount_redelegate), + amount_unbond, + ) + }) +} diff --git a/proof_of_stake/src/tests/mod.rs b/proof_of_stake/src/tests/mod.rs new file mode 100644 index 0000000000..86cb3d6ca1 --- /dev/null +++ b/proof_of_stake/src/tests/mod.rs @@ -0,0 +1,8 @@ +mod helpers; +mod state_machine; +mod state_machine_v2; +mod test_helper_fns; +mod test_pos; +mod test_slash_and_redel; +mod test_validator; +mod utils; diff --git a/proof_of_stake/src/tests/state_machine.rs b/proof_of_stake/src/tests/state_machine.rs index 781d276105..1d07d6465b 100644 --- a/proof_of_stake/src/tests/state_machine.rs +++ b/proof_of_stake/src/tests/state_machine.rs @@ -29,14 +29,20 @@ use test_log::test; use crate::parameters::testing::arb_rate; use crate::parameters::PosParams; -use crate::tests::arb_params_and_genesis_validators; +use crate::storage::{ + enqueued_slashes_handle, read_all_validator_addresses, + read_below_capacity_validator_set_addresses, + read_below_capacity_validator_set_addresses_with_stake, + read_below_threshold_validator_set_addresses, + read_consensus_validator_set_addresses_with_stake, +}; +use crate::tests::helpers::{advance_epoch, arb_params_and_genesis_validators}; use crate::types::{ BondId, EagerRedelegatedBondsMap, GenesisValidator, ReverseOrdTokenAmount, Slash, SlashType, ValidatorState, WeightedValidator, }; use crate::{ below_capacity_validator_set_handle, consensus_validator_set_handle, - enqueued_slashes_handle, read_below_threshold_validator_set_addresses, read_pos_params, redelegate_tokens, 
validator_deltas_handle, validator_slashes_handle, validator_state_handle, BondsForRemovalRes, EagerRedelegatedUnbonds, FoldRedelegatedBondsResult, ModifiedRedelegation, @@ -243,11 +249,12 @@ impl StateMachineTest for ConcretePosState { match transition { Transition::NextEpoch => { tracing::debug!("\nCONCRETE Next epoch"); - super::advance_epoch(&mut state.s, ¶ms); + advance_epoch(&mut state.s, ¶ms); // Need to apply some slashing let current_epoch = state.s.storage.block.epoch; - super::process_slashes(&mut state.s, current_epoch).unwrap(); + crate::slashing::process_slashes(&mut state.s, current_epoch) + .unwrap(); let params = read_pos_params(&state.s).unwrap(); state.check_next_epoch_post_conditions(¶ms); @@ -264,9 +271,9 @@ impl StateMachineTest for ConcretePosState { tracing::debug!("\nCONCRETE Init validator"); let current_epoch = state.current_epoch(); - super::become_validator( + crate::become_validator( &mut state.s, - super::BecomeValidator { + crate::BecomeValidator { params: ¶ms, address: &address, consensus_key: &consensus_key, @@ -336,7 +343,7 @@ impl StateMachineTest for ConcretePosState { ); // Apply the bond - super::bond_tokens( + crate::bond_tokens( &mut state.s, Some(&id.source), &id.validator, @@ -403,7 +410,7 @@ impl StateMachineTest for ConcretePosState { .unwrap(); // Apply the unbond - super::unbond_tokens( + crate::unbond_tokens( &mut state.s, Some(&id.source), &id.validator, @@ -454,7 +461,7 @@ impl StateMachineTest for ConcretePosState { .unwrap(); // Apply the withdrawal - let withdrawn = super::withdraw_tokens( + let withdrawn = crate::withdraw_tokens( &mut state.s, Some(&source), &validator, @@ -547,9 +554,10 @@ impl StateMachineTest for ConcretePosState { .unwrap(); // Find delegations - let delegations_pre = - crate::find_delegations(&state.s, &id.source, &pipeline) - .unwrap(); + let delegations_pre = crate::queries::find_delegations( + &state.s, &id.source, &pipeline, + ) + .unwrap(); // Apply redelegation let result = redelegate_tokens( @@ -668,7 +676,7 @@ impl StateMachineTest for ConcretePosState { // updated with redelegation. For the source reduced by the // redelegation amount and for the destination increased by // the redelegation amount, less any slashes. - let delegations_post = crate::find_delegations( + let delegations_post = crate::queries::find_delegations( &state.s, &id.source, &pipeline, ) .unwrap(); @@ -707,7 +715,7 @@ impl StateMachineTest for ConcretePosState { tracing::debug!("\nCONCRETE Misbehavior"); let current_epoch = state.current_epoch(); // Record the slash evidence - super::slash( + crate::slashing::slash( &mut state.s, ¶ms, current_epoch, @@ -736,7 +744,7 @@ impl StateMachineTest for ConcretePosState { let current_epoch = state.current_epoch(); // Unjail the validator - super::unjail_validator(&mut state.s, &address, current_epoch) + crate::unjail_validator(&mut state.s, &address, current_epoch) .unwrap(); // Post-conditions @@ -769,13 +777,13 @@ impl ConcretePosState { // Post-condition: Consensus validator sets at pipeline offset // must be the same as at the epoch before it. 
let consensus_set_before_pipeline = - crate::read_consensus_validator_set_addresses_with_stake( + read_consensus_validator_set_addresses_with_stake( &self.s, before_pipeline, ) .unwrap(); let consensus_set_at_pipeline = - crate::read_consensus_validator_set_addresses_with_stake( + read_consensus_validator_set_addresses_with_stake( &self.s, pipeline, ) .unwrap(); @@ -787,13 +795,13 @@ impl ConcretePosState { // Post-condition: Below-capacity validator sets at pipeline // offset must be the same as at the epoch before it. let below_cap_before_pipeline = - crate::read_below_capacity_validator_set_addresses_with_stake( + read_below_capacity_validator_set_addresses_with_stake( &self.s, before_pipeline, ) .unwrap(); let below_cap_at_pipeline = - crate::read_below_capacity_validator_set_addresses_with_stake( + read_below_capacity_validator_set_addresses_with_stake( &self.s, pipeline, ) .unwrap(); @@ -846,7 +854,7 @@ impl ConcretePosState { ) { let pipeline = submit_epoch + params.pipeline_len; - let cur_stake = super::read_validator_stake( + let cur_stake = crate::read_validator_stake( &self.s, params, &id.validator, @@ -858,7 +866,7 @@ impl ConcretePosState { // change assert_eq!(cur_stake, validator_stake_before_bond_cur); - let stake_at_pipeline = super::read_validator_stake( + let stake_at_pipeline = crate::read_validator_stake( &self.s, params, &id.validator, @@ -915,7 +923,7 @@ impl ConcretePosState { ) { let pipeline = submit_epoch + params.pipeline_len; - let cur_stake = super::read_validator_stake( + let cur_stake = crate::read_validator_stake( &self.s, params, &id.validator, @@ -927,7 +935,7 @@ impl ConcretePosState { // change assert_eq!(cur_stake, validator_stake_before_unbond_cur); - let stake_at_pipeline = super::read_validator_stake( + let stake_at_pipeline = crate::read_validator_stake( &self.s, params, &id.validator, @@ -1171,21 +1179,18 @@ impl ConcretePosState { || (num_occurrences == 0 && validator_is_jailed) ); - let consensus_set = - crate::read_consensus_validator_set_addresses_with_stake( - &self.s, pipeline, - ) - .unwrap(); + let consensus_set = read_consensus_validator_set_addresses_with_stake( + &self.s, pipeline, + ) + .unwrap(); let below_cap_set = - crate::read_below_capacity_validator_set_addresses_with_stake( + read_below_capacity_validator_set_addresses_with_stake( &self.s, pipeline, ) .unwrap(); let below_thresh_set = - crate::read_below_threshold_validator_set_addresses( - &self.s, pipeline, - ) - .unwrap(); + read_below_threshold_validator_set_addresses(&self.s, pipeline) + .unwrap(); let weighted = WeightedValidator { bonded_stake: stake_at_pipeline, address: id.validator, @@ -1310,21 +1315,17 @@ impl ConcretePosState { .contains(address) ); assert!( - !crate::read_below_capacity_validator_set_addresses( - &self.s, epoch - ) - .unwrap() - .contains(address) + !read_below_capacity_validator_set_addresses(&self.s, epoch) + .unwrap() + .contains(address) ); assert!( - !crate::read_below_threshold_validator_set_addresses( - &self.s, epoch - ) - .unwrap() - .contains(address) + !read_below_threshold_validator_set_addresses(&self.s, epoch) + .unwrap() + .contains(address) ); assert!( - !crate::read_all_validator_addresses(&self.s, epoch) + !read_all_validator_addresses(&self.s, epoch) .unwrap() .contains(address) ); @@ -1333,17 +1334,14 @@ impl ConcretePosState { crate::read_consensus_validator_set_addresses(&self.s, pipeline) .unwrap() .contains(address); - let in_bc = crate::read_below_capacity_validator_set_addresses( - &self.s, pipeline, - ) - .unwrap() - 
.contains(address); + let in_bc = + read_below_capacity_validator_set_addresses(&self.s, pipeline) + .unwrap() + .contains(address); let in_below_thresh = - crate::read_below_threshold_validator_set_addresses( - &self.s, pipeline, - ) - .unwrap() - .contains(address); + read_below_threshold_validator_set_addresses(&self.s, pipeline) + .unwrap() + .contains(address); assert!(in_below_thresh && !in_consensus && !in_bc); } @@ -1410,7 +1408,7 @@ impl ConcretePosState { let abs_enqueued = ref_state.enqueued_slashes.clone(); let mut conc_enqueued: BTreeMap>> = BTreeMap::new(); - crate::enqueued_slashes_handle() + enqueued_slashes_handle() .get_data_handler() .iter(&self.s) .unwrap() @@ -1764,7 +1762,7 @@ impl ConcretePosState { for WeightedValidator { bonded_stake, address: validator, - } in crate::read_consensus_validator_set_addresses_with_stake( + } in read_consensus_validator_set_addresses_with_stake( &self.s, epoch, ) .unwrap() @@ -1821,11 +1819,10 @@ impl ConcretePosState { for WeightedValidator { bonded_stake, address: validator, - } in - crate::read_below_capacity_validator_set_addresses_with_stake( - &self.s, epoch, - ) - .unwrap() + } in read_below_capacity_validator_set_addresses_with_stake( + &self.s, epoch, + ) + .unwrap() { let deltas_stake = validator_deltas_handle(&validator) .get_sum(&self.s, epoch, params) @@ -1873,10 +1870,8 @@ impl ConcretePosState { } for validator in - crate::read_below_threshold_validator_set_addresses( - &self.s, epoch, - ) - .unwrap() + read_below_threshold_validator_set_addresses(&self.s, epoch) + .unwrap() { let stake = crate::read_validator_stake( &self.s, params, &validator, epoch, @@ -1921,7 +1916,7 @@ impl ConcretePosState { // Jailed validators not in a set let all_validators = - crate::read_all_validator_addresses(&self.s, epoch).unwrap(); + read_all_validator_addresses(&self.s, epoch).unwrap(); for validator in all_validators { let state = validator_state_handle(&validator) diff --git a/proof_of_stake/src/tests/state_machine_v2.rs b/proof_of_stake/src/tests/state_machine_v2.rs index cf59a59238..c75d629995 100644 --- a/proof_of_stake/src/tests/state_machine_v2.rs +++ b/proof_of_stake/src/tests/state_machine_v2.rs @@ -29,10 +29,20 @@ use proptest_state_machine::{ use test_log::test; use yansi::Paint; +use super::helpers::advance_epoch; use super::utils::DbgPrintDiff; use crate::parameters::testing::arb_rate; use crate::parameters::PosParams; -use crate::tests::arb_params_and_genesis_validators; +use crate::queries::find_delegations; +use crate::slashing::find_slashes_in_range; +use crate::storage::{ + enqueued_slashes_handle, read_all_validator_addresses, + read_below_capacity_validator_set_addresses, + read_below_capacity_validator_set_addresses_with_stake, + read_below_threshold_validator_set_addresses, + read_consensus_validator_set_addresses_with_stake, +}; +use crate::tests::helpers::arb_params_and_genesis_validators; use crate::tests::utils::pause_for_enter; use crate::types::{ BondId, GenesisValidator, ReverseOrdTokenAmount, Slash, SlashType, @@ -41,10 +51,8 @@ use crate::types::{ use crate::{ below_capacity_validator_set_handle, bond_handle, consensus_validator_set_handle, delegator_redelegated_bonds_handle, - enqueued_slashes_handle, find_slashes_in_range, - read_below_threshold_validator_set_addresses, read_pos_params, - redelegate_tokens, validator_deltas_handle, validator_slashes_handle, - validator_state_handle, RedelegationError, + read_pos_params, redelegate_tokens, validator_deltas_handle, + validator_slashes_handle, 
validator_state_handle, RedelegationError, }; prop_state_machine! { @@ -1971,11 +1979,12 @@ impl StateMachineTest for ConcretePosState { match transition { Transition::NextEpoch => { tracing::debug!("\nCONCRETE Next epoch"); - super::advance_epoch(&mut state.s, ¶ms); + advance_epoch(&mut state.s, ¶ms); // Need to apply some slashing let current_epoch = state.s.storage.block.epoch; - super::process_slashes(&mut state.s, current_epoch).unwrap(); + crate::slashing::process_slashes(&mut state.s, current_epoch) + .unwrap(); let params = read_pos_params(&state.s).unwrap(); state.check_next_epoch_post_conditions(¶ms); @@ -1992,9 +2001,9 @@ impl StateMachineTest for ConcretePosState { tracing::debug!("\nCONCRETE Init validator"); let current_epoch = state.current_epoch(); - super::become_validator( + crate::become_validator( &mut state.s, - super::BecomeValidator { + crate::BecomeValidator { params: ¶ms, address: &address, consensus_key: &consensus_key, @@ -2064,7 +2073,7 @@ impl StateMachineTest for ConcretePosState { ); // Apply the bond - super::bond_tokens( + crate::bond_tokens( &mut state.s, Some(&id.source), &id.validator, @@ -2129,7 +2138,7 @@ impl StateMachineTest for ConcretePosState { .unwrap(); // Apply the unbond - super::unbond_tokens( + crate::unbond_tokens( &mut state.s, Some(&id.source), &id.validator, @@ -2205,7 +2214,7 @@ impl StateMachineTest for ConcretePosState { // .unwrap(); // Apply the withdrawal - let withdrawn = super::withdraw_tokens( + let withdrawn = crate::withdraw_tokens( &mut state.s, Some(&source), &validator, @@ -2454,8 +2463,7 @@ impl StateMachineTest for ConcretePosState { // Find delegations let delegations_pre = - crate::find_delegations(&state.s, &id.source, &pipeline) - .unwrap(); + find_delegations(&state.s, &id.source, &pipeline).unwrap(); // Apply redelegation let result = redelegate_tokens( @@ -2607,10 +2615,9 @@ impl StateMachineTest for ConcretePosState { // updated with redelegation. For the source reduced by the // redelegation amount and for the destination increased by // the redelegation amount, less any slashes. - let delegations_post = crate::find_delegations( - &state.s, &id.source, &pipeline, - ) - .unwrap(); + let delegations_post = + find_delegations(&state.s, &id.source, &pipeline) + .unwrap(); let src_delegation_pre = delegations_pre .get(&id.validator) .cloned() @@ -2663,7 +2670,7 @@ impl StateMachineTest for ConcretePosState { tracing::debug!("\nCONCRETE Misbehavior"); let current_epoch = state.current_epoch(); // Record the slash evidence - super::slash( + crate::slashing::slash( &mut state.s, ¶ms, current_epoch, @@ -2692,7 +2699,7 @@ impl StateMachineTest for ConcretePosState { let current_epoch = state.current_epoch(); // Unjail the validator - super::unjail_validator(&mut state.s, &address, current_epoch) + crate::unjail_validator(&mut state.s, &address, current_epoch) .unwrap(); // Post-conditions @@ -2725,13 +2732,13 @@ impl ConcretePosState { // Post-condition: Consensus validator sets at pipeline offset // must be the same as at the epoch before it. 
let consensus_set_before_pipeline = - crate::read_consensus_validator_set_addresses_with_stake( + read_consensus_validator_set_addresses_with_stake( &self.s, before_pipeline, ) .unwrap(); let consensus_set_at_pipeline = - crate::read_consensus_validator_set_addresses_with_stake( + read_consensus_validator_set_addresses_with_stake( &self.s, pipeline, ) .unwrap(); @@ -2743,13 +2750,13 @@ impl ConcretePosState { // Post-condition: Below-capacity validator sets at pipeline // offset must be the same as at the epoch before it. let below_cap_before_pipeline = - crate::read_below_capacity_validator_set_addresses_with_stake( + read_below_capacity_validator_set_addresses_with_stake( &self.s, before_pipeline, ) .unwrap(); let below_cap_at_pipeline = - crate::read_below_capacity_validator_set_addresses_with_stake( + read_below_capacity_validator_set_addresses_with_stake( &self.s, pipeline, ) .unwrap(); @@ -2802,7 +2809,7 @@ impl ConcretePosState { ) { let pipeline = submit_epoch + params.pipeline_len; - let cur_stake = super::read_validator_stake( + let cur_stake = crate::read_validator_stake( &self.s, params, &id.validator, @@ -2814,7 +2821,7 @@ impl ConcretePosState { // change assert_eq!(cur_stake, validator_stake_before_bond_cur); - let stake_at_pipeline = super::read_validator_stake( + let stake_at_pipeline = crate::read_validator_stake( &self.s, params, &id.validator, @@ -2848,7 +2855,7 @@ impl ConcretePosState { ) { let pipeline = submit_epoch + params.pipeline_len; - let cur_stake = super::read_validator_stake( + let cur_stake = crate::read_validator_stake( &self.s, params, &id.validator, @@ -2860,7 +2867,7 @@ impl ConcretePosState { // change assert_eq!(cur_stake, validator_stake_before_unbond_cur); - let stake_at_pipeline = super::read_validator_stake( + let stake_at_pipeline = crate::read_validator_stake( &self.s, params, &id.validator, @@ -2938,21 +2945,18 @@ impl ConcretePosState { || (num_occurrences == 0 && validator_is_jailed) ); - let consensus_set = - crate::read_consensus_validator_set_addresses_with_stake( - &self.s, pipeline, - ) - .unwrap(); + let consensus_set = read_consensus_validator_set_addresses_with_stake( + &self.s, pipeline, + ) + .unwrap(); let below_cap_set = - crate::read_below_capacity_validator_set_addresses_with_stake( + read_below_capacity_validator_set_addresses_with_stake( &self.s, pipeline, ) .unwrap(); let below_thresh_set = - crate::read_below_threshold_validator_set_addresses( - &self.s, pipeline, - ) - .unwrap(); + read_below_threshold_validator_set_addresses(&self.s, pipeline) + .unwrap(); let weighted = WeightedValidator { bonded_stake: stake_at_pipeline, address: id.validator, @@ -3015,21 +3019,17 @@ impl ConcretePosState { .contains(address) ); assert!( - !crate::read_below_capacity_validator_set_addresses( - &self.s, epoch - ) - .unwrap() - .contains(address) + !read_below_capacity_validator_set_addresses(&self.s, epoch) + .unwrap() + .contains(address) ); assert!( - !crate::read_below_threshold_validator_set_addresses( - &self.s, epoch - ) - .unwrap() - .contains(address) + !read_below_threshold_validator_set_addresses(&self.s, epoch) + .unwrap() + .contains(address) ); assert!( - !crate::read_all_validator_addresses(&self.s, epoch) + !read_all_validator_addresses(&self.s, epoch) .unwrap() .contains(address) ); @@ -3038,17 +3038,14 @@ impl ConcretePosState { crate::read_consensus_validator_set_addresses(&self.s, pipeline) .unwrap() .contains(address); - let in_bc = crate::read_below_capacity_validator_set_addresses( - &self.s, pipeline, - ) - 
.unwrap() - .contains(address); + let in_bc = + read_below_capacity_validator_set_addresses(&self.s, pipeline) + .unwrap() + .contains(address); let in_below_thresh = - crate::read_below_threshold_validator_set_addresses( - &self.s, pipeline, - ) - .unwrap() - .contains(address); + read_below_threshold_validator_set_addresses(&self.s, pipeline) + .unwrap() + .contains(address); assert!(in_below_thresh && !in_consensus && !in_bc); } @@ -3204,7 +3201,7 @@ impl ConcretePosState { for WeightedValidator { bonded_stake, address: validator, - } in crate::read_consensus_validator_set_addresses_with_stake( + } in read_consensus_validator_set_addresses_with_stake( &self.s, epoch, ) .unwrap() @@ -3274,11 +3271,10 @@ impl ConcretePosState { for WeightedValidator { bonded_stake, address: validator, - } in - crate::read_below_capacity_validator_set_addresses_with_stake( - &self.s, epoch, - ) - .unwrap() + } in read_below_capacity_validator_set_addresses_with_stake( + &self.s, epoch, + ) + .unwrap() { let deltas_stake = validator_deltas_handle(&validator) .get_sum(&self.s, epoch, params) @@ -3359,10 +3355,8 @@ impl ConcretePosState { } for validator in - crate::read_below_threshold_validator_set_addresses( - &self.s, epoch, - ) - .unwrap() + read_below_threshold_validator_set_addresses(&self.s, epoch) + .unwrap() { let conc_stake = validator_deltas_handle(&validator) .get_sum(&self.s, epoch, params) @@ -3425,7 +3419,7 @@ impl ConcretePosState { // Jailed validators not in a set let all_validators = - crate::read_all_validator_addresses(&self.s, epoch).unwrap(); + read_all_validator_addresses(&self.s, epoch).unwrap(); for val in all_validators { let state = validator_state_handle(&val) diff --git a/proof_of_stake/src/tests/test_helper_fns.rs b/proof_of_stake/src/tests/test_helper_fns.rs new file mode 100644 index 0000000000..594965fe43 --- /dev/null +++ b/proof_of_stake/src/tests/test_helper_fns.rs @@ -0,0 +1,2034 @@ +use std::collections::{BTreeMap, BTreeSet}; + +use namada_core::ledger::storage::testing::TestWlStorage; +use namada_core::ledger::storage_api::collections::lazy_map::NestedMap; +use namada_core::ledger::storage_api::collections::LazyCollection; +use namada_core::types::address::testing::{ + established_address_1, established_address_2, established_address_3, +}; +use namada_core::types::dec::Dec; +use namada_core::types::storage::{Epoch, Key}; +use namada_core::types::token; + +use crate::slashing::{ + apply_list_slashes, compute_amount_after_slashing_unbond, + compute_amount_after_slashing_withdraw, compute_bond_at_epoch, + compute_slash_bond_at_epoch, compute_slashable_amount, slash_redelegation, + slash_validator, slash_validator_redelegation, +}; +use crate::storage::{ + bond_handle, delegator_redelegated_bonds_handle, total_bonded_handle, + total_unbonded_handle, validator_outgoing_redelegations_handle, + validator_slashes_handle, validator_total_redelegated_bonded_handle, + validator_total_redelegated_unbonded_handle, write_pos_params, +}; +use crate::types::{ + EagerRedelegatedBondsMap, RedelegatedTokens, Slash, SlashType, +}; +use crate::{ + compute_modified_redelegation, compute_new_redelegated_unbonds, + find_bonds_to_remove, fold_and_slash_redelegated_bonds, + EagerRedelegatedUnbonds, FoldRedelegatedBondsResult, ModifiedRedelegation, + OwnedPosParams, +}; + +/// `iterateBondsUpToAmountTest` +#[test] +fn test_find_bonds_to_remove() { + let mut storage = TestWlStorage::default(); + let gov_params = namada_core::ledger::governance::parameters::GovernanceParameters::default(); + 
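+    // Seed storage with default governance and PoS parameters before any
+    // bonds are written (the PoS storage API below expects both to exist)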
gov_params.init_storage(&mut storage).unwrap();
+    write_pos_params(&mut storage, &OwnedPosParams::default()).unwrap();
+
+    let source = established_address_1();
+    let validator = established_address_2();
+    let bond_handle = bond_handle(&source, &validator);
+
+    let (e1, e2, e6) = (Epoch(1), Epoch(2), Epoch(6));
+
+    bond_handle
+        .set(&mut storage, token::Amount::from(5), e1, 0)
+        .unwrap();
+    bond_handle
+        .set(&mut storage, token::Amount::from(3), e2, 0)
+        .unwrap();
+    bond_handle
+        .set(&mut storage, token::Amount::from(8), e6, 0)
+        .unwrap();
+
+    // Test 1
+    let bonds_for_removal = find_bonds_to_remove(
+        &storage,
+        &bond_handle.get_data_handler(),
+        token::Amount::from(8),
+    )
+    .unwrap();
+    assert_eq!(
+        bonds_for_removal.epochs,
+        vec![e6].into_iter().collect::<BTreeSet<Epoch>>()
+    );
+    assert!(bonds_for_removal.new_entry.is_none());
+
+    // Test 2
+    let bonds_for_removal = find_bonds_to_remove(
+        &storage,
+        &bond_handle.get_data_handler(),
+        token::Amount::from(10),
+    )
+    .unwrap();
+    assert_eq!(
+        bonds_for_removal.epochs,
+        vec![e6].into_iter().collect::<BTreeSet<Epoch>>()
+    );
+    assert_eq!(
+        bonds_for_removal.new_entry,
+        Some((Epoch(2), token::Amount::from(1)))
+    );
+
+    // Test 3
+    let bonds_for_removal = find_bonds_to_remove(
+        &storage,
+        &bond_handle.get_data_handler(),
+        token::Amount::from(11),
+    )
+    .unwrap();
+    assert_eq!(
+        bonds_for_removal.epochs,
+        vec![e6, e2].into_iter().collect::<BTreeSet<Epoch>>()
+    );
+    assert!(bonds_for_removal.new_entry.is_none());
+
+    // Test 4
+    let bonds_for_removal = find_bonds_to_remove(
+        &storage,
+        &bond_handle.get_data_handler(),
+        token::Amount::from(12),
+    )
+    .unwrap();
+    assert_eq!(
+        bonds_for_removal.epochs,
+        vec![e6, e2].into_iter().collect::<BTreeSet<Epoch>>()
+    );
+    assert_eq!(
+        bonds_for_removal.new_entry,
+        Some((Epoch(1), token::Amount::from(4)))
+    );
+}
+
+/// `computeModifiedRedelegationTest`
+#[test]
+fn test_compute_modified_redelegation() {
+    let mut storage = TestWlStorage::default();
+    let validator1 = established_address_1();
+    let validator2 = established_address_2();
+    let owner = established_address_3();
+    let outer_epoch = Epoch(0);
+
+    let mut alice = validator1.clone();
+    let mut bob = validator2.clone();
+
+    // Ensure a ranking order of alice > bob
+    if bob > alice {
+        alice = validator2;
+        bob = validator1;
+    }
+    println!("\n\nalice = {}\nbob = {}\n", &alice, &bob);
+
+    // Fill redelegated bonds in storage
+    let redelegated_bonds_map = delegator_redelegated_bonds_handle(&owner)
+        .at(&alice)
+        .at(&outer_epoch);
+    redelegated_bonds_map
+        .at(&alice)
+        .insert(&mut storage, Epoch(2), token::Amount::from(6))
+        .unwrap();
+    redelegated_bonds_map
+        .at(&alice)
+        .insert(&mut storage, Epoch(4), token::Amount::from(7))
+        .unwrap();
+    redelegated_bonds_map
+        .at(&bob)
+        .insert(&mut storage, Epoch(1), token::Amount::from(5))
+        .unwrap();
+    redelegated_bonds_map
+        .at(&bob)
+        .insert(&mut storage, Epoch(4), token::Amount::from(7))
+        .unwrap();
+
+    // Test cases 1 and 2
+    let mr1 = compute_modified_redelegation(
+        &storage,
+        &redelegated_bonds_map,
+        Epoch(5),
+        token::Amount::from(25),
+    )
+    .unwrap();
+    let mr2 = compute_modified_redelegation(
+        &storage,
+        &redelegated_bonds_map,
+        Epoch(5),
+        token::Amount::from(30),
+    )
+    .unwrap();
+
+    let exp_mr = ModifiedRedelegation {
+        epoch: Some(Epoch(5)),
+        ..Default::default()
+    };
+
+    assert_eq!(mr1, exp_mr);
+    assert_eq!(mr2, exp_mr);
+
+    // Test case 3
+    let mr3 = compute_modified_redelegation(
+        &storage,
+        &redelegated_bonds_map,
+        Epoch(5),
+        token::Amount::from(7),
+    )
+    .unwrap();
+
+    let exp_mr = ModifiedRedelegation {
+ epoch: Some(Epoch(5)), + validators_to_remove: BTreeSet::from_iter([bob.clone()]), + validator_to_modify: Some(bob.clone()), + epochs_to_remove: BTreeSet::from_iter([Epoch(4)]), + ..Default::default() + }; + assert_eq!(mr3, exp_mr); + + // Test case 4 + let mr4 = compute_modified_redelegation( + &storage, + &redelegated_bonds_map, + Epoch(5), + token::Amount::from(8), + ) + .unwrap(); + + let exp_mr = ModifiedRedelegation { + epoch: Some(Epoch(5)), + validators_to_remove: BTreeSet::from_iter([bob.clone()]), + validator_to_modify: Some(bob.clone()), + epochs_to_remove: BTreeSet::from_iter([Epoch(1), Epoch(4)]), + epoch_to_modify: Some(Epoch(1)), + new_amount: Some(4.into()), + }; + assert_eq!(mr4, exp_mr); + + // Test case 5 + let mr5 = compute_modified_redelegation( + &storage, + &redelegated_bonds_map, + Epoch(5), + 12.into(), + ) + .unwrap(); + + let exp_mr = ModifiedRedelegation { + epoch: Some(Epoch(5)), + validators_to_remove: BTreeSet::from_iter([bob.clone()]), + ..Default::default() + }; + assert_eq!(mr5, exp_mr); + + // Test case 6 + let mr6 = compute_modified_redelegation( + &storage, + &redelegated_bonds_map, + Epoch(5), + 14.into(), + ) + .unwrap(); + + let exp_mr = ModifiedRedelegation { + epoch: Some(Epoch(5)), + validators_to_remove: BTreeSet::from_iter([alice.clone(), bob.clone()]), + validator_to_modify: Some(alice.clone()), + epochs_to_remove: BTreeSet::from_iter([Epoch(4)]), + epoch_to_modify: Some(Epoch(4)), + new_amount: Some(5.into()), + }; + assert_eq!(mr6, exp_mr); + + // Test case 7 + let mr7 = compute_modified_redelegation( + &storage, + &redelegated_bonds_map, + Epoch(5), + 19.into(), + ) + .unwrap(); + + let exp_mr = ModifiedRedelegation { + epoch: Some(Epoch(5)), + validators_to_remove: BTreeSet::from_iter([alice.clone(), bob.clone()]), + validator_to_modify: Some(alice.clone()), + epochs_to_remove: BTreeSet::from_iter([Epoch(4)]), + ..Default::default() + }; + assert_eq!(mr7, exp_mr); + + // Test case 8 + let mr8 = compute_modified_redelegation( + &storage, + &redelegated_bonds_map, + Epoch(5), + 21.into(), + ) + .unwrap(); + + let exp_mr = ModifiedRedelegation { + epoch: Some(Epoch(5)), + validators_to_remove: BTreeSet::from_iter([alice.clone(), bob]), + validator_to_modify: Some(alice), + epochs_to_remove: BTreeSet::from_iter([Epoch(2), Epoch(4)]), + epoch_to_modify: Some(Epoch(2)), + new_amount: Some(4.into()), + }; + assert_eq!(mr8, exp_mr); +} + +/// `computeBondAtEpochTest` +#[test] +fn test_compute_bond_at_epoch() { + let mut storage = TestWlStorage::default(); + let params = OwnedPosParams { + pipeline_len: 2, + unbonding_len: 4, + cubic_slashing_window_length: 1, + ..Default::default() + }; + let alice = established_address_1(); + let bob = established_address_2(); + + // Test 1 + let res = compute_bond_at_epoch( + &storage, + ¶ms, + &bob, + 12.into(), + 3.into(), + 23.into(), + Some(&Default::default()), + ) + .unwrap(); + + pretty_assertions::assert_eq!(res, 23.into()); + + // Test 2 + validator_slashes_handle(&bob) + .push( + &mut storage, + Slash { + epoch: 4.into(), + block_height: 0, + r#type: SlashType::DuplicateVote, + rate: Dec::one(), + }, + ) + .unwrap(); + let res = compute_bond_at_epoch( + &storage, + ¶ms, + &bob, + 12.into(), + 3.into(), + 23.into(), + Some(&Default::default()), + ) + .unwrap(); + + pretty_assertions::assert_eq!(res, 0.into()); + + // Test 3 + validator_slashes_handle(&bob).pop(&mut storage).unwrap(); + let mut redel_bonds = EagerRedelegatedBondsMap::default(); + redel_bonds.insert( + alice.clone(), + 
BTreeMap::from_iter([(Epoch(1), token::Amount::from(5))]), + ); + let res = compute_bond_at_epoch( + &storage, + ¶ms, + &bob, + 12.into(), + 3.into(), + 23.into(), + Some(&redel_bonds), + ) + .unwrap(); + + pretty_assertions::assert_eq!(res, 23.into()); + + // Test 4 + validator_slashes_handle(&bob) + .push( + &mut storage, + Slash { + epoch: 4.into(), + block_height: 0, + r#type: SlashType::DuplicateVote, + rate: Dec::one(), + }, + ) + .unwrap(); + let res = compute_bond_at_epoch( + &storage, + ¶ms, + &bob, + 12.into(), + 3.into(), + 23.into(), + Some(&redel_bonds), + ) + .unwrap(); + + pretty_assertions::assert_eq!(res, 0.into()); + + // Test 5 + validator_slashes_handle(&bob).pop(&mut storage).unwrap(); + validator_slashes_handle(&alice) + .push( + &mut storage, + Slash { + epoch: 6.into(), + block_height: 0, + r#type: SlashType::DuplicateVote, + rate: Dec::one(), + }, + ) + .unwrap(); + let res = compute_bond_at_epoch( + &storage, + ¶ms, + &bob, + 12.into(), + 3.into(), + 23.into(), + Some(&redel_bonds), + ) + .unwrap(); + + pretty_assertions::assert_eq!(res, 23.into()); + + // Test 6 + validator_slashes_handle(&alice).pop(&mut storage).unwrap(); + validator_slashes_handle(&alice) + .push( + &mut storage, + Slash { + epoch: 4.into(), + block_height: 0, + r#type: SlashType::DuplicateVote, + rate: Dec::one(), + }, + ) + .unwrap(); + let res = compute_bond_at_epoch( + &storage, + ¶ms, + &bob, + 18.into(), + 9.into(), + 23.into(), + Some(&redel_bonds), + ) + .unwrap(); + + pretty_assertions::assert_eq!(res, 18.into()); +} + +/// `computeSlashBondAtEpochTest` +#[test] +fn test_compute_slash_bond_at_epoch() { + let mut storage = TestWlStorage::default(); + let params = OwnedPosParams { + pipeline_len: 2, + unbonding_len: 4, + cubic_slashing_window_length: 1, + ..Default::default() + }; + let alice = established_address_1(); + let bob = established_address_2(); + + let current_epoch = Epoch(20); + let infraction_epoch = + current_epoch - params.slash_processing_epoch_offset(); + + let redelegated_bond = BTreeMap::from_iter([( + alice, + BTreeMap::from_iter([(infraction_epoch - 4, token::Amount::from(10))]), + )]); + + // Test 1 + let res = compute_slash_bond_at_epoch( + &storage, + ¶ms, + &bob, + current_epoch.next(), + infraction_epoch, + infraction_epoch - 2, + 30.into(), + Some(&Default::default()), + Dec::one(), + ) + .unwrap(); + + pretty_assertions::assert_eq!(res, 30.into()); + + // Test 2 + let res = compute_slash_bond_at_epoch( + &storage, + ¶ms, + &bob, + current_epoch.next(), + infraction_epoch, + infraction_epoch - 2, + 30.into(), + Some(&redelegated_bond), + Dec::one(), + ) + .unwrap(); + + pretty_assertions::assert_eq!(res, 30.into()); + + // Test 3 + validator_slashes_handle(&bob) + .push( + &mut storage, + Slash { + epoch: infraction_epoch.prev(), + block_height: 0, + r#type: SlashType::DuplicateVote, + rate: Dec::one(), + }, + ) + .unwrap(); + let res = compute_slash_bond_at_epoch( + &storage, + ¶ms, + &bob, + current_epoch.next(), + infraction_epoch, + infraction_epoch - 2, + 30.into(), + Some(&Default::default()), + Dec::one(), + ) + .unwrap(); + + pretty_assertions::assert_eq!(res, 0.into()); + + // Test 4 + let res = compute_slash_bond_at_epoch( + &storage, + ¶ms, + &bob, + current_epoch.next(), + infraction_epoch, + infraction_epoch - 2, + 30.into(), + Some(&redelegated_bond), + Dec::one(), + ) + .unwrap(); + + pretty_assertions::assert_eq!(res, 0.into()); +} + +/// `computeNewRedelegatedUnbondsTest` +#[test] +fn test_compute_new_redelegated_unbonds() { + let mut storage 
= TestWlStorage::default(); + let alice = established_address_1(); + let bob = established_address_2(); + + let key = Key::parse("testing").unwrap(); + let redelegated_bonds = NestedMap::::open(key); + + // Populate the lazy and eager maps + let (ep1, ep2, ep4, ep5, ep6, ep7) = + (Epoch(1), Epoch(2), Epoch(4), Epoch(5), Epoch(6), Epoch(7)); + let keys_and_values = vec![ + (ep5, alice.clone(), ep2, 1), + (ep5, alice.clone(), ep4, 1), + (ep7, alice.clone(), ep2, 1), + (ep7, alice.clone(), ep4, 1), + (ep5, bob.clone(), ep1, 1), + (ep5, bob.clone(), ep4, 2), + (ep7, bob.clone(), ep1, 1), + (ep7, bob.clone(), ep4, 2), + ]; + let mut eager_map = BTreeMap::::new(); + for (outer_ep, address, inner_ep, amount) in keys_and_values { + redelegated_bonds + .at(&outer_ep) + .at(&address) + .insert(&mut storage, inner_ep, token::Amount::from(amount)) + .unwrap(); + eager_map + .entry(outer_ep) + .or_default() + .entry(address.clone()) + .or_default() + .insert(inner_ep, token::Amount::from(amount)); + } + + // Different ModifiedRedelegation objects for testing + let empty_mr = ModifiedRedelegation::default(); + let all_mr = ModifiedRedelegation { + epoch: Some(ep7), + validators_to_remove: BTreeSet::from_iter([alice.clone(), bob.clone()]), + validator_to_modify: None, + epochs_to_remove: Default::default(), + epoch_to_modify: None, + new_amount: None, + }; + let mod_val_mr = ModifiedRedelegation { + epoch: Some(ep7), + validators_to_remove: BTreeSet::from_iter([alice.clone()]), + validator_to_modify: None, + epochs_to_remove: Default::default(), + epoch_to_modify: None, + new_amount: None, + }; + let mod_val_partial_mr = ModifiedRedelegation { + epoch: Some(ep7), + validators_to_remove: BTreeSet::from_iter([alice.clone(), bob.clone()]), + validator_to_modify: Some(bob.clone()), + epochs_to_remove: BTreeSet::from_iter([ep1]), + epoch_to_modify: None, + new_amount: None, + }; + let mod_epoch_partial_mr = ModifiedRedelegation { + epoch: Some(ep7), + validators_to_remove: BTreeSet::from_iter([alice, bob.clone()]), + validator_to_modify: Some(bob.clone()), + epochs_to_remove: BTreeSet::from_iter([ep1, ep4]), + epoch_to_modify: Some(ep4), + new_amount: Some(token::Amount::from(1)), + }; + + // Test case 1 + let res = compute_new_redelegated_unbonds( + &storage, + &redelegated_bonds, + &Default::default(), + &empty_mr, + ) + .unwrap(); + assert_eq!(res, Default::default()); + + let set5 = BTreeSet::::from_iter([ep5]); + let set56 = BTreeSet::::from_iter([ep5, ep6]); + + // Test case 2 + let res = compute_new_redelegated_unbonds( + &storage, + &redelegated_bonds, + &set5, + &empty_mr, + ) + .unwrap(); + let mut exp_res = eager_map.clone(); + exp_res.remove(&ep7); + assert_eq!(res, exp_res); + + // Test case 3 + let res = compute_new_redelegated_unbonds( + &storage, + &redelegated_bonds, + &set56, + &empty_mr, + ) + .unwrap(); + assert_eq!(res, exp_res); + + // Test case 4 + println!("\nTEST CASE 4\n"); + let res = compute_new_redelegated_unbonds( + &storage, + &redelegated_bonds, + &set56, + &all_mr, + ) + .unwrap(); + assert_eq!(res, eager_map); + + // Test case 5 + let res = compute_new_redelegated_unbonds( + &storage, + &redelegated_bonds, + &set56, + &mod_val_mr, + ) + .unwrap(); + exp_res = eager_map.clone(); + exp_res.entry(ep7).or_default().remove(&bob); + assert_eq!(res, exp_res); + + // Test case 6 + let res = compute_new_redelegated_unbonds( + &storage, + &redelegated_bonds, + &set56, + &mod_val_partial_mr, + ) + .unwrap(); + exp_res = eager_map.clone(); + exp_res + .entry(ep7) + .or_default() + 
.entry(bob.clone()) + .or_default() + .remove(&ep4); + assert_eq!(res, exp_res); + + // Test case 7 + let res = compute_new_redelegated_unbonds( + &storage, + &redelegated_bonds, + &set56, + &mod_epoch_partial_mr, + ) + .unwrap(); + exp_res + .entry(ep7) + .or_default() + .entry(bob) + .or_default() + .insert(ep4, token::Amount::from(1)); + assert_eq!(res, exp_res); +} + +/// `applyListSlashesTest` +#[test] +fn test_apply_list_slashes() { + let init_epoch = Epoch(2); + let params = OwnedPosParams { + unbonding_len: 4, + ..Default::default() + }; + // let unbonding_len = 4u64; + // let cubic_offset = 1u64; + + let slash1 = Slash { + epoch: init_epoch, + block_height: Default::default(), + r#type: SlashType::DuplicateVote, + rate: Dec::one(), + }; + let slash2 = Slash { + epoch: init_epoch + + params.unbonding_len + + params.cubic_slashing_window_length + + 1u64, + block_height: Default::default(), + r#type: SlashType::DuplicateVote, + rate: Dec::one(), + }; + + let list1 = vec![slash1.clone()]; + let list2 = vec![slash1.clone(), slash2.clone()]; + let list3 = vec![slash1.clone(), slash1.clone()]; + let list4 = vec![slash1.clone(), slash1, slash2]; + + let res = apply_list_slashes(¶ms, &[], token::Amount::from(100)); + assert_eq!(res, token::Amount::from(100)); + + let res = apply_list_slashes(¶ms, &list1, token::Amount::from(100)); + assert_eq!(res, token::Amount::zero()); + + let res = apply_list_slashes(¶ms, &list2, token::Amount::from(100)); + assert_eq!(res, token::Amount::zero()); + + let res = apply_list_slashes(¶ms, &list3, token::Amount::from(100)); + assert_eq!(res, token::Amount::zero()); + + let res = apply_list_slashes(¶ms, &list4, token::Amount::from(100)); + assert_eq!(res, token::Amount::zero()); +} + +/// `computeSlashableAmountTest` +#[test] +fn test_compute_slashable_amount() { + let init_epoch = Epoch(2); + let params = OwnedPosParams { + unbonding_len: 4, + ..Default::default() + }; + + let slash1 = Slash { + epoch: init_epoch + + params.unbonding_len + + params.cubic_slashing_window_length, + block_height: Default::default(), + r#type: SlashType::DuplicateVote, + rate: Dec::one(), + }; + + let slash2 = Slash { + epoch: init_epoch + + params.unbonding_len + + params.cubic_slashing_window_length + + 1u64, + block_height: Default::default(), + r#type: SlashType::DuplicateVote, + rate: Dec::one(), + }; + + let test_map = vec![(init_epoch, token::Amount::from(50))] + .into_iter() + .collect::>(); + + let res = compute_slashable_amount( + ¶ms, + &slash1, + token::Amount::from(100), + &BTreeMap::new(), + ); + assert_eq!(res, token::Amount::from(100)); + + let res = compute_slashable_amount( + ¶ms, + &slash2, + token::Amount::from(100), + &test_map, + ); + assert_eq!(res, token::Amount::from(50)); + + let res = compute_slashable_amount( + ¶ms, + &slash1, + token::Amount::from(100), + &test_map, + ); + assert_eq!(res, token::Amount::from(100)); +} + +/// `foldAndSlashRedelegatedBondsMapTest` +#[test] +fn test_fold_and_slash_redelegated_bonds() { + let mut storage = TestWlStorage::default(); + let params = OwnedPosParams { + unbonding_len: 4, + ..Default::default() + }; + let start_epoch = Epoch(7); + + let alice = established_address_1(); + let bob = established_address_2(); + + println!("\n\nAlice: {}", alice); + println!("Bob: {}\n", bob); + + let test_slash = Slash { + epoch: Default::default(), + block_height: Default::default(), + r#type: SlashType::DuplicateVote, + rate: Dec::one(), + }; + + let test_data = vec![ + (alice.clone(), vec![(2, 1), (4, 1)]), + (bob, vec![(1, 
1), (4, 2)]), + ]; + let mut eager_redel_bonds = EagerRedelegatedBondsMap::default(); + for (address, pair) in test_data { + for (epoch, amount) in pair { + eager_redel_bonds + .entry(address.clone()) + .or_default() + .insert(Epoch(epoch), token::Amount::from(amount)); + } + } + + // Test case 1 + let res = fold_and_slash_redelegated_bonds( + &storage, + ¶ms, + &eager_redel_bonds, + start_epoch, + &[], + |_| true, + ); + assert_eq!( + res, + FoldRedelegatedBondsResult { + total_redelegated: token::Amount::from(5), + total_after_slashing: token::Amount::from(5), + } + ); + + // Test case 2 + let res = fold_and_slash_redelegated_bonds( + &storage, + ¶ms, + &eager_redel_bonds, + start_epoch, + &[test_slash], + |_| true, + ); + assert_eq!( + res, + FoldRedelegatedBondsResult { + total_redelegated: token::Amount::from(5), + total_after_slashing: token::Amount::zero(), + } + ); + + // Test case 3 + let alice_slash = Slash { + epoch: Epoch(6), + block_height: Default::default(), + r#type: SlashType::DuplicateVote, + rate: Dec::one(), + }; + validator_slashes_handle(&alice) + .push(&mut storage, alice_slash) + .unwrap(); + + let res = fold_and_slash_redelegated_bonds( + &storage, + ¶ms, + &eager_redel_bonds, + start_epoch, + &[], + |_| true, + ); + assert_eq!( + res, + FoldRedelegatedBondsResult { + total_redelegated: token::Amount::from(5), + total_after_slashing: token::Amount::from(3), + } + ); +} + +/// `slashRedelegationTest` +#[test] +fn test_slash_redelegation() { + let mut storage = TestWlStorage::default(); + let params = OwnedPosParams { + unbonding_len: 4, + ..Default::default() + }; + let alice = established_address_1(); + + let total_redelegated_unbonded = + validator_total_redelegated_unbonded_handle(&alice); + total_redelegated_unbonded + .at(&Epoch(13)) + .at(&Epoch(10)) + .at(&alice) + .insert(&mut storage, Epoch(7), token::Amount::from(2)) + .unwrap(); + + let slashes = validator_slashes_handle(&alice); + + let mut slashed_amounts_map = BTreeMap::from_iter([ + (Epoch(15), token::Amount::zero()), + (Epoch(16), token::Amount::zero()), + ]); + let empty_slash_amounts = slashed_amounts_map.clone(); + + // Test case 1 + slash_redelegation( + &storage, + ¶ms, + token::Amount::from(7), + Epoch(7), + Epoch(10), + &alice, + Epoch(14), + &slashes, + &total_redelegated_unbonded, + Dec::one(), + &mut slashed_amounts_map, + ) + .unwrap(); + assert_eq!( + slashed_amounts_map, + BTreeMap::from_iter([ + (Epoch(15), token::Amount::from(5)), + (Epoch(16), token::Amount::from(5)), + ]) + ); + + // Test case 2 + slashed_amounts_map = empty_slash_amounts.clone(); + slash_redelegation( + &storage, + ¶ms, + token::Amount::from(7), + Epoch(7), + Epoch(11), + &alice, + Epoch(14), + &slashes, + &total_redelegated_unbonded, + Dec::one(), + &mut slashed_amounts_map, + ) + .unwrap(); + assert_eq!( + slashed_amounts_map, + BTreeMap::from_iter([ + (Epoch(15), token::Amount::from(7)), + (Epoch(16), token::Amount::from(7)), + ]) + ); + + // Test case 3 + slashed_amounts_map = BTreeMap::from_iter([ + (Epoch(15), token::Amount::from(2)), + (Epoch(16), token::Amount::from(3)), + ]); + slash_redelegation( + &storage, + ¶ms, + token::Amount::from(7), + Epoch(7), + Epoch(10), + &alice, + Epoch(14), + &slashes, + &total_redelegated_unbonded, + Dec::one(), + &mut slashed_amounts_map, + ) + .unwrap(); + assert_eq!( + slashed_amounts_map, + BTreeMap::from_iter([ + (Epoch(15), token::Amount::from(7)), + (Epoch(16), token::Amount::from(8)), + ]) + ); + + // Test case 4 + slashes + .push( + &mut storage, + Slash { + epoch: 
Epoch(8), + block_height: Default::default(), + r#type: SlashType::DuplicateVote, + rate: Dec::one(), + }, + ) + .unwrap(); + slashed_amounts_map = empty_slash_amounts.clone(); + slash_redelegation( + &storage, + ¶ms, + token::Amount::from(7), + Epoch(7), + Epoch(10), + &alice, + Epoch(14), + &slashes, + &total_redelegated_unbonded, + Dec::one(), + &mut slashed_amounts_map, + ) + .unwrap(); + assert_eq!(slashed_amounts_map, empty_slash_amounts); + + // Test case 5 + slashes.pop(&mut storage).unwrap(); + slashes + .push( + &mut storage, + Slash { + epoch: Epoch(9), + block_height: Default::default(), + r#type: SlashType::DuplicateVote, + rate: Dec::one(), + }, + ) + .unwrap(); + slash_redelegation( + &storage, + ¶ms, + token::Amount::from(7), + Epoch(7), + Epoch(10), + &alice, + Epoch(14), + &slashes, + &total_redelegated_unbonded, + Dec::one(), + &mut slashed_amounts_map, + ) + .unwrap(); + assert_eq!(slashed_amounts_map, empty_slash_amounts); + + // Test case 6 + slashes + .push( + &mut storage, + Slash { + epoch: Epoch(8), + block_height: Default::default(), + r#type: SlashType::DuplicateVote, + rate: Dec::one(), + }, + ) + .unwrap(); + slash_redelegation( + &storage, + ¶ms, + token::Amount::from(7), + Epoch(7), + Epoch(10), + &alice, + Epoch(14), + &slashes, + &total_redelegated_unbonded, + Dec::one(), + &mut slashed_amounts_map, + ) + .unwrap(); + assert_eq!(slashed_amounts_map, empty_slash_amounts); +} + +/// `slashValidatorRedelegationTest` +#[test] +fn test_slash_validator_redelegation() { + let mut storage = TestWlStorage::default(); + let params = OwnedPosParams { + unbonding_len: 4, + ..Default::default() + }; + let gov_params = namada_core::ledger::governance::parameters::GovernanceParameters::default(); + gov_params.init_storage(&mut storage).unwrap(); + write_pos_params(&mut storage, ¶ms).unwrap(); + + let alice = established_address_1(); + let bob = established_address_2(); + + let total_redelegated_unbonded = + validator_total_redelegated_unbonded_handle(&alice); + total_redelegated_unbonded + .at(&Epoch(13)) + .at(&Epoch(10)) + .at(&alice) + .insert(&mut storage, Epoch(7), token::Amount::from(2)) + .unwrap(); + + let outgoing_redelegations = + validator_outgoing_redelegations_handle(&alice).at(&bob); + + let slashes = validator_slashes_handle(&alice); + + let mut slashed_amounts_map = BTreeMap::from_iter([ + (Epoch(15), token::Amount::zero()), + (Epoch(16), token::Amount::zero()), + ]); + let empty_slash_amounts = slashed_amounts_map.clone(); + + // Test case 1 + slash_validator_redelegation( + &storage, + ¶ms, + &alice, + Epoch(14), + &outgoing_redelegations, + &slashes, + &total_redelegated_unbonded, + Dec::one(), + &mut slashed_amounts_map, + ) + .unwrap(); + assert_eq!(slashed_amounts_map, empty_slash_amounts); + + // Test case 2 + total_redelegated_unbonded + .remove_all(&mut storage, &Epoch(13)) + .unwrap(); + slash_validator_redelegation( + &storage, + ¶ms, + &alice, + Epoch(14), + &outgoing_redelegations, + &slashes, + &total_redelegated_unbonded, + Dec::one(), + &mut slashed_amounts_map, + ) + .unwrap(); + assert_eq!(slashed_amounts_map, empty_slash_amounts); + + // Test case 3 + total_redelegated_unbonded + .at(&Epoch(13)) + .at(&Epoch(10)) + .at(&alice) + .insert(&mut storage, Epoch(7), token::Amount::from(2)) + .unwrap(); + outgoing_redelegations + .at(&Epoch(6)) + .insert(&mut storage, Epoch(8), token::Amount::from(7)) + .unwrap(); + slash_validator_redelegation( + &storage, + ¶ms, + &alice, + Epoch(14), + &outgoing_redelegations, + &slashes, + 
&total_redelegated_unbonded, + Dec::one(), + &mut slashed_amounts_map, + ) + .unwrap(); + assert_eq!( + slashed_amounts_map, + BTreeMap::from_iter([ + (Epoch(15), token::Amount::from(7)), + (Epoch(16), token::Amount::from(7)), + ]) + ); + + // Test case 4 + slashed_amounts_map = empty_slash_amounts.clone(); + outgoing_redelegations + .remove_all(&mut storage, &Epoch(6)) + .unwrap(); + outgoing_redelegations + .at(&Epoch(7)) + .insert(&mut storage, Epoch(8), token::Amount::from(7)) + .unwrap(); + slash_validator_redelegation( + &storage, + ¶ms, + &alice, + Epoch(14), + &outgoing_redelegations, + &slashes, + &total_redelegated_unbonded, + Dec::one(), + &mut slashed_amounts_map, + ) + .unwrap(); + assert_eq!( + slashed_amounts_map, + BTreeMap::from_iter([ + (Epoch(15), token::Amount::from(5)), + (Epoch(16), token::Amount::from(5)), + ]) + ); + + // Test case 5 + slashed_amounts_map = BTreeMap::from_iter([ + (Epoch(15), token::Amount::from(2)), + (Epoch(16), token::Amount::from(3)), + ]); + slash_validator_redelegation( + &storage, + ¶ms, + &alice, + Epoch(14), + &outgoing_redelegations, + &slashes, + &total_redelegated_unbonded, + Dec::one(), + &mut slashed_amounts_map, + ) + .unwrap(); + assert_eq!( + slashed_amounts_map, + BTreeMap::from_iter([ + (Epoch(15), token::Amount::from(7)), + (Epoch(16), token::Amount::from(8)), + ]) + ); + + // Test case 6 + slashed_amounts_map = empty_slash_amounts.clone(); + slashes + .push( + &mut storage, + Slash { + epoch: Epoch(8), + block_height: Default::default(), + r#type: SlashType::DuplicateVote, + rate: Dec::one(), + }, + ) + .unwrap(); + slash_validator_redelegation( + &storage, + ¶ms, + &alice, + Epoch(14), + &outgoing_redelegations, + &slashes, + &total_redelegated_unbonded, + Dec::one(), + &mut slashed_amounts_map, + ) + .unwrap(); + assert_eq!(slashed_amounts_map, empty_slash_amounts); +} + +/// `slashValidatorTest` +#[test] +fn test_slash_validator() { + let mut storage = TestWlStorage::default(); + let params = OwnedPosParams { + unbonding_len: 4, + ..Default::default() + }; + let gov_params = namada_core::ledger::governance::parameters::GovernanceParameters::default(); + gov_params.init_storage(&mut storage).unwrap(); + write_pos_params(&mut storage, ¶ms).unwrap(); + + let alice = established_address_1(); + let bob = established_address_2(); + + let total_bonded = total_bonded_handle(&bob); + let total_unbonded = total_unbonded_handle(&bob); + let total_redelegated_bonded = + validator_total_redelegated_bonded_handle(&bob); + let total_redelegated_unbonded = + validator_total_redelegated_unbonded_handle(&bob); + + let infraction_stake = token::Amount::from(23); + + let initial_stakes = BTreeMap::from_iter([ + (Epoch(11), infraction_stake), + (Epoch(12), infraction_stake), + (Epoch(13), infraction_stake), + ]); + let mut exp_res = initial_stakes.clone(); + + let current_epoch = Epoch(10); + let infraction_epoch = + current_epoch - params.slash_processing_epoch_offset(); + let processing_epoch = current_epoch.next(); + let slash_rate = Dec::one(); + + // Test case 1 + total_bonded + .set(&mut storage, 23.into(), infraction_epoch - 2, 0) + .unwrap(); + let res = slash_validator( + &storage, + ¶ms, + &bob, + slash_rate, + processing_epoch, + &Default::default(), + ) + .unwrap(); + assert_eq!(res, exp_res); + + // Test case 2 + total_bonded + .set(&mut storage, 17.into(), infraction_epoch - 2, 0) + .unwrap(); + total_unbonded + .at(&(current_epoch + params.pipeline_len)) + .insert(&mut storage, infraction_epoch - 2, 6.into()) + .unwrap(); + let res 
= slash_validator( + &storage, + ¶ms, + &bob, + slash_rate, + processing_epoch, + &Default::default(), + ) + .unwrap(); + exp_res.insert(Epoch(12), 17.into()); + exp_res.insert(Epoch(13), 17.into()); + assert_eq!(res, exp_res); + + // Test case 3 + total_redelegated_bonded + .at(&infraction_epoch.prev()) + .at(&alice) + .insert(&mut storage, Epoch(2), 5.into()) + .unwrap(); + total_redelegated_bonded + .at(&infraction_epoch.prev()) + .at(&alice) + .insert(&mut storage, Epoch(3), 1.into()) + .unwrap(); + + let res = slash_validator( + &storage, + ¶ms, + &bob, + slash_rate, + processing_epoch, + &Default::default(), + ) + .unwrap(); + assert_eq!(res, exp_res); + + // Test case 4 + total_unbonded_handle(&bob) + .at(&(current_epoch + params.pipeline_len)) + .remove(&mut storage, &(infraction_epoch - 2)) + .unwrap(); + total_unbonded_handle(&bob) + .at(&(current_epoch + params.pipeline_len)) + .insert(&mut storage, infraction_epoch - 1, 6.into()) + .unwrap(); + total_redelegated_unbonded + .at(&(current_epoch + params.pipeline_len)) + .at(&infraction_epoch.prev()) + .at(&alice) + .insert(&mut storage, Epoch(2), 5.into()) + .unwrap(); + total_redelegated_unbonded + .at(&(current_epoch + params.pipeline_len)) + .at(&infraction_epoch.prev()) + .at(&alice) + .insert(&mut storage, Epoch(3), 1.into()) + .unwrap(); + let res = slash_validator( + &storage, + ¶ms, + &bob, + slash_rate, + processing_epoch, + &Default::default(), + ) + .unwrap(); + assert_eq!(res, exp_res); + + // Test case 5 + total_bonded_handle(&bob) + .set(&mut storage, 19.into(), infraction_epoch - 2, 0) + .unwrap(); + total_unbonded_handle(&bob) + .at(&(current_epoch + params.pipeline_len)) + .insert(&mut storage, infraction_epoch - 1, 4.into()) + .unwrap(); + total_redelegated_bonded + .at(¤t_epoch) + .at(&alice) + .insert(&mut storage, Epoch(2), token::Amount::from(1)) + .unwrap(); + total_redelegated_unbonded + .at(&(current_epoch + params.pipeline_len)) + .at(&infraction_epoch.prev()) + .at(&alice) + .remove(&mut storage, &Epoch(3)) + .unwrap(); + total_redelegated_unbonded + .at(&(current_epoch + params.pipeline_len)) + .at(&infraction_epoch.prev()) + .at(&alice) + .insert(&mut storage, Epoch(2), 4.into()) + .unwrap(); + let res = slash_validator( + &storage, + ¶ms, + &bob, + slash_rate, + processing_epoch, + &Default::default(), + ) + .unwrap(); + exp_res.insert(Epoch(12), 19.into()); + exp_res.insert(Epoch(13), 19.into()); + assert_eq!(res, exp_res); + + // Test case 6 + total_unbonded_handle(&bob) + .remove_all(&mut storage, &(current_epoch + params.pipeline_len)) + .unwrap(); + total_redelegated_unbonded + .remove_all(&mut storage, &(current_epoch + params.pipeline_len)) + .unwrap(); + total_redelegated_bonded + .remove_all(&mut storage, ¤t_epoch) + .unwrap(); + total_bonded_handle(&bob) + .set(&mut storage, 23.into(), infraction_epoch - 2, 0) + .unwrap(); + total_bonded_handle(&bob) + .set(&mut storage, 6.into(), current_epoch, 0) + .unwrap(); + + let res = slash_validator( + &storage, + ¶ms, + &bob, + slash_rate, + processing_epoch, + &Default::default(), + ) + .unwrap(); + exp_res = initial_stakes; + assert_eq!(res, exp_res); + + // Test case 7 + total_bonded + .get_data_handler() + .remove(&mut storage, ¤t_epoch) + .unwrap(); + total_unbonded + .at(¤t_epoch.next()) + .insert(&mut storage, current_epoch, 6.into()) + .unwrap(); + let res = slash_validator( + &storage, + ¶ms, + &bob, + slash_rate, + processing_epoch, + &Default::default(), + ) + .unwrap(); + assert_eq!(res, exp_res); + + // Test case 8 + total_bonded + 
.get_data_handler() + .insert(&mut storage, current_epoch, 3.into()) + .unwrap(); + total_unbonded + .at(¤t_epoch.next()) + .insert(&mut storage, current_epoch, 3.into()) + .unwrap(); + let res = slash_validator( + &storage, + ¶ms, + &bob, + slash_rate, + processing_epoch, + &Default::default(), + ) + .unwrap(); + assert_eq!(res, exp_res); + + // Test case 9 + total_unbonded + .remove_all(&mut storage, ¤t_epoch.next()) + .unwrap(); + total_bonded + .set(&mut storage, 6.into(), current_epoch, 0) + .unwrap(); + total_redelegated_bonded + .at(¤t_epoch) + .at(&alice) + .insert(&mut storage, 2.into(), 5.into()) + .unwrap(); + total_redelegated_bonded + .at(¤t_epoch) + .at(&alice) + .insert(&mut storage, 3.into(), 1.into()) + .unwrap(); + let res = slash_validator( + &storage, + ¶ms, + &bob, + slash_rate, + processing_epoch, + &Default::default(), + ) + .unwrap(); + assert_eq!(res, exp_res); + + // Test case 10 + total_redelegated_bonded + .remove_all(&mut storage, ¤t_epoch) + .unwrap(); + total_bonded + .get_data_handler() + .remove(&mut storage, ¤t_epoch) + .unwrap(); + total_redelegated_unbonded + .at(¤t_epoch.next()) + .at(¤t_epoch) + .at(&alice) + .insert(&mut storage, 2.into(), 5.into()) + .unwrap(); + total_redelegated_unbonded + .at(¤t_epoch.next()) + .at(¤t_epoch) + .at(&alice) + .insert(&mut storage, 3.into(), 1.into()) + .unwrap(); + let res = slash_validator( + &storage, + ¶ms, + &bob, + slash_rate, + processing_epoch, + &Default::default(), + ) + .unwrap(); + assert_eq!(res, exp_res); + + // Test case 11 + total_bonded + .set(&mut storage, 2.into(), current_epoch, 0) + .unwrap(); + total_redelegated_unbonded + .at(¤t_epoch.next()) + .at(¤t_epoch) + .at(&alice) + .insert(&mut storage, 2.into(), 4.into()) + .unwrap(); + total_redelegated_unbonded + .at(¤t_epoch.next()) + .at(¤t_epoch) + .at(&alice) + .remove(&mut storage, &3.into()) + .unwrap(); + total_redelegated_bonded + .at(¤t_epoch) + .at(&alice) + .insert(&mut storage, 2.into(), 1.into()) + .unwrap(); + total_redelegated_bonded + .at(¤t_epoch) + .at(&alice) + .insert(&mut storage, 3.into(), 1.into()) + .unwrap(); + let res = slash_validator( + &storage, + ¶ms, + &bob, + slash_rate, + processing_epoch, + &Default::default(), + ) + .unwrap(); + assert_eq!(res, exp_res); + + // Test case 12 + total_bonded + .set(&mut storage, 6.into(), current_epoch, 0) + .unwrap(); + total_bonded + .set(&mut storage, 2.into(), current_epoch.next(), 0) + .unwrap(); + total_redelegated_bonded + .remove_all(&mut storage, ¤t_epoch) + .unwrap(); + total_redelegated_bonded + .at(¤t_epoch.next()) + .at(&alice) + .insert(&mut storage, 2.into(), 1.into()) + .unwrap(); + total_redelegated_bonded + .at(¤t_epoch.next()) + .at(&alice) + .insert(&mut storage, 3.into(), 1.into()) + .unwrap(); + let res = slash_validator( + &storage, + ¶ms, + &bob, + slash_rate, + processing_epoch, + &Default::default(), + ) + .unwrap(); + assert_eq!(res, exp_res); + + // Test case 13 + validator_slashes_handle(&bob) + .push( + &mut storage, + Slash { + epoch: infraction_epoch.prev(), + block_height: 0, + r#type: SlashType::DuplicateVote, + rate: Dec::one(), + }, + ) + .unwrap(); + total_redelegated_unbonded + .remove_all(&mut storage, ¤t_epoch.next()) + .unwrap(); + total_bonded + .get_data_handler() + .remove(&mut storage, ¤t_epoch.next()) + .unwrap(); + total_redelegated_bonded + .remove_all(&mut storage, ¤t_epoch.next()) + .unwrap(); + let res = slash_validator( + &storage, + ¶ms, + &bob, + slash_rate, + processing_epoch, + &Default::default(), + ) + .unwrap(); + 
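+    // The 100% slash recorded just before the infraction epoch already
+    // accounts for bob's entire bond, so processing the new slash finds
+    // nothing further to slash at any epoch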
exp_res.insert(Epoch(11), 0.into()); + exp_res.insert(Epoch(12), 0.into()); + exp_res.insert(Epoch(13), 0.into()); + assert_eq!(res, exp_res); +} + +/// `computeAmountAfterSlashingUnbondTest` +#[test] +fn test_compute_amount_after_slashing_unbond() { + let mut storage = TestWlStorage::default(); + let params = OwnedPosParams { + unbonding_len: 4, + ..Default::default() + }; + + // Test data + let alice = established_address_1(); + let bob = established_address_2(); + let unbonds: BTreeMap = BTreeMap::from_iter([ + ((Epoch(2)), token::Amount::from(5)), + ((Epoch(4)), token::Amount::from(6)), + ]); + let redelegated_unbonds: EagerRedelegatedUnbonds = BTreeMap::from_iter([( + Epoch(2), + BTreeMap::from_iter([( + alice.clone(), + BTreeMap::from_iter([(Epoch(1), token::Amount::from(1))]), + )]), + )]); + + // Test case 1 + let slashes = vec![]; + let result = compute_amount_after_slashing_unbond( + &storage, + ¶ms, + &unbonds, + &redelegated_unbonds, + slashes, + ) + .unwrap(); + assert_eq!(result.sum, 11.into()); + itertools::assert_equal( + result.epoch_map, + [(2.into(), 5.into()), (4.into(), 6.into())], + ); + + // Test case 2 + let bob_slash = Slash { + epoch: Epoch(5), + block_height: Default::default(), + r#type: SlashType::DuplicateVote, + rate: Dec::one(), + }; + let slashes = vec![bob_slash.clone()]; + validator_slashes_handle(&bob) + .push(&mut storage, bob_slash) + .unwrap(); + let result = compute_amount_after_slashing_unbond( + &storage, + ¶ms, + &unbonds, + &redelegated_unbonds, + slashes, + ) + .unwrap(); + assert_eq!(result.sum, 0.into()); + itertools::assert_equal( + result.epoch_map, + [(2.into(), 0.into()), (4.into(), 0.into())], + ); + + // Test case 3 + let alice_slash = Slash { + epoch: Epoch(0), + block_height: Default::default(), + r#type: SlashType::DuplicateVote, + rate: Dec::one(), + }; + let slashes = vec![alice_slash.clone()]; + validator_slashes_handle(&alice) + .push(&mut storage, alice_slash) + .unwrap(); + validator_slashes_handle(&bob).pop(&mut storage).unwrap(); + let result = compute_amount_after_slashing_unbond( + &storage, + ¶ms, + &unbonds, + &redelegated_unbonds, + slashes, + ) + .unwrap(); + assert_eq!(result.sum, 11.into()); + itertools::assert_equal( + result.epoch_map, + [(2.into(), 5.into()), (4.into(), 6.into())], + ); + + // Test case 4 + let alice_slash = Slash { + epoch: Epoch(1), + block_height: Default::default(), + r#type: SlashType::DuplicateVote, + rate: Dec::one(), + }; + let slashes = vec![alice_slash.clone()]; + validator_slashes_handle(&alice).pop(&mut storage).unwrap(); + validator_slashes_handle(&alice) + .push(&mut storage, alice_slash) + .unwrap(); + let result = compute_amount_after_slashing_unbond( + &storage, + ¶ms, + &unbonds, + &redelegated_unbonds, + slashes, + ) + .unwrap(); + assert_eq!(result.sum, 10.into()); + itertools::assert_equal( + result.epoch_map, + [(2.into(), 4.into()), (4.into(), 6.into())], + ); +} + +/// `computeAmountAfterSlashingWithdrawTest` +#[test] +fn test_compute_amount_after_slashing_withdraw() { + let mut storage = TestWlStorage::default(); + let params = OwnedPosParams { + unbonding_len: 4, + ..Default::default() + }; + + // Test data + let alice = established_address_1(); + let bob = established_address_2(); + let unbonds_and_redelegated_unbonds: BTreeMap< + (Epoch, Epoch), + (token::Amount, EagerRedelegatedBondsMap), + > = BTreeMap::from_iter([ + ( + (Epoch(2), Epoch(20)), + ( + // unbond + token::Amount::from(5), + // redelegations + BTreeMap::from_iter([( + alice.clone(), + 
BTreeMap::from_iter([(Epoch(1), token::Amount::from(1))]), + )]), + ), + ), + ( + (Epoch(4), Epoch(20)), + ( + // unbond + token::Amount::from(6), + // redelegations + BTreeMap::default(), + ), + ), + ]); + + // Test case 1 + let slashes = vec![]; + let result = compute_amount_after_slashing_withdraw( + &storage, + ¶ms, + &unbonds_and_redelegated_unbonds, + slashes, + ) + .unwrap(); + assert_eq!(result.sum, 11.into()); + itertools::assert_equal( + result.epoch_map, + [(2.into(), 5.into()), (4.into(), 6.into())], + ); + + // Test case 2 + let bob_slash = Slash { + epoch: Epoch(5), + block_height: Default::default(), + r#type: SlashType::DuplicateVote, + rate: Dec::one(), + }; + let slashes = vec![bob_slash.clone()]; + validator_slashes_handle(&bob) + .push(&mut storage, bob_slash) + .unwrap(); + let result = compute_amount_after_slashing_withdraw( + &storage, + ¶ms, + &unbonds_and_redelegated_unbonds, + slashes, + ) + .unwrap(); + assert_eq!(result.sum, 0.into()); + itertools::assert_equal( + result.epoch_map, + [(2.into(), 0.into()), (4.into(), 0.into())], + ); + + // Test case 3 + let alice_slash = Slash { + epoch: Epoch(0), + block_height: Default::default(), + r#type: SlashType::DuplicateVote, + rate: Dec::one(), + }; + let slashes = vec![alice_slash.clone()]; + validator_slashes_handle(&alice) + .push(&mut storage, alice_slash) + .unwrap(); + validator_slashes_handle(&bob).pop(&mut storage).unwrap(); + let result = compute_amount_after_slashing_withdraw( + &storage, + ¶ms, + &unbonds_and_redelegated_unbonds, + slashes, + ) + .unwrap(); + assert_eq!(result.sum, 11.into()); + itertools::assert_equal( + result.epoch_map, + [(2.into(), 5.into()), (4.into(), 6.into())], + ); + + // Test case 4 + let alice_slash = Slash { + epoch: Epoch(1), + block_height: Default::default(), + r#type: SlashType::DuplicateVote, + rate: Dec::one(), + }; + let slashes = vec![alice_slash.clone()]; + validator_slashes_handle(&alice).pop(&mut storage).unwrap(); + validator_slashes_handle(&alice) + .push(&mut storage, alice_slash) + .unwrap(); + let result = compute_amount_after_slashing_withdraw( + &storage, + ¶ms, + &unbonds_and_redelegated_unbonds, + slashes, + ) + .unwrap(); + assert_eq!(result.sum, 10.into()); + itertools::assert_equal( + result.epoch_map, + [(2.into(), 4.into()), (4.into(), 6.into())], + ); +} + +/// SM test case 1 from Brent +#[test] +fn test_from_sm_case_1() { + use namada_core::types::address::testing::established_address_4; + + let mut storage = TestWlStorage::default(); + let gov_params = namada_core::ledger::governance::parameters::GovernanceParameters::default(); + gov_params.init_storage(&mut storage).unwrap(); + write_pos_params(&mut storage, &OwnedPosParams::default()).unwrap(); + + let validator = established_address_1(); + let redeleg_src_1 = established_address_2(); + let redeleg_src_2 = established_address_3(); + let owner = established_address_4(); + let unbond_amount = token::Amount::from(3130688); + println!( + "Owner: {owner}\nValidator: {validator}\nRedeleg src 1: \ + {redeleg_src_1}\nRedeleg src 2: {redeleg_src_2}" + ); + + // Validator's incoming redelegations + let outer_epoch_1 = Epoch(27); + // from redeleg_src_1 + let epoch_1_redeleg_1 = token::Amount::from(8516); + // from redeleg_src_2 + let epoch_1_redeleg_2 = token::Amount::from(5704386); + let outer_epoch_2 = Epoch(30); + // from redeleg_src_2 + let epoch_2_redeleg_2 = token::Amount::from(1035191); + + // Insert the data - bonds and redelegated bonds + let bonds_handle = bond_handle(&owner, &validator); + 
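+    // Credit the incoming redelegations to the owner's bond: the two
+    // redelegations received at epoch 27 (8516 + 5704386) and the one
+    // received at epoch 30 (1035191)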
bonds_handle
+        .add(
+            &mut storage,
+            epoch_1_redeleg_1 + epoch_1_redeleg_2,
+            outer_epoch_1,
+            0,
+        )
+        .unwrap();
+    bonds_handle
+        .add(&mut storage, epoch_2_redeleg_2, outer_epoch_2, 0)
+        .unwrap();
+
+    let redelegated_bonds_map_1 = delegator_redelegated_bonds_handle(&owner)
+        .at(&validator)
+        .at(&outer_epoch_1);
+    redelegated_bonds_map_1
+        .at(&redeleg_src_1)
+        .insert(&mut storage, Epoch(14), epoch_1_redeleg_1)
+        .unwrap();
+    redelegated_bonds_map_1
+        .at(&redeleg_src_2)
+        .insert(&mut storage, Epoch(18), epoch_1_redeleg_2)
+        .unwrap();
+    let redelegated_bonds_map_1 = delegator_redelegated_bonds_handle(&owner)
+        .at(&validator)
+        .at(&outer_epoch_1);
+
+    let redelegated_bonds_map_2 = delegator_redelegated_bonds_handle(&owner)
+        .at(&validator)
+        .at(&outer_epoch_2);
+    redelegated_bonds_map_2
+        .at(&redeleg_src_2)
+        .insert(&mut storage, Epoch(18), epoch_2_redeleg_2)
+        .unwrap();
+
+    // Find the modified redelegation the same way as `unbond_tokens`
+    let bonds_to_unbond = find_bonds_to_remove(
+        &storage,
+        &bonds_handle.get_data_handler(),
+        unbond_amount,
+    )
+    .unwrap();
+    dbg!(&bonds_to_unbond);
+
+    let (new_entry_epoch, new_bond_amount) = bonds_to_unbond.new_entry.unwrap();
+    assert_eq!(outer_epoch_1, new_entry_epoch);
+    // The modified bond should be sum of all redelegations less the unbonded
+    // amount
+    assert_eq!(
+        epoch_1_redeleg_1 + epoch_1_redeleg_2 + epoch_2_redeleg_2
+            - unbond_amount,
+        new_bond_amount
+    );
+    // The current bond should be sum of redelegations from the modified epoch
+    let cur_bond_amount = bonds_handle
+        .get_delta_val(&storage, new_entry_epoch)
+        .unwrap()
+        .unwrap_or_default();
+    assert_eq!(epoch_1_redeleg_1 + epoch_1_redeleg_2, cur_bond_amount);
+
+    let mr = compute_modified_redelegation(
+        &storage,
+        &redelegated_bonds_map_1,
+        new_entry_epoch,
+        cur_bond_amount - new_bond_amount,
+    )
+    .unwrap();
+
+    let exp_mr = ModifiedRedelegation {
+        epoch: Some(Epoch(27)),
+        validators_to_remove: BTreeSet::from_iter([redeleg_src_2.clone()]),
+        validator_to_modify: Some(redeleg_src_2),
+        epochs_to_remove: BTreeSet::from_iter([Epoch(18)]),
+        epoch_to_modify: Some(Epoch(18)),
+        new_amount: Some(token::Amount::from(3608889)),
+    };
+
+    pretty_assertions::assert_eq!(mr, exp_mr);
+}
diff --git a/proof_of_stake/src/tests/test_pos.rs b/proof_of_stake/src/tests/test_pos.rs
new file mode 100644
index 0000000000..a37268a1a6
--- /dev/null
+++ b/proof_of_stake/src/tests/test_pos.rs
@@ -0,0 +1,1653 @@
+//!
PoS system tests + +use std::collections::{BTreeMap, HashSet}; + +use namada_core::ledger::storage::testing::TestWlStorage; +use namada_core::ledger::storage_api::collections::lazy_map::Collectable; +use namada_core::ledger::storage_api::token::{credit_tokens, read_balance}; +use namada_core::ledger::storage_api::StorageRead; +use namada_core::types::address::Address; +use namada_core::types::dec::Dec; +use namada_core::types::key::testing::{ + common_sk_from_simple_seed, gen_keypair, +}; +use namada_core::types::key::RefTo; +use namada_core::types::storage::{BlockHeight, Epoch}; +use namada_core::types::{address, key, token}; +use proptest::prelude::*; +use proptest::test_runner::Config; +// Use `RUST_LOG=info` (or another tracing level) and `--nocapture` to see +// `tracing` logs from tests +use test_log::test; + +use crate::parameters::testing::arb_pos_params; +use crate::parameters::OwnedPosParams; +use crate::queries::bonds_and_unbonds; +use crate::rewards::{ + log_block_rewards, update_rewards_products_and_mint_inflation, + PosRewardsCalculator, +}; +use crate::slashing::{process_slashes, slash}; +use crate::storage::{ + get_consensus_key_set, read_below_threshold_validator_set_addresses, + read_consensus_validator_set_addresses_with_stake, read_total_stake, + read_validator_deltas_value, rewards_accumulator_handle, + total_deltas_handle, +}; +use crate::test_utils::test_init_genesis; +use crate::tests::helpers::{ + advance_epoch, arb_genesis_validators, arb_params_and_genesis_validators, +}; +use crate::types::{ + into_tm_voting_power, BondDetails, BondId, BondsAndUnbondsDetails, + GenesisValidator, SlashType, UnbondDetails, ValidatorState, VoteInfo, + WeightedValidator, +}; +use crate::{ + below_capacity_validator_set_handle, bond_handle, bond_tokens, + change_consensus_key, consensus_validator_set_handle, is_delegator, + is_validator, read_validator_stake, redelegate_tokens, + staking_token_address, unbond_handle, unbond_tokens, unjail_validator, + validator_consensus_key_handle, validator_set_positions_handle, + validator_state_handle, withdraw_tokens, +}; + +proptest! { + // Generate arb valid input for `test_test_init_genesis_aux` + #![proptest_config(Config { + cases: 100, + .. Config::default() + })] + #[test] + fn test_test_init_genesis( + + (pos_params, genesis_validators) in arb_params_and_genesis_validators(Some(5), 1..10), + start_epoch in (0_u64..1000).prop_map(Epoch), + + ) { + test_test_init_genesis_aux(pos_params, start_epoch, genesis_validators) + } +} + +proptest! { + // Generate arb valid input for `test_bonds_aux` + #![proptest_config(Config { + cases: 100, + .. Config::default() + })] + #[test] + fn test_bonds( + + (pos_params, genesis_validators) in arb_params_and_genesis_validators(Some(5), 1..3), + + ) { + test_bonds_aux(pos_params, genesis_validators) + } +} + +proptest! { + // Generate arb valid input for `test_unjail_validator_aux` + #![proptest_config(Config { + cases: 100, + .. Config::default() + })] + #[test] + fn test_unjail_validator( + (pos_params, genesis_validators) + in arb_params_and_genesis_validators(Some(4),6..9) + ) { + test_unjail_validator_aux(pos_params, + genesis_validators) + } +} + +proptest! { + // Generate arb valid input for `test_unslashed_bond_amount_aux` + #![proptest_config(Config { + cases: 1, + .. Config::default() + })] + #[test] + fn test_unslashed_bond_amount( + + genesis_validators in arb_genesis_validators(4..5, None), + + ) { + test_unslashed_bond_amount_aux(genesis_validators) + } +} + +proptest! 
{ + // Generate arb valid input for `test_log_block_rewards_aux` + #![proptest_config(Config { + cases: 1, + .. Config::default() + })] + #[test] + fn test_log_block_rewards( + genesis_validators in arb_genesis_validators(4..10, None), + params in arb_pos_params(Some(5)) + + ) { + test_log_block_rewards_aux(genesis_validators, params) + } +} + +proptest! { + // Generate arb valid input for `test_update_rewards_products_aux` + #![proptest_config(Config { + cases: 1, + .. Config::default() + })] + #[test] + fn test_update_rewards_products( + genesis_validators in arb_genesis_validators(4..10, None), + + ) { + test_update_rewards_products_aux(genesis_validators) + } +} + +proptest! { + // Generate arb valid input for `test_consensus_key_change` + #![proptest_config(Config { + cases: 1, + .. Config::default() + })] + #[test] + fn test_consensus_key_change( + + genesis_validators in arb_genesis_validators(1..2, None), + + ) { + test_consensus_key_change_aux(genesis_validators) + } +} + +proptest! { + // Generate arb valid input for `test_is_delegator` + #![proptest_config(Config { + cases: 100, + .. Config::default() + })] + #[test] + fn test_is_delegator( + + genesis_validators in arb_genesis_validators(2..3, None), + + ) { + test_is_delegator_aux(genesis_validators) + } +} + +/// Test genesis initialization +fn test_test_init_genesis_aux( + params: OwnedPosParams, + start_epoch: Epoch, + mut validators: Vec, +) { + // println!( + // "Test inputs: {params:?}, {start_epoch}, genesis validators: \ + // {validators:#?}" + // ); + let mut s = TestWlStorage::default(); + s.storage.block.epoch = start_epoch; + + validators.sort_by(|a, b| b.tokens.cmp(&a.tokens)); + let params = test_init_genesis( + &mut s, + params, + validators.clone().into_iter(), + start_epoch, + ) + .unwrap(); + + let mut bond_details = bonds_and_unbonds(&s, None, None).unwrap(); + assert!(bond_details.iter().all(|(_id, details)| { + details.unbonds.is_empty() && details.slashes.is_empty() + })); + + for (i, validator) in validators.into_iter().enumerate() { + let addr = &validator.address; + let self_bonds = bond_details + .remove(&BondId { + source: addr.clone(), + validator: addr.clone(), + }) + .unwrap(); + assert_eq!(self_bonds.bonds.len(), 1); + assert_eq!( + self_bonds.bonds[0], + BondDetails { + start: start_epoch, + amount: validator.tokens, + slashed_amount: None, + } + ); + + let state = validator_state_handle(&validator.address) + .get(&s, start_epoch, ¶ms) + .unwrap(); + if (i as u64) < params.max_validator_slots + && validator.tokens >= params.validator_stake_threshold + { + // should be in consensus set + let handle = consensus_validator_set_handle().at(&start_epoch); + assert!(handle.at(&validator.tokens).iter(&s).unwrap().any( + |result| { + let (_pos, addr) = result.unwrap(); + addr == validator.address + } + )); + assert_eq!(state, Some(ValidatorState::Consensus)); + } else if validator.tokens >= params.validator_stake_threshold { + // Should be in below-capacity set if its tokens are greater than + // `validator_stake_threshold` + let handle = below_capacity_validator_set_handle().at(&start_epoch); + assert!(handle.at(&validator.tokens.into()).iter(&s).unwrap().any( + |result| { + let (_pos, addr) = result.unwrap(); + addr == validator.address + } + )); + assert_eq!(state, Some(ValidatorState::BelowCapacity)); + } else { + // Should be in below-threshold + let bt_addresses = + read_below_threshold_validator_set_addresses(&s, start_epoch) + .unwrap(); + assert!( + bt_addresses + .into_iter() + .any(|addr| { 
addr == validator.address }) + ); + assert_eq!(state, Some(ValidatorState::BelowThreshold)); + } + } +} + +/// Test bonding +/// NOTE: copy validator sets each time we advance the epoch +fn test_bonds_aux(params: OwnedPosParams, validators: Vec) { + // This can be useful for debugging: + // params.pipeline_len = 2; + // params.unbonding_len = 4; + // println!("\nTest inputs: {params:?}, genesis validators: + // {validators:#?}"); + let mut s = TestWlStorage::default(); + + // Genesis + let start_epoch = s.storage.block.epoch; + let mut current_epoch = s.storage.block.epoch; + let params = test_init_genesis( + &mut s, + params, + validators.clone().into_iter(), + current_epoch, + ) + .unwrap(); + s.commit_block().unwrap(); + + // Advance to epoch 1 + current_epoch = advance_epoch(&mut s, ¶ms); + let self_bond_epoch = current_epoch; + + let validator = validators.first().unwrap(); + + // Read some data before submitting bond + let pipeline_epoch = current_epoch + params.pipeline_len; + let staking_token = staking_token_address(&s); + let pos_balance_pre = s + .read::(&token::balance_key( + &staking_token, + &crate::ADDRESS, + )) + .unwrap() + .unwrap_or_default(); + let total_stake_before = + read_total_stake(&s, ¶ms, pipeline_epoch).unwrap(); + + // Self-bond + let amount_self_bond = token::Amount::from_uint(100_500_000, 0).unwrap(); + credit_tokens(&mut s, &staking_token, &validator.address, amount_self_bond) + .unwrap(); + bond_tokens( + &mut s, + None, + &validator.address, + amount_self_bond, + current_epoch, + None, + ) + .unwrap(); + + // Check the bond delta + let self_bond = bond_handle(&validator.address, &validator.address); + let delta = self_bond.get_delta_val(&s, pipeline_epoch).unwrap(); + assert_eq!(delta, Some(amount_self_bond)); + + // Check the validator in the validator set + let set = + read_consensus_validator_set_addresses_with_stake(&s, pipeline_epoch) + .unwrap(); + assert!(set.into_iter().any( + |WeightedValidator { + bonded_stake, + address, + }| { + address == validator.address + && bonded_stake == validator.tokens + amount_self_bond + } + )); + + let val_deltas = + read_validator_deltas_value(&s, &validator.address, &pipeline_epoch) + .unwrap(); + assert_eq!(val_deltas, Some(amount_self_bond.change())); + + let total_deltas_handle = total_deltas_handle(); + assert_eq!( + current_epoch, + total_deltas_handle.get_last_update(&s).unwrap().unwrap() + ); + let total_stake_after = + read_total_stake(&s, ¶ms, pipeline_epoch).unwrap(); + assert_eq!(total_stake_before + amount_self_bond, total_stake_after); + + // Check bond details after self-bond + let self_bond_id = BondId { + source: validator.address.clone(), + validator: validator.address.clone(), + }; + let check_bond_details = |ix, bond_details: BondsAndUnbondsDetails| { + println!("Check index {ix}"); + let details = bond_details.get(&self_bond_id).unwrap(); + assert_eq!( + details.bonds.len(), + 2, + "Contains genesis and newly added self-bond" + ); + // dbg!(&details.bonds); + assert_eq!( + details.bonds[0], + BondDetails { + start: start_epoch, + amount: validator.tokens, + slashed_amount: None + }, + ); + assert_eq!( + details.bonds[1], + BondDetails { + start: pipeline_epoch, + amount: amount_self_bond, + slashed_amount: None + }, + ); + }; + // Try to call it with different combinations of owner/validator args + check_bond_details(0, bonds_and_unbonds(&s, None, None).unwrap()); + check_bond_details( + 1, + bonds_and_unbonds(&s, Some(validator.address.clone()), None).unwrap(), + ); + check_bond_details( + 2, 
+ bonds_and_unbonds(&s, None, Some(validator.address.clone())).unwrap(), + ); + check_bond_details( + 3, + bonds_and_unbonds( + &s, + Some(validator.address.clone()), + Some(validator.address.clone()), + ) + .unwrap(), + ); + + // Get a non-validating account with tokens + let delegator = address::testing::gen_implicit_address(); + let amount_del = token::Amount::from_uint(201_000_000, 0).unwrap(); + credit_tokens(&mut s, &staking_token, &delegator, amount_del).unwrap(); + let balance_key = token::balance_key(&staking_token, &delegator); + let balance = s + .read::(&balance_key) + .unwrap() + .unwrap_or_default(); + assert_eq!(balance, amount_del); + + // Advance to epoch 3 + advance_epoch(&mut s, ¶ms); + current_epoch = advance_epoch(&mut s, ¶ms); + let pipeline_epoch = current_epoch + params.pipeline_len; + + // Delegation + let delegation_epoch = current_epoch; + bond_tokens( + &mut s, + Some(&delegator), + &validator.address, + amount_del, + current_epoch, + None, + ) + .unwrap(); + let val_stake_pre = read_validator_stake( + &s, + ¶ms, + &validator.address, + pipeline_epoch.prev(), + ) + .unwrap(); + let val_stake_post = + read_validator_stake(&s, ¶ms, &validator.address, pipeline_epoch) + .unwrap(); + assert_eq!(validator.tokens + amount_self_bond, val_stake_pre); + assert_eq!( + validator.tokens + amount_self_bond + amount_del, + val_stake_post + ); + let delegation = bond_handle(&delegator, &validator.address); + assert_eq!( + delegation + .get_sum(&s, pipeline_epoch.prev(), ¶ms) + .unwrap() + .unwrap_or_default(), + token::Amount::zero() + ); + assert_eq!( + delegation + .get_sum(&s, pipeline_epoch, ¶ms) + .unwrap() + .unwrap_or_default(), + amount_del + ); + + // Check delegation bonds details after delegation + let delegation_bond_id = BondId { + source: delegator.clone(), + validator: validator.address.clone(), + }; + let check_bond_details = |ix, bond_details: BondsAndUnbondsDetails| { + println!("Check index {ix}"); + assert_eq!(bond_details.len(), 1); + let details = bond_details.get(&delegation_bond_id).unwrap(); + assert_eq!(details.bonds.len(), 1,); + // dbg!(&details.bonds); + assert_eq!( + details.bonds[0], + BondDetails { + start: pipeline_epoch, + amount: amount_del, + slashed_amount: None + }, + ); + }; + // Try to call it with different combinations of owner/validator args + check_bond_details( + 0, + bonds_and_unbonds(&s, Some(delegator.clone()), None).unwrap(), + ); + check_bond_details( + 1, + bonds_and_unbonds( + &s, + Some(delegator.clone()), + Some(validator.address.clone()), + ) + .unwrap(), + ); + + // Check all bond details (self-bonds and delegation) + let check_bond_details = |ix, bond_details: BondsAndUnbondsDetails| { + println!("Check index {ix}"); + let self_bond_details = bond_details.get(&self_bond_id).unwrap(); + let delegation_details = bond_details.get(&delegation_bond_id).unwrap(); + assert_eq!( + self_bond_details.bonds.len(), + 2, + "Contains genesis and newly added self-bond" + ); + assert_eq!( + self_bond_details.bonds[0], + BondDetails { + start: start_epoch, + amount: validator.tokens, + slashed_amount: None + }, + ); + assert_eq!(self_bond_details.bonds[1].amount, amount_self_bond); + assert_eq!( + delegation_details.bonds[0], + BondDetails { + start: pipeline_epoch, + amount: amount_del, + slashed_amount: None + }, + ); + }; + // Try to call it with different combinations of owner/validator args + check_bond_details(0, bonds_and_unbonds(&s, None, None).unwrap()); + check_bond_details( + 1, + bonds_and_unbonds(&s, None, 
Some(validator.address.clone())).unwrap(), + ); + + // Advance to epoch 5 + for _ in 0..2 { + current_epoch = advance_epoch(&mut s, ¶ms); + } + let pipeline_epoch = current_epoch + params.pipeline_len; + + // Unbond the self-bond with an amount that will remove all of the self-bond + // executed after genesis and some of the genesis bond + let amount_self_unbond: token::Amount = + amount_self_bond + (validator.tokens / 2); + // When the difference is 0, only the non-genesis self-bond is unbonded + let unbonded_genesis_self_bond = + amount_self_unbond - amount_self_bond != token::Amount::zero(); + + let self_unbond_epoch = s.storage.block.epoch; + + unbond_tokens( + &mut s, + None, + &validator.address, + amount_self_unbond, + current_epoch, + false, + ) + .unwrap(); + + let val_stake_pre = read_validator_stake( + &s, + ¶ms, + &validator.address, + pipeline_epoch.prev(), + ) + .unwrap(); + + let val_stake_post = + read_validator_stake(&s, ¶ms, &validator.address, pipeline_epoch) + .unwrap(); + + let val_delta = + read_validator_deltas_value(&s, &validator.address, &pipeline_epoch) + .unwrap(); + let unbond = unbond_handle(&validator.address, &validator.address); + + assert_eq!(val_delta, Some(-amount_self_unbond.change())); + assert_eq!( + unbond + .at(&Epoch::default()) + .get( + &s, + &(pipeline_epoch + + params.unbonding_len + + params.cubic_slashing_window_length) + ) + .unwrap(), + if unbonded_genesis_self_bond { + Some(amount_self_unbond - amount_self_bond) + } else { + None + } + ); + assert_eq!( + unbond + .at(&(self_bond_epoch + params.pipeline_len)) + .get( + &s, + &(pipeline_epoch + + params.unbonding_len + + params.cubic_slashing_window_length) + ) + .unwrap(), + Some(amount_self_bond) + ); + assert_eq!( + val_stake_pre, + validator.tokens + amount_self_bond + amount_del + ); + assert_eq!( + val_stake_post, + validator.tokens + amount_self_bond + amount_del - amount_self_unbond + ); + + // Check all bond and unbond details (self-bonds and delegation) + let check_bond_details = |ix, bond_details: BondsAndUnbondsDetails| { + println!("Check index {ix}"); + // dbg!(&bond_details); + assert_eq!(bond_details.len(), 2); + let self_bond_details = bond_details.get(&self_bond_id).unwrap(); + let delegation_details = bond_details.get(&delegation_bond_id).unwrap(); + assert_eq!( + self_bond_details.bonds.len(), + 1, + "Contains only part of the genesis bond now" + ); + assert_eq!( + self_bond_details.bonds[0], + BondDetails { + start: start_epoch, + amount: validator.tokens + amount_self_bond + - amount_self_unbond, + slashed_amount: None + }, + ); + assert_eq!( + delegation_details.bonds[0], + BondDetails { + start: delegation_epoch + params.pipeline_len, + amount: amount_del, + slashed_amount: None + }, + ); + assert_eq!( + self_bond_details.unbonds.len(), + if unbonded_genesis_self_bond { 2 } else { 1 }, + "Contains a full unbond of the last self-bond and an unbond from \ + the genesis bond" + ); + if unbonded_genesis_self_bond { + assert_eq!( + self_bond_details.unbonds[0], + UnbondDetails { + start: start_epoch, + withdraw: self_unbond_epoch + + params.pipeline_len + + params.unbonding_len + + params.cubic_slashing_window_length, + amount: amount_self_unbond - amount_self_bond, + slashed_amount: None + } + ); + } + assert_eq!( + self_bond_details.unbonds[usize::from(unbonded_genesis_self_bond)], + UnbondDetails { + start: self_bond_epoch + params.pipeline_len, + withdraw: self_unbond_epoch + + params.pipeline_len + + params.unbonding_len + + params.cubic_slashing_window_length, + 
amount: amount_self_bond, + slashed_amount: None + } + ); + }; + check_bond_details( + 0, + bonds_and_unbonds(&s, None, Some(validator.address.clone())).unwrap(), + ); + + // Unbond delegation + let amount_undel = token::Amount::from_uint(1_000_000, 0).unwrap(); + unbond_tokens( + &mut s, + Some(&delegator), + &validator.address, + amount_undel, + current_epoch, + false, + ) + .unwrap(); + + let val_stake_pre = read_validator_stake( + &s, + ¶ms, + &validator.address, + pipeline_epoch.prev(), + ) + .unwrap(); + let val_stake_post = + read_validator_stake(&s, ¶ms, &validator.address, pipeline_epoch) + .unwrap(); + let val_delta = + read_validator_deltas_value(&s, &validator.address, &pipeline_epoch) + .unwrap(); + let unbond = unbond_handle(&delegator, &validator.address); + + assert_eq!( + val_delta, + Some(-(amount_self_unbond + amount_undel).change()) + ); + assert_eq!( + unbond + .at(&(delegation_epoch + params.pipeline_len)) + .get( + &s, + &(pipeline_epoch + + params.unbonding_len + + params.cubic_slashing_window_length) + ) + .unwrap(), + Some(amount_undel) + ); + assert_eq!( + val_stake_pre, + validator.tokens + amount_self_bond + amount_del + ); + assert_eq!( + val_stake_post, + validator.tokens + amount_self_bond - amount_self_unbond + amount_del + - amount_undel + ); + + let withdrawable_offset = params.unbonding_len + + params.pipeline_len + + params.cubic_slashing_window_length; + + // Advance to withdrawable epoch + for _ in 0..withdrawable_offset { + current_epoch = advance_epoch(&mut s, ¶ms); + } + + let pos_balance = s + .read::(&token::balance_key( + &staking_token, + &crate::ADDRESS, + )) + .unwrap(); + + assert_eq!( + Some(pos_balance_pre + amount_self_bond + amount_del), + pos_balance + ); + + // Withdraw the self-unbond + withdraw_tokens(&mut s, None, &validator.address, current_epoch).unwrap(); + let unbond = unbond_handle(&validator.address, &validator.address); + let unbond_iter = unbond.iter(&s).unwrap().next(); + assert!(unbond_iter.is_none()); + + let pos_balance = s + .read::(&token::balance_key( + &staking_token, + &crate::ADDRESS, + )) + .unwrap(); + assert_eq!( + Some( + pos_balance_pre + amount_self_bond - amount_self_unbond + + amount_del + ), + pos_balance + ); + + // Withdraw the delegation unbond + withdraw_tokens( + &mut s, + Some(&delegator), + &validator.address, + current_epoch, + ) + .unwrap(); + let unbond = unbond_handle(&delegator, &validator.address); + let unbond_iter = unbond.iter(&s).unwrap().next(); + assert!(unbond_iter.is_none()); + + let pos_balance = s + .read::(&token::balance_key( + &staking_token, + &crate::ADDRESS, + )) + .unwrap(); + assert_eq!( + Some( + pos_balance_pre + amount_self_bond - amount_self_unbond + + amount_del + - amount_undel + ), + pos_balance + ); +} + +fn test_unjail_validator_aux( + params: OwnedPosParams, + mut validators: Vec, +) { + // println!("\nTest inputs: {params:?}, genesis validators: + // {validators:#?}"); + let mut s = TestWlStorage::default(); + + // Find the validator with the most stake and 100x his stake to keep the + // cubic slash rate small + let num_vals = validators.len(); + validators.sort_by_key(|a| a.tokens); + validators[num_vals - 1].tokens = 100 * validators[num_vals - 1].tokens; + + // Get second highest stake validator to misbehave + let val_addr = &validators[num_vals - 2].address; + // let val_tokens = validators[num_vals - 2].tokens; + + // Genesis + let mut current_epoch = s.storage.block.epoch; + let params = test_init_genesis( + &mut s, + params, + 
validators.clone().into_iter(), + current_epoch, + ) + .unwrap(); + s.commit_block().unwrap(); + + current_epoch = advance_epoch(&mut s, ¶ms); + process_slashes(&mut s, current_epoch).unwrap(); + + // Discover first slash + let slash_0_evidence_epoch = current_epoch; + let evidence_block_height = BlockHeight(0); // doesn't matter for slashing logic + let slash_0_type = SlashType::DuplicateVote; + slash( + &mut s, + ¶ms, + current_epoch, + slash_0_evidence_epoch, + evidence_block_height, + slash_0_type, + val_addr, + current_epoch.next(), + ) + .unwrap(); + + assert_eq!( + validator_state_handle(val_addr) + .get(&s, current_epoch, ¶ms) + .unwrap(), + Some(ValidatorState::Consensus) + ); + + for epoch in Epoch::iter_bounds_inclusive( + current_epoch.next(), + current_epoch + params.pipeline_len, + ) { + // Check the validator state + assert_eq!( + validator_state_handle(val_addr) + .get(&s, epoch, ¶ms) + .unwrap(), + Some(ValidatorState::Jailed) + ); + // Check the validator set positions + assert!( + validator_set_positions_handle() + .at(&epoch) + .get(&s, val_addr) + .unwrap() + .is_none(), + ); + } + + // Advance past an epoch in which we can unbond + let unfreeze_epoch = + slash_0_evidence_epoch + params.slash_processing_epoch_offset(); + while current_epoch < unfreeze_epoch + 4u64 { + current_epoch = advance_epoch(&mut s, ¶ms); + process_slashes(&mut s, current_epoch).unwrap(); + } + + // Unjail the validator + unjail_validator(&mut s, val_addr, current_epoch).unwrap(); + + // Check the validator state + for epoch in + Epoch::iter_bounds_inclusive(current_epoch, current_epoch.next()) + { + assert_eq!( + validator_state_handle(val_addr) + .get(&s, epoch, ¶ms) + .unwrap(), + Some(ValidatorState::Jailed) + ); + } + + assert_eq!( + validator_state_handle(val_addr) + .get(&s, current_epoch + params.pipeline_len, ¶ms) + .unwrap(), + Some(ValidatorState::Consensus) + ); + assert!( + validator_set_positions_handle() + .at(&(current_epoch + params.pipeline_len)) + .get(&s, val_addr) + .unwrap() + .is_some(), + ); + + // Advance another epoch + current_epoch = advance_epoch(&mut s, ¶ms); + process_slashes(&mut s, current_epoch).unwrap(); + + let second_att = unjail_validator(&mut s, val_addr, current_epoch); + assert!(second_att.is_err()); +} + +fn test_unslashed_bond_amount_aux(validators: Vec) { + let mut storage = TestWlStorage::default(); + let params = OwnedPosParams { + unbonding_len: 4, + ..Default::default() + }; + + // Genesis + let mut current_epoch = storage.storage.block.epoch; + let params = test_init_genesis( + &mut storage, + params, + validators.clone().into_iter(), + current_epoch, + ) + .unwrap(); + storage.commit_block().unwrap(); + + let validator1 = validators[0].address.clone(); + let validator2 = validators[1].address.clone(); + + // Get a delegator with some tokens + let staking_token = staking_token_address(&storage); + let delegator = address::testing::gen_implicit_address(); + let del_balance = token::Amount::from_uint(1_000_000, 0).unwrap(); + credit_tokens(&mut storage, &staking_token, &delegator, del_balance) + .unwrap(); + + // Bond to validator 1 + bond_tokens( + &mut storage, + Some(&delegator), + &validator1, + 10_000.into(), + current_epoch, + None, + ) + .unwrap(); + + // Unbond some from validator 1 + unbond_tokens( + &mut storage, + Some(&delegator), + &validator1, + 1_342.into(), + current_epoch, + false, + ) + .unwrap(); + + // Redelegate some from validator 1 -> 2 + redelegate_tokens( + &mut storage, + &delegator, + &validator1, + &validator2, + 
current_epoch, + 1_875.into(), + ) + .unwrap(); + + // Unbond some from validator 2 + unbond_tokens( + &mut storage, + Some(&delegator), + &validator2, + 584.into(), + current_epoch, + false, + ) + .unwrap(); + + // Advance an epoch + current_epoch = advance_epoch(&mut storage, ¶ms); + process_slashes(&mut storage, current_epoch).unwrap(); + + // Bond to validator 1 + bond_tokens( + &mut storage, + Some(&delegator), + &validator1, + 384.into(), + current_epoch, + None, + ) + .unwrap(); + + // Unbond some from validator 1 + unbond_tokens( + &mut storage, + Some(&delegator), + &validator1, + 144.into(), + current_epoch, + false, + ) + .unwrap(); + + // Redelegate some from validator 1 -> 2 + redelegate_tokens( + &mut storage, + &delegator, + &validator1, + &validator2, + current_epoch, + 3_448.into(), + ) + .unwrap(); + + // Unbond some from validator 2 + unbond_tokens( + &mut storage, + Some(&delegator), + &validator2, + 699.into(), + current_epoch, + false, + ) + .unwrap(); + + // Advance an epoch + current_epoch = advance_epoch(&mut storage, ¶ms); + process_slashes(&mut storage, current_epoch).unwrap(); + + // Bond to validator 1 + bond_tokens( + &mut storage, + Some(&delegator), + &validator1, + 4_384.into(), + current_epoch, + None, + ) + .unwrap(); + + // Redelegate some from validator 1 -> 2 + redelegate_tokens( + &mut storage, + &delegator, + &validator1, + &validator2, + current_epoch, + 1_008.into(), + ) + .unwrap(); + + // Unbond some from validator 2 + unbond_tokens( + &mut storage, + Some(&delegator), + &validator2, + 3_500.into(), + current_epoch, + false, + ) + .unwrap(); + + // Checks + let val1_init_stake = validators[0].tokens; + + for epoch in Epoch::iter_bounds_inclusive( + Epoch(0), + current_epoch + params.pipeline_len, + ) { + let bond_amount = crate::bond_amount( + &storage, + &BondId { + source: delegator.clone(), + validator: validator1.clone(), + }, + epoch, + ) + .unwrap_or_default(); + + let val_stake = + crate::read_validator_stake(&storage, ¶ms, &validator1, epoch) + .unwrap(); + // dbg!(&bond_amount); + assert_eq!(val_stake - val1_init_stake, bond_amount); + } +} + +fn test_log_block_rewards_aux( + validators: Vec, + params: OwnedPosParams, +) { + tracing::info!( + "New case with {} validators: {:#?}", + validators.len(), + validators + .iter() + .map(|v| (&v.address, v.tokens.to_string_native())) + .collect::>() + ); + let mut s = TestWlStorage::default(); + // Init genesis + let current_epoch = s.storage.block.epoch; + let params = test_init_genesis( + &mut s, + params, + validators.clone().into_iter(), + current_epoch, + ) + .unwrap(); + s.commit_block().unwrap(); + let total_stake = + crate::get_total_consensus_stake(&s, current_epoch, ¶ms).unwrap(); + let consensus_set = + crate::read_consensus_validator_set_addresses(&s, current_epoch) + .unwrap(); + let proposer_address = consensus_set.iter().next().unwrap().clone(); + + tracing::info!( + ?params.block_proposer_reward, + ?params.block_vote_reward, + ); + tracing::info!(?proposer_address,); + + // Rewards accumulator should be empty at first + let rewards_handle = rewards_accumulator_handle(); + assert!(rewards_handle.is_empty(&s).unwrap()); + + let mut last_rewards = BTreeMap::default(); + + let num_blocks = 100; + // Loop through `num_blocks`, log rewards & check results + for i in 0..num_blocks { + tracing::info!(""); + tracing::info!("Block {}", i + 1); + + // A helper closure to prepare minimum required votes + let prep_votes = |epoch| { + // Ceil of 2/3 of total stake + let min_required_votes = 
total_stake.mul_ceil(Dec::two() / 3); + + let mut total_votes = token::Amount::zero(); + let mut non_voters = HashSet::<Address>
::default(); + let mut prep_vote = |validator| { + // Add validator vote if it's in consensus set and if we don't + // yet have min required votes + if consensus_set.contains(validator) + && total_votes < min_required_votes + { + let stake = + read_validator_stake(&s, ¶ms, validator, epoch) + .unwrap(); + total_votes += stake; + let validator_vp = + into_tm_voting_power(params.tm_votes_per_token, stake) + as u64; + tracing::info!("Validator {validator} signed"); + Some(VoteInfo { + validator_address: validator.clone(), + validator_vp, + }) + } else { + non_voters.insert(validator.clone()); + None + } + }; + + let votes: Vec = validators + .iter() + .rev() + .filter_map(|validator| prep_vote(&validator.address)) + .collect(); + (votes, total_votes, non_voters) + }; + + let (votes, signing_stake, non_voters) = prep_votes(current_epoch); + log_block_rewards( + &mut s, + current_epoch, + &proposer_address, + votes.clone(), + ) + .unwrap(); + + assert!(!rewards_handle.is_empty(&s).unwrap()); + + let rewards_calculator = PosRewardsCalculator { + proposer_reward: params.block_proposer_reward, + signer_reward: params.block_vote_reward, + signing_stake, + total_stake, + }; + let coeffs = rewards_calculator.get_reward_coeffs().unwrap(); + tracing::info!(?coeffs); + + // Check proposer reward + let stake = + read_validator_stake(&s, ¶ms, &proposer_address, current_epoch) + .unwrap(); + let proposer_signing_reward = votes.iter().find_map(|vote| { + if vote.validator_address == proposer_address { + let signing_fraction = + Dec::from(stake) / Dec::from(signing_stake); + Some(coeffs.signer_coeff * signing_fraction) + } else { + None + } + }); + let expected_proposer_rewards = last_rewards.get(&proposer_address).copied().unwrap_or_default() + + // Proposer reward + coeffs.proposer_coeff + // Consensus validator reward + + (coeffs.active_val_coeff + * (Dec::from(stake) / Dec::from(total_stake))) + // Signing reward (if proposer voted) + + proposer_signing_reward + .unwrap_or_default(); + tracing::info!( + "Expected proposer rewards: {expected_proposer_rewards}. Signed \ + block: {}", + proposer_signing_reward.is_some() + ); + assert_eq!( + rewards_handle.get(&s, &proposer_address).unwrap(), + Some(expected_proposer_rewards) + ); + + // Check voters rewards + for VoteInfo { + validator_address, .. 
+ } in votes.iter() + { + // Skip proposer, in case voted - already checked + if validator_address == &proposer_address { + continue; + } + + let stake = read_validator_stake( + &s, + ¶ms, + validator_address, + current_epoch, + ) + .unwrap(); + let signing_fraction = Dec::from(stake) / Dec::from(signing_stake); + let expected_signer_rewards = last_rewards + .get(validator_address) + .copied() + .unwrap_or_default() + + coeffs.signer_coeff * signing_fraction + + (coeffs.active_val_coeff + * (Dec::from(stake) / Dec::from(total_stake))); + tracing::info!( + "Expected signer {validator_address} rewards: \ + {expected_signer_rewards}" + ); + assert_eq!( + rewards_handle.get(&s, validator_address).unwrap(), + Some(expected_signer_rewards) + ); + } + + // Check non-voters rewards, if any + for address in non_voters { + // Skip proposer, in case it didn't vote - already checked + if address == proposer_address { + continue; + } + + if consensus_set.contains(&address) { + let stake = + read_validator_stake(&s, ¶ms, &address, current_epoch) + .unwrap(); + let expected_non_signer_rewards = + last_rewards.get(&address).copied().unwrap_or_default() + + coeffs.active_val_coeff + * (Dec::from(stake) / Dec::from(total_stake)); + tracing::info!( + "Expected non-signer {address} rewards: \ + {expected_non_signer_rewards}" + ); + assert_eq!( + rewards_handle.get(&s, &address).unwrap(), + Some(expected_non_signer_rewards) + ); + } else { + let last_reward = last_rewards.get(&address).copied(); + assert_eq!( + rewards_handle.get(&s, &address).unwrap(), + last_reward + ); + } + } + s.commit_block().unwrap(); + + last_rewards = rewards_accumulator_handle().collect_map(&s).unwrap(); + + let rewards_sum: Dec = last_rewards.values().copied().sum(); + let expected_sum = Dec::one() * (i as u64 + 1); + let err_tolerance = Dec::new(1, 9).unwrap(); + let fail_msg = format!( + "Expected rewards sum at block {} to be {expected_sum}, got \ + {rewards_sum}. 
Error tolerance {err_tolerance}.", + i + 1 + ); + assert!(expected_sum <= rewards_sum + err_tolerance, "{fail_msg}"); + assert!(rewards_sum <= expected_sum, "{fail_msg}"); + } +} + +fn test_update_rewards_products_aux(validators: Vec) { + tracing::info!( + "New case with {} validators: {:#?}", + validators.len(), + validators + .iter() + .map(|v| (&v.address, v.tokens.to_string_native())) + .collect::>() + ); + let mut s = TestWlStorage::default(); + // Init genesis + let current_epoch = s.storage.block.epoch; + let params = OwnedPosParams::default(); + let params = test_init_genesis( + &mut s, + params, + validators.into_iter(), + current_epoch, + ) + .unwrap(); + s.commit_block().unwrap(); + + let staking_token = staking_token_address(&s); + let consensus_set = + crate::read_consensus_validator_set_addresses(&s, current_epoch) + .unwrap(); + + // Start a new epoch + let current_epoch = advance_epoch(&mut s, ¶ms); + + // Read some data before applying rewards + let pos_balance_pre = + read_balance(&s, &staking_token, &address::POS).unwrap(); + let gov_balance_pre = + read_balance(&s, &staking_token, &address::GOV).unwrap(); + + let num_consensus_validators = consensus_set.len() as u64; + let accum_val = Dec::one() / num_consensus_validators; + let num_blocks_in_last_epoch = 1000; + + // Assign some reward accumulator values to consensus validator + for validator in &consensus_set { + rewards_accumulator_handle() + .insert( + &mut s, + validator.clone(), + accum_val * num_blocks_in_last_epoch, + ) + .unwrap(); + } + + // Distribute inflation into rewards + let last_epoch = current_epoch.prev(); + let inflation = token::Amount::native_whole(10_000_000); + update_rewards_products_and_mint_inflation( + &mut s, + ¶ms, + last_epoch, + num_blocks_in_last_epoch, + inflation, + &staking_token, + ) + .unwrap(); + + let pos_balance_post = + read_balance(&s, &staking_token, &address::POS).unwrap(); + let gov_balance_post = + read_balance(&s, &staking_token, &address::GOV).unwrap(); + + assert_eq!( + pos_balance_pre + gov_balance_pre + inflation, + pos_balance_post + gov_balance_post, + "Expected inflation to be minted to PoS and left-over amount to Gov" + ); + + let pos_credit = pos_balance_post - pos_balance_pre; + let gov_credit = gov_balance_post - gov_balance_pre; + assert!( + pos_credit > gov_credit, + "PoS must receive more tokens than Gov, but got {} in PoS and {} in \ + Gov", + pos_credit.to_string_native(), + gov_credit.to_string_native() + ); + + // Rewards accumulator must be cleared out + let rewards_handle = rewards_accumulator_handle(); + assert!(rewards_handle.is_empty(&s).unwrap()); +} + +fn test_consensus_key_change_aux(validators: Vec) { + assert_eq!(validators.len(), 1); + + let params = OwnedPosParams { + unbonding_len: 4, + ..Default::default() + }; + let validator = validators[0].address.clone(); + + // println!("\nTest inputs: {params:?}, genesis validators: + // {validators:#?}"); + let mut storage = TestWlStorage::default(); + + // Genesis + let mut current_epoch = storage.storage.block.epoch; + let params = test_init_genesis( + &mut storage, + params, + validators.into_iter(), + current_epoch, + ) + .unwrap(); + storage.commit_block().unwrap(); + + // Check that there is one consensus key in the network + let consensus_keys = get_consensus_key_set(&storage).unwrap(); + assert_eq!(consensus_keys.len(), 1); + let ck = consensus_keys.first().cloned().unwrap(); + let og_ck = validator_consensus_key_handle(&validator) + .get(&storage, current_epoch, ¶ms) + .unwrap() + .unwrap(); 
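+ // The single key in the consensus key set should match the validator's consensus key recorded at genesis.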
+ assert_eq!(ck, og_ck); + + // Attempt to change to a new secp256k1 consensus key (disallowed) + let secp_ck = gen_keypair::(); + let secp_ck = key::common::SecretKey::Secp256k1(secp_ck).ref_to(); + let res = + change_consensus_key(&mut storage, &validator, &secp_ck, current_epoch); + assert!(res.is_err()); + + // Change consensus keys + let ck_2 = common_sk_from_simple_seed(1).ref_to(); + change_consensus_key(&mut storage, &validator, &ck_2, current_epoch) + .unwrap(); + + // Check that there is a new consensus key + let consensus_keys = get_consensus_key_set(&storage).unwrap(); + assert_eq!(consensus_keys.len(), 2); + + for epoch in current_epoch.iter_range(params.pipeline_len) { + let ck = validator_consensus_key_handle(&validator) + .get(&storage, epoch, ¶ms) + .unwrap() + .unwrap(); + assert_eq!(ck, og_ck); + } + let pipeline_epoch = current_epoch + params.pipeline_len; + let ck = validator_consensus_key_handle(&validator) + .get(&storage, pipeline_epoch, ¶ms) + .unwrap() + .unwrap(); + assert_eq!(ck, ck_2); + + // Advance to the pipeline epoch + loop { + current_epoch = advance_epoch(&mut storage, ¶ms); + if current_epoch == pipeline_epoch { + break; + } + } + + // Check the consensus keys again + let consensus_keys = get_consensus_key_set(&storage).unwrap(); + assert_eq!(consensus_keys.len(), 2); + + for epoch in current_epoch.iter_range(params.pipeline_len + 1) { + let ck = validator_consensus_key_handle(&validator) + .get(&storage, epoch, ¶ms) + .unwrap() + .unwrap(); + assert_eq!(ck, ck_2); + } + + // Now change the consensus key again and bond in the same epoch + let ck_3 = common_sk_from_simple_seed(3).ref_to(); + change_consensus_key(&mut storage, &validator, &ck_3, current_epoch) + .unwrap(); + + let staking_token = storage.storage.native_token.clone(); + let amount_del = token::Amount::native_whole(5); + credit_tokens(&mut storage, &staking_token, &validator, amount_del) + .unwrap(); + bond_tokens( + &mut storage, + None, + &validator, + token::Amount::native_whole(1), + current_epoch, + None, + ) + .unwrap(); + + // Check consensus keys again + let consensus_keys = get_consensus_key_set(&storage).unwrap(); + assert_eq!(consensus_keys.len(), 3); + + for epoch in current_epoch.iter_range(params.pipeline_len) { + let ck = validator_consensus_key_handle(&validator) + .get(&storage, epoch, ¶ms) + .unwrap() + .unwrap(); + assert_eq!(ck, ck_2); + } + let pipeline_epoch = current_epoch + params.pipeline_len; + let ck = validator_consensus_key_handle(&validator) + .get(&storage, pipeline_epoch, ¶ms) + .unwrap() + .unwrap(); + assert_eq!(ck, ck_3); + + // Advance to the pipeline epoch to ensure that the validator set updates to + // tendermint will work + loop { + current_epoch = advance_epoch(&mut storage, ¶ms); + if current_epoch == pipeline_epoch { + break; + } + } + assert_eq!(current_epoch.0, 2 * params.pipeline_len); +} + +fn test_is_delegator_aux(mut validators: Vec) { + validators.sort_by(|a, b| b.tokens.cmp(&a.tokens)); + + let validator1 = validators[0].address.clone(); + let validator2 = validators[1].address.clone(); + + let mut storage = TestWlStorage::default(); + let params = OwnedPosParams { + unbonding_len: 4, + ..Default::default() + }; + + // Genesis + let mut current_epoch = storage.storage.block.epoch; + let params = test_init_genesis( + &mut storage, + params, + validators.clone().into_iter(), + current_epoch, + ) + .unwrap(); + storage.commit_block().unwrap(); + + // Get delegators with some tokens + let staking_token = staking_token_address(&storage); + let 
delegator1 = address::testing::gen_implicit_address(); + let delegator2 = address::testing::gen_implicit_address(); + let del_balance = token::Amount::native_whole(1000); + credit_tokens(&mut storage, &staking_token, &delegator1, del_balance) + .unwrap(); + credit_tokens(&mut storage, &staking_token, &delegator2, del_balance) + .unwrap(); + + // Advance to epoch 1 + current_epoch = advance_epoch(&mut storage, ¶ms); + process_slashes(&mut storage, current_epoch).unwrap(); + + // Delegate in epoch 1 to validator1 + let del1_epoch = current_epoch; + bond_tokens( + &mut storage, + Some(&delegator1), + &validator1, + 1000.into(), + current_epoch, + None, + ) + .unwrap(); + + // Advance to epoch 2 + current_epoch = advance_epoch(&mut storage, ¶ms); + process_slashes(&mut storage, current_epoch).unwrap(); + + // Delegate in epoch 2 to validator2 + let del2_epoch = current_epoch; + bond_tokens( + &mut storage, + Some(&delegator2), + &validator2, + 1000.into(), + current_epoch, + None, + ) + .unwrap(); + + // Checks + assert!(is_validator(&storage, &validator1).unwrap()); + assert!(is_validator(&storage, &validator2).unwrap()); + assert!(!is_delegator(&storage, &validator1, None).unwrap()); + assert!(!is_delegator(&storage, &validator2, None).unwrap()); + + assert!(!is_validator(&storage, &delegator1).unwrap()); + assert!(!is_validator(&storage, &delegator2).unwrap()); + assert!(is_delegator(&storage, &delegator1, None).unwrap()); + assert!(is_delegator(&storage, &delegator2, None).unwrap()); + + for epoch in Epoch::default().iter_range(del1_epoch.0 + params.pipeline_len) + { + assert!(!is_delegator(&storage, &delegator1, Some(epoch)).unwrap()); + } + assert!( + is_delegator( + &storage, + &delegator1, + Some(del1_epoch + params.pipeline_len) + ) + .unwrap() + ); + for epoch in Epoch::default().iter_range(del2_epoch.0 + params.pipeline_len) + { + assert!(!is_delegator(&storage, &delegator2, Some(epoch)).unwrap()); + } + assert!( + is_delegator( + &storage, + &delegator2, + Some(del2_epoch + params.pipeline_len) + ) + .unwrap() + ); +} diff --git a/proof_of_stake/src/tests/test_slash_and_redel.rs b/proof_of_stake/src/tests/test_slash_and_redel.rs new file mode 100644 index 0000000000..17b73494b6 --- /dev/null +++ b/proof_of_stake/src/tests/test_slash_and_redel.rs @@ -0,0 +1,1495 @@ +use std::ops::Deref; +use std::str::FromStr; + +use assert_matches::assert_matches; +use namada_core::ledger::storage::testing::TestWlStorage; +use namada_core::ledger::storage_api::collections::lazy_map::Collectable; +use namada_core::ledger::storage_api::token::{credit_tokens, read_balance}; +use namada_core::ledger::storage_api::StorageRead; +use namada_core::types::dec::Dec; +use namada_core::types::storage::{BlockHeight, Epoch}; +use namada_core::types::token::NATIVE_MAX_DECIMAL_PLACES; +use namada_core::types::{address, token}; +use proptest::prelude::*; +use proptest::test_runner::Config; +// Use `RUST_LOG=info` (or another tracing level) and `--nocapture` to see +// `tracing` logs from tests +use test_log::test; + +use crate::queries::bonds_and_unbonds; +use crate::slashing::{process_slashes, slash}; +use crate::storage::{ + bond_handle, delegator_redelegated_bonds_handle, + delegator_redelegated_unbonds_handle, read_total_stake, + read_validator_stake, total_bonded_handle, total_unbonded_handle, + unbond_handle, validator_incoming_redelegations_handle, + validator_outgoing_redelegations_handle, validator_slashes_handle, + validator_total_redelegated_bonded_handle, + validator_total_redelegated_unbonded_handle, 
+}; +use crate::test_utils::test_init_genesis; +use crate::tests::helpers::{ + advance_epoch, arb_genesis_validators, arb_redelegation_amounts, + test_slashes_with_unbonding_params, +}; +use crate::types::{BondId, GenesisValidator, SlashType}; +use crate::{ + bond_tokens, redelegate_tokens, staking_token_address, unbond_tokens, + withdraw_tokens, OwnedPosParams, RedelegationError, +}; + +proptest! { + // Generate arb valid input for `test_simple_redelegation_aux` + #![proptest_config(Config { + cases: 100, + .. Config::default() + })] + #[test] + fn test_simple_redelegation( + + genesis_validators in arb_genesis_validators(2..4, None), + (amount_delegate, amount_redelegate, amount_unbond) in arb_redelegation_amounts(20) + + ) { + test_simple_redelegation_aux(genesis_validators, amount_delegate, amount_redelegate, amount_unbond) + } +} + +fn test_simple_redelegation_aux( + mut validators: Vec, + amount_delegate: token::Amount, + amount_redelegate: token::Amount, + amount_unbond: token::Amount, +) { + validators.sort_by(|a, b| b.tokens.cmp(&a.tokens)); + + let src_validator = validators[0].address.clone(); + let dest_validator = validators[1].address.clone(); + + let mut storage = TestWlStorage::default(); + let params = OwnedPosParams { + unbonding_len: 4, + ..Default::default() + }; + + // Genesis + let mut current_epoch = storage.storage.block.epoch; + let params = test_init_genesis( + &mut storage, + params, + validators.clone().into_iter(), + current_epoch, + ) + .unwrap(); + storage.commit_block().unwrap(); + + // Get a delegator with some tokens + let staking_token = staking_token_address(&storage); + let delegator = address::testing::gen_implicit_address(); + let del_balance = token::Amount::from_uint(1_000_000, 0).unwrap(); + credit_tokens(&mut storage, &staking_token, &delegator, del_balance) + .unwrap(); + + // Ensure that we cannot redelegate with the same src and dest validator + let err = redelegate_tokens( + &mut storage, + &delegator, + &src_validator, + &src_validator, + current_epoch, + amount_redelegate, + ) + .unwrap_err(); + let err_str = err.to_string(); + assert_matches!( + err.downcast::().unwrap().deref(), + RedelegationError::RedelegationSrcEqDest, + "Redelegation with the same src and dest validator must be rejected, \ + got {err_str}", + ); + + for _ in 0..5 { + current_epoch = advance_epoch(&mut storage, ¶ms); + process_slashes(&mut storage, current_epoch).unwrap(); + } + + let init_epoch = current_epoch; + + // Delegate in epoch 1 to src_validator + bond_tokens( + &mut storage, + Some(&delegator), + &src_validator, + amount_delegate, + current_epoch, + None, + ) + .unwrap(); + + // Advance three epochs + current_epoch = advance_epoch(&mut storage, ¶ms); + process_slashes(&mut storage, current_epoch).unwrap(); + current_epoch = advance_epoch(&mut storage, ¶ms); + process_slashes(&mut storage, current_epoch).unwrap(); + current_epoch = advance_epoch(&mut storage, ¶ms); + process_slashes(&mut storage, current_epoch).unwrap(); + + // Redelegate in epoch 3 + redelegate_tokens( + &mut storage, + &delegator, + &src_validator, + &dest_validator, + current_epoch, + amount_redelegate, + ) + .unwrap(); + + // Dest val + + // Src val + + // Checks + let redelegated = delegator_redelegated_bonds_handle(&delegator) + .at(&dest_validator) + .at(&(current_epoch + params.pipeline_len)) + .at(&src_validator) + .get(&storage, &(init_epoch + params.pipeline_len)) + .unwrap() + .unwrap(); + assert_eq!(redelegated, amount_redelegate); + + let redel_start_epoch = + 
validator_incoming_redelegations_handle(&dest_validator) + .get(&storage, &delegator) + .unwrap() + .unwrap(); + assert_eq!(redel_start_epoch, current_epoch + params.pipeline_len); + + let redelegated = validator_outgoing_redelegations_handle(&src_validator) + .at(&dest_validator) + .at(¤t_epoch.prev()) + .get(&storage, ¤t_epoch) + .unwrap() + .unwrap(); + assert_eq!(redelegated, amount_redelegate); + + // Advance three epochs + current_epoch = advance_epoch(&mut storage, ¶ms); + process_slashes(&mut storage, current_epoch).unwrap(); + current_epoch = advance_epoch(&mut storage, ¶ms); + process_slashes(&mut storage, current_epoch).unwrap(); + current_epoch = advance_epoch(&mut storage, ¶ms); + process_slashes(&mut storage, current_epoch).unwrap(); + + // Unbond in epoch 5 from dest_validator + let _ = unbond_tokens( + &mut storage, + Some(&delegator), + &dest_validator, + amount_unbond, + current_epoch, + false, + ) + .unwrap(); + + let bond_start = init_epoch + params.pipeline_len; + let redelegation_end = bond_start + params.pipeline_len + 1u64; + let unbond_end = + redelegation_end + params.withdrawable_epoch_offset() + 1u64; + let unbond_materialized = redelegation_end + params.pipeline_len + 1u64; + + // Checks + let redelegated_remaining = delegator_redelegated_bonds_handle(&delegator) + .at(&dest_validator) + .at(&redelegation_end) + .at(&src_validator) + .get(&storage, &bond_start) + .unwrap() + .unwrap_or_default(); + assert_eq!(redelegated_remaining, amount_redelegate - amount_unbond); + + let redel_unbonded = delegator_redelegated_unbonds_handle(&delegator) + .at(&dest_validator) + .at(&redelegation_end) + .at(&unbond_end) + .at(&src_validator) + .get(&storage, &bond_start) + .unwrap() + .unwrap(); + assert_eq!(redel_unbonded, amount_unbond); + + let total_redel_unbonded = + validator_total_redelegated_unbonded_handle(&dest_validator) + .at(&unbond_materialized) + .at(&redelegation_end) + .at(&src_validator) + .get(&storage, &bond_start) + .unwrap() + .unwrap(); + assert_eq!(total_redel_unbonded, amount_unbond); + + // Advance to withdrawal epoch + loop { + current_epoch = advance_epoch(&mut storage, ¶ms); + process_slashes(&mut storage, current_epoch).unwrap(); + if current_epoch == unbond_end { + break; + } + } + + // Withdraw + withdraw_tokens( + &mut storage, + Some(&delegator), + &dest_validator, + current_epoch, + ) + .unwrap(); + + assert!( + delegator_redelegated_unbonds_handle(&delegator) + .at(&dest_validator) + .is_empty(&storage) + .unwrap() + ); + + let delegator_balance = storage + .read::(&token::balance_key(&staking_token, &delegator)) + .unwrap() + .unwrap_or_default(); + assert_eq!( + delegator_balance, + del_balance - amount_delegate + amount_unbond + ); +} + +proptest! { + // Generate arb valid input for `test_slashes_with_unbonding_aux` + #![proptest_config(Config { + cases: 100, + .. 
Config::default() + })] + #[test] + fn test_slashes_with_unbonding( + (params, genesis_validators, unbond_delay) + in test_slashes_with_unbonding_params() + ) { + test_slashes_with_unbonding_aux( + params, genesis_validators, unbond_delay) + } +} + +fn test_slashes_with_unbonding_aux( + mut params: OwnedPosParams, + validators: Vec, + unbond_delay: u64, +) { + // This can be useful for debugging: + params.pipeline_len = 2; + params.unbonding_len = 4; + // println!("\nTest inputs: {params:?}, genesis validators: + // {validators:#?}"); + let mut s = TestWlStorage::default(); + + // Find the validator with the least stake to avoid the cubic slash rate + // going to 100% + let validator = + itertools::Itertools::sorted_by_key(validators.iter(), |v| v.tokens) + .next() + .unwrap(); + let val_addr = &validator.address; + let val_tokens = validator.tokens; + + // Genesis + // let start_epoch = s.storage.block.epoch; + let mut current_epoch = s.storage.block.epoch; + let params = test_init_genesis( + &mut s, + params, + validators.clone().into_iter(), + current_epoch, + ) + .unwrap(); + s.commit_block().unwrap(); + + current_epoch = advance_epoch(&mut s, ¶ms); + process_slashes(&mut s, current_epoch).unwrap(); + + // Discover first slash + let slash_0_evidence_epoch = current_epoch; + // let slash_0_processing_epoch = + // slash_0_evidence_epoch + params.slash_processing_epoch_offset(); + let evidence_block_height = BlockHeight(0); // doesn't matter for slashing logic + let slash_0_type = SlashType::DuplicateVote; + slash( + &mut s, + ¶ms, + current_epoch, + slash_0_evidence_epoch, + evidence_block_height, + slash_0_type, + val_addr, + current_epoch.next(), + ) + .unwrap(); + + // Advance to an epoch in which we can unbond + let unfreeze_epoch = + slash_0_evidence_epoch + params.slash_processing_epoch_offset(); + while current_epoch < unfreeze_epoch { + current_epoch = advance_epoch(&mut s, ¶ms); + process_slashes(&mut s, current_epoch).unwrap(); + } + + // Advance more epochs randomly from the generated delay + for _ in 0..unbond_delay { + current_epoch = advance_epoch(&mut s, ¶ms); + } + + // Unbond half of the tokens + let unbond_amount = Dec::new(5, 1).unwrap() * val_tokens; + let unbond_epoch = current_epoch; + unbond_tokens(&mut s, None, val_addr, unbond_amount, unbond_epoch, false) + .unwrap(); + + // Discover second slash + let slash_1_evidence_epoch = current_epoch; + // Ensure that both slashes happen before `unbond_epoch + pipeline` + let _slash_1_processing_epoch = + slash_1_evidence_epoch + params.slash_processing_epoch_offset(); + let evidence_block_height = BlockHeight(0); // doesn't matter for slashing logic + let slash_1_type = SlashType::DuplicateVote; + slash( + &mut s, + ¶ms, + current_epoch, + slash_1_evidence_epoch, + evidence_block_height, + slash_1_type, + val_addr, + current_epoch.next(), + ) + .unwrap(); + + // Advance to an epoch in which we can withdraw + let withdraw_epoch = unbond_epoch + params.withdrawable_epoch_offset(); + while current_epoch < withdraw_epoch { + current_epoch = advance_epoch(&mut s, ¶ms); + process_slashes(&mut s, current_epoch).unwrap(); + } + let token = staking_token_address(&s); + let val_balance_pre = read_balance(&s, &token, val_addr).unwrap(); + + let bond_id = BondId { + source: val_addr.clone(), + validator: val_addr.clone(), + }; + let binding = bonds_and_unbonds(&s, None, Some(val_addr.clone())).unwrap(); + let details = binding.get(&bond_id).unwrap(); + let exp_withdraw_from_details = details.unbonds[0].amount + - 
details.unbonds[0].slashed_amount.unwrap_or_default(); + + withdraw_tokens(&mut s, None, val_addr, current_epoch).unwrap(); + + let val_balance_post = read_balance(&s, &token, val_addr).unwrap(); + let withdrawn_tokens = val_balance_post - val_balance_pre; + + assert_eq!(exp_withdraw_from_details, withdrawn_tokens); + + let slash_rate_0 = validator_slashes_handle(val_addr) + .get(&s, 0) + .unwrap() + .unwrap() + .rate; + let slash_rate_1 = validator_slashes_handle(val_addr) + .get(&s, 1) + .unwrap() + .unwrap() + .rate; + + let expected_withdrawn_amount = Dec::from( + (Dec::one() - slash_rate_1) + * (Dec::one() - slash_rate_0) + * unbond_amount, + ); + // Allow some rounding error, 1 NAMNAM per each slash + let rounding_error_tolerance = + Dec::new(2, NATIVE_MAX_DECIMAL_PLACES).unwrap(); + assert!( + expected_withdrawn_amount.abs_diff(&Dec::from(withdrawn_tokens)) + <= rounding_error_tolerance + ); + + // TODO: finish once implemented + // let slash_0 = decimal_mult_amount(slash_rate_0, val_tokens); + // let slash_1 = decimal_mult_amount(slash_rate_1, val_tokens - slash_0); + // let expected_slash_pool = slash_0 + slash_1; + // let slash_pool_balance = + // read_balance(&s, &token, &SLASH_POOL_ADDRESS).unwrap(); + // assert_eq!(expected_slash_pool, slash_pool_balance); +} + +proptest! { + // Generate arb valid input for `test_simple_redelegation_aux` + #![proptest_config(Config { + cases: 100, + .. Config::default() + })] + #[test] + fn test_redelegation_with_slashing( + + genesis_validators in arb_genesis_validators(2..4, None), + (amount_delegate, amount_redelegate, amount_unbond) in arb_redelegation_amounts(20) + + ) { + test_redelegation_with_slashing_aux(genesis_validators, amount_delegate, amount_redelegate, amount_unbond) + } +} + +fn test_redelegation_with_slashing_aux( + mut validators: Vec, + amount_delegate: token::Amount, + amount_redelegate: token::Amount, + amount_unbond: token::Amount, +) { + validators.sort_by(|a, b| b.tokens.cmp(&a.tokens)); + + let src_validator = validators[0].address.clone(); + let dest_validator = validators[1].address.clone(); + + let mut storage = TestWlStorage::default(); + let params = OwnedPosParams { + unbonding_len: 4, + // Avoid empty consensus set by removing the threshold + validator_stake_threshold: token::Amount::zero(), + ..Default::default() + }; + + // Genesis + let mut current_epoch = storage.storage.block.epoch; + let params = test_init_genesis( + &mut storage, + params, + validators.clone().into_iter(), + current_epoch, + ) + .unwrap(); + storage.commit_block().unwrap(); + + // Get a delegator with some tokens + let staking_token = staking_token_address(&storage); + let delegator = address::testing::gen_implicit_address(); + let del_balance = token::Amount::from_uint(1_000_000, 0).unwrap(); + credit_tokens(&mut storage, &staking_token, &delegator, del_balance) + .unwrap(); + + for _ in 0..5 { + current_epoch = advance_epoch(&mut storage, ¶ms); + process_slashes(&mut storage, current_epoch).unwrap(); + } + + let init_epoch = current_epoch; + + // Delegate in epoch 5 to src_validator + bond_tokens( + &mut storage, + Some(&delegator), + &src_validator, + amount_delegate, + current_epoch, + None, + ) + .unwrap(); + + // Advance three epochs + current_epoch = advance_epoch(&mut storage, ¶ms); + process_slashes(&mut storage, current_epoch).unwrap(); + current_epoch = advance_epoch(&mut storage, ¶ms); + process_slashes(&mut storage, current_epoch).unwrap(); + current_epoch = advance_epoch(&mut storage, ¶ms); + process_slashes(&mut storage, 
current_epoch).unwrap(); + + // Redelegate in epoch 8 + redelegate_tokens( + &mut storage, + &delegator, + &src_validator, + &dest_validator, + current_epoch, + amount_redelegate, + ) + .unwrap(); + + // Checks + let redelegated = delegator_redelegated_bonds_handle(&delegator) + .at(&dest_validator) + .at(&(current_epoch + params.pipeline_len)) + .at(&src_validator) + .get(&storage, &(init_epoch + params.pipeline_len)) + .unwrap() + .unwrap(); + assert_eq!(redelegated, amount_redelegate); + + let redel_start_epoch = + validator_incoming_redelegations_handle(&dest_validator) + .get(&storage, &delegator) + .unwrap() + .unwrap(); + assert_eq!(redel_start_epoch, current_epoch + params.pipeline_len); + + let redelegated = validator_outgoing_redelegations_handle(&src_validator) + .at(&dest_validator) + .at(¤t_epoch.prev()) + .get(&storage, ¤t_epoch) + .unwrap() + .unwrap(); + assert_eq!(redelegated, amount_redelegate); + + // Advance three epochs + current_epoch = advance_epoch(&mut storage, ¶ms); + process_slashes(&mut storage, current_epoch).unwrap(); + current_epoch = advance_epoch(&mut storage, ¶ms); + process_slashes(&mut storage, current_epoch).unwrap(); + current_epoch = advance_epoch(&mut storage, ¶ms); + process_slashes(&mut storage, current_epoch).unwrap(); + + // Unbond in epoch 11 from dest_validator + let _ = unbond_tokens( + &mut storage, + Some(&delegator), + &dest_validator, + amount_unbond, + current_epoch, + false, + ) + .unwrap(); + + // Advance one epoch + current_epoch = advance_epoch(&mut storage, ¶ms); + process_slashes(&mut storage, current_epoch).unwrap(); + + // Discover evidence + slash( + &mut storage, + ¶ms, + current_epoch, + init_epoch + 2 * params.pipeline_len, + 0u64, + SlashType::DuplicateVote, + &src_validator, + current_epoch.next(), + ) + .unwrap(); + + let bond_start = init_epoch + params.pipeline_len; + let redelegation_end = bond_start + params.pipeline_len + 1u64; + let unbond_end = + redelegation_end + params.withdrawable_epoch_offset() + 1u64; + let unbond_materialized = redelegation_end + params.pipeline_len + 1u64; + + // Checks + let redelegated_remaining = delegator_redelegated_bonds_handle(&delegator) + .at(&dest_validator) + .at(&redelegation_end) + .at(&src_validator) + .get(&storage, &bond_start) + .unwrap() + .unwrap_or_default(); + assert_eq!(redelegated_remaining, amount_redelegate - amount_unbond); + + let redel_unbonded = delegator_redelegated_unbonds_handle(&delegator) + .at(&dest_validator) + .at(&redelegation_end) + .at(&unbond_end) + .at(&src_validator) + .get(&storage, &bond_start) + .unwrap() + .unwrap(); + assert_eq!(redel_unbonded, amount_unbond); + + let total_redel_unbonded = + validator_total_redelegated_unbonded_handle(&dest_validator) + .at(&unbond_materialized) + .at(&redelegation_end) + .at(&src_validator) + .get(&storage, &bond_start) + .unwrap() + .unwrap(); + assert_eq!(total_redel_unbonded, amount_unbond); + + // Advance to withdrawal epoch + loop { + current_epoch = advance_epoch(&mut storage, ¶ms); + process_slashes(&mut storage, current_epoch).unwrap(); + if current_epoch == unbond_end { + break; + } + } + + // Withdraw + withdraw_tokens( + &mut storage, + Some(&delegator), + &dest_validator, + current_epoch, + ) + .unwrap(); + + assert!( + delegator_redelegated_unbonds_handle(&delegator) + .at(&dest_validator) + .is_empty(&storage) + .unwrap() + ); + + let delegator_balance = storage + .read::(&token::balance_key(&staking_token, &delegator)) + .unwrap() + .unwrap_or_default(); + assert_eq!(delegator_balance, 
del_balance - amount_delegate); +} + +proptest! { + // Generate arb valid input for `test_chain_redelegations_aux` + #![proptest_config(Config { + cases: 100, + .. Config::default() + })] + #[test] + fn test_chain_redelegations( + + genesis_validators in arb_genesis_validators(3..4, None), + + ) { + test_chain_redelegations_aux(genesis_validators) + } +} + +fn test_chain_redelegations_aux(mut validators: Vec) { + validators.sort_by(|a, b| b.tokens.cmp(&a.tokens)); + + let src_validator = validators[0].address.clone(); + let _init_stake_src = validators[0].tokens; + let dest_validator = validators[1].address.clone(); + let _init_stake_dest = validators[1].tokens; + let dest_validator_2 = validators[2].address.clone(); + let _init_stake_dest_2 = validators[2].tokens; + + let mut storage = TestWlStorage::default(); + let params = OwnedPosParams { + unbonding_len: 4, + ..Default::default() + }; + + // Genesis + let mut current_epoch = storage.storage.block.epoch; + let params = test_init_genesis( + &mut storage, + params, + validators.clone().into_iter(), + current_epoch, + ) + .unwrap(); + storage.commit_block().unwrap(); + + // Get a delegator with some tokens + let staking_token = staking_token_address(&storage); + let delegator = address::testing::gen_implicit_address(); + let del_balance = token::Amount::from_uint(1_000_000, 0).unwrap(); + credit_tokens(&mut storage, &staking_token, &delegator, del_balance) + .unwrap(); + + // Delegate in epoch 0 to src_validator + let bond_amount: token::Amount = 100.into(); + bond_tokens( + &mut storage, + Some(&delegator), + &src_validator, + bond_amount, + current_epoch, + None, + ) + .unwrap(); + + let bond_start = current_epoch + params.pipeline_len; + + // Advance one epoch + current_epoch = advance_epoch(&mut storage, ¶ms); + process_slashes(&mut storage, current_epoch).unwrap(); + + // Redelegate in epoch 1 to dest_validator + let redel_amount_1: token::Amount = 58.into(); + redelegate_tokens( + &mut storage, + &delegator, + &src_validator, + &dest_validator, + current_epoch, + redel_amount_1, + ) + .unwrap(); + + let redel_start = current_epoch; + let redel_end = current_epoch + params.pipeline_len; + + // Checks ---------------- + + // Dest validator should have an incoming redelegation + let incoming_redelegation = + validator_incoming_redelegations_handle(&dest_validator) + .get(&storage, &delegator) + .unwrap(); + assert_eq!(incoming_redelegation, Some(redel_end)); + + // Src validator should have an outoging redelegation + let outgoing_redelegation = + validator_outgoing_redelegations_handle(&src_validator) + .at(&dest_validator) + .at(&bond_start) + .get(&storage, &redel_start) + .unwrap(); + assert_eq!(outgoing_redelegation, Some(redel_amount_1)); + + // Delegator should have redelegated bonds + let del_total_redelegated_bonded = + delegator_redelegated_bonds_handle(&delegator) + .at(&dest_validator) + .at(&redel_end) + .at(&src_validator) + .get(&storage, &bond_start) + .unwrap() + .unwrap_or_default(); + assert_eq!(del_total_redelegated_bonded, redel_amount_1); + + // There should be delegator bonds for both src and dest validators + let bonded_src = bond_handle(&delegator, &src_validator); + let bonded_dest = bond_handle(&delegator, &dest_validator); + assert_eq!( + bonded_src + .get_delta_val(&storage, bond_start) + .unwrap() + .unwrap_or_default(), + bond_amount - redel_amount_1 + ); + assert_eq!( + bonded_dest + .get_delta_val(&storage, redel_end) + .unwrap() + .unwrap_or_default(), + redel_amount_1 + ); + + // The dest validator 
should have total redelegated bonded tokens + let dest_total_redelegated_bonded = + validator_total_redelegated_bonded_handle(&dest_validator) + .at(&redel_end) + .at(&src_validator) + .get(&storage, &bond_start) + .unwrap() + .unwrap_or_default(); + assert_eq!(dest_total_redelegated_bonded, redel_amount_1); + + // The dest validator's total bonded should have an entry for the genesis + // bond and the redelegation + let dest_total_bonded = total_bonded_handle(&dest_validator) + .get_data_handler() + .collect_map(&storage) + .unwrap(); + assert!( + dest_total_bonded.len() == 2 + && dest_total_bonded.contains_key(&Epoch::default()) + ); + assert_eq!( + dest_total_bonded + .get(&redel_end) + .cloned() + .unwrap_or_default(), + redel_amount_1 + ); + + // The src validator should have a total bonded entry for the original bond + // accounting for the redelegation + assert_eq!( + total_bonded_handle(&src_validator) + .get_delta_val(&storage, bond_start) + .unwrap() + .unwrap_or_default(), + bond_amount - redel_amount_1 + ); + + // The src validator should have a total unbonded entry due to the + // redelegation + let src_total_unbonded = total_unbonded_handle(&src_validator) + .at(&redel_end) + .get(&storage, &bond_start) + .unwrap() + .unwrap_or_default(); + assert_eq!(src_total_unbonded, redel_amount_1); + + // Attempt to redelegate in epoch 3 to dest_validator + current_epoch = advance_epoch(&mut storage, ¶ms); + process_slashes(&mut storage, current_epoch).unwrap(); + current_epoch = advance_epoch(&mut storage, ¶ms); + process_slashes(&mut storage, current_epoch).unwrap(); + + let redel_amount_2: token::Amount = 23.into(); + let redel_att = redelegate_tokens( + &mut storage, + &delegator, + &dest_validator, + &dest_validator_2, + current_epoch, + redel_amount_2, + ); + assert!(redel_att.is_err()); + + // Advance to right before the redelegation can be redelegated again + assert_eq!(redel_end, current_epoch); + let epoch_can_redel = + redel_end.prev() + params.slash_processing_epoch_offset(); + loop { + current_epoch = advance_epoch(&mut storage, ¶ms); + process_slashes(&mut storage, current_epoch).unwrap(); + if current_epoch == epoch_can_redel.prev() { + break; + } + } + + // Attempt to redelegate in epoch before we actually are able to + let redel_att = redelegate_tokens( + &mut storage, + &delegator, + &dest_validator, + &dest_validator_2, + current_epoch, + redel_amount_2, + ); + assert!(redel_att.is_err()); + + // Advance one more epoch + current_epoch = advance_epoch(&mut storage, ¶ms); + process_slashes(&mut storage, current_epoch).unwrap(); + + // Redelegate from dest_validator to dest_validator_2 now + redelegate_tokens( + &mut storage, + &delegator, + &dest_validator, + &dest_validator_2, + current_epoch, + redel_amount_2, + ) + .unwrap(); + + let redel_2_start = current_epoch; + let redel_2_end = current_epoch + params.pipeline_len; + + // Checks ----------------------------------- + + // Both the dest validator and dest validator 2 should have incoming + // redelegations + let incoming_redelegation_1 = + validator_incoming_redelegations_handle(&dest_validator) + .get(&storage, &delegator) + .unwrap(); + assert_eq!(incoming_redelegation_1, Some(redel_end)); + let incoming_redelegation_2 = + validator_incoming_redelegations_handle(&dest_validator_2) + .get(&storage, &delegator) + .unwrap(); + assert_eq!(incoming_redelegation_2, Some(redel_2_end)); + + // Both the src validator and dest validator should have outgoing + // redelegations + let outgoing_redelegation_1 = + 
validator_outgoing_redelegations_handle(&src_validator) + .at(&dest_validator) + .at(&bond_start) + .get(&storage, &redel_start) + .unwrap(); + assert_eq!(outgoing_redelegation_1, Some(redel_amount_1)); + + let outgoing_redelegation_2 = + validator_outgoing_redelegations_handle(&dest_validator) + .at(&dest_validator_2) + .at(&redel_end) + .get(&storage, &redel_2_start) + .unwrap(); + assert_eq!(outgoing_redelegation_2, Some(redel_amount_2)); + + // All three validators should have bonds + let bonded_dest2 = bond_handle(&delegator, &dest_validator_2); + assert_eq!( + bonded_src + .get_delta_val(&storage, bond_start) + .unwrap() + .unwrap_or_default(), + bond_amount - redel_amount_1 + ); + assert_eq!( + bonded_dest + .get_delta_val(&storage, redel_end) + .unwrap() + .unwrap_or_default(), + redel_amount_1 - redel_amount_2 + ); + assert_eq!( + bonded_dest2 + .get_delta_val(&storage, redel_2_end) + .unwrap() + .unwrap_or_default(), + redel_amount_2 + ); + + // There should be no unbond entries + let unbond_src = unbond_handle(&delegator, &src_validator); + let unbond_dest = unbond_handle(&delegator, &dest_validator); + assert!(unbond_src.is_empty(&storage).unwrap()); + assert!(unbond_dest.is_empty(&storage).unwrap()); + + // The dest validator should have some total unbonded due to the second + // redelegation + let dest_total_unbonded = total_unbonded_handle(&dest_validator) + .at(&redel_2_end) + .get(&storage, &redel_end) + .unwrap(); + assert_eq!(dest_total_unbonded, Some(redel_amount_2)); + + // Delegator should have redelegated bonds due to both redelegations + let del_redelegated_bonds = delegator_redelegated_bonds_handle(&delegator); + assert_eq!( + Some(redel_amount_1 - redel_amount_2), + del_redelegated_bonds + .at(&dest_validator) + .at(&redel_end) + .at(&src_validator) + .get(&storage, &bond_start) + .unwrap() + ); + assert_eq!( + Some(redel_amount_2), + del_redelegated_bonds + .at(&dest_validator_2) + .at(&redel_2_end) + .at(&dest_validator) + .get(&storage, &redel_end) + .unwrap() + ); + + // Delegator redelegated unbonds should be empty + assert!( + delegator_redelegated_unbonds_handle(&delegator) + .is_empty(&storage) + .unwrap() + ); + + // Both the dest validator and dest validator 2 should have total + // redelegated bonds + let dest_redelegated_bonded = + validator_total_redelegated_bonded_handle(&dest_validator) + .at(&redel_end) + .at(&src_validator) + .get(&storage, &bond_start) + .unwrap() + .unwrap_or_default(); + let dest2_redelegated_bonded = + validator_total_redelegated_bonded_handle(&dest_validator_2) + .at(&redel_2_end) + .at(&dest_validator) + .get(&storage, &redel_end) + .unwrap() + .unwrap_or_default(); + assert_eq!(dest_redelegated_bonded, redel_amount_1 - redel_amount_2); + assert_eq!(dest2_redelegated_bonded, redel_amount_2); + + // Total redelegated unbonded should be empty for src_validator and + // dest_validator_2 + assert!( + validator_total_redelegated_unbonded_handle(&dest_validator_2) + .is_empty(&storage) + .unwrap() + ); + assert!( + validator_total_redelegated_unbonded_handle(&src_validator) + .is_empty(&storage) + .unwrap() + ); + + // The dest_validator should have total_redelegated unbonded + let tot_redel_unbonded = + validator_total_redelegated_unbonded_handle(&dest_validator) + .at(&redel_2_end) + .at(&redel_end) + .at(&src_validator) + .get(&storage, &bond_start) + .unwrap() + .unwrap_or_default(); + assert_eq!(tot_redel_unbonded, redel_amount_2); +} + +proptest! 
{ + // Generate arb valid input for `test_overslashing_aux` + #![proptest_config(Config { + cases: 1, + .. Config::default() + })] + #[test] + fn test_overslashing( + + genesis_validators in arb_genesis_validators(4..5, None), + + ) { + test_overslashing_aux(genesis_validators) + } +} + +/// Test precisely that we are not overslashing, as originally discovered by Tomas in this issue: https://github.com/informalsystems/partnership-heliax/issues/74 +fn test_overslashing_aux(mut validators: Vec) { + assert_eq!(validators.len(), 4); + + let params = OwnedPosParams { + unbonding_len: 4, + ..Default::default() + }; + + let offending_stake = token::Amount::native_whole(110); + let other_stake = token::Amount::native_whole(100); + + // Set stakes so we know we will get a slashing rate between 0.5 -1.0 + validators[0].tokens = offending_stake; + validators[1].tokens = other_stake; + validators[2].tokens = other_stake; + validators[3].tokens = other_stake; + + // Get the offending validator + let validator = validators[0].address.clone(); + + // println!("\nTest inputs: {params:?}, genesis validators: + // {validators:#?}"); + let mut storage = TestWlStorage::default(); + + // Genesis + let mut current_epoch = storage.storage.block.epoch; + let params = test_init_genesis( + &mut storage, + params, + validators.clone().into_iter(), + current_epoch, + ) + .unwrap(); + storage.commit_block().unwrap(); + + // Get a delegator with some tokens + let staking_token = storage.storage.native_token.clone(); + let delegator = address::testing::gen_implicit_address(); + let amount_del = token::Amount::native_whole(5); + credit_tokens(&mut storage, &staking_token, &delegator, amount_del) + .unwrap(); + + // Delegate tokens in epoch 0 to validator + bond_tokens( + &mut storage, + Some(&delegator), + &validator, + amount_del, + current_epoch, + None, + ) + .unwrap(); + + let self_bond_epoch = current_epoch; + let delegation_epoch = current_epoch + params.pipeline_len; + + // Advance to pipeline epoch + for _ in 0..params.pipeline_len { + current_epoch = advance_epoch(&mut storage, ¶ms); + } + assert_eq!(delegation_epoch, current_epoch); + + // Find a misbehavior committed in epoch 0 + slash( + &mut storage, + ¶ms, + current_epoch, + self_bond_epoch, + 0_u64, + SlashType::DuplicateVote, + &validator, + current_epoch.next(), + ) + .unwrap(); + + // Find a misbehavior committed in current epoch + slash( + &mut storage, + ¶ms, + current_epoch, + delegation_epoch, + 0_u64, + SlashType::DuplicateVote, + &validator, + current_epoch.next(), + ) + .unwrap(); + + let processing_epoch_1 = + self_bond_epoch + params.slash_processing_epoch_offset(); + let processing_epoch_2 = + delegation_epoch + params.slash_processing_epoch_offset(); + + // Advance to processing epoch 1 + loop { + current_epoch = advance_epoch(&mut storage, ¶ms); + process_slashes(&mut storage, current_epoch).unwrap(); + if current_epoch == processing_epoch_1 { + break; + } + } + + let total_stake_1 = offending_stake + 3 * other_stake; + let stake_frac = Dec::from(offending_stake) / Dec::from(total_stake_1); + let slash_rate_1 = Dec::from_str("9.0").unwrap() * stake_frac * stake_frac; + + let exp_slashed_1 = offending_stake.mul_ceil(slash_rate_1); + + // Check that the proper amount was slashed + let epoch = current_epoch.next(); + let validator_stake = + read_validator_stake(&storage, ¶ms, &validator, epoch).unwrap(); + let exp_validator_stake = offending_stake - exp_slashed_1 + amount_del; + assert_eq!(validator_stake, exp_validator_stake); + + let 
total_stake = read_total_stake(&storage, &params, epoch).unwrap(); + let exp_total_stake = + offending_stake - exp_slashed_1 + amount_del + 3 * other_stake; + assert_eq!(total_stake, exp_total_stake); + + let self_bond_id = BondId { + source: validator.clone(), + validator: validator.clone(), + }; + let bond_amount = + crate::bond_amount(&storage, &self_bond_id, epoch).unwrap(); + let exp_bond_amount = offending_stake - exp_slashed_1; + assert_eq!(bond_amount, exp_bond_amount); + + // Advance to processing epoch 2 + loop { + current_epoch = advance_epoch(&mut storage, &params); + process_slashes(&mut storage, current_epoch).unwrap(); + if current_epoch == processing_epoch_2 { + break; + } + } + + let total_stake_2 = offending_stake + amount_del + 3 * other_stake; + let stake_frac = + Dec::from(offending_stake + amount_del) / Dec::from(total_stake_2); + let slash_rate_2 = Dec::from_str("9.0").unwrap() * stake_frac * stake_frac; + + let exp_slashed_from_delegation = amount_del.mul_ceil(slash_rate_2); + + // Check that the proper amount was slashed. We expect that all of the + // validator self-bond has been slashed and some of the delegation has been + // slashed due to the second infraction. + let epoch = current_epoch.next(); + + let validator_stake = + read_validator_stake(&storage, &params, &validator, epoch).unwrap(); + let exp_validator_stake = amount_del - exp_slashed_from_delegation; + assert_eq!(validator_stake, exp_validator_stake); + + let total_stake = read_total_stake(&storage, &params, epoch).unwrap(); + let exp_total_stake = + amount_del - exp_slashed_from_delegation + 3 * other_stake; + assert_eq!(total_stake, exp_total_stake); + + let delegation_id = BondId { + source: delegator.clone(), + validator: validator.clone(), + }; + let delegation_amount = + crate::bond_amount(&storage, &delegation_id, epoch).unwrap(); + let exp_del_amount = amount_del - exp_slashed_from_delegation; + assert_eq!(delegation_amount, exp_del_amount); + + let self_bond_amount = + crate::bond_amount(&storage, &self_bond_id, epoch).unwrap(); + let exp_bond_amount = token::Amount::zero(); + assert_eq!(self_bond_amount, exp_bond_amount); +} + +proptest! { + // Generate arb valid input for `test_slashed_bond_amount_aux` + #![proptest_config(Config { + cases: 1, + ..
Config::default() + })] + #[test] + fn test_slashed_bond_amount( + + genesis_validators in arb_genesis_validators(4..5, None), + + ) { + test_slashed_bond_amount_aux(genesis_validators) + } +} + +fn test_slashed_bond_amount_aux(validators: Vec) { + let mut storage = TestWlStorage::default(); + let params = OwnedPosParams { + unbonding_len: 4, + ..Default::default() + }; + + let init_tot_stake = validators + .clone() + .into_iter() + .fold(token::Amount::zero(), |acc, v| acc + v.tokens); + let val1_init_stake = validators[0].tokens; + + let mut validators = validators; + validators[0].tokens = (init_tot_stake - val1_init_stake) / 30; + + // Genesis + let mut current_epoch = storage.storage.block.epoch; + let params = test_init_genesis( + &mut storage, + params, + validators.clone().into_iter(), + current_epoch, + ) + .unwrap(); + storage.commit_block().unwrap(); + + let validator1 = validators[0].address.clone(); + let validator2 = validators[1].address.clone(); + + // Get a delegator with some tokens + let staking_token = staking_token_address(&storage); + let delegator = address::testing::gen_implicit_address(); + let del_balance = token::Amount::from_uint(1_000_000, 0).unwrap(); + credit_tokens(&mut storage, &staking_token, &delegator, del_balance) + .unwrap(); + + // Bond to validator 1 + bond_tokens( + &mut storage, + Some(&delegator), + &validator1, + 10_000.into(), + current_epoch, + None, + ) + .unwrap(); + + // Unbond some from validator 1 + unbond_tokens( + &mut storage, + Some(&delegator), + &validator1, + 1_342.into(), + current_epoch, + false, + ) + .unwrap(); + + // Redelegate some from validator 1 -> 2 + redelegate_tokens( + &mut storage, + &delegator, + &validator1, + &validator2, + current_epoch, + 1_875.into(), + ) + .unwrap(); + + // Unbond some from validator 2 + unbond_tokens( + &mut storage, + Some(&delegator), + &validator2, + 584.into(), + current_epoch, + false, + ) + .unwrap(); + + // Advance an epoch to 1 + current_epoch = advance_epoch(&mut storage, ¶ms); + process_slashes(&mut storage, current_epoch).unwrap(); + + // Bond to validator 1 + bond_tokens( + &mut storage, + Some(&delegator), + &validator1, + 384.into(), + current_epoch, + None, + ) + .unwrap(); + + // Unbond some from validator 1 + unbond_tokens( + &mut storage, + Some(&delegator), + &validator1, + 144.into(), + current_epoch, + false, + ) + .unwrap(); + + // Redelegate some from validator 1 -> 2 + redelegate_tokens( + &mut storage, + &delegator, + &validator1, + &validator2, + current_epoch, + 3_448.into(), + ) + .unwrap(); + + // Unbond some from validator 2 + unbond_tokens( + &mut storage, + Some(&delegator), + &validator2, + 699.into(), + current_epoch, + false, + ) + .unwrap(); + + // Advance an epoch to ep 2 + current_epoch = advance_epoch(&mut storage, ¶ms); + process_slashes(&mut storage, current_epoch).unwrap(); + + // Bond to validator 1 + bond_tokens( + &mut storage, + Some(&delegator), + &validator1, + 4_384.into(), + current_epoch, + None, + ) + .unwrap(); + + // Redelegate some from validator 1 -> 2 + redelegate_tokens( + &mut storage, + &delegator, + &validator1, + &validator2, + current_epoch, + 1_008.into(), + ) + .unwrap(); + + // Unbond some from validator 2 + unbond_tokens( + &mut storage, + Some(&delegator), + &validator2, + 3_500.into(), + current_epoch, + false, + ) + .unwrap(); + + // Advance two epochs to ep 4 + for _ in 0..2 { + current_epoch = advance_epoch(&mut storage, ¶ms); + process_slashes(&mut storage, current_epoch).unwrap(); + } + + // Find some slashes committed in 
various epochs + slash( + &mut storage, + ¶ms, + current_epoch, + Epoch(1), + 1_u64, + SlashType::DuplicateVote, + &validator1, + current_epoch, + ) + .unwrap(); + slash( + &mut storage, + ¶ms, + current_epoch, + Epoch(2), + 1_u64, + SlashType::DuplicateVote, + &validator1, + current_epoch, + ) + .unwrap(); + slash( + &mut storage, + ¶ms, + current_epoch, + Epoch(2), + 1_u64, + SlashType::DuplicateVote, + &validator1, + current_epoch, + ) + .unwrap(); + slash( + &mut storage, + ¶ms, + current_epoch, + Epoch(3), + 1_u64, + SlashType::DuplicateVote, + &validator1, + current_epoch, + ) + .unwrap(); + + // Advance such that these slashes are all processed + for _ in 0..params.slash_processing_epoch_offset() { + current_epoch = advance_epoch(&mut storage, ¶ms); + process_slashes(&mut storage, current_epoch).unwrap(); + } + + let pipeline_epoch = current_epoch + params.pipeline_len; + + let del_bond_amount = crate::bond_amount( + &storage, + &BondId { + source: delegator.clone(), + validator: validator1.clone(), + }, + pipeline_epoch, + ) + .unwrap_or_default(); + + let self_bond_amount = crate::bond_amount( + &storage, + &BondId { + source: validator1.clone(), + validator: validator1.clone(), + }, + pipeline_epoch, + ) + .unwrap_or_default(); + + let val_stake = crate::read_validator_stake( + &storage, + ¶ms, + &validator1, + pipeline_epoch, + ) + .unwrap(); + + let diff = val_stake - self_bond_amount - del_bond_amount; + assert!(diff <= 2.into()); +} diff --git a/proof_of_stake/src/tests/test_validator.rs b/proof_of_stake/src/tests/test_validator.rs new file mode 100644 index 0000000000..418c9527e5 --- /dev/null +++ b/proof_of_stake/src/tests/test_validator.rs @@ -0,0 +1,1308 @@ +use std::cmp::min; + +use namada_core::ledger::storage::testing::TestWlStorage; +use namada_core::ledger::storage::WlStorage; +use namada_core::ledger::storage_api::collections::lazy_map; +use namada_core::ledger::storage_api::token::credit_tokens; +use namada_core::types::address::testing::arb_established_address; +use namada_core::types::address::{self, Address, EstablishedAddressGen}; +use namada_core::types::dec::Dec; +use namada_core::types::key::testing::{ + arb_common_keypair, common_sk_from_simple_seed, +}; +use namada_core::types::key::{self, common, RefTo}; +use namada_core::types::storage::Epoch; +use namada_core::types::token; +use proptest::prelude::*; +use proptest::test_runner::Config; +// Use `RUST_LOG=info` (or another tracing level) and `--nocapture` to see +// `tracing` logs from tests +use test_log::test; + +use crate::epoched::DEFAULT_NUM_PAST_EPOCHS; +use crate::storage::{ + below_capacity_validator_set_handle, bond_handle, + consensus_validator_set_handle, find_validator_by_raw_hash, + get_num_consensus_validators, + read_below_capacity_validator_set_addresses_with_stake, + read_below_threshold_validator_set_addresses, + read_consensus_validator_set_addresses_with_stake, update_validator_deltas, + validator_addresses_handle, validator_consensus_key_handle, + validator_set_positions_handle, write_validator_address_raw_hash, +}; +use crate::test_utils::{init_genesis_helper, test_init_genesis}; +use crate::tests::helpers::{ + advance_epoch, arb_genesis_validators, arb_params_and_genesis_validators, + get_tendermint_set_updates, +}; +use crate::types::{ + into_tm_voting_power, ConsensusValidator, GenesisValidator, Position, + ReverseOrdTokenAmount, ValidatorSetUpdate, WeightedValidator, +}; +use crate::validator_set_update::{ + insert_validator_into_validator_set, update_validator_set, +}; +use 
crate::{ + become_validator, bond_tokens, is_validator, staking_token_address, + unbond_tokens, withdraw_tokens, BecomeValidator, OwnedPosParams, +}; + +proptest! { + // Generate arb valid input for `test_become_validator_aux` + #![proptest_config(Config { + cases: 100, + .. Config::default() + })] + #[test] + fn test_become_validator( + + (pos_params, genesis_validators) in arb_params_and_genesis_validators(Some(5), 1..3), + new_validator in arb_established_address().prop_map(Address::Established), + new_validator_consensus_key in arb_common_keypair(), + + ) { + test_become_validator_aux(pos_params, new_validator, + new_validator_consensus_key, genesis_validators) + } +} + +/// Test validator initialization. +fn test_become_validator_aux( + params: OwnedPosParams, + new_validator: Address, + new_validator_consensus_key: common::SecretKey, + validators: Vec, +) { + // println!( + // "Test inputs: {params:?}, new validator: {new_validator}, genesis \ + // validators: {validators:#?}" + // ); + + let mut s = TestWlStorage::default(); + + // Genesis + let mut current_epoch = s.storage.block.epoch; + let params = test_init_genesis( + &mut s, + params, + validators.clone().into_iter(), + current_epoch, + ) + .unwrap(); + s.commit_block().unwrap(); + + // Advance to epoch 1 + current_epoch = advance_epoch(&mut s, ¶ms); + + let num_consensus_before = + get_num_consensus_validators(&s, current_epoch + params.pipeline_len) + .unwrap(); + let num_validators_over_thresh = validators + .iter() + .filter(|validator| { + validator.tokens >= params.validator_stake_threshold + }) + .count(); + + assert_eq!( + min( + num_validators_over_thresh as u64, + params.max_validator_slots + ), + num_consensus_before + ); + assert!(!is_validator(&s, &new_validator).unwrap()); + + // Credit the `new_validator` account + let staking_token = staking_token_address(&s); + let amount = token::Amount::from_uint(100_500_000, 0).unwrap(); + // Credit twice the amount as we're gonna bond it in delegation first, then + // self-bond + credit_tokens(&mut s, &staking_token, &new_validator, amount * 2).unwrap(); + + // Add a delegation from `new_validator` to `genesis_validator` + let genesis_validator = &validators.first().unwrap().address; + bond_tokens( + &mut s, + Some(&new_validator), + genesis_validator, + amount, + current_epoch, + None, + ) + .unwrap(); + + let consensus_key = new_validator_consensus_key.to_public(); + let protocol_sk = common_sk_from_simple_seed(0); + let protocol_key = protocol_sk.to_public(); + let eth_hot_key = key::common::PublicKey::Secp256k1( + key::testing::gen_keypair::().ref_to(), + ); + let eth_cold_key = key::common::PublicKey::Secp256k1( + key::testing::gen_keypair::().ref_to(), + ); + + // Try to become a validator - it should fail as there is a delegation + let result = become_validator( + &mut s, + BecomeValidator { + params: ¶ms, + address: &new_validator, + consensus_key: &consensus_key, + protocol_key: &protocol_key, + eth_cold_key: ð_cold_key, + eth_hot_key: ð_hot_key, + current_epoch, + commission_rate: Dec::new(5, 2).expect("Dec creation failed"), + max_commission_rate_change: Dec::new(5, 2) + .expect("Dec creation failed"), + metadata: Default::default(), + offset_opt: None, + }, + ); + assert!(result.is_err()); + assert!(!is_validator(&s, &new_validator).unwrap()); + + // Unbond the delegation + unbond_tokens( + &mut s, + Some(&new_validator), + genesis_validator, + amount, + current_epoch, + false, + ) + .unwrap(); + + // Try to become a validator account again - it should pass now 
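+ // (The full unbond above removes the delegation, so the address no longer
+ // counts as a delegator and is expected to be accepted as a new validator.)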
+ become_validator( + &mut s, + BecomeValidator { + params: ¶ms, + address: &new_validator, + consensus_key: &consensus_key, + protocol_key: &protocol_key, + eth_cold_key: ð_cold_key, + eth_hot_key: ð_hot_key, + current_epoch, + commission_rate: Dec::new(5, 2).expect("Dec creation failed"), + max_commission_rate_change: Dec::new(5, 2) + .expect("Dec creation failed"), + metadata: Default::default(), + offset_opt: None, + }, + ) + .unwrap(); + assert!(is_validator(&s, &new_validator).unwrap()); + + let num_consensus_after = + get_num_consensus_validators(&s, current_epoch + params.pipeline_len) + .unwrap(); + // The new validator is initialized with no stake and thus is in the + // below-threshold set + assert_eq!(num_consensus_before, num_consensus_after); + + // Advance to epoch 2 + current_epoch = advance_epoch(&mut s, ¶ms); + + // Self-bond to the new validator + bond_tokens(&mut s, None, &new_validator, amount, current_epoch, None) + .unwrap(); + + // Check the bond delta + let bond_handle = bond_handle(&new_validator, &new_validator); + let pipeline_epoch = current_epoch + params.pipeline_len; + let delta = bond_handle.get_delta_val(&s, pipeline_epoch).unwrap(); + assert_eq!(delta, Some(amount)); + + // Check the validator in the validator set - + // If the consensus validator slots are full and all the genesis validators + // have stake GTE the new validator's self-bond amount, the validator should + // be added to the below-capacity set, or the consensus otherwise + if params.max_validator_slots <= validators.len() as u64 + && validators + .iter() + .all(|validator| validator.tokens >= amount) + { + let set = read_below_capacity_validator_set_addresses_with_stake( + &s, + pipeline_epoch, + ) + .unwrap(); + assert!(set.into_iter().any( + |WeightedValidator { + bonded_stake, + address, + }| { + address == new_validator && bonded_stake == amount + } + )); + } else { + let set = read_consensus_validator_set_addresses_with_stake( + &s, + pipeline_epoch, + ) + .unwrap(); + assert!(set.into_iter().any( + |WeightedValidator { + bonded_stake, + address, + }| { + address == new_validator && bonded_stake == amount + } + )); + } + + // Advance to epoch 3 + current_epoch = advance_epoch(&mut s, ¶ms); + + // Unbond the self-bond + unbond_tokens(&mut s, None, &new_validator, amount, current_epoch, false) + .unwrap(); + + let withdrawable_offset = params.unbonding_len + params.pipeline_len; + + // Advance to withdrawable epoch + for _ in 0..withdrawable_offset { + current_epoch = advance_epoch(&mut s, ¶ms); + } + + // Withdraw the self-bond + withdraw_tokens(&mut s, None, &new_validator, current_epoch).unwrap(); +} + +#[test] +fn test_validator_raw_hash() { + let mut storage = TestWlStorage::default(); + let address = address::testing::established_address_1(); + let consensus_sk = key::testing::keypair_1(); + let consensus_pk = consensus_sk.to_public(); + let expected_raw_hash = key::tm_consensus_key_raw_hash(&consensus_pk); + + assert!( + find_validator_by_raw_hash(&storage, &expected_raw_hash) + .unwrap() + .is_none() + ); + write_validator_address_raw_hash(&mut storage, &address, &consensus_pk) + .unwrap(); + let found = + find_validator_by_raw_hash(&storage, &expected_raw_hash).unwrap(); + assert_eq!(found, Some(address)); +} + +#[test] +fn test_validator_sets() { + let mut s = TestWlStorage::default(); + // Only 3 consensus validator slots + let params = OwnedPosParams { + max_validator_slots: 3, + ..Default::default() + }; + let addr_seed = "seed"; + let mut address_gen = 
EstablishedAddressGen::new(addr_seed); + let mut sk_seed = 0; + let mut gen_validator = || { + let res = ( + address_gen.generate_address(addr_seed), + key::testing::common_sk_from_simple_seed(sk_seed).to_public(), + ); + // bump the sk seed + sk_seed += 1; + res + }; + + // Create genesis validators + let ((val1, pk1), stake1) = + (gen_validator(), token::Amount::native_whole(1)); + let ((val2, pk2), stake2) = + (gen_validator(), token::Amount::native_whole(1)); + let ((val3, pk3), stake3) = + (gen_validator(), token::Amount::native_whole(10)); + let ((val4, pk4), stake4) = + (gen_validator(), token::Amount::native_whole(1)); + let ((val5, pk5), stake5) = + (gen_validator(), token::Amount::native_whole(100)); + let ((val6, pk6), stake6) = + (gen_validator(), token::Amount::native_whole(1)); + let ((val7, pk7), stake7) = + (gen_validator(), token::Amount::native_whole(1)); + // println!("\nval1: {val1}, {pk1}, {}", stake1.to_string_native()); + // println!("val2: {val2}, {pk2}, {}", stake2.to_string_native()); + // println!("val3: {val3}, {pk3}, {}", stake3.to_string_native()); + // println!("val4: {val4}, {pk4}, {}", stake4.to_string_native()); + // println!("val5: {val5}, {pk5}, {}", stake5.to_string_native()); + // println!("val6: {val6}, {pk6}, {}", stake6.to_string_native()); + // println!("val7: {val7}, {pk7}, {}", stake7.to_string_native()); + + let start_epoch = Epoch::default(); + let epoch = start_epoch; + + let protocol_sk_1 = common_sk_from_simple_seed(0); + let protocol_sk_2 = common_sk_from_simple_seed(1); + + let params = test_init_genesis( + &mut s, + params, + [ + GenesisValidator { + address: val1.clone(), + tokens: stake1, + consensus_key: pk1.clone(), + protocol_key: protocol_sk_1.to_public(), + eth_hot_key: key::common::PublicKey::Secp256k1( + key::testing::gen_keypair::() + .ref_to(), + ), + eth_cold_key: key::common::PublicKey::Secp256k1( + key::testing::gen_keypair::() + .ref_to(), + ), + commission_rate: Dec::new(1, 1).expect("Dec creation failed"), + max_commission_rate_change: Dec::new(1, 1) + .expect("Dec creation failed"), + metadata: Default::default(), + }, + GenesisValidator { + address: val2.clone(), + tokens: stake2, + consensus_key: pk2.clone(), + protocol_key: protocol_sk_2.to_public(), + eth_hot_key: key::common::PublicKey::Secp256k1( + key::testing::gen_keypair::() + .ref_to(), + ), + eth_cold_key: key::common::PublicKey::Secp256k1( + key::testing::gen_keypair::() + .ref_to(), + ), + commission_rate: Dec::new(1, 1).expect("Dec creation failed"), + max_commission_rate_change: Dec::new(1, 1) + .expect("Dec creation failed"), + metadata: Default::default(), + }, + ] + .into_iter(), + epoch, + ) + .unwrap(); + + // A helper to insert a non-genesis validator + let insert_validator = |s: &mut TestWlStorage, + addr, + pk: &common::PublicKey, + stake: token::Amount, + epoch: Epoch| { + insert_validator_into_validator_set( + s, + ¶ms, + addr, + stake, + epoch, + params.pipeline_len, + ) + .unwrap(); + + update_validator_deltas(s, ¶ms, addr, stake.change(), epoch, None) + .unwrap(); + + // Set their consensus key (needed for + // `validator_set_update_tendermint` fn) + validator_consensus_key_handle(addr) + .set(s, pk.clone(), epoch, params.pipeline_len) + .unwrap(); + }; + + // Advance to EPOCH 1 + // + // We cannot call `get_tendermint_set_updates` for the genesis state as + // `validator_set_update_tendermint` is only called 2 blocks before the + // start of an epoch and so we need to give it a predecessor epoch (see + // `get_tendermint_set_updates`), which 
we cannot have on the first + // epoch. In any way, the initial validator set is given to Tendermint + // from InitChain, so `validator_set_update_tendermint` is + // not being used for it. + let epoch = advance_epoch(&mut s, ¶ms); + let pipeline_epoch = epoch + params.pipeline_len; + + // Insert another validator with the greater stake 10 NAM + insert_validator(&mut s, &val3, &pk3, stake3, epoch); + // Insert validator with stake 1 NAM + insert_validator(&mut s, &val4, &pk4, stake4, epoch); + + // Validator `val3` and `val4` will be added at pipeline offset (2) - epoch + // 3 + let val3_and_4_epoch = pipeline_epoch; + + let consensus_vals: Vec<_> = consensus_validator_set_handle() + .at(&pipeline_epoch) + .iter(&s) + .unwrap() + .map(Result::unwrap) + .collect(); + + assert_eq!(consensus_vals.len(), 3); + assert!(matches!( + &consensus_vals[0], + (lazy_map::NestedSubKey::Data { + key: stake, + nested_sub_key: lazy_map::SubKey::Data(position), + }, address) + if address == &val1 && stake == &stake1 && *position == Position(0) + )); + assert!(matches!( + &consensus_vals[1], + (lazy_map::NestedSubKey::Data { + key: stake, + nested_sub_key: lazy_map::SubKey::Data(position), + }, address) + if address == &val2 && stake == &stake2 && *position == Position(1) + )); + assert!(matches!( + &consensus_vals[2], + (lazy_map::NestedSubKey::Data { + key: stake, + nested_sub_key: lazy_map::SubKey::Data(position), + }, address) + if address == &val3 && stake == &stake3 && *position == Position(0) + )); + + // Check tendermint validator set updates - there should be none + let tm_updates = get_tendermint_set_updates(&s, ¶ms, epoch); + assert!(tm_updates.is_empty()); + + // Advance to EPOCH 2 + let epoch = advance_epoch(&mut s, ¶ms); + let pipeline_epoch = epoch + params.pipeline_len; + + // Insert another validator with a greater stake still 1000 NAM. 
It should + // replace 2nd consensus validator with stake 1, which should become + // below-capacity + insert_validator(&mut s, &val5, &pk5, stake5, epoch); + // Validator `val5` will be added at pipeline offset (2) - epoch 4 + let val5_epoch = pipeline_epoch; + + let consensus_vals: Vec<_> = consensus_validator_set_handle() + .at(&pipeline_epoch) + .iter(&s) + .unwrap() + .map(Result::unwrap) + .collect(); + + assert_eq!(consensus_vals.len(), 3); + assert!(matches!( + &consensus_vals[0], + (lazy_map::NestedSubKey::Data { + key: stake, + nested_sub_key: lazy_map::SubKey::Data(position), + }, address) + if address == &val1 && stake == &stake1 && *position == Position(0) + )); + assert!(matches!( + &consensus_vals[1], + (lazy_map::NestedSubKey::Data { + key: stake, + nested_sub_key: lazy_map::SubKey::Data(position), + }, address) + if address == &val3 && stake == &stake3 && *position == Position(0) + )); + assert!(matches!( + &consensus_vals[2], + (lazy_map::NestedSubKey::Data { + key: stake, + nested_sub_key: lazy_map::SubKey::Data(position), + }, address) + if address == &val5 && stake == &stake5 && *position == Position(0) + )); + + let below_capacity_vals: Vec<_> = below_capacity_validator_set_handle() + .at(&pipeline_epoch) + .iter(&s) + .unwrap() + .map(Result::unwrap) + .collect(); + + assert_eq!(below_capacity_vals.len(), 2); + assert!(matches!( + &below_capacity_vals[0], + (lazy_map::NestedSubKey::Data { + key: ReverseOrdTokenAmount(stake), + nested_sub_key: lazy_map::SubKey::Data(position), + }, address) + if address == &val4 && stake == &stake4 && *position == Position(0) + )); + assert!(matches!( + &below_capacity_vals[1], + (lazy_map::NestedSubKey::Data { + key: ReverseOrdTokenAmount(stake), + nested_sub_key: lazy_map::SubKey::Data(position), + }, address) + if address == &val2 && stake == &stake2 && *position == Position(1) + )); + + // Advance to EPOCH 3 + let epoch = advance_epoch(&mut s, ¶ms); + let pipeline_epoch = epoch + params.pipeline_len; + + // Check tendermint validator set updates + assert_eq!( + val3_and_4_epoch, epoch, + "val3 and val4 are in the validator sets now" + ); + let tm_updates = get_tendermint_set_updates(&s, ¶ms, epoch); + // `val4` is newly added below-capacity, must be skipped in updated in TM + assert_eq!(tm_updates.len(), 1); + assert_eq!( + tm_updates[0], + ValidatorSetUpdate::Consensus(ConsensusValidator { + consensus_key: pk3, + bonded_stake: stake3, + }) + ); + + // Insert another validator with a stake 1 NAM. 
It should be added to the + // below-capacity set + insert_validator(&mut s, &val6, &pk6, stake6, epoch); + // Validator `val6` will be added at pipeline offset (2) - epoch 5 + let val6_epoch = pipeline_epoch; + + let below_capacity_vals: Vec<_> = below_capacity_validator_set_handle() + .at(&pipeline_epoch) + .iter(&s) + .unwrap() + .map(Result::unwrap) + .collect(); + + assert_eq!(below_capacity_vals.len(), 3); + assert!(matches!( + &below_capacity_vals[0], + (lazy_map::NestedSubKey::Data { + key: ReverseOrdTokenAmount(stake), + nested_sub_key: lazy_map::SubKey::Data(position), + }, address) + if address == &val4 && stake == &stake4 && *position == Position(0) + )); + assert!(matches!( + &below_capacity_vals[1], + (lazy_map::NestedSubKey::Data { + key: ReverseOrdTokenAmount(stake), + nested_sub_key: lazy_map::SubKey::Data(position), + }, address) + if address == &val2 && stake == &stake2 && *position == Position(1) + )); + assert!(matches!( + &below_capacity_vals[2], + (lazy_map::NestedSubKey::Data { + key: ReverseOrdTokenAmount(stake), + nested_sub_key: lazy_map::SubKey::Data(position), + }, address) + if address == &val6 && stake == &stake6 && *position == Position(2) + )); + + // Advance to EPOCH 4 + let epoch = advance_epoch(&mut s, ¶ms); + let pipeline_epoch = epoch + params.pipeline_len; + + // Check tendermint validator set updates + assert_eq!(val5_epoch, epoch, "val5 is in the validator sets now"); + let tm_updates = get_tendermint_set_updates(&s, ¶ms, epoch); + assert_eq!(tm_updates.len(), 2); + assert_eq!( + tm_updates[0], + ValidatorSetUpdate::Consensus(ConsensusValidator { + consensus_key: pk5, + bonded_stake: stake5, + }) + ); + assert_eq!(tm_updates[1], ValidatorSetUpdate::Deactivated(pk2)); + + // Unbond some stake from val1, it should be be swapped with the greatest + // below-capacity validator val2 into the below-capacity set. The stake of + // val1 will go below 1 NAM, which is the validator_stake_threshold, so it + // will enter the below-threshold validator set. 
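+ // Rough arithmetic for the step below (assuming the native token uses 6
+ // decimal places, i.e. 1 NAM == 1_000_000 raw units): val1 starts with
+ // 1 NAM of stake, so unbonding 500_000 raw units (0.5 NAM) leaves 0.5 NAM,
+ // which is below the 1 NAM `validator_stake_threshold` mentioned above.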
+ let unbond = token::Amount::from_uint(500_000, 0).unwrap(); + // let stake1 = stake1 - unbond; + + // Because `update_validator_set` and `update_validator_deltas` are + // effective from pipeline offset, we use pipeline epoch for the rest of the + // checks + update_validator_set(&mut s, ¶ms, &val1, -unbond.change(), epoch, None) + .unwrap(); + update_validator_deltas( + &mut s, + ¶ms, + &val1, + -unbond.change(), + epoch, + None, + ) + .unwrap(); + // Epoch 6 + let val1_unbond_epoch = pipeline_epoch; + + let consensus_vals: Vec<_> = consensus_validator_set_handle() + .at(&pipeline_epoch) + .iter(&s) + .unwrap() + .map(Result::unwrap) + .collect(); + + assert_eq!(consensus_vals.len(), 3); + assert!(matches!( + &consensus_vals[0], + (lazy_map::NestedSubKey::Data { + key: stake, + nested_sub_key: lazy_map::SubKey::Data(position), + }, address) + if address == &val4 && stake == &stake4 && *position == Position(0) + )); + assert!(matches!( + &consensus_vals[1], + (lazy_map::NestedSubKey::Data { + key: stake, + nested_sub_key: lazy_map::SubKey::Data(position), + }, address) + if address == &val3 && stake == &stake3 && *position == Position(0) + )); + assert!(matches!( + &consensus_vals[2], + (lazy_map::NestedSubKey::Data { + key: stake, + nested_sub_key: lazy_map::SubKey::Data(position), + }, address) + if address == &val5 && stake == &stake5 && *position == Position(0) + )); + + let below_capacity_vals: Vec<_> = below_capacity_validator_set_handle() + .at(&pipeline_epoch) + .iter(&s) + .unwrap() + .map(Result::unwrap) + .collect(); + + assert_eq!(below_capacity_vals.len(), 2); + assert!(matches!( + &below_capacity_vals[0], + (lazy_map::NestedSubKey::Data { + key: ReverseOrdTokenAmount(stake), + nested_sub_key: lazy_map::SubKey::Data(position), + }, address) + if address == &val2 && stake == &stake2 && *position == Position(1) + )); + assert!(matches!( + &below_capacity_vals[1], + (lazy_map::NestedSubKey::Data { + key: ReverseOrdTokenAmount(stake), + nested_sub_key: lazy_map::SubKey::Data(position), + }, address) + if address == &val6 && stake == &stake6 && *position == Position(2) + )); + + let below_threshold_vals = + read_below_threshold_validator_set_addresses(&s, pipeline_epoch) + .unwrap() + .into_iter() + .collect::>(); + + assert_eq!(below_threshold_vals.len(), 1); + assert_eq!(&below_threshold_vals[0], &val1); + + // Advance to EPOCH 5 + let epoch = advance_epoch(&mut s, ¶ms); + let pipeline_epoch = epoch + params.pipeline_len; + + // Check tendermint validator set updates + assert_eq!(val6_epoch, epoch, "val6 is in the validator sets now"); + let tm_updates = get_tendermint_set_updates(&s, ¶ms, epoch); + assert!(tm_updates.is_empty()); + + // Insert another validator with stake 1 - it should be added to below + // capacity set + insert_validator(&mut s, &val7, &pk7, stake7, epoch); + // Epoch 7 + let val7_epoch = pipeline_epoch; + + let consensus_vals: Vec<_> = consensus_validator_set_handle() + .at(&pipeline_epoch) + .iter(&s) + .unwrap() + .map(Result::unwrap) + .collect(); + + assert_eq!(consensus_vals.len(), 3); + assert!(matches!( + &consensus_vals[0], + (lazy_map::NestedSubKey::Data { + key: stake, + nested_sub_key: lazy_map::SubKey::Data(position), + }, address) + if address == &val4 && stake == &stake4 && *position == Position(0) + )); + assert!(matches!( + &consensus_vals[1], + (lazy_map::NestedSubKey::Data { + key: stake, + nested_sub_key: lazy_map::SubKey::Data(position), + }, address) + if address == &val3 && stake == &stake3 && *position == Position(0) + )); + 
assert!(matches!( + &consensus_vals[2], + (lazy_map::NestedSubKey::Data { + key: stake, + nested_sub_key: lazy_map::SubKey::Data(position), + }, address) + if address == &val5 && stake == &stake5 && *position == Position(0) + )); + + let below_capacity_vals: Vec<_> = below_capacity_validator_set_handle() + .at(&pipeline_epoch) + .iter(&s) + .unwrap() + .map(Result::unwrap) + .collect(); + + assert_eq!(below_capacity_vals.len(), 3); + assert!(matches!( + &below_capacity_vals[0], + (lazy_map::NestedSubKey::Data { + key: ReverseOrdTokenAmount(stake), + nested_sub_key: lazy_map::SubKey::Data(position), + }, address) + if address == &val2 && stake == &stake2 && *position == Position(1) + )); + assert!(matches!( + &below_capacity_vals[1], + (lazy_map::NestedSubKey::Data { + key: ReverseOrdTokenAmount(stake), + nested_sub_key: lazy_map::SubKey::Data(position), + }, address) + if address == &val6 && stake == &stake6 && *position == Position(2) + )); + assert!(matches!( + &below_capacity_vals[2], + ( + lazy_map::NestedSubKey::Data { + key: ReverseOrdTokenAmount(stake), + nested_sub_key: lazy_map::SubKey::Data(position), + }, + address + ) + if address == &val7 && stake == &stake7 && *position == Position(3) + )); + + let below_threshold_vals = + read_below_threshold_validator_set_addresses(&s, pipeline_epoch) + .unwrap() + .into_iter() + .collect::>(); + + assert_eq!(below_threshold_vals.len(), 1); + assert_eq!(&below_threshold_vals[0], &val1); + + // Advance to EPOCH 6 + let epoch = advance_epoch(&mut s, ¶ms); + let pipeline_epoch = epoch + params.pipeline_len; + + // Check tendermint validator set updates + assert_eq!(val1_unbond_epoch, epoch, "val1's unbond is applied now"); + let tm_updates = get_tendermint_set_updates(&s, ¶ms, epoch); + assert_eq!(tm_updates.len(), 2); + assert_eq!( + tm_updates[0], + ValidatorSetUpdate::Consensus(ConsensusValidator { + consensus_key: pk4.clone(), + bonded_stake: stake4, + }) + ); + assert_eq!(tm_updates[1], ValidatorSetUpdate::Deactivated(pk1)); + + // Bond some stake to val6, it should be be swapped with the lowest + // consensus validator val2 into the consensus set + let bond = token::Amount::from_uint(500_000, 0).unwrap(); + let stake6 = stake6 + bond; + + update_validator_set(&mut s, ¶ms, &val6, bond.change(), epoch, None) + .unwrap(); + update_validator_deltas(&mut s, ¶ms, &val6, bond.change(), epoch, None) + .unwrap(); + let val6_bond_epoch = pipeline_epoch; + + let consensus_vals: Vec<_> = consensus_validator_set_handle() + .at(&pipeline_epoch) + .iter(&s) + .unwrap() + .map(Result::unwrap) + .collect(); + + assert_eq!(consensus_vals.len(), 3); + assert!(matches!( + &consensus_vals[0], + (lazy_map::NestedSubKey::Data { + key: stake, + nested_sub_key: lazy_map::SubKey::Data(position), + }, address) + if address == &val6 && stake == &stake6 && *position == Position(0) + )); + assert!(matches!( + &consensus_vals[1], + (lazy_map::NestedSubKey::Data { + key: stake, + nested_sub_key: lazy_map::SubKey::Data(position), + }, address) + if address == &val3 && stake == &stake3 && *position == Position(0) + )); + assert!(matches!( + &consensus_vals[2], + (lazy_map::NestedSubKey::Data { + key: stake, + nested_sub_key: lazy_map::SubKey::Data(position), + }, address) + if address == &val5 && stake == &stake5 && *position == Position(0) + )); + + let below_capacity_vals: Vec<_> = below_capacity_validator_set_handle() + .at(&pipeline_epoch) + .iter(&s) + .unwrap() + .map(Result::unwrap) + .collect(); + + assert_eq!(below_capacity_vals.len(), 3); + + assert!(matches!( 
+ &below_capacity_vals[0], + (lazy_map::NestedSubKey::Data { + key: ReverseOrdTokenAmount(stake), + nested_sub_key: lazy_map::SubKey::Data(position), + }, address) + if address == &val2 && stake == &stake2 && *position == Position(1) + )); + assert!(matches!( + &below_capacity_vals[1], + (lazy_map::NestedSubKey::Data { + key: ReverseOrdTokenAmount(stake), + nested_sub_key: lazy_map::SubKey::Data(position), + }, address) + if address == &val7 && stake == &stake7 && *position == Position(3) + )); + assert!(matches!( + &below_capacity_vals[2], + ( + lazy_map::NestedSubKey::Data { + key: ReverseOrdTokenAmount(stake), + nested_sub_key: lazy_map::SubKey::Data(position), + }, + address + ) + if address == &val4 && stake == &stake4 && *position == Position(4) + )); + + let below_threshold_vals = + read_below_threshold_validator_set_addresses(&s, pipeline_epoch) + .unwrap() + .into_iter() + .collect::>(); + + assert_eq!(below_threshold_vals.len(), 1); + assert_eq!(&below_threshold_vals[0], &val1); + + // Advance to EPOCH 7 + let epoch = advance_epoch(&mut s, ¶ms); + assert_eq!(val7_epoch, epoch, "val6 is in the validator sets now"); + + // Check tendermint validator set updates + let tm_updates = get_tendermint_set_updates(&s, ¶ms, epoch); + assert!(tm_updates.is_empty()); + + // Advance to EPOCH 8 + let epoch = advance_epoch(&mut s, ¶ms); + + // Check tendermint validator set updates + assert_eq!(val6_bond_epoch, epoch, "val5's bond is applied now"); + let tm_updates = get_tendermint_set_updates(&s, ¶ms, epoch); + // dbg!(&tm_updates); + assert_eq!(tm_updates.len(), 2); + assert_eq!( + tm_updates[0], + ValidatorSetUpdate::Consensus(ConsensusValidator { + consensus_key: pk6, + bonded_stake: stake6, + }) + ); + assert_eq!(tm_updates[1], ValidatorSetUpdate::Deactivated(pk4)); + + // Check that the below-capacity validator set was purged for the old epochs + // but that the consensus_validator_set was not + let last_epoch = epoch; + for e in Epoch::iter_bounds_inclusive( + start_epoch, + last_epoch + .sub_or_default(Epoch(DEFAULT_NUM_PAST_EPOCHS)) + .sub_or_default(Epoch(1)), + ) { + assert!( + !consensus_validator_set_handle() + .at(&e) + .is_empty(&s) + .unwrap() + ); + assert!( + below_capacity_validator_set_handle() + .at(&e) + .is_empty(&s) + .unwrap() + ); + } +} + +/// When a consensus set validator with 0 voting power adds a bond in the same +/// epoch as another below-capacity set validator with 0 power, but who adds +/// more bonds than the validator who is in the consensus set, they get swapped +/// in the sets. 
But if both of their new voting powers are still 0 after +/// bonding, the newly below-capacity validator must not be given to tendermint +/// with 0 voting power, because it wasn't it its set before +#[test] +fn test_validator_sets_swap() { + let mut s = TestWlStorage::default(); + // Only 2 consensus validator slots + let params = OwnedPosParams { + max_validator_slots: 2, + // Set the stake threshold to 0 so no validators are in the + // below-threshold set + validator_stake_threshold: token::Amount::zero(), + // Set 0.1 votes per token + tm_votes_per_token: Dec::new(1, 1).expect("Dec creation failed"), + ..Default::default() + }; + + let addr_seed = "seed"; + let mut address_gen = EstablishedAddressGen::new(addr_seed); + let mut sk_seed = 0; + let mut gen_validator = || { + let res = ( + address_gen.generate_address(addr_seed), + key::testing::common_sk_from_simple_seed(sk_seed).to_public(), + ); + // bump the sk seed + sk_seed += 1; + res + }; + + // Start with two genesis validators, one with 1 voting power and other 0 + let epoch = Epoch::default(); + // 1M voting power + let ((val1, pk1), stake1) = + (gen_validator(), token::Amount::native_whole(10)); + // 0 voting power + let ((val2, pk2), stake2) = + (gen_validator(), token::Amount::from_uint(5, 0).unwrap()); + // 0 voting power + let ((val3, pk3), stake3) = + (gen_validator(), token::Amount::from_uint(5, 0).unwrap()); + // println!("val1: {val1}, {pk1}, {}", stake1.to_string_native()); + // println!("val2: {val2}, {pk2}, {}", stake2.to_string_native()); + // println!("val3: {val3}, {pk3}, {}", stake3.to_string_native()); + + let protocol_sk_1 = common_sk_from_simple_seed(0); + let protocol_sk_2 = common_sk_from_simple_seed(1); + + let params = test_init_genesis( + &mut s, + params, + [ + GenesisValidator { + address: val1, + tokens: stake1, + consensus_key: pk1, + protocol_key: protocol_sk_1.to_public(), + eth_hot_key: key::common::PublicKey::Secp256k1( + key::testing::gen_keypair::() + .ref_to(), + ), + eth_cold_key: key::common::PublicKey::Secp256k1( + key::testing::gen_keypair::() + .ref_to(), + ), + commission_rate: Dec::new(1, 1).expect("Dec creation failed"), + max_commission_rate_change: Dec::new(1, 1) + .expect("Dec creation failed"), + metadata: Default::default(), + }, + GenesisValidator { + address: val2.clone(), + tokens: stake2, + consensus_key: pk2, + protocol_key: protocol_sk_2.to_public(), + eth_hot_key: key::common::PublicKey::Secp256k1( + key::testing::gen_keypair::() + .ref_to(), + ), + eth_cold_key: key::common::PublicKey::Secp256k1( + key::testing::gen_keypair::() + .ref_to(), + ), + commission_rate: Dec::new(1, 1).expect("Dec creation failed"), + max_commission_rate_change: Dec::new(1, 1) + .expect("Dec creation failed"), + metadata: Default::default(), + }, + ] + .into_iter(), + epoch, + ) + .unwrap(); + + // A helper to insert a non-genesis validator + let insert_validator = |s: &mut TestWlStorage, + addr, + pk: &common::PublicKey, + stake: token::Amount, + epoch: Epoch| { + insert_validator_into_validator_set( + s, + ¶ms, + addr, + stake, + epoch, + params.pipeline_len, + ) + .unwrap(); + + update_validator_deltas(s, ¶ms, addr, stake.change(), epoch, None) + .unwrap(); + + // Set their consensus key (needed for + // `validator_set_update_tendermint` fn) + validator_consensus_key_handle(addr) + .set(s, pk.clone(), epoch, params.pipeline_len) + .unwrap(); + }; + + // Advance to EPOCH 1 + let epoch = advance_epoch(&mut s, ¶ms); + let pipeline_epoch = epoch + params.pipeline_len; + + // Insert another 
validator with 0 voting power + insert_validator(&mut s, &val3, &pk3, stake3, epoch); + + assert_eq!(stake2, stake3); + + // Add 2 bonds, one for val2 and greater one for val3 + let bonds_epoch_1 = pipeline_epoch; + let bond2 = token::Amount::from_uint(1, 0).unwrap(); + let stake2 = stake2 + bond2; + let bond3 = token::Amount::from_uint(4, 0).unwrap(); + let stake3 = stake3 + bond3; + + assert!(stake2 < stake3); + assert_eq!(into_tm_voting_power(params.tm_votes_per_token, stake2), 0); + assert_eq!(into_tm_voting_power(params.tm_votes_per_token, stake3), 0); + + update_validator_set(&mut s, ¶ms, &val2, bond2.change(), epoch, None) + .unwrap(); + update_validator_deltas( + &mut s, + ¶ms, + &val2, + bond2.change(), + epoch, + None, + ) + .unwrap(); + + update_validator_set(&mut s, ¶ms, &val3, bond3.change(), epoch, None) + .unwrap(); + update_validator_deltas( + &mut s, + ¶ms, + &val3, + bond3.change(), + epoch, + None, + ) + .unwrap(); + + // Advance to EPOCH 2 + let epoch = advance_epoch(&mut s, ¶ms); + let pipeline_epoch = epoch + params.pipeline_len; + + // Add 2 more bonds, same amount for `val2` and val3` + let bonds_epoch_2 = pipeline_epoch; + let bonds = token::Amount::native_whole(1); + let stake2 = stake2 + bonds; + let stake3 = stake3 + bonds; + assert!(stake2 < stake3); + assert_eq!( + into_tm_voting_power(params.tm_votes_per_token, stake2), + into_tm_voting_power(params.tm_votes_per_token, stake3) + ); + + update_validator_set(&mut s, ¶ms, &val2, bonds.change(), epoch, None) + .unwrap(); + update_validator_deltas( + &mut s, + ¶ms, + &val2, + bonds.change(), + epoch, + None, + ) + .unwrap(); + + update_validator_set(&mut s, ¶ms, &val3, bonds.change(), epoch, None) + .unwrap(); + update_validator_deltas( + &mut s, + ¶ms, + &val3, + bonds.change(), + epoch, + None, + ) + .unwrap(); + + // Advance to EPOCH 3 + let epoch = advance_epoch(&mut s, ¶ms); + + // Check tendermint validator set updates + assert_eq!(bonds_epoch_1, epoch); + let tm_updates = get_tendermint_set_updates(&s, ¶ms, epoch); + // `val2` must not be given to tendermint - even though it was in the + // consensus set, its voting power was 0, so it wasn't in TM set before the + // bond + assert!(tm_updates.is_empty()); + + // Advance to EPOCH 4 + let epoch = advance_epoch(&mut s, ¶ms); + + // Check tendermint validator set updates + assert_eq!(bonds_epoch_2, epoch); + let tm_updates = get_tendermint_set_updates(&s, ¶ms, epoch); + // dbg!(&tm_updates); + assert_eq!(tm_updates.len(), 1); + // `val2` must not be given to tendermint as it was and still is below + // capacity + assert_eq!( + tm_updates[0], + ValidatorSetUpdate::Consensus(ConsensusValidator { + consensus_key: pk3, + bonded_stake: stake3, + }) + ); +} + +proptest! { + // Generate arb valid input for `test_purge_validator_information_aux` + #![proptest_config(Config { + cases: 1, + .. Config::default() + })] + #[test] + fn test_purge_validator_information( + + genesis_validators in arb_genesis_validators(4..5, None), + + ) { + test_purge_validator_information_aux( genesis_validators) + } +} + +/// Test validator initialization. 
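+/// This test also checks that epoched validator data (consensus sets,
+/// validator set positions and validator address sets) is purged from
+/// storage once it falls outside the allowed number of past epochs.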
+fn test_purge_validator_information_aux(validators: Vec) { + let owned = OwnedPosParams { + unbonding_len: 4, + ..Default::default() + }; + + let mut s = TestWlStorage::default(); + let mut current_epoch = s.storage.block.epoch; + + // Genesis + let gov_params = + namada_core::ledger::governance::parameters::GovernanceParameters { + max_proposal_period: 5, + ..Default::default() + }; + + gov_params.init_storage(&mut s).unwrap(); + let params = crate::read_non_pos_owned_params(&s, owned).unwrap(); + init_genesis_helper(&mut s, ¶ms, validators.into_iter(), current_epoch) + .unwrap(); + + s.commit_block().unwrap(); + + let default_past_epochs = 2; + let consensus_val_set_len = + gov_params.max_proposal_period + default_past_epochs; + + let consensus_val_set = consensus_validator_set_handle(); + // let below_cap_val_set = below_capacity_validator_set_handle(); + let validator_positions = validator_set_positions_handle(); + let all_validator_addresses = validator_addresses_handle(); + + let check_is_data = |storage: &WlStorage<_, _>, + start: Epoch, + end: Epoch| { + for ep in Epoch::iter_bounds_inclusive(start, end) { + assert!(!consensus_val_set.at(&ep).is_empty(storage).unwrap()); + // assert!(!below_cap_val_set.at(&ep).is_empty(storage). + // unwrap()); + assert!(!validator_positions.at(&ep).is_empty(storage).unwrap()); + assert!( + !all_validator_addresses.at(&ep).is_empty(storage).unwrap() + ); + } + }; + + // Check that there is validator data for epochs 0 - pipeline_len + check_is_data(&s, current_epoch, Epoch(params.owned.pipeline_len)); + + // Advance to epoch 1 + for _ in 0..default_past_epochs { + current_epoch = advance_epoch(&mut s, ¶ms); + } + assert_eq!(s.storage.block.epoch.0, default_past_epochs); + assert_eq!(current_epoch.0, default_past_epochs); + + check_is_data( + &s, + Epoch(0), + Epoch(params.owned.pipeline_len + default_past_epochs), + ); + + current_epoch = advance_epoch(&mut s, ¶ms); + assert_eq!(current_epoch.0, default_past_epochs + 1); + + check_is_data( + &s, + Epoch(1), + Epoch(params.pipeline_len + default_past_epochs + 1), + ); + assert!(!consensus_val_set.at(&Epoch(0)).is_empty(&s).unwrap()); + assert!(validator_positions.at(&Epoch(0)).is_empty(&s).unwrap()); + assert!(all_validator_addresses.at(&Epoch(0)).is_empty(&s).unwrap()); + + // Advance to the epoch `consensus_val_set_len` + 1 + loop { + assert!(!consensus_val_set.at(&Epoch(0)).is_empty(&s).unwrap()); + + current_epoch = advance_epoch(&mut s, ¶ms); + if current_epoch.0 == consensus_val_set_len + 1 { + break; + } + } + + assert!(consensus_val_set.at(&Epoch(0)).is_empty(&s).unwrap()); + + current_epoch = advance_epoch(&mut s, ¶ms); + for ep in Epoch::default().iter_range(2) { + assert!(consensus_val_set.at(&ep).is_empty(&s).unwrap()); + } + for ep in Epoch::iter_bounds_inclusive( + Epoch(2), + current_epoch + params.pipeline_len, + ) { + assert!(!consensus_val_set.at(&ep).is_empty(&s).unwrap()); + } +} diff --git a/proof_of_stake/src/types.rs b/proof_of_stake/src/types/mod.rs similarity index 98% rename from proof_of_stake/src/types.rs rename to proof_of_stake/src/types/mod.rs index 0149f2d365..2d297bd72f 100644 --- a/proof_of_stake/src/types.rs +++ b/proof_of_stake/src/types/mod.rs @@ -590,6 +590,17 @@ pub struct VoteInfo { pub validator_vp: u64, } +/// Temp: In quint this is from `ResultUnbondTx` field `resultSlashing: {sum: +/// int, epochMap: Epoch -> int}` +#[derive(Debug, Default)] +pub struct ResultSlashing { + /// The token amount unbonded from the validator stake after accounting for + /// 
slashes + pub sum: token::Amount, + /// Map from bond start epoch to token amount after slashing + pub epoch_map: BTreeMap, +} + /// Bonds and unbonds with all details (slashes and rewards, if any) /// grouped by their bond IDs. pub type BondsAndUnbondsDetails = HashMap; diff --git a/proof_of_stake/src/validator_set_update.rs b/proof_of_stake/src/validator_set_update.rs new file mode 100644 index 0000000000..9ad53c20b1 --- /dev/null +++ b/proof_of_stake/src/validator_set_update.rs @@ -0,0 +1,1069 @@ +//! Validator set updates + +use std::collections::{HashMap, HashSet}; + +use namada_core::ledger::storage_api::collections::lazy_map::{ + NestedSubKey, SubKey, +}; +use namada_core::ledger::storage_api::{self, StorageRead, StorageWrite}; +use namada_core::types::address::Address; +use namada_core::types::key::PublicKeyTmRawHash; +use namada_core::types::storage::Epoch; +use namada_core::types::token; +use once_cell::unsync::Lazy; + +use crate::storage::{ + below_capacity_validator_set_handle, consensus_validator_set_handle, + get_num_consensus_validators, read_validator_stake, + validator_addresses_handle, validator_consensus_key_handle, + validator_set_positions_handle, validator_state_handle, +}; +use crate::types::{ + into_tm_voting_power, BelowCapacityValidatorSet, ConsensusValidator, + ConsensusValidatorSet, Position, ReverseOrdTokenAmount, + ValidatorPositionAddresses, ValidatorSetUpdate, ValidatorState, +}; +use crate::PosParams; + +/// Update validator set at the pipeline epoch when a validator receives a new +/// bond and when its bond is unbonded (self-bond or delegation). +pub fn update_validator_set( + storage: &mut S, + params: &PosParams, + validator: &Address, + token_change: token::Change, + current_epoch: Epoch, + offset: Option, +) -> storage_api::Result<()> +where + S: StorageRead + StorageWrite, +{ + if token_change.is_zero() { + return Ok(()); + } + let offset = offset.unwrap_or(params.pipeline_len); + let epoch = current_epoch + offset; + tracing::debug!( + "Update epoch for validator set: {epoch}, validator: {validator}" + ); + let consensus_validator_set = consensus_validator_set_handle(); + let below_capacity_validator_set = below_capacity_validator_set_handle(); + + // Validator sets at the pipeline offset + let consensus_val_handle = consensus_validator_set.at(&epoch); + let below_capacity_val_handle = below_capacity_validator_set.at(&epoch); + + let tokens_pre = read_validator_stake(storage, params, validator, epoch)?; + + let tokens_post = tokens_pre + .change() + .checked_add(&token_change) + .expect("Post-validator set update token amount has overflowed"); + debug_assert!(tokens_post.non_negative()); + let tokens_post = token::Amount::from_change(tokens_post); + + // If token amounts both before and after the action are below the threshold + // stake, do nothing + if tokens_pre < params.validator_stake_threshold + && tokens_post < params.validator_stake_threshold + { + return Ok(()); + } + + // The position is only set when the validator is in consensus or + // below_capacity set (not in below_threshold set) + let position = + read_validator_set_position(storage, validator, epoch, params)?; + if let Some(position) = position { + let consensus_vals_pre = consensus_val_handle.at(&tokens_pre); + + let in_consensus = if consensus_vals_pre.contains(storage, &position)? 
{ + let val_address = consensus_vals_pre.get(storage, &position)?; + debug_assert!(val_address.is_some()); + val_address == Some(validator.clone()) + } else { + false + }; + + if in_consensus { + // It's initially consensus + tracing::debug!("Target validator is consensus"); + + // First remove the consensus validator + consensus_vals_pre.remove(storage, &position)?; + + let max_below_capacity_validator_amount = + get_max_below_capacity_validator_amount( + &below_capacity_val_handle, + storage, + )? + .unwrap_or_default(); + + if tokens_post < params.validator_stake_threshold { + tracing::debug!( + "Demoting this validator to the below-threshold set" + ); + // Set the validator state as below-threshold + validator_state_handle(validator).set( + storage, + ValidatorState::BelowThreshold, + current_epoch, + offset, + )?; + + // Remove the validator's position from storage + validator_set_positions_handle() + .at(&epoch) + .remove(storage, validator)?; + + // Promote the next below-cap validator if there is one + if let Some(max_bc_amount) = + get_max_below_capacity_validator_amount( + &below_capacity_val_handle, + storage, + )? + { + // Remove the max below-capacity validator first + let below_capacity_vals_max = + below_capacity_val_handle.at(&max_bc_amount.into()); + let lowest_position = + find_first_position(&below_capacity_vals_max, storage)? + .unwrap(); + let removed_max_below_capacity = below_capacity_vals_max + .remove(storage, &lowest_position)? + .expect("Must have been removed"); + + // Insert the previous max below-capacity validator into the + // consensus set + insert_validator_into_set( + &consensus_val_handle.at(&max_bc_amount), + storage, + &epoch, + &removed_max_below_capacity, + )?; + validator_state_handle(&removed_max_below_capacity).set( + storage, + ValidatorState::Consensus, + current_epoch, + offset, + )?; + } + } else if tokens_post < max_below_capacity_validator_amount { + tracing::debug!( + "Demoting this validator to the below-capacity set and \ + promoting another to the consensus set" + ); + // Place the validator into the below-capacity set and promote + // the lowest position max below-capacity + // validator. + + // Remove the max below-capacity validator first + let below_capacity_vals_max = below_capacity_val_handle + .at(&max_below_capacity_validator_amount.into()); + let lowest_position = + find_first_position(&below_capacity_vals_max, storage)? + .unwrap(); + let removed_max_below_capacity = below_capacity_vals_max + .remove(storage, &lowest_position)? 
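+ // `lowest_position` was just read from this same max-stake bucket, so + // this removal is expected to yield the validator being promoted.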
+ .expect("Must have been removed"); + + // Insert the previous max below-capacity validator into the + // consensus set + insert_validator_into_set( + &consensus_val_handle + .at(&max_below_capacity_validator_amount), + storage, + &epoch, + &removed_max_below_capacity, + )?; + validator_state_handle(&removed_max_below_capacity).set( + storage, + ValidatorState::Consensus, + current_epoch, + offset, + )?; + + // Insert the current validator into the below-capacity set + insert_validator_into_set( + &below_capacity_val_handle.at(&tokens_post.into()), + storage, + &epoch, + validator, + )?; + validator_state_handle(validator).set( + storage, + ValidatorState::BelowCapacity, + current_epoch, + offset, + )?; + } else { + tracing::debug!("Validator remains in consensus set"); + // The current validator should remain in the consensus set - + // place it into a new position + insert_validator_into_set( + &consensus_val_handle.at(&tokens_post), + storage, + &epoch, + validator, + )?; + } + } else { + // It's initially below-capacity + tracing::debug!("Target validator is below-capacity"); + + let below_capacity_vals_pre = + below_capacity_val_handle.at(&tokens_pre.into()); + let removed = below_capacity_vals_pre.remove(storage, &position)?; + debug_assert!(removed.is_some()); + debug_assert_eq!(&removed.unwrap(), validator); + + let min_consensus_validator_amount = + get_min_consensus_validator_amount( + &consensus_val_handle, + storage, + )?; + + if tokens_post > min_consensus_validator_amount { + // Place the validator into the consensus set and demote the + // last position min consensus validator to the + // below-capacity set + tracing::debug!( + "Inserting validator into the consensus set and demoting \ + a consensus validator to the below-capacity set" + ); + + insert_into_consensus_and_demote_to_below_cap( + storage, + validator, + tokens_post, + min_consensus_validator_amount, + current_epoch, + offset, + &consensus_val_handle, + &below_capacity_val_handle, + )?; + } else if tokens_post >= params.validator_stake_threshold { + tracing::debug!("Validator remains in below-capacity set"); + // The current validator should remain in the below-capacity set + insert_validator_into_set( + &below_capacity_val_handle.at(&tokens_post.into()), + storage, + &epoch, + validator, + )?; + validator_state_handle(validator).set( + storage, + ValidatorState::BelowCapacity, + current_epoch, + offset, + )?; + } else { + // The current validator is demoted to the below-threshold set + tracing::debug!( + "Demoting this validator to the below-threshold set" + ); + + validator_state_handle(validator).set( + storage, + ValidatorState::BelowThreshold, + current_epoch, + offset, + )?; + + // Remove the validator's position from storage + validator_set_positions_handle() + .at(&epoch) + .remove(storage, validator)?; + } + } + } else { + // At non-zero offset (0 is genesis only) + if offset > 0 { + // If there is no position at pipeline offset, then the validator + // must be in the below-threshold set + debug_assert!(tokens_pre < params.validator_stake_threshold); + } + tracing::debug!("Target validator is below-threshold"); + + // Move the validator into the appropriate set + let num_consensus_validators = + get_num_consensus_validators(storage, epoch)?; + if num_consensus_validators < params.max_validator_slots { + // Just insert into the consensus set + tracing::debug!("Inserting validator into the consensus set"); + + insert_validator_into_set( + &consensus_val_handle.at(&tokens_post), + storage, + &epoch, + 
validator, + )?; + validator_state_handle(validator).set( + storage, + ValidatorState::Consensus, + current_epoch, + offset, + )?; + } else { + let min_consensus_validator_amount = + get_min_consensus_validator_amount( + &consensus_val_handle, + storage, + )?; + if tokens_post > min_consensus_validator_amount { + // Insert this validator into consensus and demote one into the + // below-capacity + tracing::debug!( + "Inserting validator into the consensus set and demoting \ + a consensus validator to the below-capacity set" + ); + + insert_into_consensus_and_demote_to_below_cap( + storage, + validator, + tokens_post, + min_consensus_validator_amount, + current_epoch, + offset, + &consensus_val_handle, + &below_capacity_val_handle, + )?; + } else { + // Insert this validator into below-capacity + tracing::debug!( + "Inserting validator into the below-capacity set" + ); + + insert_validator_into_set( + &below_capacity_val_handle.at(&tokens_post.into()), + storage, + &epoch, + validator, + )?; + validator_state_handle(validator).set( + storage, + ValidatorState::BelowCapacity, + current_epoch, + offset, + )?; + } + } + } + + Ok(()) +} + +/// Insert the new validator into the right validator set (depending on its +/// stake) +pub fn insert_validator_into_validator_set( + storage: &mut S, + params: &PosParams, + address: &Address, + stake: token::Amount, + current_epoch: Epoch, + offset: u64, +) -> storage_api::Result<()> +where + S: StorageRead + StorageWrite, +{ + let target_epoch = current_epoch + offset; + let consensus_set = consensus_validator_set_handle().at(&target_epoch); + let below_cap_set = below_capacity_validator_set_handle().at(&target_epoch); + + let num_consensus_validators = + get_num_consensus_validators(storage, target_epoch)?; + + if stake < params.validator_stake_threshold { + validator_state_handle(address).set( + storage, + ValidatorState::BelowThreshold, + current_epoch, + offset, + )?; + } else if num_consensus_validators < params.max_validator_slots { + insert_validator_into_set( + &consensus_set.at(&stake), + storage, + &target_epoch, + address, + )?; + validator_state_handle(address).set( + storage, + ValidatorState::Consensus, + current_epoch, + offset, + )?; + } else { + // Check to see if the current genesis validator should replace one + // already in the consensus set + let min_consensus_amount = + get_min_consensus_validator_amount(&consensus_set, storage)?; + if stake > min_consensus_amount { + // Swap this genesis validator in and demote the last min consensus + // validator + let min_consensus_handle = consensus_set.at(&min_consensus_amount); + // Remove last min consensus validator + let last_min_consensus_position = + find_last_position(&min_consensus_handle, storage)?.expect( + "There must be always be at least 1 consensus validator", + ); + let removed = min_consensus_handle + .remove(storage, &last_min_consensus_position)? 
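+ // This branch is only taken when the consensus set is already full, so + // a minimum-stake consensus validator to demote must exist.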
+ .expect( + "There must be always be at least 1 consensus validator", + ); + // Insert last min consensus validator into the below-capacity set + insert_validator_into_set( + &below_cap_set.at(&min_consensus_amount.into()), + storage, + &target_epoch, + &removed, + )?; + validator_state_handle(&removed).set( + storage, + ValidatorState::BelowCapacity, + current_epoch, + offset, + )?; + // Insert the current genesis validator into the consensus set + insert_validator_into_set( + &consensus_set.at(&stake), + storage, + &target_epoch, + address, + )?; + // Update and set the validator states + validator_state_handle(address).set( + storage, + ValidatorState::Consensus, + current_epoch, + offset, + )?; + } else { + // Insert the current genesis validator into the below-capacity set + insert_validator_into_set( + &below_cap_set.at(&stake.into()), + storage, + &target_epoch, + address, + )?; + validator_state_handle(address).set( + storage, + ValidatorState::BelowCapacity, + current_epoch, + offset, + )?; + } + } + Ok(()) +} + +/// Remove a validator from the consensus validator set +pub fn remove_consensus_validator( + storage: &mut S, + params: &PosParams, + epoch: Epoch, + validator: &Address, +) -> storage_api::Result<()> +where + S: StorageRead + StorageWrite, +{ + let stake = read_validator_stake(storage, params, validator, epoch)?; + let consensus_set = consensus_validator_set_handle().at(&epoch).at(&stake); + let val_position = validator_set_positions_handle() + .at(&epoch) + .get(storage, validator)? + .expect("Could not find validator's position in storage."); + + // Removal + let removed = consensus_set.remove(storage, &val_position)?; + debug_assert_eq!(removed, Some(validator.clone())); + + validator_set_positions_handle() + .at(&epoch) + .remove(storage, validator)?; + + Ok(()) +} + +/// Remove a validator from the below-capacity set +pub fn remove_below_capacity_validator( + storage: &mut S, + params: &PosParams, + epoch: Epoch, + validator: &Address, +) -> storage_api::Result<()> +where + S: StorageRead + StorageWrite, +{ + let stake = read_validator_stake(storage, params, validator, epoch)?; + let below_cap_set = below_capacity_validator_set_handle() + .at(&epoch) + .at(&stake.into()); + let val_position = validator_set_positions_handle() + .at(&epoch) + .get(storage, validator)? + .expect("Could not find validator's position in storage."); + + // Removal + let removed = below_cap_set.remove(storage, &val_position)?; + debug_assert_eq!(removed, Some(validator.clone())); + + validator_set_positions_handle() + .at(&epoch) + .remove(storage, validator)?; + + Ok(()) +} + +/// Promote the next below-capacity validator to the consensus validator set, +/// determined as the validator in the below-capacity set with the largest stake +/// and the lowest `Position`. Assumes that there is adequate space within the +/// consensus set already. +pub fn promote_next_below_capacity_validator_to_consensus( + storage: &mut S, + epoch: Epoch, +) -> storage_api::Result<()> +where + S: StorageRead + StorageWrite, +{ + let below_cap_set = below_capacity_validator_set_handle().at(&epoch); + let max_below_capacity_amount = + get_max_below_capacity_validator_amount(&below_cap_set, storage)?; + + if let Some(max_below_capacity_amount) = max_below_capacity_amount { + let max_bc_vals = below_cap_set.at(&max_below_capacity_amount.into()); + let position_to_promote = find_first_position(&max_bc_vals, storage)? 
+ .expect("Should be at least one below-capacity validator"); + + let promoted_validator = max_bc_vals + .remove(storage, &position_to_promote)? + .expect("Should have returned a removed validator."); + + insert_validator_into_set( + &consensus_validator_set_handle() + .at(&epoch) + .at(&max_below_capacity_amount), + storage, + &epoch, + &promoted_validator, + )?; + validator_state_handle(&promoted_validator).set( + storage, + ValidatorState::Consensus, + epoch, + 0, + )?; + } + + Ok(()) +} + +/// Communicate imminent validator set updates to Tendermint. This function is +/// called two blocks before the start of a new epoch because Tendermint +/// validator updates become active two blocks after the updates are submitted. +pub fn validator_set_update_tendermint( + storage: &S, + params: &PosParams, + current_epoch: Epoch, + f: impl FnMut(ValidatorSetUpdate) -> T, +) -> storage_api::Result> +where + S: StorageRead, +{ + tracing::debug!("Communicating validator set updates to Tendermint."); + // Because this is called 2 blocks before a start on an epoch, we're gonna + // give Tendermint updates for the next epoch + let next_epoch = current_epoch.next(); + + let new_consensus_validator_handle = + consensus_validator_set_handle().at(&next_epoch); + let prev_consensus_validator_handle = + consensus_validator_set_handle().at(¤t_epoch); + + let new_consensus_validators = new_consensus_validator_handle + .iter(storage)? + .map(|validator| { + let ( + NestedSubKey::Data { + key: new_stake, + nested_sub_key: _, + }, + address, + ) = validator.unwrap(); + + tracing::debug!( + "Consensus validator address {address}, stake {}", + new_stake.to_string_native() + ); + + let new_consensus_key = validator_consensus_key_handle(&address) + .get(storage, next_epoch, params) + .unwrap() + .unwrap(); + + let old_consensus_key = validator_consensus_key_handle(&address) + .get(storage, current_epoch, params) + .unwrap(); + + // Check if the validator was consensus in the previous epoch with + // the same stake. If so, no updated is needed. 
+ // Look up previous state and prev and current voting powers + if !prev_consensus_validator_handle.is_empty(storage).unwrap() { + let prev_state = validator_state_handle(&address) + .get(storage, current_epoch, params) + .unwrap(); + let prev_tm_voting_power = Lazy::new(|| { + let prev_validator_stake = read_validator_stake( + storage, + params, + &address, + current_epoch, + ) + .unwrap(); + into_tm_voting_power( + params.tm_votes_per_token, + prev_validator_stake, + ) + }); + let new_tm_voting_power = Lazy::new(|| { + into_tm_voting_power(params.tm_votes_per_token, new_stake) + }); + + // If it was in `Consensus` before and voting power has not + // changed, skip the update + if matches!(prev_state, Some(ValidatorState::Consensus)) + && *prev_tm_voting_power == *new_tm_voting_power + { + if old_consensus_key.as_ref().unwrap() == &new_consensus_key + { + tracing::debug!( + "skipping validator update, {address} is in \ + consensus set but voting power hasn't changed" + ); + return vec![]; + } else { + return vec![ + ValidatorSetUpdate::Consensus(ConsensusValidator { + consensus_key: new_consensus_key, + bonded_stake: new_stake, + }), + ValidatorSetUpdate::Deactivated( + old_consensus_key.unwrap(), + ), + ]; + } + } + // If both previous and current voting powers are 0, and the + // validator_stake_threshold is 0, skip update + if params.validator_stake_threshold.is_zero() + && *prev_tm_voting_power == 0 + && *new_tm_voting_power == 0 + { + tracing::info!( + "skipping validator update, {address} is in consensus \ + set but without voting power" + ); + return vec![]; + } + } + + tracing::debug!( + "{address} consensus key {}", + new_consensus_key.tm_raw_hash() + ); + + if old_consensus_key.as_ref() == Some(&new_consensus_key) + || old_consensus_key.is_none() + { + vec![ValidatorSetUpdate::Consensus(ConsensusValidator { + consensus_key: new_consensus_key, + bonded_stake: new_stake, + })] + } else { + vec![ + ValidatorSetUpdate::Consensus(ConsensusValidator { + consensus_key: new_consensus_key, + bonded_stake: new_stake, + }), + ValidatorSetUpdate::Deactivated(old_consensus_key.unwrap()), + ] + } + }); + + let prev_consensus_validators = prev_consensus_validator_handle + .iter(storage)? 
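+ // This second pass over the previous consensus set only needs to emit + // `Deactivated` updates for validators that dropped out of the consensus + // set; validators that remain in consensus were already handled above.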
+ .map(|validator| { + let ( + NestedSubKey::Data { + key: _prev_stake, + nested_sub_key: _, + }, + address, + ) = validator.unwrap(); + + let new_state = validator_state_handle(&address) + .get(storage, next_epoch, params) + .unwrap(); + + let prev_tm_voting_power = Lazy::new(|| { + let prev_validator_stake = read_validator_stake( + storage, + params, + &address, + current_epoch, + ) + .unwrap(); + into_tm_voting_power( + params.tm_votes_per_token, + prev_validator_stake, + ) + }); + + let old_consensus_key = validator_consensus_key_handle(&address) + .get(storage, current_epoch, params) + .unwrap() + .unwrap(); + + // If the validator is still in the Consensus set, we accounted for + // it in the `new_consensus_validators` iterator above + if matches!(new_state, Some(ValidatorState::Consensus)) { + return vec![]; + } else if params.validator_stake_threshold.is_zero() + && *prev_tm_voting_power == 0 + { + // If the new state is not Consensus but its prev voting power + // was 0 and the stake threshold is 0, we can also skip the + // update + tracing::info!( + "skipping validator update, {address} is in consensus set \ + but without voting power" + ); + return vec![]; + } + + // The remaining validators were previously Consensus but no longer + // are, so they must be deactivated + let consensus_key = validator_consensus_key_handle(&address) + .get(storage, next_epoch, params) + .unwrap() + .unwrap(); + tracing::debug!( + "{address} consensus key {}", + consensus_key.tm_raw_hash() + ); + vec![ValidatorSetUpdate::Deactivated(old_consensus_key)] + }); + + Ok(new_consensus_validators + .chain(prev_consensus_validators) + .flatten() + .map(f) + .collect()) +} + +/// Copy the consensus and below-capacity validator sets and positions into a +/// future epoch. Also copies the epoched set of all known validators in the +/// network. +pub fn copy_validator_sets_and_positions( + storage: &mut S, + params: &PosParams, + current_epoch: Epoch, + target_epoch: Epoch, +) -> storage_api::Result<()> +where + S: StorageRead + StorageWrite, +{ + let prev_epoch = target_epoch.prev(); + + let consensus_validator_set = consensus_validator_set_handle(); + let below_capacity_validator_set = below_capacity_validator_set_handle(); + + let (consensus, below_capacity) = ( + consensus_validator_set.at(&prev_epoch), + below_capacity_validator_set.at(&prev_epoch), + ); + debug_assert!(!consensus.is_empty(storage)?); + + // Need to copy into memory here to avoid borrowing a ref + // simultaneously as immutable and mutable + let mut consensus_in_mem: HashMap<(token::Amount, Position), Address> = + HashMap::new(); + let mut below_cap_in_mem: HashMap< + (ReverseOrdTokenAmount, Position), + Address, + > = HashMap::new(); + + for val in consensus.iter(storage)? { + let ( + NestedSubKey::Data { + key: stake, + nested_sub_key: SubKey::Data(position), + }, + address, + ) = val?; + consensus_in_mem.insert((stake, position), address); + } + for val in below_capacity.iter(storage)? 
{ + let ( + NestedSubKey::Data { + key: stake, + nested_sub_key: SubKey::Data(position), + }, + address, + ) = val?; + below_cap_in_mem.insert((stake, position), address); + } + + for ((val_stake, val_position), val_address) in consensus_in_mem.into_iter() + { + consensus_validator_set + .at(&target_epoch) + .at(&val_stake) + .insert(storage, val_position, val_address)?; + } + + for ((val_stake, val_position), val_address) in below_cap_in_mem.into_iter() + { + below_capacity_validator_set + .at(&target_epoch) + .at(&val_stake) + .insert(storage, val_position, val_address)?; + } + // Purge consensus and below-capacity validator sets + consensus_validator_set.update_data(storage, params, current_epoch)?; + below_capacity_validator_set.update_data(storage, params, current_epoch)?; + + // Copy validator positions + let mut positions = HashMap::::default(); + let validator_set_positions_handle = validator_set_positions_handle(); + let positions_handle = validator_set_positions_handle.at(&prev_epoch); + + for result in positions_handle.iter(storage)? { + let (validator, position) = result?; + positions.insert(validator, position); + } + + let new_positions_handle = validator_set_positions_handle.at(&target_epoch); + for (validator, position) in positions { + let prev = new_positions_handle.insert(storage, validator, position)?; + debug_assert!(prev.is_none()); + } + validator_set_positions_handle.set_last_update(storage, current_epoch)?; + + // Purge old epochs of validator positions + validator_set_positions_handle.update_data( + storage, + params, + current_epoch, + )?; + + // Copy set of all validator addresses + let mut all_validators = HashSet::
::default(); + let validator_addresses_handle = validator_addresses_handle(); + let all_validators_handle = validator_addresses_handle.at(&prev_epoch); + for result in all_validators_handle.iter(storage)? { + let validator = result?; + all_validators.insert(validator); + } + let new_all_validators_handle = + validator_addresses_handle.at(&target_epoch); + for validator in all_validators { + let was_in = new_all_validators_handle.insert(storage, validator)?; + debug_assert!(!was_in); + } + + // Purge old epochs of all validator addresses + validator_addresses_handle.update_data(storage, params, current_epoch)?; + + Ok(()) +} + +#[allow(clippy::too_many_arguments)] +fn insert_into_consensus_and_demote_to_below_cap( + storage: &mut S, + validator: &Address, + tokens_post: token::Amount, + min_consensus_amount: token::Amount, + current_epoch: Epoch, + offset: u64, + consensus_set: &ConsensusValidatorSet, + below_capacity_set: &BelowCapacityValidatorSet, +) -> storage_api::Result<()> +where + S: StorageRead + StorageWrite, +{ + // First, remove the last position min consensus validator + let consensus_vals_min = consensus_set.at(&min_consensus_amount); + let last_position_of_min_consensus_vals = + find_last_position(&consensus_vals_min, storage)? + .expect("There must be always be at least 1 consensus validator"); + let removed_min_consensus = consensus_vals_min + .remove(storage, &last_position_of_min_consensus_vals)? + .expect("There must be always be at least 1 consensus validator"); + + let offset_epoch = current_epoch + offset; + + // Insert the min consensus validator into the below-capacity + // set + insert_validator_into_set( + &below_capacity_set.at(&min_consensus_amount.into()), + storage, + &offset_epoch, + &removed_min_consensus, + )?; + validator_state_handle(&removed_min_consensus).set( + storage, + ValidatorState::BelowCapacity, + current_epoch, + offset, + )?; + + // Insert the current validator into the consensus set + insert_validator_into_set( + &consensus_set.at(&tokens_post), + storage, + &offset_epoch, + validator, + )?; + validator_state_handle(validator).set( + storage, + ValidatorState::Consensus, + current_epoch, + offset, + )?; + Ok(()) +} + +/// Find the first (lowest) position in a validator set if it is not empty +fn find_first_position( + handle: &ValidatorPositionAddresses, + storage: &S, +) -> storage_api::Result> +where + S: StorageRead, +{ + let lowest_position = handle + .iter(storage)? + .next() + .transpose()? + .map(|(position, _addr)| position); + Ok(lowest_position) +} + +/// Find the last (greatest) position in a validator set if it is not empty +fn find_last_position( + handle: &ValidatorPositionAddresses, + storage: &S, +) -> storage_api::Result> +where + S: StorageRead, +{ + let position = handle + .iter(storage)? + .last() + .transpose()? + .map(|(position, _addr)| position); + Ok(position) +} + +/// Find next position in a validator set or 0 if empty +fn find_next_position( + handle: &ValidatorPositionAddresses, + storage: &S, +) -> storage_api::Result +where + S: StorageRead, +{ + let position_iter = handle.iter(storage)?; + let next = position_iter + .last() + .transpose()? + .map(|(position, _address)| position.next()) + .unwrap_or_default(); + Ok(next) +} + +fn get_min_consensus_validator_amount( + handle: &ConsensusValidatorSet, + storage: &S, +) -> storage_api::Result +where + S: StorageRead, +{ + Ok(handle + .iter(storage)? + .next() + .transpose()? 
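+ // Keys of the consensus set are token amounts in ascending order, so the + // first entry (if any) carries the minimum consensus stake.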
+ .map(|(subkey, _address)| match subkey { + NestedSubKey::Data { + key, + nested_sub_key: _, + } => key, + }) + .unwrap_or_default()) +} + +/// Returns `Ok(None)` when the below capacity set is empty. +fn get_max_below_capacity_validator_amount( + handle: &BelowCapacityValidatorSet, + storage: &S, +) -> storage_api::Result> +where + S: StorageRead, +{ + Ok(handle + .iter(storage)? + .next() + .transpose()? + .map(|(subkey, _address)| match subkey { + NestedSubKey::Data { + key, + nested_sub_key: _, + } => token::Amount::from(key), + })) +} + +/// Inserts a validator into the provided `handle` within some validator set at +/// the next position. Also updates the validator set position for the +/// validator. +fn insert_validator_into_set( + handle: &ValidatorPositionAddresses, + storage: &mut S, + epoch: &Epoch, + address: &Address, +) -> storage_api::Result<()> +where + S: StorageRead + StorageWrite, +{ + let next_position = find_next_position(handle, storage)?; + tracing::debug!( + "Inserting validator {} into position {:?} at epoch {}", + address.clone(), + next_position.clone(), + epoch.clone() + ); + handle.insert(storage, next_position, address.clone())?; + validator_set_positions_handle().at(epoch).insert( + storage, + address.clone(), + next_position, + )?; + Ok(()) +} + +/// Read the position of the validator in the subset of validators that have the +/// same bonded stake. This information is held in its own epoched structure in +/// addition to being inside the validator sets. +fn read_validator_set_position( + storage: &S, + validator: &Address, + epoch: Epoch, + _params: &PosParams, +) -> storage_api::Result> +where + S: StorageRead, +{ + let handle = validator_set_positions_handle(); + handle.get_data_handler().at(&epoch).get(storage, validator) +} diff --git a/scripts/generator.sh b/scripts/generator.sh deleted file mode 100755 index 1618d5fc9c..0000000000 --- a/scripts/generator.sh +++ /dev/null @@ -1,256 +0,0 @@ -#!/usr/bin/env bash - -# A script to generate some transaction test vectors. It must be executed at the -# root of the Namada repository. All transaction types except vote-proposal are -# tested. This is because vote-proposal needs to query RPC for delegation. This -# script assumes that the WASM scripts have already been built using -# `make build-wasm-scripts`. Run `./scripts/online_generator server` to start a -# server and then run `./scripts/online_generator client` to generate the test -# vectors. 
- -NAMADA_DIR="$(pwd)" -NAMADA_BASE_DIR_FILE="$(pwd)/namada_base_dir" -export NAMADA_LEDGER_LOG_PATH="$(pwd)/vectors.json" -export NAMADA_TX_LOG_PATH="$(pwd)/debugs.txt" -export NAMADA_DEV=false - -if [ "$#" -ne 1 ]; then - echo "Illegal number of parameters" -elif [ "$1" = "server" ]; then - cp genesis/e2e-tests-single-node.toml genesis/test-vectors-single-node.toml - - sed -i 's/^epochs_per_year = 31_536_000$/epochs_per_year = 262_800/' genesis/test-vectors-single-node.toml - - NAMADA_GENESIS_FILE=$(cargo run --bin namadac --package namada_apps --manifest-path Cargo.toml -- utils init-network --genesis-path genesis/test-vectors-single-node.toml --wasm-checksums-path wasm/checksums.json --chain-prefix e2e-test --unsafe-dont-encrypt --localhost --dont-archive --allow-duplicate-ip | grep 'Genesis file generated at ' | sed 's/^Genesis file generated at //') - - rm genesis/test-vectors-single-node.toml - - NAMADA_BASE_DIR=${NAMADA_GENESIS_FILE%.toml} - echo $NAMADA_BASE_DIR > $NAMADA_BASE_DIR_FILE - - sed -i 's/^mode = "RemoteEndpoint"$/mode = "Off"/' $NAMADA_BASE_DIR/config.toml - - cp wasm/*.wasm $NAMADA_BASE_DIR/wasm/ - - cp wasm/*.wasm $NAMADA_BASE_DIR/setup/validator-0/.namada/$(basename $NAMADA_BASE_DIR)/wasm/ - - cp $NAMADA_BASE_DIR/setup/other/wallet.toml $NAMADA_BASE_DIR/wallet.toml - - sed -i 's/^mode = "RemoteEndpoint"$/mode = "Off"/' $NAMADA_BASE_DIR/setup/validator-0/.namada/$(basename $NAMADA_BASE_DIR)/config.toml - - cargo run --bin namadan --package namada_apps --manifest-path Cargo.toml -- --base-dir $NAMADA_BASE_DIR/setup/validator-0/.namada/ ledger -elif [ "$1" = "client" ]; then - if test -f "$NAMADA_BASE_DIR_FILE"; then - NAMADA_BASE_DIR="$(cat $NAMADA_BASE_DIR_FILE)" - fi - - echo > $NAMADA_TX_LOG_PATH - - echo $'[' > $NAMADA_LEDGER_LOG_PATH - - ALBERT_ADDRESS=$(cargo run --bin namadaw -- address find --alias albert | sed 's/^Found address Established: //') - - echo '{ - "proposal": { - "author":"'$ALBERT_ADDRESS'", - "content":{ - "abstract":"Ut convallis eleifend orci vel venenatis. Duis vulputate metus in lacus sollicitudin vestibulum. Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices. Quisque viverra varius cursus. Praesent sed mauris gravida, pharetra turpis non, gravida eros. Nullam sed ex justo. Ut at placerat ipsum, sit amet rhoncus libero. Sed blandit non purus non suscipit. Phasellus sed quam nec augue bibendum bibendum ut vitae urna. Sed odio diam, ornare nec sapien eget, congue viverra enim.", - "authors":"test@test.com", - "created":"2022-03-10T08:54:37Z", - "details":"Ut convallis eleifend orci vel venenatis. Duis vulputate metus in lacus sollicitudin vestibulum. Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices. Quisque viverra varius cursus. Praesent sed mauris gravida, pharetra turpis non, gravida eros.", - "discussions-to":"www.github.com/anoma/aip/1", - "license":"MIT", - "motivation":"Ut convallis eleifend orci vel venenatis. Duis vulputate metus in lacus sollicitudin vestibulum. Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. 
Morbi aliquet purus at sollicitudin ultrices.", - "requires":"2", - "title":"TheTitle" - }, - "grace_epoch":30, - "voting_end_epoch":24, - "voting_start_epoch":12 - } - }' > proposal_default.json - - echo '{ - "data":['$(od -An -tu1 -v wasm_for_tests/tx_proposal_code.wasm | tr '\n' ' ' | sed 's/\b\s\+\b/,/g')'], - "proposal": { - "author":"'$ALBERT_ADDRESS'", - "content":{ - "abstract":"Ut convallis eleifend orci vel venenatis. Duis vulputate metus in lacus sollicitudin vestibulum. Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices. Quisque viverra varius cursus. Praesent sed mauris gravida, pharetra turpis non, gravida eros. Nullam sed ex justo. Ut at placerat ipsum, sit amet rhoncus libero. Sed blandit non purus non suscipit. Phasellus sed quam nec augue bibendum bibendum ut vitae urna. Sed odio diam, ornare nec sapien eget, congue viverra enim.", - "authors":"test@test.com", - "created":"2022-03-10T08:54:37Z", - "details":"Ut convallis eleifend orci vel venenatis. Duis vulputate metus in lacus sollicitudin vestibulum. Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices. Quisque viverra varius cursus. Praesent sed mauris gravida, pharetra turpis non, gravida eros.", - "discussions-to":"www.github.com/anoma/aip/1", - "license":"MIT", - "motivation":"Ut convallis eleifend orci vel venenatis. Duis vulputate metus in lacus sollicitudin vestibulum. Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices.", - "requires":"2", - "title":"TheTitle" - }, - "grace_epoch":30, - "voting_end_epoch":24, - "voting_start_epoch":12 - } - }' > proposal_default_with_data.json - - echo '{ - "author":"'$ALBERT_ADDRESS'", - "content":{ - "abstract":"Ut convallis eleifend orci vel venenatis. Duis vulputate metus in lacus sollicitudin vestibulum. Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices. Quisque viverra varius cursus. Praesent sed mauris gravida, pharetra turpis non, gravida eros. Nullam sed ex justo. Ut at placerat ipsum, sit amet rhoncus libero. Sed blandit non purus non suscipit. Phasellus sed quam nec augue bibendum bibendum ut vitae urna. Sed odio diam, ornare nec sapien eget, congue viverra enim.", - "authors":"test@test.com", - "created":"2022-03-10T08:54:37Z", - "details":"Ut convallis eleifend orci vel venenatis. Duis vulputate metus in lacus sollicitudin vestibulum. Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices. Quisque viverra varius cursus. Praesent sed mauris gravida, pharetra turpis non, gravida eros.", - "discussions-to":"www.github.com/anoma/aip/1", - "license":"MIT", - "motivation":"Ut convallis eleifend orci vel venenatis. Duis vulputate metus in lacus sollicitudin vestibulum. Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices.", - "requires":"2", - "title":"TheTitle" - }, - "tally_epoch":1 - }' > proposal_offline.json - - echo '{ - "proposal": { - "author":"'$ALBERT_ADDRESS'", - "content":{ - "abstract":"Ut convallis eleifend orci vel venenatis. Duis vulputate metus in lacus sollicitudin vestibulum. 
Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices. Quisque viverra varius cursus. Praesent sed mauris gravida, pharetra turpis non, gravida eros. Nullam sed ex justo. Ut at placerat ipsum, sit amet rhoncus libero. Sed blandit non purus non suscipit. Phasellus sed quam nec augue bibendum bibendum ut vitae urna. Sed odio diam, ornare nec sapien eget, congue viverra enim.", - "authors":"test@test.com", - "created":"2022-03-10T08:54:37Z", - "details":"Ut convallis eleifend orci vel venenatis. Duis vulputate metus in lacus sollicitudin vestibulum. Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices. Quisque viverra varius cursus. Praesent sed mauris gravida, pharetra turpis non, gravida eros.", - "discussions-to":"www.github.com/anoma/aip/1", - "license":"MIT", - "motivation":"Ut convallis eleifend orci vel venenatis. Duis vulputate metus in lacus sollicitudin vestibulum. Suspendisse vel velit ac est consectetur feugiat nec ac urna. Ut faucibus ex nec dictum fermentum. Morbi aliquet purus at sollicitudin ultrices.", - "requires":"2", - "title":"TheTitle" - }, - "grace_epoch":30, - "voting_end_epoch":24, - "voting_start_epoch":12 - }, - "data": {"add":"'$ALBERT_ADDRESS'","remove":[]} - }' > proposal_pgf_steward_add.json - - # proposal_default - - cargo run --bin namadac --features std -- bond --validator validator-0 --source Bertha --amount 900 --gas-token NAM --node 127.0.0.1:27657 - - cargo run --bin namadac --features std -- unjail-validator --validator Bertha --gas-token NAM --force --node 127.0.0.1:27657 - - cargo run --bin namadac --features std -- deactivate-validator --validator Bertha --gas-token NAM --force --node 127.0.0.1:27657 - - cargo run --bin namadac --features std -- reactivate-validator --validator Bertha --gas-token NAM --force --node 127.0.0.1:27657 - - cargo run --bin namadac --features std -- change-commission-rate --validator Bertha --commission-rate 0.02 --gas-token NAM --force --node 127.0.0.1:27657 - - PROPOSAL_ID_0=$(cargo run --bin namadac --features std -- init-proposal --force --data-path proposal_default.json --node 127.0.0.1:27657 | grep -o -P '(?<=/proposal/).*(?=/author)') - - cargo run --bin namadac --features std -- init-proposal --force --data-path proposal_default_with_data.json --node 127.0.0.1:27657 - - cargo run --bin namadac --features std -- --base-dir $NAMADA_BASE_DIR/setup/validator-0/.namada vote-proposal --force --proposal-id $PROPOSAL_ID_0 --vote yay --address validator-0 --node 127.0.0.1:27657 - - cargo run --bin namadac --features std -- vote-proposal --force --proposal-id $PROPOSAL_ID_0 --vote nay --address Bertha --node 127.0.0.1:27657 - - cargo run --bin namadac --features std -- vote-proposal --force --proposal-id $PROPOSAL_ID_0 --vote yay --address Albert --node 127.0.0.1:27657 - - # proposal_offline - - cargo run --bin namadac --features std -- bond --validator validator-0 --source Albert --amount 900 --gas-token NAM --node 127.0.0.1:27657 - - cargo run --bin namadac --features std -- change-commission-rate --validator Albert --commission-rate 0.05 --gas-token NAM --force --node 127.0.0.1:27657 - - PROPOSAL_OFFLINE_SIGNED=$(cargo run --bin namadac --features std -- init-proposal --force --data-path proposal_offline.json --signing-keys albert-key --offline --node 127.0.0.1:27657 | grep -o -P '(?<=Proposal serialized to:\s).*') - - cargo run --bin namadac 
--features std -- vote-proposal --data-path $PROPOSAL_OFFLINE_SIGNED --vote yay --address Albert --offline --node 127.0.0.1:27657 - - # pgf_governance_proposal - - cargo run --bin namadac --features std -- bond --validator validator-0 --source Bertha --amount 900 --gas-token NAM --ledger-address 127.0.0.1:27657 - - cargo run --bin namadac --features std -- change-commission-rate --validator Bertha --commission-rate 0.09 --gas-token NAM --force --node 127.0.0.1:27657 - - PROPOSAL_ID_0=$(cargo run --bin namadac --features std -- init-proposal --pgf-stewards --force --data-path proposal_pgf_steward_add.json --ledger-address 127.0.0.1:27657 | grep -o -P '(?<=/proposal/).*(?=/author)') - - PROPOSAL_ID_1=$(cargo run --bin namadac --features std -- init-proposal --pgf-stewards --force --data-path proposal_pgf_steward_add.json --ledger-address 127.0.0.1:27657 | grep -o -P '(?<=/proposal/).*(?=/author)') - - cargo run --bin namadac --features std -- --base-dir $NAMADA_BASE_DIR/setup/validator-0/.namada vote-proposal --force --proposal-id $PROPOSAL_ID_0 --vote yay --address validator-0 --ledger-address 127.0.0.1:27657 - - cargo run --bin namadac --features std -- vote-proposal --force --proposal-id $PROPOSAL_ID_0 --vote yay --address Bertha --signing-keys bertha-key --ledger-address 127.0.0.1:27657 - - cargo run --bin namadac --features std -- vote-proposal --force --proposal-id $PROPOSAL_ID_1 --vote yay --address Bertha --signing-keys bertha-key --ledger-address 127.0.0.1:27657 - - # non-proposal tests - - cargo run --bin namadac --features std -- transfer --source bertha --target christel --token btc --amount 23 --force --signing-keys bertha-key --ledger-address 127.0.0.1:27657 - - cargo run --bin namadac --features std -- bond --validator bertha --amount 25 --signing-keys bertha-key --force --ledger-address 127.0.0.1:27657 - - cargo run --bin namadac --features std -- change-commission-rate --validator Bertha --commission-rate 0.11 --gas-token NAM --force --node 127.0.0.1:27657 - - cargo run --bin namadac --features std -- reveal-pk --public-key albert-key --gas-payer albert-key --force --ledger-address 127.0.0.1:27657 - - cargo run --bin namadac --features std -- update-account --code-path vp_user.wasm --address bertha --signing-keys bertha-key --force --ledger-address 127.0.0.1:27657 - - cargo run --bin namadac --features std -- update-account --code-path vp_user.wasm --address bertha --public-keys albert-key,bertha-key --force --ledger-address 127.0.0.1:27657 - - cargo run --bin namadac --features std -- update-account --code-path vp_user.wasm --address bertha --public-keys albert-key,bertha-key,christel-key --threshold 2 --force --ledger-address 127.0.0.1:27657 - - cargo run --bin namadac --features std -- init-validator --email me@me.com --alias bertha-validator --account-keys bertha-key --commission-rate 0.05 --max-commission-rate-change 0.01 --signing-keys bertha-key --unsafe-dont-encrypt --force --ledger-address 127.0.0.1:27657 - - cargo run --bin namadac --features std -- init-validator --email me@me.com --alias validator-mult --account-keys albert-key,bertha-key --commission-rate 0.05 --max-commission-rate-change 0.01 --signing-keys albert-key,bertha-key --threshold 2 --unsafe-dont-encrypt --force --ledger-address 127.0.0.1:27657 - - # TODO works but panics - cargo run --bin namadac --features std -- unbond --validator christel --amount 5 --signing-keys christel-key --force --ledger-address 127.0.0.1:27657 - - cargo run --bin namadac --features std -- withdraw --validator albert 
--signing-keys albert-key --force --ledger-address 127.0.0.1:27657 - - cargo run --bin namadac --features std -- init-account --alias albert-account --public-keys albert-key --signing-keys albert-key --force --ledger-address 127.0.0.1:27657 - - cargo run --bin namadac --features std -- init-account --alias account-mul --public-keys albert-key,bertha-key,christel-key --signing-keys albert-key,bertha-key,christel-key --threshold 2 --force --ledger-address 127.0.0.1:27657 - - # TODO panics, no vector produced - # cargo run --bin namadac --features std -- tx --code-path $NAMADA_DIR/wasm_for_tests/tx_no_op.wasm --data-path README.md --signing-keys albert-key --owner albert --force --ledger-address 127.0.0.1:27657 - - cargo run --bin namadac --features std -- ibc-transfer --source bertha --receiver christel --token btc --amount 24 --channel-id channel-141 --signing-keys bertha-key --force --ledger-address 127.0.0.1:27657 - - cargo run --bin namadac --features std -- ibc-transfer --source albert --receiver bertha --token nam --amount 100000 --channel-id channel-0 --port-id transfer --signing-keys albert-key --force --ledger-address 127.0.0.1:27657 - - cargo run --bin namadac --features std -- ibc-transfer --source albert --receiver bertha --token nam --amount 100000 --channel-id channel-0 --port-id transfer --signing-keys albert-key --timeout-sec-offset 5 --force --ledger-address 127.0.0.1:27657 - - cargo run --bin namadaw -- masp add --alias a_spending_key --value zsknam1qqqqqqqqqqqqqq9v0sls5r5de7njx8ehu49pqgmqr9ygelg87l5x8y4s9r0pjlvu69au6gn3su5ewneas486hdccyayx32hxvt64p3d0hfuprpgcgv2q9gdx3jvxrn02f0nnp3jtdd6f5vwscfuyum083cvfv4jun75ak5sdgrm2pthzj3sflxc0jx0edrakx3vdcngrfjmru8ywkguru8mxss2uuqxdlglaz6undx5h8w7g70t2es850g48xzdkqay5qs0yw06rtxc9q0cqr --unsafe-dont-encrypt - - cargo run --bin namadaw -- masp add --alias b_spending_key --value zsknam1qqqqqqqqqqqqqqpagte43rsza46v55dlz8cffahv0fnr6eqacvnrkyuf9lmndgal7c2k4r7f7zu2yr5rjwr374unjjeuzrh6mquzy6grfdcnnu5clzaq2llqhr70a8yyx0p62aajqvrqjxrht3myuyypsvm725uyt5vm0fqzrzuuedtf6fala4r4nnazm9y9hq5yu6pq24arjskmpv4mdgfn3spffxxv8ugvym36kmnj45jcvvmm227vqjm5fq8882yhjsq97p7xrwqf599qq --unsafe-dont-encrypt - - cargo run --bin namadaw -- masp add --alias ab_payment_address --value znam1qp562jexfndtcw63equndlwgwawutf6l4p4xgkcvp9sjqf9x7kdlvc48mrh3stfvwk9s9fgsmhuz6 - - cargo run --bin namadaw -- masp add --alias aa_payment_address --value znam1qr57pyghrt5ek7v42nxsqdqggltwqrgj2hjlvm5sj0nr8hezzryxcu44qzcea7qdx6wh02cvt9jlu - - cargo run --bin namadaw -- masp add --alias bb_payment_address --value znam1qpsr9ass6lfmwlkamk3fpwapht94qqe8dq3slykkfd6wjnd4s9snlqszvxsksk3tegqv2yg9rcrzd - - # TODO vector produced only when epoch boundaries not straddled - cargo run --bin namadac --features std -- transfer --source albert --target aa_payment_address --token btc --amount 20 --force --ledger-address 127.0.0.1:27657 - - # TODO vector produced only when epoch boundaries not straddled - cargo run --bin namadac --features std -- transfer --gas-payer albert-key --source a_spending_key --target ab_payment_address --token btc --amount 7 --force --ledger-address 127.0.0.1:27657 - - # TODO fragile - until cargo run --bin namadac -- epoch --ledger-address 127.0.0.1:27657 | grep -m1 "Last committed epoch: 2" ; do sleep 10 ; done; - - # TODO vector produced only when epoch boundaries not straddled - cargo run --bin namadac --features std -- transfer --gas-payer albert-key --source a_spending_key --target bb_payment_address --token btc --amount 7 --force --ledger-address 127.0.0.1:27657 - - # TODO 
vector produced only when epoch boundaries not straddled - cargo run --bin namadac --features std -- transfer --gas-payer albert-key --source a_spending_key --target bb_payment_address --token btc --amount 6 --force --ledger-address 127.0.0.1:27657 - - # TODO vector produced only when epoch boundaries not straddled - cargo run --bin namadac --features std -- transfer --gas-payer albert-key --source b_spending_key --target bb_payment_address --token btc --amount 6 --force --ledger-address 127.0.0.1:27657 - - rm -f proposal_default.json - - rm -f proposal_default_with_data.json - - rm -f proposal_offline.json - - rm -f proposal_pgf_steward_add.json - - perl -0777 -i.original -pe 's/,\s*$//igs' $NAMADA_LEDGER_LOG_PATH - - echo $'\n]' >> $NAMADA_LEDGER_LOG_PATH -fi diff --git a/sdk/Cargo.toml b/sdk/Cargo.toml index 3093de003a..f4d23d57b9 100644 --- a/sdk/Cargo.toml +++ b/sdk/Cargo.toml @@ -49,6 +49,7 @@ testing = [ "namada_ethereum_bridge/testing", "namada_proof_of_stake/testing", "async-client", + "proptest", ] # Download MASP params if they're not present @@ -78,6 +79,7 @@ orion.workspace = true owo-colors = "3.5.0" parse_duration = "2.1.1" paste.workspace = true +proptest = {workspace = true, optional = true} prost.workspace = true rand.workspace = true rand_core.workspace = true @@ -108,4 +110,5 @@ namada_core = {path = "../core", default-features = false, features = ["rand", " namada_ethereum_bridge = {path = "../ethereum_bridge", default-features = false, features = ["testing"]} namada_proof_of_stake = {path = "../proof_of_stake", default-features = false, features = ["testing"]} namada_test_utils = {path = "../test_utils"} +proptest.workspace = true tempfile.workspace = true diff --git a/sdk/src/args.rs b/sdk/src/args.rs index 86a489cf7f..c12ecdf537 100644 --- a/sdk/src/args.rs +++ b/sdk/src/args.rs @@ -13,7 +13,7 @@ use namada_core::types::dec::Dec; use namada_core::types::ethereum_events::EthAddress; use namada_core::types::keccak::KeccakHash; use namada_core::types::key::{common, SchemeType}; -use namada_core::types::masp::MaspValue; +use namada_core::types::masp::PaymentAddress; use namada_core::types::storage::Epoch; use namada_core::types::time::DateTimeUtc; use namada_core::types::transaction::GasLimit; @@ -178,8 +178,7 @@ impl TxCustom { pub async fn build( &self, context: &impl Namada, - ) -> crate::error::Result<(crate::proto::Tx, SigningTxData, Option)> - { + ) -> crate::error::Result<(crate::proto::Tx, SigningTxData)> { tx::build_custom(context, self).await } } @@ -296,7 +295,7 @@ pub struct TxIbcTransfer { /// Common tx arguments pub tx: Tx, /// Transfer source address - pub source: C::Address, + pub source: C::TransferSource, /// Transfer target address pub receiver: String, /// Transferred token address @@ -331,7 +330,7 @@ impl TxBuilder for TxIbcTransfer { impl TxIbcTransfer { /// Transfer source address - pub fn source(self, source: C::Address) -> Self { + pub fn source(self, source: C::TransferSource) -> Self { Self { source, ..self } } @@ -487,8 +486,7 @@ impl InitProposal { pub async fn build( &self, context: &impl Namada, - ) -> crate::error::Result<(crate::proto::Tx, SigningTxData, Option)> - { + ) -> crate::error::Result<(crate::proto::Tx, SigningTxData)> { let current_epoch = rpc::query_epoch(context.client()).await?; let governance_parameters = rpc::query_governance_parameters(context.client()).await; @@ -644,8 +642,7 @@ impl VoteProposal { pub async fn build( &self, context: &impl Namada, - ) -> crate::error::Result<(crate::proto::Tx, SigningTxData, Option)> - 
{ + ) -> crate::error::Result<(crate::proto::Tx, SigningTxData)> { let current_epoch = rpc::query_epoch(context.client()).await?; tx::build_vote_proposal(context, self, current_epoch).await } @@ -717,8 +714,7 @@ impl TxInitAccount { pub async fn build( &self, context: &impl Namada, - ) -> crate::error::Result<(crate::proto::Tx, SigningTxData, Option)> - { + ) -> crate::error::Result<(crate::proto::Tx, SigningTxData)> { tx::build_init_account(context, self).await } } @@ -872,8 +868,7 @@ impl TxUpdateAccount { pub async fn build( &self, context: &impl Namada, - ) -> crate::error::Result<(crate::proto::Tx, SigningTxData, Option)> - { + ) -> crate::error::Result<(crate::proto::Tx, SigningTxData)> { tx::build_update_account(context, self).await } } @@ -950,8 +945,7 @@ impl Bond { pub async fn build( &self, context: &impl Namada, - ) -> crate::error::Result<(crate::proto::Tx, SigningTxData, Option)> - { + ) -> crate::error::Result<(crate::proto::Tx, SigningTxData)> { tx::build_bond(context, self).await } } @@ -980,7 +974,6 @@ impl Unbond { ) -> crate::error::Result<( crate::proto::Tx, SigningTxData, - Option, Option<(Epoch, token::Amount)>, )> { tx::build_unbond(context, self).await @@ -1131,8 +1124,7 @@ impl RevealPk { pub async fn build( &self, context: &impl Namada, - ) -> crate::error::Result<(crate::proto::Tx, SigningTxData, Option)> - { + ) -> crate::error::Result<(crate::proto::Tx, SigningTxData)> { tx::build_reveal_pk(context, &self.tx, &self.public_key).await } } @@ -1215,8 +1207,7 @@ impl Withdraw { pub async fn build( &self, context: &impl Namada, - ) -> crate::error::Result<(crate::proto::Tx, SigningTxData, Option)> - { + ) -> crate::error::Result<(crate::proto::Tx, SigningTxData)> { tx::build_withdraw(context, self).await } } @@ -1252,8 +1243,7 @@ impl ClaimRewards { pub async fn build( &self, context: &impl Namada, - ) -> crate::error::Result<(crate::proto::Tx, SigningTxData, Option)> - { + ) -> crate::error::Result<(crate::proto::Tx, SigningTxData)> { tx::build_claim_rewards(context, self).await } } @@ -1385,8 +1375,7 @@ impl CommissionRateChange { pub async fn build( &self, context: &impl Namada, - ) -> crate::error::Result<(crate::proto::Tx, SigningTxData, Option)> - { + ) -> crate::error::Result<(crate::proto::Tx, SigningTxData)> { tx::build_validator_commission_change(context, self).await } } @@ -1400,6 +1389,8 @@ pub struct ConsensusKeyChange { pub validator: C::Address, /// New consensus key pub consensus_key: Option, + /// Don't encrypt the keypair + pub unsafe_dont_encrypt: bool, /// Path to the TX WASM code file pub tx_code_path: PathBuf, } @@ -1503,8 +1494,7 @@ impl MetaDataChange { pub async fn build( &self, context: &impl Namada, - ) -> crate::error::Result<(crate::proto::Tx, SigningTxData, Option)> - { + ) -> crate::error::Result<(crate::proto::Tx, SigningTxData)> { tx::build_validator_metadata_change(context, self).await } } @@ -1559,8 +1549,7 @@ impl UpdateStewardCommission { pub async fn build( &self, context: &impl Namada, - ) -> crate::error::Result<(crate::proto::Tx, SigningTxData, Option)> - { + ) -> crate::error::Result<(crate::proto::Tx, SigningTxData)> { tx::build_update_steward_commission(context, self).await } } @@ -1608,8 +1597,7 @@ impl ResignSteward { pub async fn build( &self, context: &impl Namada, - ) -> crate::error::Result<(crate::proto::Tx, SigningTxData, Option)> - { + ) -> crate::error::Result<(crate::proto::Tx, SigningTxData)> { tx::build_resign_steward(context, self).await } } @@ -1657,8 +1645,7 @@ impl TxUnjailValidator { pub async fn build( 
&self, context: &impl Namada, - ) -> crate::error::Result<(crate::proto::Tx, SigningTxData, Option)> - { + ) -> crate::error::Result<(crate::proto::Tx, SigningTxData)> { tx::build_unjail_validator(context, self).await } } @@ -1706,8 +1693,7 @@ impl TxDeactivateValidator { pub async fn build( &self, context: &impl Namada, - ) -> crate::error::Result<(crate::proto::Tx, SigningTxData, Option)> - { + ) -> crate::error::Result<(crate::proto::Tx, SigningTxData)> { tx::build_deactivate_validator(context, self).await } } @@ -1755,8 +1741,7 @@ impl TxReactivateValidator { pub async fn build( &self, context: &impl Namada, - ) -> crate::error::Result<(crate::proto::Tx, SigningTxData, Option)> - { + ) -> crate::error::Result<(crate::proto::Tx, SigningTxData)> { tx::build_reactivate_validator(context, self).await } } @@ -2040,66 +2025,35 @@ impl TxBuilder for Tx { } } -/// MASP add key or address arguments -#[derive(Clone, Debug)] -pub struct MaspAddrKeyAdd { - /// Key alias - pub alias: String, - /// Whether to force overwrite the alias - pub alias_force: bool, - /// Any MASP value - pub value: MaspValue, - /// Don't encrypt the keypair - pub unsafe_dont_encrypt: bool, -} - -/// MASP generate spending key arguments -#[derive(Clone, Debug)] -pub struct MaspSpendKeyGen { - /// Key alias - pub alias: String, - /// Whether to force overwrite the alias - pub alias_force: bool, - /// Don't encrypt the keypair - pub unsafe_dont_encrypt: bool, -} - -/// MASP generate payment address arguments -#[derive(Clone, Debug)] -pub struct MaspPayAddrGen { - /// Key alias - pub alias: String, - /// Whether to force overwrite the alias - pub alias_force: bool, - /// Viewing key - pub viewing_key: C::ViewingKey, - /// Pin - pub pin: bool, -} - /// Wallet generate key and implicit address arguments #[derive(Clone, Debug)] -pub struct KeyAndAddressGen { +pub struct KeyGen { /// Scheme type pub scheme: SchemeType, + /// Whether to generate a spending key for the shielded pool + pub shielded: bool, + /// Whether to generate a raw non-hd key + pub raw: bool, /// Key alias - pub alias: Option, - /// Whether to force overwrite the alias, if provided + pub alias: String, + /// Whether to force overwrite the alias pub alias_force: bool, /// Don't encrypt the keypair pub unsafe_dont_encrypt: bool, - /// BIP44 derivation path + /// BIP44 / ZIP32 derivation path pub derivation_path: String, } /// Wallet restore key and implicit address arguments #[derive(Clone, Debug)] -pub struct KeyAndAddressDerive { +pub struct KeyDerive { /// Scheme type pub scheme: SchemeType, + /// Whether to generate a MASP spending key + pub shielded: bool, /// Key alias - pub alias: Option, - /// Whether to force overwrite the alias, if provided + pub alias: String, + /// Whether to force overwrite the alias pub alias_force: bool, /// Don't encrypt the keypair pub unsafe_dont_encrypt: bool, @@ -2109,46 +2063,45 @@ pub struct KeyAndAddressDerive { pub use_device: bool, } -/// Wallet key lookup arguments -#[derive(Clone, Debug)] -pub struct KeyFind { - /// Public key to lookup keypair with - pub public_key: Option, - /// Key alias to lookup keypair with - pub alias: Option, - /// Public key hash to lookup keypair with - pub value: Option, - /// Show secret keys to user - pub unsafe_show_secret: bool, -} - -/// Wallet find shielded address or key arguments -#[derive(Clone, Debug)] -pub struct AddrKeyFind { - /// Address/key alias - pub alias: String, - /// Show secret keys to user - pub unsafe_show_secret: bool, -} - -/// Wallet list shielded keys arguments 
-#[derive(Clone, Debug)] -pub struct MaspKeysList { - /// Don't decrypt spending keys +/// Wallet list arguments +#[derive(Clone, Copy, Debug)] +pub struct KeyAddressList { + /// Whether to list transparent secret keys only + pub transparent_only: bool, + /// Whether to list MASP spending keys only + pub shielded_only: bool, + /// List keys only + pub keys_only: bool, + /// List addresses only + pub addresses_only: bool, + /// Whether to decrypt secret / spending keys pub decrypt: bool, /// Show secret keys to user pub unsafe_show_secret: bool, } -/// Wallet list keys arguments +/// Wallet key / address lookup arguments #[derive(Clone, Debug)] -pub struct KeyList { - /// Don't decrypt keypairs +pub struct KeyAddressFind { + /// Alias to find + pub alias: Option, + /// Address to find + pub address: Option
, + /// Public key to lookup keypair with + pub public_key: Option, + /// Public key hash to lookup keypair with + pub public_key_hash: Option, + /// Payment address to find + pub payment_address: Option, + /// Find keys only + pub keys_only: bool, + /// Find addresses only + pub addresses_only: bool, + /// Whether to decrypt secret / spending keys pub decrypt: bool, /// Show secret keys to user pub unsafe_show_secret: bool, } - /// Wallet key export arguments #[derive(Clone, Debug)] pub struct KeyExport { @@ -2156,24 +2109,52 @@ pub struct KeyExport { pub alias: String, } -/// Wallet address lookup arguments +/// Wallet key import arguments #[derive(Clone, Debug)] -pub struct AddressOrAliasFind { - /// Alias to find - pub alias: Option, - /// Address to find - pub address: Option
, +pub struct KeyImport { + /// File name + pub file_path: String, + /// Key alias + pub alias: String, + /// Whether to force overwrite the alias + pub alias_force: bool, + /// Don't encrypt the key + pub unsafe_dont_encrypt: bool, } -/// Wallet address add arguments +/// Wallet key / address add arguments #[derive(Clone, Debug)] -pub struct AddressAdd { +pub struct KeyAddressAdd { /// Address alias pub alias: String, /// Whether to force overwrite the alias pub alias_force: bool, - /// Address to add - pub address: Address, + /// Any supported value + pub value: String, + /// Don't encrypt the key + pub unsafe_dont_encrypt: bool, +} + +/// Wallet key / address remove arguments +#[derive(Clone, Debug)] +pub struct KeyAddressRemove { + /// Address alias + pub alias: String, + /// Confirmation to remove the alias + pub do_it: bool, +} + +/// Generate payment address arguments +#[derive(Clone, Debug)] +pub struct PayAddressGen { + /// Key alias + pub alias: String, + /// Whether to force overwrite the alias + pub alias_force: bool, + /// Viewing key + pub viewing_key: C::ViewingKey, + /// Pin + pub pin: bool, } /// Bridge pool batch recommendation. @@ -2292,8 +2273,7 @@ impl EthereumBridgePool { pub async fn build( self, context: &impl Namada, - ) -> crate::error::Result<(crate::proto::Tx, SigningTxData, Option)> - { + ) -> crate::error::Result<(crate::proto::Tx, SigningTxData)> { bridge_pool::build_bridge_pool_tx(context, self).await } } @@ -2418,8 +2398,8 @@ pub struct GenIbcShieldedTransafer { pub output_folder: Option, /// The target address pub target: C::TransferTarget, - /// The token address - pub token: C::Address, + /// The token address which could be a non-namada address + pub token: String, /// Transferred token amount pub amount: InputAmount, /// Port ID via which the token is received diff --git a/sdk/src/control_flow/mod.rs b/sdk/src/control_flow/mod.rs index 9b75b6e921..42294d5191 100644 --- a/sdk/src/control_flow/mod.rs +++ b/sdk/src/control_flow/mod.rs @@ -66,7 +66,7 @@ pub fn install_shutdown_signal() -> ShutdownSignal { } #[cfg(unix)] -async fn shutdown_send(tx: oneshot::Sender<()>) { +pub async fn shutdown_send(tx: oneshot::Sender<()>) { use tokio::signal::unix::{signal, SignalKind}; let mut sigterm = signal(SignalKind::terminate()).unwrap(); let mut sighup = signal(SignalKind::hangup()).unwrap(); @@ -107,7 +107,7 @@ async fn shutdown_send(tx: oneshot::Sender<()>) { } #[cfg(windows)] -async fn shutdown_send(tx: oneshot::Sender<()>) { +pub async fn shutdown_send(tx: oneshot::Sender<()>) { let mut sigbreak = tokio::signal::windows::ctrl_break().unwrap(); tokio::select! 
{ signal = tokio::signal::ctrl_c() => { diff --git a/sdk/src/eth_bridge/bridge_pool.rs b/sdk/src/eth_bridge/bridge_pool.rs index e9784b81bb..fa6eba1d02 100644 --- a/sdk/src/eth_bridge/bridge_pool.rs +++ b/sdk/src/eth_bridge/bridge_pool.rs @@ -2,7 +2,7 @@ use std::borrow::Cow; use std::cmp::Ordering; -use std::collections::HashMap; +use std::collections::{HashMap, HashSet}; use std::sync::Arc; use borsh_ext::BorshSerializeExt; @@ -18,8 +18,7 @@ use namada_core::types::eth_bridge_pool::{ }; use namada_core::types::ethereum_events::EthAddress; use namada_core::types::keccak::KeccakHash; -use namada_core::types::storage::Epoch; -use namada_core::types::token::{balance_key, Amount, DenominatedAmount}; +use namada_core::types::token::{balance_key, Amount}; use namada_core::types::voting_power::FractionalVotingPower; use owo_colors::OwoColorize; use serde::Serialize; @@ -36,7 +35,7 @@ use crate::io::Io; use crate::proto::Tx; use crate::queries::{ Client, GenBridgePoolProofReq, GenBridgePoolProofRsp, TransferToErcArgs, - RPC, + TransferToEthereumStatus, RPC, }; use crate::rpc::{query_storage_value, query_wasm_code_hash, validate_amount}; use crate::signing::aux_signing_data; @@ -61,7 +60,7 @@ pub async fn build_bridge_pool_tx( fee_token, code_path, }: args::EthereumBridgePool, -) -> Result<(Tx, SigningTxData, Option), Error> { +) -> Result<(Tx, SigningTxData), Error> { let sender_ = sender.clone(); let (transfer, tx_code_hash, signing_data) = futures::try_join!( validate_bridge_pool_tx( @@ -99,7 +98,7 @@ pub async fn build_bridge_pool_tx( ) .add_data(transfer); - let epoch = prepare_tx( + prepare_tx( context, &tx_args, &mut tx, @@ -108,7 +107,7 @@ pub async fn build_bridge_pool_tx( ) .await?; - Ok((tx, signing_data, epoch)) + Ok((tx, signing_data)) } /// Perform client validation checks on a Bridge pool transfer. @@ -145,12 +144,8 @@ async fn validate_bridge_pool_tx( }); // validate amounts - let ( - tok_denominated @ DenominatedAmount { amount, .. }, - fee_denominated @ DenominatedAmount { - amount: fee_amount, .. - }, - ) = futures::try_join!(validate_token_amount, validate_fee_amount)?; + let (tok_denominated, fee_denominated) = + futures::try_join!(validate_token_amount, validate_fee_amount)?; // build pending Bridge pool transfer let fee_payer = fee_payer.unwrap_or_else(|| sender.clone()); @@ -159,7 +154,7 @@ async fn validate_bridge_pool_tx( asset, recipient, sender, - amount, + amount: tok_denominated.amount(), kind: if nut { TransferToEthereumKind::Nut } else { @@ -168,7 +163,7 @@ async fn validate_bridge_pool_tx( }, gas_fee: GasFee { token: fee_token, - amount: fee_amount, + amount: fee_denominated.amount(), payer: fee_payer, }, }; @@ -700,6 +695,37 @@ where Ok(()) } +/// Query the status of a set of transfers to Ethreum, indexed +/// by their keccak hash. +/// +/// Any unrecognized hashes (i.e. not pertaining to transfers +/// in Namada's event log nor the Bridge pool) will be flagged +/// as such. Unrecognized transfers could have been relayed to +/// Ethereum, or could have expired from the Bridge pool. If +/// these scenarios verify, it should be possible to retrieve +/// the status of these transfers by querying CometBFT's block +/// data. 
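// Illustrative usage sketch (not part of the diff): calling the Bridge pool
// status query documented above. The generics elided by this hunk are assumed
// to be `T: Into<HashSet<KeccakHash>>` with a `TransferToEthereumStatus`
// result; `report_transfer_status` itself is a hypothetical helper.
async fn report_transfer_status<C>(
    client: &C,
    hashes: std::collections::HashSet<namada_core::types::keccak::KeccakHash>,
) -> Result<(), Error>
where
    C: Client + Sync,
{
    let status = query_eth_transfer_status(client, hashes).await?;
    // Pending transfers are still in the Bridge pool; relayed and expired ones
    // were found in the event log; anything else is unrecognized and may need
    // to be resolved against CometBFT block data.
    println!(
        "pending: {} relayed: {} expired: {} unrecognized: {}",
        status.pending.len(),
        status.relayed.len(),
        status.expired.len(),
        status.unrecognized.len(),
    );
    Ok(())
}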
+pub async fn query_eth_transfer_status( + client: &C, + transfers: T, +) -> Result +where + C: Client + Sync, + T: Into>, +{ + RPC.shell() + .eth_bridge() + .pending_eth_transfer_status( + client, + Some(transfers.into().serialize_to_vec()), + None, + false, + ) + .await + .map_err(|e| Error::Query(QueryError::General(e.to_string()))) + .map(|result| result.data) +} + mod recommendations { use std::collections::BTreeSet; diff --git a/sdk/src/eth_bridge/mod.rs b/sdk/src/eth_bridge/mod.rs index b8577956ca..cfcc750444 100644 --- a/sdk/src/eth_bridge/mod.rs +++ b/sdk/src/eth_bridge/mod.rs @@ -11,8 +11,8 @@ use itertools::Either; pub use namada_core::ledger::eth_bridge::storage::wrapped_erc20s; pub use namada_core::ledger::eth_bridge::{ADDRESS, INTERNAL_ADDRESS}; pub use namada_core::types::ethereum_structs as structs; -pub use namada_ethereum_bridge::parameters::*; pub use namada_ethereum_bridge::storage::eth_bridge_queries::*; +pub use namada_ethereum_bridge::storage::parameters::*; pub use namada_ethereum_bridge::*; use num256::Uint256; diff --git a/sdk/src/events/mod.rs b/sdk/src/events/mod.rs index 3ebd5dcae8..8c5dc8573f 100644 --- a/sdk/src/events/mod.rs +++ b/sdk/src/events/mod.rs @@ -8,6 +8,7 @@ use std::ops::{Index, IndexMut}; use std::str::FromStr; use borsh::{BorshDeserialize, BorshSerialize}; +use namada_core::types::ethereum_structs::{BpTransferStatus, EthBridgeEvent}; use namada_core::types::ibc::IbcEvent; use namada_core::types::transaction::TxType; use serde_json::Value; @@ -16,6 +17,37 @@ use serde_json::Value; use crate::error::{EncodingError, Error, EventError}; use crate::tendermint_proto::v0_37::abci::EventAttribute; +impl From for Event { + #[inline] + fn from(event: EthBridgeEvent) -> Event { + Self::from(&event) + } +} + +impl From<&EthBridgeEvent> for Event { + fn from(event: &EthBridgeEvent) -> Event { + match event { + EthBridgeEvent::BridgePool { tx_hash, status } => Event { + event_type: EventType::EthereumBridge, + level: EventLevel::Tx, + attributes: { + let mut attrs = HashMap::new(); + attrs.insert( + "kind".into(), + match status { + BpTransferStatus::Relayed => "bridge_pool_relayed", + BpTransferStatus::Expired => "bridge_pool_expired", + } + .into(), + ); + attrs.insert("tx_hash".into(), tx_hash.to_string()); + attrs + }, + }, + } + } +} + /// Indicates if an event is emitted do to /// an individual Tx or the nature of a finalized block #[derive(Clone, Debug, Eq, PartialEq, BorshSerialize, BorshDeserialize)] @@ -52,6 +84,8 @@ pub enum EventType { Proposal, /// The pgf payment PgfPayment, + /// Ethereum Bridge event + EthereumBridge, } impl Display for EventType { @@ -62,6 +96,7 @@ impl Display for EventType { EventType::Ibc(t) => write!(f, "{}", t), EventType::Proposal => write!(f, "proposal"), EventType::PgfPayment => write!(f, "pgf_payment"), + EventType::EthereumBridge => write!(f, "ethereum_bridge"), }?; Ok(()) } @@ -82,6 +117,7 @@ impl FromStr for EventType { "write_acknowledgement" => { Ok(EventType::Ibc("write_acknowledgement".to_string())) } + "ethereum_bridge" => Ok(EventType::EthereumBridge), _ => Err(EventError::InvalidEventType), } } diff --git a/sdk/src/lib.rs b/sdk/src/lib.rs index 29aab3041a..65b4a45cf4 100644 --- a/sdk/src/lib.rs +++ b/sdk/src/lib.rs @@ -42,7 +42,6 @@ use namada_core::types::ethereum_events::EthAddress; use namada_core::types::key::*; use namada_core::types::masp::{TransferSource, TransferTarget}; use namada_core::types::token; -use namada_core::types::token::NATIVE_MAX_DECIMAL_PLACES; use 
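// Illustrative sketch (not part of the diff): consuming the `Event` produced
// by the `From<EthBridgeEvent>` impl above. The "kind" attribute is either
// "bridge_pool_relayed" or "bridge_pool_expired"; `classify_bridge_pool_event`
// is a hypothetical helper written against the fields used elsewhere in this
// diff.
fn classify_bridge_pool_event(ev: &Event) -> Option<(String, bool)> {
    if !matches!(&ev.event_type, EventType::EthereumBridge) {
        return None;
    }
    let relayed = match ev.attributes.get("kind").map(|k| k.as_str()) {
        Some("bridge_pool_relayed") => true,
        Some("bridge_pool_expired") => false,
        _ => return None,
    };
    let tx_hash = ev.attributes.get("tx_hash")?.clone();
    Some((tx_hash, relayed))
}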
namada_core::types::transaction::GasLimit; use tokio::sync::{RwLock, RwLockReadGuard, RwLockWriteGuard}; @@ -53,7 +52,7 @@ use crate::rpc::{ denominate_amount, format_denominated_amount, query_native_token, }; use crate::signing::SigningTxData; -use crate::token::DenominatedAmount; +use crate::token::{DenominatedAmount, NATIVE_MAX_DECIMAL_PLACES}; use crate::tx::{ ProcessTxResponse, TX_BECOME_VALIDATOR_WASM, TX_BOND_WASM, TX_BRIDGE_POOL_WASM, TX_CHANGE_COMMISSION_WASM, @@ -245,7 +244,7 @@ pub trait Namada: Sized + MaybeSync + MaybeSend { /// Make a TxIbcTransfer builder from the given minimum set of arguments fn new_ibc_transfer( &self, - source: Address, + source: TransferSource, receiver: String, token: Address, amount: InputAmount, @@ -332,6 +331,7 @@ pub trait Namada: Sized + MaybeSync + MaybeSend { validator, consensus_key: None, tx_code_path: PathBuf::from(TX_CHANGE_CONSENSUS_KEY_WASM), + unsafe_dont_encrypt: false, tx: self.tx_builder(), } } @@ -489,10 +489,12 @@ pub trait Namada: Sized + MaybeSync + MaybeSend { recipient, asset, amount, - fee_amount: InputAmount::Unvalidated(token::DenominatedAmount { - amount: token::Amount::default(), - denom: NATIVE_MAX_DECIMAL_PLACES.into(), - }), + fee_amount: InputAmount::Unvalidated( + token::DenominatedAmount::new( + token::Amount::default(), + NATIVE_MAX_DECIMAL_PLACES.into(), + ), + ), fee_payer: None, fee_token: self.native_token(), nut: false, @@ -756,3 +758,606 @@ where } } } + +#[cfg(any(test, feature = "testing"))] +/// Tests and strategies for transactions +pub mod testing { + use ibc::primitives::proto::Any; + use namada_core::ledger::governance::storage::proposal::ProposalType; + use namada_core::ledger::ibc::testing::arb_ibc_any; + use namada_core::types::address::testing::{ + arb_established_address, arb_non_internal_address, + }; + use namada_core::types::eth_bridge_pool::PendingTransfer; + use namada_core::types::hash::testing::arb_hash; + use namada_core::types::storage::testing::arb_epoch; + use namada_core::types::token::testing::{ + arb_denominated_amount, arb_transfer, + }; + use namada_core::types::token::Transfer; + use namada_core::types::transaction::account::{ + InitAccount, UpdateAccount, + }; + use namada_core::types::transaction::governance::{ + InitProposalData, VoteProposalData, + }; + use namada_core::types::transaction::pgf::UpdateStewardCommission; + use namada_core::types::transaction::pos::{ + BecomeValidator, Bond, CommissionChange, ConsensusKeyChange, + MetaDataChange, Redelegation, Unbond, Withdraw, + }; + use proptest::prelude::{Just, Strategy}; + use proptest::{option, prop_compose}; + use prost::Message; + + use super::*; + use crate::core::types::chain::ChainId; + use crate::core::types::eth_bridge_pool::testing::arb_pending_transfer; + use crate::core::types::key::testing::arb_common_pk; + use crate::core::types::time::{DateTime, DateTimeUtc, Utc}; + use crate::core::types::transaction::account::tests::{ + arb_init_account, arb_update_account, + }; + use crate::core::types::transaction::governance::tests::{ + arb_init_proposal, arb_vote_proposal, + }; + use crate::core::types::transaction::pgf::tests::arb_update_steward_commission; + use crate::core::types::transaction::pos::tests::{ + arb_become_validator, arb_bond, arb_commission_change, + arb_consensus_key_change, arb_metadata_change, arb_redelegation, + arb_withdraw, + }; + use crate::core::types::transaction::{ + DecryptedTx, Fee, TxType, WrapperTx, + }; + use crate::proto::{Code, Commitment, Header, Section}; + + #[derive(Debug)] + 
#[allow(clippy::large_enum_variant)] + // To facilitate propagating debugging information + pub enum TxData { + CommissionChange(CommissionChange), + ConsensusKeyChange(ConsensusKeyChange), + MetaDataChange(MetaDataChange), + ClaimRewards(Withdraw), + DeactivateValidator(Address), + InitAccount(InitAccount), + InitProposal(InitProposalData), + InitValidator(BecomeValidator), + ReactivateValidator(Address), + RevealPk(common::PublicKey), + Unbond(Unbond), + UnjailValidator(Address), + UpdateAccount(UpdateAccount), + VoteProposal(VoteProposalData), + Withdraw(Withdraw), + Transfer(Transfer), + Bond(Bond), + Redelegation(Redelegation), + UpdateStewardCommission(UpdateStewardCommission), + ResignSteward(Address), + PendingTransfer(PendingTransfer), + IbcAny(Any), + Custom(Box), + } + + prop_compose! { + // Generate an arbitrary commitment + pub fn arb_commitment()( + hash in arb_hash(), + ) -> Commitment { + Commitment::Hash(hash) + } + } + + prop_compose! { + // Generate an arbitrary code section + pub fn arb_code()( + salt: [u8; 8], + code in arb_commitment(), + tag in option::of("[a-zA-Z0-9_]*"), + ) -> Code { + Code { + salt, + code, + tag, + } + } + } + + prop_compose! { + // Generate a chain ID + pub fn arb_chain_id()(id in "[a-zA-Z0-9_]*") -> ChainId { + ChainId(id) + } + } + + prop_compose! { + // Generate a date and time + pub fn arb_date_time_utc()( + secs in DateTime::::MIN_UTC.timestamp()..=DateTime::::MAX_UTC.timestamp(), + nsecs in ..1000000000u32, + ) -> DateTimeUtc { + DateTimeUtc(DateTime::::from_timestamp(secs, nsecs).unwrap()) + } + } + + prop_compose! { + // Generate an arbitrary fee + pub fn arb_fee()( + amount_per_gas_unit in arb_denominated_amount(), + token in arb_established_address().prop_map(Address::Established), + ) -> Fee { + Fee { + amount_per_gas_unit, + token, + } + } + } + + prop_compose! { + // Generate an arbitrary gas limit + pub fn arb_gas_limit()(multiplier: u64) -> GasLimit { + multiplier.into() + } + } + + prop_compose! { + // Generate an arbitrary wrapper transaction + pub fn arb_wrapper_tx()( + fee in arb_fee(), + epoch in arb_epoch(), + pk in arb_common_pk(), + gas_limit in arb_gas_limit(), + unshield_section_hash in option::of(arb_hash()), + ) -> WrapperTx { + WrapperTx { + fee, + epoch, + pk, + gas_limit, + unshield_section_hash, + } + } + } + + prop_compose! { + // Generate an arbitrary decrypted transaction + pub fn arb_decrypted_tx()(discriminant in 0..2) -> DecryptedTx { + match discriminant { + 0 => DecryptedTx::Decrypted, + 1 => DecryptedTx::Undecryptable, + _ => unreachable!(), + } + } + } + + // Generate an arbitrary transaction type + pub fn arb_tx_type() -> impl Strategy { + let raw_tx = Just(TxType::Raw).boxed(); + let decrypted_tx = + arb_decrypted_tx().prop_map(TxType::Decrypted).boxed(); + let wrapper_tx = arb_wrapper_tx() + .prop_map(|x| TxType::Wrapper(Box::new(x))) + .boxed(); + raw_tx.prop_union(decrypted_tx).or(wrapper_tx) + } + + prop_compose! { + // Generate an arbitrary header + pub fn arb_header()( + chain_id in arb_chain_id(), + expiration in option::of(arb_date_time_utc()), + timestamp in arb_date_time_utc(), + code_hash in arb_hash(), + data_hash in arb_hash(), + tx_type in arb_tx_type(), + ) -> Header { + Header { + chain_id, + expiration, + timestamp, + data_hash, + code_hash, + tx_type, + } + } + } + + prop_compose! 
{ + // Generate an arbitrary transfer transaction + pub fn arb_transfer_tx()( + mut header in arb_header(), + wrapper in arb_wrapper_tx(), + transfer in arb_transfer(), + code_hash in arb_hash(), + ) -> (Tx, TxData) { + header.tx_type = TxType::Wrapper(Box::new(wrapper)); + let mut tx = Tx { header, sections: vec![] }; + tx.add_data(transfer.clone()); + tx.add_code_from_hash(code_hash, Some(TX_TRANSFER_WASM.to_owned())); + (tx, TxData::Transfer(transfer)) + } + } + + prop_compose! { + // Generate an arbitrary bond transaction + pub fn arb_bond_tx()( + mut header in arb_header(), + wrapper in arb_wrapper_tx(), + bond in arb_bond(), + code_hash in arb_hash(), + ) -> (Tx, TxData) { + header.tx_type = TxType::Wrapper(Box::new(wrapper)); + let mut tx = Tx { header, sections: vec![] }; + tx.add_data(bond.clone()); + tx.add_code_from_hash(code_hash, Some(TX_BOND_WASM.to_owned())); + (tx, TxData::Bond(bond)) + } + } + + prop_compose! { + // Generate an arbitrary bond transaction + pub fn arb_unbond_tx()( + mut header in arb_header(), + wrapper in arb_wrapper_tx(), + unbond in arb_bond(), + code_hash in arb_hash(), + ) -> (Tx, TxData) { + header.tx_type = TxType::Wrapper(Box::new(wrapper)); + let mut tx = Tx { header, sections: vec![] }; + tx.add_data(unbond.clone()); + tx.add_code_from_hash(code_hash, Some(TX_UNBOND_WASM.to_owned())); + (tx, TxData::Unbond(unbond)) + } + } + + prop_compose! { + // Generate an arbitrary account initialization transaction + pub fn arb_init_account_tx()( + mut header in arb_header(), + wrapper in arb_wrapper_tx(), + mut init_account in arb_init_account(), + extra_data in arb_code(), + code_hash in arb_hash(), + ) -> (Tx, TxData) { + header.tx_type = TxType::Wrapper(Box::new(wrapper)); + let mut tx = Tx { header, sections: vec![] }; + let vp_code_hash = tx.add_section(Section::ExtraData(extra_data)).get_hash(); + init_account.vp_code_hash = vp_code_hash; + tx.add_data(init_account.clone()); + tx.add_code_from_hash(code_hash, Some(TX_INIT_ACCOUNT_WASM.to_owned())); + (tx, TxData::InitAccount(init_account)) + } + } + + prop_compose! { + // Generate an arbitrary account initialization transaction + pub fn arb_become_validator_tx()( + mut header in arb_header(), + wrapper in arb_wrapper_tx(), + become_validator in arb_become_validator(), + code_hash in arb_hash(), + ) -> (Tx, TxData) { + header.tx_type = TxType::Wrapper(Box::new(wrapper)); + let mut tx = Tx { header, sections: vec![] }; + tx.add_data(become_validator.clone()); + tx.add_code_from_hash(code_hash, Some(TX_BECOME_VALIDATOR_WASM.to_owned())); + (tx, TxData::InitValidator(become_validator)) + } + } + + prop_compose! { + // Generate an arbitrary proposal initialization transaction + pub fn arb_init_proposal_tx()( + mut header in arb_header(), + wrapper in arb_wrapper_tx(), + mut init_proposal in arb_init_proposal(), + content_extra_data in arb_code(), + type_extra_data in arb_code(), + code_hash in arb_hash(), + ) -> (Tx, TxData) { + header.tx_type = TxType::Wrapper(Box::new(wrapper)); + let mut tx = Tx { header, sections: vec![] }; + let content_hash = tx.add_section(Section::ExtraData(content_extra_data)).get_hash(); + init_proposal.content = content_hash; + if let ProposalType::Default(Some(hash)) = &mut init_proposal.r#type { + let type_hash = tx.add_section(Section::ExtraData(type_extra_data)).get_hash(); + *hash = type_hash; + } + tx.add_data(init_proposal.clone()); + tx.add_code_from_hash(code_hash, Some(TX_INIT_PROPOSAL.to_owned())); + (tx, TxData::InitProposal(init_proposal)) + } + } + + prop_compose! 
{ + // Generate an arbitrary vote proposal transaction + pub fn arb_vote_proposal_tx()( + mut header in arb_header(), + wrapper in arb_wrapper_tx(), + vote_proposal in arb_vote_proposal(), + code_hash in arb_hash(), + ) -> (Tx, TxData) { + header.tx_type = TxType::Wrapper(Box::new(wrapper)); + let mut tx = Tx { header, sections: vec![] }; + tx.add_data(vote_proposal.clone()); + tx.add_code_from_hash(code_hash, Some(TX_VOTE_PROPOSAL.to_owned())); + (tx, TxData::VoteProposal(vote_proposal)) + } + } + + prop_compose! { + // Generate an arbitrary reveal public key transaction + pub fn arb_reveal_pk_tx()( + mut header in arb_header(), + wrapper in arb_wrapper_tx(), + pk in arb_common_pk(), + code_hash in arb_hash(), + ) -> (Tx, TxData) { + header.tx_type = TxType::Wrapper(Box::new(wrapper)); + let mut tx = Tx { header, sections: vec![] }; + tx.add_data(pk.clone()); + tx.add_code_from_hash(code_hash, Some(TX_REVEAL_PK.to_owned())); + (tx, TxData::RevealPk(pk)) + } + } + + prop_compose! { + // Generate an arbitrary account initialization transaction + pub fn arb_update_account_tx()( + mut header in arb_header(), + wrapper in arb_wrapper_tx(), + mut update_account in arb_update_account(), + extra_data in arb_code(), + code_hash in arb_hash(), + ) -> (Tx, TxData) { + header.tx_type = TxType::Wrapper(Box::new(wrapper)); + let mut tx = Tx { header, sections: vec![] }; + if let Some(vp_code_hash) = &mut update_account.vp_code_hash { + let new_code_hash = tx.add_section(Section::ExtraData(extra_data)).get_hash(); + *vp_code_hash = new_code_hash; + } + tx.add_data(update_account.clone()); + tx.add_code_from_hash(code_hash, Some(TX_UPDATE_ACCOUNT_WASM.to_owned())); + (tx, TxData::UpdateAccount(update_account)) + } + } + + prop_compose! { + // Generate an arbitrary reveal public key transaction + pub fn arb_withdraw_tx()( + mut header in arb_header(), + wrapper in arb_wrapper_tx(), + withdraw in arb_withdraw(), + code_hash in arb_hash(), + ) -> (Tx, TxData) { + header.tx_type = TxType::Wrapper(Box::new(wrapper)); + let mut tx = Tx { header, sections: vec![] }; + tx.add_data(withdraw.clone()); + tx.add_code_from_hash(code_hash, Some(TX_WITHDRAW_WASM.to_owned())); + (tx, TxData::Withdraw(withdraw)) + } + } + + prop_compose! { + // Generate an arbitrary claim rewards transaction + pub fn arb_claim_rewards_tx()( + mut header in arb_header(), + wrapper in arb_wrapper_tx(), + claim_rewards in arb_withdraw(), + code_hash in arb_hash(), + ) -> (Tx, TxData) { + header.tx_type = TxType::Wrapper(Box::new(wrapper)); + let mut tx = Tx { header, sections: vec![] }; + tx.add_data(claim_rewards.clone()); + tx.add_code_from_hash(code_hash, Some(TX_CLAIM_REWARDS_WASM.to_owned())); + (tx, TxData::ClaimRewards(claim_rewards)) + } + } + + prop_compose! { + // Generate an arbitrary commission change transaction + pub fn arb_commission_change_tx()( + mut header in arb_header(), + wrapper in arb_wrapper_tx(), + commission_change in arb_commission_change(), + code_hash in arb_hash(), + ) -> (Tx, TxData) { + header.tx_type = TxType::Wrapper(Box::new(wrapper)); + let mut tx = Tx { header, sections: vec![] }; + tx.add_data(commission_change.clone()); + tx.add_code_from_hash(code_hash, Some(TX_CHANGE_COMMISSION_WASM.to_owned())); + (tx, TxData::CommissionChange(commission_change)) + } + } + + prop_compose! 
{ + // Generate an arbitrary commission change transaction + pub fn arb_metadata_change_tx()( + mut header in arb_header(), + wrapper in arb_wrapper_tx(), + metadata_change in arb_metadata_change(), + code_hash in arb_hash(), + ) -> (Tx, TxData) { + header.tx_type = TxType::Wrapper(Box::new(wrapper)); + let mut tx = Tx { header, sections: vec![] }; + tx.add_data(metadata_change.clone()); + tx.add_code_from_hash(code_hash, Some(TX_CHANGE_METADATA_WASM.to_owned())); + (tx, TxData::MetaDataChange(metadata_change)) + } + } + + prop_compose! { + // Generate an arbitrary unjail validator transaction + pub fn arb_unjail_validator_tx()( + mut header in arb_header(), + wrapper in arb_wrapper_tx(), + address in arb_non_internal_address(), + code_hash in arb_hash(), + ) -> (Tx, TxData) { + header.tx_type = TxType::Wrapper(Box::new(wrapper)); + let mut tx = Tx { header, sections: vec![] }; + tx.add_data(address.clone()); + tx.add_code_from_hash(code_hash, Some(TX_UNJAIL_VALIDATOR_WASM.to_owned())); + (tx, TxData::UnjailValidator(address)) + } + } + + prop_compose! { + // Generate an arbitrary deactivate validator transaction + pub fn arb_deactivate_validator_tx()( + mut header in arb_header(), + wrapper in arb_wrapper_tx(), + address in arb_non_internal_address(), + code_hash in arb_hash(), + ) -> (Tx, TxData) { + header.tx_type = TxType::Wrapper(Box::new(wrapper)); + let mut tx = Tx { header, sections: vec![] }; + tx.add_data(address.clone()); + tx.add_code_from_hash(code_hash, Some(TX_DEACTIVATE_VALIDATOR_WASM.to_owned())); + (tx, TxData::DeactivateValidator(address)) + } + } + + prop_compose! { + // Generate an arbitrary reactivate validator transaction + pub fn arb_reactivate_validator_tx()( + mut header in arb_header(), + wrapper in arb_wrapper_tx(), + address in arb_non_internal_address(), + code_hash in arb_hash(), + ) -> (Tx, TxData) { + header.tx_type = TxType::Wrapper(Box::new(wrapper)); + let mut tx = Tx { header, sections: vec![] }; + tx.add_data(address.clone()); + tx.add_code_from_hash(code_hash, Some(TX_REACTIVATE_VALIDATOR_WASM.to_owned())); + (tx, TxData::ReactivateValidator(address)) + } + } + + prop_compose! { + // Generate an arbitrary consensus key change transaction + pub fn arb_consensus_key_change_tx()( + mut header in arb_header(), + wrapper in arb_wrapper_tx(), + consensus_key_change in arb_consensus_key_change(), + code_hash in arb_hash(), + ) -> (Tx, TxData) { + header.tx_type = TxType::Wrapper(Box::new(wrapper)); + let mut tx = Tx { header, sections: vec![] }; + tx.add_data(consensus_key_change.clone()); + tx.add_code_from_hash(code_hash, Some(TX_CHANGE_CONSENSUS_KEY_WASM.to_owned())); + (tx, TxData::ConsensusKeyChange(consensus_key_change)) + } + } + + prop_compose! { + // Generate an arbitrary redelegation transaction + pub fn arb_redelegation_tx()( + mut header in arb_header(), + wrapper in arb_wrapper_tx(), + redelegation in arb_redelegation(), + code_hash in arb_hash(), + ) -> (Tx, TxData) { + header.tx_type = TxType::Wrapper(Box::new(wrapper)); + let mut tx = Tx { header, sections: vec![] }; + tx.add_data(redelegation.clone()); + tx.add_code_from_hash(code_hash, Some(TX_REDELEGATE_WASM.to_owned())); + (tx, TxData::Redelegation(redelegation)) + } + } + + prop_compose! 
{ + // Generate an arbitrary redelegation transaction + pub fn arb_update_steward_commission_tx()( + mut header in arb_header(), + wrapper in arb_wrapper_tx(), + update_steward_commission in arb_update_steward_commission(), + code_hash in arb_hash(), + ) -> (Tx, TxData) { + header.tx_type = TxType::Wrapper(Box::new(wrapper)); + let mut tx = Tx { header, sections: vec![] }; + tx.add_data(update_steward_commission.clone()); + tx.add_code_from_hash(code_hash, Some(TX_UPDATE_STEWARD_COMMISSION.to_owned())); + (tx, TxData::UpdateStewardCommission(update_steward_commission)) + } + } + + prop_compose! { + // Generate an arbitrary redelegation transaction + pub fn arb_resign_steward_tx()( + mut header in arb_header(), + wrapper in arb_wrapper_tx(), + steward in arb_non_internal_address(), + code_hash in arb_hash(), + ) -> (Tx, TxData) { + header.tx_type = TxType::Wrapper(Box::new(wrapper)); + let mut tx = Tx { header, sections: vec![] }; + tx.add_data(steward.clone()); + tx.add_code_from_hash(code_hash, Some(TX_RESIGN_STEWARD.to_owned())); + (tx, TxData::ResignSteward(steward)) + } + } + + prop_compose! { + // Generate an arbitrary pending transfer transaction + pub fn arb_pending_transfer_tx()( + mut header in arb_header(), + wrapper in arb_wrapper_tx(), + pending_transfer in arb_pending_transfer(), + code_hash in arb_hash(), + ) -> (Tx, TxData) { + header.tx_type = TxType::Wrapper(Box::new(wrapper)); + let mut tx = Tx { header, sections: vec![] }; + tx.add_data(pending_transfer.clone()); + tx.add_code_from_hash(code_hash, Some(TX_BRIDGE_POOL_WASM.to_owned())); + (tx, TxData::PendingTransfer(pending_transfer)) + } + } + + prop_compose! { + // Generate an arbitrary IBC any transaction + pub fn arb_ibc_any_tx()( + mut header in arb_header(), + wrapper in arb_wrapper_tx(), + ibc_any in arb_ibc_any(), + code_hash in arb_hash(), + ) -> (Tx, TxData) { + header.tx_type = TxType::Wrapper(Box::new(wrapper)); + let mut tx = Tx { header, sections: vec![] }; + let mut tx_data = vec![]; + ibc_any.encode(&mut tx_data).expect("unable to encode IBC data"); + tx.add_serialized_data(tx_data); + tx.add_code_from_hash(code_hash, Some(TX_IBC_WASM.to_owned())); + (tx, TxData::IbcAny(ibc_any)) + } + } + + // Generate an arbitrary tx + pub fn arb_tx() -> impl Strategy { + arb_transfer_tx() + .boxed() + .prop_union(arb_bond_tx().boxed()) + .or(arb_unbond_tx().boxed()) + .or(arb_init_account_tx().boxed()) + .or(arb_become_validator_tx().boxed()) + .or(arb_init_proposal_tx().boxed()) + .or(arb_vote_proposal_tx().boxed()) + .or(arb_reveal_pk_tx().boxed()) + .or(arb_update_account_tx().boxed()) + .or(arb_withdraw_tx().boxed()) + .or(arb_claim_rewards_tx().boxed()) + .or(arb_commission_change_tx().boxed()) + .or(arb_metadata_change_tx().boxed()) + .or(arb_unjail_validator_tx().boxed()) + .or(arb_deactivate_validator_tx().boxed()) + .or(arb_reactivate_validator_tx().boxed()) + .or(arb_consensus_key_change_tx().boxed()) + .or(arb_redelegation_tx().boxed()) + .or(arb_update_steward_commission_tx().boxed()) + .or(arb_resign_steward_tx().boxed()) + .or(arb_pending_transfer_tx().boxed()) + .or(arb_ibc_any_tx().boxed()) + } +} diff --git a/sdk/src/masp.rs b/sdk/src/masp.rs index 812db3171c..576f311cb1 100644 --- a/sdk/src/masp.rs +++ b/sdk/src/masp.rs @@ -55,6 +55,7 @@ use namada_core::types::masp::{ TransferTarget, }; use namada_core::types::storage::{BlockHeight, Epoch, Key, KeySeg, TxIndex}; +use namada_core::types::time::{DateTimeUtc, DurationSecs}; use namada_core::types::token; use namada_core::types::token::{ Change, 
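// Illustrative sketch (not part of the diff): the strategies above are meant
// to be driven from property-based tests. A hypothetical round-trip test over
// the composed `arb_tx()` strategy could look like this; it assumes `Tx`
// implements Borsh (de)serialization, `PartialEq` and `Debug`.
#[cfg(test)]
mod arb_tx_usage {
    use borsh::BorshDeserialize;
    use borsh_ext::BorshSerializeExt;
    use proptest::prelude::*;

    use crate::proto::Tx;
    use crate::testing::arb_tx;

    proptest! {
        #[test]
        fn tx_borsh_roundtrip((tx, _data) in arb_tx()) {
            // Encode the generated transaction and decode it back, checking
            // that nothing is lost in the round trip.
            let bytes = tx.serialize_to_vec();
            let decoded = Tx::try_from_slice(&bytes).expect("decoding failed");
            prop_assert_eq!(tx, decoded);
        }
    }
}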
MaspDenom, Transfer, HEAD_TX_KEY, PIN_KEY_PREFIX, TX_KEY_PREFIX, @@ -941,7 +942,7 @@ impl ShieldedContext { tx.source.clone(), MaspChange { asset: token_addr, - change: -tx.amount.amount.change(), + change: -tx.amount.amount().change(), }, ); self.last_txidx += 1; @@ -1590,12 +1591,54 @@ impl ShieldedContext { }; // Now we build up the transaction within this object - let mut builder = - Builder::::new_with_rng(NETWORK, 1.into(), rng); + let expiration_height: u32 = match context.tx_builder().expiration { + Some(expiration) => { + // Try to match a DateTime expiration with a plausible + // corresponding block height + let last_block_height: u64 = + crate::rpc::query_block(context.client()) + .await? + .map_or_else(|| 1, |block| u64::from(block.height)); + let current_time = DateTimeUtc::now(); + let delta_time = + expiration.0.signed_duration_since(current_time.0); + + let max_expected_time_per_block_key = + namada_core::ledger::parameters::storage::get_max_expected_time_per_block_key(); + let max_block_time = + crate::rpc::query_storage_value::<_, DurationSecs>( + context.client(), + &max_expected_time_per_block_key, + ) + .await?; + + let delta_blocks = u32::try_from( + delta_time.num_seconds() / max_block_time.0 as i64, + ) + .map_err(|e| Error::Other(e.to_string()))?; + u32::try_from(last_block_height) + .map_err(|e| Error::Other(e.to_string()))? + + delta_blocks + } + None => { + // NOTE: The masp library doesn't support optional expiration so + // we set the max to mimic a never-expiring tx. We also need to + // remove 20 which is going to be added back by the builder + u32::MAX - 20 + } + }; + let mut builder = Builder::::new_with_rng( + NETWORK, + // NOTE: this is going to add 20 more blocks to the actual + // expiration but there's no other exposed function that we could + // use from the masp crate to specify the expiration better + expiration_height.into(), + rng, + ); // Convert transaction amount into MASP types let (asset_types, masp_amount) = - convert_amount(epoch, token, amount.amount)?; + convert_amount(epoch, token, amount.amount())?; // If there are shielded inputs if let Some(sk) = spending_key { @@ -1650,7 +1693,7 @@ impl ShieldedContext { builder .add_transparent_input(TxOut { asset_type: *asset_type, - value: denom.denominate(&amount), + value: denom.denominate(&amount.amount()), address: script, }) .map_err(builder::Error::TransparentBuild)?; @@ -1668,7 +1711,7 @@ impl ShieldedContext { ovk_opt, pa.into(), *asset_type, - denom.denominate(&amount), + denom.denominate(&amount.amount()), memo.clone(), ) .map_err(builder::Error::SaplingBuild)?; @@ -1689,7 +1732,7 @@ impl ShieldedContext { )); for (denom, asset_type) in MaspDenom::iter().zip(asset_types.iter()) { - let vout = denom.denominate(&amount); + let vout = denom.denominate(&amount.amount()); if vout != 0 { builder .add_transparent_output( @@ -1961,7 +2004,7 @@ impl ShieldedContext { transfer.source.clone(), MaspChange { asset: transfer.token.clone(), - change: -transfer.amount.amount.change(), + change: -transfer.amount.amount().change(), }, )]); diff --git a/sdk/src/queries/mod.rs b/sdk/src/queries/mod.rs index 354cd5b6d0..4dbc5173b8 100644 --- a/sdk/src/queries/mod.rs +++ b/sdk/src/queries/mod.rs @@ -16,7 +16,7 @@ use vp::{Vp, VP}; pub use self::shell::eth_bridge::{ Erc20FlowControl, GenBridgePoolProofReq, GenBridgePoolProofRsp, - TransferToErcArgs, + TransferToErcArgs, TransferToEthereumStatus, }; use crate::{MaybeSend, MaybeSync}; diff --git a/sdk/src/queries/router.rs b/sdk/src/queries/router.rs index 
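// Illustrative sketch (not part of the diff): the expiration-height estimate
// computed above, pulled out as a standalone function. The inputs (seconds
// until expiration, last committed block height, `max_expected_time_per_block`
// in seconds) are assumptions for the example. The MASP builder adds another
// 20 blocks on top of whatever height it is given, which is also why the
// "no expiration" branch above uses `u32::MAX - 20`.
fn estimate_expiration_height(
    secs_until_expiration: i64,
    last_block_height: u64,
    max_block_time_secs: u64,
) -> Option<u32> {
    let delta_blocks =
        u32::try_from(secs_until_expiration / max_block_time_secs as i64).ok()?;
    u32::try_from(last_block_height)
        .ok()?
        .checked_add(delta_blocks)
}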
c78c0a6fcd..d27e612d66 100644 --- a/sdk/src/queries/router.rs +++ b/sdk/src/queries/router.rs @@ -1047,25 +1047,25 @@ mod test { let result = TEST_RPC.b1(&client).await.unwrap(); assert_eq!(result, "b1"); - let balance = token::DenominatedAmount { - amount: token::Amount::native_whole(123_000_000), - denom: NATIVE_MAX_DECIMAL_PLACES.into(), - }; + let balance = token::DenominatedAmount::new( + token::Amount::native_whole(123_000_000), + NATIVE_MAX_DECIMAL_PLACES.into(), + ); let result = TEST_RPC.b2i(&client, &balance).await.unwrap(); assert_eq!(result, format!("b2i/{balance}")); - let a1 = token::DenominatedAmount { - amount: token::Amount::native_whole(345), - denom: NATIVE_MAX_DECIMAL_PLACES.into(), - }; - let a2 = token::DenominatedAmount { - amount: token::Amount::native_whole(123_000), - denom: NATIVE_MAX_DECIMAL_PLACES.into(), - }; - let a3 = token::DenominatedAmount { - amount: token::Amount::native_whole(1_000_999), - denom: NATIVE_MAX_DECIMAL_PLACES.into(), - }; + let a1 = token::DenominatedAmount::new( + token::Amount::native_whole(345), + NATIVE_MAX_DECIMAL_PLACES.into(), + ); + let a2 = token::DenominatedAmount::new( + token::Amount::native_whole(123_000), + NATIVE_MAX_DECIMAL_PLACES.into(), + ); + let a3 = token::DenominatedAmount::new( + token::Amount::native_whole(1_000_999), + NATIVE_MAX_DECIMAL_PLACES.into(), + ); let result = TEST_RPC.b3(&client, &a1, &a2, &a3).await.unwrap(); assert_eq!(result, format!("b3/{a1}/{a2}/{a3}")); diff --git a/sdk/src/queries/shell.rs b/sdk/src/queries/shell.rs index 23f7441cae..813348dacb 100644 --- a/sdk/src/queries/shell.rs +++ b/sdk/src/queries/shell.rs @@ -84,6 +84,10 @@ router! {SHELL, // Conversion state access - read conversion ( "conversions" ) -> BTreeMap = read_conversions, + + // Conversion state access - read conversion + ( "masp_reward_tokens" ) -> BTreeMap = masp_reward_tokens, + // Block results access - read bit-vec ( "results" ) -> Vec = read_results, @@ -210,6 +214,17 @@ where } } +/// Query to read the tokens that earn masp rewards. +fn masp_reward_tokens( + ctx: RequestCtx<'_, D, H, V, T>, +) -> storage_api::Result> +where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, +{ + Ok(ctx.wl_storage.storage.conversion_state.tokens.clone()) +} + fn epoch( ctx: RequestCtx<'_, D, H, V, T>, ) -> storage_api::Result diff --git a/sdk/src/queries/shell/eth_bridge.rs b/sdk/src/queries/shell/eth_bridge.rs index b7a760126d..b113eef1f8 100644 --- a/sdk/src/queries/shell/eth_bridge.rs +++ b/sdk/src/queries/shell/eth_bridge.rs @@ -1,11 +1,12 @@ //! Ethereum bridge related shell queries. 
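// Illustrative sketch (not part of the diff): calling the `masp_reward_tokens`
// route added above through the RPC helper that this diff also introduces in
// `sdk/src/rpc.rs`. The element type of the returned map is elided by the
// hunk, so entries are only printed generically; `print_masp_reward_tokens`
// is a hypothetical helper.
async fn print_masp_reward_tokens<C>(
    client: &C,
) -> Result<(), crate::error::Error>
where
    C: crate::queries::Client + Sync,
{
    let tokens = crate::rpc::query_masp_reward_tokens(client).await?;
    for entry in &tokens {
        println!("token eligible for MASP rewards: {entry:?}");
    }
    Ok(())
}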
use std::borrow::Cow; -use std::collections::HashMap; +use std::collections::{HashMap, HashSet}; use std::str::FromStr; use borsh::{BorshDeserialize, BorshSerialize}; use borsh_ext::BorshSerializeExt; +use namada_core::hints; use namada_core::ledger::eth_bridge::storage::bridge_pool::get_key_from_hash; use namada_core::ledger::storage::merkle_tree::StoreRef; use namada_core::ledger::storage::{DBIter, StorageHasher, StoreType, DB}; @@ -29,21 +30,57 @@ use namada_core::types::vote_extensions::validator_set_update::{ ValidatorSetArgs, VotingPowersMap, }; use namada_core::types::voting_power::FractionalVotingPower; -use namada_ethereum_bridge::parameters::UpgradeableContract; use namada_ethereum_bridge::protocol::transactions::votes::{ EpochedVotingPower, EpochedVotingPowerExt, }; use namada_ethereum_bridge::storage::eth_bridge_queries::EthBridgeQueries; +use namada_ethereum_bridge::storage::parameters::UpgradeableContract; use namada_ethereum_bridge::storage::proof::{sort_sigs, EthereumProof}; use namada_ethereum_bridge::storage::vote_tallies::{eth_msgs_prefix, Keys}; use namada_ethereum_bridge::storage::{ bridge_contract_key, native_erc20_key, vote_tallies, }; use namada_proof_of_stake::pos_queries::PosQueries; +use serde::{Deserialize, Serialize}; use crate::eth_bridge::ethers::abi::AbiDecode; +use crate::events::EventType; use crate::queries::{EncodedResponseQuery, RequestCtx, RequestQuery}; +/// Container for the status of queried transfers to Ethereum. +#[derive( + Default, + Debug, + Clone, + Eq, + PartialEq, + BorshSerialize, + BorshDeserialize, + Serialize, + Deserialize, +)] +pub struct TransferToEthereumStatus { + /// The block height at which the query was performed. + /// + /// This value may be used to busy wait while a Bridge pool + /// proof is being constructed for it, such that clients can + /// safely perform additional actions. + pub queried_height: BlockHeight, + /// Transfers in the query whose status it was determined + /// to be `pending`. + pub pending: HashSet, + /// Transfers in the query whose status it was determined + /// to be `relayed`. + pub relayed: HashSet, + /// Transfers in the query whose status it was determined + /// to be `expired`. + pub expired: HashSet, + /// Hashes pertaining to bogus data that might have been queried, + /// or transfers that were not in the event log, despite having + /// been relayed to Ethereum or expiring from the Bridge pool. + pub unrecognized: HashSet, +} + /// Contains information about the flow control of some ERC20 /// wrapped asset. #[derive( @@ -129,6 +166,11 @@ router! {ETH_BRIDGE, -> HashMap = transfer_to_ethereum_progress, + // Given a list of keccak hashes, check whether they have been + // relayed, expired or if they are still pending. + ( "pool" / "transfer_status" ) + -> TransferToEthereumStatus = (with_options pending_eth_transfer_status), + // Request a proof of a validator set signed off for // the given epoch. // @@ -175,6 +217,123 @@ router! {ETH_BRIDGE, -> Erc20FlowControl = get_erc20_flow_control, } +/// Given a list of keccak hashes, check whether they have been +/// relayed, expired or if they are still pending. 
+fn pending_eth_transfer_status( + ctx: RequestCtx<'_, D, H, V, T>, + request: &RequestQuery, +) -> storage_api::Result +where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, +{ + let mut transfer_hashes: HashSet = + BorshDeserialize::try_from_slice(&request.data) + .into_storage_result()?; + + if transfer_hashes.is_empty() { + return Ok(Default::default()); + } + + let mut status = TransferToEthereumStatus { + queried_height: ctx.wl_storage.storage.get_last_block_height(), + ..Default::default() + }; + + // check which transfers in the Bridge pool match the requested hashes + let merkle_tree = ctx + .wl_storage + .storage + .get_merkle_tree( + ctx.wl_storage.storage.get_last_block_height(), + Some(StoreType::BridgePool), + ) + .expect("We should always be able to read the database"); + let stores = merkle_tree.stores(); + let store = match stores.store(&StoreType::BridgePool) { + StoreRef::BridgePool(store) => store, + _ => unreachable!(), + }; + if hints::likely(store.len() > transfer_hashes.len()) { + transfer_hashes.retain(|hash| { + let transfer_in_pool = store.contains_key(hash); + if transfer_in_pool { + status.pending.insert(hash.clone()); + } + !transfer_in_pool + }); + } else { + for hash in store.keys() { + if transfer_hashes.remove(hash) { + status.pending.insert(hash.clone()); + } + if transfer_hashes.is_empty() { + break; + } + } + } + + if transfer_hashes.is_empty() { + let data = status.serialize_to_vec(); + return Ok(EncodedResponseQuery { + data, + ..Default::default() + }); + } + + // INVARIANT: transfers that are in the event log will have already + // been processed and therefore removed from the Bridge pool at the + // time of this query + let kind_key: String = "kind".into(); + let completed_transfers = ctx.event_log.iter().filter_map(|ev| { + if !matches!(&ev.event_type, EventType::EthereumBridge) { + return None; + } + let eth_event_kind = + ev.attributes.get(&kind_key).map(|k| k.as_str())?; + let is_relayed = match eth_event_kind { + "bridge_pool_relayed" => true, + "bridge_pool_expired" => false, + _ => return None, + }; + let tx_hash: KeccakHash = ev + .attributes + .get("tx_hash") + .expect("The transfer hash must be available") + .as_str() + .try_into() + .expect("We must have a valid KeccakHash"); + if !transfer_hashes.remove(&tx_hash) { + return None; + } + Some((tx_hash, is_relayed, transfer_hashes.is_empty())) + }); + for (hash, is_relayed, early_exit) in completed_transfers { + if hints::likely(is_relayed) { + status.relayed.insert(hash.clone()); + } else { + status.expired.insert(hash.clone()); + } + if early_exit { + // early drop of the transfer hashes, in + // case its storage capacity was big + transfer_hashes = Default::default(); + break; + } + } + + let status = { + // any remaining transfers are returned as + // unrecognized hashes + status.unrecognized = transfer_hashes; + status + }; + Ok(EncodedResponseQuery { + data: status.serialize_to_vec(), + ..Default::default() + }) +} + /// Read the total supply and respective cap of some wrapped /// ERC20 token in Namada. fn get_erc20_flow_control( @@ -1559,6 +1718,132 @@ mod test_ethbridge_router { Ok(f) if f.supply == supply_amount && f.cap == cap_amount ); } + + /// Test that querying the status of the Bridge pool + /// returns the expected keccak hashes. 
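// Illustrative sketch (not part of the diff): the `queried_height` field of
// `TransferToEthereumStatus` can be used to wait until the chain has advanced
// past the snapshot height before, e.g., requesting a Bridge pool proof. The
// use of `query_block`, the `Default` bound on `BlockHeight` and the
// one-second polling interval are assumptions made for the example;
// `wait_past_height` is a hypothetical helper.
async fn wait_past_height<C>(
    client: &C,
    snapshot_height: BlockHeight,
) -> Result<(), crate::error::Error>
where
    C: crate::queries::Client + Sync,
{
    loop {
        let latest = crate::rpc::query_block(client)
            .await?
            .map(|block| block.height)
            .unwrap_or_default();
        if latest > snapshot_height {
            return Ok(());
        }
        tokio::time::sleep(std::time::Duration::from_secs(1)).await;
    }
}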
+ #[tokio::test] + async fn test_bridge_pool_status() { + let mut client = TestClient::new(RPC); + + // write a transfer into the bridge pool + let transfer = PendingTransfer { + transfer: TransferToEthereum { + kind: TransferToEthereumKind::Erc20, + asset: EthAddress([0; 20]), + recipient: EthAddress([0; 20]), + sender: bertha_address(), + amount: 0.into(), + }, + gas_fee: GasFee { + token: nam(), + amount: 0.into(), + payer: bertha_address(), + }, + }; + client + .wl_storage + .write_bytes( + &get_pending_key(&transfer), + transfer.serialize_to_vec(), + ) + .expect("Test failed"); + + // write transfers into the event log + let mut transfer2 = transfer.clone(); + transfer2.transfer.amount = 1.into(); + let mut transfer3 = transfer.clone(); + transfer3.transfer.amount = 2.into(); + client.event_log.log_events(vec![ + ethereum_structs::EthBridgeEvent::BridgePool { + tx_hash: transfer2.keccak256(), + status: ethereum_structs::BpTransferStatus::Expired, + } + .into(), + ethereum_structs::EthBridgeEvent::BridgePool { + tx_hash: transfer3.keccak256(), + status: ethereum_structs::BpTransferStatus::Relayed, + } + .into(), + ]); + + // some arbitrary transfer - since it's neither in the + // Bridge pool nor in the event log, it is assumed it has + // either been relayed or that it has expired + let mut transfer4 = transfer.clone(); + transfer4.transfer.amount = 3.into(); + + // change block height + client.wl_storage.storage.block.height = 1.into(); + + // write bridge pool signed root + { + let signed_root = BridgePoolRootProof { + signatures: Default::default(), + data: (KeccakHash([0; 32]), 0.into()), + }; + let written_height = client.wl_storage.storage.block.height; + client + .wl_storage + .write_bytes( + &get_signed_root_key(), + (signed_root, written_height).serialize_to_vec(), + ) + .expect("Test failed"); + client + .wl_storage + .storage + .commit_block(MockDBWriteBatch) + .expect("Test failed"); + } + + // commit storage changes + client.wl_storage.commit_block().expect("Test failed"); + + // check transfer statuses + let status = RPC + .shell() + .eth_bridge() + .pending_eth_transfer_status( + &client, + Some( + { + let mut req = HashSet::new(); + req.insert(transfer.keccak256()); + req.insert(transfer2.keccak256()); + req.insert(transfer3.keccak256()); + req.insert(transfer4.keccak256()); + req + } + .serialize_to_vec(), + ), + None, + false, + ) + .await + .unwrap() + .data; + + assert_eq!( + status.pending, + HashSet::from([transfer.keccak256()]), + "unexpected pending transfers" + ); + assert_eq!( + status.expired, + HashSet::from([transfer2.keccak256()]), + "unexpected expired transfers" + ); + assert_eq!( + status.relayed, + HashSet::from([transfer3.keccak256()]), + "unexpected relayed transfers" + ); + assert_eq!( + status.unrecognized, + HashSet::from([transfer4.keccak256()]), + "unexpected unrecognized transfers" + ); + } } #[cfg(any(feature = "testing", test))] diff --git a/sdk/src/queries/vp/pos.rs b/sdk/src/queries/vp/pos.rs index 1011964588..ff1b073dc4 100644 --- a/sdk/src/queries/vp/pos.rs +++ b/sdk/src/queries/vp/pos.rs @@ -12,14 +12,14 @@ use namada_core::types::key::common; use namada_core::types::storage::Epoch; use namada_core::types::token; use namada_proof_of_stake::parameters::PosParams; -use namada_proof_of_stake::types::{ - BondId, BondsAndUnbondsDetail, BondsAndUnbondsDetails, CommissionPair, - Slash, ValidatorMetaData, ValidatorState, WeightedValidator, +use namada_proof_of_stake::queries::{ + find_delegation_validators, find_delegations, }; -use 
namada_proof_of_stake::{ - self, bond_amount, bond_handle, find_all_enqueued_slashes, - find_all_slashes, find_delegation_validators, find_delegations, - query_reward_tokens, read_all_validator_addresses, +use namada_proof_of_stake::slashing::{ + find_all_enqueued_slashes, find_all_slashes, +}; +use namada_proof_of_stake::storage::{ + bond_handle, read_all_validator_addresses, read_below_capacity_validator_set_addresses_with_stake, read_consensus_validator_set_addresses_with_stake, read_pos_params, read_total_stake, read_validator_description, @@ -29,6 +29,11 @@ use namada_proof_of_stake::{ validator_commission_rate_handle, validator_incoming_redelegations_handle, validator_slashes_handle, validator_state_handle, }; +use namada_proof_of_stake::types::{ + BondId, BondsAndUnbondsDetail, BondsAndUnbondsDetails, CommissionPair, + Slash, ValidatorMetaData, ValidatorState, WeightedValidator, +}; +use namada_proof_of_stake::{self, bond_amount, query_reward_tokens}; use crate::queries::types::RequestCtx; @@ -546,7 +551,11 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - namada_proof_of_stake::bonds_and_unbonds(ctx.wl_storage, source, validator) + namada_proof_of_stake::queries::bonds_and_unbonds( + ctx.wl_storage, + source, + validator, + ) } /// Find all the validator addresses to whom the given `owner` address has @@ -622,7 +631,10 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - namada_proof_of_stake::find_validator_by_raw_hash(ctx.wl_storage, tm_addr) + namada_proof_of_stake::storage::find_validator_by_raw_hash( + ctx.wl_storage, + tm_addr, + ) } /// Native validator address by looking up the Tendermint address @@ -633,7 +645,7 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - namada_proof_of_stake::get_consensus_key_set(ctx.wl_storage) + namada_proof_of_stake::storage::get_consensus_key_set(ctx.wl_storage) } /// Find if the given source address has any bonds. @@ -645,7 +657,7 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - namada_proof_of_stake::has_bonds(ctx.wl_storage, &source) + namada_proof_of_stake::queries::has_bonds(ctx.wl_storage, &source) } /// Client-only methods for the router type are composed from router functions. diff --git a/sdk/src/rpc.rs b/sdk/src/rpc.rs index 38e4ee5b37..2bb7192a89 100644 --- a/sdk/src/rpc.rs +++ b/sdk/src/rpc.rs @@ -3,6 +3,7 @@ use std::cell::Cell; use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet}; use std::ops::ControlFlow; +use std::str::FromStr; use borsh::BorshDeserialize; use masp_primitives::asset_type::AssetType; @@ -25,6 +26,7 @@ use namada_core::types::storage::{ use namada_core::types::token::{ Amount, DenominatedAmount, Denomination, MaspDenom, }; +use namada_core::types::transaction::{ResultCode, TxResult}; use namada_core::types::{storage, token}; use namada_proof_of_stake::parameters::PosParams; use namada_proof_of_stake::types::{ @@ -283,6 +285,13 @@ pub async fn query_conversions( convert_response::(RPC.shell().read_conversions(client).await) } +/// Query to read the tokens that earn masp rewards. +pub async fn query_masp_reward_tokens( + client: &C, +) -> Result, Error> { + convert_response::(RPC.shell().masp_reward_tokens(client).await) +} + /// Query a wasm code hash pub async fn query_wasm_code_hash( context: &impl Namada, @@ -483,7 +492,20 @@ pub async fn dry_run_tx( .await, )? 
.data; - display_line!(context.io(), "Dry-run result: {}", result); + let result_str = if result.is_accepted() { + format!( + "Transaction was successfully applied. Used {} gas.", + result.gas_used + ) + } else { + format!( + "Transaction was rejected by VPs: {}.\nChanged key: {}", + serde_json::to_string_pretty(&result.vps_result.rejected_vps) + .unwrap(), + serde_json::to_string_pretty(&result.changed_keys).unwrap(), + ) + }; + display_line!(context.io(), "Dry-run result: {result_str}"); Ok(result) } @@ -511,20 +533,30 @@ pub enum TxBroadcastData { /// A parsed event from tendermint relating to a transaction #[derive(Debug, Serialize)] pub struct TxResponse { - /// Response information + /// Result of inner tx (wasm), if any + pub inner_tx: Option, + /// Response additional information pub info: String, /// Response log pub log: String, /// Block height - pub height: String, + pub height: BlockHeight, /// Transaction height pub hash: String, /// Response code - pub code: String, - /// Gas used + pub code: ResultCode, + /// Gas used. If there's an `inner_tx`, its gas is equal to this value. pub gas_used: String, - /// Initialized accounts - pub initialized_accounts: Vec
, +} + +/// Determines a result of an inner tx from [`TxResponse::inner_tx_result`]. +pub enum InnerTxResult<'a> { + /// Tx is applied and accepted by all VPs + Success(&'a TxResult), + /// Some VPs rejected the tx + VpsRejected(&'a TxResult), + /// Transaction failed in some other way + OtherFailure, } impl TryFrom for TxResponse { @@ -535,6 +567,9 @@ impl TryFrom for TxResponse { format!("Field \"{field}\" not present in event") } + let inner_tx = event + .get("inner_tx") + .map(|s| TxResult::from_str(s).unwrap()); let hash = event .get("hash") .ok_or_else(|| missing_field_err("hash"))? @@ -547,36 +582,29 @@ impl TryFrom for TxResponse { .get("log") .ok_or_else(|| missing_field_err("log"))? .clone(); - let height = event - .get("height") - .ok_or_else(|| missing_field_err("height"))? - .clone(); - let code = event - .get("code") - .ok_or_else(|| missing_field_err("code"))? - .clone(); + let height = BlockHeight::from_str( + event + .get("height") + .ok_or_else(|| missing_field_err("height"))?, + ) + .map_err(|e| e.to_string())?; + let code = ResultCode::from_str( + event.get("code").ok_or_else(|| missing_field_err("code"))?, + ) + .map_err(|e| e.to_string())?; let gas_used = event .get("gas_used") .ok_or_else(|| missing_field_err("gas_used"))? .clone(); - let initialized_accounts = event - .get("initialized_accounts") - .map(String::as_str) - // TODO: fix finalize block, to return initialized accounts, - // even when we reject a tx? - .map_or(Ok(vec![]), |initialized_accounts| { - serde_json::from_str(initialized_accounts) - .map_err(|err| format!("JSON decode error: {err}")) - })?; Ok(TxResponse { - hash, + inner_tx, info, + hash, log, height, code, gas_used, - initialized_accounts, }) } } @@ -588,6 +616,20 @@ impl TxResponse { panic!("Error fetching TxResponse: {err}"); }) } + + /// Check the result of the inner tx. This should not be used with wrapper + /// txs. + pub fn inner_tx_result(&self) -> InnerTxResult<'_> { + if let Some(tx) = self.inner_tx.as_ref() { + if tx.is_accepted() { + InnerTxResult::Success(tx) + } else { + InnerTxResult::VpsRejected(tx) + } + } else { + InnerTxResult::OtherFailure + } + } } /// Lookup the full response accompanying the specified transaction event @@ -646,17 +688,26 @@ pub async fn query_tx_response( .map(|tag| (tag.key.as_ref(), tag.value.as_ref())) .collect(); // Summarize the transaction results that we were searching for + let inner_tx = event_map + .get("inner_tx") + .map(|s| { + TxResult::from_str(s).map_err(|_| { + TError::parse("Error parsing TxResult".to_string()) + }) + }) + .transpose()?; + let code = ResultCode::from_str(event_map["code"]) + .map_err(|_| TError::parse("Error parsing ResultCode".to_string()))?; + let height = BlockHeight::from_str(event_map["height"]) + .map_err(|_| TError::parse("Error parsing BlockHeight".to_string()))?; let result = TxResponse { + inner_tx, info: event_map["info"].to_string(), log: event_map["log"].to_string(), - height: event_map["height"].to_string(), + height, hash: event_map["hash"].to_string(), - code: event_map["code"].to_string(), + code, gas_used: event_map["gas_used"].to_string(), - initialized_accounts: serde_json::from_str( - event_map["initialized_accounts"], - ) - .unwrap_or_default(), }; Ok(result) } @@ -1025,7 +1076,7 @@ pub async fn validate_amount( "No denomination found for token: {token}, but --force \ was passed. Defaulting to the provided denomination." 
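// Illustrative sketch (not part of the diff): consuming the new
// `inner_tx_result` helper after a transaction has been submitted. `resp` is
// assumed to be a `TxResponse` built from the ledger's tx event;
// `report_inner_result` is a hypothetical helper.
fn report_inner_result(resp: &TxResponse) {
    match resp.inner_tx_result() {
        InnerTxResult::Success(result) => {
            println!("inner tx applied, gas used: {}", result.gas_used);
        }
        InnerTxResult::VpsRejected(result) => {
            println!(
                "inner tx rejected by VPs: {:?}",
                result.vps_result.rejected_vps
            );
        }
        InnerTxResult::OtherFailure => {
            println!("wrapper failed before the inner tx could be applied");
        }
    }
}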
); - Ok(input_amount.denom) + Ok(input_amount.denom()) } else { display_line!( context.io(), @@ -1038,7 +1089,7 @@ pub async fn validate_amount( } } }?; - if denom < input_amount.denom && !force { + if denom < input_amount.denom() && !force { display_line!( context.io(), "The input amount contained a higher precision than allowed by \ @@ -1144,7 +1195,7 @@ pub async fn denominate_amount( ); 0.into() }); - DenominatedAmount { amount, denom } + DenominatedAmount::new(amount, denom) } /// Look up the denomination of a token in order to format it @@ -1163,12 +1214,14 @@ pub async fn format_denominated_amount( /// Look up the IBC denomination from a IbcToken. pub async fn query_ibc_denom( context: &N, - token: &Address, + token: impl AsRef, owner: Option<&Address>, ) -> String { - let hash = match token { - Address::Internal(InternalAddress::IbcToken(hash)) => hash.to_string(), - _ => return token.to_string(), + let hash = match Address::decode(token.as_ref()) { + Ok(Address::Internal(InternalAddress::IbcToken(hash))) => { + hash.to_string() + } + _ => return token.as_ref().to_string(), }; if let Some(owner) = owner { @@ -1195,5 +1248,5 @@ pub async fn query_ibc_denom( } } - token.to_string() + token.as_ref().to_string() } diff --git a/sdk/src/signing.rs b/sdk/src/signing.rs index 3a3528f2f3..38aec72d4b 100644 --- a/sdk/src/signing.rs +++ b/sdk/src/signing.rs @@ -27,6 +27,7 @@ use namada_core::types::transaction::account::{InitAccount, UpdateAccount}; use namada_core::types::transaction::governance::{ InitProposalData, VoteProposalData, }; +use namada_core::types::transaction::pgf::UpdateStewardCommission; use namada_core::types::transaction::pos::BecomeValidator; use namada_core::types::transaction::{pos, Fee}; use prost::Message; @@ -41,6 +42,7 @@ use crate::core::ledger::governance::storage::proposal::ProposalType; use crate::core::ledger::governance::storage::vote::{ StorageProposalVote, VoteType, }; +use crate::core::types::eth_bridge_pool::PendingTransfer; use crate::error::{EncodingError, Error, TxError}; use crate::ibc::apps::transfer::types::msgs::transfer::MsgTransfer; use crate::ibc::primitives::proto::Any; @@ -49,24 +51,20 @@ use crate::masp::make_asset_type; use crate::proto::{MaspBuilder, Section, Tx}; use crate::rpc::validate_amount; use crate::tx::{ - TX_BECOME_VALIDATOR_WASM, TX_BOND_WASM, TX_CHANGE_COMMISSION_WASM, - TX_CHANGE_CONSENSUS_KEY_WASM, TX_CHANGE_METADATA_WASM, - TX_CLAIM_REWARDS_WASM, TX_DEACTIVATE_VALIDATOR_WASM, TX_IBC_WASM, - TX_INIT_ACCOUNT_WASM, TX_INIT_PROPOSAL, TX_REACTIVATE_VALIDATOR_WASM, - TX_REVEAL_PK, TX_TRANSFER_WASM, TX_UNBOND_WASM, TX_UNJAIL_VALIDATOR_WASM, - TX_UPDATE_ACCOUNT_WASM, TX_VOTE_PROPOSAL, TX_WITHDRAW_WASM, VP_USER_WASM, + TX_BECOME_VALIDATOR_WASM, TX_BOND_WASM, TX_BRIDGE_POOL_WASM, + TX_CHANGE_COMMISSION_WASM, TX_CHANGE_CONSENSUS_KEY_WASM, + TX_CHANGE_METADATA_WASM, TX_CLAIM_REWARDS_WASM, + TX_DEACTIVATE_VALIDATOR_WASM, TX_IBC_WASM, TX_INIT_ACCOUNT_WASM, + TX_INIT_PROPOSAL, TX_REACTIVATE_VALIDATOR_WASM, TX_REDELEGATE_WASM, + TX_RESIGN_STEWARD, TX_REVEAL_PK, TX_TRANSFER_WASM, TX_UNBOND_WASM, + TX_UNJAIL_VALIDATOR_WASM, TX_UPDATE_ACCOUNT_WASM, + TX_UPDATE_STEWARD_COMMISSION, TX_VOTE_PROPOSAL, TX_WITHDRAW_WASM, + VP_USER_WASM, }; pub use crate::wallet::store::AddressVpType; use crate::wallet::{Wallet, WalletIo}; use crate::{args, display_line, rpc, MaybeSend, Namada}; -#[cfg(feature = "std")] -/// Env. 
var specifying where to store signing test vectors -const ENV_VAR_LEDGER_LOG_PATH: &str = "NAMADA_LEDGER_LOG_PATH"; -#[cfg(feature = "std")] -/// Env. var specifying where to store transaction debug outputs -const ENV_VAR_TX_LOG_PATH: &str = "NAMADA_TX_LOG_PATH"; - /// A structure holding the signing data to craft a transaction #[derive(Clone)] pub struct SigningTxData { @@ -447,7 +445,7 @@ pub async fn wrap_tx( tx_source_balance: Option, epoch: Epoch, fee_payer: common::PublicKey, -) -> Result, Error> { +) -> Result<(), Error> { let fee_payer_address = Address::from(&fee_payer); // Validate fee amount and token let gas_cost_key = parameter_storage::get_gas_cost_key(); @@ -475,6 +473,9 @@ pub async fn wrap_tx( } } }; + let validated_minimum_fee = context + .denominate_amount(&args.fee_token, minimum_fee) + .await; let fee_amount = match args.fee_amount { Some(amount) => { let validated_fee_amount = @@ -482,26 +483,23 @@ pub async fn wrap_tx( .await .expect("Expected to be able to validate fee"); - let amount = - Amount::from_uint(validated_fee_amount.amount, 0).unwrap(); - - if amount >= minimum_fee { - amount + if validated_fee_amount >= validated_minimum_fee { + validated_fee_amount } else if !args.force { // Update the fee amount if it's not enough display_line!( context.io(), "The provided gas price {} is less than the minimum \ amount required {}, changing it to match the minimum", - amount.to_string_native(), - minimum_fee.to_string_native() + validated_fee_amount.to_string(), + validated_minimum_fee.to_string() ); - minimum_fee + validated_minimum_fee } else { - amount + validated_fee_amount } } - None => minimum_fee, + None => validated_minimum_fee, }; let mut updated_balance = match tx_source_balance { @@ -523,26 +521,24 @@ pub async fn wrap_tx( } }; - let total_fee = fee_amount * u64::from(args.gas_limit); + let total_fee = fee_amount.amount() * u64::from(args.gas_limit); - let (unshield, unshielding_epoch) = match total_fee - .checked_sub(updated_balance) - { + let unshield = match total_fee.checked_sub(updated_balance) { Some(diff) if !diff.is_zero() => { if let Some(spending_key) = args.fee_unshield.clone() { // Unshield funds for fee payment let target = namada_core::types::masp::TransferTarget::Address( fee_payer_address.clone(), ); - let fee_amount = DenominatedAmount { + let fee_amount = DenominatedAmount::new( // NOTE: must unshield the total fee amount, not the // diff, because the ledger evaluates the transaction in // reverse (wrapper first, inner second) and cannot know // ahead of time if the inner will modify the balance of // the gas payer - amount: total_fee, - denom: 0.into(), - }; + total_fee, + 0.into(), + ); match ShieldedContext::::gen_shielded_transfer( context, @@ -557,7 +553,7 @@ pub async fn wrap_tx( builder: _, masp_tx: transaction, metadata: _data, - epoch: unshielding_epoch, + epoch: _unshielding_epoch, })) => { let spends = transaction .sapling_bundle() @@ -600,7 +596,7 @@ pub async fn wrap_tx( } updated_balance += total_fee; - (Some(transaction), Some(unshielding_epoch)) + Some(transaction) } Ok(None) => { if !args.force { @@ -612,7 +608,7 @@ pub async fn wrap_tx( )); } - (None, None) + None } Err(e) => { if !args.force { @@ -621,7 +617,7 @@ pub async fn wrap_tx( )); } - (None, None) + None } } } else { @@ -641,7 +637,7 @@ pub async fn wrap_tx( ))); } - (None, None) + None } } _ => { @@ -652,7 +648,7 @@ pub async fn wrap_tx( unshielding spending key will be ignored" ); } - (None, None) + None } }; @@ -676,7 +672,7 @@ pub async fn wrap_tx( 
unshield_section_hash, ); - Ok(unshielding_epoch) + Ok(()) } #[allow(clippy::result_large_err)] @@ -685,14 +681,14 @@ fn other_err(string: String) -> Result { } /// Represents the transaction data that is displayed on a Ledger device -#[derive(Default, Serialize, Deserialize)] +#[derive(Default, Serialize, Deserialize, Debug, Clone)] pub struct LedgerVector { - blob: String, - index: u64, - name: String, - output: Vec, - output_expert: Vec, - valid: bool, + pub blob: String, + pub index: u64, + pub name: String, + pub output: Vec, + pub output_expert: Vec, + pub valid: bool, } /// Adds a Ledger output line describing a given transaction amount and address @@ -725,7 +721,6 @@ fn make_ledger_amount_addr( /// Adds a Ledger output line describing a given transaction amount and asset /// type async fn make_ledger_amount_asset( - context: &impl Namada, tokens: &HashMap, output: &mut Vec, amount: u64, @@ -735,22 +730,17 @@ async fn make_ledger_amount_asset( ) { if let Some((token, _, _epoch)) = assets.get(token) { // If the AssetType can be decoded, then at least display Addressees - let formatted_amt = context.format_amount(token, amount.into()).await; if let Some(token) = tokens.get(token) { output.push(format!( "{}Amount : {} {}", prefix, token.to_uppercase(), - to_ledger_decimal(&formatted_amt), + amount, )); } else { output.extend(vec![ format!("{}Token : {}", prefix, token), - format!( - "{}Amount : {}", - prefix, - to_ledger_decimal(&formatted_amt) - ), + format!("{}Amount : {}", prefix, amount,), ]); } } else { @@ -820,7 +810,6 @@ fn format_outputs(output: &mut Vec) { /// Adds a Ledger output for the sender and destination for transparent and MASP /// transactions pub async fn make_ledger_masp_endpoints( - context: &impl Namada, tokens: &HashMap, output: &mut Vec, transfer: &Transfer, @@ -843,7 +832,6 @@ pub async fn make_ledger_masp_endpoints( let vk = ExtendedViewingKey::from(*sapling_input.key()); output.push(format!("Sender : {}", vk)); make_ledger_amount_asset( - context, tokens, output, sapling_input.value(), @@ -870,7 +858,6 @@ pub async fn make_ledger_masp_endpoints( let pa = PaymentAddress::from(sapling_output.address()); output.push(format!("Destination : {}", pa)); make_ledger_amount_asset( - context, tokens, output, sapling_output.value(), @@ -892,57 +879,6 @@ pub async fn make_ledger_masp_endpoints( } } -/// Internal method used to generate transaction test vectors -#[cfg(feature = "std")] -pub async fn generate_test_vector( - context: &impl Namada, - tx: &Tx, -) -> Result<(), Error> { - use std::env; - use std::fs::File; - use std::io::Write; - - if let Ok(path) = env::var(ENV_VAR_LEDGER_LOG_PATH) { - let mut tx = tx.clone(); - // Contract the large data blobs in the transaction - tx.wallet_filter(); - // Convert the transaction to Ledger format - let decoding = to_ledger_vector(context, &tx).await?; - let output = serde_json::to_string(&decoding) - .map_err(|e| Error::from(EncodingError::Serde(e.to_string())))?; - // Record the transaction at the identified path - let mut f = File::options() - .append(true) - .create(true) - .open(path) - .map_err(|e| { - Error::Other(format!("failed to open test vector file: {}", e)) - })?; - writeln!(f, "{},", output).map_err(|_| { - Error::Other("unable to write test vector to file".to_string()) - })?; - } - - // Attempt to decode the construction - if let Ok(path) = env::var(ENV_VAR_TX_LOG_PATH) { - let mut tx = tx.clone(); - // Contract the large data blobs in the transaction - tx.wallet_filter(); - // Record the transaction at the 
identified path - let mut f = File::options() - .append(true) - .create(true) - .open(path) - .map_err(|_| { - Error::Other("unable to write test vector to file".to_string()) - })?; - writeln!(f, "{:x?},", tx).map_err(|_| { - Error::Other("unable to write test vector to file".to_string()) - })?; - } - Ok(()) -} - /// Convert decimal numbers into the format used by Ledger. Specifically remove /// all insignificant zeros occurring after decimal point. fn to_ledger_decimal(amount: &str) -> String { @@ -1005,13 +941,11 @@ impl<'a> Display for LedgerProposalType<'a> { /// Converts the given transaction to the form that is displayed on the Ledger /// device pub async fn to_ledger_vector( - context: &impl Namada, + wallet: &Wallet, tx: &Tx, ) -> Result { // To facilitate lookups of human-readable token names - let tokens: HashMap = context - .wallet() - .await + let tokens: HashMap = wallet .get_addresses() .into_iter() .map(|(alias, addr)| (addr, alias)) @@ -1259,9 +1193,25 @@ pub async fn to_ledger_vector( Error::from(EncodingError::Conversion(err.to_string())) })?; - tv.name = "Update_VP_0".to_string(); + tv.name = "Update_Account_0".to_string(); + tv.output.extend(vec![ + format!("Type : Update Account"), + format!("Address : {}", update_account.addr), + ]); + tv.output.extend( + update_account + .public_keys + .iter() + .map(|k| format!("Public key : {}", k)), + ); + if update_account.threshold.is_some() { + tv.output.extend(vec![format!( + "Threshold : {}", + update_account.threshold.unwrap() + )]) + } - match &update_account.vp_code_hash { + let vp_code_data = match &update_account.vp_code_hash { Some(hash) => { let extra = tx .get_section(hash) @@ -1274,45 +1224,31 @@ pub async fn to_ledger_vector( } else { HEXLOWER.encode(&extra.code.hash().0) }; - tv.output.extend(vec![ - format!("Type : Update VP"), - format!("Address : {}", update_account.addr), - ]); - tv.output.extend( - update_account - .public_keys - .iter() - .map(|k| format!("Public key : {}", k)), - ); - if update_account.threshold.is_some() { - tv.output.extend(vec![format!( - "Threshold : {}", - update_account.threshold.unwrap() - )]) - } - tv.output.extend(vec![format!("VP type : {}", vp_code)]); - - tv.output_expert - .extend(vec![format!("Address : {}", update_account.addr)]); - tv.output_expert.extend( - update_account - .public_keys - .iter() - .map(|k| format!("Public key : {}", k)), - ); - if update_account.threshold.is_some() { - tv.output_expert.extend(vec![format!( - "Threshold : {}", - update_account.threshold.unwrap() - )]) - } - tv.output_expert.extend(vec![format!( - "VP type : {}", - HEXLOWER.encode(&extra.code.hash().0) - )]); + Some((vp_code, extra.code.hash())) } - None => (), + None => None, }; + if let Some((vp_code, _)) = &vp_code_data { + tv.output.extend(vec![format!("VP type : {}", vp_code)]); + } + tv.output_expert + .extend(vec![format!("Address : {}", update_account.addr)]); + tv.output_expert.extend( + update_account + .public_keys + .iter() + .map(|k| format!("Public key : {}", k)), + ); + if let Some(threshold) = update_account.threshold { + tv.output_expert + .extend(vec![format!("Threshold : {}", threshold,)]) + } + if let Some((_, extra_code_hash)) = vp_code_data { + tv.output_expert.extend(vec![format!( + "VP type : {}", + HEXLOWER.encode(&extra_code_hash.0) + )]); + } } else if code_sec.tag == Some(TX_TRANSFER_WASM.to_string()) { let transfer = Transfer::try_from_slice( &tx.data() @@ -1352,7 +1288,6 @@ pub async fn to_ledger_vector( tv.output.push("Type : Transfer".to_string()); 
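The `to_ledger_decimal` helper above is only described by its doc comment in this hunk; a minimal self-contained sketch consistent with that comment (not the actual implementation) would be:

```rust
/// Trim insignificant trailing zeros after the decimal point, and the point
/// itself if nothing significant remains (illustrative sketch only).
fn to_ledger_decimal_sketch(amount: &str) -> String {
    if amount.contains('.') {
        let mut trimmed = amount.trim_end_matches('0').to_string();
        if trimmed.ends_with('.') {
            trimmed.pop();
        }
        trimmed
    } else {
        amount.to_string()
    }
}

#[test]
fn trims_insignificant_zeros() {
    assert_eq!(to_ledger_decimal_sketch("1.2300"), "1.23");
    assert_eq!(to_ledger_decimal_sketch("5.000"), "5");
    assert_eq!(to_ledger_decimal_sketch("42"), "42");
}
```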
make_ledger_masp_endpoints( - context, &tokens, &mut tv.output, &transfer, @@ -1361,7 +1296,6 @@ pub async fn to_ledger_vector( ) .await; make_ledger_masp_endpoints( - context, &tokens, &mut tv.output_expert, &transfer, @@ -1686,34 +1620,134 @@ pub async fn to_ledger_vector( ]); tv.output_expert.push(format!("Validator : {}", address)); + } else if code_sec.tag == Some(TX_REDELEGATE_WASM.to_string()) { + let redelegation = pos::Redelegation::try_from_slice( + &tx.data() + .ok_or_else(|| Error::Other("Invalid Data".to_string()))?, + ) + .map_err(|err| { + Error::from(EncodingError::Conversion(err.to_string())) + })?; + + tv.name = "Redelegate_0".to_string(); + + tv.output.extend(vec![ + format!("Type : Redelegate"), + format!("Source Validator : {}", redelegation.src_validator), + format!("Destination Validator : {}", redelegation.dest_validator), + format!("Owner : {}", redelegation.owner), + format!( + "Amount : {}", + to_ledger_decimal(&redelegation.amount.to_string_native()) + ), + ]); + + tv.output_expert.extend(vec![ + format!("Source Validator : {}", redelegation.src_validator), + format!("Destination Validator : {}", redelegation.dest_validator), + format!("Owner : {}", redelegation.owner), + format!( + "Amount : {}", + to_ledger_decimal(&redelegation.amount.to_string_native()) + ), + ]); + } else if code_sec.tag == Some(TX_UPDATE_STEWARD_COMMISSION.to_string()) { + let update = UpdateStewardCommission::try_from_slice( + &tx.data() + .ok_or_else(|| Error::Other("Invalid Data".to_string()))?, + ) + .map_err(|err| { + Error::from(EncodingError::Conversion(err.to_string())) + })?; + + tv.name = "Update_Steward_Commission_0".to_string(); + tv.output.extend(vec![ + format!("Type : Update Steward Commission"), + format!("Steward : {}", update.steward), + ]); + for (address, dec) in &update.commission { + tv.output.push(format!("Commission : {} {}", address, dec)); + } + + tv.output_expert + .push(format!("Steward : {}", update.steward)); + for (address, dec) in &update.commission { + tv.output_expert + .push(format!("Commission : {} {}", address, dec)); + } + } else if code_sec.tag == Some(TX_RESIGN_STEWARD.to_string()) { + let address = Address::try_from_slice( + &tx.data() + .ok_or_else(|| Error::Other("Invalid Data".to_string()))?, + ) + .map_err(|err| { + Error::from(EncodingError::Conversion(err.to_string())) + })?; + + tv.name = "Resign_Steward_0".to_string(); + + tv.output.extend(vec![ + format!("Type : Resign Steward"), + format!("Steward : {}", address), + ]); + + tv.output_expert.push(format!("Steward : {}", address)); + } else if code_sec.tag == Some(TX_BRIDGE_POOL_WASM.to_string()) { + let transfer = PendingTransfer::try_from_slice( + &tx.data() + .ok_or_else(|| Error::Other("Invalid Data".to_string()))?, + ) + .map_err(|err| { + Error::from(EncodingError::Conversion(err.to_string())) + })?; + + tv.name = "Bridge_Pool_Transfer_0".to_string(); + + tv.output.extend(vec![ + format!("Type : Bridge Pool Transfer"), + format!("Transfer Kind : {}", transfer.transfer.kind), + format!("Transfer Sender : {}", transfer.transfer.sender), + format!("Transfer Recipient : {}", transfer.transfer.recipient), + format!("Transfer Asset : {}", transfer.transfer.asset), + format!("Transfer Amount : {}", transfer.transfer.amount), + format!("Gas Payer : {}", transfer.gas_fee.payer), + format!("Gas Token : {}", transfer.gas_fee.token), + format!("Gas Amount : {}", transfer.gas_fee.amount), + ]); + + tv.output_expert.extend(vec![ + format!("Transfer Kind : {}", transfer.transfer.kind), + 
format!("Transfer Sender : {}", transfer.transfer.sender), + format!("Transfer Recipient : {}", transfer.transfer.recipient), + format!("Transfer Asset : {}", transfer.transfer.asset), + format!("Transfer Amount : {}", transfer.transfer.amount), + format!("Gas Payer : {}", transfer.gas_fee.payer), + format!("Gas Token : {}", transfer.gas_fee.token), + format!("Gas Amount : {}", transfer.gas_fee.amount), + ]); } else { tv.name = "Custom_0".to_string(); tv.output.push("Type : Custom".to_string()); } if let Some(wrapper) = tx.header.wrapper() { - let gas_token = wrapper.fee.token.clone(); - let gas_limit = context - .format_amount(&gas_token, Amount::from(wrapper.gas_limit)) - .await; - let fee_amount_per_gas_unit = context - .format_amount(&gas_token, wrapper.fee.amount_per_gas_unit) - .await; + let fee_amount_per_gas_unit = + to_ledger_decimal(&wrapper.fee.amount_per_gas_unit.to_string()); tv.output_expert.extend(vec![ format!("Timestamp : {}", tx.header.timestamp.0), format!("Pubkey : {}", wrapper.pk), format!("Epoch : {}", wrapper.epoch), - format!("Gas limit : {}", gas_limit), + format!("Gas limit : {}", u64::from(wrapper.gas_limit)), ]); if let Some(token) = tokens.get(&wrapper.fee.token) { tv.output_expert.push(format!( "Fees/gas unit : {} {}", token.to_uppercase(), - to_ledger_decimal(&fee_amount_per_gas_unit), + fee_amount_per_gas_unit, )); } else { tv.output_expert.extend(vec![ - format!("Fee token : {}", gas_token), + format!("Fee token : {}", wrapper.fee.token), format!("Fees/gas unit : {}", fee_amount_per_gas_unit), ]); } diff --git a/sdk/src/tx.rs b/sdk/src/tx.rs index 447beef3e1..de0474615b 100644 --- a/sdk/src/tx.rs +++ b/sdk/src/tx.rs @@ -6,6 +6,7 @@ use std::path::{Path, PathBuf}; use std::time::Duration; use borsh::BorshSerialize; +use borsh_ext::BorshSerializeExt; use masp_primitives::asset_type::AssetType; use masp_primitives::transaction::builder; use masp_primitives::transaction::builder::Builder; @@ -34,7 +35,7 @@ use namada_core::ledger::pgf::cli::steward::Commission; use namada_core::types::address::{Address, InternalAddress, MASP}; use namada_core::types::dec::Dec; use namada_core::types::hash::Hash; -use namada_core::types::ibc::IbcShieldedTransfer; +use namada_core::types::ibc::{IbcShieldedTransfer, MsgShieldedTransfer}; use namada_core::types::key::*; use namada_core::types::masp::{TransferSource, TransferTarget}; use namada_core::types::storage::Epoch; @@ -45,7 +46,7 @@ use namada_core::types::transaction::governance::{ InitProposalData, VoteProposalData, }; use namada_core::types::transaction::pgf::UpdateStewardCommission; -use namada_core::types::transaction::pos; +use namada_core::types::transaction::{pos, ResultCode, TxResult}; use namada_core::types::{storage, token}; use namada_proof_of_stake::parameters::PosParams; use namada_proof_of_stake::types::{CommissionPair, ValidatorState}; @@ -59,7 +60,8 @@ use crate::masp::{make_asset_type, ShieldedContext, ShieldedTransfer}; use crate::proto::{MaspBuilder, Tx}; use crate::queries::Client; use crate::rpc::{ - self, query_wasm_code_hash, validate_amount, TxBroadcastData, TxResponse, + self, query_wasm_code_hash, validate_amount, InnerTxResult, + TxBroadcastData, TxResponse, }; use crate::signing::{self, SigningTxData, TxSourcePostBalance}; use crate::tendermint_rpc::endpoint::broadcast::tx_sync::Response; @@ -128,17 +130,27 @@ pub enum ProcessTxResponse { /// Result of submitting a transaction to the mempool Broadcast(Response), /// Result of dry running transaction - DryRun, - /// Dump transaction to disk - 
Dump, + DryRun(TxResult), } impl ProcessTxResponse { - /// Get the the accounts that were reported to be initialized - pub fn initialized_accounts(&self) -> Vec
{ + // Returns a `TxResult` if the transaction applied and was it accepted by + // all VPs. Note that this always returns false for dry-run transactions. + pub fn is_applied_and_valid(&self) -> Option<&TxResult> { match self { - Self::Applied(result) => result.initialized_accounts.clone(), - _ => vec![], + ProcessTxResponse::Applied(resp) => { + if resp.code == ResultCode::Ok { + if let InnerTxResult::Success(result) = + resp.inner_tx_result() + { + return Some(result); + } + } + None + } + ProcessTxResponse::DryRun(_) | ProcessTxResponse::Broadcast(_) => { + None + } } } } @@ -176,14 +188,14 @@ pub async fn prepare_tx( tx: &mut Tx, fee_payer: common::PublicKey, tx_source_balance: Option, -) -> Result> { +) -> Result<()> { if !args.dry_run { let epoch = rpc::query_epoch(context.client()).await?; signing::wrap_tx(context, tx, args, tx_source_balance, epoch, fee_payer) .await } else { - Ok(None) + Ok(()) } } @@ -226,14 +238,18 @@ pub async fn process_tx( .map(ProcessTxResponse::Broadcast) } else { match submit_tx(context, to_broadcast).await { - Ok(x) => { - save_initialized_accounts( - context, - args, - x.initialized_accounts.clone(), - ) - .await; - Ok(ProcessTxResponse::Applied(x)) + Ok(resp) => { + if let InnerTxResult::Success(result) = + resp.inner_tx_result() + { + save_initialized_accounts( + context, + args, + result.initialized_accounts.clone(), + ) + .await; + } + Ok(ProcessTxResponse::Applied(resp)) } Err(x) => Err(x), } @@ -267,7 +283,7 @@ pub async fn build_reveal_pk( context: &impl Namada, args: &args::Tx, public_key: &common::PublicKey, -) -> Result<(Tx, SigningTxData, Option)> { +) -> Result<(Tx, SigningTxData)> { let signing_data = signing::aux_signing_data(context, args, None, Some(public_key.into())) .await?; @@ -282,7 +298,7 @@ pub async fn build_reveal_pk( None, ) .await - .map(|(tx, epoch)| (tx, signing_data, epoch)) + .map(|tx| (tx, signing_data)) } /// Broadcast a transaction to be included in the blockchain and checks that @@ -315,23 +331,18 @@ pub async fn broadcast_tx( )?; if response.code == 0.into() { - display_line!( - context.io(), - "Transaction added to mempool: {:?}", - response - ); + display_line!(context.io(), "Transaction added to mempool."); + tracing::debug!("Transaction mempool response: {response:#?}"); // Print the transaction identifiers to enable the extraction of // acceptance/application results later { display_line!( context.io(), - "Wrapper transaction hash: {:?}", - wrapper_tx_hash + "Wrapper transaction hash: {wrapper_tx_hash}", ); display_line!( context.io(), - "Inner transaction hash: {:?}", - decrypted_tx_hash + "Inner transaction hash: {decrypted_tx_hash}", ); } Ok(response) @@ -379,24 +390,19 @@ pub async fn submit_tx( "Awaiting transaction approval", ); - let parsed = { + let response = { let wrapper_query = rpc::TxEventQuery::Accepted(wrapper_hash.as_str()); let event = rpc::query_tx_status(context, wrapper_query, deadline).await?; - let parsed = TxResponse::from_event(event); - let tx_to_str = |parsed| { - serde_json::to_string_pretty(parsed).map_err(|err| { - Error::from(EncodingError::Serde(err.to_string())) - }) - }; - display_line!( - context.io(), - "Transaction accepted with result: {}", - tx_to_str(&parsed)? - ); - // The transaction is now on chain. 
We wait for it to be decrypted - // and applied - if parsed.code == 0.to_string() { + let wrapper_resp = TxResponse::from_event(event); + + if display_wrapper_resp_and_get_result(context, &wrapper_resp) { + display_line!( + context.io(), + "Waiting for inner transaction result..." + ); + // The transaction is now on chain. We wait for it to be decrypted + // and applied // We also listen to the event emitted when the encrypted // payload makes its way onto the blockchain let decrypted_query = @@ -404,24 +410,88 @@ pub async fn submit_tx( let event = rpc::query_tx_status(context, decrypted_query, deadline) .await?; - let parsed = TxResponse::from_event(event); - display_line!( - context.io(), - "Transaction applied with result: {}", - tx_to_str(&parsed)? - ); - Ok(parsed) + let inner_resp = TxResponse::from_event(event); + + display_inner_resp(context, &inner_resp); + Ok(inner_resp) } else { - Ok(parsed) + Ok(wrapper_resp) } }; + response +} + +/// Display a result of a wrapper tx. +/// Returns true if the wrapper tx was successful. +pub fn display_wrapper_resp_and_get_result( + context: &impl Namada, + resp: &TxResponse, +) -> bool { + let result = if resp.code != ResultCode::Ok { + display_line!( + context.io(), + "Wrapper transaction failed with error code {}. Used {} gas.", + resp.code, + resp.gas_used, + ); + false + } else { + display_line!( + context.io(), + "Wrapper transaction accepted at height {}. Used {} gas.", + resp.height, + resp.gas_used, + ); + true + }; + tracing::debug!( - transaction = ?to_broadcast, - "Transaction approved", + "Full wrapper result: {}", + serde_json::to_string_pretty(resp).unwrap() ); + result +} - parsed +/// Display a result of an inner tx. +pub fn display_inner_resp(context: &impl Namada, resp: &TxResponse) { + match resp.inner_tx_result() { + InnerTxResult::Success(inner) => { + display_line!( + context.io(), + "Transaction was successfully applied at height {}. 
Used {} \ + gas.", + resp.height, + inner.gas_used, + ); + } + InnerTxResult::VpsRejected(inner) => { + let changed_keys: Vec<_> = inner + .changed_keys + .iter() + .map(storage::Key::to_string) + .collect(); + edisplay_line!( + context.io(), + "Transaction was rejected by VPs: {}.\nChanged keys: {}", + serde_json::to_string_pretty(&inner.vps_result.rejected_vps) + .unwrap(), + serde_json::to_string_pretty(&changed_keys).unwrap(), + ); + } + InnerTxResult::OtherFailure => { + edisplay_line!( + context.io(), + "Transaction failed.\nDetails: {}", + serde_json::to_string_pretty(&resp).unwrap() + ); + } + } + + tracing::debug!( + "Full result: {}", + serde_json::to_string_pretty(&resp).unwrap() + ); } /// decode components of a masp note @@ -511,7 +581,7 @@ pub async fn build_validator_commission_change( rate, tx_code_path, }: &args::CommissionRateChange, -) -> Result<(Tx, SigningTxData, Option)> { +) -> Result<(Tx, SigningTxData)> { let default_signer = Some(validator.clone()); let signing_data = signing::aux_signing_data( context, @@ -611,7 +681,7 @@ pub async fn build_validator_commission_change( None, ) .await - .map(|(tx, epoch)| (tx, signing_data, epoch)) + .map(|tx| (tx, signing_data)) } /// Submit validator metadata change @@ -627,7 +697,7 @@ pub async fn build_validator_metadata_change( commission_rate, tx_code_path, }: &args::MetaDataChange, -) -> Result<(Tx, SigningTxData, Option)> { +) -> Result<(Tx, SigningTxData)> { let default_signer = Some(validator.clone()); let signing_data = signing::aux_signing_data( context, @@ -740,7 +810,7 @@ pub async fn build_validator_metadata_change( None, ) .await - .map(|(tx, epoch)| (tx, signing_data, epoch)) + .map(|tx| (tx, signing_data)) } /// Craft transaction to update a steward commission @@ -752,7 +822,7 @@ pub async fn build_update_steward_commission( commission, tx_code_path, }: &args::UpdateStewardCommission, -) -> Result<(Tx, SigningTxData, Option)> { +) -> Result<(Tx, SigningTxData)> { let default_signer = Some(steward.clone()); let signing_data = signing::aux_signing_data( context, @@ -799,7 +869,7 @@ pub async fn build_update_steward_commission( None, ) .await - .map(|(tx, epoch)| (tx, signing_data, epoch)) + .map(|tx| (tx, signing_data)) } /// Craft transaction to resign as a steward @@ -810,7 +880,7 @@ pub async fn build_resign_steward( steward, tx_code_path, }: &args::ResignSteward, -) -> Result<(Tx, SigningTxData, Option)> { +) -> Result<(Tx, SigningTxData)> { let default_signer = Some(steward.clone()); let signing_data = signing::aux_signing_data( context, @@ -839,7 +909,7 @@ pub async fn build_resign_steward( None, ) .await - .map(|(tx, epoch)| (tx, signing_data, epoch)) + .map(|tx| (tx, signing_data)) } /// Submit transaction to unjail a jailed validator @@ -850,7 +920,7 @@ pub async fn build_unjail_validator( validator, tx_code_path, }: &args::TxUnjailValidator, -) -> Result<(Tx, SigningTxData, Option)> { +) -> Result<(Tx, SigningTxData)> { let default_signer = Some(validator.clone()); let signing_data = signing::aux_signing_data( context, @@ -941,7 +1011,7 @@ pub async fn build_unjail_validator( None, ) .await - .map(|(tx, epoch)| (tx, signing_data, epoch)) + .map(|tx| (tx, signing_data)) } /// Submit transaction to deactivate a validator @@ -952,7 +1022,7 @@ pub async fn build_deactivate_validator( validator, tx_code_path, }: &args::TxDeactivateValidator, -) -> Result<(Tx, SigningTxData, Option)> { +) -> Result<(Tx, SigningTxData)> { let default_signer = Some(validator.clone()); let signing_data = signing::aux_signing_data( 
context, @@ -1012,7 +1082,7 @@ pub async fn build_deactivate_validator( None, ) .await - .map(|(tx, epoch)| (tx, signing_data, epoch)) + .map(|tx| (tx, signing_data)) } /// Submit transaction to deactivate a validator @@ -1023,7 +1093,7 @@ pub async fn build_reactivate_validator( validator, tx_code_path, }: &args::TxReactivateValidator, -) -> Result<(Tx, SigningTxData, Option)> { +) -> Result<(Tx, SigningTxData)> { let default_signer = Some(validator.clone()); let signing_data = signing::aux_signing_data( context, @@ -1082,7 +1152,7 @@ pub async fn build_reactivate_validator( None, ) .await - .map(|(tx, epoch)| (tx, signing_data, epoch)) + .map(|tx| (tx, signing_data)) } /// Redelegate bonded tokens from one validator to another @@ -1262,7 +1332,7 @@ pub async fn build_redelegation( None, ) .await - .map(|(tx, _epoch)| (tx, signing_data)) + .map(|tx| (tx, signing_data)) } /// Submit transaction to withdraw an unbond @@ -1274,7 +1344,7 @@ pub async fn build_withdraw( source, tx_code_path, }: &args::Withdraw, -) -> Result<(Tx, SigningTxData, Option)> { +) -> Result<(Tx, SigningTxData)> { let default_address = source.clone().unwrap_or(validator.clone()); let default_signer = Some(default_address.clone()); let signing_data = signing::aux_signing_data( @@ -1345,7 +1415,7 @@ pub async fn build_withdraw( None, ) .await - .map(|(tx, epoch)| (tx, signing_data, epoch)) + .map(|tx| (tx, signing_data)) } /// Submit transaction to withdraw an unbond @@ -1357,7 +1427,7 @@ pub async fn build_claim_rewards( source, tx_code_path, }: &args::ClaimRewards, -) -> Result<(Tx, SigningTxData, Option)> { +) -> Result<(Tx, SigningTxData)> { let default_address = source.clone().unwrap_or(validator.clone()); let default_signer = Some(default_address.clone()); let signing_data = signing::aux_signing_data( @@ -1393,7 +1463,7 @@ pub async fn build_claim_rewards( None, ) .await - .map(|(tx, epoch)| (tx, signing_data, epoch)) + .map(|tx| (tx, signing_data)) } /// Submit a transaction to unbond @@ -1406,12 +1476,7 @@ pub async fn build_unbond( source, tx_code_path, }: &args::Unbond, -) -> Result<( - Tx, - SigningTxData, - Option, - Option<(Epoch, token::Amount)>, -)> { +) -> Result<(Tx, SigningTxData, Option<(Epoch, token::Amount)>)> { // Require a positive amount of tokens to be bonded if amount.is_zero() { edisplay_line!( @@ -1490,7 +1555,7 @@ pub async fn build_unbond( source: source.clone(), }; - let (tx, epoch) = build( + let tx = build( context, tx_args, tx_code_path.clone(), @@ -1500,7 +1565,7 @@ pub async fn build_unbond( None, ) .await?; - Ok((tx, signing_data, epoch, latest_withdrawal_pre)) + Ok((tx, signing_data, latest_withdrawal_pre)) } /// Query the unbonds post-tx @@ -1585,7 +1650,7 @@ pub async fn build_bond( native_token, tx_code_path, }: &args::Bond, -) -> Result<(Tx, SigningTxData, Option)> { +) -> Result<(Tx, SigningTxData)> { // Require a positive amount of tokens to be bonded if amount.is_zero() { edisplay_line!( @@ -1685,7 +1750,7 @@ pub async fn build_bond( tx_source_balance, ) .await - .map(|(tx, epoch)| (tx, signing_data, epoch)) + .map(|tx| (tx, signing_data)) } /// Build a default proposal governance @@ -1701,7 +1766,7 @@ pub async fn build_default_proposal( tx_code_path, }: &args::InitProposal, proposal: DefaultProposal, -) -> Result<(Tx, SigningTxData, Option)> { +) -> Result<(Tx, SigningTxData)> { let default_signer = Some(proposal.proposal.author.clone()); let signing_data = signing::aux_signing_data( context, @@ -1738,7 +1803,7 @@ pub async fn build_default_proposal( None, // TODO: need to 
pay the fee to submit a proposal ) .await - .map(|(tx, epoch)| (tx, signing_data, epoch)) + .map(|tx| (tx, signing_data)) } /// Build a proposal vote @@ -1754,7 +1819,7 @@ pub async fn build_vote_proposal( tx_code_path, }: &args::VoteProposal, epoch: Epoch, -) -> Result<(Tx, SigningTxData, Option)> { +) -> Result<(Tx, SigningTxData)> { let default_signer = Some(voter.clone()); let signing_data = signing::aux_signing_data( context, @@ -1825,7 +1890,7 @@ pub async fn build_vote_proposal( None, ) .await - .map(|(tx, epoch)| (tx, signing_data, epoch)) + .map(|tx| (tx, signing_data)) } /// Build a pgf funding proposal governance @@ -1841,7 +1906,7 @@ pub async fn build_pgf_funding_proposal( tx_code_path, }: &args::InitProposal, proposal: PgfFundingProposal, -) -> Result<(Tx, SigningTxData, Option)> { +) -> Result<(Tx, SigningTxData)> { let default_signer = Some(proposal.proposal.author.clone()); let signing_data = signing::aux_signing_data( context, @@ -1870,7 +1935,7 @@ pub async fn build_pgf_funding_proposal( None, // TODO: need to pay the fee to submit a proposal ) .await - .map(|(tx, epoch)| (tx, signing_data, epoch)) + .map(|tx| (tx, signing_data)) } /// Build a pgf funding proposal governance @@ -1886,7 +1951,7 @@ pub async fn build_pgf_stewards_proposal( tx_code_path, }: &args::InitProposal, proposal: PgfStewardProposal, -) -> Result<(Tx, SigningTxData, Option)> { +) -> Result<(Tx, SigningTxData)> { let default_signer = Some(proposal.proposal.author.clone()); let signing_data = signing::aux_signing_data( context, @@ -1916,7 +1981,7 @@ pub async fn build_pgf_stewards_proposal( None, // TODO: need to pay the fee to submit a proposal ) .await - .map(|(tx, epoch)| (tx, signing_data, epoch)) + .map(|tx| (tx, signing_data)) } /// Submit an IBC transfer @@ -1924,18 +1989,17 @@ pub async fn build_ibc_transfer( context: &impl Namada, args: &args::TxIbcTransfer, ) -> Result<(Tx, SigningTxData, Option)> { - let default_signer = Some(args.source.clone()); + let source = args.source.effective_address(); let signing_data = signing::aux_signing_data( context, &args.tx, - Some(args.source.clone()), - default_signer, + Some(source.clone()), + Some(source.clone()), ) .await?; // Check that the source address exists on chain let source = - source_exists_or_err(args.source.clone(), args.tx.force, context) - .await?; + source_exists_or_err(source.clone(), args.tx.force, context).await?; // We cannot check the receiver // validate the amount given @@ -1943,7 +2007,7 @@ pub async fn build_ibc_transfer( validate_amount(context, args.amount, &args.token, args.tx.force) .await .expect("expected to validate amount"); - if validated_amount.canonical().denom.0 != 0 { + if validated_amount.canonical().denom().0 != 0 { return Err(Error::Other(format!( "The amount for the IBC transfer should be an integer: {}", validated_amount @@ -1956,7 +2020,7 @@ pub async fn build_ibc_transfer( let post_balance = check_balance_too_low_err( &args.token, &source, - validated_amount.amount, + validated_amount.amount(), balance_key, args.tx.force, context, @@ -1973,8 +2037,21 @@ pub async fn build_ibc_transfer( .await .map_err(|e| Error::from(QueryError::Wasm(e.to_string())))?; + // For transfer from a spending key + let shielded_parts = construct_shielded_parts( + context, + &args.source, + // The token will be escrowed to IBC address + &TransferTarget::Address(Address::Internal(InternalAddress::Ibc)), + &args.token, + validated_amount, + ) + .await?; + let shielded_tx_epoch = shielded_parts.as_ref().map(|trans| trans.0.epoch); + let 
ibc_denom = - rpc::query_ibc_denom(context, &args.token, Some(&source)).await; + rpc::query_ibc_denom(context, &args.token.to_string(), Some(&source)) + .await; let token = PrefixedCoin { denom: ibc_denom.parse().expect("Invalid IBC denom"), // Set the IBC amount as an integer @@ -2014,7 +2091,7 @@ pub async fn build_ibc_transfer( IbcTimestamp::none() }; - let msg = MsgTransfer { + let message = MsgTransfer { port_id_on_a: args.port_id.clone(), chan_id_on_a: args.channel_id.clone(), packet_data, @@ -2022,20 +2099,57 @@ pub async fn build_ibc_transfer( timeout_timestamp_on_b: timeout_timestamp, }; - let any_msg = msg.to_any(); - let mut data = vec![]; - prost::Message::encode(&any_msg, &mut data) - .map_err(TxError::EncodeFailure)?; - let chain_id = args.tx.chain_id.clone().unwrap(); let mut tx = Tx::new(chain_id, args.tx.expiration); + + let data = match shielded_parts { + Some((shielded_transfer, asset_types)) => { + let masp_tx_hash = + tx.add_masp_tx_section(shielded_transfer.masp_tx.clone()).1; + let transfer = token::Transfer { + source: source.clone(), + // The token will be escrowed to IBC address + target: Address::Internal(InternalAddress::Ibc), + token: args.token.clone(), + amount: validated_amount, + // The address could be a payment address, but the address isn't + // that of this chain. + key: None, + // Link the Transfer to the MASP Transaction by hash code + shielded: Some(masp_tx_hash), + }; + tx.add_masp_builder(MaspBuilder { + asset_types, + metadata: shielded_transfer.metadata, + builder: shielded_transfer.builder, + target: masp_tx_hash, + }); + let shielded_transfer = IbcShieldedTransfer { + transfer, + masp_tx: shielded_transfer.masp_tx, + }; + MsgShieldedTransfer { + message, + shielded_transfer, + } + .serialize_to_vec() + } + None => { + let any_msg = message.to_any(); + let mut data = vec![]; + prost::Message::encode(&any_msg, &mut data) + .map_err(TxError::EncodeFailure)?; + data + } + }; + tx.add_code_from_hash( tx_code_hash, Some(args.tx_code_path.to_string_lossy().into_owned()), ) .add_serialized_data(data); - let epoch = prepare_tx( + prepare_tx( context, &args.tx, &mut tx, @@ -2044,7 +2158,7 @@ pub async fn build_ibc_transfer( ) .await?; - Ok((tx, signing_data, epoch)) + Ok((tx, signing_data, shielded_tx_epoch)) } /// Abstraction for helping build transactions @@ -2057,7 +2171,7 @@ pub async fn build( on_tx: F, gas_payer: &common::PublicKey, tx_source_balance: Option, -) -> Result<(Tx, Option)> +) -> Result where F: FnOnce(&mut Tx, &mut D) -> Result<()>, D: BorshSerialize, @@ -2083,7 +2197,7 @@ async fn build_pow_flag( on_tx: F, gas_payer: &common::PublicKey, tx_source_balance: Option, -) -> Result<(Tx, Option)> +) -> Result where F: FnOnce(&mut Tx, &mut D) -> Result<()>, D: BorshSerialize, @@ -2105,7 +2219,7 @@ where ) .add_data(data); - let epoch = prepare_tx( + prepare_tx( context, tx_args, &mut tx_builder, @@ -2113,7 +2227,7 @@ where tx_source_balance, ) .await?; - Ok((tx_builder, epoch)) + Ok(tx_builder) } /// Try to decode the given asset type and add its decoding to the supplied set. @@ -2204,7 +2318,7 @@ pub async fn build_transfer( let post_balance = check_balance_too_low_err( &args.token, &source, - validated_amount.amount, + validated_amount.amount(), balance_key, args.tx.force, context, @@ -2222,61 +2336,35 @@ pub async fn build_transfer( // signer. Also, if the transaction is shielded, redact the amount and token // types by setting the transparent value to 0 and token type to a constant. 
// This has no side-effect because transaction is to self. - let (_amount, token) = if source == masp_addr && target == masp_addr { - // TODO Refactor me, we shouldn't rely on any specific token here. - (token::Amount::zero(), args.native_token.clone()) - } else { - (validated_amount.amount, args.token.clone()) - }; + let (transparent_amount, transparent_token) = + if source == masp_addr && target == masp_addr { + // TODO Refactor me, we shouldn't rely on any specific token here. + (token::Amount::zero().into(), args.native_token.clone()) + } else { + (validated_amount, args.token.clone()) + }; // Determine whether to pin this transaction to a storage key let key = match &args.target { TransferTarget::PaymentAddress(pa) if pa.is_pinned() => Some(pa.hash()), _ => None, }; - // Construct the shielded part of the transaction, if any - let stx_result = - ShieldedContext::::gen_shielded_transfer( - context, - &args.source, - &args.target, - &args.token, - validated_amount, - ) - .await; - - let shielded_parts = match stx_result { - Ok(stx) => Ok(stx), - Err(Build(builder::Error::InsufficientFunds(_))) => { - Err(TxError::NegativeBalanceAfterTransfer( - Box::new(source.clone()), - validated_amount.amount.to_string_native(), - Box::new(token.clone()), - )) - } - Err(err) => Err(TxError::MaspError(err.to_string())), - }?; - - let shielded_tx_epoch = shielded_parts.clone().map(|trans| trans.epoch); - - let asset_types = match shielded_parts.clone() { - None => None, - Some(transfer) => { - // Get the decoded asset types used in the transaction to give - // offline wallet users more information - let asset_types = used_asset_types(context, &transfer.builder) - .await - .unwrap_or_default(); - Some(asset_types) - } - }; + let shielded_parts = construct_shielded_parts( + context, + &args.source, + &args.target, + &args.token, + validated_amount, + ) + .await?; + let shielded_tx_epoch = shielded_parts.as_ref().map(|trans| trans.0.epoch); // Construct the corresponding transparent Transfer object let transfer = token::Transfer { source: source.clone(), target: target.clone(), - token: token.clone(), - amount: validated_amount, + token: transparent_token.clone(), + amount: transparent_amount, key: key.clone(), // Link the Transfer to the MASP Transaction by hash code shielded: None, @@ -2284,12 +2372,15 @@ pub async fn build_transfer( let add_shielded = |tx: &mut Tx, transfer: &mut token::Transfer| { // Add the MASP Transaction and its Builder to facilitate validation - if let Some(ShieldedTransfer { - builder, - masp_tx, - metadata, - epoch: _, - }) = shielded_parts + if let Some(( + ShieldedTransfer { + builder, + masp_tx, + metadata, + epoch: _, + }, + asset_types, + )) = shielded_parts { // Add a MASP Transaction section to the Tx and get the tx hash let masp_tx_hash = tx.add_masp_tx_section(masp_tx).1; @@ -2298,8 +2389,7 @@ pub async fn build_transfer( tracing::debug!("Transfer data {:?}", transfer); tx.add_masp_builder(MaspBuilder { - // Is safe - asset_types: asset_types.unwrap(), + asset_types, // Store how the Info objects map to Descriptors/Outputs metadata, // Store the data that was used to construct the Transaction @@ -2310,7 +2400,7 @@ pub async fn build_transfer( }; Ok(()) }; - let (tx, unshielding_epoch) = build_pow_flag( + let tx = build_pow_flag( context, &args.tx, args.tx_code_path.clone(), @@ -2320,26 +2410,44 @@ pub async fn build_transfer( tx_source_balance, ) .await?; - // Manage the two masp epochs - let masp_epoch = match (unshielding_epoch, shielded_tx_epoch) { - 
(Some(fee_unshield_epoch), Some(transfer_unshield_epoch)) => { - // If the two masp epochs are different, either the wrapper or the - // inner tx will fail, so abort tx creation - if fee_unshield_epoch != transfer_unshield_epoch && !args.tx.force { - return Err(Error::Other( - "Fee unshielding masp tx and inner tx masp transaction \ - were crafted on an epoch boundary" - .to_string(), - )); - } - // Take the smaller of the two epochs - Some(fee_unshield_epoch.min(transfer_unshield_epoch)) + Ok((tx, signing_data, shielded_tx_epoch)) +} + +// Construct the shielded part of the transaction, if any +async fn construct_shielded_parts( + context: &N, + source: &TransferSource, + target: &TransferTarget, + token: &Address, + amount: token::DenominatedAmount, +) -> Result)>> { + let stx_result = + ShieldedContext::::gen_shielded_transfer( + context, source, target, token, amount, + ) + .await; + + let shielded_parts = match stx_result { + Ok(Some(stx)) => stx, + Ok(None) => return Ok(None), + Err(Build(builder::Error::InsufficientFunds(_))) => { + return Err(TxError::NegativeBalanceAfterTransfer( + Box::new(source.effective_address()), + amount.amount().to_string_native(), + Box::new(token.clone()), + ) + .into()); } - (Some(_fee_unshielding_epoch), None) => unshielding_epoch, - (None, Some(_transfer_unshield_epoch)) => shielded_tx_epoch, - (None, None) => None, + Err(err) => return Err(TxError::MaspError(err.to_string()).into()), }; - Ok((tx, signing_data, masp_epoch)) + + // Get the decoded asset types used in the transaction to give offline + // wallet users more information + let asset_types = used_asset_types(context, &shielded_parts.builder) + .await + .unwrap_or_default(); + + Ok(Some((shielded_parts, asset_types))) } /// Submit a transaction to initialize an account @@ -2352,7 +2460,7 @@ pub async fn build_init_account( public_keys, threshold, }: &args::TxInitAccount, -) -> Result<(Tx, SigningTxData, Option)> { +) -> Result<(Tx, SigningTxData)> { let signing_data = signing::aux_signing_data(context, tx_args, None, None).await?; @@ -2394,7 +2502,7 @@ pub async fn build_init_account( None, ) .await - .map(|(tx, epoch)| (tx, signing_data, epoch)) + .map(|tx| (tx, signing_data)) } /// Submit a transaction to update a VP @@ -2408,7 +2516,7 @@ pub async fn build_update_account( public_keys, threshold, }: &args::TxUpdateAccount, -) -> Result<(Tx, SigningTxData, Option)> { +) -> Result<(Tx, SigningTxData)> { let default_signer = Some(addr.clone()); let signing_data = signing::aux_signing_data( context, @@ -2476,7 +2584,7 @@ pub async fn build_update_account( None, ) .await - .map(|(tx, epoch)| (tx, signing_data, epoch)) + .map(|tx| (tx, signing_data)) } /// Submit a custom transaction @@ -2489,7 +2597,7 @@ pub async fn build_custom( serialized_tx, owner, }: &args::TxCustom, -) -> Result<(Tx, SigningTxData, Option)> { +) -> Result<(Tx, SigningTxData)> { let default_signer = Some(owner.clone()); let signing_data = signing::aux_signing_data( context, @@ -2518,7 +2626,7 @@ pub async fn build_custom( tx }; - let epoch = prepare_tx( + prepare_tx( context, tx_args, &mut tx, @@ -2527,7 +2635,7 @@ pub async fn build_custom( ) .await?; - Ok((tx, signing_data, epoch)) + Ok((tx, signing_data)) } /// Generate IBC shielded transfer @@ -2652,8 +2760,8 @@ async fn expect_dry_broadcast( ) -> Result { match to_broadcast { TxBroadcastData::DryRun(tx) => { - rpc::dry_run_tx(context, tx.to_bytes()).await?; - Ok(ProcessTxResponse::DryRun) + let result = rpc::dry_run_tx(context, tx.to_bytes()).await?; + 
Ok(ProcessTxResponse::DryRun(result)) } TxBroadcastData::Live { tx, diff --git a/sdk/src/wallet/mod.rs b/sdk/src/wallet/mod.rs index c5fe54aea9..04d6d13796 100644 --- a/sdk/src/wallet/mod.rs +++ b/sdk/src/wallet/mod.rs @@ -417,6 +417,14 @@ impl Wallet { self.store.find_payment_addr(alias.as_ref()) } + /// Find an alias by the payment address if it's in the wallet. + pub fn find_alias_by_payment_addr( + &self, + payment_address: &PaymentAddress, + ) -> Option<&Alias> { + self.store.find_alias_by_payment_addr(payment_address) + } + /// Get all known keys by their alias, paired with PKH, if known. pub fn get_secret_keys( &self, @@ -477,6 +485,26 @@ impl Wallet { .map(|(alias, value)| (alias.into(), value)) .collect() } + + /// Check if alias is an encrypted secret key + pub fn is_encrypted_secret_key( + &self, + alias: impl AsRef, + ) -> Option { + self.store + .find_secret_key(alias) + .map(|stored_keypair| stored_keypair.is_encrypted()) + } + + /// Check if alias is an encrypted spending key + pub fn is_encrypted_spending_key( + &self, + alias: impl AsRef, + ) -> Option { + self.store + .find_spending_key(alias) + .map(|stored_spend_key| stored_spend_key.is_encrypted()) + } } impl Wallet { @@ -559,9 +587,8 @@ impl Wallet { /// the keypair for the alias. /// If no encryption password is provided, the keypair will be stored raw /// without encryption. - /// Stores the key in decrypted key cache and - /// returns the alias of the key and a reference-counting pointer to the - /// key. + /// Stores the key in decrypted key cache and returns the alias of the key + /// and a reference-counting pointer to the key. pub fn gen_store_secret_key( &mut self, scheme: SchemeType, @@ -656,8 +683,9 @@ impl Wallet { alias = format!("disposable_{ctr}"); } // Generate a disposable keypair to sign the wrapper if requested - // TODO: once the wrapper transaction has been accepted, this key can be - // deleted from wallet + // TODO: once the wrapper transaction has been applied, this key can be + // deleted from wallet (the transaction being accepted is not enough + // cause we could end up doing a rollback) let (alias, disposable_keypair) = self .gen_store_secret_key( SchemeType::Ed25519, @@ -962,4 +990,9 @@ impl Wallet { pub fn extend(&mut self, wallet: Self) { self.store.extend(wallet.store) } + + /// Remove keys and addresses associated with the given alias + pub fn remove_all_by_alias(&mut self, alias: String) { + self.store.remove_alias(&alias.into()) + } } diff --git a/sdk/src/wallet/store.rs b/sdk/src/wallet/store.rs index 2ed2f4d915..f8a370b347 100644 --- a/sdk/src/wallet/store.rs +++ b/sdk/src/wallet/store.rs @@ -63,8 +63,8 @@ pub struct Store { view_keys: BTreeMap, /// Known spending keys spend_keys: BTreeMap>, - /// Known payment addresses - payment_addrs: BTreeMap, + /// Payment address book + payment_addrs: BiBTreeMap, /// Cryptographic keypairs secret_keys: BTreeMap>, /// Known public keys @@ -148,7 +148,15 @@ impl Store { &self, alias: impl AsRef, ) -> Option<&PaymentAddress> { - self.payment_addrs.get(&alias.into()) + self.payment_addrs.get_by_left(&alias.into()) + } + + /// Find an alias by the address if it's in the wallet. + pub fn find_alias_by_payment_addr( + &self, + payment_address: &PaymentAddress, + ) -> Option<&Alias> { + self.payment_addrs.get_by_right(payment_address) } /// Find the stored key by a public key. @@ -237,7 +245,7 @@ impl Store { } /// Get all known payment addresses by their alias. 
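A small usage sketch for the reverse lookup added to the wallet here. The import paths are assumptions and `Wallet` is generic over its `WalletIo` backend; the lookup itself is backed by the bidirectional payment address book introduced below.

```rust
use namada_core::types::masp::PaymentAddress;
use namada_sdk::wallet::Wallet;

// Returns true if the wallet already has an alias registered for this
// payment address.
fn is_known_payment_addr<U>(wallet: &Wallet<U>, pa: &PaymentAddress) -> bool {
    wallet.find_alias_by_payment_addr(pa).is_some()
}
```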
- pub fn get_payment_addrs(&self) -> &BTreeMap { + pub fn get_payment_addrs(&self) -> &BiBTreeMap { &self.payment_addrs } @@ -557,7 +565,7 @@ impl Store { /// Check if any map of the wallet contains the given alias pub fn contains_alias(&self, alias: &Alias) -> bool { - self.payment_addrs.contains_key(alias) + self.payment_addrs.contains_left(alias) || self.view_keys.contains_key(alias) || self.spend_keys.contains_key(alias) || self.secret_keys.contains_key(alias) @@ -568,8 +576,8 @@ impl Store { } /// Completely remove the given alias from all maps in the wallet - fn remove_alias(&mut self, alias: &Alias) { - self.payment_addrs.remove(alias); + pub fn remove_alias(&mut self, alias: &Alias) { + self.payment_addrs.remove_by_left(alias); self.view_keys.remove(alias); self.spend_keys.remove(alias); self.secret_keys.remove(alias); diff --git a/shared/src/ledger/native_vp/ethereum_bridge/bridge_pool_vp.rs b/shared/src/ledger/native_vp/ethereum_bridge/bridge_pool_vp.rs index 99dfe14b9e..6c32539e9a 100644 --- a/shared/src/ledger/native_vp/ethereum_bridge/bridge_pool_vp.rs +++ b/shared/src/ledger/native_vp/ethereum_bridge/bridge_pool_vp.rs @@ -23,7 +23,7 @@ use namada_core::ledger::eth_bridge::storage::bridge_pool::{ }; use namada_core::ledger::eth_bridge::storage::whitelist; use namada_core::ledger::eth_bridge::ADDRESS as BRIDGE_ADDRESS; -use namada_ethereum_bridge::parameters::read_native_erc20_address; +use namada_ethereum_bridge::storage::parameters::read_native_erc20_address; use namada_ethereum_bridge::storage::wrapped_erc20s; use crate::ledger::native_vp::{Ctx, NativeVp, StorageReader}; @@ -646,7 +646,7 @@ mod test_bridge_pool_vp { use namada_core::ledger::eth_bridge::storage::bridge_pool::get_signed_root_key; use namada_core::ledger::gas::TxGasMeter; use namada_core::types::address; - use namada_ethereum_bridge::parameters::{ + use namada_ethereum_bridge::storage::parameters::{ Contracts, EthereumBridgeParams, UpgradeableContract, }; diff --git a/shared/src/ledger/native_vp/ethereum_bridge/vp.rs b/shared/src/ledger/native_vp/ethereum_bridge/vp.rs index 9e98f18dbb..6c5855b61d 100644 --- a/shared/src/ledger/native_vp/ethereum_bridge/vp.rs +++ b/shared/src/ledger/native_vp/ethereum_bridge/vp.rs @@ -170,7 +170,7 @@ mod tests { use namada_core::ledger::eth_bridge::storage::wrapped_erc20s; use namada_core::ledger::gas::TxGasMeter; use namada_core::ledger::storage_api::StorageWrite; - use namada_ethereum_bridge::parameters::{ + use namada_ethereum_bridge::storage::parameters::{ Contracts, EthereumBridgeParams, UpgradeableContract, }; use rand::Rng; diff --git a/shared/src/ledger/native_vp/ibc/context.rs b/shared/src/ledger/native_vp/ibc/context.rs index ffe2faa2f5..27b9ee4324 100644 --- a/shared/src/ledger/native_vp/ibc/context.rs +++ b/shared/src/ledger/native_vp/ibc/context.rs @@ -3,23 +3,20 @@ use std::collections::{BTreeSet, HashMap, HashSet}; use borsh_ext::BorshSerializeExt; -use masp_primitives::transaction::Transaction; use namada_core::ledger::ibc::{IbcCommonContext, IbcStorageContext}; +use namada_core::ledger::masp_utils; use crate::ledger::ibc::storage::is_ibc_key; use crate::ledger::native_vp::CtxPreStorageRead; use crate::ledger::storage::write_log::StorageModification; use crate::ledger::storage::{self as ledger_storage, StorageHasher}; use crate::ledger::storage_api::{self, StorageRead, StorageWrite}; -use crate::types::address::{Address, InternalAddress, MASP}; +use crate::types::address::{Address, InternalAddress}; use crate::types::ibc::{IbcEvent, IbcShieldedTransfer}; use 
crate::types::storage::{ - BlockHash, BlockHeight, Epoch, Header, Key, KeySeg, TxIndex, -}; -use crate::types::token::{ - self, Amount, DenominatedAmount, Transfer, HEAD_TX_KEY, PIN_KEY_PREFIX, - TX_KEY_PREFIX, + BlockHash, BlockHeight, Epoch, Header, Key, TxIndex, }; +use crate::types::token::{self, Amount, DenominatedAmount}; use crate::vm::WasmCacheAccess; /// Result of a storage API call. @@ -202,49 +199,27 @@ where token: &Address, amount: DenominatedAmount, ) -> Result<()> { + let amount = amount.to_amount(token, self)?; let src_key = token::balance_key(token, src); let dest_key = token::balance_key(token, dest); let src_bal: Option = self.ctx.read(&src_key)?; let mut src_bal = src_bal.expect("The source has no balance"); - src_bal.spend(&amount.amount); + src_bal.spend(&amount); let mut dest_bal: Amount = self.ctx.read(&dest_key)?.unwrap_or_default(); - dest_bal.receive(&amount.amount); + dest_bal.receive(&amount); self.write(&src_key, src_bal.serialize_to_vec())?; self.write(&dest_key, dest_bal.serialize_to_vec()) } fn handle_masp_tx(&mut self, shielded: &IbcShieldedTransfer) -> Result<()> { - let masp_addr = MASP; - let head_tx_key = Key::from(masp_addr.to_db_key()) - .push(&HEAD_TX_KEY.to_owned()) - .expect("Cannot obtain a storage key"); - let current_tx_idx: u64 = - self.ctx.read(&head_tx_key).unwrap_or(None).unwrap_or(0); - let current_tx_key = Key::from(masp_addr.to_db_key()) - .push(&(TX_KEY_PREFIX.to_owned() + ¤t_tx_idx.to_string())) - .expect("Cannot obtain a storage key"); - // Save the Transfer object and its location within the blockchain - // so that clients do not have to separately look these - // up - let record: (Epoch, BlockHeight, TxIndex, Transfer, Transaction) = ( - self.ctx.get_block_epoch()?, - self.ctx.get_block_height()?, - self.ctx.get_tx_index()?, - shielded.transfer.clone(), - shielded.masp_tx.clone(), - ); - self.write(¤t_tx_key, record.serialize_to_vec())?; - self.write(&head_tx_key, (current_tx_idx + 1).serialize_to_vec())?; - // If storage key has been supplied, then pin this transaction to it - if let Some(key) = &shielded.transfer.key { - let pin_key = Key::from(masp_addr.to_db_key()) - .push(&(PIN_KEY_PREFIX.to_owned() + key)) - .expect("Cannot obtain a storage key"); - self.write(&pin_key, current_tx_idx.serialize_to_vec())?; - } - Ok(()) + masp_utils::handle_masp_tx( + self, + &shielded.transfer, + &shielded.masp_tx, + )?; + masp_utils::update_note_commitment_tree(self, &shielded.masp_tx) } fn mint_token( @@ -253,15 +228,16 @@ where token: &Address, amount: DenominatedAmount, ) -> Result<()> { + let amount = amount.to_amount(token, self)?; let target_key = token::balance_key(token, target); let mut target_bal: Amount = self.ctx.read(&target_key)?.unwrap_or_default(); - target_bal.receive(&amount.amount); + target_bal.receive(&amount); let minted_key = token::minted_balance_key(token); let mut minted_bal: Amount = self.ctx.read(&minted_key)?.unwrap_or_default(); - minted_bal.receive(&amount.amount); + minted_bal.receive(&amount); self.write(&target_key, target_bal.serialize_to_vec())?; self.write(&minted_key, minted_bal.serialize_to_vec())?; @@ -279,15 +255,16 @@ where token: &Address, amount: DenominatedAmount, ) -> Result<()> { + let amount = amount.to_amount(token, self)?; let target_key = token::balance_key(token, target); let mut target_bal: Amount = self.ctx.read(&target_key)?.unwrap_or_default(); - target_bal.spend(&amount.amount); + target_bal.spend(&amount); let minted_key = token::minted_balance_key(token); let mut minted_bal: Amount = 
self.ctx.read(&minted_key)?.unwrap_or_default(); - minted_bal.spend(&amount.amount); + minted_bal.spend(&amount); self.write(&target_key, target_bal.serialize_to_vec())?; self.write(&minted_key, minted_bal.serialize_to_vec()) diff --git a/shared/src/ledger/native_vp/ibc/mod.rs b/shared/src/ledger/native_vp/ibc/mod.rs index a4ca254537..e4778ff82f 100644 --- a/shared/src/ledger/native_vp/ibc/mod.rs +++ b/shared/src/ledger/native_vp/ibc/mod.rs @@ -19,7 +19,7 @@ use namada_core::ledger::storage::{self as ledger_storage, StorageHasher}; use namada_core::proto::Tx; use namada_core::types::address::Address; use namada_core::types::storage::Key; -use namada_proof_of_stake::read_pos_params; +use namada_proof_of_stake::storage::read_pos_params; use thiserror::Error; use crate::ibc::core::host::types::identifiers::ChainId as IbcChainId; diff --git a/shared/src/ledger/native_vp/masp.rs b/shared/src/ledger/native_vp/masp.rs index 50362bae29..31ee79848b 100644 --- a/shared/src/ledger/native_vp/masp.rs +++ b/shared/src/ledger/native_vp/masp.rs @@ -1,20 +1,29 @@ //! MASP native VP use std::cmp::Ordering; -use std::collections::BTreeSet; +use std::collections::{BTreeSet, HashSet}; use borsh_ext::BorshSerializeExt; use masp_primitives::asset_type::AssetType; +use masp_primitives::merkle_tree::CommitmentTree; +use masp_primitives::sapling::Node; use masp_primitives::transaction::components::I128Sum; +use masp_primitives::transaction::Transaction; use namada_core::ledger::gas::MASP_VERIFY_SHIELDED_TX_GAS; use namada_core::ledger::storage; use namada_core::ledger::storage_api::OptionExt; use namada_core::ledger::vp_env::VpEnv; use namada_core::proto::Tx; -use namada_core::types::address::Address; use namada_core::types::address::InternalAddress::Masp; -use namada_core::types::storage::{Epoch, Key}; -use namada_core::types::token; +use namada_core::types::address::{Address, MASP}; +use namada_core::types::storage::{BlockHeight, Epoch, Key, KeySeg, TxIndex}; +use namada_core::types::token::{ + self, is_masp_allowed_key, is_masp_key, is_masp_nullifier_key, + is_masp_tx_pin_key, is_masp_tx_prefix_key, Transfer, HEAD_TX_KEY, + MASP_CONVERT_ANCHOR_KEY, MASP_NOTE_COMMITMENT_ANCHOR_PREFIX, + MASP_NOTE_COMMITMENT_TREE_KEY, MASP_NULLIFIERS_KEY, PIN_KEY_PREFIX, + TX_KEY_PREFIX, +}; use namada_sdk::masp::verify_shielded_tx; use ripemd::Digest as RipemdDigest; use sha2::Digest as Sha2Digest; @@ -45,7 +54,7 @@ where pub ctx: Ctx<'a, DB, H, CA>, } -/// Generates the current asset type given the current epoch and an +/// Generates the current asset type given the provided epoch and an /// unique token address fn asset_type_from_epoched_address( epoch: Epoch, @@ -58,23 +67,6 @@ fn asset_type_from_epoched_address( AssetType::new(token_bytes.as_ref()).expect("unable to create asset type") } -/// Checks if the asset type matches the expected asset type, Adds a -/// debug log if the values do not match. 
-fn valid_asset_type( - asset_type: &AssetType, - asset_type_to_test: &AssetType, -) -> bool { - let res = - asset_type.get_identifier() == asset_type_to_test.get_identifier(); - if !res { - tracing::debug!( - "The asset type must be derived from the token address and \ - current epoch" - ); - } - res -} - /// Checks if the reported transparent amount and the unshielded /// values agree, if not adds to the debug log fn valid_transfer_amount( @@ -99,13 +91,281 @@ fn convert_amount( token: &Address, val: token::Amount, denom: token::MaspDenom, -) -> (AssetType, I128Sum) { +) -> I128Sum { let asset_type = asset_type_from_epoched_address(epoch, token, denom); // Combine the value and unit into one amount - let amount = - I128Sum::from_nonnegative(asset_type, denom.denominate(&val) as i128) - .expect("invalid value or asset type for amount"); - (asset_type, amount) + + I128Sum::from_nonnegative(asset_type, denom.denominate(&val) as i128) + .expect("invalid value or asset type for amount") +} + +impl<'a, DB, H, CA> MaspVp<'a, DB, H, CA> +where + DB: 'static + storage::DB + for<'iter> storage::DBIter<'iter>, + H: 'static + storage::StorageHasher, + CA: 'static + WasmCacheAccess, +{ + // Check that the transaction correctly revealed the nullifiers + fn valid_nullifiers_reveal( + &self, + keys_changed: &BTreeSet, + transaction: &Transaction, + ) -> Result { + let mut revealed_nullifiers = HashSet::new(); + let shielded_spends = match transaction.sapling_bundle() { + Some(bundle) if !bundle.shielded_spends.is_empty() => { + &bundle.shielded_spends + } + _ => { + tracing::debug!( + "Missing expected spend descriptions in shielded \ + transaction" + ); + return Ok(false); + } + }; + + for description in shielded_spends { + let nullifier_key = Key::from(MASP.to_db_key()) + .push(&MASP_NULLIFIERS_KEY.to_owned()) + .expect("Cannot obtain a storage key") + .push(&namada_core::types::hash::Hash(description.nullifier.0)) + .expect("Cannot obtain a storage key"); + if self.ctx.has_key_pre(&nullifier_key)? + || revealed_nullifiers.contains(&nullifier_key) + { + tracing::debug!( + "MASP double spending attempt, the nullifier {:#?} has \ + already been revealed previously", + description.nullifier.0 + ); + return Ok(false); + } + + // Check that the nullifier is indeed committed (no temp write + // and no delete) and carries no associated data (the latter not + // strictly necessary for validation, but we don't expect any + // value for this key anyway) + match self.ctx.read_bytes_post(&nullifier_key)? 
{ + Some(value) if value.is_empty() => (), + _ => return Ok(false), + } + + revealed_nullifiers.insert(nullifier_key); + } + + for nullifier_key in + keys_changed.iter().filter(|key| is_masp_nullifier_key(key)) + { + if !revealed_nullifiers.contains(nullifier_key) { + tracing::debug!( + "An unexpected MASP nullifier key {nullifier_key} has \ + been revealed by the transaction" + ); + return Ok(false); + } + } + + Ok(true) + } + + // Check that a transaction carrying output descriptions correctly updates + // the tree and anchor in storage + fn valid_note_commitment_update( + &self, + transaction: &Transaction, + ) -> Result { + // Check that the merkle tree in storage has been correctly updated with + // the output descriptions cmu + let tree_key = Key::from(MASP.to_db_key()) + .push(&MASP_NOTE_COMMITMENT_TREE_KEY.to_owned()) + .expect("Cannot obtain a storage key"); + let mut previous_tree: CommitmentTree = + self.ctx.read_pre(&tree_key)?.ok_or(Error::NativeVpError( + native_vp::Error::SimpleMessage("Cannot read storage"), + ))?; + let post_tree: CommitmentTree = + self.ctx.read_post(&tree_key)?.ok_or(Error::NativeVpError( + native_vp::Error::SimpleMessage("Cannot read storage"), + ))?; + + // Based on the output descriptions of the transaction, update the + // previous tree in storage + for description in transaction + .sapling_bundle() + .map_or(&vec![], |bundle| &bundle.shielded_outputs) + { + previous_tree + .append(Node::from_scalar(description.cmu)) + .map_err(|()| { + Error::NativeVpError(native_vp::Error::SimpleMessage( + "Failed to update the commitment tree", + )) + })?; + } + // Check that the updated previous tree matches the actual post tree. + // This verifies that all and only the necessary notes have been + // appended to the tree + if previous_tree != post_tree { + tracing::debug!("The note commitment tree was incorrectly updated"); + return Ok(false); + } + + Ok(true) + } + + // Check that the spend descriptions anchors of a transaction are valid + fn valid_spend_descriptions_anchor( + &self, + transaction: &Transaction, + ) -> Result { + let shielded_spends = match transaction.sapling_bundle() { + Some(bundle) if !bundle.shielded_spends.is_empty() => { + &bundle.shielded_spends + } + _ => { + tracing::debug!( + "Missing expected spend descriptions in shielded \ + transaction" + ); + return Ok(false); + } + }; + + for description in shielded_spends { + let anchor_key = Key::from(MASP.to_db_key()) + .push(&MASP_NOTE_COMMITMENT_ANCHOR_PREFIX.to_owned()) + .expect("Cannot obtain a storage key") + .push(&namada_core::types::hash::Hash( + description.anchor.to_bytes(), + )) + .expect("Cannot obtain a storage key"); + + // Check if the provided anchor was published before + if !self.ctx.has_key_pre(&anchor_key)? { + tracing::debug!( + "Spend description refers to an invalid anchor" + ); + return Ok(false); + } + } + + Ok(true) + } + + // Check that the convert descriptions anchors of a transaction are valid + fn valid_convert_descriptions_anchor( + &self, + transaction: &Transaction, + ) -> Result { + if let Some(bundle) = transaction.sapling_bundle() { + if !bundle.shielded_converts.is_empty() { + let anchor_key = Key::from(MASP.to_db_key()) + .push(&MASP_CONVERT_ANCHOR_KEY.to_owned()) + .expect("Cannot obtain a storage key"); + let expected_anchor = self + .ctx + .read_pre::(&anchor_key)? 
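The core of the note commitment check above is: replay the transaction's output notes onto the pre-state tree and require equality with the post-state tree, so exactly the new notes (and nothing else) were appended. A sketch using only the `masp_primitives` types imported in this diff (the helper name is ours):

use masp_primitives::merkle_tree::CommitmentTree;
use masp_primitives::sapling::Node;
use masp_primitives::transaction::Transaction;

fn tree_correctly_updated(
    mut pre_tree: CommitmentTree<Node>,
    post_tree: &CommitmentTree<Node>,
    transaction: &Transaction,
) -> bool {
    if let Some(bundle) = transaction.sapling_bundle() {
        for out in &bundle.shielded_outputs {
            // Append every new note commitment to the pre-state tree
            if pre_tree.append(Node::from_scalar(out.cmu)).is_err() {
                return false;
            }
        }
    }
    // All and only the new notes must have been appended
    &pre_tree == post_tree
}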
+ .ok_or(Error::NativeVpError( + native_vp::Error::SimpleMessage("Cannot read storage"), + ))?; + + for description in &bundle.shielded_converts { + // Check if the provided anchor matches the current + // conversion tree's one + if namada_core::types::hash::Hash( + description.anchor.to_bytes(), + ) != expected_anchor + { + tracing::debug!( + "Convert description refers to an invalid anchor" + ); + return Ok(false); + } + } + } + } + + Ok(true) + } + + /// Check the correctness of the general storage changes that pertain to all + /// types of masp transfers + fn valid_state( + &self, + keys_changed: &BTreeSet, + transfer: &Transfer, + transaction: &Transaction, + ) -> Result { + // Check that the transaction didn't write unallowed masp keys, nor + // multiple variations of the same key prefixes + let mut found_tx_key = false; + let mut found_pin_key = false; + for key in keys_changed.iter().filter(|key| is_masp_key(key)) { + if !is_masp_allowed_key(key) { + return Ok(false); + } else if is_masp_tx_prefix_key(key) { + if found_tx_key { + return Ok(false); + } else { + found_tx_key = true; + } + } else if is_masp_tx_pin_key(key) { + if found_pin_key { + return Ok(false); + } else { + found_pin_key = true; + } + } + } + + // Validate head tx + let head_tx_key = Key::from(MASP.to_db_key()) + .push(&HEAD_TX_KEY.to_owned()) + .expect("Cannot obtain a storage key"); + let pre_head: u64 = self.ctx.read_pre(&head_tx_key)?.unwrap_or(0); + let post_head: u64 = self.ctx.read_post(&head_tx_key)?.unwrap_or(0); + + if post_head != pre_head + 1 { + return Ok(false); + } + + // Validate tx key + let current_tx_key = Key::from(MASP.to_db_key()) + .push(&(TX_KEY_PREFIX.to_owned() + &pre_head.to_string())) + .expect("Cannot obtain a storage key"); + match self + .ctx + .read_post::<(Epoch, BlockHeight, TxIndex, Transfer, Transaction)>( + ¤t_tx_key, + )? { + Some(( + epoch, + height, + tx_index, + storage_transfer, + storage_transaction, + )) if (epoch == self.ctx.get_block_epoch()? + && height == self.ctx.get_block_height()? + && tx_index == self.ctx.get_tx_index()? + && &storage_transfer == transfer + && &storage_transaction == transaction) => {} + _ => return Ok(false), + } + + // Validate pin key + if let Some(key) = &transfer.key { + let pin_key = Key::from(MASP.to_db_key()) + .push(&(PIN_KEY_PREFIX.to_owned() + key)) + .expect("Cannot obtain a storage key"); + match self.ctx.read_post::(&pin_key)? { + Some(tx_idx) if tx_idx == pre_head => (), + _ => return Ok(false), + } + } + + Ok(true) + } } impl<'a, DB, H, CA> NativeVp for MaspVp<'a, DB, H, CA> @@ -119,31 +379,46 @@ where fn validate_tx( &self, tx_data: &Tx, - _keys_changed: &BTreeSet, + keys_changed: &BTreeSet, _verifiers: &BTreeSet
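The `valid_state` checks above revolve around a handful of storage keys under the MASP internal address. As an illustration, this is how the head tx key is derived (identical to the code above; the free function wrapper is ours), together with the counter rule the VP enforces:

use namada_core::types::address::MASP;
use namada_core::types::storage::{Key, KeySeg};
use namada_core::types::token::HEAD_TX_KEY;

fn head_tx_key() -> Key {
    Key::from(MASP.to_db_key())
        .push(&HEAD_TX_KEY.to_owned())
        .expect("Cannot obtain a storage key")
}

// A valid MASP transfer must bump the head counter by exactly one
// (post_head == pre_head + 1), and the record written under
// TX_KEY_PREFIX + pre_head must match the (Epoch, BlockHeight, TxIndex,
// Transfer, Transaction) tuple of the transaction being validated.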
, ) -> Result { let epoch = self.ctx.get_block_epoch()?; let (transfer, shielded_tx) = self.ctx.get_shielded_action(tx_data)?; + let transfer_amount = transfer + .amount + .to_amount(&transfer.token, &self.ctx.pre())?; let mut transparent_tx_pool = I128Sum::zero(); // The Sapling value balance adds to the transparent tx pool transparent_tx_pool += shielded_tx.sapling_value_balance(); + if !self.valid_state(keys_changed, &transfer, &shielded_tx)? { + return Ok(false); + } + if transfer.source != Address::Internal(Masp) { // Handle transparent input // Note that the asset type is timestamped so shields // where the shielded value has an incorrect timestamp // are automatically rejected for denom in token::MaspDenom::iter() { - let (_transp_asset, transp_amt) = convert_amount( + let transp_amt = convert_amount( epoch, &transfer.token, - transfer.amount.into(), + transfer_amount, denom, ); // Non-masp sources add to transparent tx pool transparent_tx_pool += transp_amt; } + + // No shielded spends nor shielded converts are allowed + if shielded_tx.sapling_bundle().is_some_and(|bundle| { + !(bundle.shielded_spends.is_empty() + && bundle.shielded_converts.is_empty()) + }) { + return Ok(false); + } } else { // Handle shielded input // The following boundary conditions must be satisfied @@ -151,6 +426,11 @@ where // 2. the transparent transaction value pool's amount must equal // the containing wrapper transaction's fee // amount Satisfies 1. + // 3. The spend descriptions' anchors are valid + // 4. The convert descriptions's anchors are valid + // 5. The nullifiers provided by the transaction have not been + // revealed previously (even in the same tx) and no unneeded + // nullifier is being revealed by the tx if let Some(transp_bundle) = shielded_tx.transparent_bundle() { if !transp_bundle.vin.is_empty() { tracing::debug!( @@ -161,6 +441,19 @@ where return Ok(false); } } + + if !(self.valid_spend_descriptions_anchor(&shielded_tx)? + && self.valid_convert_descriptions_anchor(&shielded_tx)? + && self.valid_nullifiers_reveal(keys_changed, &shielded_tx)?) + { + return Ok(false); + } + } + + // The transaction must correctly update the note commitment tree + // in storage with the new output descriptions + if !self.valid_note_commitment_update(&shielded_tx)? { + return Ok(false); } if transfer.target != Address::Internal(Masp) { @@ -196,31 +489,34 @@ where None => continue, }; - let expected_asset_type: AssetType = - asset_type_from_epoched_address( - epoch, - &transfer.token, - denom, - ); - // Satisfies 2. and 3. - if !valid_asset_type(&expected_asset_type, &out.asset_type) { - // we don't know which masp denoms are necessary - // apriori. This is encoded via - // the asset types. - continue; - } + let conversion_state = self.ctx.storage.get_conversion_state(); + let asset_epoch = + match conversion_state.assets.get(&out.asset_type) { + Some(((address, _), asset_epoch, _, _)) + if address == &transfer.token => + { + asset_epoch + } + _ => { + // we don't know which masp denoms are necessary + // apriori. This is encoded via + // the asset types. 
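For a transparent (non-MASP) source, the loop above folds the transfer into the value pool one `MaspDenom` slice at a time. A sketch of that accumulation, assuming the module-local `convert_amount` helper shown earlier in this file is in scope:

use masp_primitives::transaction::components::I128Sum;
use namada_core::types::address::Address;
use namada_core::types::storage::Epoch;
use namada_core::types::token;

fn add_transparent_input(
    pool: &mut I128Sum,
    epoch: Epoch,
    token: &Address,
    amount: token::Amount,
) {
    for denom in token::MaspDenom::iter() {
        // Each 64-bit slice of the 256-bit amount becomes its own epoched
        // asset type entry in the pool
        *pool += convert_amount(epoch, token, amount, denom);
    }
}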
+ continue; + } + }; + if !valid_transfer_amount( out.value, - denom.denominate(&transfer.amount.amount), + denom.denominate(&transfer_amount), ) { return Ok(false); } - let (_transp_asset, transp_amt) = convert_amount( - epoch, + let transp_amt = convert_amount( + *asset_epoch, &transfer.token, - transfer.amount.amount, + transfer_amount, denom, ); @@ -252,6 +548,7 @@ where // Handle shielded output // The following boundary conditions must be satisfied // 1. Zero transparent output + // 2. At least one shielded output // Satisfies 1. if let Some(transp_bundle) = shielded_tx.transparent_bundle() { @@ -264,6 +561,14 @@ where return Ok(false); } } + + // Staisfies 2. + if shielded_tx + .sapling_bundle() + .is_some_and(|bundle| bundle.shielded_outputs.is_empty()) + { + return Ok(false); + } } match transparent_tx_pool.partial_cmp(&I128Sum::zero()) { @@ -285,6 +590,7 @@ where } _ => {} } + // Verify the proofs and charge the gas for the expensive execution self.ctx .charge_gas(MASP_VERIFY_SHIELDED_TX_GAS) diff --git a/shared/src/ledger/pos/vp.rs b/shared/src/ledger/pos/vp.rs index 506ef489ca..298746340c 100644 --- a/shared/src/ledger/pos/vp.rs +++ b/shared/src/ledger/pos/vp.rs @@ -7,11 +7,11 @@ use namada_core::ledger::storage_api::governance; pub use namada_proof_of_stake; pub use namada_proof_of_stake::parameters::PosParams; // use namada_proof_of_stake::validation::validate; -use namada_proof_of_stake::read_pos_params; +use namada_proof_of_stake::storage::read_pos_params; +use namada_proof_of_stake::storage_key::is_params_key; pub use namada_proof_of_stake::types; use thiserror::Error; -use super::is_params_key; use crate::ledger::native_vp::{self, Ctx, NativeVp}; // use crate::ledger::pos::{ // is_validator_address_raw_hash_key, diff --git a/shared/src/ledger/protocol/mod.rs b/shared/src/ledger/protocol/mod.rs index c1f165a8bf..e51d073415 100644 --- a/shared/src/ledger/protocol/mod.rs +++ b/shared/src/ledger/protocol/mod.rs @@ -194,6 +194,7 @@ where vps_result: VpsResult::default(), initialized_accounts: vec![], ibc_events: BTreeSet::default(), + eth_bridge_events: BTreeSet::default(), }) } TxType::Decrypted(DecryptedTx::Undecryptable) => { @@ -405,6 +406,9 @@ where match wrapper.get_tx_fee() { Ok(fees) => { + let fees = fees + .to_amount(&wrapper.fee.token, wl_storage) + .map_err(|e| Error::FeeError(e.to_string()))?; if balance.checked_sub(fees).is_some() { token_transfer( wl_storage, @@ -450,11 +454,7 @@ where This shouldn't happen." ); - Err(Error::FeeError(format!( - "{}. 
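The wrapper fee handling above follows the same rule as the transfers: the denominated fee is first converted with `to_amount(&wrapper.fee.token, wl_storage)`, and execution is gated on a checked subtraction against the fee payer's balance. A minimal sketch of the check itself (error plumbing omitted):

use namada_core::types::token::Amount;

fn fee_is_covered(balance: Amount, fees: Amount) -> bool {
    // `fees` is the wrapper fee already converted to a raw Amount; a failed
    // checked_sub means the payer cannot cover the fee.
    balance.checked_sub(fees).is_some()
}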
All the available transparent funds have been moved to \ - the block proposer", - e - ))) + Err(Error::FeeError(format!("{}", e))) } } } @@ -530,6 +530,9 @@ where .get_tx_fee() .map_err(|e| Error::FeeError(e.to_string()))?; + let fees = fees + .to_amount(&wrapper.fee.token, wl_storage) + .map_err(|e| Error::FeeError(e.to_string()))?; if balance.checked_sub(fees).is_some() { Ok(()) } else { @@ -608,6 +611,7 @@ where vps_result, initialized_accounts, ibc_events, + eth_bridge_events: BTreeSet::default(), }) } @@ -1117,8 +1121,8 @@ mod tests { }; use namada_ethereum_bridge::storage::eth_bridge_queries::EthBridgeQueries; use namada_ethereum_bridge::storage::proof::EthereumProof; - use namada_ethereum_bridge::storage::vote_tallies; - use namada_ethereum_bridge::{bridge_pool_vp, test_utils}; + use namada_ethereum_bridge::storage::{vote_tallies, vp}; + use namada_ethereum_bridge::test_utils; use super::*; @@ -1207,7 +1211,7 @@ mod tests { (validator_b, validator_b_stake), ]), ); - bridge_pool_vp::init_storage(&mut wl_storage); + vp::bridge_pool::init_storage(&mut wl_storage); let root = wl_storage.ethbridge_queries().get_bridge_pool_root(); let nonce = wl_storage.ethbridge_queries().get_bridge_pool_nonce(); diff --git a/shared/src/ledger/storage/mod.rs b/shared/src/ledger/storage/mod.rs index 0578a8286c..84d751ca92 100644 --- a/shared/src/ledger/storage/mod.rs +++ b/shared/src/ledger/storage/mod.rs @@ -1,7 +1,5 @@ //! Ledger's state storage with key-value backed store and a merkle tree -#[cfg(any(test, feature = "testing"))] -pub use namada_core::ledger::storage::mockdb; pub use namada_core::ledger::storage::{ - traits, write_log, PrefixIter, WlStorage, *, + mockdb, traits, write_log, PrefixIter, WlStorage, *, }; diff --git a/shared/src/vm/host_env.rs b/shared/src/vm/host_env.rs index d29bd4b257..aa7399bc3a 100644 --- a/shared/src/vm/host_env.rs +++ b/shared/src/vm/host_env.rs @@ -10,7 +10,8 @@ use masp_primitives::transaction::Transaction; use namada_core::ledger::gas::{ GasMetering, TxGasMeter, MEMORY_ACCESS_GAS_PER_BYTE, }; -use namada_core::types::address::{ESTABLISHED_ADDRESS_BYTES_LEN, MASP}; +use namada_core::ledger::masp_utils; +use namada_core::types::address::ESTABLISHED_ADDRESS_BYTES_LEN; use namada_core::types::internal::KeyVal; use namada_core::types::storage::TX_INDEX_LENGTH; use namada_core::types::transaction::TxSentinel; @@ -32,10 +33,9 @@ use crate::types::address::{self, Address}; use crate::types::hash::Hash; use crate::types::ibc::{IbcEvent, IbcShieldedTransfer}; use crate::types::internal::HostEnvResult; -use crate::types::storage::{BlockHeight, Epoch, Key, KeySeg, TxIndex}; +use crate::types::storage::{BlockHeight, Epoch, Key, TxIndex}; use crate::types::token::{ is_any_minted_balance_key, is_any_minter_key, is_any_token_balance_key, - Transfer, HEAD_TX_KEY, PIN_KEY_PREFIX, TX_KEY_PREFIX, }; use crate::vm::memory::VmMemory; use crate::vm::prefix_iter::{PrefixIteratorId, PrefixIterators}; @@ -2165,6 +2165,41 @@ where } } +/// Appends the new note commitments to the tree in storage +pub fn tx_update_masp_note_commitment_tree( + env: &TxVmEnv, + transaction_ptr: u64, + transaction_len: u64, +) -> TxResult +where + MEM: VmMemory, + DB: storage::DB + for<'iter> storage::DBIter<'iter>, + H: StorageHasher, + CA: WasmCacheAccess, +{ + let _sentinel = unsafe { env.ctx.sentinel.get() }; + let _gas_meter = unsafe { env.ctx.gas_meter.get() }; + let (serialized_transaction, gas) = env + .memory + .read_bytes(transaction_ptr, transaction_len as _) + .map_err(|e| 
TxRuntimeError::MemoryError(Box::new(e)))?; + tx_charge_gas(env, gas)?; + let transaction = Transaction::try_from_slice(&serialized_transaction) + .map_err(TxRuntimeError::EncodingError)?; + + let mut ctx = env.ctx.clone(); + match masp_utils::update_note_commitment_tree(&mut ctx, &transaction) { + Ok(()) => Ok(HostEnvResult::Success.to_i64()), + Err(_) => { + // NOTE: sentinel for gas errors is already set by the + // update_note_commitment_tree function which in turn calls other + // host functions + Ok(HostEnvResult::Fail.to_i64()) + } + } +} + /// Evaluate a validity predicate with the given input data. pub fn vp_eval( env: &VpVmEnv<'static, MEM, DB, H, EVAL, CA>, @@ -2498,7 +2533,8 @@ where ) -> Result<(), storage_api::Error> { use namada_core::types::token; - if amount.amount != token::Amount::default() && src != dest { + let amount = amount.to_amount(token, self)?; + if amount != token::Amount::default() && src != dest { let src_key = token::balance_key(token, src); let dest_key = token::balance_key(token, dest); let src_bal = self.read::(&src_key)?; @@ -2506,10 +2542,10 @@ where self.log_string(format!("src {} has no balance", src_key)); unreachable!() }); - src_bal.spend(&amount.amount); + src_bal.spend(&amount); let mut dest_bal = self.read::(&dest_key)?.unwrap_or_default(); - dest_bal.receive(&amount.amount); + dest_bal.receive(&amount); self.write(&src_key, src_bal)?; self.write(&dest_key, dest_bal)?; } @@ -2520,35 +2556,12 @@ where &mut self, shielded: &IbcShieldedTransfer, ) -> Result<(), storage_api::Error> { - let masp_addr = MASP; - let head_tx_key = Key::from(masp_addr.to_db_key()) - .push(&HEAD_TX_KEY.to_owned()) - .expect("Cannot obtain a storage key"); - let current_tx_idx = - self.read::(&head_tx_key).unwrap_or(None).unwrap_or(0); - let current_tx_key = Key::from(masp_addr.to_db_key()) - .push(&(TX_KEY_PREFIX.to_owned() + &current_tx_idx.to_string())) - .expect("Cannot obtain a storage key"); - // Save the Transfer object and its location within the blockchain - // so that clients do not have to separately look these - // up - let record: (Epoch, BlockHeight, TxIndex, Transfer, Transaction) = ( - self.get_block_epoch()?, - self.get_block_height()?, - self.get_tx_index()?, - shielded.transfer.clone(), - shielded.masp_tx.clone(), - ); - self.write(&current_tx_key, record)?; - self.write(&head_tx_key, current_tx_idx + 1)?; - // If storage key has been supplied, then pin this transaction to it - if let Some(key) = &shielded.transfer.key { - let pin_key = Key::from(masp_addr.to_db_key()) - .push(&(PIN_KEY_PREFIX.to_owned() + key)) - .expect("Cannot obtain a storage key"); - self.write(&pin_key, current_tx_idx)?; - } - Ok(()) + masp_utils::handle_masp_tx( + self, + &shielded.transfer, + &shielded.masp_tx, + )?; + masp_utils::update_note_commitment_tree(self, &shielded.masp_tx) } fn mint_token( @@ -2559,15 +2572,16 @@ where ) -> Result<(), storage_api::Error> { use namada_core::types::token; + let amount = amount.to_amount(token, self)?; let target_key = token::balance_key(token, target); let mut target_bal = self.read::(&target_key)?.unwrap_or_default(); - target_bal.receive(&amount.amount); + target_bal.receive(&amount); let minted_key = token::minted_balance_key(token); let mut minted_bal = self.read::(&minted_key)?.unwrap_or_default(); - minted_bal.receive(&amount.amount); + minted_bal.receive(&amount); self.write(&target_key, target_bal)?; self.write(&minted_key, minted_bal)?; @@ -2587,16 +2601,17 @@ where ) -> Result<(), storage_api::Error> { use namada_core::types::token; + let
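The new host function above reports its outcome to the wasm guest through the usual `HostEnvResult` integer convention rather than trapping. Schematically (the wrapper function is ours, and the external import path for `HostEnvResult` is assumed to mirror the `crate::types::internal` path used in this file):

use namada::types::internal::HostEnvResult;

fn to_guest_result(ok: bool) -> i64 {
    if ok {
        HostEnvResult::Success.to_i64()
    } else {
        // Failure is returned as a value; gas and sentinel handling has
        // already happened inside `update_note_commitment_tree`
        HostEnvResult::Fail.to_i64()
    }
}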
amount = amount.to_amount(token, self)?; let target_key = token::balance_key(token, target); let mut target_bal = self.read::(&target_key)?.unwrap_or_default(); - target_bal.spend(&amount.amount); + target_bal.spend(&amount); // burn the minted amount let minted_key = token::minted_balance_key(token); let mut minted_bal = self.read::(&minted_key)?.unwrap_or_default(); - minted_bal.spend(&amount.amount); + minted_bal.spend(&amount); self.write(&target_key, target_bal)?; self.write(&minted_key, minted_bal) diff --git a/shared/src/vm/wasm/host_env.rs b/shared/src/vm/wasm/host_env.rs index 8c897f1a29..304a27a7f2 100644 --- a/shared/src/vm/wasm/host_env.rs +++ b/shared/src/vm/wasm/host_env.rs @@ -88,6 +88,7 @@ where "namada_tx_ibc_execute" => Function::new_native_with_env(wasm_store, env.clone(), host_env::tx_ibc_execute), "namada_tx_set_commitment_sentinel" => Function::new_native_with_env(wasm_store, env.clone(), host_env::tx_set_commitment_sentinel), "namada_tx_verify_tx_section_signature" => Function::new_native_with_env(wasm_store, env.clone(), host_env::tx_verify_tx_section_signature), + "namada_tx_update_masp_note_commitment_tree" => Function::new_native_with_env(wasm_store, env.clone(), host_env::tx_update_masp_note_commitment_tree) }, } } diff --git a/test_fixtures/masp_proofs/30E148B3F9E8D21A41ABB09756027024E9AC79985302DD15C96CB57743A74CC3.bin b/test_fixtures/masp_proofs/28A7EA5FE79BA929443DE88963FFC7D83CE95D11AC98C8B9343E5EED170119A9.bin similarity index 77% rename from test_fixtures/masp_proofs/30E148B3F9E8D21A41ABB09756027024E9AC79985302DD15C96CB57743A74CC3.bin rename to test_fixtures/masp_proofs/28A7EA5FE79BA929443DE88963FFC7D83CE95D11AC98C8B9343E5EED170119A9.bin index 95cd7edf2e..dab2128885 100644 Binary files a/test_fixtures/masp_proofs/30E148B3F9E8D21A41ABB09756027024E9AC79985302DD15C96CB57743A74CC3.bin and b/test_fixtures/masp_proofs/28A7EA5FE79BA929443DE88963FFC7D83CE95D11AC98C8B9343E5EED170119A9.bin differ diff --git a/test_fixtures/masp_proofs/8BA2DA741BF1FE1CDEC5295AE3ECBE1A9EAF3496A30D69069DE2FBD293EEC978.bin b/test_fixtures/masp_proofs/2E68959DFE3412D892C1EB6A83C733C557AA7C18902CB851BB4DB93A834ED187.bin similarity index 80% rename from test_fixtures/masp_proofs/8BA2DA741BF1FE1CDEC5295AE3ECBE1A9EAF3496A30D69069DE2FBD293EEC978.bin rename to test_fixtures/masp_proofs/2E68959DFE3412D892C1EB6A83C733C557AA7C18902CB851BB4DB93A834ED187.bin index b66df7cf31..d7589ef1fa 100644 Binary files a/test_fixtures/masp_proofs/8BA2DA741BF1FE1CDEC5295AE3ECBE1A9EAF3496A30D69069DE2FBD293EEC978.bin and b/test_fixtures/masp_proofs/2E68959DFE3412D892C1EB6A83C733C557AA7C18902CB851BB4DB93A834ED187.bin differ diff --git a/test_fixtures/masp_proofs/553C507BF748CC2353DCE98EB91B5464810F6974083CC2FEE2756ED1E97B8143.bin b/test_fixtures/masp_proofs/375F008787D3797A051D288892F6B0C21274416797B6508AE6C759537F947AC8.bin similarity index 80% rename from test_fixtures/masp_proofs/553C507BF748CC2353DCE98EB91B5464810F6974083CC2FEE2756ED1E97B8143.bin rename to test_fixtures/masp_proofs/375F008787D3797A051D288892F6B0C21274416797B6508AE6C759537F947AC8.bin index 60efe5b463..506df0e8de 100644 Binary files a/test_fixtures/masp_proofs/553C507BF748CC2353DCE98EB91B5464810F6974083CC2FEE2756ED1E97B8143.bin and b/test_fixtures/masp_proofs/375F008787D3797A051D288892F6B0C21274416797B6508AE6C759537F947AC8.bin differ diff --git a/test_fixtures/masp_proofs/51EF94B13138B91DB3081F95193F779253B53A9A0BD62FD3DF8F6FCF4AF1E145.bin b/test_fixtures/masp_proofs/51EF94B13138B91DB3081F95193F779253B53A9A0BD62FD3DF8F6FCF4AF1E145.bin new file 
mode 100644 index 0000000000..b142e6796d Binary files /dev/null and b/test_fixtures/masp_proofs/51EF94B13138B91DB3081F95193F779253B53A9A0BD62FD3DF8F6FCF4AF1E145.bin differ diff --git a/test_fixtures/masp_proofs/544F7432B95E26E312105FB804BF9BC175A70082068C8785BDDBF49DCCA4BE66.bin b/test_fixtures/masp_proofs/544F7432B95E26E312105FB804BF9BC175A70082068C8785BDDBF49DCCA4BE66.bin new file mode 100644 index 0000000000..3952963e0e Binary files /dev/null and b/test_fixtures/masp_proofs/544F7432B95E26E312105FB804BF9BC175A70082068C8785BDDBF49DCCA4BE66.bin differ diff --git a/test_fixtures/masp_proofs/F0471ECBD3AF04B4A373D2966781D5DEF92B9A9BB3A159947560514682CC3877.bin b/test_fixtures/masp_proofs/68DE980FCC7CC858B090D50340BAE37AFDAA28B0701ED5D313EA859FC8153343.bin similarity index 79% rename from test_fixtures/masp_proofs/F0471ECBD3AF04B4A373D2966781D5DEF92B9A9BB3A159947560514682CC3877.bin rename to test_fixtures/masp_proofs/68DE980FCC7CC858B090D50340BAE37AFDAA28B0701ED5D313EA859FC8153343.bin index 8166bb7656..6c150f6d2c 100644 Binary files a/test_fixtures/masp_proofs/F0471ECBD3AF04B4A373D2966781D5DEF92B9A9BB3A159947560514682CC3877.bin and b/test_fixtures/masp_proofs/68DE980FCC7CC858B090D50340BAE37AFDAA28B0701ED5D313EA859FC8153343.bin differ diff --git a/test_fixtures/masp_proofs/6DD5A788D36258E9D5FEF454DBC89879834677F926130D56EB5E4067728EB5CF.bin b/test_fixtures/masp_proofs/6DD5A788D36258E9D5FEF454DBC89879834677F926130D56EB5E4067728EB5CF.bin new file mode 100644 index 0000000000..d6777132a3 Binary files /dev/null and b/test_fixtures/masp_proofs/6DD5A788D36258E9D5FEF454DBC89879834677F926130D56EB5E4067728EB5CF.bin differ diff --git a/test_fixtures/masp_proofs/6E92D0B97A65FB5ADFA6371A6CBE50202AF004DFD895A721EDA284D96B253ACC.bin b/test_fixtures/masp_proofs/6E92D0B97A65FB5ADFA6371A6CBE50202AF004DFD895A721EDA284D96B253ACC.bin new file mode 100644 index 0000000000..ffea35c6f0 Binary files /dev/null and b/test_fixtures/masp_proofs/6E92D0B97A65FB5ADFA6371A6CBE50202AF004DFD895A721EDA284D96B253ACC.bin differ diff --git a/test_fixtures/masp_proofs/8032CA7B951C625E43F48AEBD53CEA99A9BC66B2BAB03D4ABA1AE57B12596061.bin b/test_fixtures/masp_proofs/8032CA7B951C625E43F48AEBD53CEA99A9BC66B2BAB03D4ABA1AE57B12596061.bin new file mode 100644 index 0000000000..cce1836fd8 Binary files /dev/null and b/test_fixtures/masp_proofs/8032CA7B951C625E43F48AEBD53CEA99A9BC66B2BAB03D4ABA1AE57B12596061.bin differ diff --git a/test_fixtures/masp_proofs/4B412E2EA5AC98758E696AB36327E2CFFC5F8055CE2E2FAFB4DE8E7C2216D5F8.bin b/test_fixtures/masp_proofs/852AFF2FF8758999DA709605017C1886347E49C18C8E0F35D25956CA06390B17.bin similarity index 77% rename from test_fixtures/masp_proofs/4B412E2EA5AC98758E696AB36327E2CFFC5F8055CE2E2FAFB4DE8E7C2216D5F8.bin rename to test_fixtures/masp_proofs/852AFF2FF8758999DA709605017C1886347E49C18C8E0F35D25956CA06390B17.bin index 27e6acb10f..80ce280428 100644 Binary files a/test_fixtures/masp_proofs/4B412E2EA5AC98758E696AB36327E2CFFC5F8055CE2E2FAFB4DE8E7C2216D5F8.bin and b/test_fixtures/masp_proofs/852AFF2FF8758999DA709605017C1886347E49C18C8E0F35D25956CA06390B17.bin differ diff --git a/test_fixtures/masp_proofs/8A79BF5E0292339E70287DB626F136D4522BAFC99E0E81FD54A28A38E8419CFC.bin b/test_fixtures/masp_proofs/8A79BF5E0292339E70287DB626F136D4522BAFC99E0E81FD54A28A38E8419CFC.bin new file mode 100644 index 0000000000..c74348c48c Binary files /dev/null and b/test_fixtures/masp_proofs/8A79BF5E0292339E70287DB626F136D4522BAFC99E0E81FD54A28A38E8419CFC.bin differ diff --git 
a/test_fixtures/masp_proofs/C506FA6C2EFB37B06CD5A473AF5CC6F78791F8954A0219C07B15344AEE0D2E0F.bin b/test_fixtures/masp_proofs/9267939E4DFBF958BF98337A9099BB39683C7B8AAB924B267369B3143A3FBF89.bin similarity index 73% rename from test_fixtures/masp_proofs/C506FA6C2EFB37B06CD5A473AF5CC6F78791F8954A0219C07B15344AEE0D2E0F.bin rename to test_fixtures/masp_proofs/9267939E4DFBF958BF98337A9099BB39683C7B8AAB924B267369B3143A3FBF89.bin index 2c2038c810..57a0d2c390 100644 Binary files a/test_fixtures/masp_proofs/C506FA6C2EFB37B06CD5A473AF5CC6F78791F8954A0219C07B15344AEE0D2E0F.bin and b/test_fixtures/masp_proofs/9267939E4DFBF958BF98337A9099BB39683C7B8AAB924B267369B3143A3FBF89.bin differ diff --git a/test_fixtures/masp_proofs/94DF56F3CCC7E6F588F0CB82C2ABE40E759AD9C4F4F6475A419BFD56CE76BA68.bin b/test_fixtures/masp_proofs/94DF56F3CCC7E6F588F0CB82C2ABE40E759AD9C4F4F6475A419BFD56CE76BA68.bin new file mode 100644 index 0000000000..a139159fa4 Binary files /dev/null and b/test_fixtures/masp_proofs/94DF56F3CCC7E6F588F0CB82C2ABE40E759AD9C4F4F6475A419BFD56CE76BA68.bin differ diff --git a/test_fixtures/masp_proofs/A2385FC511BDA00B7E8605BF05958D4A1929918AB0A43EEAE93AE8BBC515E18C.bin b/test_fixtures/masp_proofs/A2385FC511BDA00B7E8605BF05958D4A1929918AB0A43EEAE93AE8BBC515E18C.bin new file mode 100644 index 0000000000..dba4c4a242 Binary files /dev/null and b/test_fixtures/masp_proofs/A2385FC511BDA00B7E8605BF05958D4A1929918AB0A43EEAE93AE8BBC515E18C.bin differ diff --git a/test_fixtures/masp_proofs/A312CDD49C05B7C768F5DAF708C010E6D95775165E4FC619A11DCFDB59E21D30.bin b/test_fixtures/masp_proofs/A312CDD49C05B7C768F5DAF708C010E6D95775165E4FC619A11DCFDB59E21D30.bin new file mode 100644 index 0000000000..67decdde40 Binary files /dev/null and b/test_fixtures/masp_proofs/A312CDD49C05B7C768F5DAF708C010E6D95775165E4FC619A11DCFDB59E21D30.bin differ diff --git a/test_fixtures/masp_proofs/A8A9963AC2983B576BAE4DE9BA6D2CF14C7F9E90A8588BA75DA0D2860F36E2CB.bin b/test_fixtures/masp_proofs/A8A9963AC2983B576BAE4DE9BA6D2CF14C7F9E90A8588BA75DA0D2860F36E2CB.bin new file mode 100644 index 0000000000..9e942f81f5 Binary files /dev/null and b/test_fixtures/masp_proofs/A8A9963AC2983B576BAE4DE9BA6D2CF14C7F9E90A8588BA75DA0D2860F36E2CB.bin differ diff --git a/test_fixtures/masp_proofs/BB9AA71A4227C9E62948CBA8DB4A5C2232840271F032F756953233DB3E53E757.bin b/test_fixtures/masp_proofs/AA4BBAF45B9610AD4D2BCBDDF61E860D9B5DF041F4645BFC329BE9A03AABFE47.bin similarity index 74% rename from test_fixtures/masp_proofs/BB9AA71A4227C9E62948CBA8DB4A5C2232840271F032F756953233DB3E53E757.bin rename to test_fixtures/masp_proofs/AA4BBAF45B9610AD4D2BCBDDF61E860D9B5DF041F4645BFC329BE9A03AABFE47.bin index 4764bda55d..c368a61e3e 100644 Binary files a/test_fixtures/masp_proofs/BB9AA71A4227C9E62948CBA8DB4A5C2232840271F032F756953233DB3E53E757.bin and b/test_fixtures/masp_proofs/AA4BBAF45B9610AD4D2BCBDDF61E860D9B5DF041F4645BFC329BE9A03AABFE47.bin differ diff --git a/test_fixtures/masp_proofs/AEA19C9B07742FF5F6D759B171396732D2EBF77728D2772EB251123DF2CEF6A1.bin b/test_fixtures/masp_proofs/AEA19C9B07742FF5F6D759B171396732D2EBF77728D2772EB251123DF2CEF6A1.bin new file mode 100644 index 0000000000..6cba86af9a Binary files /dev/null and b/test_fixtures/masp_proofs/AEA19C9B07742FF5F6D759B171396732D2EBF77728D2772EB251123DF2CEF6A1.bin differ diff --git a/test_fixtures/masp_proofs/C788F9057C615CCE7B260BB8BF5CF776D5DE7C9153D67B7FDF22ED6F35558738.bin b/test_fixtures/masp_proofs/C788F9057C615CCE7B260BB8BF5CF776D5DE7C9153D67B7FDF22ED6F35558738.bin new file mode 100644 index 
0000000000..a22c9f2199 Binary files /dev/null and b/test_fixtures/masp_proofs/C788F9057C615CCE7B260BB8BF5CF776D5DE7C9153D67B7FDF22ED6F35558738.bin differ diff --git a/test_fixtures/masp_proofs/473EDF0B2908F047110AC52C7F7ECD8CFD237A237C63AA565FC44893414EE7FC.bin b/test_fixtures/masp_proofs/D32DFDE8713AB8AAD01125856B8F934075535EBBEC567084302C3D9B469B83FA.bin similarity index 76% rename from test_fixtures/masp_proofs/473EDF0B2908F047110AC52C7F7ECD8CFD237A237C63AA565FC44893414EE7FC.bin rename to test_fixtures/masp_proofs/D32DFDE8713AB8AAD01125856B8F934075535EBBEC567084302C3D9B469B83FA.bin index 2b6a1976a2..a5ec743370 100644 Binary files a/test_fixtures/masp_proofs/473EDF0B2908F047110AC52C7F7ECD8CFD237A237C63AA565FC44893414EE7FC.bin and b/test_fixtures/masp_proofs/D32DFDE8713AB8AAD01125856B8F934075535EBBEC567084302C3D9B469B83FA.bin differ diff --git a/test_fixtures/masp_proofs/DEDF664AD06184041515677A72699E65D4B40A4E1BC9E7A32D63CF28466A2F20.bin b/test_fixtures/masp_proofs/DA1D4780CBA612D7CBA0004D16FF5FEC42C100CEB0B19DC6C47DBFE88D42FFFC.bin similarity index 70% rename from test_fixtures/masp_proofs/DEDF664AD06184041515677A72699E65D4B40A4E1BC9E7A32D63CF28466A2F20.bin rename to test_fixtures/masp_proofs/DA1D4780CBA612D7CBA0004D16FF5FEC42C100CEB0B19DC6C47DBFE88D42FFFC.bin index 456bc93bb4..9ed88be93f 100644 Binary files a/test_fixtures/masp_proofs/DEDF664AD06184041515677A72699E65D4B40A4E1BC9E7A32D63CF28466A2F20.bin and b/test_fixtures/masp_proofs/DA1D4780CBA612D7CBA0004D16FF5FEC42C100CEB0B19DC6C47DBFE88D42FFFC.bin differ diff --git a/test_fixtures/masp_proofs/978C35E058808D61F0E265D72DE8DD6A8E6226394EA6E3DFE1CFC10F69C0ACE0.bin b/test_fixtures/masp_proofs/DC5FEEE0E4971DF2083A9D17D645FC957F905749C47816C307B4B8D580DAE5D9.bin similarity index 78% rename from test_fixtures/masp_proofs/978C35E058808D61F0E265D72DE8DD6A8E6226394EA6E3DFE1CFC10F69C0ACE0.bin rename to test_fixtures/masp_proofs/DC5FEEE0E4971DF2083A9D17D645FC957F905749C47816C307B4B8D580DAE5D9.bin index aa8cdbab4d..b502e0a0bf 100644 Binary files a/test_fixtures/masp_proofs/978C35E058808D61F0E265D72DE8DD6A8E6226394EA6E3DFE1CFC10F69C0ACE0.bin and b/test_fixtures/masp_proofs/DC5FEEE0E4971DF2083A9D17D645FC957F905749C47816C307B4B8D580DAE5D9.bin differ diff --git a/test_fixtures/masp_proofs/4CCB9ADD3188CD893508CBB4FCB62D08DAA52F4DE496209BDAEFD36E75EAE98D.bin b/test_fixtures/masp_proofs/E3409A9853B0ECDBE4147AD52CF288D9729C50CA4AE4D1635C6D82007461517F.bin similarity index 71% rename from test_fixtures/masp_proofs/4CCB9ADD3188CD893508CBB4FCB62D08DAA52F4DE496209BDAEFD36E75EAE98D.bin rename to test_fixtures/masp_proofs/E3409A9853B0ECDBE4147AD52CF288D9729C50CA4AE4D1635C6D82007461517F.bin index d1072e6e0e..d0fc8f7171 100644 Binary files a/test_fixtures/masp_proofs/4CCB9ADD3188CD893508CBB4FCB62D08DAA52F4DE496209BDAEFD36E75EAE98D.bin and b/test_fixtures/masp_proofs/E3409A9853B0ECDBE4147AD52CF288D9729C50CA4AE4D1635C6D82007461517F.bin differ diff --git a/test_fixtures/masp_proofs/E7F3B43D776427F6570F4EF1600F74AF27826AB075ECA82AE64F488CC7D7B99D.bin b/test_fixtures/masp_proofs/E7F3B43D776427F6570F4EF1600F74AF27826AB075ECA82AE64F488CC7D7B99D.bin new file mode 100644 index 0000000000..00154939a1 Binary files /dev/null and b/test_fixtures/masp_proofs/E7F3B43D776427F6570F4EF1600F74AF27826AB075ECA82AE64F488CC7D7B99D.bin differ diff --git a/test_fixtures/masp_proofs/AE4CEC9192B52E8CE5AB0C25936D2AEEF55A6F202D8EB4564017DB5BEF872107.bin b/test_fixtures/masp_proofs/E93FF3062E6FCF83381BEE364347FA3E6D650FE0B00B7DF477B409EEADFE64C8.bin similarity index 77% rename from 
test_fixtures/masp_proofs/AE4CEC9192B52E8CE5AB0C25936D2AEEF55A6F202D8EB4564017DB5BEF872107.bin rename to test_fixtures/masp_proofs/E93FF3062E6FCF83381BEE364347FA3E6D650FE0B00B7DF477B409EEADFE64C8.bin index dec88f4141..93c04891d7 100644 Binary files a/test_fixtures/masp_proofs/AE4CEC9192B52E8CE5AB0C25936D2AEEF55A6F202D8EB4564017DB5BEF872107.bin and b/test_fixtures/masp_proofs/E93FF3062E6FCF83381BEE364347FA3E6D650FE0B00B7DF477B409EEADFE64C8.bin differ diff --git a/test_fixtures/masp_proofs/D007D1734AD42D34174F04566899481AB5F7C8F57502C5EAB2BF7594EA6CED8F.bin b/test_fixtures/masp_proofs/ECBEE7807835F2DF39FA2924DC51842690C36EA86369DC2145AED8B139748042.bin similarity index 78% rename from test_fixtures/masp_proofs/D007D1734AD42D34174F04566899481AB5F7C8F57502C5EAB2BF7594EA6CED8F.bin rename to test_fixtures/masp_proofs/ECBEE7807835F2DF39FA2924DC51842690C36EA86369DC2145AED8B139748042.bin index bd69732482..aa956f7ebd 100644 Binary files a/test_fixtures/masp_proofs/D007D1734AD42D34174F04566899481AB5F7C8F57502C5EAB2BF7594EA6CED8F.bin and b/test_fixtures/masp_proofs/ECBEE7807835F2DF39FA2924DC51842690C36EA86369DC2145AED8B139748042.bin differ diff --git a/test_fixtures/masp_proofs/EDE8DC791D02098C5C199CAF2F6C8B75587507CA52981F48FD1FC9C5BE0BEE43.bin b/test_fixtures/masp_proofs/EDE8DC791D02098C5C199CAF2F6C8B75587507CA52981F48FD1FC9C5BE0BEE43.bin new file mode 100644 index 0000000000..c26a5a7cc6 Binary files /dev/null and b/test_fixtures/masp_proofs/EDE8DC791D02098C5C199CAF2F6C8B75587507CA52981F48FD1FC9C5BE0BEE43.bin differ diff --git a/tests/src/e2e/eth_bridge_tests.rs b/tests/src/e2e/eth_bridge_tests.rs index 11166037eb..b1ea2636f5 100644 --- a/tests/src/e2e/eth_bridge_tests.rs +++ b/tests/src/e2e/eth_bridge_tests.rs @@ -46,6 +46,9 @@ use crate::e2e::setup::constants::{ ALBERT, ALBERT_KEY, BERTHA, BERTHA_KEY, NAM, }; use crate::e2e::setup::{Bin, Who}; +use crate::strings::{ + LEDGER_STARTED, TX_ACCEPTED, TX_APPLIED_SUCCESS, VALIDATOR_NODE, +}; use crate::{run, run_as}; /// # Examples @@ -70,7 +73,7 @@ fn run_ledger_with_ethereum_events_endpoint() -> Result<()> { set_ethereum_bridge_mode( &test, &test.net.chain_id, - &Who::Validator(0), + Who::Validator(0), ethereum_bridge::ledger::Mode::SelfHostedEndpoint, Some(DEFAULT_ETHEREUM_EVENTS_LISTEN_ADDR), ); @@ -81,7 +84,7 @@ fn run_ledger_with_ethereum_events_endpoint() -> Result<()> { ledger.exp_string( "Starting to listen for Borsh-serialized Ethereum events", )?; - ledger.exp_string("Namada ledger node started")?; + ledger.exp_string(LEDGER_STARTED)?; ledger.send_control(ControlCode::EndOfText)?; ledger.exp_string( @@ -137,11 +140,8 @@ async fn test_roundtrip_eth_transfer() -> Result<()> { assert_eq!(dai_supply, Some(transfer_amount)); // let's transfer them back to Ethereum - let amount = token::DenominatedAmount { - amount: transfer_amount, - denom: 0u8.into(), - } - .to_string(); + let amount = + token::DenominatedAmount::new(transfer_amount, 0u8.into()).to_string(); let dai_addr = DAI_ERC20_ETH_ADDRESS.to_string(); let tx_args = vec![ "add-erc20-transfer", @@ -175,9 +175,8 @@ async fn test_roundtrip_eth_transfer() -> Result<()> { tx_args, Some(CLIENT_COMMAND_TIMEOUT_SECONDS) )?; - namadac_tx.exp_string("Transaction accepted")?; - namadac_tx.exp_string("Transaction applied")?; - namadac_tx.exp_string("Transaction is valid")?; + namadac_tx.exp_string(TX_ACCEPTED)?; + namadac_tx.exp_string(TX_APPLIED_SUCCESS)?; drop(namadac_tx); let mut namadar = run!( @@ -318,7 +317,7 @@ async fn test_bridge_pool_e2e() { set_ethereum_bridge_mode( &test, &test.net.chain_id, 
- &Who::Validator(0), + Who::Validator(0), ethereum_bridge::ledger::Mode::SelfHostedEndpoint, Some(DEFAULT_ETHEREUM_EVENTS_LISTEN_ADDR), ); @@ -367,9 +366,8 @@ async fn test_bridge_pool_e2e() { Some(CLIENT_COMMAND_TIMEOUT_SECONDS) ) .unwrap(); - namadac_tx.exp_string("Transaction accepted").unwrap(); - namadac_tx.exp_string("Transaction applied").unwrap(); - namadac_tx.exp_string("Transaction is valid").unwrap(); + namadac_tx.exp_string(TX_ACCEPTED).unwrap(); + namadac_tx.exp_string(TX_APPLIED_SUCCESS).unwrap(); drop(namadac_tx); let mut namadar = run!( @@ -533,15 +531,15 @@ async fn test_wnam_transfer() -> Result<()> { set_ethereum_bridge_mode( &test, &test.net.chain_id, - &Who::Validator(0), + Who::Validator(0), ethereum_bridge::ledger::Mode::SelfHostedEndpoint, Some(DEFAULT_ETHEREUM_EVENTS_LISTEN_ADDR), ); let mut ledger = run_as!(test, Who::Validator(0), Bin::Node, vec!["ledger"], Some(40))?; - ledger.exp_string("Namada ledger node started")?; - ledger.exp_string("This node is a validator")?; + ledger.exp_string(LEDGER_STARTED)?; + ledger.exp_string(VALIDATOR_NODE)?; ledger.exp_regex(r"Committed block hash.*, height: [0-9]+")?; let bg_ledger = ledger.background(); @@ -568,7 +566,7 @@ async fn test_wnam_transfer() -> Result<()> { // check NAM balance of receiver and bridge let receiver_balance = find_balance( &test, - &Who::Validator(0), + Who::Validator(0), &native_token_address, &wnam_transfer.receiver, )?; @@ -576,7 +574,7 @@ async fn test_wnam_transfer() -> Result<()> { let bridge_balance = find_balance( &test, - &Who::Validator(0), + Who::Validator(0), &native_token_address, &BRIDGE_ADDRESS, )?; @@ -629,15 +627,15 @@ fn test_configure_oracle_from_storage() -> Result<()> { set_ethereum_bridge_mode( &test, &test.net.chain_id, - &Who::Validator(0), + Who::Validator(0), ethereum_bridge::ledger::Mode::RemoteEndpoint, None, ); let mut ledger = run_as!(test, Who::Validator(0), Bin::Node, vec!["ledger"], Some(40))?; - ledger.exp_string("Namada ledger node started")?; - ledger.exp_string("This node is a validator")?; + ledger.exp_string(LEDGER_STARTED)?; + ledger.exp_string(VALIDATOR_NODE)?; ledger.exp_regex(r"Committed block hash.*, height: [0-9]+")?; // check that the oracle has been configured with the values from storage let initial_config = oracle::config::Config { @@ -676,7 +674,7 @@ async fn test_dai_transfer_implicit() -> Result<()> { let albert_wdai_balance = find_wrapped_erc20_balance( &test, - &Who::Validator(0), + Who::Validator(0), &DAI_ERC20_ETH_ADDRESS, &albert_addr, )?; @@ -692,7 +690,7 @@ async fn test_dai_transfer_established() -> Result<()> { // create an established account that Albert controls let established_alias = "albert-established"; - let rpc_address = get_actor_rpc(&test, &Who::Validator(0)); + let rpc_address = get_actor_rpc(&test, Who::Validator(0)); init_established_account( &test, &rpc_address, @@ -715,7 +713,7 @@ async fn test_dai_transfer_established() -> Result<()> { let established_wdai_balance = find_wrapped_erc20_balance( &test, - &Who::Validator(0), + Who::Validator(0), &DAI_ERC20_ETH_ADDRESS, &established_addr, )?; @@ -745,7 +743,7 @@ async fn test_wdai_transfer_implicit_unauthorized() -> Result<()> { let albert_wdai_balance = find_wrapped_erc20_balance( &test, - &Who::Validator(0), + Who::Validator(0), &DAI_ERC20_ETH_ADDRESS, &albert_addr, )?; @@ -757,24 +755,20 @@ async fn test_wdai_transfer_implicit_unauthorized() -> Result<()> { // signed with Albert's key let mut cmd = attempt_wrapped_erc20_transfer( &test, - &Who::Validator(0), + 
Who::Validator(0), &DAI_ERC20_ETH_ADDRESS, &albert_addr.to_string(), &bertha_addr.to_string(), &bertha_addr.to_string(), - &token::DenominatedAmount { - amount: token::Amount::from(10_000), - denom: 0u8.into(), - }, + &token::DenominatedAmount::new(token::Amount::from(10_000), 0u8.into()), )?; - cmd.exp_string("Transaction is valid.")?; - cmd.exp_string("Transaction is invalid.")?; + cmd.exp_string(TX_REJECTED)?; cmd.assert_success(); // check balances are unchanged after an unsuccessful transfer let albert_wdai_balance = find_wrapped_erc20_balance( &test, - &Who::Validator(0), + Who::Validator(0), &DAI_ERC20_ETH_ADDRESS, &albert_addr, )?; @@ -793,7 +787,7 @@ async fn test_wdai_transfer_established_unauthorized() -> Result<()> { let initial_transfer_amount = token::Amount::from(10_000_000); // create an established account that Albert controls let albert_established_alias = "albert-established"; - let rpc_address = get_actor_rpc(&test, &Who::Validator(0)); + let rpc_address = get_actor_rpc(&test, Who::Validator(0)); init_established_account( &test, &rpc_address, @@ -815,7 +809,7 @@ async fn test_wdai_transfer_established_unauthorized() -> Result<()> { let albert_wdai_balance = find_wrapped_erc20_balance( &test, - &Who::Validator(0), + Who::Validator(0), &DAI_ERC20_ETH_ADDRESS, &albert_established_addr, )?; @@ -827,24 +821,21 @@ async fn test_wdai_transfer_established_unauthorized() -> Result<()> { // signed with Albert's key let mut cmd = attempt_wrapped_erc20_transfer( &test, - &Who::Validator(0), + Who::Validator(0), &DAI_ERC20_ETH_ADDRESS, &albert_established_addr.to_string(), &bertha_addr.to_string(), &bertha_addr.to_string(), - &token::DenominatedAmount { - amount: token::Amount::from(10_000), - denom: 0u8.into(), - }, + &token::DenominatedAmount::new(token::Amount::from(10_000), 0u8.into()), )?; - cmd.exp_string("Transaction is valid.")?; - cmd.exp_string("Transaction is invalid.")?; + cmd.exp_string(TX_ACCEPTED)?; + cmd.exp_string(TX_REJECTED)?; cmd.assert_success(); // check balances are unchanged after an unsuccessful transfer let albert_wdai_balance = find_wrapped_erc20_balance( &test, - &Who::Validator(0), + Who::Validator(0), &DAI_ERC20_ETH_ADDRESS, &albert_established_addr, )?; @@ -873,7 +864,7 @@ async fn test_wdai_transfer_implicit_to_implicit() -> Result<()> { let albert_wdai_balance = find_wrapped_erc20_balance( &test, - &Who::Validator(0), + Who::Validator(0), &DAI_ERC20_ETH_ADDRESS, &albert_addr, )?; @@ -882,26 +873,24 @@ async fn test_wdai_transfer_implicit_to_implicit() -> Result<()> { // attempt a transfer from Albert to Bertha that should succeed, as it's // signed with Albert's key let bertha_addr = find_address(&test, BERTHA)?; - let second_transfer_amount = &token::DenominatedAmount { - amount: token::Amount::from(10_000), - denom: 0u8.into(), - }; + let second_transfer_amount = + &token::DenominatedAmount::new(token::Amount::from(10_000), 0u8.into()); let mut cmd = attempt_wrapped_erc20_transfer( &test, - &Who::Validator(0), + Who::Validator(0), &DAI_ERC20_ETH_ADDRESS, &albert_addr.to_string(), &bertha_addr.to_string(), &albert_addr.to_string(), second_transfer_amount, )?; - cmd.exp_string("Transaction is valid.")?; - cmd.exp_string("Transaction is valid.")?; + cmd.exp_string(TX_ACCEPTED)?; + cmd.exp_string(TX_APPLIED_SUCCESS)?; cmd.assert_success(); let albert_wdai_balance = find_wrapped_erc20_balance( &test, - &Who::Validator(0), + Who::Validator(0), &DAI_ERC20_ETH_ADDRESS, &albert_addr, )?; @@ -912,7 +901,7 @@ async fn test_wdai_transfer_implicit_to_implicit() 
-> Result<()> { let bertha_wdai_balance = find_wrapped_erc20_balance( &test, - &Who::Validator(0), + Who::Validator(0), &DAI_ERC20_ETH_ADDRESS, &bertha_addr, )?; @@ -939,7 +928,7 @@ async fn test_wdai_transfer_implicit_to_established() -> Result<()> { let albert_wdai_balance = find_wrapped_erc20_balance( &test, - &Who::Validator(0), + Who::Validator(0), &DAI_ERC20_ETH_ADDRESS, &albert_addr, )?; @@ -947,7 +936,7 @@ async fn test_wdai_transfer_implicit_to_established() -> Result<()> { // create an established account that Bertha controls let bertha_established_alias = "bertha-established"; - let rpc_address = get_actor_rpc(&test, &Who::Validator(0)); + let rpc_address = get_actor_rpc(&test, Who::Validator(0)); init_established_account( &test, &rpc_address, @@ -960,26 +949,24 @@ async fn test_wdai_transfer_implicit_to_established() -> Result<()> { // attempt a transfer from Albert to Bertha that should succeed, as it's // signed with Albert's key - let second_transfer_amount = &token::DenominatedAmount { - amount: token::Amount::from(10_000), - denom: 0u8.into(), - }; + let second_transfer_amount = + &token::DenominatedAmount::new(token::Amount::from(10_000), 0u8.into()); let mut cmd = attempt_wrapped_erc20_transfer( &test, - &Who::Validator(0), + Who::Validator(0), &DAI_ERC20_ETH_ADDRESS, &albert_addr.to_string(), &bertha_established_addr.to_string(), &albert_addr.to_string(), second_transfer_amount, )?; - cmd.exp_string("Transaction is valid.")?; - cmd.exp_string("Transaction is valid.")?; + cmd.exp_string(TX_ACCEPTED)?; + cmd.exp_string(TX_APPLIED_SUCCESS)?; cmd.assert_success(); let albert_wdai_balance = find_wrapped_erc20_balance( &test, - &Who::Validator(0), + Who::Validator(0), &DAI_ERC20_ETH_ADDRESS, &albert_addr, )?; @@ -990,7 +977,7 @@ async fn test_wdai_transfer_implicit_to_established() -> Result<()> { let bertha_established_wdai_balance = find_wrapped_erc20_balance( &test, - &Who::Validator(0), + Who::Validator(0), &DAI_ERC20_ETH_ADDRESS, &bertha_established_addr, )?; @@ -1008,7 +995,7 @@ async fn test_wdai_transfer_established_to_implicit() -> Result<()> { // create an established account that Albert controls let albert_established_alias = "albert-established"; - let rpc_address = get_actor_rpc(&test, &Who::Validator(0)); + let rpc_address = get_actor_rpc(&test, Who::Validator(0)); init_established_account( &test, &rpc_address, @@ -1031,7 +1018,7 @@ async fn test_wdai_transfer_established_to_implicit() -> Result<()> { let albert_established_wdai_balance = find_wrapped_erc20_balance( &test, - &Who::Validator(0), + Who::Validator(0), &DAI_ERC20_ETH_ADDRESS, &albert_established_addr, )?; @@ -1041,26 +1028,24 @@ async fn test_wdai_transfer_established_to_implicit() -> Result<()> { // attempt a transfer from Albert to Bertha that should succeed, as it's // signed with Albert's key - let second_transfer_amount = &token::DenominatedAmount { - amount: token::Amount::from(10_000), - denom: 0u8.into(), - }; + let second_transfer_amount = + &token::DenominatedAmount::new(token::Amount::from(10_000), 0u8.into()); let mut cmd = attempt_wrapped_erc20_transfer( &test, - &Who::Validator(0), + Who::Validator(0), &DAI_ERC20_ETH_ADDRESS, &albert_established_addr.to_string(), &bertha_addr.to_string(), &albert_established_addr.to_string(), second_transfer_amount, )?; - cmd.exp_string("Transaction is valid.")?; - cmd.exp_string("Transaction is valid.")?; + cmd.exp_string(TX_ACCEPTED)?; + cmd.exp_string(TX_APPLIED_SUCCESS)?; cmd.assert_success(); let albert_established_wdai_balance = 
find_wrapped_erc20_balance( &test, - &Who::Validator(0), + Who::Validator(0), &DAI_ERC20_ETH_ADDRESS, &albert_established_addr, )?; @@ -1071,7 +1056,7 @@ async fn test_wdai_transfer_established_to_implicit() -> Result<()> { let bertha_wdai_balance = find_wrapped_erc20_balance( &test, - &Who::Validator(0), + Who::Validator(0), &DAI_ERC20_ETH_ADDRESS, &bertha_addr, )?; @@ -1088,7 +1073,7 @@ async fn test_wdai_transfer_established_to_established() -> Result<()> { // create an established account that Albert controls let albert_established_alias = "albert-established"; - let rpc_address = get_actor_rpc(&test, &Who::Validator(0)); + let rpc_address = get_actor_rpc(&test, Who::Validator(0)); init_established_account( &test, &rpc_address, @@ -1111,7 +1096,7 @@ async fn test_wdai_transfer_established_to_established() -> Result<()> { let albert_established_wdai_balance = find_wrapped_erc20_balance( &test, - &Who::Validator(0), + Who::Validator(0), &DAI_ERC20_ETH_ADDRESS, &albert_established_addr, )?; @@ -1131,26 +1116,24 @@ async fn test_wdai_transfer_established_to_established() -> Result<()> { // attempt a transfer from Albert to Bertha that should succeed, as it's // signed with Albert's key - let second_transfer_amount = &token::DenominatedAmount { - amount: token::Amount::from(10_000), - denom: 0u8.into(), - }; + let second_transfer_amount = + &token::DenominatedAmount::new(token::Amount::from(10_000), 0u8.into()); let mut cmd = attempt_wrapped_erc20_transfer( &test, - &Who::Validator(0), + Who::Validator(0), &DAI_ERC20_ETH_ADDRESS, &albert_established_addr.to_string(), &bertha_established_addr.to_string(), &albert_established_addr.to_string(), second_transfer_amount, )?; - cmd.exp_string("Transaction is valid.")?; - cmd.exp_string("Transaction is valid.")?; + cmd.exp_string(TX_ACCEPTED)?; + cmd.exp_string(TX_APPLIED_SUCCESS)?; cmd.assert_success(); let albert_established_wdai_balance = find_wrapped_erc20_balance( &test, - &Who::Validator(0), + Who::Validator(0), &DAI_ERC20_ETH_ADDRESS, &albert_established_addr, )?; @@ -1161,7 +1144,7 @@ async fn test_wdai_transfer_established_to_established() -> Result<()> { let bertha_established_wdai_balance = find_wrapped_erc20_balance( &test, - &Who::Validator(0), + Who::Validator(0), &DAI_ERC20_ETH_ADDRESS, &bertha_established_addr, )?; diff --git a/tests/src/e2e/eth_bridge_tests/helpers.rs b/tests/src/e2e/eth_bridge_tests/helpers.rs index 9fbc467443..a7ba34a7ff 100644 --- a/tests/src/e2e/eth_bridge_tests/helpers.rs +++ b/tests/src/e2e/eth_bridge_tests/helpers.rs @@ -23,6 +23,7 @@ use crate::e2e::helpers::{ use crate::e2e::setup::{ self, set_ethereum_bridge_mode, Bin, NamadaBgCmd, NamadaCmd, Test, Who, }; +use crate::strings::{LEDGER_STARTED, VALIDATOR_NODE}; use crate::{run, run_as}; /// The default listen address for a self-hosted events endpoint. 
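Two recurring changes in the e2e tests above: expected log lines now come from shared constants in `crate::strings` instead of repeated string literals, and denominated amounts are built with `DenominatedAmount::new` rather than a struct literal. A sketch of the resulting test idiom (the `Result` alias is assumed to be the eyre result type these tests already use):

use crate::e2e::setup::NamadaCmd;
use crate::strings::{TX_ACCEPTED, TX_APPLIED_SUCCESS};

// Drive an already-spawned client command to completion and assert the
// standard "accepted then applied" log lines.
fn expect_transfer_applied(mut cmd: NamadaCmd) -> color_eyre::eyre::Result<()> {
    cmd.exp_string(TX_ACCEPTED)?;
    cmd.exp_string(TX_APPLIED_SUCCESS)?;
    cmd.assert_success();
    Ok(())
}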
@@ -114,15 +115,15 @@ pub fn setup_single_validator_test() -> Result<(Test, NamadaBgCmd)> { set_ethereum_bridge_mode( &test, &test.net.chain_id, - &Who::Validator(0), + Who::Validator(0), ethereum_bridge::ledger::Mode::SelfHostedEndpoint, Some(DEFAULT_ETHEREUM_EVENTS_LISTEN_ADDR), ); let mut ledger = run_as!(test, Who::Validator(0), Bin::Node, vec!["ledger"], Some(40))?; - ledger.exp_string("Namada ledger node started")?; - ledger.exp_string("This node is a validator")?; + ledger.exp_string(LEDGER_STARTED)?; + ledger.exp_string(VALIDATOR_NODE)?; ledger.exp_regex(r"Committed block hash.*, height: [0-9]+")?; let bg_ledger = ledger.background(); @@ -163,7 +164,7 @@ pub async fn send_transfer_to_namada_event( /// This will fail if the keys for `signer` are not in the local wallet. pub fn attempt_wrapped_erc20_transfer( test: &Test, - node: &Who, + node: Who, asset: &EthAddress, from: &str, to: &str, @@ -198,7 +199,7 @@ pub fn attempt_wrapped_erc20_transfer( /// been involved in a wrapped ERC20 transfer of any kind). pub fn find_wrapped_erc20_balance( test: &Test, - node: &Who, + node: Who, asset: &EthAddress, owner: &Address, ) -> Result { diff --git a/tests/src/e2e/helpers.rs b/tests/src/e2e/helpers.rs index 9fe5194da6..0e33d9cc87 100644 --- a/tests/src/e2e/helpers.rs +++ b/tests/src/e2e/helpers.rs @@ -32,6 +32,7 @@ use super::setup::{ ENV_VAR_USE_PREBUILT_BINARIES, }; use crate::e2e::setup::{Bin, Who, APPS_PACKAGE}; +use crate::strings::{LEDGER_STARTED, TX_ACCEPTED, TX_APPLIED_SUCCESS}; use crate::{run, run_as}; /// Instantiate a new [`HttpClient`] to perform RPC requests with. @@ -66,7 +67,7 @@ pub fn setup_single_node_test() -> Result<(Test, NamadaBgCmd)> { pub fn run_single_node_test_from(test: Test) -> Result<(Test, NamadaBgCmd)> { let mut ledger = run_as!(test, Who::Validator(0), Bin::Node, &["ledger"], Some(40))?; - ledger.exp_string("Namada ledger node started")?; + ledger.exp_string(LEDGER_STARTED)?; // TODO(namada#867): we only need to wait until the RPC server is available, // not necessarily for a block to be committed // ledger.exp_string("Starting RPC HTTP server on")?; @@ -94,11 +95,10 @@ pub fn init_established_account( "--ledger-address", rpc_addr, ]; - let mut client_init_account = - run!(test, Bin::Client, init_account_args, Some(40))?; - client_init_account.exp_string("Transaction is valid.")?; - client_init_account.exp_string("Transaction applied")?; - client_init_account.assert_success(); + let mut cmd = run!(test, Bin::Client, init_account_args, Some(40))?; + cmd.exp_string(TX_ACCEPTED)?; + cmd.exp_string(TX_APPLIED_SUCCESS)?; + cmd.assert_success(); Ok(()) } @@ -107,10 +107,11 @@ pub fn find_address(test: &Test, alias: impl AsRef) -> Result
{ let mut find = run!( test, Bin::Wallet, - &["address", "find", "--alias", alias.as_ref()], + &["find", "--addr", "--alias", alias.as_ref()], Some(10) )?; - let (unread, matched) = find.exp_regex("Found address .*")?; + find.exp_string("Found transparent address:")?; + let (unread, matched) = find.exp_regex("\".*\": .*")?; let address_str = strip_trailing_newline(&matched) .trim() .rsplit_once(' ') @@ -130,7 +131,7 @@ pub fn find_address(test: &Test, alias: impl AsRef) -> Result
{ #[allow(dead_code)] pub fn find_balance( test: &Test, - node: &Who, + node: Who, token: &Address, owner: &Address, ) -> Result { @@ -162,7 +163,7 @@ pub fn find_balance( } /// Find the address of the node's RPC endpoint. -pub fn get_actor_rpc(test: &Test, who: &Who) -> String { +pub fn get_actor_rpc(test: &Test, who: Who) -> String { let base_dir = test.get_base_dir(who); let tendermint_mode = match who { Who::NonValidator => TendermintMode::Full, @@ -176,7 +177,7 @@ pub fn get_actor_rpc(test: &Test, who: &Who) -> String { } /// Get some nodes's wallet. -pub fn get_node_wallet(test: &Test, who: &Who) -> Wallet { +pub fn get_node_wallet(test: &Test, who: Who) -> Wallet { let wallet_store_dir = test.get_base_dir(who).join(test.net.chain_id.as_str()); let mut wallet = FsWalletUtils::new(wallet_store_dir); @@ -185,7 +186,7 @@ pub fn get_node_wallet(test: &Test, who: &Who) -> Wallet { } /// Get the public key of the validator -pub fn get_validator_pk(test: &Test, who: &Who) -> Option { +pub fn get_validator_pk(test: &Test, who: Who) -> Option { let index = match who { Who::NonValidator => return None, Who::Validator(i) => i, @@ -245,10 +246,11 @@ pub fn find_keypair( test, Bin::Wallet, &[ - "key", "find", + "--keys", "--alias", alias.as_ref(), + "--decrypt", "--unsafe-show-secret" ], Some(10) @@ -381,7 +383,9 @@ pub fn wait_for_block_height( break Ok(()); } if Instant::now().duration_since(start) > loop_timeout { - panic!("Timed out waiting for height {height}, current {current}"); + return Err(eyre!( + "Timed out waiting for height {height}, current {current}" + )); } sleep(1); } diff --git a/tests/src/e2e/ibc_tests.rs b/tests/src/e2e/ibc_tests.rs index 1ea7667f57..d0a3ae4638 100644 --- a/tests/src/e2e/ibc_tests.rs +++ b/tests/src/e2e/ibc_tests.rs @@ -88,6 +88,9 @@ use crate::e2e::helpers::{ use crate::e2e::setup::{ self, sleep, working_dir, Bin, NamadaCmd, Test, TestDir, Who, }; +use crate::strings::{ + LEDGER_STARTED, TX_ACCEPTED, TX_APPLIED_SUCCESS, TX_FAILED, VALIDATOR_NODE, +}; use crate::{run, run_as}; #[test] @@ -96,14 +99,14 @@ fn run_ledger_ibc() -> Result<()> { set_ethereum_bridge_mode( &test_a, &test_a.net.chain_id, - &Who::Validator(0), + Who::Validator(0), ethereum_bridge::ledger::Mode::Off, None, ); set_ethereum_bridge_mode( &test_b, &test_b.net.chain_id, - &Who::Validator(0), + Who::Validator(0), ethereum_bridge::ledger::Mode::Off, None, ); @@ -116,7 +119,7 @@ fn run_ledger_ibc() -> Result<()> { &["ledger", "run"], Some(40) )?; - ledger_a.exp_string("Namada ledger node started")?; + ledger_a.exp_string(LEDGER_STARTED)?; // Run Chain B let mut ledger_b = run_as!( test_b, @@ -125,9 +128,9 @@ fn run_ledger_ibc() -> Result<()> { &["ledger", "run"], Some(40) )?; - ledger_b.exp_string("Namada ledger node started")?; - ledger_a.exp_string("This node is a validator")?; - ledger_b.exp_string("This node is a validator")?; + ledger_b.exp_string(LEDGER_STARTED)?; + ledger_a.exp_string(VALIDATOR_NODE)?; + ledger_b.exp_string(VALIDATOR_NODE)?; wait_for_wasm_pre_compile(&mut ledger_a)?; wait_for_wasm_pre_compile(&mut ledger_b)?; @@ -167,7 +170,8 @@ fn run_ledger_ibc() -> Result<()> { try_invalid_transfers(&test_a, &test_b, &port_id_a, &channel_id_a)?; // Transfer 50000 received over IBC on Chain B - transfer_received_token(&port_id_b, &channel_id_b, &test_b)?; + let token = format!("{port_id_b}/{channel_id_b}/nam"); + transfer_on_chain(&test_b, BERTHA, ALBERT, token, 50000, BERTHA_KEY)?; check_balances_after_non_ibc(&port_id_b, &channel_id_b, &test_b)?; // Transfer 50000 back from the 
origin-specific account on Chain B to Chain @@ -203,7 +207,7 @@ fn run_ledger_ibc() -> Result<()> { &port_id_b, &channel_id_b, )?; - check_shielded_balances(&port_id_b, &channel_id_b, &test_b)?; + check_shielded_balances(&port_id_b, &channel_id_b, &test_a, &test_b)?; // Skip tests for closing a channel and timeout_on_close since the transfer // channel cannot be closed @@ -265,7 +269,7 @@ fn setup_two_single_node_nets() -> Result<(Test, Test)> { .map_err(|_| eyre!("Could not read genesis files from test b"))?; // chain b's validator needs to listen on a different port than chain a's // validator - let validator_pk = get_validator_pk(&test_b, &Who::Validator(0)).unwrap(); + let validator_pk = get_validator_pk(&test_b, Who::Validator(0)).unwrap(); let validator_addr = genesis_b .transactions .established_account @@ -338,18 +342,18 @@ fn create_client(test_a: &Test, test_b: &Test) -> Result<(ClientId, ClientId)> { let height_b = submit_ibc_tx(test_b, message, ALBERT, ALBERT_KEY, false)?; let events = get_events(test_a, height_a)?; - let client_id_a = get_client_id_from_events(&events) - .ok_or(eyre!("Transaction failed"))?; + let client_id_a = + get_client_id_from_events(&events).ok_or(eyre!(TX_FAILED))?; let events = get_events(test_b, height_b)?; - let client_id_b = get_client_id_from_events(&events) - .ok_or(eyre!("Transaction failed"))?; + let client_id_b = + get_client_id_from_events(&events).ok_or(eyre!(TX_FAILED))?; // `client_id_a` represents the ID of the B's client on Chain A Ok((client_id_a, client_id_b)) } fn make_client_state(test: &Test, height: Height) -> TmClientState { - let rpc = get_actor_rpc(test, &Who::Validator(0)); + let rpc = get_actor_rpc(test, Who::Validator(0)); let ledger_address = TendermintAddress::from_str(&rpc).unwrap(); let client = HttpClient::new(ledger_address).unwrap(); @@ -473,12 +477,12 @@ fn update_client( } fn make_light_client_io(test: &Test) -> TmLightClientIo { - let addr = format!("http://{}", get_actor_rpc(test, &Who::Validator(0))); + let addr = format!("http://{}", get_actor_rpc(test, Who::Validator(0))); let rpc_addr = Url::from_str(&addr).unwrap(); let rpc_client = HttpClient::new(rpc_addr).unwrap(); let rpc_timeout = Duration::new(10, 0); - let pk = get_validator_pk(test, &Who::Validator(0)).unwrap(); + let pk = get_validator_pk(test, Who::Validator(0)).unwrap(); let peer_id = id_from_pk(&PublicKey::try_from_pk(&pk).unwrap()); TmLightClientIo::new(peer_id, rpc_client, Some(rpc_timeout)) @@ -617,8 +621,8 @@ fn channel_handshake( }; let height = submit_ibc_tx(test_a, msg, ALBERT, ALBERT_KEY, false)?; let events = get_events(test_a, height)?; - let channel_id_a = get_channel_id_from_events(&events) - .ok_or(eyre!("Transaction failed"))?; + let channel_id_a = + get_channel_id_from_events(&events).ok_or(eyre!(TX_FAILED))?; // get the proofs from Chain A let height_a = query_height(test_a)?; @@ -644,8 +648,8 @@ fn channel_handshake( // OpenTryChannel on Chain B let height = submit_ibc_tx(test_b, msg, ALBERT, ALBERT_KEY, false)?; let events = get_events(test_b, height)?; - let channel_id_b = get_channel_id_from_events(&events) - .ok_or(eyre!("Transaction failed"))?; + let channel_id_b = + get_channel_id_from_events(&events).ok_or(eyre!(TX_FAILED))?; // get the A's proofs on Chain B let height_b = query_height(test_b)?; @@ -757,8 +761,7 @@ fn transfer_token( false, )?; let events = get_events(test_a, height)?; - let packet = - get_packet_from_events(&events).ok_or(eyre!("Transaction failed"))?; + let packet = 
get_packet_from_events(&events).ok_or(eyre!(TX_FAILED))?; check_ibc_packet_query(test_a, &"send_packet".parse().unwrap(), &packet)?; let height_a = query_height(test_a)?; @@ -775,10 +778,8 @@ fn transfer_token( // Receive the token on Chain B let height = submit_ibc_tx(test_b, msg, ALBERT, ALBERT_KEY, false)?; let events = get_events(test_b, height)?; - let packet = - get_packet_from_events(&events).ok_or(eyre!("Transaction failed"))?; - let ack = - get_ack_from_events(&events).ok_or(eyre!("Transaction failed"))?; + let packet = get_packet_from_events(&events).ok_or(eyre!(TX_FAILED))?; + let ack = get_ack_from_events(&events).ok_or(eyre!(TX_FAILED))?; check_ibc_packet_query( test_b, &"write_acknowledgement".parse().unwrap(), @@ -862,31 +863,33 @@ fn try_invalid_transfers( Ok(()) } -fn transfer_received_token( - port_id: &PortId, - channel_id: &ChannelId, +fn transfer_on_chain( test: &Test, + sender: impl AsRef, + receiver: impl AsRef, + token: impl AsRef, + amount: u64, + signer: impl AsRef, ) -> Result<()> { - let rpc = get_actor_rpc(test, &Who::Validator(0)); - let ibc_denom = format!("{port_id}/{channel_id}/nam"); - let amount = Amount::native_whole(50000).to_string_native(); + let rpc = get_actor_rpc(test, Who::Validator(0)); let tx_args = [ "transfer", "--source", - BERTHA, + sender.as_ref(), "--target", - ALBERT, + receiver.as_ref(), "--token", - &ibc_denom, + token.as_ref(), "--amount", - &amount, - "--gas-token", - NAM, + &amount.to_string(), + "--signing-keys", + signer.as_ref(), "--node", &rpc, ]; let mut client = run!(test, Bin::Client, tx_args, Some(40))?; - client.exp_string("Transaction is valid.")?; + client.exp_string(TX_ACCEPTED)?; + client.exp_string(TX_APPLIED_SUCCESS)?; client.assert_success(); Ok(()) @@ -921,8 +924,7 @@ fn transfer_back( false, )?; let events = get_events(test_b, height)?; - let packet = - get_packet_from_events(&events).ok_or(eyre!("Transaction failed"))?; + let packet = get_packet_from_events(&events).ok_or(eyre!(TX_FAILED))?; let height_b = query_height(test_b)?; let proof = get_commitment_proof(test_b, &packet, height_b)?; @@ -937,10 +939,8 @@ fn transfer_back( // Receive the token on Chain A let height = submit_ibc_tx(test_a, msg, ALBERT, ALBERT_KEY, false)?; let events = get_events(test_a, height)?; - let packet = - get_packet_from_events(&events).ok_or(eyre!("Transaction failed"))?; - let ack = - get_ack_from_events(&events).ok_or(eyre!("Transaction failed"))?; + let packet = get_packet_from_events(&events).ok_or(eyre!(TX_FAILED))?; + let ack = get_ack_from_events(&events).ok_or(eyre!(TX_FAILED))?; // get the proof on Chain A let height_a = query_height(test_a)?; @@ -985,8 +985,7 @@ fn transfer_timeout( false, )?; let events = get_events(test_a, height)?; - let packet = - get_packet_from_events(&events).ok_or(eyre!("Transaction failed"))?; + let packet = get_packet_from_events(&events).ok_or(eyre!(TX_FAILED))?; // wait for the timeout sleep(5); @@ -1022,8 +1021,10 @@ fn shielded_transfer( ) -> Result<()> { // Get masp proof for the following IBC transfer from the destination chain // It will send 10 BTC from Chain A to PA(B) on Chain B - let rpc_b = get_actor_rpc(test_b, &Who::Validator(0)); + let rpc_b = get_actor_rpc(test_b, Who::Validator(0)); let output_folder = test_b.test_dir.path().to_string_lossy(); + // PA(B) on Chain B will receive BTC on chain A + let token_addr = find_address(test_a, BTC)?; let amount = Amount::native_whole(10).to_string_native(); let args = [ "ibc-gen-shielded", @@ -1032,7 +1033,7 @@ fn shielded_transfer( 
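Above, the single-purpose `transfer_received_token` helper is replaced by a general `transfer_on_chain` that takes sender, receiver, token, amount and signer as parameters, so the same helper can both move the IBC-received denom on chain B and fund the shielded address on chain A. The sketch below only illustrates why those parameters are `impl AsRef<str>`: both `&str` aliases and owned `String`s (such as a `format!`-built IBC denom) can be passed without conversions. `describe_transfer` is a made-up name, not part of the tests.

```rust
/// Illustrative only: generic `impl AsRef<str>` parameters accept both
/// string literals and owned `String`s built at runtime.
fn describe_transfer(
    sender: impl AsRef<str>,
    receiver: impl AsRef<str>,
    token: impl AsRef<str>,
    amount: u64,
) -> String {
    format!(
        "{} -> {}: {} {}",
        sender.as_ref(),
        receiver.as_ref(),
        amount,
        token.as_ref()
    )
}

fn main() {
    // An IBC denom assembled at runtime, as in the test above.
    let ibc_denom = format!("{}/{}/nam", "transfer", "channel-0");
    println!("{}", describe_transfer("bertha", "albert", ibc_denom, 50_000));
    println!("{}", describe_transfer("albert", "bertha", "nam", 10));
}
```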
"--target", AB_PAYMENT_ADDRESS, "--token", - BTC, + &token_addr.to_string(), "--amount", &amount, "--port-id", @@ -1046,11 +1047,14 @@ fn shielded_transfer( let file_path = get_shielded_transfer_path(&mut client)?; client.assert_success(); - // Send a token from Chain A to PA(B) on Chain B + // Send a token to the shielded address on Chain A + transfer_on_chain(test_a, ALBERT, AA_PAYMENT_ADDRESS, BTC, 10, ALBERT_KEY)?; + + // Send a token from SP(A) on Chain A to PA(B) on Chain B let amount = Amount::native_whole(10).to_string_native(); let height = transfer( test_a, - ALBERT, + A_SPENDING_KEY, AB_PAYMENT_ADDRESS, BTC, amount, @@ -1063,8 +1067,7 @@ fn shielded_transfer( false, )?; let events = get_events(test_a, height)?; - let packet = - get_packet_from_events(&events).ok_or(eyre!("Transaction failed"))?; + let packet = get_packet_from_events(&events).ok_or(eyre!(TX_FAILED))?; check_ibc_packet_query(test_a, &"send_packet".parse().unwrap(), &packet)?; let height_a = query_height(test_a)?; @@ -1081,10 +1084,8 @@ fn shielded_transfer( // Receive the token on Chain B let height = submit_ibc_tx(test_b, msg, ALBERT, ALBERT_KEY, false)?; let events = get_events(test_b, height)?; - let packet = - get_packet_from_events(&events).ok_or(eyre!("Transaction failed"))?; - let ack = - get_ack_from_events(&events).ok_or(eyre!("Transaction failed"))?; + let packet = get_packet_from_events(&events).ok_or(eyre!(TX_FAILED))?; + let ack = get_ack_from_events(&events).ok_or(eyre!(TX_FAILED))?; check_ibc_packet_query( test_b, &"write_acknowledgement".parse().unwrap(), @@ -1178,7 +1179,7 @@ fn submit_ibc_tx( std::fs::write(&data_path, data).expect("writing data failed"); let data_path = data_path.to_string_lossy(); - let rpc = get_actor_rpc(test, &Who::Validator(0)); + let rpc = get_actor_rpc(test, Who::Validator(0)); let mut client = run!( test, Bin::Client, @@ -1199,9 +1200,9 @@ fn submit_ibc_tx( ], Some(40) )?; - client.exp_string("Transaction applied")?; + client.exp_string(TX_APPLIED_SUCCESS)?; if wait_reveal_pk { - client.exp_string("Transaction applied")?; + client.exp_string(TX_APPLIED_SUCCESS)?; } check_tx_height(test, &mut client) } @@ -1221,7 +1222,7 @@ fn transfer( expected_err: Option<&str>, wait_reveal_pk: bool, ) -> Result { - let rpc = get_actor_rpc(test, &Who::Validator(0)); + let rpc = get_actor_rpc(test, Who::Validator(0)); let channel_id = channel_id.to_string(); let port_id = port_id.to_string(); @@ -1264,9 +1265,9 @@ fn transfer( Ok(0) } None => { - client.exp_string("Transaction applied")?; + client.exp_string(TX_APPLIED_SUCCESS)?; if wait_reveal_pk { - client.exp_string("Transaction applied")?; + client.exp_string(TX_APPLIED_SUCCESS)?; } check_tx_height(test, &mut client) } @@ -1274,25 +1275,17 @@ fn transfer( } fn check_tx_height(test: &Test, client: &mut NamadaCmd) -> Result { - let (unread, matched) = client.exp_regex("\"height\": .*,")?; + let (_unread, matched) = client.exp_regex(r"height .*")?; + // Expecting e.g. "height 1337." 
let height_str = matched .trim() - .rsplit_once(' ') + .split_once(' ') .unwrap() .1 - .replace(['"', ','], ""); - let height = height_str.parse().unwrap(); - - let (_unread, matched) = client.exp_regex("\"code\": .*,")?; - let code = matched - .trim() - .rsplit_once(' ') + .split_once('.') .unwrap() - .1 - .replace(['"', ','], ""); - if code != "0" { - return Err(eyre!("The IBC transaction failed: unread {}", unread)); - } + .0; + let height: u32 = height_str.parse().unwrap(); // wait for the next block to use the app hash while height as u64 + 1 > query_height(test)?.revision_height() { @@ -1311,7 +1304,7 @@ fn make_ibc_data(message: impl Msg) -> Vec { } fn query_height(test: &Test) -> Result { - let rpc = get_actor_rpc(test, &Who::Validator(0)); + let rpc = get_actor_rpc(test, Who::Validator(0)); let ledger_address = TendermintAddress::from_str(&rpc).unwrap(); let client = HttpClient::new(ledger_address).unwrap(); @@ -1324,7 +1317,7 @@ fn query_height(test: &Test) -> Result { } fn query_header(test: &Test, height: Height) -> Result { - let rpc = get_actor_rpc(test, &Who::Validator(0)); + let rpc = get_actor_rpc(test, Who::Validator(0)); let ledger_address = TendermintAddress::from_str(&rpc).unwrap(); let client = HttpClient::new(ledger_address).unwrap(); let height = height.revision_height() as u32; @@ -1345,7 +1338,7 @@ fn check_ibc_update_query( client_id: &ClientId, consensus_height: BlockHeight, ) -> Result<()> { - let rpc = get_actor_rpc(test, &Who::Validator(0)); + let rpc = get_actor_rpc(test, Who::Validator(0)); let ledger_address = TendermintAddress::from_str(&rpc).unwrap(); let client = HttpClient::new(ledger_address).unwrap(); match test.async_runtime().block_on(RPC.shell().ibc_client_update( @@ -1364,7 +1357,7 @@ fn check_ibc_packet_query( event_type: &EventType, packet: &Packet, ) -> Result<()> { - let rpc = get_actor_rpc(test, &Who::Validator(0)); + let rpc = get_actor_rpc(test, Who::Validator(0)); let ledger_address = TendermintAddress::from_str(&rpc).unwrap(); let client = HttpClient::new(ledger_address).unwrap(); match test.async_runtime().block_on(RPC.shell().ibc_packet( @@ -1387,7 +1380,7 @@ fn query_value_with_proof( key: &Key, height: Option, ) -> Result<(Option>, TmProof)> { - let rpc = get_actor_rpc(test, &Who::Validator(0)); + let rpc = get_actor_rpc(test, Who::Validator(0)); let ledger_address = TendermintAddress::from_str(&rpc).unwrap(); let client = HttpClient::new(ledger_address).unwrap(); let result = test.async_runtime().block_on(query_storage_value_bytes( @@ -1425,7 +1418,7 @@ fn check_balances( test_b: &Test, ) -> Result<()> { // Check the balances on Chain A - let rpc_a = get_actor_rpc(test_a, &Who::Validator(0)); + let rpc_a = get_actor_rpc(test_a, Who::Validator(0)); // Check the escrowed balance let escrow = Address::Internal(InternalAddress::Ibc).to_string(); let query_args = vec![ @@ -1444,7 +1437,7 @@ fn check_balances( // Check the balance on Chain B let trace_path = format!("{}/{}", &dest_port_id, &dest_channel_id); - let rpc_b = get_actor_rpc(test_b, &Who::Validator(0)); + let rpc_b = get_actor_rpc(test_b, Who::Validator(0)); let query_args = vec![ "balance", "--owner", BERTHA, "--token", NAM, "--node", &rpc_b, ]; @@ -1465,7 +1458,7 @@ fn check_balances_after_non_ibc( let trace_path = format!("{}/{}", port_id, channel_id); // Check the source - let rpc = get_actor_rpc(test, &Who::Validator(0)); + let rpc = get_actor_rpc(test, Who::Validator(0)); let query_args = vec!["balance", "--owner", BERTHA, "--token", NAM, "--node", &rpc]; let expected = 
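The `check_tx_height` rewrite above stops scraping a JSON-style `"height": ...,` field and instead matches a plain `height <n>.` fragment from the client output, splitting once on the space and once on the trailing period. A standalone sketch of that parsing; the sample strings are assumptions about surrounding output, only the `height <n>.` shape is taken from the regex and comment above.

```rust
/// Extract the block height from a string such as "height 1337.".
fn parse_height(matched: &str) -> Option<u32> {
    let after_keyword = matched.trim().split_once(' ')?.1;
    let digits = after_keyword.split_once('.')?.0;
    digits.parse().ok()
}

fn main() {
    assert_eq!(parse_height("height 1337."), Some(1337));
    assert_eq!(parse_height(" height 42. committed "), Some(42));
    // Malformed input yields None instead of panicking.
    assert_eq!(parse_height("height"), None);
}
```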
format!("{}/nam: 50000", trace_path); @@ -1492,7 +1485,7 @@ fn check_balances_after_back( test_b: &Test, ) -> Result<()> { // Check the balances on Chain A - let rpc_a = get_actor_rpc(test_a, &Who::Validator(0)); + let rpc_a = get_actor_rpc(test_a, Who::Validator(0)); // Check the escrowed balance let escrow = Address::Internal(InternalAddress::Ibc).to_string(); let query_args = vec![ @@ -1511,7 +1504,7 @@ fn check_balances_after_back( // Check the balance on Chain B let trace_path = format!("{}/{}", dest_port_id, dest_channel_id); - let rpc_b = get_actor_rpc(test_b, &Who::Validator(0)); + let rpc_b = get_actor_rpc(test_b, Who::Validator(0)); let query_args = vec![ "balance", "--owner", BERTHA, "--token", NAM, "--node", &rpc_b, ]; @@ -1526,16 +1519,19 @@ fn check_balances_after_back( fn check_shielded_balances( dest_port_id: &PortId, dest_channel_id: &ChannelId, + test_a: &Test, test_b: &Test, ) -> Result<()> { // Check the balance on Chain B - let rpc_b = get_actor_rpc(test_b, &Who::Validator(0)); + let rpc_b = get_actor_rpc(test_b, Who::Validator(0)); + // PA(B) on Chain B has received BTC on chain A + let token_addr = find_address(test_a, BTC)?.to_string(); let query_args = vec![ "balance", "--owner", AB_VIEWING_KEY, "--token", - BTC, + &token_addr, "--no-conversions", "--node", &rpc_b, @@ -1640,7 +1636,7 @@ fn get_attributes_from_event(event: &AbciEvent) -> HashMap { } fn get_events(test: &Test, height: u32) -> Result> { - let rpc = get_actor_rpc(test, &Who::Validator(0)); + let rpc = get_actor_rpc(test, Who::Validator(0)); let ledger_address = TendermintAddress::from_str(&rpc).unwrap(); let client = HttpClient::new(ledger_address).unwrap(); diff --git a/tests/src/e2e/ledger_tests.rs b/tests/src/e2e/ledger_tests.rs index 5b3aa128ba..ca123601a1 100644 --- a/tests/src/e2e/ledger_tests.rs +++ b/tests/src/e2e/ledger_tests.rs @@ -18,6 +18,7 @@ use std::time::{Duration, Instant}; use borsh_ext::BorshSerializeExt; use color_eyre::eyre::Result; +use color_eyre::owo_colors::OwoColorize; use data_encoding::HEXLOWER; use namada::types::address::Address; use namada::types::storage::Epoch; @@ -50,6 +51,10 @@ use crate::e2e::setup::{ self, allow_duplicate_ips, default_port_offset, set_validators, sleep, Bin, Who, }; +use crate::strings::{ + LEDGER_SHUTDOWN, LEDGER_STARTED, NON_VALIDATOR_NODE, TX_ACCEPTED, + TX_APPLIED_SUCCESS, TX_FAILED, TX_REJECTED, VALIDATOR_NODE, +}; use crate::{run, run_as}; fn start_namada_ledger_node( @@ -61,13 +66,12 @@ fn start_namada_ledger_node( Some(idx) => Who::Validator(idx), _ => Who::NonValidator, }; - let mut node = - run_as!(test, who.clone(), Bin::Node, &["ledger"], timeout_sec)?; - node.exp_string("Namada ledger node started")?; + let mut node = run_as!(test, who, Bin::Node, &["ledger"], timeout_sec)?; + node.exp_string(LEDGER_STARTED)?; if let Who::Validator(_) = who { - node.exp_string("This node is a validator")?; + node.exp_string(VALIDATOR_NODE)?; } else { - node.exp_string("This node is not a validator")?; + node.exp_string(NON_VALIDATOR_NODE)?; } Ok(node) } @@ -92,7 +96,7 @@ fn run_ledger() -> Result<()> { set_ethereum_bridge_mode( &test, &test.net.chain_id, - &Who::Validator(0), + Who::Validator(0), ethereum_bridge::ledger::Mode::Off, None, ); @@ -103,16 +107,16 @@ fn run_ledger() -> Result<()> { for args in &cmd_combinations { let mut ledger = run_as!(test, Who::Validator(0), Bin::Node, args, Some(40))?; - ledger.exp_string("Namada ledger node started")?; - ledger.exp_string("This node is a validator")?; + ledger.exp_string(LEDGER_STARTED)?; + 
ledger.exp_string(VALIDATOR_NODE)?; } // Start the ledger as a non-validator for args in &cmd_combinations { let mut ledger = run_as!(test, Who::NonValidator, Bin::Node, args, Some(40))?; - ledger.exp_string("Namada ledger node started")?; - ledger.exp_string("This node is not a validator")?; + ledger.exp_string(LEDGER_STARTED)?; + ledger.exp_string(NON_VALIDATOR_NODE)?; } Ok(()) @@ -133,20 +137,20 @@ fn test_node_connectivity_and_consensus() -> Result<()> { None, )?; - allow_duplicate_ips(&test, &test.net.chain_id, &Who::Validator(0)); - allow_duplicate_ips(&test, &test.net.chain_id, &Who::Validator(1)); + allow_duplicate_ips(&test, &test.net.chain_id, Who::Validator(0)); + allow_duplicate_ips(&test, &test.net.chain_id, Who::Validator(1)); set_ethereum_bridge_mode( &test, &test.net.chain_id, - &Who::Validator(0), + Who::Validator(0), ethereum_bridge::ledger::Mode::Off, None, ); set_ethereum_bridge_mode( &test, &test.net.chain_id, - &Who::Validator(1), + Who::Validator(1), ethereum_bridge::ledger::Mode::Off, None, ); @@ -162,7 +166,7 @@ fn test_node_connectivity_and_consensus() -> Result<()> { start_namada_ledger_node_wait_wasm(&test, None, Some(40))?.background(); // 2. Cross over epoch to check for consensus with multiple nodes - let validator_one_rpc = get_actor_rpc(&test, &Who::Validator(0)); + let validator_one_rpc = get_actor_rpc(&test, Who::Validator(0)); let _ = epoch_sleep(&test, &validator_one_rpc, 720)?; // 3. Submit a valid token transfer tx @@ -184,8 +188,7 @@ fn test_node_connectivity_and_consensus() -> Result<()> { &validator_one_rpc, ]; let mut client = run!(test, Bin::Client, tx_args, Some(40))?; - client.exp_string("Transaction applied with result:")?; - client.exp_string("Transaction is valid.")?; + client.exp_string(TX_APPLIED_SUCCESS)?; client.assert_success(); // 4. Check that all the nodes processed the tx with the same result @@ -199,9 +202,9 @@ fn test_node_connectivity_and_consensus() -> Result<()> { let _bg_validator_0 = validator_0.background(); let _bg_validator_1 = validator_1.background(); - let validator_0_rpc = get_actor_rpc(&test, &Who::Validator(0)); - let validator_1_rpc = get_actor_rpc(&test, &Who::Validator(1)); - let non_validator_rpc = get_actor_rpc(&test, &Who::NonValidator); + let validator_0_rpc = get_actor_rpc(&test, Who::Validator(0)); + let validator_1_rpc = get_actor_rpc(&test, Who::Validator(1)); + let non_validator_rpc = get_actor_rpc(&test, Who::NonValidator); // Find the block height on the validator let after_tx_height = get_height(&test, &validator_0_rpc)?; @@ -236,7 +239,7 @@ fn test_namada_shuts_down_if_tendermint_dies() -> Result<()> { set_ethereum_bridge_mode( &test, &test.net.chain_id, - &Who::Validator(0), + Who::Validator(0), ethereum_bridge::ledger::Mode::Off, None, ); @@ -258,7 +261,7 @@ fn test_namada_shuts_down_if_tendermint_dies() -> Result<()> { ledger.exp_string("Tendermint node is no longer running.")?; // 4. 
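Both test modules now pull their expected log lines from a shared `crate::strings` module instead of repeating string literals at every call site. That module is not part of this diff; the sketch below shows the rough shape implied by the imports, reusing the literals the constants replace where the substitution is one-to-one, and clearly marked placeholders where the new client wording is not visible here.

```rust
// tests/src/strings.rs -- hypothetical reconstruction, not the real file.

// One-to-one replacements in this diff, so these values are taken verbatim
// from the literals they replace:
pub const LEDGER_STARTED: &str = "Namada ledger node started";
pub const LEDGER_SHUTDOWN: &str = "Namada ledger node has shut down.";
pub const VALIDATOR_NODE: &str = "This node is a validator";
pub const NON_VALIDATOR_NODE: &str = "This node is not a validator";
pub const TX_ACCEPTED: &str = "Transaction accepted";
pub const TX_FAILED: &str = "Transaction failed";

// These replace what used to be several different expectations, so the exact
// client wording is not visible in this diff; placeholders only:
pub const TX_APPLIED_SUCCESS: &str = "<successful-apply message>";
pub const TX_REJECTED: &str = "<rejected-by-VP message>";
```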
Check that the ledger node shuts down - ledger.exp_string("Namada ledger node has shut down.")?; + ledger.exp_string(LEDGER_SHUTDOWN)?; ledger.exp_eof()?; Ok(()) @@ -278,7 +281,7 @@ fn run_ledger_load_state_and_reset() -> Result<()> { set_ethereum_bridge_mode( &test, &test.net.chain_id, - &Who::Validator(0), + Who::Validator(0), ethereum_bridge::ledger::Mode::Off, None, ); @@ -292,7 +295,7 @@ fn run_ledger_load_state_and_reset() -> Result<()> { ledger.exp_regex(r"Committed block hash.*, height: [0-9]+")?; let bg_ledger = ledger.background(); // Wait for a new epoch - let validator_one_rpc = get_actor_rpc(&test, &Who::Validator(0)); + let validator_one_rpc = get_actor_rpc(&test, Who::Validator(0)); epoch_sleep(&test, &validator_one_rpc, 30)?; // 2. Shut it down @@ -300,7 +303,7 @@ fn run_ledger_load_state_and_reset() -> Result<()> { ledger.interrupt()?; // Wait for the node to stop running to finish writing the state and tx // queue - ledger.exp_string("Namada ledger node has shut down.")?; + ledger.exp_string(LEDGER_SHUTDOWN)?; ledger.exp_eof()?; drop(ledger); @@ -352,7 +355,7 @@ fn suspend_ledger() -> Result<()> { Some(40) )?; - ledger.exp_string("Namada ledger node started")?; + ledger.exp_string(LEDGER_STARTED)?; // There should be no previous state ledger.exp_string("No state could be found")?; // Wait to commit a block @@ -361,7 +364,7 @@ fn suspend_ledger() -> Result<()> { let bg_ledger = ledger.background(); // 2. Query the ledger - let validator_one_rpc = get_actor_rpc(&test, &Who::Validator(0)); + let validator_one_rpc = get_actor_rpc(&test, Who::Validator(0)); let mut client = run!( test, Bin::Client, @@ -375,7 +378,7 @@ fn suspend_ledger() -> Result<()> { ledger.interrupt()?; // Wait for the node to stop running to finish writing the state and tx // queue - ledger.exp_string("Namada ledger node has shut down.")?; + ledger.exp_string(LEDGER_SHUTDOWN)?; ledger.exp_eof()?; Ok(()) } @@ -394,7 +397,7 @@ fn stop_ledger_at_height() -> Result<()> { Some(40) )?; - ledger.exp_string("Namada ledger node started")?; + ledger.exp_string(LEDGER_STARTED)?; // There should be no previous state ledger.exp_string("No state could be found")?; // Wait to commit a block @@ -420,7 +423,7 @@ fn ledger_txs_and_queries() -> Result<()> { set_ethereum_bridge_mode( &test, &test.net.chain_id, - &Who::Validator(0), + Who::Validator(0), ethereum_bridge::ledger::Mode::Off, None, ); @@ -435,10 +438,10 @@ fn ledger_txs_and_queries() -> Result<()> { source: find_address(&test, BERTHA).unwrap(), target: find_address(&test, ALBERT).unwrap(), token: find_address(&test, NAM).unwrap(), - amount: token::DenominatedAmount { - amount: token::Amount::native_whole(10), - denom: token::NATIVE_MAX_DECIMAL_PLACES.into(), - }, + amount: token::DenominatedAmount::new( + token::Amount::native_whole(10), + token::NATIVE_MAX_DECIMAL_PLACES.into(), + ), key: None, shielded: None, } @@ -447,7 +450,7 @@ fn ledger_txs_and_queries() -> Result<()> { std::fs::write(&tx_data_path, transfer).unwrap(); let tx_data_path = tx_data_path.to_string_lossy(); - let validator_one_rpc = get_actor_rpc(&test, &Who::Validator(0)); + let validator_one_rpc = get_actor_rpc(&test, Who::Validator(0)); let multisig_account = format!("{},{},{}", BERTHA_KEY, ALBERT_KEY, CHRISTEL_KEY); @@ -573,10 +576,9 @@ fn ledger_txs_and_queries() -> Result<()> { let mut client = run!(test, Bin::Client, tx_args, Some(40))?; if !dry_run { - client.exp_string("Transaction accepted")?; - client.exp_string("Transaction applied")?; + client.exp_string(TX_ACCEPTED)?; } - 
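This hunk, together with the matching ones in `pos_init_validator` and `tests/src/e2e/setup.rs` further down, replaces struct-literal construction of `token::DenominatedAmount` with a `new` constructor and a field access with an `.amount()` getter, the usual shape when a type's fields are made private. A stand-in illustration with made-up types, not the real namada definitions:

```rust
#[derive(Clone, Copy, Debug)]
struct Amount(u64);

#[derive(Clone, Copy, Debug)]
struct DenominatedAmount {
    // Private fields: constructed via `new`, read via accessors.
    amount: Amount,
    denom: u8,
}

impl DenominatedAmount {
    fn new(amount: Amount, denom: u8) -> Self {
        Self { amount, denom }
    }

    fn amount(&self) -> Amount {
        self.amount
    }

    fn denom(&self) -> u8 {
        self.denom
    }
}

fn main() {
    let amt = DenominatedAmount::new(Amount(10), 6);
    println!("{:?} with {} decimal places", amt.amount(), amt.denom());
}
```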
client.exp_string("Transaction is valid.")?; + client.exp_string(TX_APPLIED_SUCCESS)?; client.assert_success(); } } @@ -710,7 +712,7 @@ fn wrapper_disposable_signer() -> Result<()> { start_namada_ledger_node_wait_wasm(&test, Some(0), Some(40))? .background(); - let validator_one_rpc = get_actor_rpc(&test, &Who::Validator(0)); + let validator_one_rpc = get_actor_rpc(&test, Who::Validator(0)); let _ep1 = epoch_sleep(&test, &validator_one_rpc, 720)?; @@ -729,9 +731,8 @@ fn wrapper_disposable_signer() -> Result<()> { ]; let mut client = run!(test, Bin::Client, tx_args, Some(720))?; - client.exp_string("Transaction accepted")?; - client.exp_string("Transaction applied")?; - client.exp_string("Transaction is valid")?; + client.exp_string(TX_ACCEPTED)?; + client.exp_string(TX_APPLIED_SUCCESS)?; let _ep1 = epoch_sleep(&test, &validator_one_rpc, 720)?; let tx_args = vec![ @@ -752,9 +753,8 @@ fn wrapper_disposable_signer() -> Result<()> { ]; let mut client = run!(test, Bin::Client, tx_args, Some(720))?; - client.exp_string("Transaction accepted")?; - client.exp_string("Transaction applied")?; - client.exp_string("Transaction is valid")?; + client.exp_string(TX_ACCEPTED)?; + client.exp_string(TX_APPLIED_SUCCESS)?; let _ep1 = epoch_sleep(&test, &validator_one_rpc, 720)?; let tx_args = vec![ "transfer", @@ -773,6 +773,9 @@ fn wrapper_disposable_signer() -> Result<()> { "--disposable-gas-payer", "--ledger-address", &validator_one_rpc, + // NOTE: Forcing the transaction will make the client produce a + // transfer without a masp object attached to it, so don't expect a + // failure from the masp vp here but from the check_fees function "--force", ]; let mut client = run!(test, Bin::Client, tx_args, Some(720))?; @@ -794,7 +797,7 @@ fn invalid_transactions() -> Result<()> { set_ethereum_bridge_mode( &test, &test.net.chain_id, - &Who::Validator(0), + Who::Validator(0), ethereum_bridge::ledger::Mode::Off, None, ); @@ -806,7 +809,7 @@ fn invalid_transactions() -> Result<()> { // 2. 
Submit a an invalid transaction (trying to transfer tokens should fail // in the user's VP due to the wrong signer) - let validator_one_rpc = get_actor_rpc(&test, &Who::Validator(0)); + let validator_one_rpc = get_actor_rpc(&test, Who::Validator(0)); let tx_args = vec![ "transfer", @@ -826,10 +829,8 @@ fn invalid_transactions() -> Result<()> { ]; let mut client = run!(test, Bin::Client, tx_args, Some(40))?; - client.exp_string("Transaction accepted")?; - client.exp_string("Transaction applied")?; - client.exp_string("Transaction is invalid")?; - client.exp_string(r#""code": "2"#)?; + client.exp_string(TX_ACCEPTED)?; + client.exp_string(TX_REJECTED)?; client.assert_success(); let mut ledger = bg_ledger.foreground(); @@ -842,7 +843,7 @@ fn invalid_transactions() -> Result<()> { ledger.interrupt()?; // Wait for the node to stop running to finish writing the state and tx // queue - ledger.exp_string("Namada ledger node has shut down.")?; + ledger.exp_string(LEDGER_SHUTDOWN)?; ledger.exp_eof()?; drop(ledger); @@ -880,13 +881,8 @@ fn invalid_transactions() -> Result<()> { ]; let mut client = run!(test, Bin::Client, tx_args, Some(40))?; - client.exp_string("Transaction accepted")?; - client.exp_string("Transaction applied")?; - - client.exp_string("Error trying to apply a transaction")?; - - client.exp_string(r#""code": "1"#)?; - + client.exp_string(TX_ACCEPTED)?; + client.exp_string(TX_FAILED)?; client.assert_success(); Ok(()) } @@ -935,12 +931,12 @@ fn pos_bonds() -> Result<()> { }, None, )?; - allow_duplicate_ips(&test, &test.net.chain_id, &Who::Validator(0)); - allow_duplicate_ips(&test, &test.net.chain_id, &Who::Validator(1)); + allow_duplicate_ips(&test, &test.net.chain_id, Who::Validator(0)); + allow_duplicate_ips(&test, &test.net.chain_id, Who::Validator(1)); set_ethereum_bridge_mode( &test, &test.net.chain_id, - &Who::Validator(0), + Who::Validator(0), ethereum_bridge::ledger::Mode::Off, None, ); @@ -950,7 +946,7 @@ fn pos_bonds() -> Result<()> { start_namada_ledger_node_wait_wasm(&test, Some(0), Some(40))? .background(); - let validator_0_rpc = get_actor_rpc(&test, &Who::Validator(0)); + let validator_0_rpc = get_actor_rpc(&test, Who::Validator(0)); // 2. Submit a self-bond for the first genesis validator let tx_args = vec![ @@ -966,8 +962,7 @@ fn pos_bonds() -> Result<()> { ]; let mut client = run_as!(test, Who::Validator(0), Bin::Client, tx_args, Some(40))?; - client.exp_string("Transaction applied with result:")?; - client.exp_string("Transaction is valid.")?; + client.exp_string(TX_APPLIED_SUCCESS)?; client.assert_success(); // 3. Submit a delegation to the first genesis validator @@ -985,8 +980,7 @@ fn pos_bonds() -> Result<()> { &validator_0_rpc, ]; let mut client = run!(test, Bin::Client, tx_args, Some(40))?; - client.exp_string("Transaction applied with result:")?; - client.exp_string("Transaction is valid.")?; + client.exp_string(TX_APPLIED_SUCCESS)?; client.assert_success(); // 4. Submit a re-delegation from the first to the second genesis validator @@ -1006,8 +1000,7 @@ fn pos_bonds() -> Result<()> { &validator_0_rpc, ]; let mut client = run!(test, Bin::Client, tx_args, Some(40))?; - client.exp_string("Transaction applied with result:")?; - client.exp_string("Transaction is valid.")?; + client.exp_string(TX_APPLIED_SUCCESS)?; client.assert_success(); // 5. 
Submit an unbond of the self-bond @@ -1103,8 +1096,7 @@ fn pos_bonds() -> Result<()> { ]; let mut client = run_as!(test, Who::Validator(0), Bin::Client, tx_args, Some(40))?; - client.exp_string("Transaction applied with result:")?; - client.exp_string("Transaction is valid.")?; + client.exp_string(TX_APPLIED_SUCCESS)?; client.assert_success(); // 10. Submit a withdrawal of the delegation @@ -1120,8 +1112,7 @@ fn pos_bonds() -> Result<()> { &validator_0_rpc, ]; let mut client = run!(test, Bin::Client, tx_args, Some(40))?; - client.exp_string("Transaction applied with result:")?; - client.exp_string("Transaction is valid.")?; + client.exp_string(TX_APPLIED_SUCCESS)?; client.assert_success(); // 11. Submit an withdrawal of the re-delegation @@ -1137,8 +1128,7 @@ fn pos_bonds() -> Result<()> { &validator_0_rpc, ]; let mut client = run!(test, Bin::Client, tx_args, Some(40))?; - client.exp_string("Transaction applied with result:")?; - client.exp_string("Transaction is valid.")?; + client.exp_string(TX_APPLIED_SUCCESS)?; client.assert_success(); Ok(()) @@ -1169,7 +1159,7 @@ fn pos_rewards() -> Result<()> { set_ethereum_bridge_mode( &test, &test.net.chain_id, - &Who::Validator(i), + Who::Validator(i), ethereum_bridge::ledger::Mode::Off, None, ); @@ -1180,7 +1170,7 @@ fn pos_rewards() -> Result<()> { start_namada_ledger_node_wait_wasm(&test, Some(0), Some(40))? .background(); - let validator_0_rpc = get_actor_rpc(&test, &Who::Validator(0)); + let validator_0_rpc = get_actor_rpc(&test, Who::Validator(0)); // Query the current rewards for the validator self-bond let tx_args = vec![ @@ -1284,8 +1274,7 @@ fn pos_rewards() -> Result<()> { ]; let mut client = run_as!(test, Who::Validator(0), Bin::Client, tx_args, Some(40))?; - client.exp_string("Transaction applied with result:")?; - client.exp_string("Transaction is valid.")?; + client.exp_string(TX_APPLIED_SUCCESS)?; client.assert_success(); // Query the validator balance again and check that the balance has grown @@ -1343,7 +1332,7 @@ fn test_bond_queries() -> Result<()> { start_namada_ledger_node_wait_wasm(&test, Some(0), Some(40))? .background(); - let validator_one_rpc = get_actor_rpc(&test, &Who::Validator(0)); + let validator_one_rpc = get_actor_rpc(&test, Who::Validator(0)); let validator_alias = "validator-0"; // 2. Submit a delegation to the genesis validator @@ -1358,7 +1347,7 @@ fn test_bond_queries() -> Result<()> { ]; let mut client = run_as!(test, Who::Validator(0), Bin::Client, tx_args, Some(40))?; - client.exp_string("Transaction is valid.")?; + client.exp_string(TX_APPLIED_SUCCESS)?; client.assert_success(); // 3. Submit a delegation to the genesis validator @@ -1376,8 +1365,7 @@ fn test_bond_queries() -> Result<()> { &validator_one_rpc, ]; let mut client = run!(test, Bin::Client, tx_args, Some(40))?; - client.exp_string("Transaction applied with result:")?; - client.exp_string("Transaction is valid.")?; + client.exp_string(TX_APPLIED_SUCCESS)?; client.assert_success(); // 3. Wait for epoch 4 @@ -1408,8 +1396,7 @@ fn test_bond_queries() -> Result<()> { &validator_one_rpc, ]; let mut client = run!(test, Bin::Client, tx_args, Some(40))?; - client.exp_string("Transaction applied with result:")?; - client.exp_string("Transaction is valid.")?; + client.exp_string(TX_APPLIED_SUCCESS)?; client.assert_success(); // 5. 
Submit an unbond of the delegation @@ -1427,8 +1414,7 @@ fn test_bond_queries() -> Result<()> { &validator_one_rpc, ]; let mut client = run!(test, Bin::Client, tx_args, Some(40))?; - client.exp_string("Transaction applied with result:")?; - client.exp_string("Transaction is valid.")?; + client.exp_string(TX_APPLIED_SUCCESS)?; let (_, res) = client .exp_regex(r"withdrawable starting from epoch [0-9]+") .unwrap(); @@ -1498,7 +1484,7 @@ fn pos_init_validator() -> Result<()> { .amount .increase_precision(NATIVE_MAX_DECIMAL_PLACES.into()) .unwrap() - .amount + .amount() }) .sum::(); assert_eq!( @@ -1523,7 +1509,7 @@ fn pos_init_validator() -> Result<()> { non_validator.exp_string("Committed block hash")?; let bg_non_validator = non_validator.background(); - let non_validator_rpc = get_actor_rpc(&test, &Who::NonValidator); + let non_validator_rpc = get_actor_rpc(&test, Who::NonValidator); // 2. Initialize a new validator account with the non-validator node let new_validator = "new-validator"; @@ -1547,7 +1533,7 @@ fn pos_init_validator() -> Result<()> { "--unsafe-dont-encrypt", ]; let mut client = run!(test, Bin::Client, tx_args, Some(40))?; - client.exp_string("Transaction is valid.")?; + client.exp_string(TX_APPLIED_SUCCESS)?; client.assert_success(); // 3. Submit a delegation to the new validator @@ -1568,7 +1554,7 @@ fn pos_init_validator() -> Result<()> { &non_validator_rpc, ]; let mut client = run!(test, Bin::Client, tx_args, Some(40))?; - client.exp_string("Transaction is valid.")?; + client.exp_string(TX_APPLIED_SUCCESS)?; client.assert_success(); // Then self-bond the tokens: let delegation = 5_u64; @@ -1587,7 +1573,7 @@ fn pos_init_validator() -> Result<()> { &non_validator_rpc, ]; let mut client = run!(test, Bin::Client, tx_args, Some(40))?; - client.exp_string("Transaction is valid.")?; + client.exp_string(TX_APPLIED_SUCCESS)?; client.assert_success(); // 4. Transfer some NAM to the new validator @@ -1608,7 +1594,7 @@ fn pos_init_validator() -> Result<()> { &non_validator_rpc, ]; let mut client = run!(test, Bin::Client, tx_args, Some(40))?; - client.exp_string("Transaction is valid.")?; + client.exp_string(TX_APPLIED_SUCCESS)?; client.assert_success(); // 5. 
Submit a self-bond for the new validator @@ -1622,7 +1608,7 @@ fn pos_init_validator() -> Result<()> { &non_validator_rpc, ]; let mut client = run!(test, Bin::Client, tx_args, Some(40))?; - client.exp_string("Transaction is valid.")?; + client.exp_string(TX_APPLIED_SUCCESS)?; client.assert_success(); // Stop the non-validator node and run it as the new validator @@ -1638,7 +1624,7 @@ fn pos_init_validator() -> Result<()> { } let loc = format!("{}:{}", std::file!(), std::line!()); - let validator_1_base_dir = test.get_base_dir(&Who::NonValidator); + let validator_1_base_dir = test.get_base_dir(Who::NonValidator); let mut validator_1 = setup::run_cmd( Bin::Node, ["ledger"], @@ -1648,8 +1634,8 @@ fn pos_init_validator() -> Result<()> { loc, )?; - validator_1.exp_string("Namada ledger node started")?; - validator_1.exp_string("This node is a validator")?; + validator_1.exp_string(LEDGER_STARTED)?; + validator_1.exp_string(VALIDATOR_NODE)?; validator_1.exp_string("Committed block hash")?; let _bg_validator_1 = validator_1.background(); @@ -1702,7 +1688,7 @@ fn ledger_many_txs_in_a_block() -> Result<()> { set_ethereum_bridge_mode( &test, &test.net.chain_id, - &Who::Validator(0), + Who::Validator(0), ethereum_bridge::ledger::Mode::Off, None, ); @@ -1712,7 +1698,7 @@ fn ledger_many_txs_in_a_block() -> Result<()> { start_namada_ledger_node_wait_wasm(&test, Some(0), Some(40))? .background(); - let validator_one_rpc = Arc::new(get_actor_rpc(&test, &Who::Validator(0))); + let validator_one_rpc = Arc::new(get_actor_rpc(&test, Who::Validator(0))); // A token transfer tx args let tx_args = Arc::new(vec![ @@ -1742,9 +1728,8 @@ fn ledger_many_txs_in_a_block() -> Result<()> { let mut args = (*tx_args).clone(); args.push(&*validator_one_rpc); let mut client = run!(*test, Bin::Client, args, Some(80))?; - client.exp_string("Transaction accepted")?; - client.exp_string("Transaction applied")?; - client.exp_string("Transaction is valid.")?; + client.exp_string(TX_ACCEPTED)?; + client.exp_string(TX_APPLIED_SUCCESS)?; client.assert_success(); let res: Result<()> = Ok(()); res @@ -1788,7 +1773,7 @@ fn proposal_submission() -> Result<()> { set_ethereum_bridge_mode( &test, &test.net.chain_id, - &Who::Validator(0), + Who::Validator(0), ethereum_bridge::ledger::Mode::Off, None, ); @@ -1804,7 +1789,7 @@ fn proposal_submission() -> Result<()> { start_namada_ledger_node_wait_wasm(&test, Some(0), Some(40))? .background(); - let validator_0_rpc = get_actor_rpc(&test, &Who::Validator(0)); + let validator_0_rpc = get_actor_rpc(&test, Who::Validator(0)); // 1.1 Delegate some token let tx_args = vec![ @@ -1819,7 +1804,7 @@ fn proposal_submission() -> Result<()> { &validator_0_rpc, ]; let mut client = run!(test, Bin::Client, tx_args, Some(40))?; - client.exp_string("Transaction is valid.")?; + client.exp_string(TX_APPLIED_SUCCESS)?; client.assert_success(); // 2. 
Submit valid proposal @@ -1830,7 +1815,7 @@ fn proposal_submission() -> Result<()> { TestWasms::TxProposalCode.read_bytes(), 12, ); - let validator_one_rpc = get_actor_rpc(&test, &Who::Validator(0)); + let validator_one_rpc = get_actor_rpc(&test, Who::Validator(0)); let submit_proposal_args = vec![ "init-proposal", @@ -1842,8 +1827,7 @@ fn proposal_submission() -> Result<()> { &validator_one_rpc, ]; let mut client = run!(test, Bin::Client, submit_proposal_args, Some(40))?; - client.exp_string("Transaction applied")?; - client.exp_string("Transaction is valid.")?; + client.exp_string(TX_APPLIED_SUCCESS)?; client.assert_success(); // Wait for the proposal to be committed @@ -1972,7 +1956,7 @@ fn proposal_submission() -> Result<()> { submit_proposal_vote, Some(15) )?; - client.exp_string("Transaction is valid.")?; + client.exp_string(TX_APPLIED_SUCCESS)?; client.assert_success(); let submit_proposal_vote_delagator = vec![ @@ -1989,7 +1973,7 @@ fn proposal_submission() -> Result<()> { let mut client = run!(test, Bin::Client, submit_proposal_vote_delagator, Some(40))?; - client.exp_string("Transaction is valid.")?; + client.exp_string(TX_APPLIED_SUCCESS)?; client.assert_success(); // 10. Send a yay vote from a non-validator/non-delegator user @@ -2008,7 +1992,7 @@ fn proposal_submission() -> Result<()> { // this is valid because the client filter ALBERT delegation and there are // none let mut client = run!(test, Bin::Client, submit_proposal_vote, Some(15))?; - client.exp_string("Transaction is valid.")?; + client.exp_string(TX_APPLIED_SUCCESS)?; client.assert_success(); // 11. Query the proposal and check the result @@ -2113,7 +2097,7 @@ fn pgf_governance_proposal() -> Result<()> { set_ethereum_bridge_mode( &test, &test.net.chain_id, - &Who::Validator(0), + Who::Validator(0), ethereum_bridge::ledger::Mode::Off, None, ); @@ -2129,7 +2113,7 @@ fn pgf_governance_proposal() -> Result<()> { start_namada_ledger_node_wait_wasm(&test, Some(0), Some(40))? 
.background(); - let validator_one_rpc = get_actor_rpc(&test, &Who::Validator(0)); + let validator_one_rpc = get_actor_rpc(&test, Who::Validator(0)); // Delegate some token let tx_args = vec![ @@ -2144,8 +2128,7 @@ fn pgf_governance_proposal() -> Result<()> { &validator_one_rpc, ]; let mut client = run!(test, Bin::Client, tx_args, Some(40))?; - client.exp_string("Transaction applied with result:")?; - client.exp_string("Transaction is valid.")?; + client.exp_string(TX_APPLIED_SUCCESS)?; client.assert_success(); // 1 - Submit proposal @@ -2157,7 +2140,7 @@ fn pgf_governance_proposal() -> Result<()> { let valid_proposal_json_path = prepare_proposal_data(&test, albert, pgf_stewards, 12); - let validator_one_rpc = get_actor_rpc(&test, &Who::Validator(0)); + let validator_one_rpc = get_actor_rpc(&test, Who::Validator(0)); let submit_proposal_args = vec![ "init-proposal", @@ -2168,8 +2151,7 @@ fn pgf_governance_proposal() -> Result<()> { &validator_one_rpc, ]; let mut client = run!(test, Bin::Client, submit_proposal_args, Some(40))?; - client.exp_string("Transaction applied with result:")?; - client.exp_string("Transaction is valid.")?; + client.exp_string(TX_APPLIED_SUCCESS)?; client.assert_success(); // 2 - Query the proposal @@ -2242,8 +2224,7 @@ fn pgf_governance_proposal() -> Result<()> { submit_proposal_vote, Some(15) )?; - client.exp_string("Transaction applied with result:")?; - client.exp_string("Transaction is valid.")?; + client.exp_string(TX_APPLIED_SUCCESS)?; client.assert_success(); // Send different yay vote from delegator to check majority on 1/3 @@ -2261,8 +2242,7 @@ fn pgf_governance_proposal() -> Result<()> { let mut client = run!(test, Bin::Client, submit_proposal_vote_delagator, Some(40))?; - client.exp_string("Transaction applied with result:")?; - client.exp_string("Transaction is valid.")?; + client.exp_string(TX_APPLIED_SUCCESS)?; client.assert_success(); // 4 - Query the proposal and check the result is the one voted by the @@ -2349,7 +2329,7 @@ fn pgf_governance_proposal() -> Result<()> { let valid_proposal_json_path = prepare_proposal_data(&test, albert, pgf_funding, 36); - let validator_one_rpc = get_actor_rpc(&test, &Who::Validator(0)); + let validator_one_rpc = get_actor_rpc(&test, Who::Validator(0)); let submit_proposal_args = vec![ "init-proposal", @@ -2360,8 +2340,7 @@ fn pgf_governance_proposal() -> Result<()> { &validator_one_rpc, ]; let mut client = run!(test, Bin::Client, submit_proposal_args, Some(40))?; - client.exp_string("Transaction applied with result:")?; - client.exp_string("Transaction is valid.")?; + client.exp_string(TX_APPLIED_SUCCESS)?; client.assert_success(); // 2 - Query the funding proposal @@ -2427,7 +2406,7 @@ fn proposal_offline() -> Result<()> { set_ethereum_bridge_mode( &test, &test.net.chain_id, - &Who::Validator(0), + Who::Validator(0), ethereum_bridge::ledger::Mode::Off, None, ); @@ -2437,7 +2416,7 @@ fn proposal_offline() -> Result<()> { start_namada_ledger_node_wait_wasm(&test, Some(0), Some(40))? .background(); - let validator_one_rpc = get_actor_rpc(&test, &Who::Validator(0)); + let validator_one_rpc = get_actor_rpc(&test, Who::Validator(0)); // 1.1 Delegate some token let tx_args = vec![ @@ -2452,8 +2431,7 @@ fn proposal_offline() -> Result<()> { &validator_one_rpc, ]; let mut client = run!(test, Bin::Client, tx_args, Some(40))?; - client.exp_string("Transaction applied with result:")?; - client.exp_string("Transaction is valid.")?; + client.exp_string(TX_APPLIED_SUCCESS)?; client.assert_success(); // 2. 
Create an offline proposal @@ -2488,7 +2466,7 @@ fn proposal_offline() -> Result<()> { epoch = get_epoch(&test, &validator_one_rpc).unwrap(); } - let validator_one_rpc = get_actor_rpc(&test, &Who::Validator(0)); + let validator_one_rpc = get_actor_rpc(&test, Who::Validator(0)); let offline_proposal_args = vec![ "init-proposal", @@ -2607,20 +2585,20 @@ fn double_signing_gets_slashed() -> Result<()> { None, )?; - allow_duplicate_ips(&test, &test.net.chain_id, &Who::Validator(0)); - allow_duplicate_ips(&test, &test.net.chain_id, &Who::Validator(1)); + allow_duplicate_ips(&test, &test.net.chain_id, Who::Validator(0)); + allow_duplicate_ips(&test, &test.net.chain_id, Who::Validator(1)); set_ethereum_bridge_mode( &test, &test.net.chain_id, - &Who::Validator(0), + Who::Validator(0), ethereum_bridge::ledger::Mode::Off, None, ); set_ethereum_bridge_mode( &test, &test.net.chain_id, - &Who::Validator(1), + Who::Validator(1), ethereum_bridge::ledger::Mode::Off, None, ); @@ -2636,18 +2614,18 @@ fn double_signing_gets_slashed() -> Result<()> { let mut validator_2 = run_as!(test, Who::Validator(2), Bin::Node, &["ledger"], Some(40))?; - validator_2.exp_string("Namada ledger node started")?; - validator_2.exp_string("This node is a validator")?; + validator_2.exp_string(LEDGER_STARTED)?; + validator_2.exp_string(VALIDATOR_NODE)?; let _bg_validator_2 = validator_2.background(); let mut validator_3 = run_as!(test, Who::Validator(3), Bin::Node, &["ledger"], Some(40))?; - validator_3.exp_string("Namada ledger node started")?; - validator_3.exp_string("This node is a validator")?; + validator_3.exp_string(LEDGER_STARTED)?; + validator_3.exp_string(VALIDATOR_NODE)?; let _bg_validator_3 = validator_3.background(); // 2. Copy the first genesis validator base-dir - let validator_0_base_dir = test.get_base_dir(&Who::Validator(0)); + let validator_0_base_dir = test.get_base_dir(Who::Validator(0)); let validator_0_base_dir_copy = test .test_dir .path() @@ -2734,12 +2712,12 @@ fn double_signing_gets_slashed() -> Result<()> { validator_0_base_dir_copy, loc, )?; - validator_0_copy.exp_string("Namada ledger node started")?; - validator_0_copy.exp_string("This node is a validator")?; + validator_0_copy.exp_string(LEDGER_STARTED)?; + validator_0_copy.exp_string(VALIDATOR_NODE)?; let _bg_validator_0_copy = validator_0_copy.background(); // 5. Submit a valid token transfer tx to validator 0 - let validator_one_rpc = get_actor_rpc(&test, &Who::Validator(0)); + let validator_one_rpc = get_actor_rpc(&test, Who::Validator(0)); let tx_args = [ "transfer", "--source", @@ -2837,8 +2815,7 @@ fn double_signing_gets_slashed() -> Result<()> { ]; let mut client = run_as!(test, Who::Validator(0), Bin::Client, tx_args, Some(40))?; - client.exp_string("Transaction applied with result:")?; - client.exp_string("Transaction is valid.")?; + client.exp_string(TX_APPLIED_SUCCESS)?; client.assert_success(); // Wait until pipeline epoch to see if the validator is back in consensus @@ -2874,7 +2851,7 @@ fn double_signing_gets_slashed() -> Result<()> { validator_1.interrupt()?; // Wait for the node to stop running to finish writing the state and tx // queue - validator_1.exp_string("Namada ledger node has shut down.")?; + validator_1.exp_string(LEDGER_SHUTDOWN)?; validator_1.assert_success(); Ok(()) @@ -2899,7 +2876,7 @@ fn implicit_account_reveal_pk() -> Result<()> { .background(); // 2. 
Some transactions that need signature authorization: - let validator_0_rpc = get_actor_rpc(&test, &Who::Validator(0)); + let validator_0_rpc = get_actor_rpc(&test, Who::Validator(0)); let txs_args: Vec Vec>> = vec![ // A token transfer tx Box::new(|source| { @@ -2975,7 +2952,7 @@ fn implicit_account_reveal_pk() -> Result<()> { let mut cmd = run!( test, Bin::Wallet, - &["key", "gen", "--alias", &key_alias, "--unsafe-dont-encrypt"], + &["gen", "--alias", &key_alias, "--unsafe-dont-encrypt"], Some(20), )?; cmd.assert_success(); @@ -3038,7 +3015,7 @@ fn test_epoch_sleep() -> Result<()> { let _bg_ledger = ledger.background(); - let validator_one_rpc = get_actor_rpc(&test, &Who::Validator(0)); + let validator_one_rpc = get_actor_rpc(&test, Who::Validator(0)); // 2. Query the current epoch let start_epoch = get_epoch(&test, &validator_one_rpc).unwrap(); @@ -3128,12 +3105,12 @@ fn deactivate_and_reactivate_validator() -> Result<()> { }, None, )?; - allow_duplicate_ips(&test, &test.net.chain_id, &Who::Validator(0)); - allow_duplicate_ips(&test, &test.net.chain_id, &Who::Validator(1)); + allow_duplicate_ips(&test, &test.net.chain_id, Who::Validator(0)); + allow_duplicate_ips(&test, &test.net.chain_id, Who::Validator(1)); set_ethereum_bridge_mode( &test, &test.net.chain_id, - &Who::Validator(0), + Who::Validator(0), ethereum_bridge::ledger::Mode::Off, None, ); @@ -3147,7 +3124,7 @@ fn deactivate_and_reactivate_validator() -> Result<()> { start_namada_ledger_node_wait_wasm(&test, Some(1), Some(40))? .background(); - let validator_1_rpc = get_actor_rpc(&test, &Who::Validator(1)); + let validator_1_rpc = get_actor_rpc(&test, Who::Validator(1)); // Check the state of validator-1 let tx_args = vec![ @@ -3173,8 +3150,7 @@ fn deactivate_and_reactivate_validator() -> Result<()> { ]; let mut client = run_as!(test, Who::Validator(1), Bin::Client, tx_args, Some(40))?; - client.exp_string("Transaction applied with result:")?; - client.exp_string("Transaction is valid.")?; + client.exp_string(TX_APPLIED_SUCCESS)?; client.assert_success(); let deactivate_epoch = get_epoch(&test, &validator_1_rpc)?; @@ -3217,8 +3193,7 @@ fn deactivate_and_reactivate_validator() -> Result<()> { ]; let mut client = run_as!(test, Who::Validator(1), Bin::Client, tx_args, Some(40))?; - client.exp_string("Transaction applied with result:")?; - client.exp_string("Transaction is valid.")?; + client.exp_string(TX_APPLIED_SUCCESS)?; client.assert_success(); let reactivate_epoch = get_epoch(&test, &validator_1_rpc)?; @@ -3260,7 +3235,7 @@ fn change_validator_metadata() -> Result<()> { set_ethereum_bridge_mode( &test, &test.net.chain_id, - &Who::Validator(0), + Who::Validator(0), ethereum_bridge::ledger::Mode::Off, None, ); @@ -3270,7 +3245,7 @@ fn change_validator_metadata() -> Result<()> { start_namada_ledger_node_wait_wasm(&test, Some(0), Some(40))? .background(); - let validator_0_rpc = get_actor_rpc(&test, &Who::Validator(0)); + let validator_0_rpc = get_actor_rpc(&test, Who::Validator(0)); // 2. Query the validator metadata loaded from genesis let metadata_query_args = vec![ @@ -3311,7 +3286,7 @@ fn change_validator_metadata() -> Result<()> { metadata_change_args, Some(40) )?; - client.exp_string("Transaction is valid.")?; + client.exp_string(TX_APPLIED_SUCCESS)?; client.assert_success(); // 4. 
Query the metadata after the change @@ -3344,7 +3319,7 @@ fn change_validator_metadata() -> Result<()> { metadata_change_args, Some(40) )?; - client.exp_string("Transaction is valid.")?; + client.exp_string(TX_APPLIED_SUCCESS)?; client.assert_success(); // 6. Query the metadata to see that the validator website is removed @@ -3398,7 +3373,7 @@ fn test_invalid_validator_txs() -> Result<()> { set_ethereum_bridge_mode( &test, &test.net.chain_id, - &Who::Validator(0), + Who::Validator(0), ethereum_bridge::ledger::Mode::Off, None, ); @@ -3412,8 +3387,8 @@ fn test_invalid_validator_txs() -> Result<()> { start_namada_ledger_node_wait_wasm(&test, Some(1), Some(40))? .background(); - let validator_0_rpc = get_actor_rpc(&test, &Who::Validator(0)); - let validator_1_rpc = get_actor_rpc(&test, &Who::Validator(1)); + let validator_0_rpc = get_actor_rpc(&test, Who::Validator(0)); + let validator_1_rpc = get_actor_rpc(&test, Who::Validator(1)); // Try to change validator-1 commission rate as validator-0 let tx_args = vec![ @@ -3429,8 +3404,7 @@ fn test_invalid_validator_txs() -> Result<()> { ]; let mut client = run_as!(test, Who::Validator(0), Bin::Client, tx_args, Some(40))?; - client.exp_string("Transaction applied with result:")?; - client.exp_string("Transaction is invalid.")?; + client.exp_string(TX_REJECTED)?; client.assert_success(); // Try to deactivate validator-1 as validator-0 @@ -3445,8 +3419,7 @@ fn test_invalid_validator_txs() -> Result<()> { ]; let mut client = run_as!(test, Who::Validator(0), Bin::Client, tx_args, Some(40))?; - client.exp_string("Transaction applied with result:")?; - client.exp_string("Transaction is invalid.")?; + client.exp_string(TX_REJECTED)?; client.assert_success(); // Try to change the validator-1 website as validator-0 @@ -3463,8 +3436,7 @@ fn test_invalid_validator_txs() -> Result<()> { ]; let mut client = run_as!(test, Who::Validator(0), Bin::Client, tx_args, Some(40))?; - client.exp_string("Transaction applied with result:")?; - client.exp_string("Transaction is invalid.")?; + client.exp_string(TX_REJECTED)?; client.assert_success(); // Deactivate validator-1 @@ -3479,8 +3451,7 @@ fn test_invalid_validator_txs() -> Result<()> { ]; let mut client = run_as!(test, Who::Validator(1), Bin::Client, tx_args, Some(40))?; - client.exp_string("Transaction applied with result:")?; - client.exp_string("Transaction is valid.")?; + client.exp_string(TX_APPLIED_SUCCESS)?; client.assert_success(); let deactivate_epoch = get_epoch(&test, &validator_1_rpc)?; @@ -3523,9 +3494,135 @@ fn test_invalid_validator_txs() -> Result<()> { ]; let mut client = run_as!(test, Who::Validator(0), Bin::Client, tx_args, Some(40))?; - client.exp_string("Transaction applied with result:")?; - client.exp_string("Transaction is invalid.")?; + client.exp_string(TX_REJECTED)?; client.assert_success(); Ok(()) } + +/// Test change of consensus key of a validator from consensus set. +/// +/// 1. Run 2 genesis validator nodes. +/// 2. Change consensus key of validator-0 +/// 3. Check that no new blocks are being created - chain halted because +/// validator-0 consensus change took effect and it cannot sign with the old +/// key anymore +/// 4. Configure validator-0 node with the new key +/// 5. 
Resume the chain and check that blocks are being created +#[test] +fn change_consensus_key() -> Result<()> { + let min_num_of_blocks = 6; + let pipeline_len = 2; + let test = setup::network( + |mut genesis, base_dir| { + genesis.parameters.parameters.min_num_of_blocks = min_num_of_blocks; + genesis.parameters.parameters.max_expected_time_per_block = 1; + genesis.parameters.parameters.epochs_per_year = 31_536_000; + genesis.parameters.pos_params.pipeline_len = pipeline_len; + genesis.parameters.pos_params.unbonding_len = 4; + setup::set_validators(2, genesis, base_dir, default_port_offset) + }, + None, + )?; + + for i in 0..2 { + set_ethereum_bridge_mode( + &test, + &test.net.chain_id, + Who::Validator(i), + ethereum_bridge::ledger::Mode::Off, + None, + ); + } + + // ========================================================================= + // 1. Run 2 genesis validator ledger nodes + + let bg_validator_0 = + start_namada_ledger_node_wait_wasm(&test, Some(0), Some(40))? + .background(); + + let _bg_validator_1 = + start_namada_ledger_node_wait_wasm(&test, Some(1), Some(40))? + .background(); + + let validator_0_rpc = get_actor_rpc(&test, Who::Validator(0)); + + // ========================================================================= + // 2. Change consensus key of validator-0 + + let tx_args = vec![ + "change-consensus-key", + "--validator", + "validator-0", + "--signing-keys", + "validator-0-balance-key", + "--node", + &validator_0_rpc, + "--unsafe-dont-encrypt", + ]; + let mut client = + run_as!(test, Who::Validator(0), Bin::Client, tx_args, Some(40))?; + client.exp_string(TX_APPLIED_SUCCESS)?; + client.assert_success(); + + // ========================================================================= + // 3. Check that no new blocks are being created - chain halted because + // validator-0 consensus change took effect and it cannot sign with the old + // key anymore + + // Wait for the next epoch + let validator_0_rpc = get_actor_rpc(&test, Who::Validator(0)); + let _epoch = epoch_sleep(&test, &validator_0_rpc, 30)?; + + // The chain should halt before the following (pipeline) epoch + let _err_report = epoch_sleep(&test, &validator_0_rpc, 30) + .expect_err("Chain should halt"); + + // Load validator-0 wallet + println!( + "{}", + "Setting up the new validator consensus key in CometBFT...".blue() + ); + let chain_dir = test.get_chain_dir(Who::Validator(0)); + let mut wallet = namada_apps::wallet::load(&chain_dir).unwrap(); + + // ========================================================================= + // 4. Configure validator-0 node with the new key + + // Get the new consensus SK + let new_key_alias = "validator-0-consensus-key-1"; + let new_sk = wallet.find_secret_key(new_key_alias, None).unwrap(); + // Write the key to CometBFT dir + let cometbft_dir = test.get_cometbft_home(Who::Validator(0)); + namada_apps::node::ledger::tendermint_node::write_validator_key( + cometbft_dir, + &new_sk, + ) + .unwrap(); + println!( + "{}", + "Done setting up the new validator consensus key in CometBFT.".blue() + ); + + // ========================================================================= + // 5. 
Resume the chain and check that blocks are being created + + // Restart validator-0 node + let mut validator_0 = bg_validator_0.foreground(); + validator_0.interrupt().unwrap(); + // Wait for the node to stop running + validator_0.exp_string(LEDGER_SHUTDOWN)?; + validator_0.exp_eof()?; + drop(validator_0); + + let mut validator_0 = start_namada_ledger_node(&test, Some(0), Some(40))?; + // Wait to commit a block + validator_0.exp_regex(r"Committed block hash.*, height: [0-9]+")?; + let _bg_validator_0 = validator_0.background(); + + // Continue to make blocks for another epoch + let _epoch = epoch_sleep(&test, &validator_0_rpc, 40)?; + + Ok(()) +} diff --git a/tests/src/e2e/multitoken_tests/helpers.rs b/tests/src/e2e/multitoken_tests/helpers.rs index 0856691dd5..caddb88f41 100644 --- a/tests/src/e2e/multitoken_tests/helpers.rs +++ b/tests/src/e2e/multitoken_tests/helpers.rs @@ -17,6 +17,7 @@ use super::setup::constants::NAM; use super::setup::{Bin, NamadaCmd, Test}; use crate::e2e::setup::constants::ALBERT; use crate::run; +use crate::strings::{TX_ACCEPTED, TX_APPLIED_SUCCESS}; const MULTITOKEN_KEY_SEGMENT: &str = "tokens"; const BALANCE_KEY_SEGMENT: &str = "balance"; @@ -53,11 +54,10 @@ pub fn init_multitoken_vp(test: &Test, rpc_addr: &str) -> Result { "--ledger-address", rpc_addr, ]; - let mut client_init_account = - run!(test, Bin::Client, init_account_args, Some(40))?; - client_init_account.exp_string("Transaction is valid.")?; - client_init_account.exp_string("Transaction applied")?; - client_init_account.assert_success(); + let mut cmd = run!(test, Bin::Client, init_account_args, Some(40))?; + cmd.exp_string(TX_ACCEPTED)?; + cmd.exp_string(TX_APPLIED_SUCCESS)?; + cmd.assert_success(); Ok(multitoken_alias.to_string()) } @@ -120,10 +120,10 @@ pub fn mint_red_tokens( "--ledger-address", rpc_addr, ]; - let mut client_tx = run!(test, Bin::Client, tx_args, Some(40))?; - client_tx.exp_string("Transaction is valid.")?; - client_tx.exp_string("Transaction applied")?; - client_tx.assert_success(); + let mut cmd = run!(test, Bin::Client, tx_args, Some(40))?; + cmd.exp_string(TX_ACCEPTED)?; + cmd.exp_string(TX_APPLIED_SUCCESS)?; + cmd.assert_success(); Ok(()) } diff --git a/tests/src/e2e/setup.rs b/tests/src/e2e/setup.rs index c0c2bd59fb..1db62a2fe1 100644 --- a/tests/src/e2e/setup.rs +++ b/tests/src/e2e/setup.rs @@ -86,7 +86,7 @@ pub fn default_port_offset(ix: u8) -> u16 { pub fn update_actor_config( test: &Test, chain_id: &ChainId, - who: &Who, + who: Who, update: F, ) where F: FnOnce(&mut Config), @@ -101,7 +101,7 @@ pub fn update_actor_config( } /// Configure validator p2p settings to allow duplicate ips -pub fn allow_duplicate_ips(test: &Test, chain_id: &ChainId, who: &Who) { +pub fn allow_duplicate_ips(test: &Test, chain_id: &ChainId, who: Who) { update_actor_config(test, chain_id, who, |config| { config.ledger.cometbft.p2p.allow_duplicate_ip = true; }); @@ -112,7 +112,7 @@ pub fn allow_duplicate_ips(test: &Test, chain_id: &ChainId, who: &Who) { pub fn set_ethereum_bridge_mode( test: &Test, chain_id: &ChainId, - who: &Who, + who: Who, mode: ethereum_bridge::ledger::Mode, rpc_endpoint: Option<&str>, ) { @@ -223,25 +223,19 @@ where .expect("NAM balances should exist in pre-genesis wallet already"); nam_balances.0.insert( GenesisAddress::PublicKey(StringEncoded::new(sk.ref_to())), - token::DenominatedAmount { - amount: token::Amount::from_uint( - 1000000, - NATIVE_MAX_DECIMAL_PLACES, - ) - .unwrap(), - denom: NATIVE_MAX_DECIMAL_PLACES.into(), - }, + token::DenominatedAmount::new( + 
token::Amount::from_uint(1000000, NATIVE_MAX_DECIMAL_PLACES) + .unwrap(), + NATIVE_MAX_DECIMAL_PLACES.into(), + ), ); nam_balances.0.insert( GenesisAddress::EstablishedAddress(validator_address.clone()), - token::DenominatedAmount { - amount: token::Amount::from_uint( - 2000000, - NATIVE_MAX_DECIMAL_PLACES, - ) - .unwrap(), - denom: NATIVE_MAX_DECIMAL_PLACES.into(), - }, + token::DenominatedAmount::new( + token::Amount::from_uint(2000000, NATIVE_MAX_DECIMAL_PLACES) + .unwrap(), + NATIVE_MAX_DECIMAL_PLACES.into(), + ), ); // invoke `init-genesis-validator` to promote the generated established // account to a validator account @@ -710,7 +704,7 @@ mod macros { } } -#[derive(Clone)] +#[derive(Clone, Copy, Debug)] pub enum Who { // A non-validator NonValidator, @@ -757,11 +751,11 @@ impl Test { I: IntoIterator, S: AsRef, { - let base_dir = self.get_base_dir(&who); + let base_dir = self.get_base_dir(who); run_cmd(bin, args, timeout_sec, &self.working_dir, base_dir, loc) } - pub fn get_base_dir(&self, who: &Who) -> PathBuf { + pub fn get_base_dir(&self, who: Who) -> PathBuf { match who { Who::NonValidator => self.test_dir.path().to_owned(), Who::Validator(index) => self @@ -772,6 +766,15 @@ impl Test { } } + pub fn get_chain_dir(&self, who: Who) -> PathBuf { + self.get_base_dir(who).join(self.net.chain_id.as_str()) + } + + pub fn get_cometbft_home(&self, who: Who) -> PathBuf { + self.get_chain_dir(who) + .join(namada_apps::config::COMETBFT_DIR) + } + /// Get an async runtime. pub fn async_runtime(&self) -> &tokio::runtime::Runtime { Lazy::force(&self.async_runtime.0) diff --git a/tests/src/e2e/wallet_tests.rs b/tests/src/e2e/wallet_tests.rs index 167d67202e..370bdac670 100644 --- a/tests/src/e2e/wallet_tests.rs +++ b/tests/src/e2e/wallet_tests.rs @@ -28,12 +28,8 @@ fn wallet_encrypted_key_cmds() -> Result<()> { let password = "VeRySeCuR3"; // 1. key gen - let mut cmd = run!( - test, - Bin::Wallet, - &["key", "gen", "--alias", key_alias], - Some(20), - )?; + let mut cmd = + run!(test, Bin::Wallet, &["gen", "--alias", key_alias], Some(20),)?; cmd.exp_string("Enter your encryption password:")?; cmd.send_line(password)?; @@ -50,17 +46,22 @@ fn wallet_encrypted_key_cmds() -> Result<()> { let mut cmd = run!( test, Bin::Wallet, - &["key", "find", "--alias", key_alias], + &["find", "--keys", "--alias", key_alias, "--decrypt"], Some(20), )?; + cmd.exp_string("Found transparent keys:")?; + cmd.exp_string(&format!( + " Alias \"{}\" (encrypted):", + key_alias.to_lowercase() + ))?; + cmd.exp_string(" Public key hash:")?; + cmd.exp_string(" Public key:")?; cmd.exp_string("Enter your decryption password:")?; cmd.send_line(password)?; - cmd.exp_string("Public key hash:")?; - cmd.exp_string("Public key:")?; // 3. key list - let mut cmd = run!(test, Bin::Wallet, &["key", "list"], Some(20))?; + let mut cmd = run!(test, Bin::Wallet, &["list", "--keys"], Some(20))?; cmd.exp_string(&format!( "Alias \"{}\" (encrypted):", key_alias.to_lowercase() @@ -76,41 +77,45 @@ fn wallet_encrypted_key_cmds() -> Result<()> { #[test] fn wallet_encrypted_key_cmds_env_var() -> Result<()> { let test = setup::single_node_net()?; - let key_alias = "test_key_1"; + let key_alias = "Test_Key_1"; let password = "VeRySeCuR3"; env::set_var("NAMADA_WALLET_PASSWORD", password); // 1. 
key gen - let mut cmd = run!( - test, - Bin::Wallet, - &["key", "gen", "--alias", key_alias], - Some(20), - )?; + let mut cmd = + run!(test, Bin::Wallet, &["gen", "--alias", key_alias], Some(20),)?; cmd.exp_string("Enter BIP39 passphrase (empty for none): ")?; cmd.send_line("")?; cmd.exp_string(&format!( "Successfully added a key and an address with alias: \"{}\"", - key_alias + key_alias.to_lowercase() ))?; // 2. key find let mut cmd = run!( test, Bin::Wallet, - &["key", "find", "--alias", key_alias], + &["find", "--keys", "--alias", key_alias, "--decrypt"], Some(20), )?; - cmd.exp_string("Public key hash:")?; - cmd.exp_string("Public key:")?; + cmd.exp_string("Found transparent keys:")?; + cmd.exp_string(&format!( + " Alias \"{}\" (encrypted):", + key_alias.to_lowercase() + ))?; + cmd.exp_string(" Public key hash:")?; + cmd.exp_string(" Public key:")?; // 3. key list - let mut cmd = run!(test, Bin::Wallet, &["key", "list"], Some(20))?; - cmd.exp_string(&format!("Alias \"{}\" (encrypted):", key_alias))?; + let mut cmd = run!(test, Bin::Wallet, &["list", "--keys"], Some(20))?; + cmd.exp_string(&format!( + " Alias \"{}\" (encrypted):", + key_alias.to_lowercase() + ))?; Ok(()) } @@ -122,34 +127,42 @@ fn wallet_encrypted_key_cmds_env_var() -> Result<()> { #[test] fn wallet_unencrypted_key_cmds() -> Result<()> { let test = setup::single_node_net()?; - let key_alias = "test_key_1"; + let key_alias = "Test_Key_1"; // 1. key gen let mut cmd = run!( test, Bin::Wallet, - &["key", "gen", "--alias", key_alias, "--unsafe-dont-encrypt"], + &["gen", "--alias", key_alias, "--unsafe-dont-encrypt"], Some(20), )?; cmd.exp_string(&format!( "Successfully added a key and an address with alias: \"{}\"", - key_alias + key_alias.to_lowercase() ))?; // 2. key find let mut cmd = run!( test, Bin::Wallet, - &["key", "find", "--alias", key_alias], + &["find", "--keys", "--alias", key_alias], Some(20), )?; - cmd.exp_string("Public key hash:")?; - cmd.exp_string("Public key:")?; + cmd.exp_string("Found transparent keys:")?; + cmd.exp_string(&format!( + " Alias \"{}\" (not encrypted):", + key_alias.to_lowercase() + ))?; + cmd.exp_string(" Public key hash:")?; + cmd.exp_string(" Public key:")?; // 3. key list - let mut cmd = run!(test, Bin::Wallet, &["key", "list"], Some(20))?; - cmd.exp_string(&format!("Alias \"{}\" (not encrypted):", key_alias))?; + let mut cmd = run!(test, Bin::Wallet, &["list", "--keys"], Some(20))?; + cmd.exp_string(&format!( + " Alias \"{}\" (not encrypted):", + key_alias.to_lowercase() + ))?; Ok(()) } @@ -162,61 +175,49 @@ fn wallet_unencrypted_key_cmds() -> Result<()> { #[test] fn wallet_address_cmds() -> Result<()> { let test = setup::single_node_net()?; - let gen_address_alias = "test_address_1"; - let add_address_alias = "test_address_2"; + let gen_address_alias = "Test_Address_1"; + let add_address_alias = "Test_Address_2"; let add_address = "tnam1q82t25z5f9gmnv5sztyr8ht9tqhrw4u875qjhy56"; // 1. address gen let mut cmd = run!( test, Bin::Wallet, - &[ - "address", - "gen", - "--alias", - gen_address_alias, - "--unsafe-dont-encrypt", - ], + &["gen", "--alias", gen_address_alias, "--unsafe-dont-encrypt"], Some(20), )?; cmd.exp_string(&format!( "Successfully added a key and an address with alias: \"{}\"", - gen_address_alias + gen_address_alias.to_lowercase() ))?; // 2. 
address add let mut cmd = run!( test, Bin::Wallet, - &[ - "address", - "add", - "--address", - add_address, - "--alias", - add_address_alias, - ], + &["add", "--value", add_address, "--alias", add_address_alias], Some(20), )?; cmd.exp_string(&format!( - "Successfully added a key and an address with alias: \"{}\"", - add_address_alias + "Successfully added an address with alias: \"{}\"", + add_address_alias.to_lowercase() ))?; // 3. address find let mut cmd = run!( test, Bin::Wallet, - &["address", "find", "--alias", gen_address_alias], + &["find", "--addr", "--alias", gen_address_alias], Some(20), )?; - cmd.exp_string("Found address")?; + cmd.exp_string("Found transparent address:")?; // 4. address list - let mut cmd = run!(test, Bin::Wallet, &["address", "list"], Some(20))?; + let mut cmd = run!(test, Bin::Wallet, &["list", "--addr"], Some(20))?; - cmd.exp_string(&format!("\"{}\":", gen_address_alias))?; - cmd.exp_string(&format!("\"{}\":", add_address_alias))?; + cmd.exp_string("Known transparent addresses:")?; + cmd.exp_string(&format!("\"{}\":", gen_address_alias.to_lowercase()))?; + cmd.exp_string(&format!("\"{}\":", add_address_alias.to_lowercase()))?; Ok(()) } diff --git a/tests/src/integration/masp.rs b/tests/src/integration/masp.rs index bb7ca903b6..8754e40f09 100644 --- a/tests/src/integration/masp.rs +++ b/tests/src/integration/masp.rs @@ -14,6 +14,7 @@ use crate::e2e::setup::constants::{ BB_PAYMENT_ADDRESS, BERTHA, BERTHA_KEY, BTC, B_SPENDING_KEY, CHRISTEL, CHRISTEL_KEY, ETH, MASP, NAM, }; +use crate::strings::TX_APPLIED_SUCCESS; /// In this test we verify that users of the MASP receive the correct rewards /// for leaving their assets in the pool for varying periods of time. @@ -919,7 +920,7 @@ fn masp_txs_and_queries() -> Result<()> { "--node", validator_one_rpc, ], - Response::Ok("Transaction is valid"), + Response::Ok(TX_APPLIED_SUCCESS), ), // 3. Attempt to spend 10 ETH at SK(A) to PA(B) ( @@ -957,7 +958,7 @@ fn masp_txs_and_queries() -> Result<()> { "--node", validator_one_rpc, ], - Response::Ok("Transaction is valid"), + Response::Ok(TX_APPLIED_SUCCESS), ), // 5. Spend 7 BTC at SK(A) to PA(B) ( @@ -976,7 +977,7 @@ fn masp_txs_and_queries() -> Result<()> { "--node", validator_one_rpc, ], - Response::Ok("Transaction is valid"), + Response::Ok(TX_APPLIED_SUCCESS), ), // 6. Attempt to spend 7 BTC at SK(A) to PA(B) ( @@ -1014,7 +1015,7 @@ fn masp_txs_and_queries() -> Result<()> { "--node", validator_one_rpc, ], - Response::Ok("Transaction is valid"), + Response::Ok(TX_APPLIED_SUCCESS), ), // 8. 
Assert BTC balance at VK(A) is 0 ( @@ -1070,7 +1071,7 @@ fn masp_txs_and_queries() -> Result<()> { "--node", validator_one_rpc, ], - Response::Ok("Transaction is valid"), + Response::Ok(TX_APPLIED_SUCCESS), ), ]; @@ -1093,7 +1094,7 @@ fn masp_txs_and_queries() -> Result<()> { let captured = CapturedOutput::of(|| run(&node, Bin::Client, tx_args.clone())); match tx_result { - Response::Ok("Transaction is valid") => { + Response::Ok(TX_APPLIED_SUCCESS) => { assert!( captured.result.is_ok(), "{:?} failed with result {:?}.\n Unread output: {}", @@ -1105,7 +1106,7 @@ fn masp_txs_and_queries() -> Result<()> { node.assert_success(); } else { assert!( - captured.contains("Transaction is valid"), + captured.contains(TX_APPLIED_SUCCESS), "{:?} failed to contain needle 'Transaction is \ valid',\nGot output '{}'", tx_args, @@ -1163,9 +1164,6 @@ fn wrapper_fee_unshielding() -> Result<()> { let validator_one_rpc = "127.0.0.1:26567"; // Download the shielded pool parameters before starting node let _ = FsShieldedUtils::new(PathBuf::new()); - // Lengthen epoch to ensure that a transaction can be constructed and - // submitted within the same block. Necessary to ensure that conversion is - // not invalidated. let (mut node, _services) = setup::setup()?; _ = node.next_epoch(); @@ -1219,9 +1217,52 @@ fn wrapper_fee_unshielding() -> Result<()> { node.assert_success(); // 3. Invalid unshielding - // TODO: this test shall panic because of the panic in the sdk. Once the - // panics are removed from there, this test can be updated - let tx_run = run( + let tx_args = vec![ + "transfer", + "--source", + ALBERT, + "--target", + BERTHA, + "--token", + NAM, + "--amount", + "1", + "--gas-price", + "1000", + "--gas-spending-key", + B_SPENDING_KEY, + "--ledger-address", + validator_one_rpc, + // NOTE: Forcing the transaction will make the client produce a + // transfer without a masp object attached to it, so don't expect a + // failure from the masp vp here but from the check_fees function + "--force", + ]; + + let captured = + CapturedOutput::of(|| run(&node, Bin::Client, tx_args.clone())); + assert!( + captured.result.is_err(), + "{:?} unexpectedly succeeded", + tx_args + ); + + Ok(()) +} + +// Test that a masp unshield transaction can be successfully executed even across +// an epoch boundary. +#[test] +fn cross_epoch_tx() -> Result<()> { + // This address doesn't matter for tests. But an argument is required. + let validator_one_rpc = "127.0.0.1:26567"; + // Download the shielded pool parameters before starting node + let _ = FsShieldedUtils::new(PathBuf::new()); + let (mut node, _services) = setup::setup()?; + _ = node.next_epoch(); + + // 1. Shield some tokens + run( + &node, + Bin::Client, + vec![ @@ -1229,22 +1270,69 @@ fn wrapper_fee_unshielding() -> Result<()> { "transfer", "--source", ALBERT, "--target", - BERTHA, + AA_PAYMENT_ADDRESS, "--token", NAM, "--amount", - "1", - "--gas-price", "1000", - "--gas-spending-key", - B_SPENDING_KEY, "--ledger-address", validator_one_rpc, - "--force", ], - ) - .is_err(); + )?; + node.assert_success(); + + // 2. 
Generate the tx in the current epoch + let tempdir = tempfile::tempdir().unwrap(); + run( + &node, + Bin::Client, + vec![ + "transfer", + "--source", + A_SPENDING_KEY, + "--target", + BERTHA, + "--token", + NAM, + "--amount", + "100", + "--gas-payer", + ALBERT_KEY, + "--output-folder-path", + tempdir.path().to_str().unwrap(), + "--dump-tx", + "--ledger-address", + validator_one_rpc, + ], + )?; + node.assert_success(); + + // Look for the only file in the temp dir + let tx_path = tempdir + .path() + .read_dir() + .unwrap() + .next() + .unwrap() + .unwrap() + .path(); + + // 3. Submit the unshielding in the following epoch + _ = node.next_epoch(); + run( + &node, + Bin::Client, + vec![ + "tx", + "--owner", + ALBERT_KEY, + "--tx-path", + tx_path.to_str().unwrap(), + "--ledger-address", + validator_one_rpc, + ], + )?; + node.assert_success(); - assert!(tx_run); Ok(()) } diff --git a/tests/src/lib.rs b/tests/src/lib.rs index 1d9958a30a..ca55eba207 100644 --- a/tests/src/lib.rs +++ b/tests/src/lib.rs @@ -17,6 +17,8 @@ pub mod native_vp; pub mod storage; #[cfg(test)] mod storage_api; +#[cfg(test)] +pub mod strings; /// Using this import requires `tracing` and `tracing-subscriber` dependencies. /// Set env var `RUST_LOG=info` to see the logs from a test run (and diff --git a/tests/src/native_vp/pos.rs b/tests/src/native_vp/pos.rs index 82d52aa746..1bbe50f362 100644 --- a/tests/src/native_vp/pos.rs +++ b/tests/src/native_vp/pos.rs @@ -574,11 +574,11 @@ pub mod testing { use namada::proof_of_stake::epoched::DynEpochOffset; use namada::proof_of_stake::parameters::testing::arb_rate; use namada::proof_of_stake::parameters::PosParams; - use namada::proof_of_stake::types::{BondId, ValidatorState}; - use namada::proof_of_stake::{ + use namada::proof_of_stake::storage::{ get_num_consensus_validators, read_pos_params, unbond_handle, - ADDRESS as POS_ADDRESS, }; + use namada::proof_of_stake::types::{BondId, ValidatorState}; + use namada::proof_of_stake::ADDRESS as POS_ADDRESS; use namada::types::key::common::PublicKey; use namada::types::key::RefTo; use namada::types::storage::Epoch; diff --git a/tests/src/strings.rs b/tests/src/strings.rs new file mode 100644 index 0000000000..bea5ce536d --- /dev/null +++ b/tests/src/strings.rs @@ -0,0 +1,25 @@ +//! Expected strings for integration and e2e tests. + +/// Namada ledger started +pub const LEDGER_STARTED: &str = "Namada ledger node started"; + +/// Namada ledger has shut down +pub const LEDGER_SHUTDOWN: &str = "Namada ledger node has shut down"; + +/// Ledger is running as a validator +pub const VALIDATOR_NODE: &str = "This node is a validator"; + +/// Ledger is not running as a validator +pub const NON_VALIDATOR_NODE: &str = "This node is not a validator"; + +/// Inner tx applied and accepted by VPs. +pub const TX_APPLIED_SUCCESS: &str = "Transaction was successfully applied"; + +/// Inner transaction rejected by VP(s). +pub const TX_REJECTED: &str = "Transaction was rejected by VPs"; + +/// Inner transaction failed in execution (no VPs ran). +pub const TX_FAILED: &str = "Transaction failed"; + +/// Wrapper transaction accepted. 
+pub const TX_ACCEPTED: &str = "Wrapper transaction accepted"; diff --git a/tx_prelude/src/ibc.rs b/tx_prelude/src/ibc.rs index dfd0430e5b..ebf97e7859 100644 --- a/tx_prelude/src/ibc.rs +++ b/tx_prelude/src/ibc.rs @@ -6,12 +6,13 @@ use std::rc::Rc; pub use namada_core::ledger::ibc::{ IbcActions, IbcCommonContext, IbcStorageContext, ProofSpec, TransferModule, }; +use namada_core::ledger::masp_utils; use namada_core::ledger::tx_env::TxEnv; use namada_core::types::address::{Address, InternalAddress}; pub use namada_core::types::ibc::{IbcEvent, IbcShieldedTransfer}; use namada_core::types::token::DenominatedAmount; -use crate::token::{burn, handle_masp_tx, mint, transfer}; +use crate::token::{burn, mint, transfer}; use crate::{Ctx, Error}; /// IBC actions to handle an IBC message @@ -52,7 +53,12 @@ impl IbcStorageContext for Ctx { &mut self, shielded: &IbcShieldedTransfer, ) -> Result<(), Error> { - handle_masp_tx(self, &shielded.transfer, &shielded.masp_tx) + masp_utils::handle_masp_tx( + self, + &shielded.transfer, + &shielded.masp_tx, + )?; + masp_utils::update_note_commitment_tree(self, &shielded.masp_tx) } fn mint_token( @@ -66,7 +72,7 @@ impl IbcStorageContext for Ctx { &Address::Internal(InternalAddress::Ibc), target, token, - amount.amount, + amount.to_amount(token, self)?, ) } @@ -76,7 +82,7 @@ impl IbcStorageContext for Ctx { token: &Address, amount: DenominatedAmount, ) -> Result<(), Error> { - burn(self, target, token, amount.amount) + burn(self, target, token, amount.to_amount(token, self)?) } fn log_string(&self, message: String) { diff --git a/tx_prelude/src/lib.rs b/tx_prelude/src/lib.rs index 29e9881488..fbb6e03a78 100644 --- a/tx_prelude/src/lib.rs +++ b/tx_prelude/src/lib.rs @@ -19,6 +19,7 @@ use std::marker::PhantomData; pub use borsh::{BorshDeserialize, BorshSerialize}; pub use borsh_ext; use borsh_ext::BorshSerializeExt; +use masp_primitives::transaction::Transaction; pub use namada_core::ledger::governance::storage as gov_storage; pub use namada_core::ledger::parameters::storage as parameters_storage; pub use namada_core::ledger::storage::types::encode; @@ -404,3 +405,20 @@ pub fn verify_signatures_of_pks( Ok(HostEnvResult::is_success(valid)) } + +/// Update the masp note commitment tree in storage with the new notes +pub fn update_masp_note_commitment_tree( + transaction: &Transaction, +) -> EnvResult { + // Serialize transaction + let transaction = transaction.serialize_to_vec(); + + let valid = unsafe { + namada_tx_update_masp_note_commitment_tree( + transaction.as_ptr() as _, + transaction.len() as _, + ) + }; + + Ok(HostEnvResult::is_success(valid)) +} diff --git a/tx_prelude/src/proof_of_stake.rs b/tx_prelude/src/proof_of_stake.rs index 15250e760a..3b7883361d 100644 --- a/tx_prelude/src/proof_of_stake.rs +++ b/tx_prelude/src/proof_of_stake.rs @@ -5,15 +5,15 @@ use namada_core::types::key::common; use namada_core::types::transaction::pos::BecomeValidator; use namada_core::types::{key, token}; pub use namada_proof_of_stake::parameters::PosParams; -use namada_proof_of_stake::types::ValidatorMetaData; +use namada_proof_of_stake::storage::read_pos_params; +use namada_proof_of_stake::types::{ResultSlashing, ValidatorMetaData}; use namada_proof_of_stake::{ become_validator, bond_tokens, change_consensus_key, change_validator_commission_rate, change_validator_metadata, claim_reward_tokens, deactivate_validator, reactivate_validator, - read_pos_params, redelegate_tokens, unbond_tokens, unjail_validator, - withdraw_tokens, + redelegate_tokens, unbond_tokens, 
unjail_validator, withdraw_tokens, }; -pub use namada_proof_of_stake::{parameters, types, ResultSlashing}; +pub use namada_proof_of_stake::{parameters, types}; use super::*; diff --git a/tx_prelude/src/token.rs b/tx_prelude/src/token.rs index 009cccb36d..50cdef6c4e 100644 --- a/tx_prelude/src/token.rs +++ b/tx_prelude/src/token.rs @@ -1,6 +1,5 @@ -use masp_primitives::transaction::Transaction; -use namada_core::types::address::{Address, MASP}; -use namada_core::types::storage::KeySeg; +pub use namada_core::ledger::masp_utils; +use namada_core::types::address::Address; use namada_core::types::token; pub use namada_core::types::token::*; @@ -15,7 +14,8 @@ pub fn transfer( token: &Address, amount: DenominatedAmount, ) -> TxResult { - if amount.amount != Amount::default() && src != dest { + let amount = amount.to_amount(token, ctx)?; + if amount != Amount::default() && src != dest { let src_key = token::balance_key(token, src); let dest_key = token::balance_key(token, dest); let src_bal: Option<Amount> = ctx.read(&src_key)?; let mut src_bal = src_bal.unwrap_or_else(|| { log_string(format!("src {} has no balance", src_key)); unreachable!() }); - src_bal.spend(&amount.amount); + src_bal.spend(&amount); let mut dest_bal: Amount = ctx.read(&dest_key)?.unwrap_or_default(); - dest_bal.receive(&amount.amount); + dest_bal.receive(&amount); ctx.write(&src_key, src_bal)?; ctx.write(&dest_key, dest_bal)?; } Ok(()) } -/// Handle a MASP transaction. -pub fn handle_masp_tx( +/// An undenominated token transfer that can be used in a transaction. +pub fn undenominated_transfer( ctx: &mut Ctx, - transfer: &Transfer, - shielded: &Transaction, + src: &Address, + dest: &Address, + token: &Address, + amount: Amount, ) -> TxResult { - let masp_addr = MASP; - ctx.insert_verifier(&masp_addr)?; - let head_tx_key = storage::Key::from(masp_addr.to_db_key()) - .push(&HEAD_TX_KEY.to_owned()) - .expect("Cannot obtain a storage key"); - let current_tx_idx: u64 = - ctx.read(&head_tx_key).unwrap_or(None).unwrap_or(0); - let current_tx_key = storage::Key::from(masp_addr.to_db_key()) - .push(&(TX_KEY_PREFIX.to_owned() + &current_tx_idx.to_string())) - .expect("Cannot obtain a storage key"); - // Save the Transfer object and its location within the blockchain - // so that clients do not have to separately look these - // up - let record: (Epoch, BlockHeight, TxIndex, Transfer, Transaction) = ( - ctx.get_block_epoch()?, - ctx.get_block_height()?, - ctx.get_tx_index()?, - transfer.clone(), - shielded.clone(), - ); - ctx.write(&current_tx_key, record)?; - ctx.write(&head_tx_key, current_tx_idx + 1)?; - // If storage key has been supplied, then pin this transaction to it - if let Some(key) = &transfer.key { - let pin_key = storage::Key::from(masp_addr.to_db_key()) - .push(&(PIN_KEY_PREFIX.to_owned() + key)) - .expect("Cannot obtain a storage key"); - ctx.write(&pin_key, current_tx_idx)?; + if amount != Amount::default() && src != dest { + let src_key = token::balance_key(token, src); + let dest_key = token::balance_key(token, dest); + let src_bal: Option<Amount> = ctx.read(&src_key)?; + let mut src_bal = src_bal.unwrap_or_else(|| { + log_string(format!("src {} has no balance", src_key)); + unreachable!() + }); + src_bal.spend(&amount); + let mut dest_bal: Amount = ctx.read(&dest_key)?.unwrap_or_default(); + dest_bal.receive(&amount); + ctx.write(&src_key, src_bal)?; + ctx.write(&dest_key, dest_bal)?; } Ok(()) } diff --git a/vm_env/src/lib.rs b/vm_env/src/lib.rs index 3b2c5c4dcc..f46ad44b94 100644 --- a/vm_env/src/lib.rs +++ b/vm_env/src/lib.rs @@ -133,6 +133,11 @@ pub 
mod tx { max_signatures_len: u64, ) -> i64; + /// Update the masp note commitment tree with the new notes + pub fn namada_tx_update_masp_note_commitment_tree( + transaction_ptr: u64, + transaction_len: u64, + ) -> i64; } } diff --git a/wasm/Cargo.lock b/wasm/Cargo.lock index ee23ea8c2f..58825e1a3f 100644 --- a/wasm/Cargo.lock +++ b/wasm/Cargo.lock @@ -3304,7 +3304,7 @@ checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" [[package]] name = "namada" -version = "0.28.2" +version = "0.29.0" dependencies = [ "async-trait", "bimap", @@ -3366,7 +3366,7 @@ dependencies = [ [[package]] name = "namada_core" -version = "0.28.2" +version = "0.29.0" dependencies = [ "ark-bls12-381", "ark-serialize", @@ -3390,6 +3390,7 @@ dependencies = [ "k256", "masp_primitives", "namada_macros", + "num-derive", "num-integer", "num-rational 0.4.1", "num-traits", @@ -3418,7 +3419,7 @@ dependencies = [ [[package]] name = "namada_ethereum_bridge" -version = "0.28.2" +version = "0.29.0" dependencies = [ "borsh", "borsh-ext", @@ -3439,7 +3440,7 @@ dependencies = [ [[package]] name = "namada_macros" -version = "0.28.2" +version = "0.29.0" dependencies = [ "proc-macro2", "quote", @@ -3448,7 +3449,7 @@ dependencies = [ [[package]] name = "namada_proof_of_stake" -version = "0.28.2" +version = "0.29.0" dependencies = [ "borsh", "data-encoding", @@ -3463,7 +3464,7 @@ dependencies = [ [[package]] name = "namada_sdk" -version = "0.28.2" +version = "0.29.0" dependencies = [ "async-trait", "bimap", @@ -3487,6 +3488,7 @@ dependencies = [ "owo-colors", "parse_duration", "paste", + "proptest", "prost 0.12.3", "rand 0.8.5", "rand_core 0.6.4", @@ -3508,7 +3510,7 @@ dependencies = [ [[package]] name = "namada_test_utils" -version = "0.28.2" +version = "0.29.0" dependencies = [ "borsh", "namada_core", @@ -3517,7 +3519,7 @@ dependencies = [ [[package]] name = "namada_tests" -version = "0.28.2" +version = "0.29.0" dependencies = [ "async-trait", "chrono", @@ -3551,7 +3553,7 @@ dependencies = [ [[package]] name = "namada_tx_prelude" -version = "0.28.2" +version = "0.29.0" dependencies = [ "borsh", "borsh-ext", @@ -3566,7 +3568,7 @@ dependencies = [ [[package]] name = "namada_vm_env" -version = "0.28.2" +version = "0.29.0" dependencies = [ "borsh", "masp_primitives", @@ -3575,7 +3577,7 @@ dependencies = [ [[package]] name = "namada_vp_prelude" -version = "0.28.2" +version = "0.29.0" dependencies = [ "borsh", "borsh-ext", @@ -3589,7 +3591,7 @@ dependencies = [ [[package]] name = "namada_wasm" -version = "0.28.2" +version = "0.29.0" dependencies = [ "borsh", "getrandom 0.2.11", @@ -5963,7 +5965,7 @@ checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" [[package]] name = "tx_template" -version = "0.28.2" +version = "0.29.0" dependencies = [ "borsh", "getrandom 0.2.11", @@ -6118,7 +6120,7 @@ checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" [[package]] name = "vp_template" -version = "0.28.2" +version = "0.29.0" dependencies = [ "borsh", "getrandom 0.2.11", diff --git a/wasm/checksums.json b/wasm/checksums.json index 7842720d72..72d7b70f48 100644 --- a/wasm/checksums.json +++ b/wasm/checksums.json @@ -1,26 +1,26 @@ { - "tx_become_validator.wasm": "tx_become_validator.0aab9a45dfd620c83014fe48ed088d03d3929044fec1ab94ffe4bd80b7adb028.wasm", - "tx_bond.wasm": "tx_bond.84cf7978614b128549d80617ee406ecb88fc5b27c8de0e607217840c57fb7358.wasm", - "tx_bridge_pool.wasm": "tx_bridge_pool.a7f98a066119e3b72e1a12139ed0168eaa558c12df995e8021eacfad2aed9ef9.wasm", - 
"tx_change_consensus_key.wasm": "tx_change_consensus_key.ea0da2b38e8687d25584b05f3023e40601db8d5cb03307f7d7fc9fbc971e3e11.wasm", - "tx_change_validator_commission.wasm": "tx_change_validator_commission.b716719c36f9da4795264286f3326f135cb7f484ec76eee36cff8633e54ddc44.wasm", - "tx_change_validator_metadata.wasm": "tx_change_validator_metadata.42fcbf3237ce59474354978d9d83bcaaca4e137a664cadcca8a9d769466be6d5.wasm", - "tx_claim_rewards.wasm": "tx_claim_rewards.3ab287b087fc6c33572d3ac2ce5719cc6ada99dc8e485f3875a0dfce578e2d8b.wasm", - "tx_deactivate_validator.wasm": "tx_deactivate_validator.33881a917eff3273ea4e2c10697693a1fae74b4bbc62cb358d8886d88bb0dcdf.wasm", - "tx_ibc.wasm": "tx_ibc.fb438a95a5eb2ce0cb9d07470cc51d8464b4d438b24eb8509e6343317f03ac45.wasm", - "tx_init_account.wasm": "tx_init_account.3688651fdc312503d62ceeb0e77e4b64c91e5a4e8ba824a5bf9fb1eb63024864.wasm", - "tx_init_proposal.wasm": "tx_init_proposal.40225970522e78556a4bb28b2afdf69c62513424940c00becf1bb7907a7aa97f.wasm", - "tx_reactivate_validator.wasm": "tx_reactivate_validator.b1bfabf2c2c9e38e1019d16b5a58c2dbfd053f675a85b6ddacddb00afab06e6c.wasm", - "tx_redelegate.wasm": "tx_redelegate.db7855853cd8f1c1e021757b410c9dc83924a1a69eef733681a27885ed437dcf.wasm", - "tx_resign_steward.wasm": "tx_resign_steward.aaff0cf2e539672bcf220a4ef1644a0cf2e485641c1b558a1efd3509510272d4.wasm", - "tx_reveal_pk.wasm": "tx_reveal_pk.4e4d2d661cb09fe4c34881d513f32f171ccd5506af666f90d4a34af1b64ef774.wasm", - "tx_transfer.wasm": "tx_transfer.f0a7521e22d8b516b32535c07b4149188cfe79c8000f46a1ae425f91b6bde271.wasm", - "tx_unbond.wasm": "tx_unbond.4510bad25e0a74624100f21fd23016a04074eb116ed4f26d11ca652796b292e0.wasm", - "tx_unjail_validator.wasm": "tx_unjail_validator.c29354e345622daf644c63c9f436e5fe31fb4ebc621ecf51fd98e906cab10113.wasm", - "tx_update_account.wasm": "tx_update_account.16b120553164ea2ac3fa254839d33bf2c6d58097efd64b3c1f6597f1e8946b98.wasm", - "tx_update_steward_commission.wasm": "tx_update_steward_commission.0ef3670dbe76cc87910102a7dddd80a902252df5b89955a1e339aa997e5d7d0e.wasm", - "tx_vote_proposal.wasm": "tx_vote_proposal.547dd82816983658b210022188d486802fc89a940bca6f4b95a9d10ee7c176bb.wasm", - "tx_withdraw.wasm": "tx_withdraw.5115513d1070a9ba486bb48920fc33bbc2c83ba48541601d4b1af1f9b3420310.wasm", - "vp_implicit.wasm": "vp_implicit.e8cbb1fad44a2717a0ac11445fa5a6ac781b4e3356ffd86c3bb2274ad37da34c.wasm", - "vp_user.wasm": "vp_user.361e7e57cbe10e4be123f4cce46b71d26725132393669e20986c914ad4b0054d.wasm" -} + "tx_become_validator.wasm": "tx_become_validator.7ca7c43374065e64a71e12f7e342fc87fadbbe2e374b8d300ce56d3980163bea.wasm", + "tx_bond.wasm": "tx_bond.e2b37e862aaa5c513b15490f534fcf4b09e9f3d0ba5f6863541aa699f59ddd7f.wasm", + "tx_bridge_pool.wasm": "tx_bridge_pool.6f16cf22f25492075e59a9639b30748f695983fd47f25ef108f8ba2929db5f38.wasm", + "tx_change_consensus_key.wasm": "tx_change_consensus_key.3a313d3dd9a9cae60c971978529ff7779d20873a2b288b4ef278dbf4fec53b23.wasm", + "tx_change_validator_commission.wasm": "tx_change_validator_commission.52d369d876e4cec95739333dd4791e6ccd49fe61456655a80eceda77560d11a6.wasm", + "tx_change_validator_metadata.wasm": "tx_change_validator_metadata.0c1bf7b68cc8c0a34547474ca67ccab74c56fb1d02fe8f113eaa9c11f45fc95d.wasm", + "tx_claim_rewards.wasm": "tx_claim_rewards.b19cbf08390b2e89b5e744077c10d356929f4f90df62380ea5404f4d94d2ae54.wasm", + "tx_deactivate_validator.wasm": "tx_deactivate_validator.e722934d353c7322ad500b8c285ecdf91f1e46ca68d4b3c3a248a9eed4c6dc53.wasm", + "tx_ibc.wasm": 
"tx_ibc.3185d08d2857358c2654f6fdb64b63b6f07e6b19a89dcd4586ff2aa29c3f714e.wasm", + "tx_init_account.wasm": "tx_init_account.a9edd915dc693d5534dd6aa618465bada2423a5bad5a4b73bc41aa205c8199cf.wasm", + "tx_init_proposal.wasm": "tx_init_proposal.85a2bd50f0625c8ad17e0781f2e6e072e48df0b03bc2c3660ae56df61c54cb98.wasm", + "tx_reactivate_validator.wasm": "tx_reactivate_validator.0ad3539a30f594d542575e3fbabd32c0f38f929f75e7b6757a067354b7e1d3f0.wasm", + "tx_redelegate.wasm": "tx_redelegate.52a3f7e69416bebad63f92039f3d3966513d4ba4823ba9323cfef0ff73e88a39.wasm", + "tx_resign_steward.wasm": "tx_resign_steward.94b56e5cc78ef38095cf25acc25a0414f0d0669d2a33f00a2072a4b2a1dd1d3d.wasm", + "tx_reveal_pk.wasm": "tx_reveal_pk.e31afa2ca41469cbc13616d0926cb373cc230719cf3325d8599e6026064c5592.wasm", + "tx_transfer.wasm": "tx_transfer.40493b61f19b05ef53286efbf5f67f3f9f016a90e155b51b33ac3505fe6bb7ce.wasm", + "tx_unbond.wasm": "tx_unbond.5b44922da37f2a7ed6f16fab3424cd4d81a5d2657b1d4c6073d2db7f8d33407f.wasm", + "tx_unjail_validator.wasm": "tx_unjail_validator.9ec0b69fd155b962b2ebc616adc9111e1675410319ec456ecdbf280155257d2d.wasm", + "tx_update_account.wasm": "tx_update_account.29730c004185db0fb6e40defa61295f78115d103cda6e838414a0e4bb8ba8b53.wasm", + "tx_update_steward_commission.wasm": "tx_update_steward_commission.f275fb0ced6ed3fb65b84383b6102cca43d1b8a4f5d8b99645c3739671e01f60.wasm", + "tx_vote_proposal.wasm": "tx_vote_proposal.ef4091c2e075d1495206bb50fe1c8742fa9f1ecfeec864272b6d774161fe2423.wasm", + "tx_withdraw.wasm": "tx_withdraw.8b84400601c11428db61e6e923d72102a111eda5f7b21e5390a858e1fe890b8d.wasm", + "vp_implicit.wasm": "vp_implicit.af7c0590247d504fc88a4f3a20586798c4617216646dd186e3320f569a475a4d.wasm", + "vp_user.wasm": "vp_user.df0cd830e627aaa94746617a19e33a91f5ff2ae5f23d60aeaf542dd30ce1d82d.wasm" +} \ No newline at end of file diff --git a/wasm/tx_template/Cargo.toml b/wasm/tx_template/Cargo.toml index 6b8d07fa32..382c1c2093 100644 --- a/wasm/tx_template/Cargo.toml +++ b/wasm/tx_template/Cargo.toml @@ -4,7 +4,7 @@ edition = "2021" license = "GPL-3.0" name = "tx_template" resolver = "2" -version = "0.28.2" +version = "0.29.0" [lib] crate-type = ["cdylib"] diff --git a/wasm/vp_template/Cargo.toml b/wasm/vp_template/Cargo.toml index dc2872896d..3f7fd9abaa 100644 --- a/wasm/vp_template/Cargo.toml +++ b/wasm/vp_template/Cargo.toml @@ -4,7 +4,7 @@ edition = "2021" license = "GPL-3.0" name = "vp_template" resolver = "2" -version = "0.28.2" +version = "0.29.0" [lib] crate-type = ["cdylib"] diff --git a/wasm/wasm_source/Cargo.toml b/wasm/wasm_source/Cargo.toml index 2eb0de8538..73a713c60a 100644 --- a/wasm/wasm_source/Cargo.toml +++ b/wasm/wasm_source/Cargo.toml @@ -4,7 +4,7 @@ edition = "2021" license = "GPL-3.0" name = "namada_wasm" resolver = "2" -version = "0.28.2" +version = "0.29.0" [lib] crate-type = ["cdylib"] diff --git a/wasm/wasm_source/src/tx_bond.rs b/wasm/wasm_source/src/tx_bond.rs index 419c1e00a0..733e021c59 100644 --- a/wasm/wasm_source/src/tx_bond.rs +++ b/wasm/wasm_source/src/tx_bond.rs @@ -21,11 +21,11 @@ mod tests { use std::collections::BTreeSet; use namada::ledger::pos::{OwnedPosParams, PosVP}; - use namada::proof_of_stake::types::{GenesisValidator, WeightedValidator}; - use namada::proof_of_stake::{ + use namada::proof_of_stake::storage::{ bond_handle, read_consensus_validator_set_addresses_with_stake, read_total_stake, read_validator_stake, }; + use namada::proof_of_stake::types::{GenesisValidator, WeightedValidator}; use namada::types::dec::Dec; use namada::types::storage::Epoch; use 
namada_tests::log::test; diff --git a/wasm/wasm_source/src/tx_bridge_pool.rs b/wasm/wasm_source/src/tx_bridge_pool.rs index 321005d23e..84de025fd4 100644 --- a/wasm/wasm_source/src/tx_bridge_pool.rs +++ b/wasm/wasm_source/src/tx_bridge_pool.rs @@ -21,12 +21,12 @@ fn apply_tx(ctx: &mut Ctx, signed: Tx) -> TxResult { amount, ref payer, } = transfer.gas_fee; - token::transfer( + token::undenominated_transfer( ctx, payer, &bridge_pool::BRIDGE_POOL_ADDRESS, fee_token_addr, - amount.native_denominated(), + amount, )?; log_string("Token transfer succeeded."); let TransferToEthereum { @@ -38,22 +38,22 @@ fn apply_tx(ctx: &mut Ctx, signed: Tx) -> TxResult { // if minting wNam, escrow the correct amount if asset == native_erc20_address(ctx)? { let nam_addr = ctx.get_native_token()?; - token::transfer( + token::undenominated_transfer( ctx, sender, ð_bridge::ADDRESS, &nam_addr, - amount.native_denominated(), + amount, )?; } else { // Otherwise we escrow ERC20 tokens. let token = transfer.token_address(); - token::transfer( + token::undenominated_transfer( ctx, sender, &bridge_pool::BRIDGE_POOL_ADDRESS, &token, - amount.native_denominated(), + amount, )?; } log_string("Escrow succeeded"); diff --git a/wasm/wasm_source/src/tx_change_consensus_key.rs b/wasm/wasm_source/src/tx_change_consensus_key.rs index 9f06745c9f..9d82f4d855 100644 --- a/wasm/wasm_source/src/tx_change_consensus_key.rs +++ b/wasm/wasm_source/src/tx_change_consensus_key.rs @@ -12,5 +12,15 @@ fn apply_tx(ctx: &mut Ctx, tx_data: Tx) -> TxResult { consensus_key, } = transaction::pos::ConsensusKeyChange::try_from_slice(&data[..]) .wrap_err("failed to decode Dec value")?; + + // Check that the tx has been signed with the new consensus key + if !matches!( + verify_signatures_of_pks(ctx, &signed, vec![consensus_key.clone()]), + Ok(true) + ) { + debug_log!("Consensus key ownership signature verification failed"); + panic!() + } + ctx.change_validator_consensus_key(&validator, &consensus_key) } diff --git a/wasm/wasm_source/src/tx_change_validator_commission.rs b/wasm/wasm_source/src/tx_change_validator_commission.rs index 33433b59b3..4569c4d603 100644 --- a/wasm/wasm_source/src/tx_change_validator_commission.rs +++ b/wasm/wasm_source/src/tx_change_validator_commission.rs @@ -23,8 +23,8 @@ mod tests { use std::cmp; use namada::ledger::pos::{OwnedPosParams, PosVP}; + use namada::proof_of_stake::storage::validator_commission_rate_handle; use namada::proof_of_stake::types::GenesisValidator; - use namada::proof_of_stake::validator_commission_rate_handle; use namada::types::dec::{Dec, POS_DECIMAL_PRECISION}; use namada::types::storage::Epoch; use namada_tests::log::test; diff --git a/wasm/wasm_source/src/tx_redelegate.rs b/wasm/wasm_source/src/tx_redelegate.rs index 82f63cd9e4..a02970a0c1 100644 --- a/wasm/wasm_source/src/tx_redelegate.rs +++ b/wasm/wasm_source/src/tx_redelegate.rs @@ -25,11 +25,11 @@ mod tests { use std::collections::BTreeSet; use namada::ledger::pos::{OwnedPosParams, PosVP}; - use namada::proof_of_stake::types::{GenesisValidator, WeightedValidator}; - use namada::proof_of_stake::{ + use namada::proof_of_stake::storage::{ bond_handle, read_consensus_validator_set_addresses_with_stake, read_total_stake, read_validator_stake, unbond_handle, }; + use namada::proof_of_stake::types::{GenesisValidator, WeightedValidator}; use namada::types::dec::Dec; use namada::types::storage::Epoch; use namada_tests::log::test; diff --git a/wasm/wasm_source/src/tx_transfer.rs b/wasm/wasm_source/src/tx_transfer.rs index e92750e016..e521be4f75 100644 --- 
a/wasm/wasm_source/src/tx_transfer.rs +++ b/wasm/wasm_source/src/tx_transfer.rs @@ -38,7 +38,8 @@ fn apply_tx(ctx: &mut Ctx, tx_data: Tx) -> TxResult { }) .transpose()?; if let Some(shielded) = shielded { - token::handle_masp_tx(ctx, &transfer, &shielded)?; + token::masp_utils::handle_masp_tx(ctx, &transfer, &shielded)?; + update_masp_note_commitment_tree(&shielded)?; } Ok(()) } diff --git a/wasm/wasm_source/src/tx_unbond.rs b/wasm/wasm_source/src/tx_unbond.rs index 3747f91d33..f7f11b14bc 100644 --- a/wasm/wasm_source/src/tx_unbond.rs +++ b/wasm/wasm_source/src/tx_unbond.rs @@ -28,11 +28,11 @@ mod tests { use std::collections::BTreeSet; use namada::ledger::pos::{OwnedPosParams, PosVP}; - use namada::proof_of_stake::types::{GenesisValidator, WeightedValidator}; - use namada::proof_of_stake::{ + use namada::proof_of_stake::storage::{ bond_handle, read_consensus_validator_set_addresses_with_stake, read_total_stake, read_validator_stake, unbond_handle, }; + use namada::proof_of_stake::types::{GenesisValidator, WeightedValidator}; use namada::types::dec::Dec; use namada::types::storage::Epoch; use namada_tests::log::test; diff --git a/wasm/wasm_source/src/tx_withdraw.rs b/wasm/wasm_source/src/tx_withdraw.rs index 1fb10dc588..f8984b1bef 100644 --- a/wasm/wasm_source/src/tx_withdraw.rs +++ b/wasm/wasm_source/src/tx_withdraw.rs @@ -24,8 +24,8 @@ fn apply_tx(ctx: &mut Ctx, tx_data: Tx) -> TxResult { #[cfg(test)] mod tests { use namada::ledger::pos::{OwnedPosParams, PosVP}; + use namada::proof_of_stake::storage::unbond_handle; use namada::proof_of_stake::types::GenesisValidator; - use namada::proof_of_stake::unbond_handle; use namada::types::dec::Dec; use namada::types::storage::Epoch; use namada_tests::log::test; diff --git a/wasm/wasm_source/src/vp_implicit.rs b/wasm/wasm_source/src/vp_implicit.rs index 95d1401c54..8f36c31d3a 100644 --- a/wasm/wasm_source/src/vp_implicit.rs +++ b/wasm/wasm_source/src/vp_implicit.rs @@ -11,18 +11,24 @@ //! //! Any other storage key changes are allowed only with a valid signature. -use namada_vp_prelude::storage::KeySeg; +use core::ops::Deref; + use namada_vp_prelude::*; use once_cell::unsync::Lazy; enum KeyType<'a> { /// Public key - written once revealed Pk(&'a Address), - Token { + TokenBalance { owner: &'a Address, }, + TokenMinted, + TokenMinter(&'a Address), PoS, + Masp, + PgfSteward(&'a Address), GovernanceVote(&'a Address), + Ibc, Unknown, } @@ -31,9 +37,15 @@ impl<'a> From<&'a storage::Key> for KeyType<'a> { if let Some(address) = key::is_pks_key(key) { Self::Pk(address) } else if let Some([_, owner]) = token::is_any_token_balance_key(key) { - Self::Token { owner } - } else if proof_of_stake::storage::is_pos_key(key) { + Self::TokenBalance { owner } + } else if token::is_any_minted_balance_key(key).is_some() { + Self::TokenMinted + } else if let Some(minter) = token::is_any_minter_key(key) { + Self::TokenMinter(minter) + } else if proof_of_stake::storage_key::is_pos_key(key) { Self::PoS + } else if let Some(address) = pgf_storage::keys::is_stewards_key(key) { + Self::PgfSteward(address) } else if gov_storage::keys::is_vote_key(key) { let voter_address = gov_storage::keys::get_voter_address(key); if let Some(address) = voter_address { @@ -41,6 +53,10 @@ impl<'a> From<&'a storage::Key> for KeyType<'a> { } else { Self::Unknown } + } else if token::is_masp_key(key) { + Self::Masp + } else if ibc::is_ibc_key(key) { + Self::Ibc } else { Self::Unknown } @@ -98,7 +114,7 @@ fn validate_tx( } true } - KeyType::Token { owner, .. 
} => { + KeyType::TokenBalance { owner, .. } => { if owner == &addr { let pre: token::Amount = ctx.read_pre(key)?.unwrap_or_default(); @@ -130,52 +146,19 @@ fn validate_tx( true } } - KeyType::PoS => { - // Allow the account to be used in PoS - let bond_id = proof_of_stake::storage::is_bond_key(key) - .map(|(bond_id, _)| bond_id) - .or_else(|| { - proof_of_stake::storage::is_unbond_key(key) - .map(|(bond_id, _, _)| bond_id) - }); - let valid = match bond_id { - Some(bond_id) => { - // Bonds and unbonds changes for this address - // must be signed - bond_id.source != addr || *valid_sig - } - None => { - // Any other PoS changes are allowed without signature - true - } - }; - debug_log!( - "PoS key {} {}", - key, - if valid { "accepted" } else { "rejected" } - ); - valid - } - KeyType::GovernanceVote(voter) => { - if voter == &addr { - *valid_sig - } else { - true - } - } + KeyType::TokenMinted => verifiers.contains(&address::MULTITOKEN), + KeyType::TokenMinter(minter) => minter != &addr || *valid_sig, + KeyType::PoS => validate_pos_changes(ctx, &addr, key, &valid_sig)?, + KeyType::PgfSteward(address) => address != &addr || *valid_sig, + KeyType::GovernanceVote(voter) => voter != &addr || *valid_sig, + KeyType::Masp | KeyType::Ibc => true, KeyType::Unknown => { - if key.segments.get(0) == Some(&addr.to_db_key()) { - // Unknown changes to this address space require a valid - // signature - *valid_sig - } else { - // Unknown changes anywhere else are permitted - true - } + // Unknown changes require a valid signature + *valid_sig } }; if !is_valid { - debug_log!("key {} modification failed vp", key); + log_string(format!("key {} modification failed vp_implicit", key)); return reject(); } } @@ -183,6 +166,124 @@ fn validate_tx( accept() } +fn validate_pos_changes( + ctx: &Ctx, + owner: &Address, + key: &storage::Key, + valid_sig: &impl Deref<Target = bool>, +) -> VpResult { + use proof_of_stake::{storage, storage_key}; + + // Bond or unbond + let is_valid_bond_or_unbond_change = || { + let bond_id = storage_key::is_bond_key(key) + .map(|(bond_id, _)| bond_id) + .or_else(|| storage_key::is_bond_epoched_meta_key(key)) + .or_else(|| { + storage_key::is_unbond_key(key).map(|(bond_id, _, _)| bond_id) + }); + if let Some(bond_id) = bond_id { + // Bonds and unbonds changes for this address must be signed + return &bond_id.source != owner || **valid_sig; + }; + // Unknown changes are not allowed + false + }; + + // Changes in validator state + let is_valid_state_change = || { + let state_change = storage_key::is_validator_state_key(key); + let is_valid_state = + match state_change { + Some((address, epoch)) => { + let params_pre = storage::read_pos_params(&ctx.pre())?; + let state_pre = storage::validator_state_handle(address) + .get(&ctx.pre(), epoch, &params_pre)?; + + let params_post = storage::read_pos_params(&ctx.post())?; + let state_post = storage::validator_state_handle(address) + .get(&ctx.post(), epoch, &params_post)?; + + match (state_pre, state_post) { + (Some(pre), Some(post)) => { + use proof_of_stake::types::ValidatorState::*; + + // Bonding and unbonding may affect validator sets + if matches!( + pre, + Consensus | BelowCapacity | BelowThreshold + ) && matches!( + post, + Consensus | BelowCapacity | BelowThreshold + ) { + true + } else { + // Unknown state changes are not allowed + false + } + } + (Some(_pre), None) => { + // Clearing of old epoched data + true + } + _ => false, + } + } + None => false, + }; + + VpResult::Ok( + is_valid_state + || storage_key::is_validator_state_epoched_meta_key(key) + || 
storage_key::is_consensus_validator_set_key(key) + || storage_key::is_below_capacity_validator_set_key(key), + ) + }; + + let is_valid_reward_claim = || { + if let Some(bond_id) = + storage_key::is_last_pos_reward_claim_epoch_key(key) + { + // Claims for this address must be signed + return &bond_id.source != owner || **valid_sig; + } + if let Some(bond_id) = storage_key::is_rewards_counter_key(key) { + // Redelegations auto-claim rewards + return &bond_id.source != owner || **valid_sig; + } + + false + }; + + let is_valid_redelegation = || { + if storage_key::is_validator_redelegations_key(key) { + return true; + } + if let Some(delegator) = + storage_key::is_delegator_redelegations_key(key) + { + // Redelegations for this address must be signed + return delegator != owner || **valid_sig; + } + if let Some(bond_id) = storage_key::is_rewards_counter_key(key) { + // Redelegations auto-claim rewards + return &bond_id.source != owner || **valid_sig; + } + false + }; + + Ok(is_valid_bond_or_unbond_change() + || storage_key::is_total_deltas_key(key) + || storage_key::is_validator_deltas_key(key) + || storage_key::is_validator_total_bond_or_unbond_key(key) + || storage_key::is_validator_set_positions_key(key) + || storage_key::is_total_consensus_stake_key(key) + || is_valid_state_change()? + || is_valid_reward_claim() + || is_valid_redelegation() + || **valid_sig) +} + #[cfg(test)] mod tests { // Use this as `#[test]` annotation to enable logging @@ -349,10 +450,10 @@ mod tests { // able to transfer from it tx_env.credit_tokens(&source, &token, amount); - let amount = token::DenominatedAmount { + let amount = token::DenominatedAmount::new( amount, - denom: token::NATIVE_MAX_DECIMAL_PLACES.into(), - }; + token::NATIVE_MAX_DECIMAL_PLACES.into(), + ); // Initialize VP environment from a transaction vp_host_env::init_from_tx(vp_owner.clone(), tx_env, |address| { // Apply transfer in a transaction @@ -584,10 +685,10 @@ mod tests { ) .unwrap(); - let amount = token::DenominatedAmount { + let amount = token::DenominatedAmount::new( amount, - denom: token::NATIVE_MAX_DECIMAL_PLACES.into(), - }; + token::NATIVE_MAX_DECIMAL_PLACES.into(), + ); // Initialize VP environment from a transaction vp_host_env::init_from_tx(vp_owner.clone(), tx_env, |address| { @@ -645,10 +746,10 @@ mod tests { ) .unwrap(); - let amount = token::DenominatedAmount { + let amount = token::DenominatedAmount::new( amount, - denom: token::NATIVE_MAX_DECIMAL_PLACES.into(), - }; + token::NATIVE_MAX_DECIMAL_PLACES.into(), + ); // Initialize VP environment from a transaction vp_host_env::init_from_tx(vp_owner.clone(), tx_env, |address| { // Apply transfer in a transaction @@ -716,10 +817,10 @@ mod tests { ) .unwrap(); - let amount = token::DenominatedAmount { + let amount = token::DenominatedAmount::new( amount, - denom: token::NATIVE_MAX_DECIMAL_PLACES.into(), - }; + token::NATIVE_MAX_DECIMAL_PLACES.into(), + ); // Initialize VP environment from a transaction vp_host_env::init_from_tx(vp_owner.clone(), tx_env, |address| { diff --git a/wasm/wasm_source/src/vp_testnet_faucet.rs b/wasm/wasm_source/src/vp_testnet_faucet.rs index d8144d441c..b5c8b3456e 100644 --- a/wasm/wasm_source/src/vp_testnet_faucet.rs +++ b/wasm/wasm_source/src/vp_testnet_faucet.rs @@ -163,10 +163,10 @@ mod tests { // able to transfer from it tx_env.credit_tokens(&source, &token, amount); - let amount = token::DenominatedAmount { + let amount = token::DenominatedAmount::new( amount, - denom: token::NATIVE_MAX_DECIMAL_PLACES.into(), - }; + 
token::NATIVE_MAX_DECIMAL_PLACES.into(), + ); // Initialize VP environment from a transaction vp_host_env::init_from_tx(vp_owner.clone(), tx_env, |address| { @@ -325,10 +325,10 @@ mod tests { // be able to transfer from it tx_env.credit_tokens(&vp_owner, &token, amount); tx_env.commit_genesis(); - let amount = token::DenominatedAmount { + let amount = token::DenominatedAmount::new( amount, - denom: token::NATIVE_MAX_DECIMAL_PLACES.into() - }; + token::NATIVE_MAX_DECIMAL_PLACES.into() + ); // Initialize VP environment from a transaction vp_host_env::init_from_tx(vp_owner.clone(), tx_env, |address| { @@ -380,10 +380,10 @@ mod tests { let solution = challenge.solve(); let solution_bytes = solution.serialize_to_vec(); - let amount = token::DenominatedAmount { + let amount = token::DenominatedAmount::new( amount, - denom: token::NATIVE_MAX_DECIMAL_PLACES.into(), - }; + token::NATIVE_MAX_DECIMAL_PLACES.into(), + ); // Initialize VP environment from a transaction vp_host_env::init_from_tx(vp_owner.clone(), tx_env, |address| { diff --git a/wasm/wasm_source/src/vp_user.rs b/wasm/wasm_source/src/vp_user.rs index 390bc389fb..29c861d04d 100644 --- a/wasm/wasm_source/src/vp_user.rs +++ b/wasm/wasm_source/src/vp_user.rs @@ -11,26 +11,47 @@ //! //! Any other storage key changes are allowed only with a valid signature. -use namada_vp_prelude::storage::KeySeg; +use core::ops::Deref; + use namada_vp_prelude::*; use once_cell::unsync::Lazy; -use proof_of_stake::types::ValidatorState; +use proof_of_stake::storage::{read_pos_params, validator_state_handle}; +use proof_of_stake::storage_key::{ + is_below_capacity_validator_set_key, is_bond_epoched_meta_key, is_bond_key, + is_consensus_keys_key, is_consensus_validator_set_key, + is_delegator_redelegations_key, is_last_pos_reward_claim_epoch_key, + is_pos_key, is_rewards_counter_key, is_total_consensus_stake_key, + is_total_deltas_key, is_unbond_key, is_validator_address_raw_hash_key, + is_validator_addresses_key, is_validator_commission_rate_key, + is_validator_deltas_key, is_validator_eth_cold_key_key, + is_validator_eth_hot_key_key, is_validator_max_commission_rate_change_key, + is_validator_metadata_key, is_validator_redelegations_key, + is_validator_set_positions_key, is_validator_state_epoched_meta_key, + is_validator_state_key, is_validator_total_bond_or_unbond_key, +}; enum KeyType<'a> { - Token { owner: &'a Address }, + TokenBalance { owner: &'a Address }, + TokenMinted, + TokenMinter(&'a Address), PoS, Vp(&'a Address), Masp, - PgfStward(&'a Address), + PgfSteward(&'a Address), GovernanceVote(&'a Address), + Ibc, Unknown, } impl<'a> From<&'a storage::Key> for KeyType<'a> { fn from(key: &'a storage::Key) -> KeyType<'a> { if let Some([_, owner]) = token::is_any_token_balance_key(key) { - Self::Token { owner } - } else if proof_of_stake::storage::is_pos_key(key) { + Self::TokenBalance { owner } + } else if token::is_any_minted_balance_key(key).is_some() { + Self::TokenMinted + } else if let Some(minter) = token::is_any_minter_key(key) { + Self::TokenMinter(minter) + } else if is_pos_key(key) { Self::PoS } else if gov_storage::keys::is_vote_key(key) { let voter_address = gov_storage::keys::get_voter_address(key); @@ -40,11 +61,13 @@ impl<'a> From<&'a storage::Key> for KeyType<'a> { Self::Unknown } } else if let Some(address) = pgf_storage::keys::is_stewards_key(key) { - Self::PgfStward(address) + Self::PgfSteward(address) } else if let Some(address) = key.is_validity_predicate() { Self::Vp(address) } else if token::is_masp_key(key) { Self::Masp + } else if 
ibc::is_ibc_key(key) { + Self::Ibc } else { Self::Unknown } @@ -77,7 +100,7 @@ fn validate_tx( for key in keys_changed.iter() { let key_type: KeyType = key.into(); let is_valid = match key_type { - KeyType::Token { owner, .. } => { + KeyType::TokenBalance { owner, .. } => { if owner == &addr { let pre: token::Amount = ctx.read_pre(key)?.unwrap_or_default(); @@ -107,119 +130,11 @@ fn validate_tx( true } } - KeyType::PoS => { - // Bond or unbond - let bond_id = proof_of_stake::storage::is_bond_key(key) - .map(|(bond_id, _)| bond_id) - .or_else(|| { - proof_of_stake::storage::is_unbond_key(key) - .map(|(bond_id, _, _)| bond_id) - }); - let valid_bond_or_unbond_change = match bond_id { - Some(bond_id) => { - // Bonds and unbonds changes for this address - // must be signed - bond_id.source != addr || *valid_sig - } - None => { - // Any other PoS changes are allowed without signature - true - } - }; - // Commission rate changes must be signed by the validator - let comm = - proof_of_stake::storage::is_validator_commission_rate_key( - key, - ); - let valid_commission_rate_change = match comm { - Some((validator, _epoch)) => { - *validator == addr && *valid_sig - } - None => true, - }; - // Metadata changes must be signed by the validator whose - // metadata is manipulated - let metadata = - proof_of_stake::storage::is_validator_metadata_key(key); - let valid_metadata_change = match metadata { - Some(address) => *address == addr && *valid_sig, - None => true, - }; - - // Changes due to unjailing, deactivating, and reactivating are - // marked by changes in validator state - let state_change = - proof_of_stake::storage::is_validator_state_key(key); - let valid_state_change = match state_change { - Some((address, epoch)) => { - let params_pre = - proof_of_stake::read_pos_params(&ctx.pre())?; - let state_pre = - proof_of_stake::validator_state_handle(address) - .get(&ctx.pre(), epoch, ¶ms_pre)?; - - let params_post = - proof_of_stake::read_pos_params(&ctx.post())?; - let state_post = - proof_of_stake::validator_state_handle(address) - .get(&ctx.post(), epoch, ¶ms_post)?; - - match (state_pre, state_post) { - (Some(pre), Some(post)) => { - if - // Deactivation case - (matches!( - pre, - ValidatorState::Consensus - | ValidatorState::BelowCapacity - | ValidatorState::BelowThreshold - ) && post == ValidatorState::Inactive) - // Reactivation case - || pre == ValidatorState::Inactive - && post != ValidatorState::Inactive - // Unjail case - || pre == ValidatorState::Jailed - && matches!( - post, - ValidatorState::Consensus - | ValidatorState::BelowCapacity - | ValidatorState::BelowThreshold - ) - { - *address == addr && *valid_sig - } else { - true - } - } - (None, Some(_post)) => { - // Becoming a validator must be authorized - *valid_sig - } - _ => true, - } - } - None => true, - }; - - valid_bond_or_unbond_change - && valid_commission_rate_change - && valid_state_change - && valid_metadata_change - } - KeyType::GovernanceVote(voter) => { - if voter == &addr { - *valid_sig - } else { - true - } - } - KeyType::PgfStward(address) => { - if address == &addr { - *valid_sig - } else { - true - } - } + KeyType::TokenMinted => verifiers.contains(&address::MULTITOKEN), + KeyType::TokenMinter(minter) => minter != &addr || *valid_sig, + KeyType::PoS => validate_pos_changes(ctx, &addr, key, &valid_sig)?, + KeyType::PgfSteward(address) => address != &addr || *valid_sig, + KeyType::GovernanceVote(voter) => voter != &addr || *valid_sig, KeyType::Vp(owner) => { let has_post: bool = ctx.has_key_post(key)?; if owner == 
&addr { @@ -235,20 +150,14 @@ fn validate_tx( is_vp_whitelisted(ctx, &vp_hash)? } } - KeyType::Masp => true, + KeyType::Masp | KeyType::Ibc => true, KeyType::Unknown => { - if key.segments.get(0) == Some(&addr.to_db_key()) { - // Unknown changes to this address space require a valid - // signature - *valid_sig - } else { - // Unknown changes anywhere else are permitted - true - } + // Unknown changes require a valid signature + *valid_sig } }; if !is_valid { - debug_log!("key {} modification failed vp", key); + log_string(format!("key {} modification failed vp_user", key)); return reject(); } } @@ -256,6 +165,178 @@ fn validate_tx( accept() } +fn validate_pos_changes( + ctx: &Ctx, + owner: &Address, + key: &storage::Key, + valid_sig: &impl Deref<Target = bool>, +) -> VpResult { + // Bond or unbond + let is_valid_bond_or_unbond_change = || { + let bond_id = is_bond_key(key) + .map(|(bond_id, _)| bond_id) + .or_else(|| is_bond_epoched_meta_key(key)) + .or_else(|| is_unbond_key(key).map(|(bond_id, _, _)| bond_id)); + if let Some(bond_id) = bond_id { + // Bonds and unbonds changes for this address must be signed + return &bond_id.source != owner || **valid_sig; + }; + // Unknown changes are not allowed + false + }; + + // Commission rate changes must be signed by the validator + let is_valid_commission_rate_change = || { + if let Some(validator) = is_validator_commission_rate_key(key) { + return validator == owner && **valid_sig; + } + false + }; + + // Metadata changes must be signed by the validator whose + // metadata is manipulated + let is_valid_metadata_change = || { + let metadata = is_validator_metadata_key(key); + match metadata { + Some(address) => address == owner && **valid_sig, + None => false, + } + }; + + // Changes in validator state + let is_valid_state_change = || { + let state_change = is_validator_state_key(key); + let is_valid_state = match state_change { + Some((address, epoch)) => { + let params_pre = read_pos_params(&ctx.pre())?; + let state_pre = validator_state_handle(address).get( + &ctx.pre(), + epoch, + &params_pre, + )?; + + let params_post = read_pos_params(&ctx.post())?; + let state_post = validator_state_handle(address).get( + &ctx.post(), + epoch, + &params_post, + )?; + + match (state_pre, state_post) { + (Some(pre), Some(post)) => { + use proof_of_stake::types::ValidatorState::*; + + if ( + // Deactivation case + matches!( + pre, + Consensus | BelowCapacity | BelowThreshold + ) && post == Inactive) + // Reactivation case + || pre == Inactive && post != Inactive + // Unjail case + || pre == Jailed + && matches!( + post, + Consensus + | BelowCapacity + | BelowThreshold + ) + { + address == owner && **valid_sig + } else if + // Bonding and unbonding may affect validator sets + matches!( + pre, + Consensus | BelowCapacity | BelowThreshold + ) && matches!( + post, + Consensus | BelowCapacity | BelowThreshold + ) { + true + } else { + // Unknown state changes are not allowed + false + } + } + (None, Some(_post)) => { + // Becoming a validator must be authorized + address == owner && **valid_sig + } + (Some(_pre), None) => { + // Clearing of old epoched data + true + } + _ => false, + } + } + None => false, + }; + + VpResult::Ok( + is_valid_state + || is_validator_state_epoched_meta_key(key) + || is_consensus_validator_set_key(key) + || is_below_capacity_validator_set_key(key), + ) + }; + + let is_valid_reward_claim = || { + if let Some(bond_id) = is_last_pos_reward_claim_epoch_key(key) { + // Claims for this address must be signed + return &bond_id.source != owner || **valid_sig; + } + 
if let Some(bond_id) = is_rewards_counter_key(key) { + // Claims for this address must be signed + return &bond_id.source != owner || **valid_sig; + } + false + }; + + let is_valid_redelegation = || { + if is_validator_redelegations_key(key) { + return true; + } + if let Some(delegator) = is_delegator_redelegations_key(key) { + // Redelegations for this address must be signed + return delegator != owner || **valid_sig; + } + if let Some(bond_id) = is_rewards_counter_key(key) { + // Redelegations auto-claim rewards + return &bond_id.source != owner || **valid_sig; + } + false + }; + + let is_valid_become_validator = || { + if is_validator_addresses_key(key) + || is_consensus_keys_key(key) + || is_validator_eth_cold_key_key(key).is_some() + || is_validator_eth_hot_key_key(key).is_some() + || is_validator_max_commission_rate_change_key(key).is_some() + || is_validator_address_raw_hash_key(key).is_some() + { + // A signature is required to become validator + return **valid_sig; + } + false + }; + + Ok(is_valid_bond_or_unbond_change() + || is_total_deltas_key(key) + || is_validator_deltas_key(key) + || is_validator_total_bond_or_unbond_key(key) + || is_validator_set_positions_key(key) + || is_total_consensus_stake_key(key) + || is_valid_state_change()? + || is_valid_reward_claim() + || is_valid_redelegation() + || is_valid_commission_rate_change() + || is_valid_metadata_change() + || is_valid_become_validator() + || **valid_sig) +} + #[cfg(test)] mod tests { use address::testing::arb_non_internal_address; @@ -321,10 +402,10 @@ mod tests { ) .unwrap(); - let amount = token::DenominatedAmount { + let amount = token::DenominatedAmount::new( amount, - denom: token::NATIVE_MAX_DECIMAL_PLACES.into(), - }; + token::NATIVE_MAX_DECIMAL_PLACES.into(), + ); // Initialize VP environment from a transaction vp_host_env::init_from_tx(vp_owner.clone(), tx_env, |address| { // Apply transfer in a transaction @@ -376,10 +457,10 @@ mod tests { // be able to transfer from it tx_env.credit_tokens(&vp_owner, &token, amount); - let amount = token::DenominatedAmount { + let amount = token::DenominatedAmount::new( amount, - denom: token::NATIVE_MAX_DECIMAL_PLACES.into(), - }; + token::NATIVE_MAX_DECIMAL_PLACES.into(), + ); // Initialize VP environment from a transaction vp_host_env::init_from_tx(vp_owner.clone(), tx_env, |address| { // Apply transfer in a transaction @@ -434,10 +515,10 @@ mod tests { ) .unwrap(); - let amount = token::DenominatedAmount { + let amount = token::DenominatedAmount::new( amount, - denom: token::NATIVE_MAX_DECIMAL_PLACES.into(), - }; + token::NATIVE_MAX_DECIMAL_PLACES.into(), + ); // Initialize VP environment from a transaction vp_host_env::init_from_tx(vp_owner.clone(), tx_env, |address| { @@ -1042,10 +1123,10 @@ mod tests { // be able to transfer from it tx_env.credit_tokens(&source, &token, amount); - let amount = token::DenominatedAmount { + let amount = token::DenominatedAmount::new( amount, - denom: token::NATIVE_MAX_DECIMAL_PLACES.into(), - }; + token::NATIVE_MAX_DECIMAL_PLACES.into(), + ); // Initialize VP environment from a transaction vp_host_env::init_from_tx(vp_owner.clone(), tx_env, |address| { diff --git a/wasm_for_tests/tx_fail.wasm b/wasm_for_tests/tx_fail.wasm index a6d0598cd5..0748dc04de 100755 Binary files a/wasm_for_tests/tx_fail.wasm and b/wasm_for_tests/tx_fail.wasm differ diff --git a/wasm_for_tests/tx_memory_limit.wasm b/wasm_for_tests/tx_memory_limit.wasm index 25ca98f34e..87667b88cf 100755 Binary files a/wasm_for_tests/tx_memory_limit.wasm and 
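The new `validate_pos_changes` helper above gates every PoS-owned storage key behind a series of lazily evaluated closures: changes that touch an owner's bonds, unbonds, reward claims, redelegations, commission rate, metadata, or validator state require that owner's signature, while purely derived bookkeeping (total deltas, validator sets, total consensus stake) is always accepted. The sketch below illustrates the short-circuiting pattern in isolation; `KeyKind` and `is_authorized` are hypothetical stand-ins for this note, not Namada APIs. The `DenominatedAmount` constructor change visible in the test hunks is illustrated separately after the Cargo manifest diffs below.

// Standalone sketch of the closure-based authorization pattern used by
// `validate_pos_changes`. `KeyKind` and `is_authorized` are hypothetical
// stand-ins for illustration only; they are not part of the Namada codebase.

#[derive(Debug)]
enum KeyKind {
    // A bond or unbond entry together with the address of its source.
    Bond { source: String },
    // Derived bookkeeping such as total deltas or the validator sets.
    Derived,
}

fn is_authorized(key: &KeyKind, owner: &str, valid_sig: bool) -> bool {
    // Each check is a closure, so any expensive lookup it would perform only
    // runs if the earlier, cheaper checks have not already accepted the key.
    let is_valid_bond_change = || match key {
        // Changes to someone else's bond are not this owner's concern;
        // changes to the owner's own bond require the owner's signature.
        KeyKind::Bond { source } => source.as_str() != owner || valid_sig,
        _ => false,
    };
    let is_derived_data = || matches!(key, KeyKind::Derived);

    // Short-circuiting `||` mirrors the final expression of
    // `validate_pos_changes`: the first accepting check wins, and a valid
    // signature acts as a catch-all for everything else.
    is_valid_bond_change() || is_derived_data() || valid_sig
}

fn main() {
    let owner = "albert";
    // A change to albert's own bond without a signature is rejected.
    assert!(!is_authorized(&KeyKind::Bond { source: owner.into() }, owner, false));
    // The same change with a valid signature is accepted.
    assert!(is_authorized(&KeyKind::Bond { source: owner.into() }, owner, true));
    // Derived PoS bookkeeping is always accepted, signature or not.
    assert!(is_authorized(&KeyKind::Derived, owner, false));
}

Keeping each predicate in a closure keeps the more expensive checks (in the real VP, the `ctx.pre()`/`ctx.post()` state reads inside `is_valid_state_change`) off the hot path: they only execute when the cheaper key-classification checks have not already accepted the change.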
diff --git a/wasm_for_tests/tx_fail.wasm b/wasm_for_tests/tx_fail.wasm
index a6d0598cd5..0748dc04de 100755
Binary files a/wasm_for_tests/tx_fail.wasm and b/wasm_for_tests/tx_fail.wasm differ
diff --git a/wasm_for_tests/tx_memory_limit.wasm b/wasm_for_tests/tx_memory_limit.wasm
index 25ca98f34e..87667b88cf 100755
Binary files a/wasm_for_tests/tx_memory_limit.wasm and b/wasm_for_tests/tx_memory_limit.wasm differ
diff --git a/wasm_for_tests/tx_mint_tokens.wasm b/wasm_for_tests/tx_mint_tokens.wasm
index e79d206dfd..761e1a14e9 100755
Binary files a/wasm_for_tests/tx_mint_tokens.wasm and b/wasm_for_tests/tx_mint_tokens.wasm differ
diff --git a/wasm_for_tests/tx_no_op.wasm b/wasm_for_tests/tx_no_op.wasm
index f62604f1b8..e6bc331403 100755
Binary files a/wasm_for_tests/tx_no_op.wasm and b/wasm_for_tests/tx_no_op.wasm differ
diff --git a/wasm_for_tests/tx_proposal_code.wasm b/wasm_for_tests/tx_proposal_code.wasm
index e899beb121..568e7457ea 100755
Binary files a/wasm_for_tests/tx_proposal_code.wasm and b/wasm_for_tests/tx_proposal_code.wasm differ
diff --git a/wasm_for_tests/tx_read_storage_key.wasm b/wasm_for_tests/tx_read_storage_key.wasm
index b900802c53..8c7108bb5e 100755
Binary files a/wasm_for_tests/tx_read_storage_key.wasm and b/wasm_for_tests/tx_read_storage_key.wasm differ
diff --git a/wasm_for_tests/tx_write.wasm b/wasm_for_tests/tx_write.wasm
index d486dedec9..3467344ebc 100755
Binary files a/wasm_for_tests/tx_write.wasm and b/wasm_for_tests/tx_write.wasm differ
diff --git a/wasm_for_tests/tx_write_storage_key.wasm b/wasm_for_tests/tx_write_storage_key.wasm
index 902d297af7..2f6e3946b4 100755
Binary files a/wasm_for_tests/tx_write_storage_key.wasm and b/wasm_for_tests/tx_write_storage_key.wasm differ
diff --git a/wasm_for_tests/vp_always_false.wasm b/wasm_for_tests/vp_always_false.wasm
index 74b8b7c033..fe635e684c 100755
Binary files a/wasm_for_tests/vp_always_false.wasm and b/wasm_for_tests/vp_always_false.wasm differ
diff --git a/wasm_for_tests/vp_always_true.wasm b/wasm_for_tests/vp_always_true.wasm
index 3fe4955cb4..1a40c09085 100755
Binary files a/wasm_for_tests/vp_always_true.wasm and b/wasm_for_tests/vp_always_true.wasm differ
diff --git a/wasm_for_tests/vp_eval.wasm b/wasm_for_tests/vp_eval.wasm
index 7062d826a1..66a2ea5448 100755
Binary files a/wasm_for_tests/vp_eval.wasm and b/wasm_for_tests/vp_eval.wasm differ
diff --git a/wasm_for_tests/vp_memory_limit.wasm b/wasm_for_tests/vp_memory_limit.wasm
index 6066babe71..5cffc14b0d 100755
Binary files a/wasm_for_tests/vp_memory_limit.wasm and b/wasm_for_tests/vp_memory_limit.wasm differ
diff --git a/wasm_for_tests/vp_read_storage_key.wasm b/wasm_for_tests/vp_read_storage_key.wasm
index 18cf7a29b6..c202de9ea9 100755
Binary files a/wasm_for_tests/vp_read_storage_key.wasm and b/wasm_for_tests/vp_read_storage_key.wasm differ
diff --git a/wasm_for_tests/wasm_source/Cargo.lock b/wasm_for_tests/wasm_source/Cargo.lock
index 3c1fc857eb..1865570861 100644
--- a/wasm_for_tests/wasm_source/Cargo.lock
+++ b/wasm_for_tests/wasm_source/Cargo.lock
@@ -3304,7 +3304,7 @@ checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a"
 
 [[package]]
 name = "namada"
-version = "0.28.2"
+version = "0.29.0"
 dependencies = [
  "async-trait",
  "bimap",
@@ -3366,7 +3366,7 @@ dependencies = [
 
 [[package]]
 name = "namada_core"
-version = "0.28.2"
+version = "0.29.0"
 dependencies = [
  "ark-bls12-381",
  "ark-serialize",
@@ -3390,6 +3390,7 @@ dependencies = [
  "k256",
  "masp_primitives",
  "namada_macros",
+ "num-derive",
  "num-integer",
  "num-rational 0.4.1",
  "num-traits",
@@ -3418,7 +3419,7 @@ dependencies = [
 
 [[package]]
 name = "namada_ethereum_bridge"
-version = "0.28.2"
+version = "0.29.0"
 dependencies = [
  "borsh",
  "borsh-ext",
@@ -3439,7 +3440,7 @@ dependencies = [
 
 [[package]]
 name = "namada_macros"
-version = "0.28.2"
+version = "0.29.0"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -3448,7 +3449,7 @@ dependencies = [
 
 [[package]]
 name = "namada_proof_of_stake"
-version = "0.28.2"
+version = "0.29.0"
 dependencies = [
  "borsh",
  "data-encoding",
@@ -3463,7 +3464,7 @@ dependencies = [
 
 [[package]]
 name = "namada_sdk"
-version = "0.28.2"
+version = "0.29.0"
 dependencies = [
  "async-trait",
  "bimap",
@@ -3487,6 +3488,7 @@ dependencies = [
  "owo-colors",
  "parse_duration",
  "paste",
+ "proptest",
  "prost 0.12.3",
  "rand 0.8.5",
  "rand_core 0.6.4",
@@ -3508,7 +3510,7 @@ dependencies = [
 
 [[package]]
 name = "namada_test_utils"
-version = "0.28.2"
+version = "0.29.0"
 dependencies = [
  "borsh",
  "namada_core",
@@ -3517,7 +3519,7 @@ dependencies = [
 
 [[package]]
 name = "namada_tests"
-version = "0.28.2"
+version = "0.29.0"
 dependencies = [
  "async-trait",
  "chrono",
@@ -3551,7 +3553,7 @@ dependencies = [
 
 [[package]]
 name = "namada_tx_prelude"
-version = "0.28.2"
+version = "0.29.0"
 dependencies = [
  "borsh",
  "borsh-ext",
@@ -3566,7 +3568,7 @@ dependencies = [
 
 [[package]]
 name = "namada_vm_env"
-version = "0.28.2"
+version = "0.29.0"
 dependencies = [
  "borsh",
  "masp_primitives",
@@ -3575,7 +3577,7 @@ dependencies = [
 
 [[package]]
 name = "namada_vp_prelude"
-version = "0.28.2"
+version = "0.29.0"
 dependencies = [
  "borsh",
  "borsh-ext",
@@ -3589,7 +3591,7 @@ dependencies = [
 
 [[package]]
 name = "namada_wasm_for_tests"
-version = "0.28.2"
+version = "0.29.0"
 dependencies = [
  "borsh",
  "getrandom 0.2.11",
diff --git a/wasm_for_tests/wasm_source/Cargo.toml b/wasm_for_tests/wasm_source/Cargo.toml
index c4d7b9259a..da411e40b0 100644
--- a/wasm_for_tests/wasm_source/Cargo.toml
+++ b/wasm_for_tests/wasm_source/Cargo.toml
@@ -4,7 +4,7 @@ edition = "2021"
 license = "GPL-3.0"
 name = "namada_wasm_for_tests"
 resolver = "2"
-version = "0.28.2"
+version = "0.29.0"
 
 [lib]
 crate-type = ["cdylib"]
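The test hunks in the user VP above also migrate from building `token::DenominatedAmount` as a struct literal to calling `token::DenominatedAmount::new(amount, denom)`. The stand-alone sketch below mirrors that call-site change under the assumption that the real type's fields are no longer publicly constructible; the `Amount`, `Denomination`, and `DenominatedAmount` types here are simplified stand-ins, not the actual `namada_core` definitions.

// Stand-in types mirroring the struct-literal -> constructor migration seen in
// the vp_user test hunks. Simplified illustration only, not the real Namada types.

#[derive(Debug, Clone, Copy)]
struct Amount(u64);

#[derive(Debug, Clone, Copy)]
struct Denomination(u8);

#[derive(Debug, Clone, Copy)]
struct DenominatedAmount {
    amount: Amount,
    denom: Denomination,
}

impl DenominatedAmount {
    // Constructor-based API: callers no longer spell out the fields.
    fn new(amount: Amount, denom: Denomination) -> Self {
        Self { amount, denom }
    }

    fn amount(&self) -> Amount {
        self.amount
    }

    fn denom(&self) -> Denomination {
        self.denom
    }
}

fn main() {
    // Before (struct literal, as in the removed test lines):
    //     let amount = DenominatedAmount { amount, denom: NATIVE_MAX_DECIMAL_PLACES.into() };
    // After (constructor, as in the added test lines); the values are arbitrary:
    let value = DenominatedAmount::new(Amount(1_000_000), Denomination(6));
    println!("{:?} with {:?} decimal places", value.amount(), value.denom());
}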