diff --git a/.github/env b/.github/env
index bb61e1f4cd99..730c37f1db80 100644
--- a/.github/env
+++ b/.github/env
@@ -1 +1 @@
-IMAGE="docker.io/paritytech/ci-unified:bullseye-1.81.0-2024-09-11-v202409111034"
+IMAGE="docker.io/paritytech/ci-unified:bullseye-1.81.0-2024-11-19-v202411281558"
diff --git a/.github/workflows/checks-quick.yml b/.github/workflows/checks-quick.yml
index c733a2517cb8..4c26b85a6303 100644
--- a/.github/workflows/checks-quick.yml
+++ b/.github/workflows/checks-quick.yml
@@ -97,7 +97,6 @@ jobs:
           --exclude
           "substrate/frame/contracts/fixtures/build"
           "substrate/frame/contracts/fixtures/contracts/common"
-          "substrate/frame/revive/fixtures/build"
           "substrate/frame/revive/fixtures/contracts/common"
       - name: deny git deps
         run: python3 .github/scripts/deny-git-deps.py .
diff --git a/.github/workflows/command-backport.yml b/.github/workflows/command-backport.yml
index 8f23bcd75f01..eecf0ac72d2c 100644
--- a/.github/workflows/command-backport.yml
+++ b/.github/workflows/command-backport.yml
@@ -40,7 +40,7 @@ jobs:
         uses: korthout/backport-action@v3
         id: backport
         with:
-          target_branches: stable2407 stable2409
+          target_branches: stable2407 stable2409 stable2412
          merge_commits: skip
          github_token: ${{ steps.generate_token.outputs.token }}
          pull_description: |
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index f508404f1efa..42a7e87bda43 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -22,7 +22,7 @@ workflow:
 
 variables:
   # CI_IMAGE: !reference [ .ci-unified, variables, CI_IMAGE ]
-  CI_IMAGE: "docker.io/paritytech/ci-unified:bullseye-1.81.0-2024-09-11-v202409111034"
+  CI_IMAGE: "docker.io/paritytech/ci-unified:bullseye-1.81.0-2024-11-19-v202411281558"
   # BUILDAH_IMAGE is defined in group variables
   BUILDAH_COMMAND: "buildah --storage-driver overlay2"
   RELENG_SCRIPTS_BRANCH: "master"
diff --git a/Cargo.lock b/Cargo.lock
index c2d2eb3e9644..a945d148e051 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -899,6 +899,7 @@ dependencies = [
  "pallet-xcm-benchmarks 7.0.0",
  "pallet-xcm-bridge-hub-router 0.5.0",
  "parachains-common 7.0.0",
+ "parachains-runtimes-test-utils 7.0.0",
  "parity-scale-codec",
  "polkadot-parachain-primitives 6.0.0",
  "polkadot-runtime-common 7.0.0",
@@ -1036,6 +1037,7 @@ dependencies = [
  "pallet-xcm-benchmarks 7.0.0",
  "pallet-xcm-bridge-hub-router 0.5.0",
  "parachains-common 7.0.0",
+ "parachains-runtimes-test-utils 7.0.0",
  "parity-scale-codec",
  "polkadot-parachain-primitives 6.0.0",
  "polkadot-runtime-common 7.0.0",
@@ -1077,6 +1079,7 @@ dependencies = [
  "frame-support 28.0.0",
  "frame-system 28.0.0",
  "hex-literal",
+ "pallet-asset-conversion 10.0.0",
  "pallet-assets 29.1.0",
  "pallet-balances 28.0.0",
  "pallet-collator-selection 9.0.0",
@@ -1094,6 +1097,7 @@ dependencies = [
  "staging-xcm-builder 7.0.0",
  "staging-xcm-executor 7.0.0",
  "substrate-wasm-builder 17.0.0",
+ "xcm-runtime-apis 0.1.0",
 ]
 
 [[package]]
@@ -2578,6 +2582,7 @@ dependencies = [
  "pallet-xcm-benchmarks 7.0.0",
  "pallet-xcm-bridge-hub 0.2.0",
  "parachains-common 7.0.0",
+ "parachains-runtimes-test-utils 7.0.0",
  "parity-scale-codec",
  "polkadot-parachain-primitives 6.0.0",
  "polkadot-runtime-common 7.0.0",
@@ -2815,6 +2820,7 @@ dependencies = [
  "pallet-xcm-benchmarks 7.0.0",
  "pallet-xcm-bridge-hub 0.2.0",
  "parachains-common 7.0.0",
+ "parachains-runtimes-test-utils 7.0.0",
  "parity-scale-codec",
  "polkadot-parachain-primitives 6.0.0",
  "polkadot-runtime-common 7.0.0",
@@ -3550,6 +3556,7 @@ dependencies = [
  "pallet-utility 28.0.0",
  "pallet-xcm 7.0.0",
  "parachains-common 7.0.0",
+ "parachains-runtimes-test-utils 7.0.0",
  "parity-scale-codec",
  "polkadot-parachain-primitives 6.0.0",
  "polkadot-runtime-common 7.0.0",
@@ -3991,6 +3998,7 @@ dependencies = [
  "pallet-xcm 7.0.0",
  "pallet-xcm-benchmarks 7.0.0",
  "parachains-common 7.0.0",
+ "parachains-runtimes-test-utils 7.0.0",
  "parity-scale-codec",
  "polkadot-parachain-primitives 6.0.0",
  "polkadot-runtime-common 7.0.0",
@@ -4090,6 +4098,7 @@ dependencies = [
  "pallet-xcm 7.0.0",
  "pallet-xcm-benchmarks 7.0.0",
  "parachains-common 7.0.0",
+ "parachains-runtimes-test-utils 7.0.0",
  "parity-scale-codec",
  "polkadot-parachain-primitives 6.0.0",
  "polkadot-runtime-common 7.0.0",
@@ -5966,6 +5975,15 @@ dependencies = [
  "dirs-sys-next",
 ]
 
+[[package]]
+name = "dirs"
+version = "5.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "44c45a9d03d6676652bcb5e724c7e988de1acad23a711b5217ab9cbecbec2225"
+dependencies = [
+ "dirs-sys",
+]
+
 [[package]]
 name = "dirs-sys"
 version = "0.4.1"
@@ -7215,6 +7233,18 @@ dependencies = [
  "serde",
 ]
 
+[[package]]
+name = "frame-metadata"
+version = "18.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "daaf440c68eb2c3d88e5760fe8c7af3f9fee9181fab6c2f2c4e7cc48dcc40bb8"
+dependencies = [
+ "cfg-if",
+ "parity-scale-codec",
+ "scale-info",
+ "serde",
+]
+
 [[package]]
 name = "frame-metadata-hash-extension"
 version = "0.1.0"
@@ -7222,7 +7252,7 @@ dependencies = [
  "array-bytes",
  "const-hex",
  "docify",
- "frame-metadata 16.0.0",
+ "frame-metadata 18.0.0",
  "frame-support 28.0.0",
  "frame-system 28.0.0",
  "log",
@@ -7307,7 +7337,7 @@ dependencies = [
  "bitflags 1.3.2",
  "docify",
  "environmental",
- "frame-metadata 16.0.0",
+ "frame-metadata 18.0.0",
  "frame-support-procedural 23.0.0",
  "frame-system 28.0.0",
  "impl-trait-for-tuples",
@@ -7485,7 +7515,7 @@ version = "3.0.0"
dependencies = [
  "frame-benchmarking 28.0.0",
  "frame-executive 28.0.0",
- "frame-metadata 16.0.0",
+ "frame-metadata 18.0.0",
  "frame-support 28.0.0",
  "frame-support-test-pallet",
  "frame-system 28.0.0",
@@ -10195,9 +10225,9 @@ dependencies = [
 
 [[package]]
 name = "litep2p"
-version = "0.8.1"
+version = "0.8.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5b67484b8ac41e1cfdf012f65fa81e88c2ef5f8a7d6dec0e2678c2d06dc04530"
+checksum = "569e7dbec8a0d4b08d30f4942cd579cfe8db5d3f83f8604abe61697c38d17e73"
 dependencies = [
  "async-trait",
  "bs58",
@@ -10511,13 +10541,13 @@ dependencies = [
 
 [[package]]
 name = "merkleized-metadata"
-version = "0.1.0"
+version = "0.1.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f313fcff1d2a4bcaa2deeaa00bf7530d77d5f7bd0467a117dde2e29a75a7a17a"
+checksum = "943f6d92804ed0100803d51fa9b21fd9432b5d122ba4c713dc26fe6d2f619cf6"
 dependencies = [
  "array-bytes",
  "blake3",
- "frame-metadata 16.0.0",
+ "frame-metadata 18.0.0",
  "parity-scale-codec",
  "scale-decode 0.13.1",
  "scale-info",
@@ -14625,7 +14655,7 @@ dependencies = [
  "pallet-utility 28.0.0",
  "parity-scale-codec",
  "paste",
- "polkavm 0.13.0",
+ "polkavm 0.17.0",
  "pretty_assertions",
  "rlp 0.6.1",
  "scale-info",
@@ -14721,12 +14751,10 @@ dependencies = [
  "anyhow",
  "frame-system 28.0.0",
  "log",
- "parity-wasm",
- "polkavm-linker 0.14.0",
+ "polkavm-linker 0.17.0",
  "sp-core 28.0.0",
  "sp-io 30.0.0",
  "sp-runtime 31.0.1",
- "tempfile",
  "toml 0.8.12",
 ]
@@ -14843,7 +14871,7 @@ dependencies = [
  "bitflags 1.3.2",
  "parity-scale-codec",
  "paste",
- "polkavm-derive 0.14.0",
+ "polkavm-derive 0.17.0",
  "scale-info",
 ]
@@ -16165,6 +16193,7 @@ dependencies = [
  "pallet-session 28.0.0",
  "pallet-timestamp 27.0.0",
  "pallet-xcm 7.0.0",
+ "parachains-common 7.0.0",
  "parity-scale-codec",
  "polkadot-parachain-primitives 6.0.0",
  "sp-consensus-aura 0.32.0",
@@ -16176,6 +16205,7 @@ dependencies = [
  "staging-xcm 7.0.0",
  "staging-xcm-executor 7.0.0",
  "substrate-wasm-builder 17.0.0",
+ "xcm-runtime-apis 0.1.0",
 ]
 
 [[package]]
@@ -16574,6 +16604,7 @@ dependencies = [
  "pallet-xcm 7.0.0",
  "pallet-xcm-benchmarks 7.0.0",
  "parachains-common 7.0.0",
+ "parachains-runtimes-test-utils 7.0.0",
  "parity-scale-codec",
  "polkadot-parachain-primitives 6.0.0",
  "polkadot-runtime-common 7.0.0",
@@ -16631,6 +16662,7 @@ dependencies = [
  "sp-runtime 31.0.1",
  "staging-xcm 7.0.0",
  "staging-xcm-executor 7.0.0",
+ "westend-runtime",
  "westend-runtime-constants 7.0.0",
  "westend-system-emulated-network",
 ]
@@ -16675,6 +16707,7 @@ dependencies = [
  "pallet-xcm 7.0.0",
  "pallet-xcm-benchmarks 7.0.0",
  "parachains-common 7.0.0",
+ "parachains-runtimes-test-utils 7.0.0",
  "parity-scale-codec",
  "polkadot-parachain-primitives 6.0.0",
  "polkadot-runtime-common 7.0.0",
@@ -19674,15 +19707,15 @@ dependencies = [
 
 [[package]]
 name = "polkavm"
-version = "0.13.0"
+version = "0.17.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "57e79a14b15ed38cb5b9a1e38d02e933f19e3d180ae5b325fed606c5e5b9177e"
+checksum = "84979be196ba2855f73616413e7b1d18258128aa396b3dc23f520a00a807720e"
 dependencies = [
  "libc",
  "log",
- "polkavm-assembler 0.13.0",
- "polkavm-common 0.13.0",
- "polkavm-linux-raw 0.13.0",
+ "polkavm-assembler 0.17.0",
+ "polkavm-common 0.17.0",
+ "polkavm-linux-raw 0.17.0",
 ]
@@ -19705,9 +19738,9 @@ dependencies = [
 
 [[package]]
 name = "polkavm-assembler"
-version = "0.13.0"
+version = "0.17.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4e8da55465000feb0a61bbf556ed03024db58f3420eca37721fc726b3b2136bf"
+checksum = "0ba7b434ff630b0f73a1560e8baea807246ca22098abe49f97821e0e2d2accc4"
 dependencies = [
  "log",
 ]
@@ -19739,20 +19772,14 @@ dependencies = [
 
 [[package]]
 name = "polkavm-common"
-version = "0.13.0"
+version = "0.17.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "084b4339aae7dfdaaa5aa7d634110afd95970e0737b6fb2a0cb10db8b56b753c"
+checksum = "8f0dbafef4ab6ceecb4982ac3b550df430ef4f9fdbf07c108b7d4f91a0682fce"
 dependencies = [
  "log",
- "polkavm-assembler 0.13.0",
+ "polkavm-assembler 0.17.0",
 ]
 
-[[package]]
-name = "polkavm-common"
-version = "0.14.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "711952a783e9c5ad407cdacb1ed147f36d37c5d43417c1091d86456d2999417b"
-
 [[package]]
 name = "polkavm-derive"
 version = "0.8.0"
@@ -19782,11 +19809,11 @@ dependencies = [
 
 [[package]]
 name = "polkavm-derive"
-version = "0.14.0"
+version = "0.17.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b4832a0aebf6cefc988bb7b2d74ea8c86c983164672e2fc96300f356a1babfc1"
+checksum = "c0c3dbb6c8c7bd3e5f5b05aa7fc9355acf14df7ce5d392911e77d01090a38d0d"
 dependencies = [
- "polkavm-derive-impl-macro 0.14.0",
+ "polkavm-derive-impl-macro 0.17.0",
 ]
@@ -19827,11 +19854,11 @@ dependencies = [
 
 [[package]]
 name = "polkavm-derive-impl"
-version = "0.14.0"
+version = "0.17.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e339fc7c11310fe5adf711d9342278ac44a75c9784947937cce12bd4f30842f2"
+checksum = "42565aed4adbc4034612d0b17dea8db3681fb1bd1aed040d6edc5455a9f478a1"
 dependencies = [
- "polkavm-common 0.14.0",
+ "polkavm-common 0.17.0",
  "proc-macro2 1.0.86",
  "quote 1.0.37",
  "syn 2.0.87",
@@ -19869,11 +19896,11 @@ dependencies = [
 
 [[package]]
 name = "polkavm-derive-impl-macro"
-version = "0.14.0"
+version = "0.17.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b569754b15060d03000c09e3bf11509d527f60b75d79b4c30c3625b5071d9702"
+checksum = "86d9838e95241b0bce4fe269cdd4af96464160505840ed5a8ac8536119ba19e2"
 dependencies = [
- "polkavm-derive-impl 0.14.0",
+ "polkavm-derive-impl 0.17.0",
  "syn 2.0.87",
 ]
@@ -19909,15 +19936,16 @@ dependencies = [
 
 [[package]]
 name = "polkavm-linker"
-version = "0.14.0"
+version = "0.17.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0959ac3b0f4fd5caf5c245c637705f19493efe83dba31a83bbba928b93b0116a"
+checksum = "d359dc721d2cc9b555ebb3558c305112ddc5bdac09d26f95f2f7b49c1f2db7e9"
 dependencies = [
+ "dirs",
  "gimli 0.31.1",
  "hashbrown 0.14.5",
  "log",
  "object 0.36.1",
- "polkavm-common 0.14.0",
+ "polkavm-common 0.17.0",
  "regalloc2 0.9.3",
  "rustc-demangle",
 ]
@@ -19936,9 +19964,9 @@ checksum = "26e45fa59c7e1bb12ef5289080601e9ec9b31435f6e32800a5c90c132453d126"
 
 [[package]]
 name = "polkavm-linux-raw"
-version = "0.13.0"
+version = "0.17.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "686c4dd9c9c16cc22565b51bdbb269792318d0fd2e6b966b5f6c788534cad0e9"
+checksum = "e64c3d93a58ffbc3099d1227f0da9675a025a9ea6c917038f266920c1de1e568"
 
 [[package]]
 name = "polling"
@@ -20934,7 +20962,7 @@ checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191"
 dependencies = [
  "aho-corasick",
  "memchr",
- "regex-automata 0.4.9",
+ "regex-automata 0.4.8",
  "regex-syntax 0.8.5",
 ]
@@ -20955,9 +20983,9 @@ checksum = "fed1ceff11a1dddaee50c9dc8e4938bd106e9d89ae372f192311e7da498e3b69"
 
 [[package]]
 name = "regex-automata"
-version = "0.4.9"
+version = "0.4.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908"
+checksum = "368758f23274712b504848e9d5a6f010445cc8b87a7cdb4d7cbee666c1288da3"
 dependencies = [
  "aho-corasick",
  "memchr",
@@ -23252,6 +23280,7 @@ dependencies = [
  "futures",
  "futures-util",
  "hex",
+ "itertools 0.11.0",
  "jsonrpsee",
  "log",
  "parity-scale-codec",
@@ -23687,9 +23716,9 @@ dependencies = [
 
 [[package]]
 name = "scale-info"
-version = "2.11.5"
+version = "2.11.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1aa7ffc1c0ef49b0452c6e2986abf2b07743320641ffd5fc63d552458e3b779b"
+checksum = "346a3b32eba2640d17a9cb5927056b08f3de90f65b72fe09402c2ad07d684d0b"
 dependencies = [
  "bitvec",
  "cfg-if",
@@ -23701,9 +23730,9 @@ dependencies = [
 
 [[package]]
 name = "scale-info-derive"
-version = "2.11.5"
+version = "2.11.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "46385cc24172cf615450267463f937c10072516359b3ff1cb24228a4a08bf951"
+checksum = "c6630024bf739e2179b91fb424b28898baf819414262c5d376677dbff1fe7ebf"
 dependencies = [
  "proc-macro-crate 3.1.0",
  "proc-macro2 1.0.86",
@@ -25520,6 +25549,7 @@ dependencies = [
  "sp-api 26.0.0",
  "sp-consensus",
  "sp-core 28.0.0",
+ "sp-metadata-ir 0.6.0",
  "sp-runtime 31.0.1",
  "sp-state-machine 0.35.0",
  "sp-tracing 16.0.0",
@@ -26651,7 +26681,7 @@ dependencies = [
 name = "sp-metadata-ir"
 version = "0.6.0"
 dependencies = [
- "frame-metadata 16.0.0",
+ "frame-metadata 18.0.0",
  "parity-scale-codec",
  "scale-info",
 ]
@@ -28588,7 +28618,7 @@ dependencies = [
  "cargo_metadata",
  "console",
  "filetime",
- "frame-metadata 16.0.0",
+ "frame-metadata 18.0.0",
  "jobserver",
  "merkleized-metadata",
  "parity-scale-codec",
diff --git a/Cargo.toml b/Cargo.toml
index 53f95406e79a..ecc385504181 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -779,7 +779,7 @@ frame-benchmarking-pallet-pov = { default-features = false, path = "substrate/fr
 frame-election-provider-solution-type = { path = "substrate/frame/election-provider-support/solution-type", default-features = false }
 frame-election-provider-support = { path = "substrate/frame/election-provider-support", default-features = false }
 frame-executive = { path = "substrate/frame/executive", default-features = false }
-frame-metadata = { version = "16.0.0", default-features = false }
+frame-metadata = { version = "18.0.0", default-features = false }
 frame-metadata-hash-extension = { path = "substrate/frame/metadata-hash-extension", default-features = false }
 frame-support = { path = "substrate/frame/support", default-features = false }
 frame-support-procedural = { path = "substrate/frame/support/procedural", default-features = false }
@@ -848,13 +848,13 @@ linked-hash-map = { version = "0.5.4" }
 linked_hash_set = { version = "0.1.4" }
 linregress = { version = "0.5.1" }
 lite-json = { version = "0.2.0", default-features = false }
-litep2p = { version = "0.8.1", features = ["websocket"] }
+litep2p = { version = "0.8.2", features = ["websocket"] }
 log = { version = "0.4.22", default-features = false }
 macro_magic = { version = "0.5.1" }
 maplit = { version = "1.0.2" }
 memmap2 = { version = "0.9.3" }
 memory-db = { version = "0.32.0", default-features = false }
-merkleized-metadata = { version = "0.1.0" }
+merkleized-metadata = { version = "0.1.2" }
 merlin = { version = "3.0", default-features = false }
 messages-relay = { path = "bridges/relays/messages" }
 metered = { version = "0.6.1", default-features = false, package = "prioritized-metered-channel" }
@@ -1197,7 +1197,7 @@ sc-tracing-proc-macro = { path = "substrate/client/tracing/proc-macro", default-
 sc-transaction-pool = { path = "substrate/client/transaction-pool", default-features = false }
 sc-transaction-pool-api = { path = "substrate/client/transaction-pool/api", default-features = false }
 sc-utils = { path = "substrate/client/utils", default-features = false }
-scale-info = { version = "2.11.1", default-features = false }
+scale-info = { version = "2.11.6", default-features = false }
 schemars = { version = "0.8.13", default-features = false }
 schnellru = { version = "0.2.3" }
 schnorrkel = { version = "0.11.4", default-features = false }
diff --git a/bridges/modules/relayers/src/extension/mod.rs b/bridges/modules/relayers/src/extension/mod.rs
index 34d280d26d6e..d562ed9bcd0e 100644
--- a/bridges/modules/relayers/src/extension/mod.rs
+++ b/bridges/modules/relayers/src/extension/mod.rs
@@ -129,7 +129,7 @@ pub struct BridgeRelayersTransactionExtension<Runtime, Config>(
 impl<R, C> BridgeRelayersTransactionExtension<R, C>
 where
     Self: 'static + Send + Sync,
-    R: RelayersConfig
+    R: RelayersConfig<C::BridgeRelayersPalletInstance>
         + BridgeMessagesConfig
         + TransactionPaymentConfig,
     C: ExtensionConfig,
@@ -250,7 +250,7 @@ where
         // let's also replace the weight of slashing relayer with the weight of rewarding relayer
         if call_info.is_receive_messages_proof_call() {
             post_info_weight = post_info_weight.saturating_sub(
-                <R as BridgeMessagesConfig>::WeightInfo::extra_weight_of_successful_receive_messages_proof_call(),
+                <R as BridgeMessagesConfig<C::BridgeMessagesPalletInstance>>::WeightInfo::extra_weight_of_successful_receive_messages_proof_call(),
             );
         }
@@ -278,7 +278,7 @@ impl<R, C> TransactionExtension<R::RuntimeCall> for BridgeRelayersTransactionExtension<R, C>
 where
     Self: 'static + Send + Sync,
-    R: RelayersConfig
+    R: RelayersConfig<C::BridgeRelayersPalletInstance>
         + BridgeMessagesConfig
         + TransactionPaymentConfig,
     C: ExtensionConfig,
@@ -326,7 +326,9 @@ where
         };
 
         // we only boost priority if relayer has staked required balance
-        if !RelayersPallet::<R>::is_registration_active(&data.relayer) {
+        if !RelayersPallet::<R, C::BridgeRelayersPalletInstance>::is_registration_active(
+            &data.relayer,
+        ) {
             return Ok((Default::default(), Some(data), origin))
         }
@@ -382,7 +384,11 @@ where
         match call_result {
             RelayerAccountAction::None => (),
             RelayerAccountAction::Reward(relayer, reward_account, reward) => {
-                RelayersPallet::<R>::register_relayer_reward(reward_account, &relayer, reward);
+                RelayersPallet::<R, C::BridgeRelayersPalletInstance>::register_relayer_reward(
+                    reward_account,
+                    &relayer,
+                    reward,
+                );
 
                 log::trace!(
                     target: LOG_TARGET,
@@ -394,7 +400,7 @@ where
                 );
             },
             RelayerAccountAction::Slash(relayer, slash_account) =>
-                RelayersPallet::<R>::slash_and_deregister(
+                RelayersPallet::<R, C::BridgeRelayersPalletInstance>::slash_and_deregister(
                     &relayer,
                     ExplicitOrAccountParams::Params(slash_account),
                 ),
diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/Cargo.toml
index aa6eebc5458f..53acd038cdf5 100644
--- a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/Cargo.toml
+++ b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/Cargo.toml
@@ -21,6 +21,7 @@ sp-runtime = { workspace = true }
 # Polkadot
 polkadot-runtime-common = { workspace = true, default-features = true }
 westend-runtime-constants = { workspace = true, default-features = true }
+westend-runtime = { workspace = true }
 xcm = { workspace = true }
 xcm-executor = { workspace = true }
diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/governance.rs b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/governance.rs
new file mode 100644
index 000000000000..3dadcdd94870
--- /dev/null
+++ b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/governance.rs
@@ -0,0 +1,503 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use crate::imports::*;
+use frame_support::traits::ProcessMessageError;
+
+use codec::Encode;
+use frame_support::sp_runtime::traits::Dispatchable;
+use parachains_common::AccountId;
+use people_westend_runtime::people::IdentityInfo;
+use westend_runtime::governance::pallet_custom_origins::Origin::GeneralAdmin as GeneralAdminOrigin;
+use westend_system_emulated_network::people_westend_emulated_chain::people_westend_runtime;
+
+use pallet_identity::Data;
+
+use emulated_integration_tests_common::accounts::{ALICE, BOB};
+
+#[test]
+fn relay_commands_add_registrar() {
+    let (origin_kind, origin) =
+        (OriginKind::Superuser, <Westend as Chain>::RuntimeOrigin::root());
+
+    let registrar: AccountId = [1; 32].into();
+    Westend::execute_with(|| {
+        type Runtime = <Westend as Chain>::Runtime;
+        type RuntimeCall = <Westend as Chain>::RuntimeCall;
+        type RuntimeEvent = <Westend as Chain>::RuntimeEvent;
+        type PeopleCall = <PeopleWestend as Chain>::RuntimeCall;
+        type PeopleRuntime = <PeopleWestend as Chain>::Runtime;
+
+        let add_registrar_call =
+            PeopleCall::Identity(pallet_identity::Call::<PeopleRuntime>::add_registrar {
+                account: registrar.into(),
+            });
+
+        let xcm_message = RuntimeCall::XcmPallet(pallet_xcm::Call::<Runtime>::send {
+            dest: bx!(VersionedLocation::from(Location::new(0, [Parachain(1004)]))),
+            message: bx!(VersionedXcm::from(Xcm(vec![
+                UnpaidExecution { weight_limit: Unlimited, check_origin: None },
+                Transact { origin_kind, call: add_registrar_call.encode().into() }
+            ]))),
+        });
+
+        assert_ok!(xcm_message.dispatch(origin));
+
+        assert_expected_events!(
+            Westend,
+            vec![
+                RuntimeEvent::XcmPallet(pallet_xcm::Event::Sent { .. }) => {},
+            ]
+        );
+    });
+
+    PeopleWestend::execute_with(|| {
+        type RuntimeEvent = <PeopleWestend as Chain>::RuntimeEvent;
+
+        assert_expected_events!(
+            PeopleWestend,
+            vec![
+                RuntimeEvent::Identity(pallet_identity::Event::RegistrarAdded { .. }) => {},
+                RuntimeEvent::MessageQueue(pallet_message_queue::Event::Processed { success: true, .. }) => {},
+            ]
+        );
+    });
+}
+
+#[test]
+fn relay_commands_add_registrar_wrong_origin() {
+    let people_westend_alice = PeopleWestend::account_id_of(ALICE);
+
+    let origins = vec![
+        (
+            OriginKind::SovereignAccount,
+            <Westend as Chain>::RuntimeOrigin::signed(people_westend_alice),
+        ),
+        (OriginKind::Xcm, GeneralAdminOrigin.into()),
+    ];
+
+    let mut signed_origin = true;
+
+    for (origin_kind, origin) in origins {
+        let registrar: AccountId = [1; 32].into();
+        Westend::execute_with(|| {
+            type Runtime = <Westend as Chain>::Runtime;
+            type RuntimeCall = <Westend as Chain>::RuntimeCall;
+            type RuntimeEvent = <Westend as Chain>::RuntimeEvent;
+            type PeopleCall = <PeopleWestend as Chain>::RuntimeCall;
+            type PeopleRuntime = <PeopleWestend as Chain>::Runtime;
+
+            let add_registrar_call =
+                PeopleCall::Identity(pallet_identity::Call::<PeopleRuntime>::add_registrar {
+                    account: registrar.into(),
+                });
+
+            let xcm_message = RuntimeCall::XcmPallet(pallet_xcm::Call::<Runtime>::send {
+                dest: bx!(VersionedLocation::from(Location::new(0, [Parachain(1004)]))),
+                message: bx!(VersionedXcm::from(Xcm(vec![
+                    UnpaidExecution { weight_limit: Unlimited, check_origin: None },
+                    Transact { origin_kind, call: add_registrar_call.encode().into() }
+                ]))),
+            });
+
+            assert_ok!(xcm_message.dispatch(origin));
+            assert_expected_events!(
+                Westend,
+                vec![
+                    RuntimeEvent::XcmPallet(pallet_xcm::Event::Sent { .. }) => {},
+                ]
+            );
+        });
+
+        PeopleWestend::execute_with(|| {
+            type RuntimeEvent = <PeopleWestend as Chain>::RuntimeEvent;
+
+            if signed_origin {
+                assert_expected_events!(
+                    PeopleWestend,
+                    vec![
+                        RuntimeEvent::MessageQueue(pallet_message_queue::Event::ProcessingFailed { error: ProcessMessageError::Unsupported, .. }) => {},
+                    ]
+                );
+            } else {
+                assert_expected_events!(
+                    PeopleWestend,
+                    vec![
+                        RuntimeEvent::MessageQueue(pallet_message_queue::Event::Processed { success: true, .. }) => {},
+                    ]
+                );
+            }
+        });
+
+        signed_origin = false;
+    }
+}
+
+#[test]
+fn relay_commands_kill_identity() {
+    // To kill an identity, first one must be set
+    PeopleWestend::execute_with(|| {
+        type PeopleRuntime = <PeopleWestend as Chain>::Runtime;
+        type PeopleRuntimeEvent = <PeopleWestend as Chain>::RuntimeEvent;
+
+        let people_westend_alice =
+            <PeopleWestend as Chain>::RuntimeOrigin::signed(PeopleWestend::account_id_of(ALICE));
+
+        let identity_info = IdentityInfo {
+            email: Data::Raw(b"test@test.io".to_vec().try_into().unwrap()),
+            ..Default::default()
+        };
+        let identity: Box<<PeopleRuntime as pallet_identity::Config>::IdentityInformation> =
+            Box::new(identity_info);
+
+        assert_ok!(<PeopleWestend as PeopleWestendPallet>::Identity::set_identity(
+            people_westend_alice,
+            identity
+        ));
+
+        assert_expected_events!(
+            PeopleWestend,
+            vec![
+                PeopleRuntimeEvent::Identity(pallet_identity::Event::IdentitySet { .. }) => {},
+            ]
+        );
+    });
+
+    let (origin_kind, origin) =
+        (OriginKind::Superuser, <Westend as Chain>::RuntimeOrigin::root());
+
+    Westend::execute_with(|| {
+        type Runtime = <Westend as Chain>::Runtime;
+        type RuntimeCall = <Westend as Chain>::RuntimeCall;
+        type PeopleCall = <PeopleWestend as Chain>::RuntimeCall;
+        type RuntimeEvent = <Westend as Chain>::RuntimeEvent;
+        type PeopleRuntime = <PeopleWestend as Chain>::Runtime;
+
+        let kill_identity_call =
+            PeopleCall::Identity(pallet_identity::Call::<PeopleRuntime>::kill_identity {
+                target: people_westend_runtime::MultiAddress::Id(PeopleWestend::account_id_of(
+                    ALICE,
+                )),
+            });
+
+        let xcm_message = RuntimeCall::XcmPallet(pallet_xcm::Call::<Runtime>::send {
+            dest: bx!(VersionedLocation::from(Location::new(0, [Parachain(1004)]))),
+            message: bx!(VersionedXcm::from(Xcm(vec![
+                UnpaidExecution { weight_limit: Unlimited, check_origin: None },
+                Transact { origin_kind, call: kill_identity_call.encode().into() }
+            ]))),
+        });
+
+        assert_ok!(xcm_message.dispatch(origin));
+
+        assert_expected_events!(
+            Westend,
+            vec![
+                RuntimeEvent::XcmPallet(pallet_xcm::Event::Sent { .. }) => {},
+            ]
+        );
+    });
+
+    PeopleWestend::execute_with(|| {
+        type RuntimeEvent = <PeopleWestend as Chain>::RuntimeEvent;
+
+        assert_expected_events!(
+            PeopleWestend,
+            vec![
+                RuntimeEvent::Identity(pallet_identity::Event::IdentityKilled { .. }) => {},
+                RuntimeEvent::MessageQueue(pallet_message_queue::Event::Processed { success: true, .. }) => {},
+            ]
+        );
+    });
+}
+
+#[test]
+fn relay_commands_kill_identity_wrong_origin() {
+    let people_westend_alice = PeopleWestend::account_id_of(BOB);
+
+    let origins = vec![
+        (
+            OriginKind::SovereignAccount,
+            <Westend as Chain>::RuntimeOrigin::signed(people_westend_alice),
+        ),
+        (OriginKind::Xcm, GeneralAdminOrigin.into()),
+    ];
+
+    for (origin_kind, origin) in origins {
+        Westend::execute_with(|| {
+            type Runtime = <Westend as Chain>::Runtime;
+            type RuntimeCall = <Westend as Chain>::RuntimeCall;
+            type PeopleCall = <PeopleWestend as Chain>::RuntimeCall;
+            type RuntimeEvent = <Westend as Chain>::RuntimeEvent;
+            type PeopleRuntime = <PeopleWestend as Chain>::Runtime;
+
+            let kill_identity_call =
+                PeopleCall::Identity(pallet_identity::Call::<PeopleRuntime>::kill_identity {
+                    target: people_westend_runtime::MultiAddress::Id(PeopleWestend::account_id_of(
+                        ALICE,
+                    )),
+                });
+
+            let xcm_message = RuntimeCall::XcmPallet(pallet_xcm::Call::<Runtime>::send {
+                dest: bx!(VersionedLocation::from(Location::new(0, [Parachain(1004)]))),
+                message: bx!(VersionedXcm::from(Xcm(vec![
+                    UnpaidExecution { weight_limit: Unlimited, check_origin: None },
+                    Transact { origin_kind, call: kill_identity_call.encode().into() }
+                ]))),
+            });
+
+            assert_ok!(xcm_message.dispatch(origin));
+            assert_expected_events!(
+                Westend,
+                vec![
+                    RuntimeEvent::XcmPallet(pallet_xcm::Event::Sent { .. }) => {},
+                ]
+            );
+        });
+
+        PeopleWestend::execute_with(|| {
+            assert_expected_events!(PeopleWestend, vec![]);
+        });
+    }
+}
+
+#[test]
+fn relay_commands_add_remove_username_authority() {
+    let people_westend_alice = PeopleWestend::account_id_of(ALICE);
+    let people_westend_bob = PeopleWestend::account_id_of(BOB);
+
+    let (origin_kind, origin, usr) =
+        (OriginKind::Superuser, <Westend as Chain>::RuntimeOrigin::root(), "rootusername");
+
+    // First, add a username authority.
+    Westend::execute_with(|| {
+        type Runtime = <Westend as Chain>::Runtime;
+        type RuntimeCall = <Westend as Chain>::RuntimeCall;
+        type RuntimeEvent = <Westend as Chain>::RuntimeEvent;
+        type PeopleCall = <PeopleWestend as Chain>::RuntimeCall;
+        type PeopleRuntime = <PeopleWestend as Chain>::Runtime;
+
+        let add_username_authority =
+            PeopleCall::Identity(pallet_identity::Call::<PeopleRuntime>::add_username_authority {
+                authority: people_westend_runtime::MultiAddress::Id(people_westend_alice.clone()),
+                suffix: b"suffix1".into(),
+                allocation: 10,
+            });
+
+        let add_authority_xcm_msg = RuntimeCall::XcmPallet(pallet_xcm::Call::<Runtime>::send {
+            dest: bx!(VersionedLocation::from(Location::new(0, [Parachain(1004)]))),
+            message: bx!(VersionedXcm::from(Xcm(vec![
+                UnpaidExecution { weight_limit: Unlimited, check_origin: None },
+                Transact { origin_kind, call: add_username_authority.encode().into() }
+            ]))),
+        });
+
+        assert_ok!(add_authority_xcm_msg.dispatch(origin.clone()));
+
+        assert_expected_events!(
+            Westend,
+            vec![
+                RuntimeEvent::XcmPallet(pallet_xcm::Event::Sent { .. }) => {},
+            ]
+        );
+    });
+
+    // Check events system-parachain-side
+    PeopleWestend::execute_with(|| {
+        type RuntimeEvent = <PeopleWestend as Chain>::RuntimeEvent;
+
+        assert_expected_events!(
+            PeopleWestend,
+            vec![
+                RuntimeEvent::Identity(pallet_identity::Event::AuthorityAdded { .. }) => {},
+                RuntimeEvent::MessageQueue(pallet_message_queue::Event::Processed { success: true, .. }) => {},
+            ]
+        );
+    });
+
+    // Now, use the previously added username authority to concede a username to an account.
+    PeopleWestend::execute_with(|| {
+        type PeopleRuntimeEvent = <PeopleWestend as Chain>::RuntimeEvent;
+        let full_username = [usr.to_owned(), ".suffix1".to_owned()].concat().into_bytes();
+
+        assert_ok!(<PeopleWestend as PeopleWestendPallet>::Identity::set_username_for(
+            <PeopleWestend as Chain>::RuntimeOrigin::signed(people_westend_alice.clone()),
+            people_westend_runtime::MultiAddress::Id(people_westend_bob.clone()),
+            full_username,
+            None,
+            true
+        ));
+
+        assert_expected_events!(
+            PeopleWestend,
+            vec![
+                PeopleRuntimeEvent::Identity(pallet_identity::Event::UsernameQueued { .. }) => {},
+            ]
+        );
+    });
+
+    // Accept the given username
+    PeopleWestend::execute_with(|| {
+        type PeopleRuntimeEvent = <PeopleWestend as Chain>::RuntimeEvent;
+        let full_username = [usr.to_owned(), ".suffix1".to_owned()].concat().into_bytes();
+
+        assert_ok!(<PeopleWestend as PeopleWestendPallet>::Identity::accept_username(
+            <PeopleWestend as Chain>::RuntimeOrigin::signed(people_westend_bob.clone()),
+            full_username.try_into().unwrap(),
+        ));
+
+        assert_expected_events!(
+            PeopleWestend,
+            vec![
+                PeopleRuntimeEvent::Identity(pallet_identity::Event::UsernameSet { .. }) => {},
+            ]
+        );
+    });
+
+    // Now, remove the username authority with another privileged XCM call.
+    Westend::execute_with(|| {
+        type Runtime = <Westend as Chain>::Runtime;
+        type RuntimeCall = <Westend as Chain>::RuntimeCall;
+        type RuntimeEvent = <Westend as Chain>::RuntimeEvent;
+        type PeopleCall = <PeopleWestend as Chain>::RuntimeCall;
+        type PeopleRuntime = <PeopleWestend as Chain>::Runtime;
+
+        let remove_username_authority = PeopleCall::Identity(pallet_identity::Call::<
+            PeopleRuntime,
+        >::remove_username_authority {
+            authority: people_westend_runtime::MultiAddress::Id(people_westend_alice.clone()),
+            suffix: b"suffix1".into(),
+        });
+
+        let remove_authority_xcm_msg = RuntimeCall::XcmPallet(pallet_xcm::Call::<Runtime>::send {
+            dest: bx!(VersionedLocation::from(Location::new(0, [Parachain(1004)]))),
+            message: bx!(VersionedXcm::from(Xcm(vec![
+                UnpaidExecution { weight_limit: Unlimited, check_origin: None },
+                Transact { origin_kind, call: remove_username_authority.encode().into() }
+            ]))),
+        });
+
+        assert_ok!(remove_authority_xcm_msg.dispatch(origin));
+
+        assert_expected_events!(
+            Westend,
+            vec![
+                RuntimeEvent::XcmPallet(pallet_xcm::Event::Sent { .. }) => {},
+            ]
+        );
+    });
+
+    // Final event check.
+    PeopleWestend::execute_with(|| {
+        type RuntimeEvent = <PeopleWestend as Chain>::RuntimeEvent;
+
+        assert_expected_events!(
+            PeopleWestend,
+            vec![
+                RuntimeEvent::Identity(pallet_identity::Event::AuthorityRemoved { .. }) => {},
+                RuntimeEvent::MessageQueue(pallet_message_queue::Event::Processed { success: true, .. }) => {},
+            ]
+        );
+    });
+}
+
+#[test]
+fn relay_commands_add_remove_username_authority_wrong_origin() {
+    let people_westend_alice = PeopleWestend::account_id_of(ALICE);
+
+    let origins = vec![
+        (
+            OriginKind::SovereignAccount,
+            <Westend as Chain>::RuntimeOrigin::signed(people_westend_alice.clone()),
+        ),
+        (OriginKind::Xcm, GeneralAdminOrigin.into()),
+    ];
+
+    for (origin_kind, origin) in origins {
+        Westend::execute_with(|| {
+            type Runtime = <Westend as Chain>::Runtime;
+            type RuntimeCall = <Westend as Chain>::RuntimeCall;
+            type RuntimeEvent = <Westend as Chain>::RuntimeEvent;
+            type PeopleCall = <PeopleWestend as Chain>::RuntimeCall;
+            type PeopleRuntime = <PeopleWestend as Chain>::Runtime;
+
+            let add_username_authority = PeopleCall::Identity(pallet_identity::Call::<
+                PeopleRuntime,
+            >::add_username_authority {
+                authority: people_westend_runtime::MultiAddress::Id(people_westend_alice.clone()),
+                suffix: b"suffix1".into(),
+                allocation: 10,
+            });
+
+            let add_authority_xcm_msg = RuntimeCall::XcmPallet(pallet_xcm::Call::<Runtime>::send {
+                dest: bx!(VersionedLocation::from(Location::new(0, [Parachain(1004)]))),
+                message: bx!(VersionedXcm::from(Xcm(vec![
+                    UnpaidExecution { weight_limit: Unlimited, check_origin: None },
+                    Transact { origin_kind, call: add_username_authority.encode().into() }
+                ]))),
+            });
+
+            assert_ok!(add_authority_xcm_msg.dispatch(origin.clone()));
+            assert_expected_events!(
+                Westend,
+                vec![
+                    RuntimeEvent::XcmPallet(pallet_xcm::Event::Sent { .. }) => {},
+                ]
+            );
+        });
+
+        // Check events system-parachain-side
+        PeopleWestend::execute_with(|| {
+            assert_expected_events!(PeopleWestend, vec![]);
+        });
+
+        Westend::execute_with(|| {
+            type Runtime = <Westend as Chain>::Runtime;
+            type RuntimeCall = <Westend as Chain>::RuntimeCall;
+            type RuntimeEvent = <Westend as Chain>::RuntimeEvent;
+            type PeopleCall = <PeopleWestend as Chain>::RuntimeCall;
+            type PeopleRuntime = <PeopleWestend as Chain>::Runtime;
+
+            let remove_username_authority = PeopleCall::Identity(pallet_identity::Call::<
+                PeopleRuntime,
+            >::remove_username_authority {
+                authority: people_westend_runtime::MultiAddress::Id(people_westend_alice.clone()),
+                suffix: b"suffix1".into(),
+            });
+
+            let remove_authority_xcm_msg =
+                RuntimeCall::XcmPallet(pallet_xcm::Call::<Runtime>::send {
+                    dest: bx!(VersionedLocation::from(Location::new(0, [Parachain(1004)]))),
+                    message: bx!(VersionedXcm::from(Xcm(vec![
+                        UnpaidExecution { weight_limit: Unlimited, check_origin: None },
+                        Transact {
+                            origin_kind: OriginKind::SovereignAccount,
+                            call: remove_username_authority.encode().into(),
+                        }
+                    ]))),
+                });
+
+            assert_ok!(remove_authority_xcm_msg.dispatch(origin));
+            assert_expected_events!(
+                Westend,
+                vec![
+                    RuntimeEvent::XcmPallet(pallet_xcm::Event::Sent { .. }) => {},
+                ]
+            );
+        });
+
+        PeopleWestend::execute_with(|| {
+            assert_expected_events!(PeopleWestend, vec![]);
+        });
+    }
+}
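All six tests above drive the People chain through the same relay-side shape: encode a `pallet_identity` call, wrap it in `UnpaidExecution` plus `Transact`, and `pallet_xcm::send` it to `Parachain(1004)`. A minimal sketch of that shared skeleton, assuming the same emulated-test imports (`bx!`, `Chain`, `Westend`) — the helper name is ours, the tests inline this pattern instead:

```rust
use codec::Encode;
use frame_support::sp_runtime::traits::Dispatchable;

// Hypothetical helper (not part of the PR): send an already-encoded
// People-chain call from the relay, to be dispatched with `origin_kind`.
fn send_unpaid_transact_to_people(
    origin: <Westend as Chain>::RuntimeOrigin,
    origin_kind: OriginKind,
    encoded_call: Vec<u8>,
) -> frame_support::dispatch::DispatchResultWithPostInfo {
    type Runtime = <Westend as Chain>::Runtime;
    type RuntimeCall = <Westend as Chain>::RuntimeCall;
    RuntimeCall::XcmPallet(pallet_xcm::Call::<Runtime>::send {
        dest: bx!(VersionedLocation::from(Location::new(0, [Parachain(1004)]))),
        message: bx!(VersionedXcm::from(Xcm(vec![
            // The People chain executes this program without charging fees...
            UnpaidExecution { weight_limit: Unlimited, check_origin: None },
            // ...and dispatches the inner call under `origin_kind` on arrival.
            Transact { origin_kind, call: encoded_call.into() },
        ]))),
    })
    .dispatch(origin)
}
```

Only `OriginKind::Superuser` sent from a root origin reaches the privileged `pallet_identity` calls; the `wrong_origin` variants assert that `SovereignAccount` and `Xcm` senders either fail in the message queue or dispatch to no effect.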
diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/mod.rs b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/mod.rs
index 08749b295dc2..b9ad9e3db467 100644
--- a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/mod.rs
+++ b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/mod.rs
@@ -14,4 +14,5 @@
 // limitations under the License.
 
 mod claim_assets;
+mod governance;
 mod teleport;
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml
index 42adaba7a27c..bfe8ed869758 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml
+++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml
@@ -99,6 +99,7 @@ snowbridge-router-primitives = { workspace = true }
 
 [dev-dependencies]
 asset-test-utils = { workspace = true, default-features = true }
+parachains-runtimes-test-utils = { workspace = true, default-features = true }
 
 [build-dependencies]
 substrate-wasm-builder = { optional = true, workspace = true, default-features = true }
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs
index bc48c2d805fd..b6f3ccd3901b 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs
@@ -1415,37 +1415,31 @@ impl_runtime_apis! {
             // We accept the native token to pay fees.
             let mut acceptable_assets = vec![AssetId(native_token.clone())];
             // We also accept all assets in a pool with the native token.
-            let assets_in_pool_with_native = assets_common::get_assets_in_pool_with::<
-                Runtime,
-                xcm::v5::Location
-            >(&native_token).map_err(|()| XcmPaymentApiError::VersionedConversionFailed)?.into_iter();
-            acceptable_assets.extend(assets_in_pool_with_native);
+            acceptable_assets.extend(
+                assets_common::PoolAdapter::<Runtime>::get_assets_in_pool_with(native_token)
+                    .map_err(|()| XcmPaymentApiError::VersionedConversionFailed)?
+            );
             PolkadotXcm::query_acceptable_payment_assets(xcm_version, acceptable_assets)
         }
 
         fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result<u128, XcmPaymentApiError> {
             let native_asset = xcm_config::TokenLocation::get();
             let fee_in_native = WeightToFee::weight_to_fee(&weight);
-            match asset.try_as::<AssetId>() {
+            let latest_asset_id: Result<AssetId, ()> = asset.clone().try_into();
+            match latest_asset_id {
                 Ok(asset_id) if asset_id.0 == native_asset => {
                     // for native token
                     Ok(fee_in_native)
                 },
                 Ok(asset_id) => {
-                    let assets_in_pool_with_this_asset: Vec<_> = assets_common::get_assets_in_pool_with::<
-                        Runtime,
-                        xcm::v5::Location
-                    >(&asset_id.0).map_err(|()| XcmPaymentApiError::VersionedConversionFailed)?;
-                    if assets_in_pool_with_this_asset
-                        .into_iter()
-                        .map(|asset_id| asset_id.0)
-                        .any(|location| location == native_asset) {
-                        pallet_asset_conversion::Pallet::<Runtime>::quote_price_tokens_for_exact_tokens(
-                            asset_id.clone().0,
+                    // Try to get current price of `asset_id` in `native_asset`.
+                    if let Ok(Some(swapped_in_native)) = assets_common::PoolAdapter::<Runtime>::quote_price_tokens_for_exact_tokens(
+                        asset_id.0.clone(),
                         native_asset,
                         fee_in_native,
                         true, // We include the fee.
-                    ).ok_or(XcmPaymentApiError::AssetNotFound)
+                    ) {
+                        Ok(swapped_in_native)
                     } else {
                         log::trace!(target: "xcm::xcm_runtime_apis", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!");
                         Err(XcmPaymentApiError::AssetNotFound)
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs
index 5da8b45417a3..d056405adff8 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs
@@ -24,10 +24,10 @@ use asset_hub_rococo_runtime::{
         ForeignAssetFeeAsExistentialDepositMultiplierFeeCharger, LocationToAccountId, StakingPot,
         TokenLocation, TrustBackedAssetsPalletLocation, XcmConfig,
     },
-    AllPalletsWithoutSystem, AssetConversion, AssetDeposit, Assets, Balances, CollatorSelection,
-    ExistentialDeposit, ForeignAssets, ForeignAssetsInstance, MetadataDepositBase,
-    MetadataDepositPerByte, ParachainSystem, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin,
-    SessionKeys, TrustBackedAssetsInstance, XcmpQueue,
+    AllPalletsWithoutSystem, AssetConversion, AssetDeposit, Assets, Balances, Block,
+    CollatorSelection, ExistentialDeposit, ForeignAssets, ForeignAssetsInstance,
+    MetadataDepositBase, MetadataDepositPerByte, ParachainSystem, Runtime, RuntimeCall,
+    RuntimeEvent, RuntimeOrigin, SessionKeys, TrustBackedAssetsInstance, XcmpQueue,
 };
 use asset_test_utils::{
     test_cases_over_bridge::TestBridgingConfig, CollatorSessionKey, CollatorSessionKeys,
@@ -1471,3 +1471,19 @@ fn location_conversion_works() {
         assert_eq!(got, expected, "{}", tc.description);
     }
 }
+
+#[test]
+fn xcm_payment_api_works() {
+    parachains_runtimes_test_utils::test_cases::xcm_payment_api_with_native_token_works::<
+        Runtime,
+        RuntimeCall,
+        RuntimeOrigin,
+        Block,
+    >();
+    asset_test_utils::test_cases::xcm_payment_api_with_pools_works::<
+        Runtime,
+        RuntimeCall,
+        RuntimeOrigin,
+        Block,
+    >();
+}
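The runtime-API change above (repeated below for Asset Hub Westend and the bridge, collectives, and coretime hubs) swaps the borrowing `try_as` for an owned conversion into the latest XCM version, so callers may submit a v3/v4/v5 asset id and the `match` only ever sees one representation. A hedged sketch of just that normalization step, assuming only the `xcm` and `xcm-runtime-apis` crates:

```rust
use xcm::{latest::prelude::*, VersionedAssetId};
use xcm_runtime_apis::fees::Error as XcmPaymentApiError;

// Normalize any incoming versioned asset id to the latest `AssetId`,
// mirroring the `asset.clone().try_into()` line in the runtime APIs above.
fn normalize(asset: VersionedAssetId) -> Result<AssetId, XcmPaymentApiError> {
    asset.try_into().map_err(|()| XcmPaymentApiError::VersionedConversionFailed)
}
```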
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml
index d5eaa43ab834..a3eaebb59153 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml
+++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml
@@ -101,6 +101,7 @@ snowbridge-router-primitives = { workspace = true }
 
 [dev-dependencies]
 asset-test-utils = { workspace = true, default-features = true }
+parachains-runtimes-test-utils = { workspace = true, default-features = true }
 
 [build-dependencies]
 substrate-wasm-builder = { optional = true, workspace = true, default-features = true }
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs
index cafea3b6ff8b..f20b6b1fece0 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs
@@ -1528,38 +1528,31 @@ impl_runtime_apis! {
             // We accept the native token to pay fees.
             let mut acceptable_assets = vec![AssetId(native_token.clone())];
             // We also accept all assets in a pool with the native token.
-            let assets_in_pool_with_native = assets_common::get_assets_in_pool_with::<
-                Runtime,
-                xcm::v5::Location
-            >(&native_token).map_err(|()| XcmPaymentApiError::VersionedConversionFailed)?.into_iter();
-            acceptable_assets.extend(assets_in_pool_with_native);
+            acceptable_assets.extend(
+                assets_common::PoolAdapter::<Runtime>::get_assets_in_pool_with(native_token)
+                    .map_err(|()| XcmPaymentApiError::VersionedConversionFailed)?
+            );
             PolkadotXcm::query_acceptable_payment_assets(xcm_version, acceptable_assets)
         }
 
         fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result<u128, XcmPaymentApiError> {
             let native_asset = xcm_config::WestendLocation::get();
             let fee_in_native = WeightToFee::weight_to_fee(&weight);
-            match asset.try_as::<AssetId>() {
+            let latest_asset_id: Result<AssetId, ()> = asset.clone().try_into();
+            match latest_asset_id {
                 Ok(asset_id) if asset_id.0 == native_asset => {
                     // for native asset
                     Ok(fee_in_native)
                 },
                 Ok(asset_id) => {
-                    // We recognize assets in a pool with the native one.
-                    let assets_in_pool_with_this_asset: Vec<_> = assets_common::get_assets_in_pool_with::<
-                        Runtime,
-                        xcm::v5::Location
-                    >(&asset_id.0).map_err(|()| XcmPaymentApiError::VersionedConversionFailed)?;
-                    if assets_in_pool_with_this_asset
-                        .into_iter()
-                        .map(|asset_id| asset_id.0)
-                        .any(|location| location == native_asset) {
-                        pallet_asset_conversion::Pallet::<Runtime>::quote_price_tokens_for_exact_tokens(
-                            asset_id.clone().0,
+                    // Try to get current price of `asset_id` in `native_asset`.
+                    if let Ok(Some(swapped_in_native)) = assets_common::PoolAdapter::<Runtime>::quote_price_tokens_for_exact_tokens(
+                        asset_id.0.clone(),
                         native_asset,
                         fee_in_native,
                         true, // We include the fee.
-                    ).ok_or(XcmPaymentApiError::AssetNotFound)
+                    ) {
+                        Ok(swapped_in_native)
                     } else {
                         log::trace!(target: "xcm::xcm_runtime_apis", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!");
                         Err(XcmPaymentApiError::AssetNotFound)
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs
index 5d0f843554a1..109a5dd2c029 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs
@@ -24,7 +24,7 @@ use asset_hub_westend_runtime::{
         ForeignAssetFeeAsExistentialDepositMultiplierFeeCharger, LocationToAccountId, StakingPot,
         TrustBackedAssetsPalletLocation, WestendLocation, XcmConfig,
     },
-    AllPalletsWithoutSystem, Assets, Balances, ExistentialDeposit, ForeignAssets,
+    AllPalletsWithoutSystem, Assets, Balances, Block, ExistentialDeposit, ForeignAssets,
     ForeignAssetsInstance, MetadataDepositBase, MetadataDepositPerByte, ParachainSystem,
     PolkadotXcm, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, SessionKeys,
     TrustBackedAssetsInstance, XcmpQueue,
@@ -1446,3 +1446,19 @@ fn location_conversion_works() {
         assert_eq!(got, expected, "{}", tc.description);
     }
 }
+
+#[test]
+fn xcm_payment_api_works() {
+    parachains_runtimes_test_utils::test_cases::xcm_payment_api_with_native_token_works::<
+        Runtime,
+        RuntimeCall,
+        RuntimeOrigin,
+        Block,
+    >();
+    asset_test_utils::test_cases::xcm_payment_api_with_pools_works::<
+        Runtime,
+        RuntimeCall,
+        RuntimeOrigin,
+        Block,
+    >();
+}
diff --git a/cumulus/parachains/runtimes/assets/common/src/lib.rs b/cumulus/parachains/runtimes/assets/common/src/lib.rs
index 1d2d45b42c5d..25c2df6b68d1 100644
--- a/cumulus/parachains/runtimes/assets/common/src/lib.rs
+++ b/cumulus/parachains/runtimes/assets/common/src/lib.rs
@@ -28,7 +28,7 @@ extern crate alloc;
 use crate::matching::{LocalLocationPattern, ParentLocation};
 use alloc::vec::Vec;
 use codec::{Decode, EncodeLike};
-use core::cmp::PartialEq;
+use core::{cmp::PartialEq, marker::PhantomData};
 use frame_support::traits::{Equals, EverythingBut};
 use parachains_common::{AssetIdForTrustBackedAssets, CollectionId, ItemId};
 use sp_runtime::traits::TryConvertInto;
@@ -137,24 +137,62 @@ pub type PoolAssetsConvertedConcreteId =
     TryConvertInto,
 >;
 
-/// Returns an iterator of all assets in a pool with `asset`.
-///
-/// Should only be used in runtime APIs since it iterates over the whole
-/// `pallet_asset_conversion::Pools` map.
-///
-/// It takes in any version of an XCM Location but always returns the latest one.
-/// This is to allow some margin of migrating the pools when updating the XCM version.
-///
-/// An error of type `()` is returned if the version conversion fails for XCM locations.
-/// This error should be mapped by the caller to a more descriptive one.
-pub fn get_assets_in_pool_with<
-    Runtime: pallet_asset_conversion::Config<PoolId = (L, L)>,
-    L: TryInto<Location> + Clone + Decode + EncodeLike + PartialEq,
->(
-    asset: &L,
-) -> Result<Vec<AssetId>, ()> {
-    pallet_asset_conversion::Pools::<Runtime>::iter_keys()
-        .filter_map(|(asset_1, asset_2)| {
+/// Adapter implementation for accessing pools (`pallet_asset_conversion`) that uses `AssetKind` as
+/// a `xcm::v*` which could be different from the `xcm::latest`.
+pub struct PoolAdapter<Runtime>(PhantomData<Runtime>);
+impl<
+        Runtime: pallet_asset_conversion::Config<PoolId = (L, L)>,
+        L: TryFrom<Location> + TryInto<Location> + Clone + Decode + EncodeLike + PartialEq,
+    > PoolAdapter<Runtime>
+{
+    /// Returns a vector of all assets in a pool with `asset`.
+    ///
+    /// Should only be used in runtime APIs since it iterates over the whole
+    /// `pallet_asset_conversion::Pools` map.
+    ///
+    /// It takes in any version of an XCM Location but always returns the latest one.
+    /// This is to allow some margin of migrating the pools when updating the XCM version.
+    ///
+    /// An error of type `()` is returned if the version conversion fails for XCM locations.
+    /// This error should be mapped by the caller to a more descriptive one.
+    pub fn get_assets_in_pool_with(asset: Location) -> Result<Vec<AssetId>, ()> {
+        // convert latest to the `L` version.
+        let asset: L = asset.try_into().map_err(|_| ())?;
+        Self::iter_assets_in_pool_with(&asset)
+            .map(|location| {
+                // convert `L` to the latest `AssetId`
+                location.try_into().map_err(|_| ()).map(AssetId)
+            })
+            .collect::<Result<Vec<_>, _>>()
+    }
+
+    /// Provides the current price. Wrapper over
+    /// `pallet_asset_conversion::Pallet::<Runtime>::quote_price_tokens_for_exact_tokens`.
+    ///
+    /// An error of type `()` is returned if the version conversion fails for XCM locations.
+    /// This error should be mapped by the caller to a more descriptive one.
+    pub fn quote_price_tokens_for_exact_tokens(
+        asset_1: Location,
+        asset_2: Location,
+        amount: Runtime::Balance,
+        include_fees: bool,
+    ) -> Result<Option<Runtime::Balance>, ()> {
+        // Convert latest to the `L` version.
+        let asset_1: L = asset_1.try_into().map_err(|_| ())?;
+        let asset_2: L = asset_2.try_into().map_err(|_| ())?;
+
+        // Quote swap price.
+        Ok(pallet_asset_conversion::Pallet::<Runtime>::quote_price_tokens_for_exact_tokens(
+            asset_1,
+            asset_2,
+            amount,
+            include_fees,
+        ))
+    }
+
+    /// Helper function for filtering pool.
+    pub fn iter_assets_in_pool_with(asset: &L) -> impl Iterator<Item = L> + '_ {
+        pallet_asset_conversion::Pools::<Runtime>::iter_keys().filter_map(|(asset_1, asset_2)| {
             if asset_1 == *asset {
                 Some(asset_2)
             } else if asset_2 == *asset {
@@ -163,8 +201,7 @@ pub fn get_assets_in_pool_with<
                 None
             }
         })
-        .map(|location| location.try_into().map_err(|_| ()).map(AssetId))
-        .collect::<Result<Vec<_>, _>>()
+    }
 }
 
 #[cfg(test)]
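The adapter above exists because runtime APIs speak the latest XCM `Location` while `pallet_asset_conversion::Config::AssetKind` may lag one or more XCM versions behind. A minimal sketch of the round-trip it performs at each boundary, assuming `xcm::v4::Location` as the runtime's pool key (the `L` type) purely for illustration:

```rust
use xcm::{latest::Location, v4};

// Latest -> `L` on the way into the pallet, `L` -> latest on the way back;
// either hop fails with `()` if the location has no representation in the
// other version, which is exactly the `()` error the adapter surfaces.
fn roundtrip(latest: Location) -> Result<Location, ()> {
    let as_v4: v4::Location = latest.try_into().map_err(|_| ())?;
    as_v4.try_into().map_err(|_| ())
}
```

Keeping `L` generic, rather than forcing `xcm::latest`, is what gives runtimes a migration window for their pools when bumping XCM versions, as the doc comment above notes.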
diff --git a/cumulus/parachains/runtimes/assets/test-utils/Cargo.toml b/cumulus/parachains/runtimes/assets/test-utils/Cargo.toml
index 529d6460fc4e..f6b3c13e8102 100644
--- a/cumulus/parachains/runtimes/assets/test-utils/Cargo.toml
+++ b/cumulus/parachains/runtimes/assets/test-utils/Cargo.toml
@@ -16,6 +16,7 @@ codec = { features = ["derive", "max-encoded-len"], workspace = true }
 frame-support = { workspace = true }
 frame-system = { workspace = true }
 pallet-assets = { workspace = true }
+pallet-asset-conversion = { workspace = true }
 pallet-balances = { workspace = true }
 pallet-timestamp = { workspace = true }
 pallet-session = { workspace = true }
@@ -36,6 +37,7 @@ xcm = { workspace = true }
 xcm-builder = { workspace = true }
 xcm-executor = { workspace = true }
 pallet-xcm = { workspace = true }
+xcm-runtime-apis = { workspace = true }
 
 # Bridges
 pallet-xcm-bridge-hub-router = { workspace = true }
@@ -55,6 +57,7 @@ std = [
     "cumulus-primitives-core/std",
     "frame-support/std",
     "frame-system/std",
+    "pallet-asset-conversion/std",
    "pallet-assets/std",
    "pallet-balances/std",
    "pallet-collator-selection/std",
@@ -69,5 +72,6 @@ std = [
    "sp-runtime/std",
    "xcm-builder/std",
    "xcm-executor/std",
+    "xcm-runtime-apis/std",
    "xcm/std",
 ]
diff --git a/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs b/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs
index 8dc720e27753..aeacc1a5471e 100644
--- a/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs
+++ b/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs
@@ -34,11 +34,14 @@ use parachains_runtimes_test_utils::{
     CollatorSessionKeys, ExtBuilder, SlotDurations, ValidatorIdOf, XcmReceivedFrom,
 };
 use sp_runtime::{
-    traits::{MaybeEquivalence, StaticLookup, Zero},
+    traits::{Block as BlockT, MaybeEquivalence, StaticLookup, Zero},
     DispatchError, Saturating,
 };
 use xcm::{latest::prelude::*, VersionedAssets};
 use xcm_executor::{traits::ConvertLocation, XcmExecutor};
+use xcm_runtime_apis::fees::{
+    runtime_decl_for_xcm_payment_api::XcmPaymentApiV1, Error as XcmPaymentApiError,
+};
 
 type RuntimeHelper<Runtime, AllPalletsWithoutSystem = ()> =
     parachains_runtimes_test_utils::RuntimeHelper<Runtime, AllPalletsWithoutSystem>;
@@ -1584,3 +1587,108 @@ pub fn reserve_transfer_native_asset_to_non_teleport_para_works<
         );
     })
 }
+
+pub fn xcm_payment_api_with_pools_works<Runtime, RuntimeCall, RuntimeOrigin, Block>()
+where
+    Runtime: XcmPaymentApiV1<Block>
+        + frame_system::Config<RuntimeOrigin = RuntimeOrigin, AccountId = AccountId>
+        + pallet_balances::Config<Balance = u128>
+        + pallet_session::Config
+        + pallet_xcm::Config
+        + parachain_info::Config
+        + pallet_collator_selection::Config
+        + cumulus_pallet_parachain_system::Config
+        + cumulus_pallet_xcmp_queue::Config
+        + pallet_timestamp::Config
+        + pallet_assets::Config<
+            pallet_assets::Instance1,
+            AssetId = u32,
+            Balance = <Runtime as pallet_balances::Config>::Balance,
+        > + pallet_asset_conversion::Config<
+            AssetKind = xcm::v5::Location,
+            Balance = <Runtime as pallet_balances::Config>::Balance,
+        >,
+    ValidatorIdOf<Runtime>: From<AccountIdOf<Runtime>>,
+    RuntimeOrigin: OriginTrait<AccountId = <Runtime as frame_system::Config>::AccountId>,
+    <<Runtime as frame_system::Config>::Lookup as StaticLookup>::Source:
+        From<<Runtime as frame_system::Config>::AccountId>,
+    Block: BlockT,
+{
+    use xcm::prelude::*;
+
+    ExtBuilder::<Runtime>::default().build().execute_with(|| {
+        let test_account = AccountId::from([0u8; 32]);
+        let transfer_amount = 100u128;
+        let xcm_to_weigh = Xcm::<RuntimeCall>::builder_unsafe()
+            .withdraw_asset((Here, transfer_amount))
+            .buy_execution((Here, transfer_amount), Unlimited)
+            .deposit_asset(AllCounted(1), [1u8; 32])
+            .build();
+        let versioned_xcm_to_weigh = VersionedXcm::from(xcm_to_weigh.clone().into());
+
+        let xcm_weight = Runtime::query_xcm_weight(versioned_xcm_to_weigh);
+        assert!(xcm_weight.is_ok());
+        let native_token: Location = Parent.into();
+        let native_token_versioned = VersionedAssetId::from(AssetId(native_token.clone()));
+        let execution_fees =
+            Runtime::query_weight_to_asset_fee(xcm_weight.unwrap(), native_token_versioned);
+        assert!(execution_fees.is_ok());
+
+        // We need some balance to create an asset.
+        assert_ok!(
+            pallet_balances::Pallet::<Runtime>::mint_into(&test_account, 3_000_000_000_000,)
+        );
+
+        // Now we try to use an asset that's not in a pool.
+        let asset_id = 1984u32; // USDT.
+        let asset_not_in_pool: Location =
+            (PalletInstance(50), GeneralIndex(asset_id.into())).into();
+        assert_ok!(pallet_assets::Pallet::<Runtime, pallet_assets::Instance1>::create(
+            RuntimeOrigin::signed(test_account.clone()),
+            asset_id.into(),
+            test_account.clone().into(),
+            1000
+        ));
+        let execution_fees = Runtime::query_weight_to_asset_fee(
+            xcm_weight.unwrap(),
+            asset_not_in_pool.clone().into(),
+        );
+        assert_eq!(execution_fees, Err(XcmPaymentApiError::AssetNotFound));
+
+        // We add it to a pool with native.
+        assert_ok!(pallet_asset_conversion::Pallet::<Runtime>::create_pool(
+            RuntimeOrigin::signed(test_account.clone()),
+            native_token.clone().try_into().unwrap(),
+            asset_not_in_pool.clone().try_into().unwrap()
+        ));
+        let execution_fees = Runtime::query_weight_to_asset_fee(
+            xcm_weight.unwrap(),
+            asset_not_in_pool.clone().into(),
+        );
+        // Still not enough because it doesn't have any liquidity.
+        assert_eq!(execution_fees, Err(XcmPaymentApiError::AssetNotFound));
+
+        // We mint some of the asset...
+        assert_ok!(pallet_assets::Pallet::<Runtime, pallet_assets::Instance1>::mint(
+            RuntimeOrigin::signed(test_account.clone()),
+            asset_id.into(),
+            test_account.clone().into(),
+            3_000_000_000_000,
+        ));
+        // ...so we can add liquidity to the pool.
+        assert_ok!(pallet_asset_conversion::Pallet::<Runtime>::add_liquidity(
+            RuntimeOrigin::signed(test_account.clone()),
+            native_token.try_into().unwrap(),
+            asset_not_in_pool.clone().try_into().unwrap(),
+            1_000_000_000_000,
+            2_000_000_000_000,
+            0,
+            0,
+            test_account
+        ));
+        let execution_fees =
+            Runtime::query_weight_to_asset_fee(xcm_weight.unwrap(), asset_not_in_pool.into());
+        // Now it works!
+        assert_ok!(execution_fees);
+    });
+}
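For intuition on why the final assertion only passes after `add_liquidity`: with reserves seeded at 1_000_000_000_000 native and 2_000_000_000_000 asset, the quote is a standard constant-product `get_amount_in`. A rough sketch of that arithmetic (the real pallet also applies the LP fee, which this ignores):

```rust
// amount_in = reserve_in * amount_out / (reserve_out - amount_out), rounded up.
fn quote_exact_out(reserve_in: u128, reserve_out: u128, amount_out: u128) -> Option<u128> {
    let numerator = reserve_in.checked_mul(amount_out)?;
    let denominator = reserve_out.checked_sub(amount_out)?;
    numerator.checked_div(denominator).map(|q| q + 1)
}

fn main() {
    // Paying a 1_000_000-unit native fee out of the test's 2:1 pool costs
    // about 2_000_003 units of the pooled asset (roughly the 2:1 spot price,
    // plus rounding and slippage).
    assert_eq!(
        quote_exact_out(2_000_000_000_000, 1_000_000_000_000, 1_000_000),
        Some(2_000_003),
    );
}
```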
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml
index 4af8a9f43850..3eb06e3a18c1 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml
@@ -122,6 +122,7 @@ bridge-hub-test-utils = { workspace = true, default-features = true }
 bridge-runtime-common = { features = ["integrity-test"], workspace = true, default-features = true }
 pallet-bridge-relayers = { features = ["integrity-test"], workspace = true }
 snowbridge-runtime-test-common = { workspace = true, default-features = true }
+parachains-runtimes-test-utils = { workspace = true, default-features = true }
 
 [features]
 default = ["std"]
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs
index 3f3316d0be49..598afeddb984 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs
@@ -847,7 +847,8 @@ impl_runtime_apis! {
         }
 
         fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result<u128, XcmPaymentApiError> {
-            match asset.try_as::<AssetId>() {
+            let latest_asset_id: Result<AssetId, ()> = asset.clone().try_into();
+            match latest_asset_id {
                 Ok(asset_id) if asset_id.0 == xcm_config::TokenLocation::get() => {
                     // for native token
                     Ok(WeightToFee::weight_to_fee(&weight))
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs
index 6ca858e961d3..29f9615bff6a 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs
@@ -20,9 +20,9 @@ use bp_polkadot_core::Signature;
 use bridge_hub_rococo_runtime::{
     bridge_common_config, bridge_to_bulletin_config, bridge_to_westend_config,
     xcm_config::{RelayNetwork, TokenLocation, XcmConfig},
-    AllPalletsWithoutSystem, BridgeRejectObsoleteHeadersAndMessages, Executive, ExistentialDeposit,
-    ParachainSystem, PolkadotXcm, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, SessionKeys,
-    TransactionPayment, TxExtension, UncheckedExtrinsic,
+    AllPalletsWithoutSystem, Block, BridgeRejectObsoleteHeadersAndMessages, Executive,
+    ExistentialDeposit, ParachainSystem, PolkadotXcm, Runtime, RuntimeCall, RuntimeEvent,
+    RuntimeOrigin, SessionKeys, TransactionPayment, TxExtension, UncheckedExtrinsic,
 };
 use bridge_hub_test_utils::SlotDurations;
 use codec::{Decode, Encode};
@@ -838,3 +838,13 @@ fn location_conversion_works() {
         assert_eq!(got, expected, "{}", tc.description);
     }
 }
+
+#[test]
+fn xcm_payment_api_works() {
+    parachains_runtimes_test_utils::test_cases::xcm_payment_api_with_native_token_works::<
+        Runtime,
+        RuntimeCall,
+        RuntimeOrigin,
+        Block,
+    >();
+}
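The new `xcm_payment_api_works` tests exercise the API from inside the runtime. For completeness, a hedged sketch of the node-side view of the same two calls — `client` and `at` are assumptions for illustration, and the nested `Result`s are `sp_api`'s `ApiError` wrapped around the API's own error type:

```rust
use sp_api::ProvideRuntimeApi;
use sp_runtime::traits::Block as BlockT;
use xcm::{latest::prelude::*, VersionedAssetId, VersionedXcm};
use xcm_runtime_apis::fees::XcmPaymentApi;

// Weigh a trivial program and quote its fee in the native (parent) asset.
fn native_fee_for<Block: BlockT, C>(client: &C, at: Block::Hash) -> Option<u128>
where
    C: ProvideRuntimeApi<Block>,
    C::Api: XcmPaymentApi<Block>,
{
    let api = client.runtime_api();
    let program = VersionedXcm::<()>::from(Xcm(vec![ClearOrigin]));
    let weight = api.query_xcm_weight(at, program).ok()?.ok()?;
    let native = VersionedAssetId::from(AssetId(Location::parent()));
    api.query_weight_to_asset_fee(at, weight, native).ok()?.ok()
}
```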
{ } fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result<u128, XcmPaymentApiError> { - match asset.try_as::<AssetId>() { + let latest_asset_id: Result<AssetId, ()> = asset.clone().try_into(); + match latest_asset_id { Ok(asset_id) if asset_id.0 == xcm_config::WestendLocation::get() => { // for native token Ok(WeightToFee::weight_to_fee(&weight)) diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs index 84025c4cefeb..d7e70ed769b1 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs @@ -27,9 +27,9 @@ use bridge_hub_westend_runtime::{ bridge_common_config, bridge_to_rococo_config, bridge_to_rococo_config::RococoGlobalConsensusNetwork, xcm_config::{LocationToAccountId, RelayNetwork, WestendLocation, XcmConfig}, - AllPalletsWithoutSystem, BridgeRejectObsoleteHeadersAndMessages, Executive, ExistentialDeposit, - ParachainSystem, PolkadotXcm, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, SessionKeys, - TransactionPayment, TxExtension, UncheckedExtrinsic, + AllPalletsWithoutSystem, Block, BridgeRejectObsoleteHeadersAndMessages, Executive, + ExistentialDeposit, ParachainSystem, PolkadotXcm, Runtime, RuntimeCall, RuntimeEvent, + RuntimeOrigin, SessionKeys, TransactionPayment, TxExtension, UncheckedExtrinsic, }; use bridge_to_rococo_config::{ BridgeGrandpaRococoInstance, BridgeHubRococoLocation, BridgeParachainRococoInstance, @@ -525,3 +525,13 @@ fn location_conversion_works() { assert_eq!(got, expected, "{}", tc.description); } } + +#[test] +fn xcm_payment_api_works() { + parachains_runtimes_test_utils::test_cases::xcm_payment_api_with_native_token_works::< + Runtime, + RuntimeCall, + RuntimeOrigin, + Block, + >(); +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml b/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml index e03fc934ceaf..810abcf572d4 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml @@ -94,6 +94,7 @@ substrate-wasm-builder = { optional = true, workspace = true, default-features = [dev-dependencies] sp-io = { features = ["std"], workspace = true, default-features = true } +parachains-runtimes-test-utils = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs index 0ee3a4068718..f4c62f212e8c 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs @@ -963,7 +963,8 @@ impl_runtime_apis!
{ } fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result<u128, XcmPaymentApiError> { - match asset.try_as::<AssetId>() { + let latest_asset_id: Result<AssetId, ()> = asset.clone().try_into(); + match latest_asset_id { Ok(asset_id) if asset_id.0 == xcm_config::WndLocation::get() => { // for native token Ok(WeightToFee::weight_to_fee(&weight)) diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/tests/tests.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/tests/tests.rs index 7add10559d84..c9191eba49f6 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/tests/tests.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/tests/tests.rs @@ -16,7 +16,9 @@ #![cfg(test)] -use collectives_westend_runtime::xcm_config::LocationToAccountId; +use collectives_westend_runtime::{ + xcm_config::LocationToAccountId, Block, Runtime, RuntimeCall, RuntimeOrigin, +}; use parachains_common::AccountId; use sp_core::crypto::Ss58Codec; use xcm::latest::prelude::*; @@ -132,3 +134,13 @@ fn location_conversion_works() { assert_eq!(got, expected, "{}", tc.description); } } + +#[test] +fn xcm_payment_api_works() { + parachains_runtimes_test_utils::test_cases::xcm_payment_api_with_native_token_works::< + Runtime, + RuntimeCall, + RuntimeOrigin, + Block, + >(); +} diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml b/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml index a38b7400cfa3..02807827cf92 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml @@ -80,6 +80,9 @@ parachain-info = { workspace = true } parachains-common = { workspace = true } testnet-parachains-constants = { features = ["rococo"], workspace = true } +[dev-dependencies] +parachains-runtimes-test-utils = { workspace = true } + [features] default = ["std"] std = [ @@ -120,6 +123,7 @@ std = [ "pallet-xcm/std", "parachain-info/std", "parachains-common/std", + "parachains-runtimes-test-utils/std", "polkadot-parachain-primitives/std", "polkadot-runtime-common/std", "rococo-runtime-constants/std", diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs index 3f3126b749d8..ae3ad93a9e85 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs @@ -835,7 +835,8 @@ impl_runtime_apis!
{ } fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result<u128, XcmPaymentApiError> { - match asset.try_as::<AssetId>() { + let latest_asset_id: Result<AssetId, ()> = asset.clone().try_into(); + match latest_asset_id { Ok(asset_id) if asset_id.0 == xcm_config::RocRelayLocation::get() => { // for native token Ok(WeightToFee::weight_to_fee(&weight)) diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/tests/tests.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/tests/tests.rs index 2cabce567b6e..89a593ab0f57 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/tests/tests.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/tests/tests.rs @@ -16,7 +16,9 @@ #![cfg(test)] -use coretime_rococo_runtime::xcm_config::LocationToAccountId; +use coretime_rococo_runtime::{ + xcm_config::LocationToAccountId, Block, Runtime, RuntimeCall, RuntimeOrigin, +}; use parachains_common::AccountId; use sp_core::crypto::Ss58Codec; use xcm::latest::prelude::*; @@ -132,3 +134,13 @@ fn location_conversion_works() { assert_eq!(got, expected, "{}", tc.description); } } + +#[test] +fn xcm_payment_api_works() { + parachains_runtimes_test_utils::test_cases::xcm_payment_api_with_native_token_works::< + Runtime, + RuntimeCall, + RuntimeOrigin, + Block, + >(); +} diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml b/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml index 149fa5d0b045..34353d312b1f 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml @@ -80,6 +80,9 @@ parachain-info = { workspace = true } parachains-common = { workspace = true } testnet-parachains-constants = { features = ["westend"], workspace = true } +[dev-dependencies] +parachains-runtimes-test-utils = { workspace = true, default-features = true } + [features] default = ["std"] std = [ diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs index 098a17cc9984..39ea39f25a8b 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs @@ -827,7 +827,8 @@ impl_runtime_apis!
{ } fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result<u128, XcmPaymentApiError> { - match asset.try_as::<AssetId>() { + let latest_asset_id: Result<AssetId, ()> = asset.clone().try_into(); + match latest_asset_id { Ok(asset_id) if asset_id.0 == xcm_config::TokenRelayLocation::get() => { // for native token Ok(WeightToFee::weight_to_fee(&weight)) diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/tests/tests.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/tests/tests.rs index e391d71a9ab7..976ce23d6e87 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/tests/tests.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/tests/tests.rs @@ -16,7 +16,9 @@ #![cfg(test)] -use coretime_westend_runtime::xcm_config::LocationToAccountId; +use coretime_westend_runtime::{ + xcm_config::LocationToAccountId, Block, Runtime, RuntimeCall, RuntimeOrigin, +}; use parachains_common::AccountId; use sp_core::crypto::Ss58Codec; use xcm::latest::prelude::*; @@ -132,3 +134,13 @@ fn location_conversion_works() { assert_eq!(got, expected, "{}", tc.description); } } + +#[test] +fn xcm_payment_api_works() { + parachains_runtimes_test_utils::test_cases::xcm_payment_api_with_native_token_works::< + Runtime, + RuntimeCall, + RuntimeOrigin, + Block, + >(); +} diff --git a/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml b/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml index 34458c2352fb..a55143b62071 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml @@ -77,6 +77,9 @@ parachain-info = { workspace = true } parachains-common = { workspace = true } testnet-parachains-constants = { features = ["rococo"], workspace = true } +[dev-dependencies] +parachains-runtimes-test-utils = { workspace = true, default-features = true } + [features] default = ["std"] std = [ diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs index 7921030f2bb8..dc5f2ac0997c 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs @@ -783,7 +783,8 @@ impl_runtime_apis!
{ } fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result<u128, XcmPaymentApiError> { - match asset.try_as::<AssetId>() { + let latest_asset_id: Result<AssetId, ()> = asset.clone().try_into(); + match latest_asset_id { Ok(asset_id) if asset_id.0 == xcm_config::RelayLocation::get() => { // for native token Ok(WeightToFee::weight_to_fee(&weight)) diff --git a/cumulus/parachains/runtimes/people/people-rococo/tests/tests.rs b/cumulus/parachains/runtimes/people/people-rococo/tests/tests.rs index 3627d9c40ec2..00fe7781822a 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/tests/tests.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/tests/tests.rs @@ -17,7 +17,9 @@ #![cfg(test)] use parachains_common::AccountId; -use people_rococo_runtime::xcm_config::LocationToAccountId; +use people_rococo_runtime::{ + xcm_config::LocationToAccountId, Block, Runtime, RuntimeCall, RuntimeOrigin, +}; use sp_core::crypto::Ss58Codec; use xcm::latest::prelude::*; use xcm_runtime_apis::conversions::LocationToAccountHelper; @@ -132,3 +134,13 @@ fn location_conversion_works() { assert_eq!(got, expected, "{}", tc.description); } } + +#[test] +fn xcm_payment_api_works() { + parachains_runtimes_test_utils::test_cases::xcm_payment_api_with_native_token_works::< + Runtime, + RuntimeCall, + RuntimeOrigin, + Block, + >(); +} diff --git a/cumulus/parachains/runtimes/people/people-westend/Cargo.toml b/cumulus/parachains/runtimes/people/people-westend/Cargo.toml index 6840b97d8c3f..4d66332e96dd 100644 --- a/cumulus/parachains/runtimes/people/people-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/people/people-westend/Cargo.toml @@ -77,6 +77,9 @@ parachain-info = { workspace = true } parachains-common = { workspace = true } testnet-parachains-constants = { features = ["westend"], workspace = true } +[dev-dependencies] +parachains-runtimes-test-utils = { workspace = true, default-features = true } + [features] default = ["std"] std = [ diff --git a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs index 19a64ab8d6e8..1b9a3b60a2c4 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs @@ -781,7 +781,8 @@ impl_runtime_apis!
{ } fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result<u128, XcmPaymentApiError> { - match asset.try_as::<AssetId>() { + let latest_asset_id: Result<AssetId, ()> = asset.clone().try_into(); + match latest_asset_id { Ok(asset_id) if asset_id.0 == xcm_config::RelayLocation::get() => { // for native token Ok(WeightToFee::weight_to_fee(&weight)) diff --git a/cumulus/parachains/runtimes/people/people-westend/tests/tests.rs b/cumulus/parachains/runtimes/people/people-westend/tests/tests.rs index fa9331952b4b..5cefec44b1cd 100644 --- a/cumulus/parachains/runtimes/people/people-westend/tests/tests.rs +++ b/cumulus/parachains/runtimes/people/people-westend/tests/tests.rs @@ -17,7 +17,9 @@ #![cfg(test)] use parachains_common::AccountId; -use people_westend_runtime::xcm_config::LocationToAccountId; +use people_westend_runtime::{ + xcm_config::LocationToAccountId, Block, Runtime, RuntimeCall, RuntimeOrigin, +}; use sp_core::crypto::Ss58Codec; use xcm::latest::prelude::*; use xcm_runtime_apis::conversions::LocationToAccountHelper; @@ -132,3 +134,13 @@ fn location_conversion_works() { assert_eq!(got, expected, "{}", tc.description); } } + +#[test] +fn xcm_payment_api_works() { + parachains_runtimes_test_utils::test_cases::xcm_payment_api_with_native_token_works::< + Runtime, + RuntimeCall, + RuntimeOrigin, + Block, + >(); +} diff --git a/cumulus/parachains/runtimes/test-utils/Cargo.toml b/cumulus/parachains/runtimes/test-utils/Cargo.toml index 01d7fcc2b5c8..e9d666617ee2 100644 --- a/cumulus/parachains/runtimes/test-utils/Cargo.toml +++ b/cumulus/parachains/runtimes/test-utils/Cargo.toml @@ -29,6 +29,7 @@ cumulus-pallet-parachain-system = { workspace = true } cumulus-pallet-xcmp-queue = { workspace = true } pallet-collator-selection = { workspace = true } parachain-info = { workspace = true } +parachains-common = { workspace = true } cumulus-primitives-core = { workspace = true } cumulus-primitives-parachain-inherent = { workspace = true } cumulus-test-relay-sproof-builder = { workspace = true } @@ -37,6 +38,7 @@ cumulus-test-relay-sproof-builder = { workspace = true } xcm = { workspace = true } xcm-executor = { workspace = true } pallet-xcm = { workspace = true } +xcm-runtime-apis = { workspace = true } polkadot-parachain-primitives = { workspace = true } [dev-dependencies] @@ -62,11 +64,13 @@ std = [ "pallet-timestamp/std", "pallet-xcm/std", "parachain-info/std", + "parachains-common/std", "polkadot-parachain-primitives/std", "sp-consensus-aura/std", "sp-core/std", "sp-io/std", "sp-runtime/std", "xcm-executor/std", + "xcm-runtime-apis/std", "xcm/std", ] diff --git a/cumulus/parachains/runtimes/test-utils/src/test_cases.rs b/cumulus/parachains/runtimes/test-utils/src/test_cases.rs index a66163154cf6..6bdf3ef09d1b 100644 --- a/cumulus/parachains/runtimes/test-utils/src/test_cases.rs +++ b/cumulus/parachains/runtimes/test-utils/src/test_cases.rs @@ -18,7 +18,15 @@ use crate::{AccountIdOf, CollatorSessionKeys, ExtBuilder, ValidatorIdOf}; use codec::Encode; -use frame_support::{assert_ok, traits::Get}; +use frame_support::{ + assert_ok, + traits::{Get, OriginTrait}, +}; +use parachains_common::AccountId; +use sp_runtime::traits::{Block as BlockT, StaticLookup}; +use xcm_runtime_apis::fees::{ + runtime_decl_for_xcm_payment_api::XcmPaymentApiV1, Error as XcmPaymentApiError, +}; type RuntimeHelper<Runtime, AllPalletsWithoutSystem = ()> = crate::RuntimeHelper<Runtime, AllPalletsWithoutSystem>; @@ -128,3 +136,60 @@ pub fn set_storage_keys_by_governance_works<Runtime>( assert_storage(); }); } + +pub fn xcm_payment_api_with_native_token_works<Runtime, RuntimeCall, RuntimeOrigin, Block>() +where + Runtime: XcmPaymentApiV1<Block> + + frame_system::Config<RuntimeOrigin = RuntimeOrigin, AccountId = AccountId> + +
pallet_balances::Config<Balance = u128> + + pallet_session::Config + + pallet_xcm::Config + + parachain_info::Config + + pallet_collator_selection::Config + + cumulus_pallet_parachain_system::Config + + cumulus_pallet_xcmp_queue::Config + + pallet_timestamp::Config, + ValidatorIdOf<Runtime>: From<AccountIdOf<Runtime>>, + RuntimeOrigin: OriginTrait<AccountId = <Runtime as frame_system::Config>::AccountId>, + <<Runtime as frame_system::Config>::Lookup as StaticLookup>::Source: + From<<Runtime as frame_system::Config>::AccountId>, + Block: BlockT, +{ + use xcm::prelude::*; + ExtBuilder::<Runtime>::default().build().execute_with(|| { + let transfer_amount = 100u128; + let xcm_to_weigh = Xcm::<RuntimeCall>::builder_unsafe() + .withdraw_asset((Here, transfer_amount)) + .buy_execution((Here, transfer_amount), Unlimited) + .deposit_asset(AllCounted(1), [1u8; 32]) + .build(); + let versioned_xcm_to_weigh = VersionedXcm::from(xcm_to_weigh.clone().into()); + + // We first try calling it with a lower XCM version. + let lower_version_xcm_to_weigh = + versioned_xcm_to_weigh.clone().into_version(XCM_VERSION - 1).unwrap(); + let xcm_weight = Runtime::query_xcm_weight(lower_version_xcm_to_weigh); + assert!(xcm_weight.is_ok()); + let native_token: Location = Parent.into(); + let native_token_versioned = VersionedAssetId::from(AssetId(native_token)); + let lower_version_native_token = + native_token_versioned.clone().into_version(XCM_VERSION - 1).unwrap(); + let execution_fees = + Runtime::query_weight_to_asset_fee(xcm_weight.unwrap(), lower_version_native_token); + assert!(execution_fees.is_ok()); + + // Now we call it with the latest version. + let xcm_weight = Runtime::query_xcm_weight(versioned_xcm_to_weigh); + assert!(xcm_weight.is_ok()); + let execution_fees = + Runtime::query_weight_to_asset_fee(xcm_weight.unwrap(), native_token_versioned); + assert!(execution_fees.is_ok()); + + // If we call it with anything other than the native token it will error. + let non_existent_token: Location = Here.into(); + let non_existent_token_versioned = VersionedAssetId::from(AssetId(non_existent_token)); + let execution_fees = + Runtime::query_weight_to_asset_fee(xcm_weight.unwrap(), non_existent_token_versioned); + assert_eq!(execution_fees, Err(XcmPaymentApiError::AssetNotFound)); + }); +} diff --git a/docs/contributor/container.md b/docs/contributor/container.md index ec51b8b9d7cc..e387f568d7b5 100644 --- a/docs/contributor/container.md +++ b/docs/contributor/container.md @@ -24,7 +24,7 @@ The command below allows building a Linux binary without having to even install docker run --rm -it \ -w /polkadot-sdk \ -v $(pwd):/polkadot-sdk \ - docker.io/paritytech/ci-unified:bullseye-1.77.0-2024-04-10-v20240408 \ + docker.io/paritytech/ci-unified:bullseye-1.81.0-2024-11-19-v202411281558 \ cargo build --release --locked -p polkadot-parachain-bin --bin polkadot-parachain sudo chown -R $(id -u):$(id -g) target/ ``` diff --git a/docs/sdk/src/reference_docs/chain_spec_runtime/tests/chain_spec_builder_tests.rs b/docs/sdk/src/reference_docs/chain_spec_runtime/tests/chain_spec_builder_tests.rs index c2fe5a6727e6..df400b68f79d 100644 --- a/docs/sdk/src/reference_docs/chain_spec_runtime/tests/chain_spec_builder_tests.rs +++ b/docs/sdk/src/reference_docs/chain_spec_runtime/tests/chain_spec_builder_tests.rs @@ -1,8 +1,10 @@ use serde_json::{json, Value}; use std::{process::Command, str}; -const WASM_FILE_PATH: &str = - "../../../../../target/release/wbuild/chain-spec-guide-runtime/chain_spec_guide_runtime.wasm"; +fn wasm_file_path() -> &'static str { + chain_spec_guide_runtime::runtime::WASM_BINARY_PATH + .expect("chain_spec_guide_runtime wasm should exist.
qed") +} const CHAIN_SPEC_BUILDER_PATH: &str = "../../../../../target/release/chain-spec-builder"; @@ -26,7 +28,7 @@ fn list_presets() { let output = Command::new(get_chain_spec_builder_path()) .arg("list-presets") .arg("-r") - .arg(WASM_FILE_PATH) + .arg(wasm_file_path()) .output() .expect("Failed to execute command"); @@ -50,7 +52,7 @@ fn get_preset() { let output = Command::new(get_chain_spec_builder_path()) .arg("display-preset") .arg("-r") - .arg(WASM_FILE_PATH) + .arg(wasm_file_path()) .arg("-p") .arg("preset_2") .output() @@ -83,7 +85,7 @@ fn generate_chain_spec() { .arg("/dev/stdout") .arg("create") .arg("-r") - .arg(WASM_FILE_PATH) + .arg(wasm_file_path()) .arg("named-preset") .arg("preset_2") .output() @@ -140,7 +142,7 @@ fn generate_para_chain_spec() { .arg("-p") .arg("1000") .arg("-r") - .arg(WASM_FILE_PATH) + .arg(wasm_file_path()) .arg("named-preset") .arg("preset_2") .output() diff --git a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs index 87bf27e4ff18..f4836b7cdde1 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs +++ b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs @@ -13,12 +13,13 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . +#![cfg(feature = "runtime-benchmarks")] use super::*; use crate::{account_and_location, new_executor, EnsureDelivery, XcmCallOf}; use alloc::{vec, vec::Vec}; use codec::Encode; -use frame_benchmarking::{benchmarks, BenchmarkError}; +use frame_benchmarking::v2::*; use frame_support::traits::fungible::Inspect; use xcm::{ latest::{prelude::*, MaxDispatchErrorLen, MaybeErrorCode, Weight, MAX_ITEMS_IN_ASSETS}, @@ -29,16 +30,21 @@ use xcm_executor::{ ExecutorError, FeesMode, }; -benchmarks! { - report_holding { +#[benchmarks] +mod benchmarks { + use super::*; + + #[benchmark] + fn report_holding() -> Result<(), BenchmarkError> { let (sender_account, sender_location) = account_and_location::(1); let destination = T::valid_destination().map_err(|_| BenchmarkError::Skip)?; - let (expected_fees_mode, expected_assets_in_holding) = T::DeliveryHelper::ensure_successful_delivery( - &sender_location, - &destination, - FeeReason::Report, - ); + let (expected_fees_mode, expected_assets_in_holding) = + T::DeliveryHelper::ensure_successful_delivery( + &sender_location, + &destination, + FeeReason::Report, + ); let sender_account_balance_before = T::TransactAsset::balance(&sender_account); // generate holding and add possible required fees @@ -64,21 +70,33 @@ benchmarks! { query_id: Default::default(), max_weight: Weight::MAX, }, - // Worst case is looking through all holdings for every asset explicitly - respecting the limit `MAX_ITEMS_IN_ASSETS`. - assets: Definite(holding.into_inner().into_iter().take(MAX_ITEMS_IN_ASSETS).collect::>().into()), + // Worst case is looking through all holdings for every asset explicitly - respecting + // the limit `MAX_ITEMS_IN_ASSETS`. + assets: Definite( + holding + .into_inner() + .into_iter() + .take(MAX_ITEMS_IN_ASSETS) + .collect::>() + .into(), + ), }; let xcm = Xcm(vec![instruction]); - } : { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } // Check we charged the delivery fees assert!(T::TransactAsset::balance(&sender_account) <= sender_account_balance_before); + + Ok(()) } // This benchmark does not use any additional orders or instructions. 
This should be managed // by the `deep` and `shallow` implementation. - buy_execution { + #[benchmark] + fn buy_execution() -> Result<(), BenchmarkError> { let holding = T::worst_case_holding(0).into(); let mut executor = new_executor::(Default::default()); @@ -92,13 +110,16 @@ benchmarks! { }; let xcm = Xcm(vec![instruction]); - } : { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } + Ok(()) } - pay_fees { + #[benchmark] + fn pay_fees() -> Result<(), BenchmarkError> { let holding = T::worst_case_holding(0).into(); let mut executor = new_executor::(Default::default()); @@ -111,40 +132,53 @@ benchmarks! { let instruction = Instruction::>::PayFees { asset: fee_asset }; let xcm = Xcm(vec![instruction]); - } : { - executor.bench_process(xcm)?; - } verify {} + #[block] + { + executor.bench_process(xcm)?; + } + Ok(()) + } - set_asset_claimer { + #[benchmark] + fn set_asset_claimer() -> Result<(), BenchmarkError> { let mut executor = new_executor::(Default::default()); let (_, sender_location) = account_and_location::(1); - let instruction = Instruction::SetAssetClaimer{ location:sender_location.clone() }; + let instruction = Instruction::SetAssetClaimer { location: sender_location.clone() }; let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } assert_eq!(executor.asset_claimer(), Some(sender_location.clone())); + + Ok(()) } - query_response { + #[benchmark] + fn query_response() -> Result<(), BenchmarkError> { let mut executor = new_executor::(Default::default()); let (query_id, response) = T::worst_case_response(); let max_weight = Weight::MAX; let querier: Option = Some(Here.into()); let instruction = Instruction::QueryResponse { query_id, response, max_weight, querier }; let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { + + #[block] + { + executor.bench_process(xcm)?; + } // The assert above is enough to show this XCM succeeded + + Ok(()) } // We don't care about the call itself, since that is accounted for in the weight parameter // and included in the final weight calculation. So this is just the overhead of submitting // a noop call. - transact { + #[benchmark] + fn transact() -> Result<(), BenchmarkError> { let (origin, noop_call) = T::transact_origin_and_runtime_call()?; let mut executor = new_executor::(origin); let double_encoded_noop_call: DoubleEncoded<_> = noop_call.encode().into(); @@ -154,119 +188,145 @@ benchmarks! { call: double_encoded_noop_call, }; let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } // TODO Make the assertion configurable? 
+ + Ok(()) } - refund_surplus { + #[benchmark] + fn refund_surplus() -> Result<(), BenchmarkError> { let mut executor = new_executor::(Default::default()); let holding_assets = T::worst_case_holding(1); // We can already buy execution since we'll load the holding register manually let asset_for_fees = T::fee_asset().unwrap(); - let previous_xcm = Xcm(vec![BuyExecution { fees: asset_for_fees, weight_limit: Limited(Weight::from_parts(1337, 1337)) }]); + let previous_xcm = Xcm(vec![BuyExecution { + fees: asset_for_fees, + weight_limit: Limited(Weight::from_parts(1337, 1337)), + }]); executor.set_holding(holding_assets.into()); executor.set_total_surplus(Weight::from_parts(1337, 1337)); executor.set_total_refunded(Weight::zero()); - executor.bench_process(previous_xcm).expect("Holding has been loaded, so we can buy execution here"); + executor + .bench_process(previous_xcm) + .expect("Holding has been loaded, so we can buy execution here"); let instruction = Instruction::>::RefundSurplus; let xcm = Xcm(vec![instruction]); - } : { - let result = executor.bench_process(xcm)?; - } verify { + #[block] + { + let _result = executor.bench_process(xcm)?; + } assert_eq!(executor.total_surplus(), &Weight::from_parts(1337, 1337)); assert_eq!(executor.total_refunded(), &Weight::from_parts(1337, 1337)); + + Ok(()) } - set_error_handler { + #[benchmark] + fn set_error_handler() -> Result<(), BenchmarkError> { let mut executor = new_executor::(Default::default()); let instruction = Instruction::>::SetErrorHandler(Xcm(vec![])); let xcm = Xcm(vec![instruction]); - } : { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } assert_eq!(executor.error_handler(), &Xcm(vec![])); + + Ok(()) } - set_appendix { + #[benchmark] + fn set_appendix() -> Result<(), BenchmarkError> { let mut executor = new_executor::(Default::default()); let appendix = Xcm(vec![]); let instruction = Instruction::>::SetAppendix(appendix); let xcm = Xcm(vec![instruction]); - } : { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } assert_eq!(executor.appendix(), &Xcm(vec![])); + Ok(()) } - clear_error { + #[benchmark] + fn clear_error() -> Result<(), BenchmarkError> { let mut executor = new_executor::(Default::default()); executor.set_error(Some((5u32, XcmError::Overflow))); let instruction = Instruction::>::ClearError; let xcm = Xcm(vec![instruction]); - } : { - executor.bench_process(xcm)?; - } verify { - assert!(executor.error().is_none()) + #[block] + { + executor.bench_process(xcm)?; + } + assert!(executor.error().is_none()); + Ok(()) } - descend_origin { + #[benchmark] + fn descend_origin() -> Result<(), BenchmarkError> { let mut executor = new_executor::(Default::default()); let who = Junctions::from([OnlyChild, OnlyChild]); let instruction = Instruction::DescendOrigin(who.clone()); let xcm = Xcm(vec![instruction]); - } : { - executor.bench_process(xcm)?; - } verify { - assert_eq!( - executor.origin(), - &Some(Location { - parents: 0, - interior: who, - }), - ); + #[block] + { + executor.bench_process(xcm)?; + } + assert_eq!(executor.origin(), &Some(Location { parents: 0, interior: who }),); + + Ok(()) } - execute_with_origin { + #[benchmark] + fn execute_with_origin() -> Result<(), BenchmarkError> { let mut executor = new_executor::(Default::default()); let who: Junctions = Junctions::from([AccountId32 { id: [0u8; 32], network: None }]); - let instruction = Instruction::ExecuteWithOrigin { descendant_origin: Some(who.clone()), xcm: Xcm(vec![]) }; + 
let instruction = Instruction::ExecuteWithOrigin { + descendant_origin: Some(who.clone()), + xcm: Xcm(vec![]), + }; let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { - assert_eq!( - executor.origin(), - &Some(Location { - parents: 0, - interior: Here, - }), - ); + #[block] + { + executor.bench_process(xcm)?; + } + assert_eq!(executor.origin(), &Some(Location { parents: 0, interior: Here }),); + + Ok(()) } - clear_origin { + #[benchmark] + fn clear_origin() -> Result<(), BenchmarkError> { let mut executor = new_executor::(Default::default()); let instruction = Instruction::ClearOrigin; let xcm = Xcm(vec![instruction]); - } : { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } assert_eq!(executor.origin(), &None); + Ok(()) } - report_error { + #[benchmark] + fn report_error() -> Result<(), BenchmarkError> { let (sender_account, sender_location) = account_and_location::(1); let query_id = Default::default(); let max_weight = Default::default(); let destination = T::valid_destination().map_err(|_| BenchmarkError::Skip)?; - let (expected_fees_mode, expected_assets_in_holding) = T::DeliveryHelper::ensure_successful_delivery( - &sender_location, - &destination, - FeeReason::Report, - ); + let (expected_fees_mode, expected_assets_in_holding) = + T::DeliveryHelper::ensure_successful_delivery( + &sender_location, + &destination, + FeeReason::Report, + ); let sender_account_balance_before = T::TransactAsset::balance(&sender_account); let mut executor = new_executor::(sender_location); @@ -278,18 +338,21 @@ benchmarks! { } executor.set_error(Some((0u32, XcmError::Unimplemented))); - let instruction = Instruction::ReportError(QueryResponseInfo { - query_id, destination, max_weight - }); + let instruction = + Instruction::ReportError(QueryResponseInfo { query_id, destination, max_weight }); let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } // Check we charged the delivery fees assert!(T::TransactAsset::balance(&sender_account) <= sender_account_balance_before); + + Ok(()) } - claim_asset { + #[benchmark] + fn claim_asset() -> Result<(), BenchmarkError> { use xcm_executor::traits::DropAssets; let (origin, ticket, assets) = T::claimable_asset()?; @@ -298,11 +361,7 @@ benchmarks! { ::AssetTrap::drop_assets( &origin, assets.clone().into(), - &XcmContext { - origin: Some(origin.clone()), - message_id: [0; 32], - topic: None, - }, + &XcmContext { origin: Some(origin.clone()), message_id: [0; 32], topic: None }, ); // Assets should be in the trap now. @@ -310,28 +369,32 @@ benchmarks! { let mut executor = new_executor::(origin); let instruction = Instruction::ClaimAsset { assets: assets.clone(), ticket }; let xcm = Xcm(vec![instruction]); - } :{ - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } assert!(executor.holding().ensure_contains(&assets).is_ok()); + Ok(()) } - trap { + #[benchmark] + fn trap() -> Result<(), BenchmarkError> { let mut executor = new_executor::(Default::default()); let instruction = Instruction::Trap(10); let xcm = Xcm(vec![instruction]); // In order to access result in the verification below, it needs to be defined here. - let mut _result = Ok(()); - } : { - _result = executor.bench_process(xcm); - } verify { - assert!(matches!(_result, Err(ExecutorError { - xcm_error: XcmError::Trap(10), - .. 
- }))); + let result; + #[block] + { + result = executor.bench_process(xcm); + } + assert!(matches!(result, Err(ExecutorError { xcm_error: XcmError::Trap(10), .. }))); + + Ok(()) } - subscribe_version { + #[benchmark] + fn subscribe_version() -> Result<(), BenchmarkError> { use xcm_executor::traits::VersionChangeNotifier; let origin = T::subscribe_origin()?; let query_id = Default::default(); @@ -339,13 +402,18 @@ benchmarks! { let mut executor = new_executor::(origin.clone()); let instruction = Instruction::SubscribeVersion { query_id, max_response_weight }; let xcm = Xcm(vec![instruction]); - } : { - executor.bench_process(xcm)?; - } verify { - assert!(::SubscriptionService::is_subscribed(&origin)); + #[block] + { + executor.bench_process(xcm)?; + } + assert!(::SubscriptionService::is_subscribed( + &origin + )); + Ok(()) } - unsubscribe_version { + #[benchmark] + fn unsubscribe_version() -> Result<(), BenchmarkError> { use xcm_executor::traits::VersionChangeNotifier; // First we need to subscribe to notifications. let (origin, _) = T::transact_origin_and_runtime_call()?; @@ -355,24 +423,28 @@ benchmarks! { &origin, query_id, max_response_weight, - &XcmContext { - origin: Some(origin.clone()), - message_id: [0; 32], - topic: None, - }, - ).map_err(|_| "Could not start subscription")?; - assert!(::SubscriptionService::is_subscribed(&origin)); + &XcmContext { origin: Some(origin.clone()), message_id: [0; 32], topic: None }, + ) + .map_err(|_| "Could not start subscription")?; + assert!(::SubscriptionService::is_subscribed( + &origin + )); let mut executor = new_executor::(origin.clone()); let instruction = Instruction::UnsubscribeVersion; let xcm = Xcm(vec![instruction]); - } : { - executor.bench_process(xcm)?; - } verify { - assert!(!::SubscriptionService::is_subscribed(&origin)); + #[block] + { + executor.bench_process(xcm)?; + } + assert!(!::SubscriptionService::is_subscribed( + &origin + )); + Ok(()) } - burn_asset { + #[benchmark] + fn burn_asset() -> Result<(), BenchmarkError> { let holding = T::worst_case_holding(0); let assets = holding.clone(); @@ -381,13 +453,16 @@ benchmarks! { let instruction = Instruction::BurnAsset(assets.into()); let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } assert!(executor.holding().is_empty()); + Ok(()) } - expect_asset { + #[benchmark] + fn expect_asset() -> Result<(), BenchmarkError> { let holding = T::worst_case_holding(0); let assets = holding.clone(); @@ -396,71 +471,86 @@ benchmarks! { let instruction = Instruction::ExpectAsset(assets.into()); let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } // `execute` completing successfully is as good as we can check. + + Ok(()) } - expect_origin { + #[benchmark] + fn expect_origin() -> Result<(), BenchmarkError> { let expected_origin = Parent.into(); let mut executor = new_executor::(Default::default()); let instruction = Instruction::ExpectOrigin(Some(expected_origin)); let xcm = Xcm(vec![instruction]); let mut _result = Ok(()); - }: { - _result = executor.bench_process(xcm); - } verify { - assert!(matches!(_result, Err(ExecutorError { - xcm_error: XcmError::ExpectationFalse, - .. - }))); + #[block] + { + _result = executor.bench_process(xcm); + } + assert!(matches!( + _result, + Err(ExecutorError { xcm_error: XcmError::ExpectationFalse, .. 
}) + )); + + Ok(()) } - expect_error { + #[benchmark] + fn expect_error() -> Result<(), BenchmarkError> { let mut executor = new_executor::(Default::default()); executor.set_error(Some((3u32, XcmError::Overflow))); let instruction = Instruction::ExpectError(None); let xcm = Xcm(vec![instruction]); let mut _result = Ok(()); - }: { - _result = executor.bench_process(xcm); - } verify { - assert!(matches!(_result, Err(ExecutorError { - xcm_error: XcmError::ExpectationFalse, - .. - }))); + #[block] + { + _result = executor.bench_process(xcm); + } + assert!(matches!( + _result, + Err(ExecutorError { xcm_error: XcmError::ExpectationFalse, .. }) + )); + + Ok(()) } - expect_transact_status { + #[benchmark] + fn expect_transact_status() -> Result<(), BenchmarkError> { let mut executor = new_executor::(Default::default()); - let worst_error = || -> MaybeErrorCode { - vec![0; MaxDispatchErrorLen::get() as usize].into() - }; + let worst_error = + || -> MaybeErrorCode { vec![0; MaxDispatchErrorLen::get() as usize].into() }; executor.set_transact_status(worst_error()); let instruction = Instruction::ExpectTransactStatus(worst_error()); let xcm = Xcm(vec![instruction]); let mut _result = Ok(()); - }: { - _result = executor.bench_process(xcm); - } verify { + #[block] + { + _result = executor.bench_process(xcm); + } assert!(matches!(_result, Ok(..))); + Ok(()) } - query_pallet { + #[benchmark] + fn query_pallet() -> Result<(), BenchmarkError> { let (sender_account, sender_location) = account_and_location::(1); let query_id = Default::default(); let destination = T::valid_destination().map_err(|_| BenchmarkError::Skip)?; let max_weight = Default::default(); - let (expected_fees_mode, expected_assets_in_holding) = T::DeliveryHelper::ensure_successful_delivery( - &sender_location, - &destination, - FeeReason::QueryPallet, - ); + let (expected_fees_mode, expected_assets_in_holding) = + T::DeliveryHelper::ensure_successful_delivery( + &sender_location, + &destination, + FeeReason::QueryPallet, + ); let sender_account_balance_before = T::TransactAsset::balance(&sender_account); let mut executor = new_executor::(sender_location); if let Some(expected_fees_mode) = expected_fees_mode { @@ -476,15 +566,19 @@ benchmarks! { response_info: QueryResponseInfo { destination, query_id, max_weight }, }; let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } // Check we charged the delivery fees assert!(T::TransactAsset::balance(&sender_account) <= sender_account_balance_before); // TODO: Potentially add new trait to XcmSender to detect a queued outgoing message. #4426 + + Ok(()) } - expect_pallet { + #[benchmark] + fn expect_pallet() -> Result<(), BenchmarkError> { let mut executor = new_executor::(Default::default()); let valid_pallet = T::valid_pallet(); let instruction = Instruction::ExpectPallet { @@ -495,23 +589,27 @@ benchmarks! 
{ min_crate_minor: valid_pallet.crate_version.minor.into(), }; let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } // the execution succeeding is all we need to verify this xcm was successful + Ok(()) } - report_transact_status { + #[benchmark] + fn report_transact_status() -> Result<(), BenchmarkError> { let (sender_account, sender_location) = account_and_location::(1); let query_id = Default::default(); let destination = T::valid_destination().map_err(|_| BenchmarkError::Skip)?; let max_weight = Default::default(); - let (expected_fees_mode, expected_assets_in_holding) = T::DeliveryHelper::ensure_successful_delivery( - &sender_location, - &destination, - FeeReason::Report, - ); + let (expected_fees_mode, expected_assets_in_holding) = + T::DeliveryHelper::ensure_successful_delivery( + &sender_location, + &destination, + FeeReason::Report, + ); let sender_account_balance_before = T::TransactAsset::balance(&sender_account); let mut executor = new_executor::(sender_location); @@ -529,84 +627,102 @@ benchmarks! { max_weight, }); let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } // Check we charged the delivery fees assert!(T::TransactAsset::balance(&sender_account) <= sender_account_balance_before); // TODO: Potentially add new trait to XcmSender to detect a queued outgoing message. #4426 + Ok(()) } - clear_transact_status { + #[benchmark] + fn clear_transact_status() -> Result<(), BenchmarkError> { let mut executor = new_executor::(Default::default()); executor.set_transact_status(b"MyError".to_vec().into()); let instruction = Instruction::ClearTransactStatus; let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } assert_eq!(executor.transact_status(), &MaybeErrorCode::Success); + Ok(()) } - set_topic { + #[benchmark] + fn set_topic() -> Result<(), BenchmarkError> { let mut executor = new_executor::(Default::default()); let instruction = Instruction::SetTopic([1; 32]); let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } assert_eq!(executor.topic(), &Some([1; 32])); + Ok(()) } - clear_topic { + #[benchmark] + fn clear_topic() -> Result<(), BenchmarkError> { let mut executor = new_executor::(Default::default()); executor.set_topic(Some([2; 32])); let instruction = Instruction::ClearTopic; let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } assert_eq!(executor.topic(), &None); + Ok(()) } - exchange_asset { + #[benchmark] + fn exchange_asset() -> Result<(), BenchmarkError> { let (give, want) = T::worst_case_asset_exchange().map_err(|_| BenchmarkError::Skip)?; let assets = give.clone(); let mut executor = new_executor::(Default::default()); executor.set_holding(give.into()); - let instruction = Instruction::ExchangeAsset { - give: assets.into(), - want: want.clone(), - maximal: true, - }; + let instruction = + Instruction::ExchangeAsset { give: assets.into(), want: want.clone(), maximal: true }; let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } assert_eq!(executor.holding(), &want.into()); + Ok(()) } - universal_origin { + #[benchmark] + fn universal_origin() -> Result<(), BenchmarkError> { let 
(origin, alias) = T::universal_alias().map_err(|_| BenchmarkError::Skip)?; let mut executor = new_executor::(origin); let instruction = Instruction::UniversalOrigin(alias); let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } use frame_support::traits::Get; let universal_location = ::UniversalLocation::get(); - assert_eq!(executor.origin(), &Some(Junctions::from([alias]).relative_to(&universal_location))); + assert_eq!( + executor.origin(), + &Some(Junctions::from([alias]).relative_to(&universal_location)) + ); + + Ok(()) } - export_message { - let x in 1 .. 1000; + #[benchmark] + fn export_message(x: Linear<1, 1000>) -> Result<(), BenchmarkError> { // The `inner_xcm` influences `ExportMessage` total weight based on // `inner_xcm.encoded_size()`, so for this benchmark use smallest encoded instruction // to approximate weight per "unit" of encoded size; then actual weight can be estimated @@ -616,11 +732,12 @@ benchmarks! { // Get `origin`, `network` and `destination` from configured runtime. let (origin, network, destination) = T::export_message_origin_and_destination()?; - let (expected_fees_mode, expected_assets_in_holding) = T::DeliveryHelper::ensure_successful_delivery( - &origin, - &destination.clone().into(), - FeeReason::Export { network, destination: destination.clone() }, - ); + let (expected_fees_mode, expected_assets_in_holding) = + T::DeliveryHelper::ensure_successful_delivery( + &origin, + &destination.clone().into(), + FeeReason::Export { network, destination: destination.clone() }, + ); let sender_account = T::AccountIdConverter::convert_location(&origin).unwrap(); let sender_account_balance_before = T::TransactAsset::balance(&sender_account); @@ -631,37 +748,39 @@ benchmarks! { if let Some(expected_assets_in_holding) = expected_assets_in_holding { executor.set_holding(expected_assets_in_holding.into()); } - let xcm = Xcm(vec![ExportMessage { - network, destination: destination.clone(), xcm: inner_xcm, - }]); - }: { - executor.bench_process(xcm)?; - } verify { + let xcm = + Xcm(vec![ExportMessage { network, destination: destination.clone(), xcm: inner_xcm }]); + #[block] + { + executor.bench_process(xcm)?; + } // Check we charged the delivery fees assert!(T::TransactAsset::balance(&sender_account) <= sender_account_balance_before); // TODO: Potentially add new trait to XcmSender to detect a queued outgoing message. 
#4426 + Ok(()) } - set_fees_mode { + #[benchmark] + fn set_fees_mode() -> Result<(), BenchmarkError> { let mut executor = new_executor::(Default::default()); executor.set_fees_mode(FeesMode { jit_withdraw: false }); let instruction = Instruction::SetFeesMode { jit_withdraw: true }; let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } assert_eq!(executor.fees_mode(), &FeesMode { jit_withdraw: true }); + Ok(()) } - lock_asset { + #[benchmark] + fn lock_asset() -> Result<(), BenchmarkError> { let (unlocker, owner, asset) = T::unlockable_asset()?; - let (expected_fees_mode, expected_assets_in_holding) = T::DeliveryHelper::ensure_successful_delivery( - &owner, - &unlocker, - FeeReason::LockAsset, - ); + let (expected_fees_mode, expected_assets_in_holding) = + T::DeliveryHelper::ensure_successful_delivery(&owner, &unlocker, FeeReason::LockAsset); let sender_account = T::AccountIdConverter::convert_location(&owner).unwrap(); let sender_account_balance_before = T::TransactAsset::balance(&sender_account); @@ -681,15 +800,18 @@ benchmarks! { let instruction = Instruction::LockAsset { asset, unlocker }; let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } // Check delivery fees assert!(T::TransactAsset::balance(&sender_account) <= sender_account_balance_before); // TODO: Potentially add new trait to XcmSender to detect a queued outgoing message. #4426 + Ok(()) } - unlock_asset { + #[benchmark] + fn unlock_asset() -> Result<(), BenchmarkError> { use xcm_executor::traits::{AssetLock, Enact}; let (unlocker, owner, asset) = T::unlockable_asset()?; @@ -709,13 +831,15 @@ benchmarks! { // ... then unlock them with the UnlockAsset instruction. let instruction = Instruction::UnlockAsset { asset, target: owner }; let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { - + #[block] + { + executor.bench_process(xcm)?; + } + Ok(()) } - note_unlockable { + #[benchmark] + fn note_unlockable() -> Result<(), BenchmarkError> { use xcm_executor::traits::{AssetLock, Enact}; let (unlocker, owner, asset) = T::unlockable_asset()?; @@ -735,13 +859,15 @@ benchmarks! { // ... then note them as unlockable with the NoteUnlockable instruction. let instruction = Instruction::NoteUnlockable { asset, owner }; let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { - + #[block] + { + executor.bench_process(xcm)?; + } + Ok(()) } - request_unlock { + #[benchmark] + fn request_unlock() -> Result<(), BenchmarkError> { use xcm_executor::traits::{AssetLock, Enact}; let (locker, owner, asset) = T::unlockable_asset()?; @@ -756,11 +882,12 @@ benchmarks! { .enact() .map_err(|_| BenchmarkError::Skip)?; - let (expected_fees_mode, expected_assets_in_holding) = T::DeliveryHelper::ensure_successful_delivery( - &owner, - &locker, - FeeReason::RequestUnlock, - ); + let (expected_fees_mode, expected_assets_in_holding) = + T::DeliveryHelper::ensure_successful_delivery( + &owner, + &locker, + FeeReason::RequestUnlock, + ); let sender_account = T::AccountIdConverter::convert_location(&owner).unwrap(); let sender_account_balance_before = T::TransactAsset::balance(&sender_account); @@ -774,15 +901,18 @@ benchmarks! 
{ } let instruction = Instruction::RequestUnlock { asset, locker }; let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } // Check we charged the delivery fees assert!(T::TransactAsset::balance(&sender_account) <= sender_account_balance_before); // TODO: Potentially add new trait to XcmSender to detect a queued outgoing message. #4426 + Ok(()) } - unpaid_execution { + #[benchmark] + fn unpaid_execution() -> Result<(), BenchmarkError> { let mut executor = new_executor::(Default::default()); executor.set_origin(Some(Here.into())); @@ -792,21 +922,27 @@ benchmarks! { }; let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; + #[block] + { + executor.bench_process(xcm)?; + } + Ok(()) } - alias_origin { + #[benchmark] + fn alias_origin() -> Result<(), BenchmarkError> { let (origin, target) = T::alias_origin().map_err(|_| BenchmarkError::Skip)?; let mut executor = new_executor::(origin); let instruction = Instruction::AliasOrigin(target.clone()); let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } assert_eq!(executor.origin(), &Some(target)); + Ok(()) } impl_benchmark_test_suite!( diff --git a/polkadot/xcm/xcm-runtime-apis/tests/fee_estimation.rs b/polkadot/xcm/xcm-runtime-apis/tests/fee_estimation.rs index 2d14b4e571c6..c3046b134d1f 100644 --- a/polkadot/xcm/xcm-runtime-apis/tests/fee_estimation.rs +++ b/polkadot/xcm/xcm-runtime-apis/tests/fee_estimation.rs @@ -353,3 +353,26 @@ fn dry_run_xcm() { ); }); } + +#[test] +fn calling_payment_api_with_a_lower_version_works() { + let transfer_amount = 100u128; + let xcm_to_weigh = Xcm::::builder_unsafe() + .withdraw_asset((Here, transfer_amount)) + .buy_execution((Here, transfer_amount), Unlimited) + .deposit_asset(AllCounted(1), [1u8; 32]) + .build(); + let versioned_xcm_to_weigh = VersionedXcm::from(xcm_to_weigh.clone().into()); + let lower_version_xcm_to_weigh = versioned_xcm_to_weigh.into_version(XCM_VERSION - 1).unwrap(); + let client = TestClient; + let runtime_api = client.runtime_api(); + let xcm_weight = + runtime_api.query_xcm_weight(H256::zero(), lower_version_xcm_to_weigh).unwrap(); + assert!(xcm_weight.is_ok()); + let native_token = VersionedAssetId::from(AssetId(Here.into())); + let lower_version_native_token = native_token.into_version(XCM_VERSION - 1).unwrap(); + let execution_fees = runtime_api + .query_weight_to_asset_fee(H256::zero(), xcm_weight.unwrap(), lower_version_native_token) + .unwrap(); + assert!(execution_fees.is_ok()); +} diff --git a/polkadot/xcm/xcm-runtime-apis/tests/mock.rs b/polkadot/xcm/xcm-runtime-apis/tests/mock.rs index f0a5be908f69..fb5d1ae7c0e5 100644 --- a/polkadot/xcm/xcm-runtime-apis/tests/mock.rs +++ b/polkadot/xcm/xcm-runtime-apis/tests/mock.rs @@ -453,7 +453,8 @@ sp_api::mock_impl_runtime_apis! { } fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result { - match asset.try_as::() { + let latest_asset_id: Result = asset.clone().try_into(); + match latest_asset_id { Ok(asset_id) if asset_id.0 == HereLocation::get() => { Ok(WeightToFee::weight_to_fee(&weight)) }, diff --git a/prdoc/pr_5732.prdoc b/prdoc/pr_5732.prdoc new file mode 100644 index 000000000000..6f3f9b8a1668 --- /dev/null +++ b/prdoc/pr_5732.prdoc @@ -0,0 +1,29 @@ +title: Expose the unstable metadata v16 +doc: +- audience: Node Dev + description: | + This PR exposes the *unstable* metadata V16. 
The metadata is exposed under the unstable u32::MAX number. + Developers can start experimenting with the new features of the metadata v16. *Please note that this metadata is under development; expect breaking changes until stabilization.* + The `ExtrinsicMetadata` trait receives a breaking change. Its associated type `VERSION` is renamed to `VERSIONS` and now supports a constant static list of metadata versions. + The versions implemented for `UncheckedExtrinsic` are v4 (legacy version) and v5 (new version). + For metadata collection, it is assumed that all `TransactionExtensions` are under version 0. + +crates: + - name: sp-metadata-ir + bump: major + - name: frame-support-procedural + bump: patch + - name: frame-support + bump: minor + - name: frame-support-test + bump: major + - name: frame-metadata-hash-extension + bump: patch + - name: substrate-wasm-builder + bump: minor + - name: pallet-revive + bump: minor + - name: sp-runtime + bump: major + - name: frame-benchmarking-cli + bump: patch diff --git a/prdoc/pr_5855.prdoc b/prdoc/pr_5855.prdoc new file mode 100644 index 000000000000..7735cfee9f37 --- /dev/null +++ b/prdoc/pr_5855.prdoc @@ -0,0 +1,15 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Remove feature `test-helpers` from sc-service + +doc: + - audience: Node Dev + description: | + Removes feature `test-helpers` from sc-service. + +crates: + - name: sc-service + bump: major + - name: sc-rpc-spec-v2 + bump: major diff --git a/prdoc/pr_5997.prdoc b/prdoc/pr_5997.prdoc new file mode 100644 index 000000000000..6bac36a44586 --- /dev/null +++ b/prdoc/pr_5997.prdoc @@ -0,0 +1,18 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Implement archive_unstable_storageDiff method + +doc: + - audience: Node Dev + description: | + This PR implements the `archive_unstable_storageDiff` rpc-v2 method. + Developers can use this method to fetch the storage differences + between two blocks. This is useful for oracles and archive nodes. + For more details see: https://github.com/paritytech/json-rpc-interface-spec/blob/main/src/api/archive_unstable_storageDiff.md. + +crates: + - name: sc-rpc-spec-v2 + bump: major + - name: sc-service + bump: patch diff --git a/prdoc/pr_6459.prdoc b/prdoc/pr_6459.prdoc new file mode 100644 index 000000000000..592ba4c6b29d --- /dev/null +++ b/prdoc/pr_6459.prdoc @@ -0,0 +1,22 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Fix version conversion in XcmPaymentApi::query_weight_to_asset_fee. + +doc: + - audience: Runtime Dev + description: | + The `query_weight_to_asset_fee` function of the `XcmPaymentApi` was trying + to convert versions in the wrong way. + This resulted in all calls made with lower versions failing. + The version conversion is now done correctly and these same calls will now succeed.
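To make the shape of the pr_6459 fix concrete in isolation, here is a hypothetical standalone sketch (not part of this diff) contrasting the old `try_as` pattern with the `try_into` conversion used in the runtime changes above. It assumes only the `xcm` crate prelude (`VersionedAssetId`, `AssetId`, `Location`, `XCM_VERSION`), the same types the diff itself uses.

```rust
use xcm::prelude::*;

// Normalize any supported `VersionedAssetId` to the latest in-code version
// before matching on it. The old pattern, `asset.try_as::<AssetId>()`, only
// matched when the caller had already passed the latest variant, so calls
// made with lower XCM versions always failed; `try_into` converts across
// versions where a conversion exists.
fn normalize(asset: VersionedAssetId) -> Result<AssetId, ()> {
    asset.try_into()
}

fn main() {
    let native_token: Location = Parent.into();
    let latest = VersionedAssetId::from(AssetId(native_token));
    // A caller still on the previous XCM version now succeeds as well.
    let older = latest.clone().into_version(XCM_VERSION - 1).unwrap();
    assert!(normalize(older).is_ok());
    assert!(normalize(latest).is_ok());
}
```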
+ +crates: + - name: asset-hub-westend-runtime + bump: patch + - name: asset-hub-rococo-runtime + bump: patch + - name: xcm-runtime-apis + bump: patch + - name: assets-common + bump: patch diff --git a/prdoc/pr_6565.prdoc b/prdoc/pr_6565.prdoc new file mode 100644 index 000000000000..f9a75a16a6a7 --- /dev/null +++ b/prdoc/pr_6565.prdoc @@ -0,0 +1,35 @@ +title: 'pallet_revive: Switch to 64bit RISC-V' +doc: +- audience: Runtime Dev + description: |- + This PR updates pallet_revive to the newest PolkaVM version and adapts the test fixtures and syscall interface to work under 64bit. + + Please note that after this PR no 32bit contracts can be deployed (they will be rejected at deploy time). Pre-deployed 32bit contracts are now considered defunct since we changed how parameters are passed for functions with more than 6 arguments. + + ## Fixtures + + The fixtures are now built for the 64bit target. I also removed the temporary directory mechanism that triggered a full rebuild every time. It also makes it easier to find the compiled fixtures since they are now always in `target/pallet-revive-fixtures`. + + ## Syscall interface + + ### Passing pointer + + Registers and pointers are now 64bit wide. This allows us to pass u64 arguments in a single register. Before we needed two registers to pass them. This means that just as before we need one register per pointer we pass. We keep pointers as `u32` arguments by truncating the register. This is done since the memory space of PolkaVM is 32bit. + + ### Functions with more than 6 arguments + + We only have 6 registers to pass arguments. This is why we pass a pointer to a struct when we need more than 6. Before this PR we expected a packed struct and interpreted it as a SCALE encoded tuple. However, this was buggy because `MaxEncodedLen` returned something larger than the packed size of the structure. This wasn't a problem before, but the memory space changed in a way that things were placed at the edges of the memory space, and those extra bytes led to an out-of-bounds access. + + This is why this PR drops SCALE and expects the arguments to be passed as a pointer to a `C` aligned struct. This avoids unaligned accesses. However, revive needs to adapt its codegen to properly align the structure fields. A hypothetical sketch of this struct-passing convention is shown further below. + + ## TODO + - [ ] Add multi block migration that wipes all existing contracts as we made breaking changes to the syscall interface +crates: +- name: pallet-revive + bump: major +- name: pallet-revive-fixtures + bump: major +- name: pallet-revive-proc-macro + bump: major +- name: pallet-revive-uapi + bump: major diff --git a/prdoc/pr_6603.prdoc b/prdoc/pr_6603.prdoc new file mode 100644 index 000000000000..20c5e7294dfa --- /dev/null +++ b/prdoc/pr_6603.prdoc @@ -0,0 +1,16 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Always provide main protocol name in litep2p responses + +doc: + - audience: [ Node Dev, Node Operator ] + description: | + This PR aligns litep2p behavior with libp2p. Previously, the litep2p network backend + would provide the actual negotiated request-response protocol that produced a + response message. After this PR, only the main protocol name is reported to other + subsystems.
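As referenced in the pr_6565 notes above: a minimal, hypothetical sketch of the struct-passing convention for syscalls with more than six arguments. The field names are invented and do not match the real pallet-revive uapi; the point is only the `#[repr(C)]` layout and the single register-sized pointer.

```rust
// Hypothetical argument block for a >6-argument syscall. `#[repr(C)]` gives
// every field its natural alignment, unlike the packed SCALE buffer used
// before, whose `MaxEncodedLen` could exceed the packed size and overrun the
// edge of the guest address space.
#[repr(C)]
struct CallArgs {
    flags: u32,
    callee_ptr: u32, // pointers stay `u32`: PolkaVM's guest memory is 32-bit
    value_ptr: u32,
    input_ptr: u32,
    input_len: u32,
    output_ptr: u32,
    output_len_ptr: u32,
}

fn main() {
    let args = CallArgs {
        flags: 0,
        callee_ptr: 0,
        value_ptr: 0,
        input_ptr: 0,
        input_len: 0,
        output_ptr: 0,
        output_len_ptr: 0,
    };
    // The guest passes a single 64-bit register holding a pointer to the
    // block; on the host side that register is truncated to `u32`, since
    // PolkaVM's guest address space is 32-bit.
    let reg: u64 = core::ptr::addr_of!(args) as usize as u64;
    let guest_ptr = reg as u32;
    println!("register = {reg:#x}, guest pointer = {guest_ptr:#x}");
}
```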
+ +crates: + - name: sc-network + bump: patch diff --git a/prdoc/pr_6665.prdoc b/prdoc/pr_6665.prdoc new file mode 100644 index 000000000000..b5aaf8a3b184 --- /dev/null +++ b/prdoc/pr_6665.prdoc @@ -0,0 +1,15 @@ +title: Fix runtime api impl detection by construct runtime +doc: +- audience: Runtime Dev + description: |- + Construct runtime uses autoref-based specialization to fetch the metadata about the implemented runtime apis. This is done so that compilation does not fail when no runtime apis are implemented. However, there was an issue with detecting runtime apis when they were implemented in a different file. The problem is solved by moving the trait implemented by `impl_runtime_apis!` to the metadata IR crate (see the sketch below). + + + Closes: https://github.com/paritytech/polkadot-sdk/issues/6659 +crates: +- name: frame-support-procedural + bump: patch +- name: sp-api-proc-macro + bump: patch +- name: sp-metadata-ir + bump: patch diff --git a/prdoc/pr_6673.prdoc b/prdoc/pr_6673.prdoc new file mode 100644 index 000000000000..d2ca3c61ff39 --- /dev/null +++ b/prdoc/pr_6673.prdoc @@ -0,0 +1,7 @@ +title: 'chain-spec-guide-runtime: path to wasm blob fixed' +doc: +- audience: Runtime Dev + description: In the `chain-spec-guide-runtime` crate's tests, there was an assumption that + a release version of the wasm blob exists. This PR uses the `chain_spec_guide_runtime::runtime::WASM_BINARY_PATH` + const to use the correct path to the runtime blob. +crates: [] diff --git a/prdoc/pr_6677.prdoc b/prdoc/pr_6677.prdoc new file mode 100644 index 000000000000..c6766889e68d --- /dev/null +++ b/prdoc/pr_6677.prdoc @@ -0,0 +1,11 @@ +title: 'chore: Update litep2p to v0.8.2' +doc: +- audience: Node Dev + description: |- + This includes a critical fix for debug release versions of litep2p (which are running in Kusama as validators). + + While at it, we have stopped the oncall pain of alerts around `incoming_connections_total`. We can rethink the metric exposure of litep2p in Q1. + +crates: +- name: sc-network + bump: minor diff --git a/prdoc/pr_6681.prdoc b/prdoc/pr_6681.prdoc new file mode 100644 index 000000000000..93a967d4a66c --- /dev/null +++ b/prdoc/pr_6681.prdoc @@ -0,0 +1,406 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: update scale-info to 2.11.6 + +doc: + - audience: Runtime Dev + description: | + Updates scale-info to 2.11.6 from 2.11.5.
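A self-contained sketch of the autoref-based specialization pattern that pr_6665 above relies on; the trait and type names here are illustrative, not the macro-generated code:

use core::marker::PhantomData;

// Marker implemented only by runtimes that declare runtime apis.
trait HasApis {
    fn metadata() -> &'static str;
}

struct Wrap<T>(PhantomData<T>);

// Specialized impl: applies only when `T: HasApis`.
trait Specialized {
    fn resolve(&self) -> &'static str;
}
impl<T: HasApis> Specialized for Wrap<T> {
    fn resolve(&self) -> &'static str {
        T::metadata()
    }
}

// Fallback impl: applies to any `&Wrap<T>`.
trait Fallback {
    fn resolve(&self) -> &'static str;
}
impl<T> Fallback for &Wrap<T> {
    fn resolve(&self) -> &'static str {
        "no runtime apis implemented"
    }
}

struct WithApis;
impl HasApis for WithApis {
    fn metadata() -> &'static str {
        "runtime api metadata"
    }
}
struct WithoutApis;

fn main() {
    // Method resolution prefers the impl on `Wrap<T>` (fewer autorefs)
    // over the impl on `&Wrap<T>`, so this compiles in both cases and
    // picks the specialized impl only when it exists.
    println!("{}", (&Wrap::<WithApis>(PhantomData)).resolve());
    println!("{}", (&Wrap::<WithoutApis>(PhantomData)).resolve());
}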
+ Updated version of scale-info annotates generated code with `allow(deprecated)` + +crates: + - name: bridge-runtime-common + bump: none + - name: bp-header-chain + bump: none + - name: bp-runtime + bump: none + - name: frame-support + bump: none + - name: sp-core + bump: none + - name: sp-trie + bump: none + - name: sp-runtime + bump: none + - name: sp-application-crypto + bump: none + - name: sp-arithmetic + bump: none + - name: sp-weights + bump: none + - name: sp-api + bump: none + - name: sp-metadata-ir + bump: none + - name: sp-version + bump: none + - name: sp-inherents + bump: none + - name: frame-executive + bump: none + - name: frame-system + bump: none + - name: pallet-balances + bump: none + - name: frame-benchmarking + bump: none + - name: pallet-migrations + bump: none + - name: cumulus-pallet-parachain-system + bump: none + - name: cumulus-primitives-core + bump: none + - name: polkadot-core-primitives + bump: none + - name: polkadot-parachain-primitives + bump: none + - name: polkadot-primitives + bump: none + - name: sp-authority-discovery + bump: none + - name: sp-consensus-slots + bump: none + - name: sp-staking + bump: none + - name: staging-xcm + bump: none + - name: cumulus-primitives-parachain-inherent + bump: none + - name: pallet-message-queue + bump: none + - name: polkadot-runtime-common + bump: none + - name: frame-election-provider-support + bump: none + - name: sp-npos-elections + bump: none + - name: sp-consensus-grandpa + bump: none + - name: polkadot-primitives + bump: none + - name: sp-authority-discovery + bump: none + - name: sp-consensus-grandpa + bump: none + - name: sp-genesis-builder + bump: none + - name: sp-consensus-babe + bump: none + - name: sp-mixnet + bump: none + - name: sc-rpc-api + bump: none + - name: sp-session + bump: none + - name: sp-statement-store + bump: none + - name: sp-transaction-storage-proof + bump: none + - name: pallet-asset-rate + bump: none + - name: pallet-authorship + bump: none + - name: pallet-babe + bump: none + - name: pallet-session + bump: none + - name: pallet-timestamp + bump: none + - name: pallet-offences + bump: none + - name: pallet-staking + bump: none + - name: pallet-bags-list + bump: none + - name: pallet-broker + bump: none + - name: pallet-election-provider-multi-phase + bump: none + - name: pallet-fast-unstake + bump: none + - name: pallet-identity + bump: none + - name: pallet-transaction-payment + bump: none + - name: pallet-treasury + bump: none + - name: pallet-utility + bump: none + - name: pallet-collective + bump: none + - name: pallet-root-testing + bump: none + - name: pallet-vesting + bump: none + - name: polkadot-runtime-parachains + bump: none + - name: pallet-authority-discovery + bump: none + - name: pallet-mmr + bump: none + - name: sp-mmr-primitives + bump: none + - name: staging-xcm-executor + bump: none + - name: staging-xcm-builder + bump: none + - name: pallet-asset-conversion + bump: none + - name: pallet-assets + bump: none + - name: pallet-salary + bump: none + - name: pallet-ranked-collective + bump: none + - name: pallet-xcm + bump: none + - name: xcm-runtime-apis + bump: none + - name: pallet-grandpa + bump: none + - name: pallet-indices + bump: none + - name: pallet-sudo + bump: none + - name: sp-consensus-beefy + bump: none + - name: cumulus-primitives-storage-weight-reclaim + bump: none + - name: cumulus-pallet-aura-ext + bump: none + - name: pallet-aura + bump: none + - name: sp-consensus-aura + bump: none + - name: pallet-collator-selection + bump: none + - name: 
pallet-glutton + bump: none + - name: staging-parachain-info + bump: none + - name: westend-runtime + bump: none + - name: frame-metadata-hash-extension + bump: none + - name: frame-system-benchmarking + bump: none + - name: pallet-beefy + bump: none + - name: pallet-beefy-mmr + bump: none + - name: pallet-conviction-voting + bump: none + - name: pallet-scheduler + bump: none + - name: pallet-preimage + bump: none + - name: pallet-delegated-staking + bump: none + - name: pallet-nomination-pools + bump: none + - name: pallet-democracy + bump: none + - name: pallet-elections-phragmen + bump: none + - name: pallet-membership + bump: none + - name: pallet-multisig + bump: none + - name: polkadot-sdk-frame + bump: none + - name: pallet-dev-mode + bump: none + - name: pallet-verify-signature + bump: none + - name: pallet-nomination-pools-benchmarking + bump: none + - name: pallet-offences-benchmarking + bump: none + - name: pallet-im-online + bump: none + - name: pallet-parameters + bump: none + - name: pallet-proxy + bump: none + - name: pallet-recovery + bump: none + - name: pallet-referenda + bump: none + - name: pallet-society + bump: none + - name: pallet-state-trie-migration + bump: none + - name: pallet-whitelist + bump: none + - name: pallet-xcm-benchmarks + bump: none + - name: rococo-runtime + bump: none + - name: pallet-bounties + bump: none + - name: pallet-child-bounties + bump: none + - name: pallet-nis + bump: none + - name: pallet-tips + bump: none + - name: parachains-common + bump: none + - name: pallet-asset-tx-payment + bump: none + - name: cumulus-pallet-xcmp-queue + bump: none + - name: bp-xcm-bridge-hub-router + bump: none + - name: pallet-xcm-bridge-hub-router + bump: none + - name: assets-common + bump: none + - name: bp-messages + bump: none + - name: bp-parachains + bump: none + - name: bp-polkadot-core + bump: none + - name: bp-relayers + bump: none + - name: bp-xcm-bridge-hub + bump: none + - name: bridge-hub-common + bump: none + - name: snowbridge-core + bump: none + - name: snowbridge-beacon-primitives + bump: none + - name: snowbridge-ethereum + bump: none + - name: pallet-bridge-grandpa + bump: none + - name: pallet-bridge-messages + bump: none + - name: pallet-bridge-parachains + bump: none + - name: pallet-bridge-relayers + bump: none + - name: pallet-xcm-bridge-hub + bump: none + - name: cumulus-pallet-dmp-queue + bump: none + - name: cumulus-pallet-solo-to-para + bump: none + - name: cumulus-pallet-xcm + bump: none + - name: cumulus-ping + bump: none + - name: frame-benchmarking-pallet-pov + bump: none + - name: pallet-alliance + bump: none + - name: pallet-asset-conversion-ops + bump: none + - name: pallet-asset-conversion-tx-payment + bump: none + - name: pallet-assets-freezer + bump: none + - name: pallet-atomic-swap + bump: none + - name: pallet-collective-content + bump: none + - name: pallet-contracts + bump: none + - name: pallet-contracts-uapi + bump: none + - name: pallet-insecure-randomness-collective-flip + bump: none + - name: pallet-contracts-mock-network + bump: none + - name: xcm-simulator + bump: none + - name: pallet-core-fellowship + bump: none + - name: pallet-lottery + bump: none + - name: pallet-mixnet + bump: none + - name: pallet-nft-fractionalization + bump: none + - name: pallet-nfts + bump: none + - name: pallet-node-authorization + bump: none + - name: pallet-paged-list + bump: none + - name: pallet-remark + bump: none + - name: pallet-revive + bump: none + - name: pallet-revive-uapi + bump: none + - name: pallet-revive-eth-rpc + 
bump: none + - name: pallet-skip-feeless-payment + bump: none + - name: pallet-revive-mock-network + bump: none + - name: pallet-root-offences + bump: none + - name: pallet-safe-mode + bump: none + - name: pallet-scored-pool + bump: none + - name: pallet-statement + bump: none + - name: pallet-transaction-storage + bump: none + - name: pallet-tx-pause + bump: none + - name: pallet-uniques + bump: none + - name: snowbridge-outbound-queue-merkle-tree + bump: none + - name: snowbridge-pallet-ethereum-client + bump: none + - name: snowbridge-pallet-inbound-queue + bump: none + - name: snowbridge-router-primitives + bump: none + - name: snowbridge-pallet-outbound-queue + bump: none + - name: snowbridge-pallet-system + bump: none + - name: bp-asset-hub-rococo + bump: none + - name: bp-asset-hub-westend + bump: none + - name: bp-polkadot-bulletin + bump: none + - name: asset-hub-rococo-runtime + bump: none + - name: asset-hub-westend-runtime + bump: none + - name: bridge-hub-rococo-runtime + bump: none + - name: bridge-hub-westend-runtime + bump: none + - name: collectives-westend-runtime + bump: none + - name: coretime-rococo-runtime + bump: none + - name: coretime-westend-runtime + bump: none + - name: people-rococo-runtime + bump: none + - name: people-westend-runtime + bump: none + - name: penpal-runtime + bump: none + - name: contracts-rococo-runtime + bump: none + - name: glutton-westend-runtime + bump: none + - name: rococo-parachain-runtime + bump: none + - name: xcm-simulator-example + bump: none \ No newline at end of file diff --git a/prdoc/pr_6695.prdoc b/prdoc/pr_6695.prdoc new file mode 100644 index 000000000000..7a950e8546cd --- /dev/null +++ b/prdoc/pr_6695.prdoc @@ -0,0 +1,8 @@ +title: '[pallet-revive] bugfix decoding 64bit args in the decoder' +doc: +- audience: Runtime Dev + description: The argument index of the next argument is dictated by the size of + the current one. +crates: +- name: pallet-revive-proc-macro + bump: patch diff --git a/substrate/bin/node/testing/Cargo.toml b/substrate/bin/node/testing/Cargo.toml index 16112386ad7c..1972c03a368b 100644 --- a/substrate/bin/node/testing/Cargo.toml +++ b/substrate/bin/node/testing/Cargo.toml @@ -37,7 +37,7 @@ sc-client-api = { workspace = true, default-features = true } sc-client-db = { features = ["rocksdb"], workspace = true, default-features = true } sc-consensus = { workspace = true, default-features = true } sc-executor = { workspace = true, default-features = true } -sc-service = { features = ["rocksdb", "test-helpers"], workspace = true, default-features = true } +sc-service = { features = ["rocksdb"], workspace = true, default-features = true } sp-api = { workspace = true, default-features = true } sp-block-builder = { workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } diff --git a/substrate/client/network/src/litep2p/mod.rs b/substrate/client/network/src/litep2p/mod.rs index 10cf9f4da36d..6d3575fc2b6b 100644 --- a/substrate/client/network/src/litep2p/mod.rs +++ b/substrate/client/network/src/litep2p/mod.rs @@ -986,7 +986,15 @@ impl NetworkBackend for Litep2pNetworkBac let direction = match endpoint { Endpoint::Dialer { .. } => "out", - Endpoint::Listener { .. } => "in", + Endpoint::Listener { .. } => { + // Increment incoming connections counter. + // + // Note: For litep2p these are represented by established negotiated connections, + // while for libp2p (legacy) these represent not-yet-negotiated connections. 
+ metrics.incoming_connections_total.inc(); + + "in" + }, }; metrics.connections_opened_total.with_label_values(&[direction]).inc(); @@ -1058,6 +1066,7 @@ impl NetworkBackend for Litep2pNetworkBac NegotiationError::ParseError(_) => "parse-error", NegotiationError::IoError(_) => "io-error", NegotiationError::WebSocket(_) => "webscoket-error", + NegotiationError::BadSignature => "bad-signature", } }; diff --git a/substrate/client/network/src/litep2p/shim/request_response/mod.rs b/substrate/client/network/src/litep2p/shim/request_response/mod.rs index bfd7a60ef9fe..146f2e4add97 100644 --- a/substrate/client/network/src/litep2p/shim/request_response/mod.rs +++ b/substrate/client/network/src/litep2p/shim/request_response/mod.rs @@ -320,7 +320,7 @@ impl RequestResponseProtocol { &mut self, peer: litep2p::PeerId, request_id: RequestId, - fallback: Option, + _fallback: Option, response: Vec, ) { match self.pending_inbound_responses.remove(&request_id) { @@ -337,10 +337,7 @@ impl RequestResponseProtocol { response.len(), ); - let _ = tx.send(Ok(( - response, - fallback.map_or_else(|| self.protocol.clone(), Into::into), - ))); + let _ = tx.send(Ok((response, self.protocol.clone()))); self.metrics.register_outbound_request_success(started.elapsed()); }, } diff --git a/substrate/client/network/test/Cargo.toml b/substrate/client/network/test/Cargo.toml index ebece1762f29..6340d1dfb2f4 100644 --- a/substrate/client/network/test/Cargo.toml +++ b/substrate/client/network/test/Cargo.toml @@ -33,7 +33,7 @@ sc-network-types = { workspace = true, default-features = true } sc-utils = { workspace = true, default-features = true } sc-network-light = { workspace = true, default-features = true } sc-network-sync = { workspace = true, default-features = true } -sc-service = { features = ["test-helpers"], workspace = true } +sc-service = { workspace = true } sp-blockchain = { workspace = true, default-features = true } sp-consensus = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } diff --git a/substrate/client/rpc-spec-v2/Cargo.toml b/substrate/client/rpc-spec-v2/Cargo.toml index daa805912fb9..70f68436767f 100644 --- a/substrate/client/rpc-spec-v2/Cargo.toml +++ b/substrate/client/rpc-spec-v2/Cargo.toml @@ -42,6 +42,7 @@ log = { workspace = true, default-features = true } futures-util = { workspace = true } rand = { workspace = true, default-features = true } schnellru = { workspace = true } +itertools = { workspace = true } [dev-dependencies] async-trait = { workspace = true } @@ -55,7 +56,7 @@ sp-consensus = { workspace = true, default-features = true } sp-externalities = { workspace = true, default-features = true } sp-maybe-compressed-blob = { workspace = true, default-features = true } sc-block-builder = { workspace = true, default-features = true } -sc-service = { features = ["test-helpers"], workspace = true, default-features = true } +sc-service = { workspace = true, default-features = true } sc-rpc = { workspace = true, default-features = true, features = ["test-helpers"] } assert_matches = { workspace = true } pretty_assertions = { workspace = true } diff --git a/substrate/client/rpc-spec-v2/src/archive/api.rs b/substrate/client/rpc-spec-v2/src/archive/api.rs index b19738304000..a205d0502c93 100644 --- a/substrate/client/rpc-spec-v2/src/archive/api.rs +++ b/substrate/client/rpc-spec-v2/src/archive/api.rs @@ -19,7 +19,9 @@ //! API trait of the archive methods. 
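For readability, here is a sketch of the new storage-diff subscription declared in the hunks below, with its type parameters written out; the generics are inferred from how `archive.rs` and `archive_storage.rs` use these types and are an assumption, not a verbatim copy:

#[subscription(
    name = "archive_unstable_storageDiff" => "archive_unstable_storageDiffEvent",
    unsubscribe = "archive_unstable_storageDiff_stopStorageDiff",
    item = ArchiveStorageDiffEvent,
)]
fn archive_unstable_storage_diff(
    &self,
    hash: Hash,
    // Assumed: items are keyed by hex-encoded `String`s, matching
    // `parse_hex_param` in archive_storage.rs.
    items: Vec<ArchiveStorageDiffItem<String>>,
    previous_hash: Option<Hash>,
);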
use crate::{ - common::events::{ArchiveStorageResult, PaginatedStorageQuery}, + common::events::{ + ArchiveStorageDiffEvent, ArchiveStorageDiffItem, ArchiveStorageEvent, StorageQuery, + }, MethodResult, }; use jsonrpsee::{core::RpcResult, proc_macros::rpc}; @@ -97,11 +99,32 @@ pub trait ArchiveApi { /// # Unstable /// /// This method is unstable and subject to change in the future. - #[method(name = "archive_unstable_storage", blocking)] + #[subscription( + name = "archive_unstable_storage" => "archive_unstable_storageEvent", + unsubscribe = "archive_unstable_stopStorage", + item = ArchiveStorageEvent, + )] fn archive_unstable_storage( &self, hash: Hash, - items: Vec>, + items: Vec>, child_trie: Option, - ) -> RpcResult; + ); + + /// Returns the storage difference between two blocks. + /// + /// # Unstable + /// + /// This method is unstable and can change in minor or patch releases. + #[subscription( + name = "archive_unstable_storageDiff" => "archive_unstable_storageDiffEvent", + unsubscribe = "archive_unstable_storageDiff_stopStorageDiff", + item = ArchiveStorageDiffEvent, + )] + fn archive_unstable_storage_diff( + &self, + hash: Hash, + items: Vec>, + previous_hash: Option, + ); } diff --git a/substrate/client/rpc-spec-v2/src/archive/archive.rs b/substrate/client/rpc-spec-v2/src/archive/archive.rs index dd6c566a76ed..62e44a016241 100644 --- a/substrate/client/rpc-spec-v2/src/archive/archive.rs +++ b/substrate/client/rpc-spec-v2/src/archive/archive.rs @@ -19,17 +19,29 @@ //! API implementation for `archive`. use crate::{ - archive::{error::Error as ArchiveError, ArchiveApiServer}, - common::events::{ArchiveStorageResult, PaginatedStorageQuery}, - hex_string, MethodResult, + archive::{ + archive_storage::ArchiveStorageDiff, error::Error as ArchiveError, ArchiveApiServer, + }, + common::{ + events::{ + ArchiveStorageDiffEvent, ArchiveStorageDiffItem, ArchiveStorageEvent, StorageQuery, + }, + storage::{QueryResult, StorageSubscriptionClient}, + }, + hex_string, MethodResult, SubscriptionTaskExecutor, }; use codec::Encode; -use jsonrpsee::core::{async_trait, RpcResult}; +use futures::FutureExt; +use jsonrpsee::{ + core::{async_trait, RpcResult}, + PendingSubscriptionSink, +}; use sc_client_api::{ Backend, BlockBackend, BlockchainEvents, CallExecutor, ChildInfo, ExecutorProvider, StorageKey, StorageProvider, }; +use sc_rpc::utils::Subscription; use sp_api::{CallApiAt, CallContext}; use sp_blockchain::{ Backend as BlockChainBackend, Error as BlockChainError, HeaderBackend, HeaderMetadata, @@ -41,37 +53,15 @@ use sp_runtime::{ }; use std::{collections::HashSet, marker::PhantomData, sync::Arc}; -use super::archive_storage::ArchiveStorage; - -/// The configuration of [`Archive`]. -pub struct ArchiveConfig { - /// The maximum number of items the `archive_storage` can return for a descendant query before - /// pagination is required. - pub max_descendant_responses: usize, - /// The maximum number of queried items allowed for the `archive_storage` at a time. - pub max_queried_items: usize, -} +use tokio::sync::mpsc; -/// The maximum number of items the `archive_storage` can return for a descendant query before -/// pagination is required. -/// -/// Note: this is identical to the `chainHead` value. -const MAX_DESCENDANT_RESPONSES: usize = 5; +pub(crate) const LOG_TARGET: &str = "rpc-spec-v2::archive"; -/// The maximum number of queried items allowed for the `archive_storage` at a time. +/// The buffer capacity for each storage query. 
/// -/// Note: A queried item can also be a descendant query which can return up to -/// `MAX_DESCENDANT_RESPONSES`. -const MAX_QUERIED_ITEMS: usize = 8; - -impl Default for ArchiveConfig { - fn default() -> Self { - Self { - max_descendant_responses: MAX_DESCENDANT_RESPONSES, - max_queried_items: MAX_QUERIED_ITEMS, - } - } -} +/// This is small because the underlying JSON-RPC server has +/// its down buffer capacity per connection as well. +const STORAGE_QUERY_BUF: usize = 16; /// An API for archive RPC calls. pub struct Archive, Block: BlockT, Client> { @@ -79,13 +69,10 @@ pub struct Archive, Block: BlockT, Client> { client: Arc, /// Backend of the chain. backend: Arc, + /// Executor to spawn subscriptions. + executor: SubscriptionTaskExecutor, /// The hexadecimal encoded hash of the genesis block. genesis_hash: String, - /// The maximum number of items the `archive_storage` can return for a descendant query before - /// pagination is required. - storage_max_descendant_responses: usize, - /// The maximum number of queried items allowed for the `archive_storage` at a time. - storage_max_queried_items: usize, /// Phantom member to pin the block type. _phantom: PhantomData, } @@ -96,17 +83,10 @@ impl, Block: BlockT, Client> Archive { client: Arc, backend: Arc, genesis_hash: GenesisHash, - config: ArchiveConfig, + executor: SubscriptionTaskExecutor, ) -> Self { let genesis_hash = hex_string(&genesis_hash.as_ref()); - Self { - client, - backend, - genesis_hash, - storage_max_descendant_responses: config.max_descendant_responses, - storage_max_queried_items: config.max_queried_items, - _phantom: PhantomData, - } + Self { client, backend, executor, genesis_hash, _phantom: PhantomData } } } @@ -236,46 +216,157 @@ where fn archive_unstable_storage( &self, + pending: PendingSubscriptionSink, hash: Block::Hash, - items: Vec>, + items: Vec>, child_trie: Option, - ) -> RpcResult { - let items = items - .into_iter() - .map(|query| { - let key = StorageKey(parse_hex_param(query.key)?); - let pagination_start_key = query - .pagination_start_key - .map(|key| parse_hex_param(key).map(|key| StorageKey(key))) - .transpose()?; - - // Paginated start key is only supported - if pagination_start_key.is_some() && !query.query_type.is_descendant_query() { - return Err(ArchiveError::InvalidParam( - "Pagination start key is only supported for descendants queries" - .to_string(), - )) + ) { + let mut storage_client = + StorageSubscriptionClient::::new(self.client.clone()); + + let fut = async move { + let Ok(mut sink) = pending.accept().await.map(Subscription::from) else { return }; + + let items = match items + .into_iter() + .map(|query| { + let key = StorageKey(parse_hex_param(query.key)?); + Ok(StorageQuery { key, query_type: query.query_type }) + }) + .collect::, ArchiveError>>() + { + Ok(items) => items, + Err(error) => { + let _ = sink.send(&ArchiveStorageEvent::err(error.to_string())); + return + }, + }; + + let child_trie = child_trie.map(|child_trie| parse_hex_param(child_trie)).transpose(); + let child_trie = match child_trie { + Ok(child_trie) => child_trie.map(ChildInfo::new_default_from_vec), + Err(error) => { + let _ = sink.send(&ArchiveStorageEvent::err(error.to_string())); + return + }, + }; + + let (tx, mut rx) = tokio::sync::mpsc::channel(STORAGE_QUERY_BUF); + let storage_fut = storage_client.generate_events(hash, items, child_trie, tx); + + // We don't care about the return value of this join: + // - process_events might encounter an error (if the client disconnected) + // - storage_fut might 
encounter an error while processing a trie queries and + // the error is propagated via the sink. + let _ = futures::future::join(storage_fut, process_storage_events(&mut rx, &mut sink)) + .await; + }; + + self.executor.spawn("substrate-rpc-subscription", Some("rpc"), fut.boxed()); + } + + fn archive_unstable_storage_diff( + &self, + pending: PendingSubscriptionSink, + hash: Block::Hash, + items: Vec>, + previous_hash: Option, + ) { + let storage_client = ArchiveStorageDiff::new(self.client.clone()); + let client = self.client.clone(); + + log::trace!(target: LOG_TARGET, "Storage diff subscription started"); + + let fut = async move { + let Ok(mut sink) = pending.accept().await.map(Subscription::from) else { return }; + + let previous_hash = if let Some(previous_hash) = previous_hash { + previous_hash + } else { + let Ok(Some(current_header)) = client.header(hash) else { + let message = format!("Block header is not present: {hash}"); + let _ = sink.send(&ArchiveStorageDiffEvent::err(message)).await; + return + }; + *current_header.parent_hash() + }; + + let (tx, mut rx) = tokio::sync::mpsc::channel(STORAGE_QUERY_BUF); + let storage_fut = + storage_client.handle_trie_queries(hash, items, previous_hash, tx.clone()); + + // We don't care about the return value of this join: + // - process_events might encounter an error (if the client disconnected) + // - storage_fut might encounter an error while processing a trie queries and + // the error is propagated via the sink. + let _ = + futures::future::join(storage_fut, process_storage_diff_events(&mut rx, &mut sink)) + .await; + }; + + self.executor.spawn("substrate-rpc-subscription", Some("rpc"), fut.boxed()); + } +} + +/// Sends all the events of the storage_diff method to the sink. +async fn process_storage_diff_events( + rx: &mut mpsc::Receiver, + sink: &mut Subscription, +) { + loop { + tokio::select! { + _ = sink.closed() => { + return + }, + + maybe_event = rx.recv() => { + let Some(event) = maybe_event else { + break; + }; + + if event.is_done() { + log::debug!(target: LOG_TARGET, "Finished processing partial trie query"); + } else if event.is_err() { + log::debug!(target: LOG_TARGET, "Error encountered while processing partial trie query"); } - Ok(PaginatedStorageQuery { - key, - query_type: query.query_type, - pagination_start_key, - }) - }) - .collect::, ArchiveError>>()?; + if sink.send(&event).await.is_err() { + return + } + } + } + } +} - let child_trie = child_trie - .map(|child_trie| parse_hex_param(child_trie)) - .transpose()? - .map(ChildInfo::new_default_from_vec); +/// Sends all the events of the storage method to the sink. +async fn process_storage_events(rx: &mut mpsc::Receiver, sink: &mut Subscription) { + loop { + tokio::select! 
{ + _ = sink.closed() => { + break + } + + maybe_storage = rx.recv() => { + let Some(event) = maybe_storage else { + break; + }; - let storage_client = ArchiveStorage::new( - self.client.clone(), - self.storage_max_descendant_responses, - self.storage_max_queried_items, - ); + match event { + Ok(None) => continue, - Ok(storage_client.handle_query(hash, items, child_trie)) + Ok(Some(event)) => + if sink.send(&ArchiveStorageEvent::result(event)).await.is_err() { + return + }, + + Err(error) => { + let _ = sink.send(&ArchiveStorageEvent::err(error)).await; + return + } + } + } + } } + + let _ = sink.send(&ArchiveStorageEvent::StorageDone).await; } diff --git a/substrate/client/rpc-spec-v2/src/archive/archive_storage.rs b/substrate/client/rpc-spec-v2/src/archive/archive_storage.rs index 26e7c299de41..390db765a48f 100644 --- a/substrate/client/rpc-spec-v2/src/archive/archive_storage.rs +++ b/substrate/client/rpc-spec-v2/src/archive/archive_storage.rs @@ -18,112 +18,832 @@ //! Implementation of the `archive_storage` method. -use std::sync::Arc; +use std::{ + collections::{hash_map::Entry, HashMap}, + sync::Arc, +}; +use itertools::Itertools; use sc_client_api::{Backend, ChildInfo, StorageKey, StorageProvider}; use sp_runtime::traits::Block as BlockT; -use crate::common::{ - events::{ArchiveStorageResult, PaginatedStorageQuery, StorageQueryType}, - storage::{IterQueryType, QueryIter, Storage}, +use super::error::Error as ArchiveError; +use crate::{ + archive::archive::LOG_TARGET, + common::{ + events::{ + ArchiveStorageDiffEvent, ArchiveStorageDiffItem, ArchiveStorageDiffOperationType, + ArchiveStorageDiffResult, ArchiveStorageDiffType, StorageResult, + }, + storage::Storage, + }, }; +use tokio::sync::mpsc; + +/// Parse hex-encoded string parameter as raw bytes. +/// +/// If the parsing fails, returns an error propagated to the RPC method. +pub fn parse_hex_param(param: String) -> Result, ArchiveError> { + // Methods can accept empty parameters. + if param.is_empty() { + return Ok(Default::default()) + } + + array_bytes::hex2bytes(¶m).map_err(|_| ArchiveError::InvalidParam(param)) +} + +#[derive(Debug, PartialEq, Clone)] +pub struct DiffDetails { + key: StorageKey, + return_type: ArchiveStorageDiffType, + child_trie_key: Option, + child_trie_key_string: Option, +} + +/// The type of storage query. +#[derive(Debug, PartialEq, Clone, Copy)] +enum FetchStorageType { + /// Only fetch the value. + Value, + /// Only fetch the hash. + Hash, + /// Fetch both the value and the hash. + Both, +} -/// Generates the events of the `archive_storage` method. -pub struct ArchiveStorage { - /// Storage client. +/// The return value of the `fetch_storage` method. +#[derive(Debug, PartialEq, Clone)] +enum FetchedStorage { + /// Storage value under a key. + Value(StorageResult), + /// Storage hash under a key. + Hash(StorageResult), + /// Both storage value and hash under a key. + Both { value: StorageResult, hash: StorageResult }, +} + +pub struct ArchiveStorageDiff { client: Storage, - /// The maximum number of responses the API can return for a descendant query at a time. - storage_max_descendant_responses: usize, - /// The maximum number of queried items allowed for the `archive_storage` at a time. - storage_max_queried_items: usize, } -impl ArchiveStorage { - /// Constructs a new [`ArchiveStorage`]. 
- pub fn new( - client: Arc, - storage_max_descendant_responses: usize, - storage_max_queried_items: usize, - ) -> Self { - Self { - client: Storage::new(client), - storage_max_descendant_responses, - storage_max_queried_items, - } +impl ArchiveStorageDiff { + pub fn new(client: Arc) -> Self { + Self { client: Storage::new(client) } } } -impl ArchiveStorage +impl ArchiveStorageDiff where Block: BlockT + 'static, BE: Backend + 'static, - Client: StorageProvider + 'static, + Client: StorageProvider + Send + Sync + 'static, { - /// Generate the response of the `archive_storage` method. - pub fn handle_query( + /// Fetch the storage from the given key. + fn fetch_storage( &self, hash: Block::Hash, - mut items: Vec>, - child_key: Option, - ) -> ArchiveStorageResult { - let discarded_items = items.len().saturating_sub(self.storage_max_queried_items); - items.truncate(self.storage_max_queried_items); + key: StorageKey, + maybe_child_trie: Option, + ty: FetchStorageType, + ) -> Result, String> { + match ty { + FetchStorageType::Value => { + let result = self.client.query_value(hash, &key, maybe_child_trie.as_ref())?; + + Ok(result.map(FetchedStorage::Value)) + }, + + FetchStorageType::Hash => { + let result = self.client.query_hash(hash, &key, maybe_child_trie.as_ref())?; + + Ok(result.map(FetchedStorage::Hash)) + }, + + FetchStorageType::Both => { + let Some(value) = self.client.query_value(hash, &key, maybe_child_trie.as_ref())? + else { + return Ok(None); + }; + + let Some(hash) = self.client.query_hash(hash, &key, maybe_child_trie.as_ref())? + else { + return Ok(None); + }; + + Ok(Some(FetchedStorage::Both { value, hash })) + }, + } + } + + /// Check if the key belongs to the provided query items. + /// + /// A key belongs to the query items when: + /// - the provided key is a prefix of the key in the query items. + /// - the query items are empty. + /// + /// Returns an optional `FetchStorageType` based on the query items. + /// If the key does not belong to the query items, returns `None`. + fn belongs_to_query(key: &StorageKey, items: &[DiffDetails]) -> Option { + // User has requested all keys, by default this fallbacks to fetching the value. 
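// Illustrative example (assumed data): for query items
//   [ { key: 0x01, Value }, { key: 0x01, Hash } ]
// a trie key 0x01ab is prefixed by 0x01 for both items, so both flags
// end up set and `Some(FetchStorageType::Both)` is returned, while a
// key 0x02ab matches neither item and yields `None`, so the caller
// skips it.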
+ if items.is_empty() { + return Some(FetchStorageType::Value) + } + + let mut value = false; + let mut hash = false; - let mut storage_results = Vec::with_capacity(items.len()); for item in items { - match item.query_type { - StorageQueryType::Value => { - match self.client.query_value(hash, &item.key, child_key.as_ref()) { - Ok(Some(value)) => storage_results.push(value), - Ok(None) => continue, - Err(error) => return ArchiveStorageResult::err(error), - } - }, - StorageQueryType::Hash => - match self.client.query_hash(hash, &item.key, child_key.as_ref()) { - Ok(Some(value)) => storage_results.push(value), - Ok(None) => continue, - Err(error) => return ArchiveStorageResult::err(error), - }, - StorageQueryType::ClosestDescendantMerkleValue => - match self.client.query_merkle_value(hash, &item.key, child_key.as_ref()) { - Ok(Some(value)) => storage_results.push(value), - Ok(None) => continue, - Err(error) => return ArchiveStorageResult::err(error), - }, - StorageQueryType::DescendantsValues => { - match self.client.query_iter_pagination( - QueryIter { - query_key: item.key, - ty: IterQueryType::Value, - pagination_start_key: item.pagination_start_key, - }, + if key.as_ref().starts_with(&item.key.as_ref()) { + match item.return_type { + ArchiveStorageDiffType::Value => value = true, + ArchiveStorageDiffType::Hash => hash = true, + } + } + } + + match (value, hash) { + (true, true) => Some(FetchStorageType::Both), + (true, false) => Some(FetchStorageType::Value), + (false, true) => Some(FetchStorageType::Hash), + (false, false) => None, + } + } + + /// Send the provided result to the `tx` sender. + /// + /// Returns `false` if the sender has been closed. + fn send_result( + tx: &mpsc::Sender, + result: FetchedStorage, + operation_type: ArchiveStorageDiffOperationType, + child_trie_key: Option, + ) -> bool { + let items = match result { + FetchedStorage::Value(storage_result) | FetchedStorage::Hash(storage_result) => + vec![storage_result], + FetchedStorage::Both { value, hash } => vec![value, hash], + }; + + for item in items { + let res = ArchiveStorageDiffEvent::StorageDiff(ArchiveStorageDiffResult { + key: item.key, + result: item.result, + operation_type, + child_trie_key: child_trie_key.clone(), + }); + if tx.blocking_send(res).is_err() { + return false + } + } + + true + } + + fn handle_trie_queries_inner( + &self, + hash: Block::Hash, + previous_hash: Block::Hash, + items: Vec, + tx: &mpsc::Sender, + ) -> Result<(), String> { + // Parse the child trie key as `ChildInfo` and `String`. + let maybe_child_trie = items.first().and_then(|item| item.child_trie_key.clone()); + let maybe_child_trie_str = + items.first().and_then(|item| item.child_trie_key_string.clone()); + + // Iterator over the current block and previous block + // at the same time to compare the keys. This approach effectively + // leverages backpressure to avoid memory consumption. 
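// Worked example of the merge-join performed below (assumed keys):
//   current block keys:  [a, b,    d]
//   previous block keys: [a,    c, d]
// Walking both sorted iterators in lock-step yields
//   Equal(a)   -> "modified" candidate, settled by comparing values,
//   Added(b)   -> key only present in the current block,
//   Deleted(c) -> key only present in the previous block,
//   Equal(d)   -> "modified" candidate.
// Neither key set is ever collected into memory; keys are pulled on
// demand, which is the backpressure mentioned above.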
+ let keys_iter = self.client.raw_keys_iter(hash, maybe_child_trie.clone())?; + let previous_keys_iter = + self.client.raw_keys_iter(previous_hash, maybe_child_trie.clone())?; + + let mut diff_iter = lexicographic_diff(keys_iter, previous_keys_iter); + + while let Some(item) = diff_iter.next() { + let (operation_type, key) = match item { + Diff::Added(key) => (ArchiveStorageDiffOperationType::Added, key), + Diff::Deleted(key) => (ArchiveStorageDiffOperationType::Deleted, key), + Diff::Equal(key) => (ArchiveStorageDiffOperationType::Modified, key), + }; + + let Some(fetch_type) = Self::belongs_to_query(&key, &items) else { + // The key does not belong the the query items. + continue; + }; + + let maybe_result = match operation_type { + ArchiveStorageDiffOperationType::Added => + self.fetch_storage(hash, key.clone(), maybe_child_trie.clone(), fetch_type)?, + ArchiveStorageDiffOperationType::Deleted => self.fetch_storage( + previous_hash, + key.clone(), + maybe_child_trie.clone(), + fetch_type, + )?, + ArchiveStorageDiffOperationType::Modified => { + let Some(storage_result) = self.fetch_storage( hash, - child_key.as_ref(), - self.storage_max_descendant_responses, - ) { - Ok((results, _)) => storage_results.extend(results), - Err(error) => return ArchiveStorageResult::err(error), + key.clone(), + maybe_child_trie.clone(), + fetch_type, + )? + else { + continue + }; + + let Some(previous_storage_result) = self.fetch_storage( + previous_hash, + key.clone(), + maybe_child_trie.clone(), + fetch_type, + )? + else { + continue + }; + + // For modified records we need to check the actual storage values. + if storage_result == previous_storage_result { + continue } + + Some(storage_result) }, - StorageQueryType::DescendantsHashes => { - match self.client.query_iter_pagination( - QueryIter { - query_key: item.key, - ty: IterQueryType::Hash, - pagination_start_key: item.pagination_start_key, - }, - hash, - child_key.as_ref(), - self.storage_max_descendant_responses, - ) { - Ok((results, _)) => storage_results.extend(results), - Err(error) => return ArchiveStorageResult::err(error), - } + }; + + if let Some(storage_result) = maybe_result { + if !Self::send_result( + &tx, + storage_result, + operation_type, + maybe_child_trie_str.clone(), + ) { + return Ok(()) + } + } + } + + Ok(()) + } + + /// This method will iterate over the keys of the main trie or a child trie and fetch the + /// given keys. The fetched keys will be sent to the provided `tx` sender to leverage + /// the backpressure mechanism. + pub async fn handle_trie_queries( + &self, + hash: Block::Hash, + items: Vec>, + previous_hash: Block::Hash, + tx: mpsc::Sender, + ) -> Result<(), tokio::task::JoinError> { + let this = ArchiveStorageDiff { client: self.client.clone() }; + + tokio::task::spawn_blocking(move || { + // Deduplicate the items. + let mut trie_items = match deduplicate_storage_diff_items(items) { + Ok(items) => items, + Err(error) => { + let _ = tx.blocking_send(ArchiveStorageDiffEvent::err(error.to_string())); + return }, }; + // Default to using the main storage trie if no items are provided. 
+ if trie_items.is_empty() { + trie_items.push(Vec::new()); + } + log::trace!(target: LOG_TARGET, "Storage diff deduplicated items: {:?}", trie_items); + + for items in trie_items { + log::trace!( + target: LOG_TARGET, + "handle_trie_queries: hash={:?}, previous_hash={:?}, items={:?}", + hash, + previous_hash, + items + ); + + let result = this.handle_trie_queries_inner(hash, previous_hash, items, &tx); + + if let Err(error) = result { + log::trace!( + target: LOG_TARGET, + "handle_trie_queries: sending error={:?}", + error, + ); + + let _ = tx.blocking_send(ArchiveStorageDiffEvent::err(error)); + + return + } else { + log::trace!( + target: LOG_TARGET, + "handle_trie_queries: sending storage diff done", + ); + } + } + + let _ = tx.blocking_send(ArchiveStorageDiffEvent::StorageDiffDone); + }) + .await?; + + Ok(()) + } +} + +/// The result of the `lexicographic_diff` method. +#[derive(Debug, PartialEq)] +enum Diff { + Added(T), + Deleted(T), + Equal(T), +} + +/// Compare two iterators lexicographically and return the differences. +fn lexicographic_diff( + mut left: LeftIter, + mut right: RightIter, +) -> impl Iterator> +where + T: Ord, + LeftIter: Iterator, + RightIter: Iterator, +{ + let mut a = left.next(); + let mut b = right.next(); + + core::iter::from_fn(move || match (a.take(), b.take()) { + (Some(a_value), Some(b_value)) => + if a_value < b_value { + b = Some(b_value); + a = left.next(); + + Some(Diff::Added(a_value)) + } else if a_value > b_value { + a = Some(a_value); + b = right.next(); + + Some(Diff::Deleted(b_value)) + } else { + a = left.next(); + b = right.next(); + + Some(Diff::Equal(a_value)) + }, + (Some(a_value), None) => { + a = left.next(); + Some(Diff::Added(a_value)) + }, + (None, Some(b_value)) => { + b = right.next(); + Some(Diff::Deleted(b_value)) + }, + (None, None) => None, + }) +} + +/// Deduplicate the provided items and return a list of `DiffDetails`. +/// +/// Each list corresponds to a single child trie or the main trie. +fn deduplicate_storage_diff_items( + items: Vec>, +) -> Result>, ArchiveError> { + let mut deduplicated: HashMap, Vec> = HashMap::new(); + + for diff_item in items { + // Ensure the provided hex keys are valid before deduplication. + let key = StorageKey(parse_hex_param(diff_item.key)?); + let child_trie_key_string = diff_item.child_trie_key.clone(); + let child_trie_key = diff_item + .child_trie_key + .map(|child_trie_key| parse_hex_param(child_trie_key)) + .transpose()? + .map(ChildInfo::new_default_from_vec); + + let diff_item = DiffDetails { + key, + return_type: diff_item.return_type, + child_trie_key: child_trie_key.clone(), + child_trie_key_string, + }; + + match deduplicated.entry(child_trie_key.clone()) { + Entry::Occupied(mut entry) => { + let mut should_insert = true; + + for existing in entry.get() { + // This points to a different return type. + if existing.return_type != diff_item.return_type { + continue + } + // Keys and return types are identical. + if existing.key == diff_item.key { + should_insert = false; + break + } + + // The following two conditions ensure that we keep the shortest key. + + // The current key is a longer prefix of the existing key. + if diff_item.key.as_ref().starts_with(&existing.key.as_ref()) { + should_insert = false; + break + } + + // The existing key is a longer prefix of the current key. + // We need to keep the current key and remove the existing one. 
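// Illustrative example (assumed data): with an existing item keyed
// "0x01ff" and an incoming item keyed "0x01" of the same return type,
// "0x01" already covers every key that "0x01ff" covers, so "0x01ff"
// is removed and "0x01" is kept (see the
// `dedup_with_shorter_key_reverse_order` test below).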
+ if existing.key.as_ref().starts_with(&diff_item.key.as_ref()) { + let to_remove = existing.clone(); + entry.get_mut().retain(|item| item != &to_remove); + break; + } + } + + if should_insert { + entry.get_mut().push(diff_item); + } + }, + Entry::Vacant(entry) => { + entry.insert(vec![diff_item]); + }, } + } + + Ok(deduplicated + .into_iter() + .sorted_by_key(|(child_trie_key, _)| child_trie_key.clone()) + .map(|(_, values)| values) + .collect()) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn dedup_empty() { + let items = vec![]; + let result = deduplicate_storage_diff_items(items).unwrap(); + assert!(result.is_empty()); + } + + #[test] + fn dedup_single() { + let items = vec![ArchiveStorageDiffItem { + key: "0x01".into(), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + }]; + let result = deduplicate_storage_diff_items(items).unwrap(); + assert_eq!(result.len(), 1); + assert_eq!(result[0].len(), 1); + + let expected = DiffDetails { + key: StorageKey(vec![1]), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + child_trie_key_string: None, + }; + assert_eq!(result[0][0], expected); + } + + #[test] + fn dedup_with_different_keys() { + let items = vec![ + ArchiveStorageDiffItem { + key: "0x01".into(), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + }, + ArchiveStorageDiffItem { + key: "0x02".into(), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + }, + ]; + let result = deduplicate_storage_diff_items(items).unwrap(); + assert_eq!(result.len(), 1); + assert_eq!(result[0].len(), 2); + + let expected = vec![ + DiffDetails { + key: StorageKey(vec![1]), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + child_trie_key_string: None, + }, + DiffDetails { + key: StorageKey(vec![2]), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + child_trie_key_string: None, + }, + ]; + assert_eq!(result[0], expected); + } + + #[test] + fn dedup_with_same_keys() { + // Identical keys. + let items = vec![ + ArchiveStorageDiffItem { + key: "0x01".into(), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + }, + ArchiveStorageDiffItem { + key: "0x01".into(), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + }, + ]; + let result = deduplicate_storage_diff_items(items).unwrap(); + assert_eq!(result.len(), 1); + assert_eq!(result[0].len(), 1); + + let expected = vec![DiffDetails { + key: StorageKey(vec![1]), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + child_trie_key_string: None, + }]; + assert_eq!(result[0], expected); + } + + #[test] + fn dedup_with_same_prefix() { + // Identical keys. 
+ let items = vec![ + ArchiveStorageDiffItem { + key: "0x01".into(), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + }, + ArchiveStorageDiffItem { + key: "0x01ff".into(), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + }, + ]; + let result = deduplicate_storage_diff_items(items).unwrap(); + assert_eq!(result.len(), 1); + assert_eq!(result[0].len(), 1); + + let expected = vec![DiffDetails { + key: StorageKey(vec![1]), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + child_trie_key_string: None, + }]; + assert_eq!(result[0], expected); + } + + #[test] + fn dedup_with_different_return_types() { + let items = vec![ + ArchiveStorageDiffItem { + key: "0x01".into(), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + }, + ArchiveStorageDiffItem { + key: "0x01".into(), + return_type: ArchiveStorageDiffType::Hash, + child_trie_key: None, + }, + ]; + let result = deduplicate_storage_diff_items(items).unwrap(); + assert_eq!(result.len(), 1); + assert_eq!(result[0].len(), 2); + + let expected = vec![ + DiffDetails { + key: StorageKey(vec![1]), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + child_trie_key_string: None, + }, + DiffDetails { + key: StorageKey(vec![1]), + return_type: ArchiveStorageDiffType::Hash, + child_trie_key: None, + child_trie_key_string: None, + }, + ]; + assert_eq!(result[0], expected); + } + + #[test] + fn dedup_with_different_child_tries() { + let items = vec![ + ArchiveStorageDiffItem { + key: "0x01".into(), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: Some("0x01".into()), + }, + ArchiveStorageDiffItem { + key: "0x01".into(), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: Some("0x02".into()), + }, + ]; + let result = deduplicate_storage_diff_items(items).unwrap(); + assert_eq!(result.len(), 2); + assert_eq!(result[0].len(), 1); + assert_eq!(result[1].len(), 1); + + let expected = vec![ + vec![DiffDetails { + key: StorageKey(vec![1]), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: Some(ChildInfo::new_default_from_vec(vec![1])), + child_trie_key_string: Some("0x01".into()), + }], + vec![DiffDetails { + key: StorageKey(vec![1]), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: Some(ChildInfo::new_default_from_vec(vec![2])), + child_trie_key_string: Some("0x02".into()), + }], + ]; + assert_eq!(result, expected); + } + + #[test] + fn dedup_with_same_child_tries() { + let items = vec![ + ArchiveStorageDiffItem { + key: "0x01".into(), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: Some("0x01".into()), + }, + ArchiveStorageDiffItem { + key: "0x01".into(), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: Some("0x01".into()), + }, + ]; + let result = deduplicate_storage_diff_items(items).unwrap(); + assert_eq!(result.len(), 1); + assert_eq!(result[0].len(), 1); + + let expected = vec![DiffDetails { + key: StorageKey(vec![1]), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: Some(ChildInfo::new_default_from_vec(vec![1])), + child_trie_key_string: Some("0x01".into()), + }]; + assert_eq!(result[0], expected); + } + + #[test] + fn dedup_with_shorter_key_reverse_order() { + let items = vec![ + ArchiveStorageDiffItem { + key: "0x01ff".into(), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + }, + ArchiveStorageDiffItem { + key: "0x01".into(), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + }, + ]; + let result = 
deduplicate_storage_diff_items(items).unwrap(); + assert_eq!(result.len(), 1); + assert_eq!(result[0].len(), 1); + + let expected = vec![DiffDetails { + key: StorageKey(vec![1]), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + child_trie_key_string: None, + }]; + assert_eq!(result[0], expected); + } + + #[test] + fn dedup_multiple_child_tries() { + let items = vec![ + ArchiveStorageDiffItem { + key: "0x02".into(), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + }, + ArchiveStorageDiffItem { + key: "0x01".into(), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: Some("0x01".into()), + }, + ArchiveStorageDiffItem { + key: "0x02".into(), + return_type: ArchiveStorageDiffType::Hash, + child_trie_key: Some("0x01".into()), + }, + ArchiveStorageDiffItem { + key: "0x01".into(), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: Some("0x02".into()), + }, + ArchiveStorageDiffItem { + key: "0x01".into(), + return_type: ArchiveStorageDiffType::Hash, + child_trie_key: Some("0x02".into()), + }, + ArchiveStorageDiffItem { + key: "0x01ff".into(), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: Some("0x02".into()), + }, + ]; + + let result = deduplicate_storage_diff_items(items).unwrap(); + + let expected = vec![ + vec![DiffDetails { + key: StorageKey(vec![2]), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + child_trie_key_string: None, + }], + vec![ + DiffDetails { + key: StorageKey(vec![1]), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: Some(ChildInfo::new_default_from_vec(vec![1])), + child_trie_key_string: Some("0x01".into()), + }, + DiffDetails { + key: StorageKey(vec![2]), + return_type: ArchiveStorageDiffType::Hash, + child_trie_key: Some(ChildInfo::new_default_from_vec(vec![1])), + child_trie_key_string: Some("0x01".into()), + }, + ], + vec![ + DiffDetails { + key: StorageKey(vec![1]), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: Some(ChildInfo::new_default_from_vec(vec![2])), + child_trie_key_string: Some("0x02".into()), + }, + DiffDetails { + key: StorageKey(vec![1]), + return_type: ArchiveStorageDiffType::Hash, + child_trie_key: Some(ChildInfo::new_default_from_vec(vec![2])), + child_trie_key_string: Some("0x02".into()), + }, + ], + ]; + + assert_eq!(result, expected); + } + + #[test] + fn test_lexicographic_diff() { + let left = vec![1, 2, 3, 4, 5]; + let right = vec![2, 3, 4, 5, 6]; + + let diff = lexicographic_diff(left.into_iter(), right.into_iter()).collect::>(); + let expected = vec![ + Diff::Added(1), + Diff::Equal(2), + Diff::Equal(3), + Diff::Equal(4), + Diff::Equal(5), + Diff::Deleted(6), + ]; + assert_eq!(diff, expected); + } + + #[test] + fn test_lexicographic_diff_one_side_empty() { + let left = vec![]; + let right = vec![1, 2, 3, 4, 5, 6]; + + let diff = lexicographic_diff(left.into_iter(), right.into_iter()).collect::>(); + let expected = vec![ + Diff::Deleted(1), + Diff::Deleted(2), + Diff::Deleted(3), + Diff::Deleted(4), + Diff::Deleted(5), + Diff::Deleted(6), + ]; + assert_eq!(diff, expected); + + let left = vec![1, 2, 3, 4, 5, 6]; + let right = vec![]; - ArchiveStorageResult::ok(storage_results, discarded_items) + let diff = lexicographic_diff(left.into_iter(), right.into_iter()).collect::>(); + let expected = vec![ + Diff::Added(1), + Diff::Added(2), + Diff::Added(3), + Diff::Added(4), + Diff::Added(5), + Diff::Added(6), + ]; + assert_eq!(diff, expected); } } diff --git a/substrate/client/rpc-spec-v2/src/archive/mod.rs 
b/substrate/client/rpc-spec-v2/src/archive/mod.rs index 5f020c203eab..14fa104c113a 100644 --- a/substrate/client/rpc-spec-v2/src/archive/mod.rs +++ b/substrate/client/rpc-spec-v2/src/archive/mod.rs @@ -32,4 +32,4 @@ pub mod archive; pub mod error; pub use api::ArchiveApiServer; -pub use archive::{Archive, ArchiveConfig}; +pub use archive::Archive; diff --git a/substrate/client/rpc-spec-v2/src/archive/tests.rs b/substrate/client/rpc-spec-v2/src/archive/tests.rs index 078016f5b3e2..cddaafde6659 100644 --- a/substrate/client/rpc-spec-v2/src/archive/tests.rs +++ b/substrate/client/rpc-spec-v2/src/archive/tests.rs @@ -18,24 +18,25 @@ use crate::{ common::events::{ - ArchiveStorageMethodOk, ArchiveStorageResult, PaginatedStorageQuery, StorageQueryType, - StorageResultType, + ArchiveStorageDiffEvent, ArchiveStorageDiffItem, ArchiveStorageDiffOperationType, + ArchiveStorageDiffResult, ArchiveStorageDiffType, ArchiveStorageEvent, StorageQuery, + StorageQueryType, StorageResult, StorageResultType, }, hex_string, MethodResult, }; -use super::{ - archive::{Archive, ArchiveConfig}, - *, -}; +use super::{archive::Archive, *}; use assert_matches::assert_matches; use codec::{Decode, Encode}; use jsonrpsee::{ - core::EmptyServerParams as EmptyParams, rpc_params, MethodsError as Error, RpcModule, + core::{server::Subscription as RpcSubscription, EmptyServerParams as EmptyParams}, + rpc_params, MethodsError as Error, RpcModule, }; + use sc_block_builder::BlockBuilderBuilder; use sc_client_api::ChildInfo; +use sc_rpc::testing::TokioTestExecutor; use sp_blockchain::HeaderBackend; use sp_consensus::BlockOrigin; use sp_core::{Blake2Hasher, Hasher}; @@ -51,8 +52,6 @@ use substrate_test_runtime_client::{ const CHAIN_GENESIS: [u8; 32] = [0; 32]; const INVALID_HASH: [u8; 32] = [1; 32]; -const MAX_PAGINATION_LIMIT: usize = 5; -const MAX_QUERIED_LIMIT: usize = 5; const KEY: &[u8] = b":mock"; const VALUE: &[u8] = b"hello world"; const CHILD_STORAGE_KEY: &[u8] = b"child"; @@ -61,10 +60,7 @@ const CHILD_VALUE: &[u8] = b"child value"; type Header = substrate_test_runtime_client::runtime::Header; type Block = substrate_test_runtime_client::runtime::Block; -fn setup_api( - max_descendant_responses: usize, - max_queried_items: usize, -) -> (Arc>, RpcModule>>) { +fn setup_api() -> (Arc>, RpcModule>>) { let child_info = ChildInfo::new_default(CHILD_STORAGE_KEY); let builder = TestClientBuilder::new().add_extra_child_storage( &child_info, @@ -78,16 +74,25 @@ fn setup_api( client.clone(), backend, CHAIN_GENESIS, - ArchiveConfig { max_descendant_responses, max_queried_items }, + Arc::new(TokioTestExecutor::default()), ) .into_rpc(); (client, api) } +async fn get_next_event(sub: &mut RpcSubscription) -> T { + let (event, _sub_id) = tokio::time::timeout(std::time::Duration::from_secs(60), sub.next()) + .await + .unwrap() + .unwrap() + .unwrap(); + event +} + #[tokio::test] async fn archive_genesis() { - let (_client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); + let (_client, api) = setup_api(); let genesis: String = api.call("archive_unstable_genesisHash", EmptyParams::new()).await.unwrap(); @@ -96,7 +101,7 @@ async fn archive_genesis() { #[tokio::test] async fn archive_body() { - let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); + let (client, api) = setup_api(); // Invalid block hash. 
let invalid_hash = hex_string(&INVALID_HASH); @@ -130,7 +135,7 @@ async fn archive_body() { #[tokio::test] async fn archive_header() { - let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); + let (client, api) = setup_api(); // Invalid block hash. let invalid_hash = hex_string(&INVALID_HASH); @@ -164,7 +169,7 @@ async fn archive_header() { #[tokio::test] async fn archive_finalized_height() { - let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); + let (client, api) = setup_api(); let client_height: u32 = client.info().finalized_number.saturated_into(); @@ -176,7 +181,7 @@ async fn archive_finalized_height() { #[tokio::test] async fn archive_hash_by_height() { - let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); + let (client, api) = setup_api(); // Genesis height. let hashes: Vec = api.call("archive_unstable_hashByHeight", [0]).await.unwrap(); @@ -282,7 +287,7 @@ async fn archive_hash_by_height() { #[tokio::test] async fn archive_call() { - let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); + let (client, api) = setup_api(); let invalid_hash = hex_string(&INVALID_HASH); // Invalid parameter (non-hex). @@ -341,7 +346,7 @@ async fn archive_call() { #[tokio::test] async fn archive_storage_hashes_values() { - let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); + let (client, api) = setup_api(); let block = BlockBuilderBuilder::new(&*client) .on_parent_block(client.chain_info().genesis_hash) @@ -355,42 +360,23 @@ async fn archive_storage_hashes_values() { let block_hash = format!("{:?}", block.header.hash()); let key = hex_string(&KEY); - let items: Vec> = vec![ - PaginatedStorageQuery { - key: key.clone(), - query_type: StorageQueryType::DescendantsHashes, - pagination_start_key: None, - }, - PaginatedStorageQuery { - key: key.clone(), - query_type: StorageQueryType::DescendantsValues, - pagination_start_key: None, - }, - PaginatedStorageQuery { - key: key.clone(), - query_type: StorageQueryType::Hash, - pagination_start_key: None, - }, - PaginatedStorageQuery { - key: key.clone(), - query_type: StorageQueryType::Value, - pagination_start_key: None, - }, + let items: Vec> = vec![ + StorageQuery { key: key.clone(), query_type: StorageQueryType::DescendantsHashes }, + StorageQuery { key: key.clone(), query_type: StorageQueryType::DescendantsValues }, + StorageQuery { key: key.clone(), query_type: StorageQueryType::Hash }, + StorageQuery { key: key.clone(), query_type: StorageQueryType::Value }, ]; - let result: ArchiveStorageResult = api - .call("archive_unstable_storage", rpc_params![&block_hash, items.clone()]) + let mut sub = api + .subscribe_unbounded("archive_unstable_storage", rpc_params![&block_hash, items.clone()]) .await .unwrap(); - match result { - ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => { - // Key has not been imported yet. - assert_eq!(result.len(), 0); - assert_eq!(discarded_items, 0); - }, - _ => panic!("Unexpected result"), - }; + // Key has not been imported yet. + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::StorageDone, + ); // Import a block with the given key value pair. 
let mut builder = BlockBuilderBuilder::new(&*client) @@ -406,32 +392,103 @@ async fn archive_storage_hashes_values() { let expected_hash = format!("{:?}", Blake2Hasher::hash(&VALUE)); let expected_value = hex_string(&VALUE); - let result: ArchiveStorageResult = api - .call("archive_unstable_storage", rpc_params![&block_hash, items]) + let mut sub = api + .subscribe_unbounded("archive_unstable_storage", rpc_params![&block_hash, items]) .await .unwrap(); - match result { - ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => { - assert_eq!(result.len(), 4); - assert_eq!(discarded_items, 0); - - assert_eq!(result[0].key, key); - assert_eq!(result[0].result, StorageResultType::Hash(expected_hash.clone())); - assert_eq!(result[1].key, key); - assert_eq!(result[1].result, StorageResultType::Value(expected_value.clone())); - assert_eq!(result[2].key, key); - assert_eq!(result[2].result, StorageResultType::Hash(expected_hash)); - assert_eq!(result[3].key, key); - assert_eq!(result[3].result, StorageResultType::Value(expected_value)); - }, - _ => panic!("Unexpected result"), - }; + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::Storage(StorageResult { + key: key.clone(), + result: StorageResultType::Hash(expected_hash.clone()), + child_trie_key: None, + }), + ); + + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::Storage(StorageResult { + key: key.clone(), + result: StorageResultType::Value(expected_value.clone()), + child_trie_key: None, + }), + ); + + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::Storage(StorageResult { + key: key.clone(), + result: StorageResultType::Hash(expected_hash), + child_trie_key: None, + }), + ); + + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::Storage(StorageResult { + key: key.clone(), + result: StorageResultType::Value(expected_value), + child_trie_key: None, + }), + ); + + assert_matches!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::StorageDone + ); +} + +#[tokio::test] +async fn archive_storage_hashes_values_child_trie() { + let (client, api) = setup_api(); + + // Get child storage values set in `setup_api`. 
+ let child_info = hex_string(&CHILD_STORAGE_KEY); + let key = hex_string(&KEY); + let genesis_hash = format!("{:?}", client.genesis_hash()); + let expected_hash = format!("{:?}", Blake2Hasher::hash(&CHILD_VALUE)); + let expected_value = hex_string(&CHILD_VALUE); + + let items: Vec> = vec![ + StorageQuery { key: key.clone(), query_type: StorageQueryType::DescendantsHashes }, + StorageQuery { key: key.clone(), query_type: StorageQueryType::DescendantsValues }, + ]; + let mut sub = api + .subscribe_unbounded( + "archive_unstable_storage", + rpc_params![&genesis_hash, items, &child_info], + ) + .await + .unwrap(); + + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::Storage(StorageResult { + key: key.clone(), + result: StorageResultType::Hash(expected_hash.clone()), + child_trie_key: Some(child_info.clone()), + }) + ); + + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::Storage(StorageResult { + key: key.clone(), + result: StorageResultType::Value(expected_value.clone()), + child_trie_key: Some(child_info.clone()), + }) + ); + + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::StorageDone, + ); } #[tokio::test] async fn archive_storage_closest_merkle_value() { - let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); + let (client, api) = setup_api(); /// The core of this test. /// @@ -443,55 +500,47 @@ async fn archive_storage_closest_merkle_value() { api: &RpcModule>>, block_hash: String, ) -> HashMap { - let result: ArchiveStorageResult = api - .call( + let mut sub = api + .subscribe_unbounded( "archive_unstable_storage", rpc_params![ &block_hash, vec![ - PaginatedStorageQuery { + StorageQuery { key: hex_string(b":AAAA"), query_type: StorageQueryType::ClosestDescendantMerkleValue, - pagination_start_key: None, }, - PaginatedStorageQuery { + StorageQuery { key: hex_string(b":AAAB"), query_type: StorageQueryType::ClosestDescendantMerkleValue, - pagination_start_key: None, }, // Key with descendant. - PaginatedStorageQuery { + StorageQuery { key: hex_string(b":A"), query_type: StorageQueryType::ClosestDescendantMerkleValue, - pagination_start_key: None, }, - PaginatedStorageQuery { + StorageQuery { key: hex_string(b":AA"), query_type: StorageQueryType::ClosestDescendantMerkleValue, - pagination_start_key: None, }, // Keys below this comment do not produce a result. // Key that exceed the keyspace of the trie. - PaginatedStorageQuery { + StorageQuery { key: hex_string(b":AAAAX"), query_type: StorageQueryType::ClosestDescendantMerkleValue, - pagination_start_key: None, }, - PaginatedStorageQuery { + StorageQuery { key: hex_string(b":AAABX"), query_type: StorageQueryType::ClosestDescendantMerkleValue, - pagination_start_key: None, }, // Key that are not part of the trie. - PaginatedStorageQuery { + StorageQuery { key: hex_string(b":AAX"), query_type: StorageQueryType::ClosestDescendantMerkleValue, - pagination_start_key: None, }, - PaginatedStorageQuery { + StorageQuery { key: hex_string(b":AAAX"), query_type: StorageQueryType::ClosestDescendantMerkleValue, - pagination_start_key: None, }, ] ], @@ -499,19 +548,21 @@ async fn archive_storage_closest_merkle_value() { .await .unwrap(); - let merkle_values: HashMap<_, _> = match result { - ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, .. 
}) => result - .into_iter() - .map(|res| { - let value = match res.result { + let mut merkle_values = HashMap::new(); + loop { + let event = get_next_event::(&mut sub).await; + match event { + ArchiveStorageEvent::Storage(result) => { + let str_result = match result.result { StorageResultType::ClosestDescendantMerkleValue(value) => value, - _ => panic!("Unexpected StorageResultType"), + _ => panic!("Unexpected result type"), }; - (res.key, value) - }) - .collect(), - _ => panic!("Unexpected result"), - }; + merkle_values.insert(result.key, str_result); + }, + ArchiveStorageEvent::StorageError(err) => panic!("Unexpected error {err:?}"), + ArchiveStorageEvent::StorageDone => break, + } + } // Response for AAAA, AAAB, A and AA. assert_eq!(merkle_values.len(), 4); @@ -590,9 +641,9 @@ async fn archive_storage_closest_merkle_value() { } #[tokio::test] -async fn archive_storage_paginate_iterations() { +async fn archive_storage_iterations() { // 1 iteration allowed before pagination kicks in. - let (client, api) = setup_api(1, MAX_QUERIED_LIMIT); + let (client, api) = setup_api(); // Import a new block with storage changes. let mut builder = BlockBuilderBuilder::new(&*client) @@ -611,230 +662,344 @@ async fn archive_storage_paginate_iterations() { // Calling with an invalid hash. let invalid_hash = hex_string(&INVALID_HASH); - let result: ArchiveStorageResult = api - .call( + let mut sub = api + .subscribe_unbounded( "archive_unstable_storage", rpc_params![ &invalid_hash, - vec![PaginatedStorageQuery { + vec![StorageQuery { key: hex_string(b":m"), query_type: StorageQueryType::DescendantsValues, - pagination_start_key: None, }] ], ) .await .unwrap(); - match result { - ArchiveStorageResult::Err(_) => (), - _ => panic!("Unexpected result"), - }; + + assert_matches!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::StorageError(_) + ); // Valid call with storage at the key. - let result: ArchiveStorageResult = api - .call( + let mut sub = api + .subscribe_unbounded( "archive_unstable_storage", rpc_params![ &block_hash, - vec![PaginatedStorageQuery { + vec![StorageQuery { key: hex_string(b":m"), query_type: StorageQueryType::DescendantsValues, - pagination_start_key: None, }] ], ) .await .unwrap(); - match result { - ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => { - assert_eq!(result.len(), 1); - assert_eq!(discarded_items, 0); - assert_eq!(result[0].key, hex_string(b":m")); - assert_eq!(result[0].result, StorageResultType::Value(hex_string(b"a"))); - }, - _ => panic!("Unexpected result"), - }; + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::Storage(StorageResult { + key: hex_string(b":m"), + result: StorageResultType::Value(hex_string(b"a")), + child_trie_key: None, + }) + ); - // Continue with pagination. 
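With pagination removed, a query is just a key plus a query type; the server streams every matching item over the subscription instead of returning one page per call, so the resume key disappears. A before/after sketch of the same descendants query:

// Before: one page per call, resumed manually via the start key.
let paginated = PaginatedStorageQuery {
    key: hex_string(b":m"),
    query_type: StorageQueryType::DescendantsValues,
    pagination_start_key: None,
};
// After: the subscription streams all descendants of `:m`.
let streamed = StorageQuery {
    key: hex_string(b":m"),
    query_type: StorageQueryType::DescendantsValues,
};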
- let result: ArchiveStorageResult = api - .call( - "archive_unstable_storage", - rpc_params![ - &block_hash, - vec![PaginatedStorageQuery { - key: hex_string(b":m"), - query_type: StorageQueryType::DescendantsValues, - pagination_start_key: Some(hex_string(b":m")), - }] - ], - ) - .await - .unwrap(); - match result { - ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => { - assert_eq!(result.len(), 1); - assert_eq!(discarded_items, 0); + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::Storage(StorageResult { + key: hex_string(b":mo"), + result: StorageResultType::Value(hex_string(b"ab")), + child_trie_key: None, + }) + ); - assert_eq!(result[0].key, hex_string(b":mo")); - assert_eq!(result[0].result, StorageResultType::Value(hex_string(b"ab"))); - }, - _ => panic!("Unexpected result"), - }; + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::Storage(StorageResult { + key: hex_string(b":moD"), + result: StorageResultType::Value(hex_string(b"abcmoD")), + child_trie_key: None, + }) + ); - // Continue with pagination. - let result: ArchiveStorageResult = api - .call( - "archive_unstable_storage", - rpc_params![ - &block_hash, - vec![PaginatedStorageQuery { - key: hex_string(b":m"), - query_type: StorageQueryType::DescendantsValues, - pagination_start_key: Some(hex_string(b":mo")), - }] - ], - ) - .await + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::Storage(StorageResult { + key: hex_string(b":moc"), + result: StorageResultType::Value(hex_string(b"abc")), + child_trie_key: None, + }) + ); + + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::Storage(StorageResult { + key: hex_string(b":mock"), + result: StorageResultType::Value(hex_string(b"abcd")), + child_trie_key: None, + }) + ); + + assert_matches!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::StorageDone + ); +} + +#[tokio::test] +async fn archive_storage_diff_main_trie() { + let (client, api) = setup_api(); + + let mut builder = BlockBuilderBuilder::new(&*client) + .on_parent_block(client.chain_info().genesis_hash) + .with_parent_block_number(0) + .build() .unwrap(); - match result { - ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => { - assert_eq!(result.len(), 1); - assert_eq!(discarded_items, 0); + builder.push_storage_change(b":A".to_vec(), Some(b"B".to_vec())).unwrap(); + builder.push_storage_change(b":AA".to_vec(), Some(b"BB".to_vec())).unwrap(); + let prev_block = builder.build().unwrap().block; + let prev_hash = format!("{:?}", prev_block.header.hash()); + client.import(BlockOrigin::Own, prev_block.clone()).await.unwrap(); - assert_eq!(result[0].key, hex_string(b":moD")); - assert_eq!(result[0].result, StorageResultType::Value(hex_string(b"abcmoD"))); - }, - _ => panic!("Unexpected result"), - }; + let mut builder = BlockBuilderBuilder::new(&*client) + .on_parent_block(prev_block.hash()) + .with_parent_block_number(1) + .build() + .unwrap(); + builder.push_storage_change(b":A".to_vec(), Some(b"11".to_vec())).unwrap(); + builder.push_storage_change(b":AA".to_vec(), Some(b"22".to_vec())).unwrap(); + builder.push_storage_change(b":AAA".to_vec(), Some(b"222".to_vec())).unwrap(); + let block = builder.build().unwrap().block; + let block_hash = format!("{:?}", block.header.hash()); + client.import(BlockOrigin::Own, block.clone()).await.unwrap(); - // Continue with pagination. 
- let result: ArchiveStorageResult = api - .call( - "archive_unstable_storage", - rpc_params![ - &block_hash, - vec![PaginatedStorageQuery { - key: hex_string(b":m"), - query_type: StorageQueryType::DescendantsValues, - pagination_start_key: Some(hex_string(b":moD")), - }] - ], + // Search for items in the main trie: + // - values of keys under ":A" + // - hashes of keys under ":AA" + let items = vec![ + ArchiveStorageDiffItem:: { + key: hex_string(b":A"), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + }, + ArchiveStorageDiffItem:: { + key: hex_string(b":AA"), + return_type: ArchiveStorageDiffType::Hash, + child_trie_key: None, + }, + ]; + let mut sub = api + .subscribe_unbounded( + "archive_unstable_storageDiff", + rpc_params![&block_hash, items.clone(), &prev_hash], ) .await .unwrap(); - match result { - ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => { - assert_eq!(result.len(), 1); - assert_eq!(discarded_items, 0); - assert_eq!(result[0].key, hex_string(b":moc")); - assert_eq!(result[0].result, StorageResultType::Value(hex_string(b"abc"))); - }, - _ => panic!("Unexpected result"), - }; + let event = get_next_event::(&mut sub).await; + assert_eq!( + ArchiveStorageDiffEvent::StorageDiff(ArchiveStorageDiffResult { + key: hex_string(b":A"), + result: StorageResultType::Value(hex_string(b"11")), + operation_type: ArchiveStorageDiffOperationType::Modified, + child_trie_key: None, + }), + event, + ); - // Continue with pagination. - let result: ArchiveStorageResult = api - .call( - "archive_unstable_storage", - rpc_params![ - &block_hash, - vec![PaginatedStorageQuery { - key: hex_string(b":m"), - query_type: StorageQueryType::DescendantsValues, - pagination_start_key: Some(hex_string(b":moc")), - }] - ], - ) - .await + let event = get_next_event::(&mut sub).await; + assert_eq!( + ArchiveStorageDiffEvent::StorageDiff(ArchiveStorageDiffResult { + key: hex_string(b":AA"), + result: StorageResultType::Value(hex_string(b"22")), + operation_type: ArchiveStorageDiffOperationType::Modified, + child_trie_key: None, + }), + event, + ); + + let event = get_next_event::(&mut sub).await; + assert_eq!( + ArchiveStorageDiffEvent::StorageDiff(ArchiveStorageDiffResult { + key: hex_string(b":AA"), + result: StorageResultType::Hash(format!("{:?}", Blake2Hasher::hash(b"22"))), + operation_type: ArchiveStorageDiffOperationType::Modified, + child_trie_key: None, + }), + event, + ); + + // Added key. + let event = get_next_event::(&mut sub).await; + assert_eq!( + ArchiveStorageDiffEvent::StorageDiff(ArchiveStorageDiffResult { + key: hex_string(b":AAA"), + result: StorageResultType::Value(hex_string(b"222")), + operation_type: ArchiveStorageDiffOperationType::Added, + child_trie_key: None, + }), + event, + ); + + let event = get_next_event::(&mut sub).await; + assert_eq!( + ArchiveStorageDiffEvent::StorageDiff(ArchiveStorageDiffResult { + key: hex_string(b":AAA"), + result: StorageResultType::Hash(format!("{:?}", Blake2Hasher::hash(b"222"))), + operation_type: ArchiveStorageDiffOperationType::Added, + child_trie_key: None, + }), + event, + ); + + let event = get_next_event::(&mut sub).await; + assert_eq!(ArchiveStorageDiffEvent::StorageDiffDone, event); +} + +#[tokio::test] +async fn archive_storage_diff_no_changes() { + let (client, api) = setup_api(); + + // Build 2 identical blocks. 
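`archive_unstable_storageDiff` classifies each reported key by comparing the queried block against the `previousHash` block: present in both with a different value is `modified`, present only in the new block is `added`, and present only in the previous block is `deleted`; unchanged keys produce no event, as the next test shows. On the wire the events use the internally tagged layout declared in `common/events.rs`, so the first expected event above should serialize as in this sketch (assuming serde's usual field ordering for `#[serde(tag = "event")]` with a flattened result):

let event = ArchiveStorageDiffEvent::StorageDiff(ArchiveStorageDiffResult {
    key: hex_string(b":A"),
    result: StorageResultType::Value(hex_string(b"11")),
    operation_type: ArchiveStorageDiffOperationType::Modified,
    child_trie_key: None,
});
assert_eq!(
    serde_json::to_string(&event).unwrap(),
    r#"{"event":"storageDiff","key":"0x3a41","value":"0x3131","type":"modified"}"#
);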
+ let mut builder = BlockBuilderBuilder::new(&*client) + .on_parent_block(client.chain_info().genesis_hash) + .with_parent_block_number(0) + .build() .unwrap(); - match result { - ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => { - assert_eq!(result.len(), 1); - assert_eq!(discarded_items, 0); + builder.push_storage_change(b":A".to_vec(), Some(b"B".to_vec())).unwrap(); + builder.push_storage_change(b":AA".to_vec(), Some(b"BB".to_vec())).unwrap(); + builder.push_storage_change(b":B".to_vec(), Some(b"CC".to_vec())).unwrap(); + builder.push_storage_change(b":BA".to_vec(), Some(b"CC".to_vec())).unwrap(); + let prev_block = builder.build().unwrap().block; + let prev_hash = format!("{:?}", prev_block.header.hash()); + client.import(BlockOrigin::Own, prev_block.clone()).await.unwrap(); - assert_eq!(result[0].key, hex_string(b":mock")); - assert_eq!(result[0].result, StorageResultType::Value(hex_string(b"abcd"))); - }, - _ => panic!("Unexpected result"), - }; + let mut builder = BlockBuilderBuilder::new(&*client) + .on_parent_block(prev_block.hash()) + .with_parent_block_number(1) + .build() + .unwrap(); + builder.push_storage_change(b":A".to_vec(), Some(b"B".to_vec())).unwrap(); + builder.push_storage_change(b":AA".to_vec(), Some(b"BB".to_vec())).unwrap(); + let block = builder.build().unwrap().block; + let block_hash = format!("{:?}", block.header.hash()); + client.import(BlockOrigin::Own, block.clone()).await.unwrap(); - // Continue with pagination until no keys are returned. - let result: ArchiveStorageResult = api - .call( - "archive_unstable_storage", - rpc_params![ - &block_hash, - vec![PaginatedStorageQuery { - key: hex_string(b":m"), - query_type: StorageQueryType::DescendantsValues, - pagination_start_key: Some(hex_string(b":mock")), - }] - ], + // Search for items in the main trie with keys prefixed with ":A". + let items = vec![ArchiveStorageDiffItem:: { + key: hex_string(b":A"), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + }]; + let mut sub = api + .subscribe_unbounded( + "archive_unstable_storageDiff", + rpc_params![&block_hash, items.clone(), &prev_hash], ) .await .unwrap(); - match result { - ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => { - assert_eq!(result.len(), 0); - assert_eq!(discarded_items, 0); - }, - _ => panic!("Unexpected result"), - }; + + let event = get_next_event::(&mut sub).await; + assert_eq!(ArchiveStorageDiffEvent::StorageDiffDone, event); } #[tokio::test] -async fn archive_storage_discarded_items() { - // One query at a time - let (client, api) = setup_api(MAX_PAGINATION_LIMIT, 1); +async fn archive_storage_diff_deleted_changes() { + let (client, api) = setup_api(); - // Import a new block with storage changes. + // Blocks are imported as forks. 
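Deletions are detected even across forks: both blocks in this test descend from genesis, so the comparison is by state at the two hashes rather than by chain ancestry, and `:AA`, which exists at the previous hash but not in the fork, is reported as `deleted`. A hypothetical test helper (not in this patch) for opening the diff subscription, showing the parameter order of target hash, items, previous hash:

async fn subscribe_storage_diff(
    api: &RpcModule<Archive<Backend, Block, Client<Backend>>>,
    block_hash: &str,
    items: Vec<ArchiveStorageDiffItem<String>>,
    prev_hash: &str,
) -> RpcSubscription {
    api.subscribe_unbounded(
        "archive_unstable_storageDiff",
        rpc_params![block_hash, items, prev_hash],
    )
    .await
    .unwrap()
}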
let mut builder = BlockBuilderBuilder::new(&*client) .on_parent_block(client.chain_info().genesis_hash) .with_parent_block_number(0) .build() .unwrap(); - builder.push_storage_change(b":m".to_vec(), Some(b"a".to_vec())).unwrap(); + builder.push_storage_change(b":A".to_vec(), Some(b"B".to_vec())).unwrap(); + builder.push_storage_change(b":AA".to_vec(), Some(b"BB".to_vec())).unwrap(); + builder.push_storage_change(b":B".to_vec(), Some(b"CC".to_vec())).unwrap(); + builder.push_storage_change(b":BA".to_vec(), Some(b"CC".to_vec())).unwrap(); + let prev_block = builder.build().unwrap().block; + let prev_hash = format!("{:?}", prev_block.header.hash()); + client.import(BlockOrigin::Own, prev_block.clone()).await.unwrap(); + + let mut builder = BlockBuilderBuilder::new(&*client) + .on_parent_block(client.chain_info().genesis_hash) + .with_parent_block_number(0) + .build() + .unwrap(); + builder + .push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 41, + nonce: 0, + }) + .unwrap(); + builder.push_storage_change(b":A".to_vec(), Some(b"B".to_vec())).unwrap(); let block = builder.build().unwrap().block; let block_hash = format!("{:?}", block.header.hash()); client.import(BlockOrigin::Own, block.clone()).await.unwrap(); - // Valid call with storage at the key. - let result: ArchiveStorageResult = api - .call( - "archive_unstable_storage", - rpc_params![ - &block_hash, - vec![ - PaginatedStorageQuery { - key: hex_string(b":m"), - query_type: StorageQueryType::Value, - pagination_start_key: None, - }, - PaginatedStorageQuery { - key: hex_string(b":m"), - query_type: StorageQueryType::Hash, - pagination_start_key: None, - }, - PaginatedStorageQuery { - key: hex_string(b":m"), - query_type: StorageQueryType::Hash, - pagination_start_key: None, - } - ] - ], + // Search for items in the main trie with keys prefixed with ":A". + let items = vec![ArchiveStorageDiffItem:: { + key: hex_string(b":A"), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + }]; + + let mut sub = api + .subscribe_unbounded( + "archive_unstable_storageDiff", + rpc_params![&block_hash, items.clone(), &prev_hash], ) .await .unwrap(); - match result { - ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => { - assert_eq!(result.len(), 1); - assert_eq!(discarded_items, 2); - assert_eq!(result[0].key, hex_string(b":m")); - assert_eq!(result[0].result, StorageResultType::Value(hex_string(b"a"))); - }, - _ => panic!("Unexpected result"), - }; + let event = get_next_event::(&mut sub).await; + assert_eq!( + ArchiveStorageDiffEvent::StorageDiff(ArchiveStorageDiffResult { + key: hex_string(b":AA"), + result: StorageResultType::Value(hex_string(b"BB")), + operation_type: ArchiveStorageDiffOperationType::Deleted, + child_trie_key: None, + }), + event, + ); + + let event = get_next_event::(&mut sub).await; + assert_eq!(ArchiveStorageDiffEvent::StorageDiffDone, event); +} + +#[tokio::test] +async fn archive_storage_diff_invalid_params() { + let invalid_hash = hex_string(&INVALID_HASH); + let (_, api) = setup_api(); + + // Invalid shape for parameters. 
+ let items: Vec> = Vec::new(); + let err = api + .subscribe_unbounded( + "archive_unstable_storageDiff", + rpc_params!["123", items.clone(), &invalid_hash], + ) + .await + .unwrap_err(); + assert_matches!(err, + Error::JsonRpc(ref err) if err.code() == crate::chain_head::error::json_rpc_spec::INVALID_PARAM_ERROR && err.message() == "Invalid params" + ); + + // The shape is right, but the block hash is invalid. + let items: Vec> = Vec::new(); + let mut sub = api + .subscribe_unbounded( + "archive_unstable_storageDiff", + rpc_params![&invalid_hash, items.clone(), &invalid_hash], + ) + .await + .unwrap(); + + let event = get_next_event::(&mut sub).await; + assert_matches!(event, + ArchiveStorageDiffEvent::StorageDiffError(ref err) if err.error.contains("Header was not found") + ); } diff --git a/substrate/client/rpc-spec-v2/src/chain_head/event.rs b/substrate/client/rpc-spec-v2/src/chain_head/event.rs index bd9863060910..de74145a3f08 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/event.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/event.rs @@ -235,7 +235,7 @@ pub struct OperationCallDone { pub output: String, } -/// The response of the `chainHead_call` method. +/// The response of the `chainHead_storage` method. #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct OperationStorageItems { @@ -536,6 +536,7 @@ mod tests { items: vec![StorageResult { key: "0x1".into(), result: StorageResultType::Value("0x123".to_string()), + child_trie_key: None, }], }); diff --git a/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs b/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs index 95a7c7fe1832..3e1bd23776d3 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs @@ -784,7 +784,7 @@ mod tests { use super::*; use jsonrpsee::ConnectionId; use sc_block_builder::BlockBuilderBuilder; - use sc_service::client::new_in_mem; + use sc_service::client::new_with_backend; use sp_consensus::BlockOrigin; use sp_core::{testing::TaskExecutor, H256}; use substrate_test_runtime_client::{ @@ -811,13 +811,13 @@ mod tests { ) .unwrap(); let client = Arc::new( - new_in_mem::<_, Block, _, RuntimeApi>( + new_with_backend::<_, _, Block, _, RuntimeApi>( backend.clone(), executor, genesis_block_builder, + Box::new(TaskExecutor::new()), None, None, - Box::new(TaskExecutor::new()), client_config, ) .unwrap(), diff --git a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs index c505566d887d..21e8365622a1 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs @@ -34,7 +34,7 @@ use jsonrpsee::{ use sc_block_builder::BlockBuilderBuilder; use sc_client_api::ChildInfo; use sc_rpc::testing::TokioTestExecutor; -use sc_service::client::new_in_mem; +use sc_service::client::new_with_backend; use sp_blockchain::HeaderBackend; use sp_consensus::BlockOrigin; use sp_core::{ @@ -2547,13 +2547,13 @@ async fn pin_block_references() { .unwrap(); let client = Arc::new( - new_in_mem::<_, Block, _, RuntimeApi>( + new_with_backend::<_, _, Block, _, RuntimeApi>( backend.clone(), executor, genesis_block_builder, + Box::new(TokioTestExecutor::default()), None, None, - Box::new(TokioTestExecutor::default()), client_config, ) .unwrap(), diff --git a/substrate/client/rpc-spec-v2/src/common/events.rs 
b/substrate/client/rpc-spec-v2/src/common/events.rs index b1627d74c844..44f722c0c61b 100644 --- a/substrate/client/rpc-spec-v2/src/common/events.rs +++ b/substrate/client/rpc-spec-v2/src/common/events.rs @@ -78,10 +78,14 @@ pub struct StorageResult { /// The result of the query. #[serde(flatten)] pub result: StorageResultType, + /// The child trie key if provided. + #[serde(skip_serializing_if = "Option::is_none")] + #[serde(default)] + pub child_trie_key: Option<String>, } /// The type of the storage query. -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub enum StorageResultType { /// Fetch the value of the provided key. @@ -105,23 +109,41 @@ pub struct StorageResultErr { /// The result of a storage call. #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -#[serde(untagged)] -pub enum ArchiveStorageResult { +#[serde(rename_all = "camelCase")] +#[serde(tag = "event")] +pub enum ArchiveStorageEvent { /// Query generated a result. - Ok(ArchiveStorageMethodOk), + Storage(StorageResult), /// Query encountered an error. - Err(ArchiveStorageMethodErr), + StorageError(ArchiveStorageMethodErr), + /// Operation storage is done. + StorageDone, } -impl ArchiveStorageResult { - /// Create a new `ArchiveStorageResult::Ok` result. - pub fn ok(result: Vec<StorageResult>, discarded_items: usize) -> Self { - Self::Ok(ArchiveStorageMethodOk { result, discarded_items }) +impl ArchiveStorageEvent { + /// Create a new `ArchiveStorageEvent::StorageError` event. + pub fn err(error: String) -> Self { + Self::StorageError(ArchiveStorageMethodErr { error }) } - /// Create a new `ArchiveStorageResult::Err` result. - pub fn err(error: String) -> Self { - Self::Err(ArchiveStorageMethodErr { error }) + /// Create a new `ArchiveStorageEvent::Storage` event. + pub fn result(result: StorageResult) -> Self { + Self::Storage(result) + } + + /// Checks if the event is a `StorageDone` event. + pub fn is_done(&self) -> bool { + matches!(self, Self::StorageDone) + } + + /// Checks if the event is a `StorageError` event. + pub fn is_err(&self) -> bool { + matches!(self, Self::StorageError(_)) + } + + /// Checks if the event is a `Storage` event. + pub fn is_result(&self) -> bool { + matches!(self, Self::Storage(_)) } } @@ -136,22 +158,229 @@ pub struct ArchiveStorageMethodOk { } /// The error of a storage call. -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct ArchiveStorageMethodErr { /// Reported error. pub error: String, } +/// The type of the archive storage difference query. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub enum ArchiveStorageDiffType { + /// The result is provided as the value of the key. + Value, + /// The result is the hash of the value of the key. + Hash, +} + +/// The storage item to query. +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct ArchiveStorageDiffItem<Key> { + /// The provided key. + pub key: Key, + /// The type of the storage query. + pub return_type: ArchiveStorageDiffType, + /// The child trie key if provided. + #[serde(skip_serializing_if = "Option::is_none")] + #[serde(default)] + pub child_trie_key: Option<Key>, +} + +/// The result of a storage difference call.
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct ArchiveStorageDiffMethodResult { + /// Reported results. + pub result: Vec<ArchiveStorageDiffResult>, +} + +/// The operation type of a storage difference call result. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub enum ArchiveStorageDiffOperationType { + /// The key is added. + Added, + /// The key is modified. + Modified, + /// The key is removed. + Deleted, +} + +/// The result of an individual storage difference key. +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct ArchiveStorageDiffResult { + /// The hex-encoded key of the result. + pub key: String, + /// The result of the query. + #[serde(flatten)] + pub result: StorageResultType, + /// The operation type. + #[serde(rename = "type")] + pub operation_type: ArchiveStorageDiffOperationType, + /// The child trie key if provided. + #[serde(skip_serializing_if = "Option::is_none")] + #[serde(default)] + pub child_trie_key: Option<String>, +} + +/// The event generated by the `archive_storageDiff` method. +/// +/// The `archive_storageDiff` method can generate the following events: +/// - `storageDiff` event - generated when an `ArchiveStorageDiffResult` is produced. +/// - `storageDiffError` event - generated when an error is produced. +/// - `storageDiffDone` event - generated when the `archive_storageDiff` method has completed. +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +#[serde(tag = "event")] +pub enum ArchiveStorageDiffEvent { + /// The `storageDiff` event. + StorageDiff(ArchiveStorageDiffResult), + /// The `storageDiffError` event. + StorageDiffError(ArchiveStorageMethodErr), + /// The `storageDiffDone` event. + StorageDiffDone, +} + +impl ArchiveStorageDiffEvent { + /// Create a new `ArchiveStorageDiffEvent::StorageDiffError` event. + pub fn err(error: String) -> Self { + Self::StorageDiffError(ArchiveStorageMethodErr { error }) + } + + /// Checks if the event is a `StorageDiffDone` event. + pub fn is_done(&self) -> bool { + matches!(self, Self::StorageDiffDone) + } + + /// Checks if the event is a `StorageDiffError` event. + pub fn is_err(&self) -> bool { + matches!(self, Self::StorageDiffError(_)) + } +} + #[cfg(test)] mod tests { use super::*; + #[test] + fn archive_diff_input() { + // Item with Value. + let item = ArchiveStorageDiffItem { + key: "0x1", + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + }; + // Encode + let ser = serde_json::to_string(&item).unwrap(); + let exp = r#"{"key":"0x1","returnType":"value"}"#; + assert_eq!(ser, exp); + // Decode + let dec: ArchiveStorageDiffItem<&str> = serde_json::from_str(exp).unwrap(); + assert_eq!(dec, item); + + // Item with Hash. + let item = ArchiveStorageDiffItem { + key: "0x1", + return_type: ArchiveStorageDiffType::Hash, + child_trie_key: None, + }; + // Encode + let ser = serde_json::to_string(&item).unwrap(); + let exp = r#"{"key":"0x1","returnType":"hash"}"#; + assert_eq!(ser, exp); + // Decode + let dec: ArchiveStorageDiffItem<&str> = serde_json::from_str(exp).unwrap(); + assert_eq!(dec, item); + + // Item with Value and child trie key.
+ let item = ArchiveStorageDiffItem { + key: "0x1", + return_type: ArchiveStorageDiffType::Value, + child_trie_key: Some("0x2"), + }; + // Encode + let ser = serde_json::to_string(&item).unwrap(); + let exp = r#"{"key":"0x1","returnType":"value","childTrieKey":"0x2"}"#; + assert_eq!(ser, exp); + // Decode + let dec: ArchiveStorageDiffItem<&str> = serde_json::from_str(exp).unwrap(); + assert_eq!(dec, item); + + // Item with Hash and child trie key. + let item = ArchiveStorageDiffItem { + key: "0x1", + return_type: ArchiveStorageDiffType::Hash, + child_trie_key: Some("0x2"), + }; + // Encode + let ser = serde_json::to_string(&item).unwrap(); + let exp = r#"{"key":"0x1","returnType":"hash","childTrieKey":"0x2"}"#; + assert_eq!(ser, exp); + // Decode + let dec: ArchiveStorageDiffItem<&str> = serde_json::from_str(exp).unwrap(); + assert_eq!(dec, item); + } + + #[test] + fn archive_diff_output() { + // Item with Value. + let item = ArchiveStorageDiffResult { + key: "0x1".into(), + result: StorageResultType::Value("res".into()), + operation_type: ArchiveStorageDiffOperationType::Added, + child_trie_key: None, + }; + // Encode + let ser = serde_json::to_string(&item).unwrap(); + let exp = r#"{"key":"0x1","value":"res","type":"added"}"#; + assert_eq!(ser, exp); + // Decode + let dec: ArchiveStorageDiffResult = serde_json::from_str(exp).unwrap(); + assert_eq!(dec, item); + + // Item with Hash. + let item = ArchiveStorageDiffResult { + key: "0x1".into(), + result: StorageResultType::Hash("res".into()), + operation_type: ArchiveStorageDiffOperationType::Modified, + child_trie_key: None, + }; + // Encode + let ser = serde_json::to_string(&item).unwrap(); + let exp = r#"{"key":"0x1","hash":"res","type":"modified"}"#; + assert_eq!(ser, exp); + // Decode + let dec: ArchiveStorageDiffResult = serde_json::from_str(exp).unwrap(); + assert_eq!(dec, item); + + // Item with Hash, child trie key and removed. + let item = ArchiveStorageDiffResult { + key: "0x1".into(), + result: StorageResultType::Hash("res".into()), + operation_type: ArchiveStorageDiffOperationType::Deleted, + child_trie_key: Some("0x2".into()), + }; + // Encode + let ser = serde_json::to_string(&item).unwrap(); + let exp = r#"{"key":"0x1","hash":"res","type":"deleted","childTrieKey":"0x2"}"#; + assert_eq!(ser, exp); + // Decode + let dec: ArchiveStorageDiffResult = serde_json::from_str(exp).unwrap(); + assert_eq!(dec, item); + } + #[test] fn storage_result() { // Item with Value. - let item = - StorageResult { key: "0x1".into(), result: StorageResultType::Value("res".into()) }; + let item = StorageResult { + key: "0x1".into(), + result: StorageResultType::Value("res".into()), + child_trie_key: None, + }; // Encode let ser = serde_json::to_string(&item).unwrap(); let exp = r#"{"key":"0x1","value":"res"}"#; @@ -161,8 +390,11 @@ mod tests { assert_eq!(dec, item); // Item with Hash. 
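The new `ArchiveStorageEvent` uses the same internally tagged layout (`#[serde(tag = "event")]`, camelCase variant names). A sketch of the expected wire shapes, written in the style of the tests above but not part of this patch:

#[test]
fn archive_storage_event_wire_shape() {
    let item = ArchiveStorageEvent::Storage(StorageResult {
        key: "0x1".into(),
        result: StorageResultType::Value("res".into()),
        child_trie_key: None,
    });
    let ser = serde_json::to_string(&item).unwrap();
    assert_eq!(ser, r#"{"event":"storage","key":"0x1","value":"res"}"#);

    let done = serde_json::to_string(&ArchiveStorageEvent::StorageDone).unwrap();
    assert_eq!(done, r#"{"event":"storageDone"}"#);
}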
- let item = - StorageResult { key: "0x1".into(), result: StorageResultType::Hash("res".into()) }; + let item = StorageResult { + key: "0x1".into(), + result: StorageResultType::Hash("res".into()), + child_trie_key: None, + }; // Encode let ser = serde_json::to_string(&item).unwrap(); let exp = r#"{"key":"0x1","hash":"res"}"#; @@ -175,6 +407,7 @@ mod tests { let item = StorageResult { key: "0x1".into(), result: StorageResultType::ClosestDescendantMerkleValue("res".into()), + child_trie_key: None, }; // Encode let ser = serde_json::to_string(&item).unwrap(); diff --git a/substrate/client/rpc-spec-v2/src/common/storage.rs b/substrate/client/rpc-spec-v2/src/common/storage.rs index 2e24a8da8ca8..a1e34d51530e 100644 --- a/substrate/client/rpc-spec-v2/src/common/storage.rs +++ b/substrate/client/rpc-spec-v2/src/common/storage.rs @@ -24,7 +24,7 @@ use sc_client_api::{Backend, ChildInfo, StorageKey, StorageProvider}; use sp_runtime::traits::Block as BlockT; use tokio::sync::mpsc; -use super::events::{StorageResult, StorageResultType}; +use super::events::{StorageQuery, StorageQueryType, StorageResult, StorageResultType}; use crate::hex_string; /// Call into the storage of blocks. @@ -70,9 +70,6 @@ pub enum IterQueryType { /// The result of making a query call. pub type QueryResult = Result<Option<StorageResult>, String>; -/// The result of iterating over keys. -pub type QueryIterResult = Result<(Vec<StorageResult>, Option<QueryIter>), String>; impl<Client, Block, BE> Storage<Client, Block, BE> where Block: BlockT + 'static, @@ -97,6 +94,7 @@ where QueryResult::Ok(opt.map(|storage_data| StorageResult { key: hex_string(&key.0), result: StorageResultType::Value(hex_string(&storage_data.0)), + child_trie_key: child_key.map(|c| hex_string(&c.storage_key())), })) }) .unwrap_or_else(|error| QueryResult::Err(error.to_string())) @@ -120,6 +118,7 @@ where QueryResult::Ok(opt.map(|storage_data| StorageResult { key: hex_string(&key.0), result: StorageResultType::Hash(hex_string(&storage_data.as_ref())), + child_trie_key: child_key.map(|c| hex_string(&c.storage_key())), })) }) .unwrap_or_else(|error| QueryResult::Err(error.to_string())) @@ -149,6 +148,7 @@ where StorageResult { key: hex_string(&key.0), result: StorageResultType::ClosestDescendantMerkleValue(result), + child_trie_key: child_key.map(|c| hex_string(&c.storage_key())), } })) }) @@ -199,53 +199,111 @@ where } } - /// Iterate over at most the provided number of keys. - /// - /// Returns the storage result with a potential next key to resume iteration. - pub fn query_iter_pagination( + /// Raw iterator over the keys. + pub fn raw_keys_iter( &self, - query: QueryIter, hash: Block::Hash, - child_key: Option<&ChildInfo>, - count: usize, - ) -> QueryIterResult { - let QueryIter { ty, query_key, pagination_start_key } = query; - - let mut keys_iter = if let Some(child_key) = child_key { - self.client.child_storage_keys( - hash, - child_key.to_owned(), - Some(&query_key), - pagination_start_key.as_ref(), - ) + child_key: Option<ChildInfo>, + ) -> Result<impl Iterator<Item = StorageKey>, String> { + let keys_iter = if let Some(child_key) = child_key { + self.client.child_storage_keys(hash, child_key, None, None) } else { - self.client.storage_keys(hash, Some(&query_key), pagination_start_key.as_ref()) + self.client.storage_keys(hash, None, None) - } - .map_err(|err| err.to_string())?; + }; + + keys_iter.map_err(|err| err.to_string()) + } +} - let mut ret = Vec::with_capacity(count); - let mut next_pagination_key = None; - for _ in 0..count { - let Some(key) = keys_iter.next() else { break }; +/// Generates storage events for `chainHead_storage` and `archive_storage` subscriptions. +pub struct StorageSubscriptionClient<Client, Block, BE> { + /// Storage client. + client: Storage<Client, Block, BE>, + _phantom: PhantomData<(BE, Block)>, +} - next_pagination_key = Some(key.clone()); +impl<Client, Block, BE> Clone for StorageSubscriptionClient<Client, Block, BE> { + fn clone(&self) -> Self { + Self { client: self.client.clone(), _phantom: PhantomData } + } +} - let result = match ty { - IterQueryType::Value => self.query_value(hash, &key, child_key), - IterQueryType::Hash => self.query_hash(hash, &key, child_key), - }?; +impl<Client, Block, BE> StorageSubscriptionClient<Client, Block, BE> { + /// Constructs a new [`StorageSubscriptionClient`]. + pub fn new(client: Arc<Client>) -> Self { + Self { client: Storage::new(client), _phantom: PhantomData } + } +} - if let Some(value) = result { - ret.push(value); +impl<Client, Block, BE> StorageSubscriptionClient<Client, Block, BE> +where + Block: BlockT + 'static, + BE: Backend<Block> + 'static, + Client: StorageProvider<Block, BE> + Send + Sync + 'static, +{ + /// Generate storage events to the provided sender. + pub async fn generate_events( + &mut self, + hash: Block::Hash, + items: Vec<StorageQuery<StorageKey>>, + child_key: Option<ChildInfo>, + tx: mpsc::Sender<QueryResult>, + ) -> Result<(), tokio::task::JoinError> { + let this = self.clone(); + + tokio::task::spawn_blocking(move || { + for item in items { + match item.query_type { + StorageQueryType::Value => { + let rp = this.client.query_value(hash, &item.key, child_key.as_ref()); + if tx.blocking_send(rp).is_err() { + break; + } + }, + StorageQueryType::Hash => { + let rp = this.client.query_hash(hash, &item.key, child_key.as_ref()); + if tx.blocking_send(rp).is_err() { + break; + } + }, + StorageQueryType::ClosestDescendantMerkleValue => { + let rp = + this.client.query_merkle_value(hash, &item.key, child_key.as_ref()); + if tx.blocking_send(rp).is_err() { + break; + } + }, + StorageQueryType::DescendantsValues => { + let query = QueryIter { + query_key: item.key, + ty: IterQueryType::Value, + pagination_start_key: None, + }; + this.client.query_iter_pagination_with_producer( + query, + hash, + child_key.as_ref(), + &tx, + ) + }, + StorageQueryType::DescendantsHashes => { + let query = QueryIter { + query_key: item.key, + ty: IterQueryType::Hash, + pagination_start_key: None, + }; + this.client.query_iter_pagination_with_producer( + query, + hash, + child_key.as_ref(), + &tx, + ) + }, + } } - } + }) + .await?; - // Save the next key if any to continue the iteration. - let maybe_next_query = keys_iter.next().map(|_| QueryIter { - ty, - query_key, - pagination_start_key: next_pagination_key, - }); - Ok((ret, maybe_next_query)) + Ok(()) } } diff --git a/substrate/client/service/Cargo.toml b/substrate/client/service/Cargo.toml index f2fc65ef2439..3981395d9768 100644 --- a/substrate/client/service/Cargo.toml +++ b/substrate/client/service/Cargo.toml @@ -20,8 +20,6 @@ default = ["rocksdb"] # The RocksDB feature activates the RocksDB database backend. If it is not activated, and you pass # a path to a database, an error will be produced at runtime. rocksdb = ["sc-client-db/rocksdb"] -# exposes the client type -test-helpers = [] runtime-benchmarks = [ "sc-client-db/runtime-benchmarks", "sp-runtime/runtime-benchmarks", diff --git a/substrate/client/service/src/builder.rs b/substrate/client/service/src/builder.rs index ac9371a8941b..a47a05c0a190 100644 --- a/substrate/client/service/src/builder.rs +++ b/substrate/client/service/src/builder.rs @@ -755,8 +755,7 @@ where client.clone(), backend.clone(), genesis_hash, - // Defaults to sensible limits for the `Archive`.
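`generate_events` runs the storage queries on a blocking thread and pushes each `QueryResult` through the bounded channel, so backpressure comes from the receiver, and a dropped receiver stops the producer early. A consumer sketch under those assumptions (a hypothetical helper, not in this patch; `Ok(None)` results, meaning no value at a key, are skipped rather than treated as errors):

async fn drain_storage_events<Client, Block, BE>(
    client: &mut StorageSubscriptionClient<Client, Block, BE>,
    hash: Block::Hash,
    items: Vec<StorageQuery<StorageKey>>,
) -> Result<Vec<StorageResult>, String>
where
    Block: BlockT + 'static,
    BE: Backend<Block> + 'static,
    Client: StorageProvider<Block, BE> + Send + Sync + 'static,
{
    let (tx, mut rx) = tokio::sync::mpsc::channel(16);
    let producer = client.generate_events(hash, items, None, tx);
    let consumer = async {
        let mut out = Vec::new();
        while let Some(rp) = rx.recv().await {
            if let Some(item) = rp? {
                out.push(item);
            }
        }
        Ok::<_, String>(out)
    };
    let (produced, collected) = tokio::join!(producer, consumer);
    produced.map_err(|e| e.to_string())?;
    collected
}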
- sc_rpc_spec_v2::archive::ArchiveConfig::default(), + task_executor.clone(), ) .into_rpc(); rpc_api.merge(archive_v2).map_err(|e| Error::Application(e.into()))?; diff --git a/substrate/client/service/src/client/client.rs b/substrate/client/service/src/client/client.rs index ce5b92551bf2..eddbb9260c05 100644 --- a/substrate/client/service/src/client/client.rs +++ b/substrate/client/service/src/client/client.rs @@ -85,10 +85,8 @@ use std::{ sync::Arc, }; -#[cfg(feature = "test-helpers")] -use { - super::call_executor::LocalCallExecutor, sc_client_api::in_mem, sp_core::traits::CodeExecutor, -}; +use super::call_executor::LocalCallExecutor; +use sp_core::traits::CodeExecutor; type NotificationSinks = Mutex>>; @@ -152,39 +150,6 @@ enum PrepareStorageChangesResult { Discard(ImportResult), Import(Option>), } - -/// Create an instance of in-memory client. -#[cfg(feature = "test-helpers")] -pub fn new_in_mem( - backend: Arc>, - executor: E, - genesis_block_builder: G, - prometheus_registry: Option, - telemetry: Option, - spawn_handle: Box, - config: ClientConfig, -) -> sp_blockchain::Result< - Client, LocalCallExecutor, E>, Block, RA>, -> -where - E: CodeExecutor + sc_executor::RuntimeVersionOf, - Block: BlockT, - G: BuildGenesisBlock< - Block, - BlockImportOperation = as backend::Backend>::BlockImportOperation, - >, -{ - new_with_backend( - backend, - executor, - genesis_block_builder, - spawn_handle, - prometheus_registry, - telemetry, - config, - ) -} - /// Client configuration items. #[derive(Debug, Clone)] pub struct ClientConfig { @@ -218,7 +183,6 @@ impl Default for ClientConfig { /// Create a client with the explicitly provided backend. /// This is useful for testing backend implementations. -#[cfg(feature = "test-helpers")] pub fn new_with_backend( backend: Arc, executor: E, diff --git a/substrate/client/service/src/client/mod.rs b/substrate/client/service/src/client/mod.rs index ec77a92f162f..3020b3d296f4 100644 --- a/substrate/client/service/src/client/mod.rs +++ b/substrate/client/service/src/client/mod.rs @@ -56,5 +56,4 @@ pub use call_executor::LocalCallExecutor; pub use client::{Client, ClientConfig}; pub(crate) use code_provider::CodeProvider; -#[cfg(feature = "test-helpers")] -pub use self::client::{new_in_mem, new_with_backend}; +pub use self::client::new_with_backend; diff --git a/substrate/client/service/src/lib.rs b/substrate/client/service/src/lib.rs index 9c01d7288a81..b5a38d875e3b 100644 --- a/substrate/client/service/src/lib.rs +++ b/substrate/client/service/src/lib.rs @@ -23,14 +23,11 @@ #![recursion_limit = "1024"] pub mod chain_ops; +pub mod client; pub mod config; pub mod error; mod builder; -#[cfg(feature = "test-helpers")] -pub mod client; -#[cfg(not(feature = "test-helpers"))] -mod client; mod metrics; mod task_manager; diff --git a/substrate/client/service/test/Cargo.toml b/substrate/client/service/test/Cargo.toml index 0edfc5b19314..632b98104f6b 100644 --- a/substrate/client/service/test/Cargo.toml +++ b/substrate/client/service/test/Cargo.toml @@ -31,7 +31,7 @@ sc-consensus = { workspace = true, default-features = true } sc-executor = { workspace = true, default-features = true } sc-network = { workspace = true, default-features = true } sc-network-sync = { workspace = true, default-features = true } -sc-service = { features = ["test-helpers"], workspace = true, default-features = true } +sc-service = { workspace = true, default-features = true } sc-transaction-pool-api = { workspace = true, default-features = true } sp-api = { workspace = true, default-features 
= true } sp-blockchain = { workspace = true, default-features = true } diff --git a/substrate/client/service/test/src/client/mod.rs b/substrate/client/service/test/src/client/mod.rs index 55bbfcdd8594..ead90c4c65d8 100644 --- a/substrate/client/service/test/src/client/mod.rs +++ b/substrate/client/service/test/src/client/mod.rs @@ -29,7 +29,7 @@ use sc_consensus::{ BlockCheckParams, BlockImport, BlockImportParams, ForkChoiceStrategy, ImportResult, }; use sc_executor::WasmExecutor; -use sc_service::client::{new_in_mem, Client, LocalCallExecutor}; +use sc_service::client::{new_with_backend, Client, LocalCallExecutor}; use sp_api::ProvideRuntimeApi; use sp_consensus::{BlockOrigin, Error as ConsensusError, SelectChain}; use sp_core::{testing::TaskExecutor, traits::CallContext, H256}; @@ -2087,13 +2087,13 @@ fn cleans_up_closed_notification_sinks_on_block_import() { // NOTE: we need to build the client here instead of using the client // provided by test_runtime_client otherwise we can't access the private // `import_notification_sinks` and `finality_notification_sinks` fields. - let mut client = new_in_mem::<_, Block, _, RuntimeApi>( + let mut client = new_with_backend::<_, _, Block, _, RuntimeApi>( backend, executor, genesis_block_builder, + Box::new(TaskExecutor::new()), None, None, - Box::new(TaskExecutor::new()), client_config, ) .unwrap(); diff --git a/substrate/frame/metadata-hash-extension/Cargo.toml b/substrate/frame/metadata-hash-extension/Cargo.toml index bca2c3ffb198..8f4ba922984c 100644 --- a/substrate/frame/metadata-hash-extension/Cargo.toml +++ b/substrate/frame/metadata-hash-extension/Cargo.toml @@ -25,7 +25,7 @@ substrate-test-runtime-client = { workspace = true } sp-api = { workspace = true, default-features = true } sp-transaction-pool = { workspace = true, default-features = true } merkleized-metadata = { workspace = true } -frame-metadata = { features = ["current"], workspace = true, default-features = true } +frame-metadata = { features = ["current", "unstable"], workspace = true, default-features = true } sp-tracing = { workspace = true, default-features = true } [features] diff --git a/substrate/frame/preimage/src/benchmarking.rs b/substrate/frame/preimage/src/benchmarking.rs index 3d0c5b900579..ea635bf3ef77 100644 --- a/substrate/frame/preimage/src/benchmarking.rs +++ b/substrate/frame/preimage/src/benchmarking.rs @@ -17,14 +17,13 @@ //! Preimage pallet benchmarking. -use super::*; use alloc::vec; -use frame_benchmarking::v1::{account, benchmarks, whitelisted_caller, BenchmarkError}; +use frame_benchmarking::v2::*; use frame_support::assert_ok; use frame_system::RawOrigin; use sp_runtime::traits::Bounded; -use crate::Pallet as Preimage; +use crate::*; fn funded_account() -> T::AccountId { let caller: T::AccountId = whitelisted_caller(); @@ -43,206 +42,225 @@ fn sized_preimage_and_hash(size: u32) -> (Vec, T::Hash) { (preimage, hash) } -benchmarks! { +fn insert_old_unrequested(s: u32) -> ::Hash { + let acc = account("old", s, 0); + T::Currency::make_free_balance_be(&acc, BalanceOf::::max_value() / 2u32.into()); + + // The preimage size does not matter here as it is not touched. + let preimage = s.to_le_bytes(); + let hash = ::Hashing::hash(&preimage[..]); + + #[allow(deprecated)] + StatusFor::::insert( + &hash, + OldRequestStatus::Unrequested { deposit: (acc, 123u32.into()), len: preimage.len() as u32 }, + ); + hash +} + +#[benchmarks] +mod benchmarks { + use super::*; + // Expensive note - will reserve. - note_preimage { - let s in 0 .. 
MAX_SIZE; + #[benchmark] + fn note_preimage(s: Linear<0, MAX_SIZE>) { let caller = funded_account::(); let (preimage, hash) = sized_preimage_and_hash::(s); - }: _(RawOrigin::Signed(caller), preimage) - verify { - assert!(Preimage::::have_preimage(&hash)); + + #[extrinsic_call] + _(RawOrigin::Signed(caller), preimage); + + assert!(Pallet::::have_preimage(&hash)); } + // Cheap note - will not reserve since it was requested. - note_requested_preimage { - let s in 0 .. MAX_SIZE; + #[benchmark] + fn note_requested_preimage(s: Linear<0, MAX_SIZE>) { let caller = funded_account::(); let (preimage, hash) = sized_preimage_and_hash::(s); - assert_ok!(Preimage::::request_preimage( + assert_ok!(Pallet::::request_preimage( T::ManagerOrigin::try_successful_origin() .expect("ManagerOrigin has no successful origin required for the benchmark"), hash, )); - }: note_preimage(RawOrigin::Signed(caller), preimage) - verify { - assert!(Preimage::::have_preimage(&hash)); + + #[extrinsic_call] + note_preimage(RawOrigin::Signed(caller), preimage); + + assert!(Pallet::::have_preimage(&hash)); } + // Cheap note - will not reserve since it's the manager. - note_no_deposit_preimage { - let s in 0 .. MAX_SIZE; + #[benchmark] + fn note_no_deposit_preimage(s: Linear<0, MAX_SIZE>) { + let o = T::ManagerOrigin::try_successful_origin() + .expect("ManagerOrigin has no successful origin required for the benchmark"); let (preimage, hash) = sized_preimage_and_hash::(s); - assert_ok!(Preimage::::request_preimage( - T::ManagerOrigin::try_successful_origin() - .expect("ManagerOrigin has no successful origin required for the benchmark"), - hash, - )); - }: note_preimage( - T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, - preimage - ) verify { - assert!(Preimage::::have_preimage(&hash)); + assert_ok!(Pallet::::request_preimage(o.clone(), hash,)); + + #[extrinsic_call] + note_preimage(o as T::RuntimeOrigin, preimage); + + assert!(Pallet::::have_preimage(&hash)); } // Expensive unnote - will unreserve. - unnote_preimage { + #[benchmark] + fn unnote_preimage() { let caller = funded_account::(); let (preimage, hash) = preimage_and_hash::(); - assert_ok!(Preimage::::note_preimage(RawOrigin::Signed(caller.clone()).into(), preimage)); - }: _(RawOrigin::Signed(caller), hash) - verify { - assert!(!Preimage::::have_preimage(&hash)); + assert_ok!(Pallet::::note_preimage(RawOrigin::Signed(caller.clone()).into(), preimage)); + + #[extrinsic_call] + _(RawOrigin::Signed(caller), hash); + + assert!(!Pallet::::have_preimage(&hash)); } + // Cheap unnote - will not unreserve since there's no deposit held. - unnote_no_deposit_preimage { + #[benchmark] + fn unnote_no_deposit_preimage() { + let o = T::ManagerOrigin::try_successful_origin() + .expect("ManagerOrigin has no successful origin required for the benchmark"); let (preimage, hash) = preimage_and_hash::(); - assert_ok!(Preimage::::note_preimage( - T::ManagerOrigin::try_successful_origin() - .expect("ManagerOrigin has no successful origin required for the benchmark"), - preimage, - )); - }: unnote_preimage( - T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, - hash - ) verify { - assert!(!Preimage::::have_preimage(&hash)); + assert_ok!(Pallet::::note_preimage(o.clone(), preimage,)); + + #[extrinsic_call] + unnote_preimage(o as T::RuntimeOrigin, hash); + + assert!(!Pallet::::have_preimage(&hash)); } // Expensive request - will unreserve the noter's deposit. 
- request_preimage { + #[benchmark] + fn request_preimage() { + let o = T::ManagerOrigin::try_successful_origin() + .expect("ManagerOrigin has no successful origin required for the benchmark"); let (preimage, hash) = preimage_and_hash::(); let noter = funded_account::(); - assert_ok!(Preimage::::note_preimage(RawOrigin::Signed(noter.clone()).into(), preimage)); - }: _( - T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, - hash - ) verify { - let ticket = TicketOf::::new(¬er, Footprint { count: 1, size: MAX_SIZE as u64 }).unwrap(); - let s = RequestStatus::Requested { maybe_ticket: Some((noter, ticket)), count: 1, maybe_len: Some(MAX_SIZE) }; + assert_ok!(Pallet::::note_preimage(RawOrigin::Signed(noter.clone()).into(), preimage)); + + #[extrinsic_call] + _(o as T::RuntimeOrigin, hash); + + let ticket = + TicketOf::::new(¬er, Footprint { count: 1, size: MAX_SIZE as u64 }).unwrap(); + let s = RequestStatus::Requested { + maybe_ticket: Some((noter, ticket)), + count: 1, + maybe_len: Some(MAX_SIZE), + }; assert_eq!(RequestStatusFor::::get(&hash), Some(s)); } + // Cheap request - would unreserve the deposit but none was held. - request_no_deposit_preimage { + #[benchmark] + fn request_no_deposit_preimage() { + let o = T::ManagerOrigin::try_successful_origin() + .expect("ManagerOrigin has no successful origin required for the benchmark"); let (preimage, hash) = preimage_and_hash::(); - assert_ok!(Preimage::::note_preimage( - T::ManagerOrigin::try_successful_origin() - .expect("ManagerOrigin has no successful origin required for the benchmark"), - preimage, - )); - }: request_preimage( - T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, - hash - ) verify { - let s = RequestStatus::Requested { maybe_ticket: None, count: 2, maybe_len: Some(MAX_SIZE) }; + assert_ok!(Pallet::::note_preimage(o.clone(), preimage,)); + + #[extrinsic_call] + request_preimage(o as T::RuntimeOrigin, hash); + + let s = + RequestStatus::Requested { maybe_ticket: None, count: 2, maybe_len: Some(MAX_SIZE) }; assert_eq!(RequestStatusFor::::get(&hash), Some(s)); } + // Cheap request - the preimage is not yet noted, so deposit to unreserve. - request_unnoted_preimage { + #[benchmark] + fn request_unnoted_preimage() { + let o = T::ManagerOrigin::try_successful_origin() + .expect("ManagerOrigin has no successful origin required for the benchmark"); let (_, hash) = preimage_and_hash::(); - }: request_preimage( - T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, - hash - ) verify { + + #[extrinsic_call] + request_preimage(o as T::RuntimeOrigin, hash); + let s = RequestStatus::Requested { maybe_ticket: None, count: 1, maybe_len: None }; assert_eq!(RequestStatusFor::::get(&hash), Some(s)); } + // Cheap request - the preimage is already requested, so just a counter bump. 
- request_requested_preimage { + #[benchmark] + fn request_requested_preimage() { + let o = T::ManagerOrigin::try_successful_origin() + .expect("ManagerOrigin has no successful origin required for the benchmark"); let (_, hash) = preimage_and_hash::(); - assert_ok!(Preimage::::request_preimage( - T::ManagerOrigin::try_successful_origin() - .expect("ManagerOrigin has no successful origin required for the benchmark"), - hash, - )); - }: request_preimage( - T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, - hash - ) verify { + assert_ok!(Pallet::::request_preimage(o.clone(), hash,)); + + #[extrinsic_call] + request_preimage(o as T::RuntimeOrigin, hash); + let s = RequestStatus::Requested { maybe_ticket: None, count: 2, maybe_len: None }; assert_eq!(RequestStatusFor::::get(&hash), Some(s)); } // Expensive unrequest - last reference and it's noted, so will destroy the preimage. - unrequest_preimage { + #[benchmark] + fn unrequest_preimage() { + let o = T::ManagerOrigin::try_successful_origin() + .expect("ManagerOrigin has no successful origin required for the benchmark"); let (preimage, hash) = preimage_and_hash::(); - assert_ok!(Preimage::::request_preimage( - T::ManagerOrigin::try_successful_origin() - .expect("ManagerOrigin has no successful origin required for the benchmark"), - hash, - )); - assert_ok!(Preimage::::note_preimage( - T::ManagerOrigin::try_successful_origin() - .expect("ManagerOrigin has no successful origin required for the benchmark"), - preimage, - )); - }: _( - T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, - hash - ) verify { + assert_ok!(Pallet::::request_preimage(o.clone(), hash,)); + assert_ok!(Pallet::::note_preimage(o.clone(), preimage)); + + #[extrinsic_call] + _(o as T::RuntimeOrigin, hash); + assert_eq!(RequestStatusFor::::get(&hash), None); } + // Cheap unrequest - last reference, but it's not noted. - unrequest_unnoted_preimage { + #[benchmark] + fn unrequest_unnoted_preimage() { + let o = T::ManagerOrigin::try_successful_origin() + .expect("ManagerOrigin has no successful origin required for the benchmark"); let (_, hash) = preimage_and_hash::(); - assert_ok!(Preimage::::request_preimage( - T::ManagerOrigin::try_successful_origin() - .expect("ManagerOrigin has no successful origin required for the benchmark"), - hash, - )); - }: unrequest_preimage( - T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, - hash - ) verify { + assert_ok!(Pallet::::request_preimage(o.clone(), hash,)); + + #[extrinsic_call] + unrequest_preimage(o as T::RuntimeOrigin, hash); + assert_eq!(RequestStatusFor::::get(&hash), None); } + // Cheap unrequest - not the last reference. 
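The migration pattern applied throughout this file, reduced to a minimal sketch for a hypothetical extrinsic `do_thing` (the names `do_thing`, `MAX`, and `did_thing` are illustrative only): the `benchmarks! { .. }` block becomes a `#[benchmarks]` module, the range syntax `let s in 0 .. MAX` becomes a typed `Linear<0, MAX>` argument, the call section becomes `#[extrinsic_call]`, and the `verify` block becomes plain code after the call.

#[benchmarks]
mod benches {
    use super::*;

    #[benchmark]
    fn do_thing(s: Linear<0, MAX>) {
        // Setup runs before the measured call.
        let caller: T::AccountId = whitelisted_caller();

        // `_` expands to the extrinsic named after this function.
        #[extrinsic_call]
        _(RawOrigin::Signed(caller), s);

        // Everything after the call is the old `verify` section.
        assert!(Pallet::<T>::did_thing());
    }

    impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test);
}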
+ // Cheap unrequest - not the last reference. - unrequest_multi_referenced_preimage { + #[benchmark] + fn unrequest_multi_referenced_preimage() { + let o = T::ManagerOrigin::try_successful_origin() + .expect("ManagerOrigin has no successful origin required for the benchmark"); let (_, hash) = preimage_and_hash::<T>(); - assert_ok!(Preimage::<T>::request_preimage( - T::ManagerOrigin::try_successful_origin() - .expect("ManagerOrigin has no successful origin required for the benchmark"), - hash, - )); - assert_ok!(Preimage::<T>::request_preimage( - T::ManagerOrigin::try_successful_origin() - .expect("ManagerOrigin has no successful origin required for the benchmark"), - hash, - )); - }: unrequest_preimage( - T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, - hash - ) verify { + assert_ok!(Pallet::<T>::request_preimage(o.clone(), hash,)); + assert_ok!(Pallet::<T>::request_preimage(o.clone(), hash,)); + + #[extrinsic_call] + unrequest_preimage(o as T::RuntimeOrigin, hash); + let s = RequestStatus::Requested { maybe_ticket: None, count: 1, maybe_len: None }; assert_eq!(RequestStatusFor::<T>::get(&hash), Some(s)); } - ensure_updated { - let n in 1..MAX_HASH_UPGRADE_BULK_COUNT; - + #[benchmark] + fn ensure_updated(n: Linear<1, MAX_HASH_UPGRADE_BULK_COUNT>) { let caller = funded_account::<T>(); let hashes = (0..n).map(|i| insert_old_unrequested::<T>(i)).collect::<Vec<_>>(); - }: _(RawOrigin::Signed(caller), hashes) - verify { + + #[extrinsic_call] + _(RawOrigin::Signed(caller), hashes); + assert_eq!(RequestStatusFor::<T>::iter_keys().count(), n as usize); #[allow(deprecated)] let c = StatusFor::<T>::iter_keys().count(); assert_eq!(c, 0); } - impl_benchmark_test_suite!(Preimage, crate::mock::new_test_ext(), crate::mock::Test); -} - -fn insert_old_unrequested<T: Config>(s: u32) -> <T as frame_system::Config>::Hash { - let acc = account("old", s, 0); - T::Currency::make_free_balance_be(&acc, BalanceOf::<T>::max_value() / 2u32.into()); - - // The preimage size does not matter here as it is not touched. - let preimage = s.to_le_bytes(); - let hash = <T as frame_system::Config>::Hashing::hash(&preimage[..]); - - #[allow(deprecated)] - StatusFor::<T>::insert( - &hash, - OldRequestStatus::Unrequested { deposit: (acc, 123u32.into()), len: preimage.len() as u32 }, - ); - hash + impl_benchmark_test_suite! { + Pallet, + mock::new_test_ext(), + mock::Test + } } diff --git a/substrate/frame/preimage/src/weights.rs b/substrate/frame/preimage/src/weights.rs index edb2eed9c75a..a3aec7e7546e 100644 --- a/substrate/frame/preimage/src/weights.rs +++ b/substrate/frame/preimage/src/weights.rs @@ -18,27 +18,25 @@ //! Autogenerated weights for `pallet_preimage` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-11-28, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //!
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate-node +// target/production/substrate-node // benchmark // pallet -// --chain=dev // --steps=50 // --repeat=20 -// --pallet=pallet_preimage -// --no-storage-info -// --no-median-slopes -// --no-min-squares // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --output=./substrate/frame/preimage/src/weights.rs +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_preimage +// --chain=dev // --header=./substrate/HEADER-APACHE2 +// --output=./substrate/frame/preimage/src/weights.rs // --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -84,10 +82,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `7` // Estimated: `6012` - // Minimum execution time: 51_981_000 picoseconds. - Weight::from_parts(52_228_000, 6012) - // Standard Error: 6 - .saturating_add(Weight::from_parts(2_392, 0).saturating_mul(s.into())) + // Minimum execution time: 51_305_000 picoseconds. + Weight::from_parts(51_670_000, 6012) + // Standard Error: 5 + .saturating_add(Weight::from_parts(2_337, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -102,10 +100,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `68` // Estimated: `3556` - // Minimum execution time: 15_835_000 picoseconds. - Weight::from_parts(16_429_000, 3556) - // Standard Error: 8 - .saturating_add(Weight::from_parts(2_647, 0).saturating_mul(s.into())) + // Minimum execution time: 16_204_000 picoseconds. + Weight::from_parts(16_613_000, 3556) + // Standard Error: 6 + .saturating_add(Weight::from_parts(2_503, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -120,10 +118,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `68` // Estimated: `3556` - // Minimum execution time: 15_263_000 picoseconds. - Weight::from_parts(15_578_000, 3556) - // Standard Error: 7 - .saturating_add(Weight::from_parts(2_598, 0).saturating_mul(s.into())) + // Minimum execution time: 15_118_000 picoseconds. + Weight::from_parts(15_412_000, 3556) + // Standard Error: 6 + .saturating_add(Weight::from_parts(2_411, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -139,8 +137,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `206` // Estimated: `3820` - // Minimum execution time: 64_189_000 picoseconds. - Weight::from_parts(70_371_000, 3820) + // Minimum execution time: 57_218_000 picoseconds. + Weight::from_parts(61_242_000, 3820) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -154,8 +152,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3556` - // Minimum execution time: 27_582_000 picoseconds. - Weight::from_parts(31_256_000, 3556) + // Minimum execution time: 25_140_000 picoseconds. 
+ Weight::from_parts(27_682_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -167,8 +165,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `150` // Estimated: `3556` - // Minimum execution time: 27_667_000 picoseconds. - Weight::from_parts(32_088_000, 3556) + // Minimum execution time: 25_296_000 picoseconds. + Weight::from_parts(27_413_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -180,8 +178,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3556` - // Minimum execution time: 16_065_000 picoseconds. - Weight::from_parts(20_550_000, 3556) + // Minimum execution time: 15_011_000 picoseconds. + Weight::from_parts(16_524_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -193,8 +191,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3556` - // Minimum execution time: 13_638_000 picoseconds. - Weight::from_parts(16_979_000, 3556) + // Minimum execution time: 14_649_000 picoseconds. + Weight::from_parts(15_439_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -206,8 +204,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `68` // Estimated: `3556` - // Minimum execution time: 11_383_000 picoseconds. - Weight::from_parts(12_154_000, 3556) + // Minimum execution time: 10_914_000 picoseconds. + Weight::from_parts(11_137_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -221,8 +219,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3556` - // Minimum execution time: 22_832_000 picoseconds. - Weight::from_parts(30_716_000, 3556) + // Minimum execution time: 22_512_000 picoseconds. + Weight::from_parts(24_376_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -234,8 +232,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `68` // Estimated: `3556` - // Minimum execution time: 10_685_000 picoseconds. - Weight::from_parts(12_129_000, 3556) + // Minimum execution time: 10_571_000 picoseconds. + Weight::from_parts(10_855_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -247,8 +245,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `68` // Estimated: `3556` - // Minimum execution time: 10_394_000 picoseconds. - Weight::from_parts(10_951_000, 3556) + // Minimum execution time: 10_312_000 picoseconds. + Weight::from_parts(10_653_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -267,10 +265,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0 + n * (227 ±0)` // Estimated: `6012 + n * (2830 ±0)` - // Minimum execution time: 62_203_000 picoseconds. - Weight::from_parts(63_735_000, 6012) - // Standard Error: 59_589 - .saturating_add(Weight::from_parts(59_482_352, 0).saturating_mul(n.into())) + // Minimum execution time: 61_990_000 picoseconds. 
+ Weight::from_parts(62_751_000, 6012) + // Standard Error: 44_079 + .saturating_add(Weight::from_parts(57_343_378, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes((4_u64).saturating_mul(n.into()))) @@ -295,10 +293,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `7` // Estimated: `6012` - // Minimum execution time: 51_981_000 picoseconds. - Weight::from_parts(52_228_000, 6012) - // Standard Error: 6 - .saturating_add(Weight::from_parts(2_392, 0).saturating_mul(s.into())) + // Minimum execution time: 51_305_000 picoseconds. + Weight::from_parts(51_670_000, 6012) + // Standard Error: 5 + .saturating_add(Weight::from_parts(2_337, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -313,10 +311,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `68` // Estimated: `3556` - // Minimum execution time: 15_835_000 picoseconds. - Weight::from_parts(16_429_000, 3556) - // Standard Error: 8 - .saturating_add(Weight::from_parts(2_647, 0).saturating_mul(s.into())) + // Minimum execution time: 16_204_000 picoseconds. + Weight::from_parts(16_613_000, 3556) + // Standard Error: 6 + .saturating_add(Weight::from_parts(2_503, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -331,10 +329,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `68` // Estimated: `3556` - // Minimum execution time: 15_263_000 picoseconds. - Weight::from_parts(15_578_000, 3556) - // Standard Error: 7 - .saturating_add(Weight::from_parts(2_598, 0).saturating_mul(s.into())) + // Minimum execution time: 15_118_000 picoseconds. + Weight::from_parts(15_412_000, 3556) + // Standard Error: 6 + .saturating_add(Weight::from_parts(2_411, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -350,8 +348,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `206` // Estimated: `3820` - // Minimum execution time: 64_189_000 picoseconds. - Weight::from_parts(70_371_000, 3820) + // Minimum execution time: 57_218_000 picoseconds. + Weight::from_parts(61_242_000, 3820) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -365,8 +363,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3556` - // Minimum execution time: 27_582_000 picoseconds. - Weight::from_parts(31_256_000, 3556) + // Minimum execution time: 25_140_000 picoseconds. + Weight::from_parts(27_682_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -378,8 +376,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `150` // Estimated: `3556` - // Minimum execution time: 27_667_000 picoseconds. - Weight::from_parts(32_088_000, 3556) + // Minimum execution time: 25_296_000 picoseconds. + Weight::from_parts(27_413_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -391,8 +389,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3556` - // Minimum execution time: 16_065_000 picoseconds. 
- Weight::from_parts(20_550_000, 3556) + // Minimum execution time: 15_011_000 picoseconds. + Weight::from_parts(16_524_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -404,8 +402,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3556` - // Minimum execution time: 13_638_000 picoseconds. - Weight::from_parts(16_979_000, 3556) + // Minimum execution time: 14_649_000 picoseconds. + Weight::from_parts(15_439_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -417,8 +415,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `68` // Estimated: `3556` - // Minimum execution time: 11_383_000 picoseconds. - Weight::from_parts(12_154_000, 3556) + // Minimum execution time: 10_914_000 picoseconds. + Weight::from_parts(11_137_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -432,8 +430,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3556` - // Minimum execution time: 22_832_000 picoseconds. - Weight::from_parts(30_716_000, 3556) + // Minimum execution time: 22_512_000 picoseconds. + Weight::from_parts(24_376_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -445,8 +443,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `68` // Estimated: `3556` - // Minimum execution time: 10_685_000 picoseconds. - Weight::from_parts(12_129_000, 3556) + // Minimum execution time: 10_571_000 picoseconds. + Weight::from_parts(10_855_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -458,8 +456,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `68` // Estimated: `3556` - // Minimum execution time: 10_394_000 picoseconds. - Weight::from_parts(10_951_000, 3556) + // Minimum execution time: 10_312_000 picoseconds. + Weight::from_parts(10_653_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -478,10 +476,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0 + n * (227 ±0)` // Estimated: `6012 + n * (2830 ±0)` - // Minimum execution time: 62_203_000 picoseconds. - Weight::from_parts(63_735_000, 6012) - // Standard Error: 59_589 - .saturating_add(Weight::from_parts(59_482_352, 0).saturating_mul(n.into())) + // Minimum execution time: 61_990_000 picoseconds. 
+ Weight::from_parts(62_751_000, 6012) + // Standard Error: 44_079 + .saturating_add(Weight::from_parts(57_343_378, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes((4_u64).saturating_mul(n.into()))) diff --git a/substrate/frame/revive/Cargo.toml b/substrate/frame/revive/Cargo.toml index 81fbbc8cf38e..677ef0e1367f 100644 --- a/substrate/frame/revive/Cargo.toml +++ b/substrate/frame/revive/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] environmental = { workspace = true } paste = { workspace = true } -polkavm = { version = "0.13.0", default-features = false } +polkavm = { version = "0.17.0", default-features = false } bitflags = { workspace = true } codec = { features = ["derive", "max-encoded-len"], workspace = true } scale-info = { features = ["derive"], workspace = true } diff --git a/substrate/frame/revive/fixtures/Cargo.toml b/substrate/frame/revive/fixtures/Cargo.toml index 7a5452853d65..798ed8c75a5a 100644 --- a/substrate/frame/revive/fixtures/Cargo.toml +++ b/substrate/frame/revive/fixtures/Cargo.toml @@ -18,10 +18,8 @@ anyhow = { workspace = true, default-features = true, optional = true } log = { workspace = true } [build-dependencies] -parity-wasm = { workspace = true } -tempfile = { workspace = true } toml = { workspace = true } -polkavm-linker = { version = "0.14.0" } +polkavm-linker = { version = "0.17.0" } anyhow = { workspace = true, default-features = true } [features] diff --git a/substrate/frame/revive/fixtures/build.rs b/substrate/frame/revive/fixtures/build.rs index 3472e0846efd..46cd5760ca4e 100644 --- a/substrate/frame/revive/fixtures/build.rs +++ b/substrate/frame/revive/fixtures/build.rs @@ -20,7 +20,8 @@ use anyhow::Result; use anyhow::{bail, Context}; use std::{ - cfg, env, fs, + env, fs, + io::Write, path::{Path, PathBuf}, process::Command, }; @@ -82,7 +83,7 @@ fn create_cargo_toml<'a>( entries: impl Iterator, output_dir: &Path, ) -> Result<()> { - let mut cargo_toml: toml::Value = toml::from_str(include_str!("./build/Cargo.toml"))?; + let mut cargo_toml: toml::Value = toml::from_str(include_str!("./build/_Cargo.toml"))?; let mut set_dep = |name, path| -> Result<()> { cargo_toml["dependencies"][name]["path"] = toml::Value::String( fixtures_dir.join(path).canonicalize()?.to_str().unwrap().to_string(), @@ -108,21 +109,24 @@ fn create_cargo_toml<'a>( let cargo_toml = toml::to_string_pretty(&cargo_toml)?; fs::write(output_dir.join("Cargo.toml"), cargo_toml.clone()) .with_context(|| format!("Failed to write {cargo_toml:?}"))?; + fs::copy( + fixtures_dir.join("build/_rust-toolchain.toml"), + output_dir.join("rust-toolchain.toml"), + ) + .context("Failed to write toolchain file")?; Ok(()) } -fn invoke_build(target: &Path, current_dir: &Path) -> Result<()> { +fn invoke_build(current_dir: &Path) -> Result<()> { let encoded_rustflags = ["-Dwarnings"].join("\x1f"); - let mut build_command = Command::new(env::var("CARGO")?); + let mut build_command = Command::new("cargo"); build_command .current_dir(current_dir) .env_clear() .env("PATH", env::var("PATH").unwrap_or_default()) .env("CARGO_ENCODED_RUSTFLAGS", encoded_rustflags) - .env("RUSTC_BOOTSTRAP", "1") .env("RUSTUP_HOME", env::var("RUSTUP_HOME").unwrap_or_default()) - .env("RUSTUP_TOOLCHAIN", env::var("RUSTUP_TOOLCHAIN").unwrap_or_default()) .args([ "build", "--release", @@ -130,7 +134,7 @@ fn invoke_build(target: &Path, current_dir: 
&Path) -> Result<()> { "-Zbuild-std-features=panic_immediate_abort", ]) .arg("--target") - .arg(target); + .arg(polkavm_linker::target_json_64_path().unwrap()); if let Ok(toolchain) = env::var(OVERRIDE_RUSTUP_TOOLCHAIN_ENV_VAR) { build_command.env("RUSTUP_TOOLCHAIN", &toolchain); @@ -168,7 +172,7 @@ fn write_output(build_dir: &Path, out_dir: &Path, entries: Vec<Entry>) -> Result<()> for entry in entries { post_process( &build_dir - .join("target/riscv32emac-unknown-none-polkavm/release") + .join("target/riscv64emac-unknown-none-polkavm/release") .join(entry.name()), &out_dir.join(entry.out_filename()), )?; @@ -177,11 +181,61 @@ fn write_output(build_dir: &Path, out_dir: &Path, entries: Vec<Entry>) -> Result<()> Ok(()) } +/// Create a directory in the `target` as output directory +fn create_out_dir() -> Result<PathBuf> { + let temp_dir: PathBuf = env::var("OUT_DIR")?.into(); + + // this is set in case the user has overridden the target directory + let out_dir = if let Ok(path) = env::var("CARGO_TARGET_DIR") { + path.into() + } else { + // otherwise just traverse up from the out dir + let mut out_dir: PathBuf = temp_dir.clone(); + loop { + if !out_dir.pop() { + bail!("Cannot find project root.") + } + if out_dir.join("Cargo.lock").exists() { + break; + } + } + out_dir.join("target") + } + .join("pallet-revive-fixtures"); + + // clean up some leftover symlink from previous versions of this script + if out_dir.exists() && !out_dir.is_dir() { + fs::remove_file(&out_dir)?; + } + fs::create_dir_all(&out_dir).context("Failed to create output directory")?; + + // write the location of the out dir so it can be found later + let mut file = fs::File::create(temp_dir.join("fixture_location.rs")) + .context("Failed to create fixture_location.rs")?; + write!( + file, + r#" + #[allow(dead_code)] + const FIXTURE_DIR: &str = "{0}"; + macro_rules!
fixture {{ + ($name: literal) => {{ + include_bytes!(concat!("{0}", "/", $name, ".polkavm")) + }}; + }} + "#, + out_dir.display() + ) + .context("Failed to write to fixture_location.rs")?; + + Ok(out_dir) +} + pub fn main() -> Result<()> { let fixtures_dir: PathBuf = env::var("CARGO_MANIFEST_DIR")?.into(); let contracts_dir = fixtures_dir.join("contracts"); - let out_dir: PathBuf = env::var("OUT_DIR")?.into(); - let target = fixtures_dir.join("riscv32emac-unknown-none-polkavm.json"); + let out_dir = create_out_dir().context("Cannot determine output directory")?; + let build_dir = out_dir.join("build"); + fs::create_dir_all(&build_dir).context("Failed to create build directory")?; println!("cargo::rerun-if-env-changed={OVERRIDE_RUSTUP_TOOLCHAIN_ENV_VAR}"); println!("cargo::rerun-if-env-changed={OVERRIDE_STRIP_ENV_VAR}"); @@ -199,25 +253,9 @@ pub fn main() -> Result<()> { return Ok(()) } - let tmp_dir = tempfile::tempdir()?; - let tmp_dir_path = tmp_dir.path(); - - create_cargo_toml(&fixtures_dir, entries.iter(), tmp_dir.path())?; - invoke_build(&target, tmp_dir_path)?; - - write_output(tmp_dir_path, &out_dir, entries)?; - - #[cfg(unix)] - if let Ok(symlink_dir) = env::var("CARGO_WORKSPACE_ROOT_DIR") { - let symlink_dir: PathBuf = symlink_dir.into(); - let symlink_dir: PathBuf = symlink_dir.join("target").join("pallet-revive-fixtures"); - if symlink_dir.is_symlink() { - fs::remove_file(&symlink_dir) - .with_context(|| format!("Failed to remove_file {symlink_dir:?}"))?; - } - std::os::unix::fs::symlink(&out_dir, &symlink_dir) - .with_context(|| format!("Failed to symlink {out_dir:?} -> {symlink_dir:?}"))?; - } + create_cargo_toml(&fixtures_dir, entries.iter(), &build_dir)?; + invoke_build(&build_dir)?; + write_output(&build_dir, &out_dir, entries)?; Ok(()) } diff --git a/substrate/frame/revive/fixtures/build/Cargo.toml b/substrate/frame/revive/fixtures/build/_Cargo.toml similarity index 80% rename from substrate/frame/revive/fixtures/build/Cargo.toml rename to substrate/frame/revive/fixtures/build/_Cargo.toml index 5d0e256e2e73..beaabd83403e 100644 --- a/substrate/frame/revive/fixtures/build/Cargo.toml +++ b/substrate/frame/revive/fixtures/build/_Cargo.toml @@ -4,6 +4,9 @@ publish = false version = "1.0.0" edition = "2021" +# Make sure this is not included into the workspace +[workspace] + # Binary targets are injected dynamically by the build script. 
[[bin]] @@ -11,7 +14,7 @@ edition = "2021" [dependencies] uapi = { package = 'pallet-revive-uapi', path = "", default-features = false } common = { package = 'pallet-revive-fixtures-common', path = "" } -polkavm-derive = { version = "0.14.0" } +polkavm-derive = { version = "0.17.0" } [profile.release] opt-level = 3 diff --git a/substrate/frame/revive/fixtures/build/_rust-toolchain.toml b/substrate/frame/revive/fixtures/build/_rust-toolchain.toml new file mode 100644 index 000000000000..4c757c708d58 --- /dev/null +++ b/substrate/frame/revive/fixtures/build/_rust-toolchain.toml @@ -0,0 +1,4 @@ +[toolchain] +channel = "nightly-2024-11-19" +components = ["rust-src"] +profile = "minimal" diff --git a/substrate/frame/revive/fixtures/riscv32emac-unknown-none-polkavm.json b/substrate/frame/revive/fixtures/riscv32emac-unknown-none-polkavm.json deleted file mode 100644 index bbd54cdefbac..000000000000 --- a/substrate/frame/revive/fixtures/riscv32emac-unknown-none-polkavm.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "arch": "riscv32", - "cpu": "generic-rv32", - "crt-objects-fallback": "false", - "data-layout": "e-m:e-p:32:32-i64:64-n32-S32", - "eh-frame-header": false, - "emit-debug-gdb-scripts": false, - "features": "+e,+m,+a,+c,+lui-addi-fusion,+fast-unaligned-access,+xtheadcondmov", - "linker": "rust-lld", - "linker-flavor": "ld.lld", - "llvm-abiname": "ilp32e", - "llvm-target": "riscv32", - "max-atomic-width": 32, - "panic-strategy": "abort", - "relocation-model": "pie", - "target-pointer-width": "32", - "singlethread": true, - "pre-link-args": { - "ld": [ - "--emit-relocs", - "--unique", - "--relocatable" - ] - }, - "env": "polkavm" -} diff --git a/substrate/frame/revive/fixtures/src/lib.rs b/substrate/frame/revive/fixtures/src/lib.rs index cc84daec9b59..24f6ee547dc7 100644 --- a/substrate/frame/revive/fixtures/src/lib.rs +++ b/substrate/frame/revive/fixtures/src/lib.rs @@ -19,10 +19,13 @@ extern crate alloc; +// generated file that tells us where to find the fixtures +include!(concat!(env!("OUT_DIR"), "/fixture_location.rs")); + /// Load a given wasm module and returns a wasm binary contents along with it's hash. #[cfg(feature = "std")] pub fn compile_module(fixture_name: &str) -> anyhow::Result<(Vec, sp_core::H256)> { - let out_dir: std::path::PathBuf = env!("OUT_DIR").into(); + let out_dir: std::path::PathBuf = FIXTURE_DIR.into(); let fixture_path = out_dir.join(format!("{fixture_name}.polkavm")); log::debug!("Loading fixture from {fixture_path:?}"); let binary = std::fs::read(fixture_path)?; @@ -36,12 +39,6 @@ pub fn compile_module(fixture_name: &str) -> anyhow::Result<(Vec, sp_core::H /// available in no-std environments (runtime benchmarks). pub mod bench { use alloc::vec::Vec; - - macro_rules! 
fixture { - ($name: literal) => { - include_bytes!(concat!(env!("OUT_DIR"), "/", $name, ".polkavm")) - }; - } pub const DUMMY: &[u8] = fixture!("dummy"); pub const NOOP: &[u8] = fixture!("noop"); pub const INSTR: &[u8] = fixture!("instr_benchmark"); @@ -61,7 +58,7 @@ pub mod bench { mod test { #[test] fn out_dir_should_have_compiled_mocks() { - let out_dir: std::path::PathBuf = env!("OUT_DIR").into(); + let out_dir: std::path::PathBuf = crate::FIXTURE_DIR.into(); assert!(out_dir.join("dummy.polkavm").exists()); } } diff --git a/substrate/frame/revive/proc-macro/src/lib.rs b/substrate/frame/revive/proc-macro/src/lib.rs index 012b4bfab9a9..6814add128d9 100644 --- a/substrate/frame/revive/proc-macro/src/lib.rs +++ b/substrate/frame/revive/proc-macro/src/lib.rs @@ -79,6 +79,7 @@ use syn::{parse_quote, punctuated::Punctuated, spanned::Spanned, token::Comma, F /// - `Result<(), TrapReason>`, /// - `Result`, /// - `Result`. +/// - `Result`. /// /// The macro expands to `pub struct Env` declaration, with the following traits implementations: /// - `pallet_revive::wasm::Environment> where E: Ext` @@ -127,6 +128,7 @@ struct HostFn { enum HostFnReturn { Unit, U32, + U64, ReturnCode, } @@ -134,8 +136,7 @@ impl HostFnReturn { fn map_output(&self) -> TokenStream2 { match self { Self::Unit => quote! { |_| None }, - Self::U32 => quote! { |ret_val| Some(ret_val) }, - Self::ReturnCode => quote! { |ret_code| Some(ret_code.into()) }, + _ => quote! { |ret_val| Some(ret_val.into()) }, } } @@ -143,6 +144,7 @@ impl HostFnReturn { match self { Self::Unit => syn::ReturnType::Default, Self::U32 => parse_quote! { -> u32 }, + Self::U64 => parse_quote! { -> u64 }, Self::ReturnCode => parse_quote! { -> ReturnErrorCode }, } } @@ -243,7 +245,8 @@ impl HostFn { let msg = r#"Should return one of the following: - Result<(), TrapReason>, - Result, - - Result"#; + - Result, + - Result"#; let ret_ty = match item.clone().sig.output { syn::ReturnType::Type(_, ty) => Ok(ty.clone()), _ => Err(err(span, &msg)), @@ -305,6 +308,7 @@ impl HostFn { let returns = match ok_ty_str.as_str() { "()" => Ok(HostFnReturn::Unit), "u32" => Ok(HostFnReturn::U32), + "u64" => Ok(HostFnReturn::U64), "ReturnErrorCode" => Ok(HostFnReturn::ReturnCode), _ => Err(err(arg1.span(), &msg)), }?; @@ -339,48 +343,61 @@ where P: Iterator> + Clone, I: Iterator> + Clone, { - const ALLOWED_REGISTERS: u32 = 6; - let mut registers_used = 0; - let mut bindings = vec![]; - for (idx, (name, ty)) in param_names.clone().zip(param_types.clone()).enumerate() { + const ALLOWED_REGISTERS: usize = 6; + + // all of them take one register but we truncate them before passing into the function + // it is important to not allow any type which has illegal bit patterns like 'bool' + if !param_types.clone().all(|ty| { let syn::Type::Path(path) = &**ty else { panic!("Type needs to be path"); }; let Some(ident) = path.path.get_ident() else { panic!("Type needs to be ident"); }; - let size = if ident == "i8" || - ident == "i16" || - ident == "i32" || - ident == "u8" || - ident == "u16" || - ident == "u32" - { - 1 - } else if ident == "i64" || ident == "u64" { - 2 - } else { - panic!("Pass by value only supports primitives"); - }; - registers_used += size; - if registers_used > ALLOWED_REGISTERS { - return quote! 
{ - let (#( #param_names, )*): (#( #param_types, )*) = memory.read_as(__a0__)?; - } - } - let this_reg = quote::format_ident!("__a{}__", idx); - let next_reg = quote::format_ident!("__a{}__", idx + 1); - let binding = if size == 1 { + matches!(ident.to_string().as_ref(), "u8" | "u16" | "u32" | "u64") + }) { + panic!("Only primitive unsigned integers are allowed as arguments to syscalls"); + } + + // too many arguments: pass as pointer to a struct in memory + if param_names.clone().count() > ALLOWED_REGISTERS { + let fields = param_names.clone().zip(param_types.clone()).map(|(name, ty)| { quote! { - let #name = #this_reg as #ty; + #name: #ty, } - } else { - quote! { - let #name = (#this_reg as #ty) | ((#next_reg as #ty) << 32); + }); + return quote! { + #[derive(Default)] + #[repr(C)] + struct Args { + #(#fields)* } - }; - bindings.push(binding); + let Args { #(#param_names,)* } = { + let len = ::core::mem::size_of::(); + let mut args = Args::default(); + let ptr = &mut args as *mut Args as *mut u8; + // Safety + // 1. The struct is initialized at all times. + // 2. We only allow primitive integers (no bools) as arguments so every bit pattern is safe. + // 3. The reference doesn't outlive the args field. + // 4. There is only the single reference to the args field. + // 5. The length of the generated slice is the same as the struct. + let reference = unsafe { + ::core::slice::from_raw_parts_mut(ptr, len) + }; + memory.read_into_buf(__a0__ as _, reference)?; + args + }; + } } + + // otherwise: one argument per register + let bindings = param_names.zip(param_types).enumerate().map(|(idx, (name, ty))| { + let reg = quote::format_ident!("__a{}__", idx); + quote! { + let #name = #reg as #ty; + } + }); quote! { #( #bindings )* } @@ -407,7 +424,7 @@ fn expand_env(def: &EnvDef) -> TokenStream2 { memory: &mut M, __syscall_symbol__: &[u8], __available_api_version__: ApiVersion, - ) -> Result, TrapReason> + ) -> Result, TrapReason> { #impls } diff --git a/substrate/frame/revive/rpc/src/tests.rs b/substrate/frame/revive/rpc/src/tests.rs index 7734c8c57209..920318b26f71 100644 --- a/substrate/frame/revive/rpc/src/tests.rs +++ b/substrate/frame/revive/rpc/src/tests.rs @@ -218,6 +218,8 @@ async fn deploy_and_call() -> anyhow::Result<()> { Ok(()) } +/// TODO: enable ( https://github.com/paritytech/contract-issues/issues/12 ) +#[ignore] #[tokio::test] async fn revert_call() -> anyhow::Result<()> { let _lock = SHARED_RESOURCES.write(); @@ -240,6 +242,8 @@ async fn revert_call() -> anyhow::Result<()> { Ok(()) } +/// TODO: enable ( https://github.com/paritytech/contract-issues/issues/12 ) +#[ignore] #[tokio::test] async fn event_logs() -> anyhow::Result<()> { let _lock = SHARED_RESOURCES.write(); @@ -279,6 +283,8 @@ async fn invalid_transaction() -> anyhow::Result<()> { Ok(()) } +/// TODO: enable ( https://github.com/paritytech/contract-issues/issues/12 ) +#[ignore] #[tokio::test] async fn native_evm_ratio_works() -> anyhow::Result<()> { let _lock = SHARED_RESOURCES.write(); diff --git a/substrate/frame/revive/src/chain_extension.rs b/substrate/frame/revive/src/chain_extension.rs index ccea12945054..5b3e886a5628 100644 --- a/substrate/frame/revive/src/chain_extension.rs +++ b/substrate/frame/revive/src/chain_extension.rs @@ -75,7 +75,7 @@ use crate::{ Error, }; use alloc::vec::Vec; -use codec::{Decode, MaxEncodedLen}; +use codec::Decode; use frame_support::weights::Weight; use sp_runtime::DispatchError; @@ -304,16 +304,6 @@ impl<'a, 'b, E: Ext, M: ?Sized + Memory> Environment<'a, 'b, E, M> { Ok(()) } - /// 
Reads and decodes a type with a size fixed at compile time from contract memory. - /// - /// This function is secure and recommended for all input types of fixed size - /// as long as the cost of reading the memory is included in the overall already charged - /// weight of the chain extension. This should usually be the case when fixed input types - /// are used. - pub fn read_as(&mut self) -> Result { - self.memory.read_as(self.input_ptr) - } - /// Reads and decodes a type with a dynamic size from contract memory. /// /// Make sure to include `len` in your weight calculations. diff --git a/substrate/frame/revive/src/evm/runtime.rs b/substrate/frame/revive/src/evm/runtime.rs index 40c210304ca2..b5dc9a36065b 100644 --- a/substrate/frame/revive/src/evm/runtime.rs +++ b/substrate/frame/revive/src/evm/runtime.rs @@ -92,8 +92,12 @@ impl ExtrinsicLike impl ExtrinsicMetadata for UncheckedExtrinsic { - const VERSION: u8 = - generic::UncheckedExtrinsic::, Signature, E::Extension>::VERSION; + const VERSIONS: &'static [u8] = generic::UncheckedExtrinsic::< + Address, + CallOf, + Signature, + E::Extension, + >::VERSIONS; type TransactionExtensions = E::Extension; } diff --git a/substrate/frame/revive/src/limits.rs b/substrate/frame/revive/src/limits.rs index 64e66382b9ab..5ce96f59c14d 100644 --- a/substrate/frame/revive/src/limits.rs +++ b/substrate/frame/revive/src/limits.rs @@ -129,23 +129,36 @@ pub mod code { Error::::CodeRejected })?; + if !program.is_64_bit() { + log::debug!(target: LOG_TARGET, "32bit programs are not supported."); + Err(Error::::CodeRejected)?; + } + // This scans the whole program but we only do it once on code deployment. // It is safe to do unchecked math in u32 because the size of the program // was already checked above. - use polkavm::program::ISA32_V1_NoSbrk as ISA; + use polkavm::program::ISA64_V1 as ISA; let mut num_instructions: u32 = 0; let mut max_basic_block_size: u32 = 0; let mut basic_block_size: u32 = 0; for inst in program.instructions(ISA) { + use polkavm::program::Instruction; num_instructions += 1; basic_block_size += 1; if inst.kind.opcode().starts_new_basic_block() { max_basic_block_size = max_basic_block_size.max(basic_block_size); basic_block_size = 0; } - if matches!(inst.kind, polkavm::program::Instruction::invalid) { - log::debug!(target: LOG_TARGET, "invalid instruction at offset {}", inst.offset); - return Err(>::InvalidInstruction.into()) + match inst.kind { + Instruction::invalid => { + log::debug!(target: LOG_TARGET, "invalid instruction at offset {}", inst.offset); + return Err(>::InvalidInstruction.into()) + }, + Instruction::sbrk(_, _) => { + log::debug!(target: LOG_TARGET, "sbrk instruction is not allowed. 
offset {}", inst.offset); + return Err(>::InvalidInstruction.into()) + }, + _ => (), } } diff --git a/substrate/frame/revive/src/wasm/mod.rs b/substrate/frame/revive/src/wasm/mod.rs index f10c4f5fddf8..d87ec7112286 100644 --- a/substrate/frame/revive/src/wasm/mod.rs +++ b/substrate/frame/revive/src/wasm/mod.rs @@ -293,8 +293,15 @@ impl WasmBlob { ) -> Result, ExecError> { let mut config = polkavm::Config::default(); config.set_backend(Some(polkavm::BackendKind::Interpreter)); - let engine = - polkavm::Engine::new(&config).expect("interpreter is available on all plattforms; qed"); + config.set_cache_enabled(false); + #[cfg(feature = "std")] + if std::env::var_os("REVIVE_USE_COMPILER").is_some() { + config.set_backend(Some(polkavm::BackendKind::Compiler)); + } + let engine = polkavm::Engine::new(&config).expect( + "on-chain (no_std) use of interpreter is hard coded. + interpreter is available on all plattforms; qed", + ); let mut module_config = polkavm::ModuleConfig::new(); module_config.set_page_size(limits::PAGE_SIZE); @@ -306,6 +313,15 @@ impl WasmBlob { Error::::CodeRejected })?; + // This is checked at deploy time but we also want to reject pre-existing + // 32bit programs. + // TODO: Remove when we reset the test net. + // https://github.com/paritytech/contract-issues/issues/11 + if !module.is_64_bit() { + log::debug!(target: LOG_TARGET, "32bit programs are not supported."); + Err(Error::::CodeRejected)?; + } + let entry_program_counter = module .exports() .find(|export| export.symbol().as_bytes() == entry_point.identifier().as_bytes()) diff --git a/substrate/frame/revive/src/wasm/runtime.rs b/substrate/frame/revive/src/wasm/runtime.rs index 3e2c83db1ebd..7ea518081e23 100644 --- a/substrate/frame/revive/src/wasm/runtime.rs +++ b/substrate/frame/revive/src/wasm/runtime.rs @@ -27,7 +27,7 @@ use crate::{ Config, Error, LOG_TARGET, SENTINEL, }; use alloc::{boxed::Box, vec, vec::Vec}; -use codec::{Decode, DecodeLimit, Encode, MaxEncodedLen}; +use codec::{Decode, DecodeLimit, Encode}; use core::{fmt, marker::PhantomData, mem}; use frame_support::{ dispatch::DispatchInfo, ensure, pallet_prelude::DispatchResultWithPostInfo, parameter_types, @@ -126,34 +126,13 @@ pub trait Memory { /// /// # Note /// - /// There must be an extra benchmark for determining the influence of `len` with - /// regard to the overall weight. + /// Make sure to charge a proportional amount of weight if `len` is not fixed. fn read_as_unbounded(&self, ptr: u32, len: u32) -> Result { let buf = self.read(ptr, len)?; let decoded = D::decode_all_with_depth_limit(MAX_DECODE_NESTING, &mut buf.as_ref()) .map_err(|_| DispatchError::from(Error::::DecodingFailed))?; Ok(decoded) } - - /// Reads and decodes a type with a size fixed at compile time from contract memory. - /// - /// # Only use on fixed size types - /// - /// Don't use this for types where the encoded size is not fixed but merely bounded. Otherwise - /// this implementation will out of bound access the buffer declared by the guest. Some examples - /// of those bounded but not fixed types: Enums with data, `BoundedVec` or any compact encoded - /// integer. - /// - /// # Note - /// - /// The weight of reading a fixed value is included in the overall weight of any - /// contract callable function. 
- fn read_as(&self, ptr: u32) -> Result { - let buf = self.read(ptr, D::max_encoded_len() as u32)?; - let decoded = D::decode_with_depth_limit(MAX_DECODE_NESTING, &mut buf.as_ref()) - .map_err(|_| DispatchError::from(Error::::DecodingFailed))?; - Ok(decoded) - } } /// Allows syscalls access to the PolkaVM instance they are executing in. @@ -164,8 +143,8 @@ pub trait Memory { pub trait PolkaVmInstance: Memory { fn gas(&self) -> polkavm::Gas; fn set_gas(&mut self, gas: polkavm::Gas); - fn read_input_regs(&self) -> (u32, u32, u32, u32, u32, u32); - fn write_output(&mut self, output: u32); + fn read_input_regs(&self) -> (u64, u64, u64, u64, u64, u64); + fn write_output(&mut self, output: u64); } // Memory implementation used in benchmarking where guest memory is mapped into the host. @@ -214,7 +193,7 @@ impl PolkaVmInstance for polkavm::RawInstance { self.set_gas(gas) } - fn read_input_regs(&self) -> (u32, u32, u32, u32, u32, u32) { + fn read_input_regs(&self) -> (u64, u64, u64, u64, u64, u64) { ( self.reg(polkavm::Reg::A0), self.reg(polkavm::Reg::A1), @@ -225,7 +204,7 @@ impl PolkaVmInstance for polkavm::RawInstance { ) } - fn write_output(&mut self, output: u32) { + fn write_output(&mut self, output: u64) { self.set_reg(polkavm::Reg::A0, output); } } diff --git a/substrate/frame/revive/uapi/Cargo.toml b/substrate/frame/revive/uapi/Cargo.toml index 0c7461a35d69..b55391dd5d6c 100644 --- a/substrate/frame/revive/uapi/Cargo.toml +++ b/substrate/frame/revive/uapi/Cargo.toml @@ -20,11 +20,11 @@ codec = { features = [ "max-encoded-len", ], optional = true, workspace = true } -[target.'cfg(target_arch = "riscv32")'.dependencies] -polkavm-derive = { version = "0.14.0" } +[target.'cfg(target_arch = "riscv64")'.dependencies] +polkavm-derive = { version = "0.17.0" } [package.metadata.docs.rs] -default-target = ["wasm32-unknown-unknown"] +default-target = ["riscv64imac-unknown-none-elf"] [features] default = ["scale"] diff --git a/substrate/frame/revive/uapi/src/host.rs b/substrate/frame/revive/uapi/src/host.rs index 6b3a8b07f040..d3fd4ac8d03e 100644 --- a/substrate/frame/revive/uapi/src/host.rs +++ b/substrate/frame/revive/uapi/src/host.rs @@ -14,8 +14,8 @@ use crate::{CallFlags, Result, ReturnFlags, StorageFlags}; use paste::paste; -#[cfg(target_arch = "riscv32")] -mod riscv32; +#[cfg(target_arch = "riscv64")] +mod riscv64; macro_rules! 
hash_fn { ( $name:ident, $bytes:literal ) => { diff --git a/substrate/frame/revive/uapi/src/host/riscv32.rs b/substrate/frame/revive/uapi/src/host/riscv64.rs similarity index 93% rename from substrate/frame/revive/uapi/src/host/riscv32.rs rename to substrate/frame/revive/uapi/src/host/riscv64.rs index e8b27057ed18..3cba14db6a04 100644 --- a/substrate/frame/revive/uapi/src/host/riscv32.rs +++ b/substrate/frame/revive/uapi/src/host/riscv64.rs @@ -26,10 +26,10 @@ mod sys { mod abi {} impl abi::FromHost for ReturnCode { - type Regs = (u32,); + type Regs = (u64,); fn from_host((a0,): Self::Regs) -> Self { - ReturnCode(a0) + ReturnCode(a0 as _) } } @@ -207,33 +207,33 @@ impl HostFn for HostFnImpl { let (output_ptr, mut output_len) = ptr_len_or_sentinel(&mut output); let deposit_limit_ptr = ptr_or_sentinel(&deposit_limit); let salt_ptr = ptr_or_sentinel(&salt); - #[repr(packed)] + #[repr(C)] #[allow(dead_code)] struct Args { - code_hash: *const u8, + code_hash: u32, ref_time_limit: u64, proof_size_limit: u64, - deposit_limit: *const u8, - value: *const u8, - input: *const u8, + deposit_limit: u32, + value: u32, + input: u32, input_len: u32, - address: *const u8, - output: *mut u8, - output_len: *mut u32, - salt: *const u8, + address: u32, + output: u32, + output_len: u32, + salt: u32, } let args = Args { - code_hash: code_hash.as_ptr(), + code_hash: code_hash.as_ptr() as _, ref_time_limit, proof_size_limit, - deposit_limit: deposit_limit_ptr, - value: value.as_ptr(), - input: input.as_ptr(), + deposit_limit: deposit_limit_ptr as _, + value: value.as_ptr() as _, + input: input.as_ptr() as _, input_len: input.len() as _, - address, - output: output_ptr, - output_len: &mut output_len as *mut _, - salt: salt_ptr, + address: address as _, + output: output_ptr as _, + output_len: &mut output_len as *mut _ as _, + salt: salt_ptr as _, }; let ret_code = { unsafe { sys::instantiate(&args as *const Args as *const _) } }; @@ -257,31 +257,31 @@ impl HostFn for HostFnImpl { ) -> Result { let (output_ptr, mut output_len) = ptr_len_or_sentinel(&mut output); let deposit_limit_ptr = ptr_or_sentinel(&deposit_limit); - #[repr(packed)] + #[repr(C)] #[allow(dead_code)] struct Args { flags: u32, - callee: *const u8, + callee: u32, ref_time_limit: u64, proof_size_limit: u64, - deposit_limit: *const u8, - value: *const u8, - input: *const u8, + deposit_limit: u32, + value: u32, + input: u32, input_len: u32, - output: *mut u8, - output_len: *mut u32, + output: u32, + output_len: u32, } let args = Args { flags: flags.bits(), - callee: callee.as_ptr(), + callee: callee.as_ptr() as _, ref_time_limit, proof_size_limit, - deposit_limit: deposit_limit_ptr, - value: value.as_ptr(), - input: input.as_ptr(), + deposit_limit: deposit_limit_ptr as _, + value: value.as_ptr() as _, + input: input.as_ptr() as _, input_len: input.len() as _, - output: output_ptr, - output_len: &mut output_len as *mut _, + output: output_ptr as _, + output_len: &mut output_len as *mut _ as _, }; let ret_code = { unsafe { sys::call(&args as *const Args as *const _) } }; @@ -308,29 +308,29 @@ impl HostFn for HostFnImpl { ) -> Result { let (output_ptr, mut output_len) = ptr_len_or_sentinel(&mut output); let deposit_limit_ptr = ptr_or_sentinel(&deposit_limit); - #[repr(packed)] + #[repr(C)] #[allow(dead_code)] struct Args { flags: u32, - address: *const u8, + address: u32, ref_time_limit: u64, proof_size_limit: u64, - deposit_limit: *const u8, - input: *const u8, + deposit_limit: u32, + input: u32, input_len: u32, - output: *mut u8, - output_len: *mut u32, 
+ output: u32, + output_len: u32, } let args = Args { flags: flags.bits(), - address: address.as_ptr(), + address: address.as_ptr() as _, ref_time_limit, proof_size_limit, - deposit_limit: deposit_limit_ptr, - input: input.as_ptr(), + deposit_limit: deposit_limit_ptr as _, + input: input.as_ptr() as _, input_len: input.len() as _, - output: output_ptr, - output_len: &mut output_len as *mut _, + output: output_ptr as _, + output_len: &mut output_len as *mut _ as _, }; let ret_code = { unsafe { sys::delegate_call(&args as *const Args as *const _) } }; diff --git a/substrate/frame/revive/uapi/src/lib.rs b/substrate/frame/revive/uapi/src/lib.rs index e660ce36ef75..91c2543bb719 100644 --- a/substrate/frame/revive/uapi/src/lib.rs +++ b/substrate/frame/revive/uapi/src/lib.rs @@ -65,6 +65,12 @@ impl From for u32 { } } +impl From for u64 { + fn from(error: ReturnErrorCode) -> Self { + u32::from(error).into() + } +} + define_error_codes! { /// The called function trapped and has its state changes reverted. /// In this case no output buffer is returned. diff --git a/substrate/frame/state-trie-migration/src/lib.rs b/substrate/frame/state-trie-migration/src/lib.rs index 3fe5abb81031..61323b70b33d 100644 --- a/substrate/frame/state-trie-migration/src/lib.rs +++ b/substrate/frame/state-trie-migration/src/lib.rs @@ -249,13 +249,13 @@ pub mod pallet { if limits.item.is_zero() || limits.size.is_zero() { // handle this minor edge case, else we would call `migrate_tick` at least once. log!(warn, "limits are zero. stopping"); - return Ok(()) + return Ok(()); } while !self.exhausted(limits) && !self.finished() { if let Err(e) = self.migrate_tick() { log!(error, "migrate_until_exhaustion failed: {:?}", e); - return Err(e) + return Err(e); } } @@ -332,7 +332,7 @@ pub mod pallet { _ => { // defensive: there must be an ongoing top migration. frame_support::defensive!("cannot migrate child key."); - return Ok(()) + return Ok(()); }, }; @@ -374,7 +374,7 @@ pub mod pallet { Progress::Complete => { // defensive: there must be an ongoing top migration. frame_support::defensive!("cannot migrate top key."); - return Ok(()) + return Ok(()); }, }; @@ -669,7 +669,7 @@ pub mod pallet { // ensure that the migration witness data was correct. if real_size_upper < task.dyn_size { Self::slash(who, deposit)?; - return Ok(().into()) + return Ok(().into()); } Self::deposit_event(Event::::Migrated { @@ -957,6 +957,7 @@ pub mod pallet { mod benchmarks { use super::{pallet::Pallet as StateTrieMigration, *}; use alloc::vec; + use frame_benchmarking::v2::*; use frame_support::traits::fungible::{Inspect, Mutate}; // The size of the key seemingly makes no difference in the read/write time, so we make it @@ -970,8 +971,12 @@ mod benchmarks { stash } - frame_benchmarking::benchmarks! { - continue_migrate { + #[benchmarks] + mod inner_benchmarks { + use super::*; + + #[benchmark] + fn continue_migrate() -> Result<(), BenchmarkError> { // note that this benchmark should migrate nothing, as we only want the overhead weight // of the bookkeeping, and the migration cost itself is noted via the `dynamic_weight` // function. @@ -980,116 +985,151 @@ mod benchmarks { let stash = set_balance_for_deposit::(&caller, null.item); // Allow signed migrations. 
SignedMigrationMaxLimits::::put(MigrationLimits { size: 1024, item: 5 }); - }: _(frame_system::RawOrigin::Signed(caller.clone()), null, 0, StateTrieMigration::::migration_process()) - verify { + + #[extrinsic_call] + _( + frame_system::RawOrigin::Signed(caller.clone()), + null, + 0, + StateTrieMigration::::migration_process(), + ); + assert_eq!(StateTrieMigration::::migration_process(), Default::default()); - assert_eq!(T::Currency::balance(&caller), stash) + assert_eq!(T::Currency::balance(&caller), stash); + + Ok(()) } - continue_migrate_wrong_witness { + #[benchmark] + fn continue_migrate_wrong_witness() -> Result<(), BenchmarkError> { let null = MigrationLimits::default(); let caller = frame_benchmarking::whitelisted_caller(); - let bad_witness = MigrationTask { progress_top: Progress::LastKey(vec![1u8].try_into().unwrap()), ..Default::default() }; - }: { - assert!( - StateTrieMigration::::continue_migrate( + let bad_witness = MigrationTask { + progress_top: Progress::LastKey(vec![1u8].try_into().unwrap()), + ..Default::default() + }; + #[block] + { + assert!(StateTrieMigration::::continue_migrate( frame_system::RawOrigin::Signed(caller).into(), null, 0, bad_witness, ) - .is_err() - ) - } - verify { - assert_eq!(StateTrieMigration::::migration_process(), Default::default()) + .is_err()); + } + + assert_eq!(StateTrieMigration::::migration_process(), Default::default()); + + Ok(()) } - migrate_custom_top_success { + #[benchmark] + fn migrate_custom_top_success() -> Result<(), BenchmarkError> { let null = MigrationLimits::default(); let caller: T::AccountId = frame_benchmarking::whitelisted_caller(); let stash = set_balance_for_deposit::(&caller, null.item); - }: migrate_custom_top(frame_system::RawOrigin::Signed(caller.clone()), Default::default(), 0) - verify { + #[extrinsic_call] + migrate_custom_top( + frame_system::RawOrigin::Signed(caller.clone()), + Default::default(), + 0, + ); + assert_eq!(StateTrieMigration::::migration_process(), Default::default()); - assert_eq!(T::Currency::balance(&caller), stash) + assert_eq!(T::Currency::balance(&caller), stash); + Ok(()) } - migrate_custom_top_fail { + #[benchmark] + fn migrate_custom_top_fail() -> Result<(), BenchmarkError> { let null = MigrationLimits::default(); let caller: T::AccountId = frame_benchmarking::whitelisted_caller(); let stash = set_balance_for_deposit::(&caller, null.item); // for tests, we need to make sure there is _something_ in storage that is being // migrated. 
- sp_io::storage::set(b"foo", vec![1u8;33].as_ref()); - }: { - assert!( - StateTrieMigration::::migrate_custom_top( + sp_io::storage::set(b"foo", vec![1u8; 33].as_ref()); + #[block] + { + assert!(StateTrieMigration::::migrate_custom_top( frame_system::RawOrigin::Signed(caller.clone()).into(), vec![b"foo".to_vec()], 1, - ).is_ok() - ); + ) + .is_ok()); + + frame_system::Pallet::::assert_last_event( + ::RuntimeEvent::from(crate::Event::Slashed { + who: caller.clone(), + amount: StateTrieMigration::::calculate_deposit_for(1u32), + }) + .into(), + ); + } - frame_system::Pallet::::assert_last_event( - ::RuntimeEvent::from(crate::Event::Slashed { - who: caller.clone(), - amount: StateTrieMigration::::calculate_deposit_for(1u32), - }).into(), - ); - } - verify { assert_eq!(StateTrieMigration::::migration_process(), Default::default()); // must have gotten slashed - assert!(T::Currency::balance(&caller) < stash) + assert!(T::Currency::balance(&caller) < stash); + + Ok(()) } - migrate_custom_child_success { + #[benchmark] + fn migrate_custom_child_success() -> Result<(), BenchmarkError> { let caller: T::AccountId = frame_benchmarking::whitelisted_caller(); let stash = set_balance_for_deposit::(&caller, 0); - }: migrate_custom_child( - frame_system::RawOrigin::Signed(caller.clone()), - StateTrieMigration::::childify(Default::default()), - Default::default(), - 0 - ) - verify { + + #[extrinsic_call] + migrate_custom_child( + frame_system::RawOrigin::Signed(caller.clone()), + StateTrieMigration::::childify(Default::default()), + Default::default(), + 0, + ); + assert_eq!(StateTrieMigration::::migration_process(), Default::default()); assert_eq!(T::Currency::balance(&caller), stash); + + Ok(()) } - migrate_custom_child_fail { + #[benchmark] + fn migrate_custom_child_fail() -> Result<(), BenchmarkError> { let caller: T::AccountId = frame_benchmarking::whitelisted_caller(); let stash = set_balance_for_deposit::(&caller, 1); // for tests, we need to make sure there is _something_ in storage that is being // migrated. - sp_io::default_child_storage::set(b"top", b"foo", vec![1u8;33].as_ref()); - }: { - assert!( - StateTrieMigration::::migrate_custom_child( + sp_io::default_child_storage::set(b"top", b"foo", vec![1u8; 33].as_ref()); + + #[block] + { + assert!(StateTrieMigration::::migrate_custom_child( frame_system::RawOrigin::Signed(caller.clone()).into(), StateTrieMigration::::childify("top"), vec![b"foo".to_vec()], 1, - ).is_ok() - ) - } - verify { + ) + .is_ok()); + } assert_eq!(StateTrieMigration::::migration_process(), Default::default()); // must have gotten slashed - assert!(T::Currency::balance(&caller) < stash) + assert!(T::Currency::balance(&caller) < stash); + Ok(()) } - process_top_key { - let v in 1 .. 
(4 * 1024 * 1024); - + #[benchmark] + fn process_top_key(v: Linear<1, { 4 * 1024 * 1024 }>) -> Result<(), BenchmarkError> { let value = alloc::vec![1u8; v as usize]; sp_io::storage::set(KEY, &value); - }: { - let data = sp_io::storage::get(KEY).unwrap(); - sp_io::storage::set(KEY, &data); - let _next = sp_io::storage::next_key(KEY); - assert_eq!(data, value); + #[block] + { + let data = sp_io::storage::get(KEY).unwrap(); + sp_io::storage::set(KEY, &data); + let _next = sp_io::storage::next_key(KEY); + assert_eq!(data, value); + } + + Ok(()) } impl_benchmark_test_suite!( @@ -1741,7 +1781,7 @@ pub(crate) mod remote_tests { let ((finished, weight), proof) = ext.execute_and_prove(|| { let weight = run_to_block::(now + One::one()).1; if StateTrieMigration::::migration_process().finished() { - return (true, weight) + return (true, weight); } duration += One::one(); now += One::one(); @@ -1768,7 +1808,7 @@ pub(crate) mod remote_tests { ext.commit_all().unwrap(); if finished { - break + break; } } diff --git a/substrate/frame/support/Cargo.toml b/substrate/frame/support/Cargo.toml index d7da034b3492..d48c80510581 100644 --- a/substrate/frame/support/Cargo.toml +++ b/substrate/frame/support/Cargo.toml @@ -28,6 +28,7 @@ scale-info = { features = [ ], workspace = true } frame-metadata = { features = [ "current", + "unstable", ], workspace = true } sp-api = { features = [ "frame-metadata", diff --git a/substrate/frame/support/procedural/src/construct_runtime/expand/metadata.rs b/substrate/frame/support/procedural/src/construct_runtime/expand/metadata.rs index c12fc20bc8b8..0b3bd5168865 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/expand/metadata.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/expand/metadata.rs @@ -113,11 +113,13 @@ pub fn expand_runtime_metadata( <#extrinsic as #scrate::traits::SignedTransactionBuilder>::Extension >(); + use #scrate::__private::metadata_ir::InternalImplRuntimeApis; + #scrate::__private::metadata_ir::MetadataIR { pallets: #scrate::__private::vec![ #(#pallets),* ], extrinsic: #scrate::__private::metadata_ir::ExtrinsicMetadataIR { ty, - version: <#extrinsic as #scrate::sp_runtime::traits::ExtrinsicMetadata>::VERSION, + versions: <#extrinsic as #scrate::sp_runtime::traits::ExtrinsicMetadata>::VERSIONS.into_iter().map(|ref_version| *ref_version).collect(), address_ty, call_ty, signature_ty, diff --git a/substrate/frame/support/procedural/src/construct_runtime/mod.rs b/substrate/frame/support/procedural/src/construct_runtime/mod.rs index 17042c248780..087faf37252d 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/mod.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/mod.rs @@ -466,7 +466,6 @@ fn construct_runtime_final_expansion( // Therefore, the `Deref` trait will resolve the `runtime_metadata` from `impl_runtime_apis!` // when both macros are called; and will resolve an empty `runtime_metadata` when only the `construct_runtime!` // is called. 
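The comment above describes a method-resolution trick that is easy to miss. A self-contained toy model of how the fallback works (simplified names, with `String` standing in for the metadata IR types; this is not the actual generated code):

struct Runtime;

// Emitted by `construct_runtime!`: a fallback implemented for `&Runtime`,
// so it is only reachable through auto-ref.
trait InternalConstructRuntime {
	fn runtime_metadata(&self) -> Vec<String> {
		Vec::new()
	}
}
impl InternalConstructRuntime for &Runtime {}

// Emitted by `impl_runtime_apis!` (the trait now lives in `sp-metadata-ir`
// as `InternalImplRuntimeApis`): implemented for `Runtime` directly, so
// method resolution prefers it over the `&Runtime` fallback.
trait InternalImplRuntimeApis {
	fn runtime_metadata(&self) -> Vec<String>;
}
impl InternalImplRuntimeApis for Runtime {
	fn runtime_metadata(&self) -> Vec<String> {
		vec!["real api metadata".to_string()]
	}
}

fn main() {
	// Resolves to the `Runtime` impl because it is found before auto-ref;
	// if the `impl_runtime_apis!` impl were absent, the `&Runtime` fallback
	// would be picked instead and yield an empty vec.
	assert_eq!(Runtime.runtime_metadata(), vec!["real api metadata".to_string()]);
}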
- #[doc(hidden)] trait InternalConstructRuntime { #[inline(always)] @@ -477,6 +476,8 @@ fn construct_runtime_final_expansion( #[doc(hidden)] impl InternalConstructRuntime for &#name {} + use #scrate::__private::metadata_ir::InternalImplRuntimeApis; + #outer_event #outer_error diff --git a/substrate/frame/support/test/Cargo.toml b/substrate/frame/support/test/Cargo.toml index 2187ee22b395..17ee3130b741 100644 --- a/substrate/frame/support/test/Cargo.toml +++ b/substrate/frame/support/test/Cargo.toml @@ -19,7 +19,7 @@ static_assertions = { workspace = true, default-features = true } serde = { features = ["derive"], workspace = true } codec = { features = ["derive"], workspace = true } scale-info = { features = ["derive"], workspace = true } -frame-metadata = { features = ["current"], workspace = true } +frame-metadata = { features = ["current", "unstable"], workspace = true } sp-api = { workspace = true } sp-arithmetic = { workspace = true } sp-io = { workspace = true } diff --git a/substrate/frame/support/test/tests/pallet.rs b/substrate/frame/support/test/tests/pallet.rs index de7f7eb4bc97..9df1f461bba2 100644 --- a/substrate/frame/support/test/tests/pallet.rs +++ b/substrate/frame/support/test/tests/pallet.rs @@ -53,6 +53,9 @@ parameter_types! { /// Latest stable metadata version used for testing. const LATEST_METADATA_VERSION: u32 = 15; +/// Unstable metadata version. +const UNSTABLE_METADATA_VERSION: u32 = u32::MAX; + pub struct SomeType1; impl From for u64 { fn from(_t: SomeType1) -> Self { @@ -1977,7 +1980,10 @@ fn metadata_at_version() { #[test] fn metadata_versions() { - assert_eq!(vec![14, LATEST_METADATA_VERSION], Runtime::metadata_versions()); + assert_eq!( + vec![14, LATEST_METADATA_VERSION, UNSTABLE_METADATA_VERSION], + Runtime::metadata_versions() + ); } #[test] diff --git a/substrate/frame/support/test/tests/runtime_metadata.rs b/substrate/frame/support/test/tests/runtime_metadata.rs index 7523a415d458..a098643abb91 100644 --- a/substrate/frame/support/test/tests/runtime_metadata.rs +++ b/substrate/frame/support/test/tests/runtime_metadata.rs @@ -80,34 +80,39 @@ sp_api::decl_runtime_apis! { } } -sp_api::impl_runtime_apis! { - impl self::Api for Runtime { - fn test(_data: u64) { - unimplemented!() - } +// Module to emulate having the implementation in a different file. +mod apis { + use super::{Block, BlockT, Runtime}; - fn something_with_block(_: Block) -> Block { - unimplemented!() - } + sp_api::impl_runtime_apis! 
diff --git a/substrate/frame/support/test/tests/runtime_metadata.rs b/substrate/frame/support/test/tests/runtime_metadata.rs
index 7523a415d458..a098643abb91 100644
--- a/substrate/frame/support/test/tests/runtime_metadata.rs
+++ b/substrate/frame/support/test/tests/runtime_metadata.rs
@@ -80,34 +80,39 @@ sp_api::decl_runtime_apis! {
 	}
 }

-sp_api::impl_runtime_apis! {
-	impl self::Api<Block> for Runtime {
-		fn test(_data: u64) {
-			unimplemented!()
-		}
+// Module to emulate having the implementation in a different file.
+mod apis {
+	use super::{Block, BlockT, Runtime};

-		fn something_with_block(_: Block) -> Block {
-			unimplemented!()
-		}
+	sp_api::impl_runtime_apis! {
+		impl crate::Api<Block> for Runtime {
+			fn test(_data: u64) {
+				unimplemented!()
+			}

-		fn function_with_two_args(_: u64, _: Block) {
-			unimplemented!()
-		}
+			fn something_with_block(_: Block) -> Block {
+				unimplemented!()
+			}

-		fn same_name() {}
+			fn function_with_two_args(_: u64, _: Block) {
+				unimplemented!()
+			}

-		fn wild_card(_: u32) {}
-	}
+			fn same_name() {}

-	impl sp_api::Core<Block> for Runtime {
-		fn version() -> sp_version::RuntimeVersion {
-			unimplemented!()
-		}
-		fn execute_block(_: Block) {
-			unimplemented!()
+			fn wild_card(_: u32) {}
 		}
-		fn initialize_block(_: &<Block as BlockT>::Header) -> sp_runtime::ExtrinsicInclusionMode {
-			unimplemented!()
+
+		impl sp_api::Core<Block> for Runtime {
+			fn version() -> sp_version::RuntimeVersion {
+				unimplemented!()
+			}
+			fn execute_block(_: Block) {
+				unimplemented!()
+			}
+			fn initialize_block(_: &<Block as BlockT>::Header) -> sp_runtime::ExtrinsicInclusionMode {
+				unimplemented!()
+			}
 		}
 	}
 }
diff --git a/substrate/primitives/api/proc-macro/src/runtime_metadata.rs b/substrate/primitives/api/proc-macro/src/runtime_metadata.rs
index 6be396339259..1706f8ca6fbb 100644
--- a/substrate/primitives/api/proc-macro/src/runtime_metadata.rs
+++ b/substrate/primitives/api/proc-macro/src/runtime_metadata.rs
@@ -298,18 +298,14 @@ pub fn generate_impl_runtime_metadata(impls: &[ItemImpl]) -> Result<TokenStream2, Error>
 				fn runtime_metadata(&self) -> #crate_::vec::Vec<#crate_::metadata_ir::RuntimeApiMetadataIR> {
 					#crate_::vec![ #( #metadata, )* ]
 				}
 			}
-			#[doc(hidden)]
-			impl InternalImplRuntimeApis for #runtime_name {}
 		}
 	))
 }
diff --git a/substrate/primitives/api/test/Cargo.toml b/substrate/primitives/api/test/Cargo.toml
index 1d21f23eb804..27f6dafa24bf 100644
--- a/substrate/primitives/api/test/Cargo.toml
+++ b/substrate/primitives/api/test/Cargo.toml
@@ -21,6 +21,7 @@ sp-version = { workspace = true, default-features = true }
 sp-tracing = { workspace = true, default-features = true }
 sp-runtime = { workspace = true, default-features = true }
 sp-consensus = { workspace = true, default-features = true }
+sp-metadata-ir = { workspace = true, default-features = true }
 sc-block-builder = { workspace = true, default-features = true }
 codec = { workspace = true, default-features = true }
 sp-state-machine = { workspace = true, default-features = true }
@@ -40,5 +41,5 @@ name = "bench"
 harness = false

 [features]
-"enable-staging-api" = []
+enable-staging-api = []
 disable-ui-tests = []
diff --git a/substrate/primitives/api/test/tests/decl_and_impl.rs b/substrate/primitives/api/test/tests/decl_and_impl.rs
index 890cf6eccdbc..2e5a078cb382 100644
--- a/substrate/primitives/api/test/tests/decl_and_impl.rs
+++ b/substrate/primitives/api/test/tests/decl_and_impl.rs
@@ -309,6 +309,8 @@ fn mock_runtime_api_works_with_advanced() {

 #[test]
 fn runtime_api_metadata_matches_version_implemented() {
+	use sp_metadata_ir::InternalImplRuntimeApis;
+
 	let rt = Runtime {};
 	let runtime_metadata = rt.runtime_metadata();
diff --git a/substrate/primitives/metadata-ir/Cargo.toml b/substrate/primitives/metadata-ir/Cargo.toml
index d7786347dd02..046441104b88 100644
--- a/substrate/primitives/metadata-ir/Cargo.toml
+++ b/substrate/primitives/metadata-ir/Cargo.toml
@@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"]

 [dependencies]
 codec = { workspace = true }
-frame-metadata = { features = ["current"], workspace = true }
+frame-metadata = { features = ["current", "unstable"], workspace = true }
 scale-info = { features = ["derive"], workspace = true }

 [features]
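The relocation of `InternalImplRuntimeApis` into `sp-metadata-ir` (visible in the proc-macro change and the new `use sp_metadata_ir::InternalImplRuntimeApis` above) lets `construct_runtime!` and `impl_runtime_apis!` name one shared trait even when they expand in different modules, as the new `mod apis` test exercises. Below is a minimal, hypothetical model of the auto-ref fallback the two macros rely on, with `Vec<String>` standing in for the real IR types:

```rust
// Sketch only: models the resolution trick, not the real macro output.
trait InternalConstructRuntime {
	// Fallback registered on `&Runtime` by `construct_runtime!`.
	fn runtime_metadata(&self) -> Vec<String> {
		Vec::new() // empty when no runtime APIs are implemented
	}
}

trait InternalImplRuntimeApis {
	fn runtime_metadata(&self) -> Vec<String>;
}

struct Runtime;

impl InternalConstructRuntime for &Runtime {}

// Real metadata registered on `Runtime` by `impl_runtime_apis!`.
impl InternalImplRuntimeApis for Runtime {
	fn runtime_metadata(&self) -> Vec<String> {
		vec!["Core".into(), "Metadata".into()]
	}
}

fn main() {
	let runtime = Runtime;
	// Resolves to the `InternalImplRuntimeApis` impl; without it, auto-ref
	// would fall back to the empty `InternalConstructRuntime` default.
	assert_eq!((&runtime).runtime_metadata(), vec!["Core", "Metadata"]);
}
```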
diff --git a/substrate/primitives/metadata-ir/src/lib.rs b/substrate/primitives/metadata-ir/src/lib.rs
index 4bd13b935afd..dc01f7eaadb3 100644
--- a/substrate/primitives/metadata-ir/src/lib.rs
+++ b/substrate/primitives/metadata-ir/src/lib.rs
@@ -30,6 +30,7 @@ mod types;
 use frame_metadata::RuntimeMetadataPrefixed;
 pub use types::*;

+mod unstable;
 mod v14;
 mod v15;

@@ -39,23 +40,33 @@ const V14: u32 = 14;
 /// Metadata V15.
 const V15: u32 = 15;

+/// Unstable metadata V16.
+const UNSTABLE_V16: u32 = u32::MAX;
+
 /// Transform the IR to the specified version.
 ///
 /// Use [`supported_versions`] to find supported versions.
 pub fn into_version(metadata: MetadataIR, version: u32) -> Option<RuntimeMetadataPrefixed> {
 	// Note: Unstable metadata version is `u32::MAX` until stabilized.
 	match version {
-		// Latest stable version.
+		// Version V14. This needs to be around until the
+		// deprecation of the `Metadata_metadata` runtime call in favor of
+		// `Metadata_metadata_at_version`.
 		V14 => Some(into_v14(metadata)),
-		// Unstable metadata.
+
+		// Version V15 - latest stable.
 		V15 => Some(into_latest(metadata)),
+
+		// Unstable metadata under `u32::MAX`.
+		UNSTABLE_V16 => Some(into_unstable(metadata)),
+
 		_ => None,
 	}
 }

 /// Returns the supported metadata versions.
 pub fn supported_versions() -> alloc::vec::Vec<u32> {
-	alloc::vec![V14, V15]
+	alloc::vec![V14, V15, UNSTABLE_V16]
 }

 /// Transform the IR to the latest stable metadata version.
@@ -70,6 +81,22 @@ pub fn into_v14(metadata: MetadataIR) -> RuntimeMetadataPrefixed {
 	latest.into()
 }

+/// Transform the IR to unstable metadata version 16.
+pub fn into_unstable(metadata: MetadataIR) -> RuntimeMetadataPrefixed {
+	let latest: frame_metadata::v16::RuntimeMetadataV16 = metadata.into();
+	latest.into()
+}
+
+/// INTERNAL USE ONLY
+///
+/// Special trait that is used together with `InternalConstructRuntime` by `construct_runtime!` to
+/// fetch the runtime api metadata without exploding when there is no runtime api implementation
+/// available.
+#[doc(hidden)]
+pub trait InternalImplRuntimeApis {
+	fn runtime_metadata(&self) -> alloc::vec::Vec<RuntimeApiMetadataIR>;
+}
+
 #[cfg(test)]
 mod test {
 	use super::*;
@@ -81,7 +108,7 @@ mod test {
 			pallets: vec![],
 			extrinsic: ExtrinsicMetadataIR {
 				ty: meta_type::<()>(),
-				version: 0,
+				versions: vec![0],
 				address_ty: meta_type::<()>(),
 				call_ty: meta_type::<()>(),
 				signature_ty: meta_type::<()>(),
diff --git a/substrate/primitives/metadata-ir/src/types.rs b/substrate/primitives/metadata-ir/src/types.rs
index 199b692fbd8c..af217ffe16ee 100644
--- a/substrate/primitives/metadata-ir/src/types.rs
+++ b/substrate/primitives/metadata-ir/src/types.rs
@@ -170,8 +170,8 @@ pub struct ExtrinsicMetadataIR<T: Form = MetaForm> {
 	///
 	/// Note: Field used for metadata V14 only.
 	pub ty: T::Type,
-	/// Extrinsic version.
-	pub version: u8,
+	/// Extrinsic versions.
+	pub versions: Vec<u8>,
 	/// The type of the address that signs the extrinsic
 	pub address_ty: T::Type,
 	/// The type of the outermost Call enum.
@@ -191,7 +191,7 @@ impl IntoPortable for ExtrinsicMetadataIR {
 	fn into_portable(self, registry: &mut Registry) -> Self::Output {
 		ExtrinsicMetadataIR {
 			ty: registry.register_type(&self.ty),
-			version: self.version,
+			versions: self.versions,
 			address_ty: registry.register_type(&self.address_ty),
 			call_ty: registry.register_type(&self.call_ty),
 			signature_ty: registry.register_type(&self.signature_ty),
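The `versions: Vec<u8>` field introduced in `ExtrinsicMetadataIR` is what lets a single IR feed formats of different expressiveness: V14/V15 can encode only one extrinsic version, while V16 keeps the full list. A simplified sketch of that split, with the types reduced to the one relevant field:

```rust
// Simplified stand-ins for the real IR and format types.
#[derive(Clone)]
struct ExtrinsicMetadataIR {
	versions: Vec<u8>,
}

struct ExtrinsicMetadataV15 {
	version: u8,
}

struct ExtrinsicMetadataV16 {
	versions: Vec<u8>,
}

impl From<ExtrinsicMetadataIR> for ExtrinsicMetadataV15 {
	fn from(ir: ExtrinsicMetadataIR) -> Self {
		// Mirrors the `.iter().min().expect(..)` in the v14/v15 conversions below.
		let version = *ir.versions.iter().min().expect("at least one version; qed");
		ExtrinsicMetadataV15 { version }
	}
}

impl From<ExtrinsicMetadataIR> for ExtrinsicMetadataV16 {
	fn from(ir: ExtrinsicMetadataIR) -> Self {
		ExtrinsicMetadataV16 { versions: ir.versions }
	}
}

fn main() {
	let ir = ExtrinsicMetadataIR { versions: vec![4, 5] };
	assert_eq!(ExtrinsicMetadataV15::from(ir.clone()).version, 4);
	assert_eq!(ExtrinsicMetadataV16::from(ir).versions, vec![4, 5]);
}
```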
diff --git a/substrate/primitives/metadata-ir/src/unstable.rs b/substrate/primitives/metadata-ir/src/unstable.rs
new file mode 100644
index 000000000000..d46ce3ec6a7d
--- /dev/null
+++ b/substrate/primitives/metadata-ir/src/unstable.rs
@@ -0,0 +1,211 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Convert the IR to V16 metadata.
+
+use crate::{
+	DeprecationInfoIR, DeprecationStatusIR, OuterEnumsIR, PalletAssociatedTypeMetadataIR,
+	PalletCallMetadataIR, PalletConstantMetadataIR, PalletErrorMetadataIR, PalletEventMetadataIR,
+	PalletStorageMetadataIR, StorageEntryMetadataIR,
+};
+
+use super::types::{
+	ExtrinsicMetadataIR, MetadataIR, PalletMetadataIR, RuntimeApiMetadataIR,
+	RuntimeApiMethodMetadataIR, RuntimeApiMethodParamMetadataIR, TransactionExtensionMetadataIR,
+};
+
+use frame_metadata::v16::{
+	CustomMetadata, DeprecationInfo, DeprecationStatus, ExtrinsicMetadata, OuterEnums,
+	PalletAssociatedTypeMetadata, PalletCallMetadata, PalletConstantMetadata, PalletErrorMetadata,
+	PalletEventMetadata, PalletMetadata, PalletStorageMetadata, RuntimeApiMetadata,
+	RuntimeApiMethodMetadata, RuntimeApiMethodParamMetadata, RuntimeMetadataV16,
+	StorageEntryMetadata, TransactionExtensionMetadata,
+};
+
+impl From<MetadataIR> for RuntimeMetadataV16 {
+	fn from(ir: MetadataIR) -> Self {
+		RuntimeMetadataV16::new(
+			ir.pallets.into_iter().map(Into::into).collect(),
+			ir.extrinsic.into(),
+			ir.apis.into_iter().map(Into::into).collect(),
+			ir.outer_enums.into(),
+			// Substrate does not collect yet the custom metadata fields.
+			// This allows us to extend the V16 easily.
+			CustomMetadata { map: Default::default() },
+		)
+	}
+}
+
+impl From<RuntimeApiMetadataIR> for RuntimeApiMetadata {
+	fn from(ir: RuntimeApiMetadataIR) -> Self {
+		RuntimeApiMetadata {
+			name: ir.name,
+			methods: ir.methods.into_iter().map(Into::into).collect(),
+			docs: ir.docs,
+			deprecation_info: ir.deprecation_info.into(),
+		}
+	}
+}
+
+impl From<RuntimeApiMethodMetadataIR> for RuntimeApiMethodMetadata {
+	fn from(ir: RuntimeApiMethodMetadataIR) -> Self {
+		RuntimeApiMethodMetadata {
+			name: ir.name,
+			inputs: ir.inputs.into_iter().map(Into::into).collect(),
+			output: ir.output,
+			docs: ir.docs,
+			deprecation_info: ir.deprecation_info.into(),
+		}
+	}
+}
+
+impl From<RuntimeApiMethodParamMetadataIR> for RuntimeApiMethodParamMetadata {
+	fn from(ir: RuntimeApiMethodParamMetadataIR) -> Self {
+		RuntimeApiMethodParamMetadata { name: ir.name, ty: ir.ty }
+	}
+}
+
+impl From<PalletMetadataIR> for PalletMetadata {
+	fn from(ir: PalletMetadataIR) -> Self {
+		PalletMetadata {
+			name: ir.name,
+			storage: ir.storage.map(Into::into),
+			calls: ir.calls.map(Into::into),
+			event: ir.event.map(Into::into),
+			constants: ir.constants.into_iter().map(Into::into).collect(),
+			error: ir.error.map(Into::into),
+			index: ir.index,
+			docs: ir.docs,
+			associated_types: ir.associated_types.into_iter().map(Into::into).collect(),
+			deprecation_info: ir.deprecation_info.into(),
+		}
+	}
+}
+
+impl From<PalletStorageMetadataIR> for PalletStorageMetadata {
+	fn from(ir: PalletStorageMetadataIR) -> Self {
+		PalletStorageMetadata {
+			prefix: ir.prefix,
+			entries: ir.entries.into_iter().map(Into::into).collect(),
+		}
+	}
+}
+
+impl From<StorageEntryMetadataIR> for StorageEntryMetadata {
+	fn from(ir: StorageEntryMetadataIR) -> Self {
+		StorageEntryMetadata {
+			name: ir.name,
+			modifier: ir.modifier.into(),
+			ty: ir.ty.into(),
+			default: ir.default,
+			docs: ir.docs,
+			deprecation_info: ir.deprecation_info.into(),
+		}
+	}
+}
+
+impl From<PalletAssociatedTypeMetadataIR> for PalletAssociatedTypeMetadata {
+	fn from(ir: PalletAssociatedTypeMetadataIR) -> Self {
+		PalletAssociatedTypeMetadata { name: ir.name, ty: ir.ty, docs: ir.docs }
+	}
+}
+
+impl From<PalletErrorMetadataIR> for PalletErrorMetadata {
+	fn from(ir: PalletErrorMetadataIR) -> Self {
+		PalletErrorMetadata { ty: ir.ty, deprecation_info: ir.deprecation_info.into() }
+	}
+}
+
+impl From<PalletEventMetadataIR> for PalletEventMetadata {
+	fn from(ir: PalletEventMetadataIR) -> Self {
+		PalletEventMetadata { ty: ir.ty, deprecation_info: ir.deprecation_info.into() }
+	}
+}
+
+impl From<PalletCallMetadataIR> for PalletCallMetadata {
+	fn from(ir: PalletCallMetadataIR) -> Self {
+		PalletCallMetadata { ty: ir.ty, deprecation_info: ir.deprecation_info.into() }
+	}
+}
+
+impl From<PalletConstantMetadataIR> for PalletConstantMetadata {
+	fn from(ir: PalletConstantMetadataIR) -> Self {
+		PalletConstantMetadata {
+			name: ir.name,
+			ty: ir.ty,
+			value: ir.value,
+			docs: ir.docs,
+			deprecation_info: ir.deprecation_info.into(),
+		}
+	}
+}
+
+impl From<TransactionExtensionMetadataIR> for TransactionExtensionMetadata {
+	fn from(ir: TransactionExtensionMetadataIR) -> Self {
+		TransactionExtensionMetadata { identifier: ir.identifier, ty: ir.ty, implicit: ir.implicit }
+	}
+}
+
+impl From<ExtrinsicMetadataIR> for ExtrinsicMetadata {
+	fn from(ir: ExtrinsicMetadataIR) -> Self {
+		// Assume version 0 for all extensions.
+		let indexes = (0..ir.extensions.len()).map(|index| index as u32).collect();
+		let transaction_extensions_by_version = [(0, indexes)].iter().cloned().collect();
+
+		ExtrinsicMetadata {
+			versions: ir.versions,
+			address_ty: ir.address_ty,
+			signature_ty: ir.signature_ty,
+			transaction_extensions_by_version,
+			transaction_extensions: ir.extensions.into_iter().map(Into::into).collect(),
+		}
+	}
+}
+
+impl From<OuterEnumsIR> for OuterEnums {
+	fn from(ir: OuterEnumsIR) -> Self {
+		OuterEnums {
+			call_enum_ty: ir.call_enum_ty,
+			event_enum_ty: ir.event_enum_ty,
+			error_enum_ty: ir.error_enum_ty,
+		}
+	}
+}
+
+impl From<DeprecationStatusIR> for DeprecationStatus {
+	fn from(ir: DeprecationStatusIR) -> Self {
+		match ir {
+			DeprecationStatusIR::NotDeprecated => DeprecationStatus::NotDeprecated,
+			DeprecationStatusIR::DeprecatedWithoutNote => DeprecationStatus::DeprecatedWithoutNote,
+			DeprecationStatusIR::Deprecated { since, note } =>
+				DeprecationStatus::Deprecated { since, note },
+		}
+	}
+}
+
+impl From<DeprecationInfoIR> for DeprecationInfo {
+	fn from(ir: DeprecationInfoIR) -> Self {
+		match ir {
+			DeprecationInfoIR::NotDeprecated => DeprecationInfo::NotDeprecated,
+			DeprecationInfoIR::ItemDeprecated(status) =>
+				DeprecationInfo::ItemDeprecated(status.into()),
+			DeprecationInfoIR::VariantsDeprecated(btree) => DeprecationInfo::VariantsDeprecated(
+				btree.into_iter().map(|(key, value)| (key.0, value.into())).collect(),
+			),
+		}
+	}
+}
diff --git a/substrate/primitives/metadata-ir/src/v14.rs b/substrate/primitives/metadata-ir/src/v14.rs
index 70e84532add9..f3cb5973f5bd 100644
--- a/substrate/primitives/metadata-ir/src/v14.rs
+++ b/substrate/primitives/metadata-ir/src/v14.rs
@@ -149,9 +149,12 @@ impl From<SignedExtensionMetadataIR> for SignedExtensionMetadata {
 impl From<ExtrinsicMetadataIR> for ExtrinsicMetadata {
 	fn from(ir: ExtrinsicMetadataIR) -> Self {
+		let lowest_supported_version =
+			ir.versions.iter().min().expect("Metadata V14 supports one version; qed");
+
 		ExtrinsicMetadata {
 			ty: ir.ty,
-			version: ir.version,
+			version: *lowest_supported_version,
 			signed_extensions: ir.extensions.into_iter().map(Into::into).collect(),
 		}
 	}
diff --git a/substrate/primitives/metadata-ir/src/v15.rs b/substrate/primitives/metadata-ir/src/v15.rs
index 4b3b6106d27f..ed315a31e6dc 100644
--- a/substrate/primitives/metadata-ir/src/v15.rs
+++ b/substrate/primitives/metadata-ir/src/v15.rs
@@ -100,7 +100,7 @@ impl From<SignedExtensionMetadataIR> for SignedExtensionMetadata {
 impl From<ExtrinsicMetadataIR> for ExtrinsicMetadata {
 	fn from(ir: ExtrinsicMetadataIR) -> Self {
 		ExtrinsicMetadata {
-			version: ir.version,
+			version: *ir.versions.iter().min().expect("Metadata V15 supports only one version"),
 			address_ty: ir.address_ty,
 			call_ty: ir.call_ty,
 			signature_ty: ir.signature_ty,
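The `transaction_extensions_by_version` map built in the `ExtrinsicMetadata` conversion above hard-codes extension version 0, attributing every declared extension to it in declaration order. A standalone sketch of that construction, assuming a plain `BTreeMap<u8, Vec<u32>>` in place of the exact frame-metadata v16 types:

```rust
use std::collections::BTreeMap;

// Sketch of the "assume extension version 0" mapping: every transaction
// extension index is filed under extension version 0.
fn extensions_by_version(extension_count: usize) -> BTreeMap<u8, Vec<u32>> {
	let indexes: Vec<u32> = (0..extension_count).map(|index| index as u32).collect();
	[(0u8, indexes)].iter().cloned().collect()
}

fn main() {
	let map = extensions_by_version(3);
	assert_eq!(map.get(&0), Some(&vec![0, 1, 2]));
}
```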
diff --git a/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs b/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs
index 91ba37451909..d8510a60a789 100644
--- a/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs
+++ b/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs
@@ -389,8 +389,7 @@ where
 impl<Address, Call, Signature, Extension: TransactionExtension<Call>> ExtrinsicMetadata
 	for UncheckedExtrinsic<Address, Call, Signature, Extension>
 {
-	// TODO: Expose both version 4 and version 5 in metadata v16.
-	const VERSION: u8 = LEGACY_EXTRINSIC_FORMAT_VERSION;
+	const VERSIONS: &'static [u8] = &[LEGACY_EXTRINSIC_FORMAT_VERSION, EXTRINSIC_FORMAT_VERSION];

 	type TransactionExtensions = Extension;
 }
diff --git a/substrate/primitives/runtime/src/traits/mod.rs b/substrate/primitives/runtime/src/traits/mod.rs
index 02bc7adc8ba5..cfcc3e5a354d 100644
--- a/substrate/primitives/runtime/src/traits/mod.rs
+++ b/substrate/primitives/runtime/src/traits/mod.rs
@@ -1410,10 +1410,10 @@ impl SignaturePayload for () {

 /// Implementor is an [`Extrinsic`] and provides metadata about this extrinsic.
 pub trait ExtrinsicMetadata {
-	/// The format version of the `Extrinsic`.
+	/// The format versions of the `Extrinsic`.
 	///
-	/// By format is meant the encoded representation of the `Extrinsic`.
-	const VERSION: u8;
+	/// By format we mean the encoded representation of the `Extrinsic`.
+	const VERSIONS: &'static [u8];

 	/// Transaction extensions attached to this `Extrinsic`.
 	type TransactionExtensions;
diff --git a/substrate/test-utils/client/Cargo.toml b/substrate/test-utils/client/Cargo.toml
index ebd1eab5980d..a67c91fc5f79 100644
--- a/substrate/test-utils/client/Cargo.toml
+++ b/substrate/test-utils/client/Cargo.toml
@@ -29,9 +29,7 @@ sc-client-db = { features = [
 sc-consensus = { workspace = true, default-features = true }
 sc-executor = { workspace = true, default-features = true }
 sc-offchain = { workspace = true, default-features = true }
-sc-service = { features = [
-	"test-helpers",
-], workspace = true }
+sc-service = { workspace = true }
 sp-blockchain = { workspace = true, default-features = true }
 sp-consensus = { workspace = true, default-features = true }
 sp-core = { workspace = true, default-features = true }
diff --git a/substrate/test-utils/runtime/Cargo.toml b/substrate/test-utils/runtime/Cargo.toml
index 1c82c73072bc..96a888052876 100644
--- a/substrate/test-utils/runtime/Cargo.toml
+++ b/substrate/test-utils/runtime/Cargo.toml
@@ -45,7 +45,7 @@ sp-consensus-grandpa = { features = ["serde"], workspace = true }
 sp-trie = { workspace = true }
 sp-transaction-pool = { workspace = true }
 trie-db = { workspace = true }
-sc-service = { features = ["test-helpers"], optional = true, workspace = true }
+sc-service = { optional = true, workspace = true }
 sp-state-machine = { workspace = true }
 sp-externalities = { workspace = true }
diff --git a/substrate/utils/frame/benchmarking-cli/src/overhead/remark_builder.rs b/substrate/utils/frame/benchmarking-cli/src/overhead/remark_builder.rs
index a1d5f282d9f8..3a2d8776d1e1 100644
--- a/substrate/utils/frame/benchmarking-cli/src/overhead/remark_builder.rs
+++ b/substrate/utils/frame/benchmarking-cli/src/overhead/remark_builder.rs
@@ -54,13 +54,15 @@ impl<C, Block: BlockT> DynamicRemarkBuilder<C, Block> {
 		log::debug!("Found metadata API version {}.", metadata_api_version);

 		let opaque_metadata = if metadata_api_version > 1 {
-			let Ok(mut supported_metadata_versions) = api.metadata_versions(genesis) else {
+			let Ok(supported_metadata_versions) = api.metadata_versions(genesis) else {
 				return Err("Unable to fetch metadata versions".to_string().into());
 			};

 			let latest = supported_metadata_versions
-				.pop()
-				.ok_or("No metadata version supported".to_string())?;
+				.into_iter()
+				.filter(|v| *v != u32::MAX)
+				.max()
+				.ok_or("No stable metadata versions supported".to_string())?;

 			api.metadata_at_version(genesis, latest)
 				.map_err(|e| format!("Unable to fetch metadata: {:?}", e))?
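Both benchmarking-CLI call sites now share the same selection rule: drop the `u32::MAX` sentinel, then take the maximum of what remains. Extracted as a helper for clarity (a hypothetical function, not part of the diff):

```rust
/// Picks the newest stable metadata version, ignoring the `u32::MAX`
/// sentinel that marks the unstable in-progress format.
fn latest_stable_version(supported: Vec<u32>) -> Option<u32> {
	supported.into_iter().filter(|v| *v != u32::MAX).max()
}

fn main() {
	assert_eq!(latest_stable_version(vec![14, 15, u32::MAX]), Some(15));
	// Only the unstable version available maps to the error path in the CLI.
	assert_eq!(latest_stable_version(vec![u32::MAX]), None);
}
```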
diff --git a/substrate/utils/frame/benchmarking-cli/src/overhead/runtime_utilities.rs b/substrate/utils/frame/benchmarking-cli/src/overhead/runtime_utilities.rs
index c498da38afb0..3081197dc033 100644
--- a/substrate/utils/frame/benchmarking-cli/src/overhead/runtime_utilities.rs
+++ b/substrate/utils/frame/benchmarking-cli/src/overhead/runtime_utilities.rs
@@ -35,14 +35,19 @@ pub fn fetch_latest_metadata_from_code_blob(
 	let opaque_metadata: OpaqueMetadata = match version_result {
 		Ok(supported_versions) => {
-			let latest_version = Vec::<u32>::decode(&mut supported_versions.as_slice())
-				.map_err(|e| format!("Unable to decode version list: {e}"))?
-				.pop()
-				.ok_or("No metadata versions supported".to_string())?;
+			let supported_versions = Vec::<u32>::decode(&mut supported_versions.as_slice())
+				.map_err(|e| format!("Unable to decode version list: {e}"))?;
+
+			let latest_stable = supported_versions
+				.into_iter()
+				.filter(|v| *v != u32::MAX)
+				.max()
+				.ok_or("No stable metadata versions supported".to_string())?;

 			let encoded = runtime_caller
-				.call("Metadata_metadata_at_version", latest_version)
+				.call("Metadata_metadata_at_version", latest_stable)
 				.map_err(|_| "Unable to fetch metadata from blob".to_string())?;
+
 			Option::<OpaqueMetadata>::decode(&mut encoded.as_slice())?
 				.ok_or_else(|| "Metadata not found".to_string())?
 		},
diff --git a/substrate/utils/wasm-builder/Cargo.toml b/substrate/utils/wasm-builder/Cargo.toml
index 8f0e8a23e54a..fb15e8619a38 100644
--- a/substrate/utils/wasm-builder/Cargo.toml
+++ b/substrate/utils/wasm-builder/Cargo.toml
@@ -35,7 +35,7 @@ sc-executor = { optional = true, workspace = true, default-features = true }
 sp-core = { optional = true, workspace = true, default-features = true }
 sp-io = { optional = true, workspace = true, default-features = true }
 sp-version = { optional = true, workspace = true, default-features = true }
-frame-metadata = { features = ["current"], optional = true, workspace = true, default-features = true }
+frame-metadata = { features = ["current", "unstable"], optional = true, workspace = true, default-features = true }
 codec = { optional = true, workspace = true, default-features = true }
 array-bytes = { optional = true, workspace = true, default-features = true }
 sp-tracing = { optional = true, workspace = true, default-features = true }
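As the `runtime_utilities.rs` change shows, the `Metadata_metadata_at_version` runtime call returns a SCALE-encoded `Option<OpaqueMetadata>`, where `None` means the runtime does not know the requested version. A minimal sketch of that decode step using `parity-scale-codec`, with `Vec<u8>` standing in for `OpaqueMetadata`:

```rust
use codec::Decode; // parity-scale-codec

// Decode the `Option<..>` wrapper first; only then is a missing version
// distinguishable from a decoding failure.
fn decode_metadata_response(encoded: &[u8]) -> Result<Vec<u8>, String> {
	Option::<Vec<u8>>::decode(&mut &encoded[..])
		.map_err(|e| format!("Unable to decode metadata: {e}"))?
		.ok_or_else(|| "Metadata not found".to_string())
}

fn main() {
	// `0x00` encodes `None`: the requested version is unsupported.
	assert!(decode_metadata_response(&[0x00]).is_err());
}
```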