diff --git a/.github/workflows/cargo-audit.yml b/.github/workflows/cargo-audit.yml new file mode 100644 index 000000000..3197150b1 --- /dev/null +++ b/.github/workflows/cargo-audit.yml @@ -0,0 +1,42 @@ +name: cargo audit +on: + pull_request: + types: + - labeled + - unlabeled + - synchronize +concurrency: + group: cargo-audit-${{ github.ref }} + cancel-in-progress: true + +jobs: + cargo-audit: + name: cargo audit + runs-on: SubtensorCI + if: ${{ !contains(github.event.pull_request.labels.*.name, 'skip-cargo-audit') }} + steps: + - name: Check-out repository under $GITHUB_WORKSPACE + uses: actions/checkout@v4 + + - name: Install dependencies + run: | + sudo apt-get update && + sudo apt-get install -y clang curl libssl-dev llvm libudev-dev protobuf-compiler + + - name: Install Rust Stable + uses: actions-rs/toolchain@v1.0.6 + with: + toolchain: stable + components: rustfmt, clippy + profile: minimal + + - name: Utilize Shared Rust Cache + uses: Swatinem/rust-cache@v2.2.1 + with: + key: ubuntu-latest-${{ env.RUST_BIN_DIR }} + + - name: Install cargo-audit + run: cargo install --version 0.20.1 cargo-audit + + - name: cargo audit + run: cargo audit --ignore RUSTSEC-2024-0336 # rustls issue; wait for upstream to resolve this diff --git a/.github/workflows/check-devnet.yml b/.github/workflows/check-devnet.yml index 2cb586348..8f04d78cf 100644 --- a/.github/workflows/check-devnet.yml +++ b/.github/workflows/check-devnet.yml @@ -39,18 +39,3 @@ jobs: echo "network spec_version: $spec_version" if (( $(echo "$local_spec_version <= $spec_version" | bc -l) )); then echo "$local_spec_version ≯ $spec_version ❌"; exit 1; fi echo "$local_spec_version > $spec_version ✅" - - check-devnet-migrations: - name: check devnet migrations - runs-on: ubuntu-22.04 - steps: - - name: Checkout sources - uses: actions/checkout@v3 - - - name: Run Try Runtime Checks - uses: "paritytech/try-runtime-gha@v0.1.0" - with: - runtime-package: "node-subtensor-runtime" - node-uri: 
"wss://dev.chain.opentensor.ai:443" - checks: "pre-and-post" - extra-args: "--disable-spec-version-check --no-weight-warnings" diff --git a/.github/workflows/check-finney.yml b/.github/workflows/check-finney.yml index 665c9c8a9..947b9a902 100644 --- a/.github/workflows/check-finney.yml +++ b/.github/workflows/check-finney.yml @@ -39,17 +39,3 @@ jobs: echo "network spec_version: $spec_version" if (( $(echo "$local_spec_version <= $spec_version" | bc -l) )); then echo "$local_spec_version ≯ $spec_version ❌"; exit 1; fi echo "$local_spec_version > $spec_version ✅" - - check-finney-migrations: - name: check finney migrations - runs-on: SubtensorCI - steps: - - name: Checkout sources - uses: actions/checkout@v4 - - name: Run Try Runtime Checks - uses: "paritytech/try-runtime-gha@v0.1.0" - with: - runtime-package: "node-subtensor-runtime" - node-uri: "wss://entrypoint-finney.opentensor.ai:443" - checks: "pre-and-post" - extra-args: "--disable-spec-version-check --no-weight-warnings" diff --git a/.github/workflows/check-rust.yml b/.github/workflows/check-rust.yml index d1796364b..80d543163 100644 --- a/.github/workflows/check-rust.yml +++ b/.github/workflows/check-rust.yml @@ -208,6 +208,7 @@ jobs: - name: cargo clippy --workspace --all-targets --all-features -- -D warnings run: cargo clippy --workspace --all-targets --all-features -- -D warnings + # runs cargo test --workspace cargo-test: name: cargo test diff --git a/.github/workflows/check-testnet.yml b/.github/workflows/check-testnet.yml index 95277c94a..a869129ab 100644 --- a/.github/workflows/check-testnet.yml +++ b/.github/workflows/check-testnet.yml @@ -39,18 +39,3 @@ jobs: echo "network spec_version: $spec_version" if (( $(echo "$local_spec_version <= $spec_version" | bc -l) )); then echo "$local_spec_version ≯ $spec_version ❌"; exit 1; fi echo "$local_spec_version > $spec_version ✅" - - check-testnet-migrations: - name: check testnet migrations - runs-on: ubuntu-22.04 - steps: - - name: Checkout sources - uses: 
actions/checkout@v3 - - - name: Run Try Runtime Checks - uses: "paritytech/try-runtime-gha@v0.1.0" - with: - runtime-package: "node-subtensor-runtime" - node-uri: "wss://test.chain.opentensor.ai:443" - checks: "pre-and-post" - extra-args: "--disable-spec-version-check --no-weight-warnings" diff --git a/.github/workflows/try-runtime.yml b/.github/workflows/try-runtime.yml new file mode 100644 index 000000000..174e6db37 --- /dev/null +++ b/.github/workflows/try-runtime.yml @@ -0,0 +1,54 @@ +name: Try Runtime + +on: + pull_request: + branches: [main, devnet-ready, devnet, testnet, finney] + types: [labeled, unlabeled, synchronize] + +env: + CARGO_TERM_COLOR: always + +jobs: + check-devnet: + name: check devnet + runs-on: SubtensorCI + steps: + - name: Checkout sources + uses: actions/checkout@v3 + + - name: Run Try Runtime Checks + uses: "paritytech/try-runtime-gha@v0.1.0" + with: + runtime-package: "node-subtensor-runtime" + node-uri: "wss://dev.chain.opentensor.ai:443" + checks: "all" + extra-args: "--disable-spec-version-check --no-weight-warnings" + + check-testnet: + name: check testnet + runs-on: SubtensorCI + steps: + - name: Checkout sources + uses: actions/checkout@v3 + + - name: Run Try Runtime Checks + uses: "paritytech/try-runtime-gha@v0.1.0" + with: + runtime-package: "node-subtensor-runtime" + node-uri: "wss://test.chain.opentensor.ai:443" + checks: "all" + extra-args: "--disable-spec-version-check --no-weight-warnings" + + check-finney: + name: check finney + runs-on: SubtensorCI + steps: + - name: Checkout sources + uses: actions/checkout@v4 + - name: Run Try Runtime Checks + uses: "paritytech/try-runtime-gha@v0.1.0" + with: + runtime-package: "node-subtensor-runtime" + node-uri: "wss://archive.chain.opentensor.ai:443" + checks: "all" + extra-args: "--disable-spec-version-check --no-weight-warnings" diff --git a/Cargo.lock b/Cargo.lock index b1536c2b5..7e78684b1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -200,7 +200,7 @@ dependencies = [ 
"proc-macro-error", "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -214,18 +214,6 @@ dependencies = [ "ark-std", ] -[[package]] -name = "ark-bls12-377-ext" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20c7021f180a0cbea0380eba97c2af3c57074cdaffe0eef7e840e1c9f2841e55" -dependencies = [ - "ark-bls12-377", - "ark-ec", - "ark-models-ext", - "ark-std", -] - [[package]] name = "ark-bls12-381" version = "0.4.0" @@ -238,45 +226,6 @@ dependencies = [ "ark-std", ] -[[package]] -name = "ark-bls12-381-ext" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1dc4b3d08f19e8ec06e949712f95b8361e43f1391d94f65e4234df03480631c" -dependencies = [ - "ark-bls12-381", - "ark-ec", - "ark-ff", - "ark-models-ext", - "ark-serialize", - "ark-std", -] - -[[package]] -name = "ark-bw6-761" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e0605daf0cc5aa2034b78d008aaf159f56901d92a52ee4f6ecdfdac4f426700" -dependencies = [ - "ark-bls12-377", - "ark-ec", - "ark-ff", - "ark-std", -] - -[[package]] -name = "ark-bw6-761-ext" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccee5fba47266f460067588ee1bf070a9c760bf2050c1c509982c5719aadb4f2" -dependencies = [ - "ark-bw6-761", - "ark-ec", - "ark-ff", - "ark-models-ext", - "ark-std", -] - [[package]] name = "ark-ec" version = "0.4.2" @@ -291,60 +240,9 @@ dependencies = [ "hashbrown 0.13.2", "itertools 0.10.5", "num-traits", - "rayon", "zeroize", ] -[[package]] -name = "ark-ed-on-bls12-377" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b10d901b9ac4b38f9c32beacedfadcdd64e46f8d7f8e88c1ae1060022cf6f6c6" -dependencies = [ - "ark-bls12-377", - "ark-ec", - "ark-ff", - "ark-std", -] - -[[package]] -name = "ark-ed-on-bls12-377-ext" -version = "0.4.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "524a4fb7540df2e1a8c2e67a83ba1d1e6c3947f4f9342cc2359fc2e789ad731d" -dependencies = [ - "ark-ec", - "ark-ed-on-bls12-377", - "ark-ff", - "ark-models-ext", - "ark-std", -] - -[[package]] -name = "ark-ed-on-bls12-381-bandersnatch" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9cde0f2aa063a2a5c28d39b47761aa102bda7c13c84fc118a61b87c7b2f785c" -dependencies = [ - "ark-bls12-381", - "ark-ec", - "ark-ff", - "ark-std", -] - -[[package]] -name = "ark-ed-on-bls12-381-bandersnatch-ext" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d15185f1acb49a07ff8cbe5f11a1adc5a93b19e211e325d826ae98e98e124346" -dependencies = [ - "ark-ec", - "ark-ed-on-bls12-381-bandersnatch", - "ark-ff", - "ark-models-ext", - "ark-std", -] - [[package]] name = "ark-ff" version = "0.4.2" @@ -388,19 +286,6 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "ark-models-ext" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e9eab5d4b5ff2f228b763d38442adc9b084b0a465409b059fac5c2308835ec2" -dependencies = [ - "ark-ec", - "ark-ff", - "ark-serialize", - "ark-std", - "derivative", -] - [[package]] name = "ark-poly" version = "0.4.2" @@ -414,35 +299,6 @@ dependencies = [ "hashbrown 0.13.2", ] -[[package]] -name = "ark-scale" -version = "0.0.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f69c00b3b529be29528a6f2fd5fa7b1790f8bed81b9cdca17e326538545a179" -dependencies = [ - "ark-ec", - "ark-ff", - "ark-serialize", - "ark-std", - "parity-scale-codec", - "scale-info", -] - -[[package]] -name = "ark-secret-scalar" -version = "0.0.2" -source = "git+https://github.com/w3f/ring-vrf?rev=0fef826#0fef8266d851932ad25d6b41bc4b34d834d1e11d" -dependencies = [ - "ark-ec", - "ark-ff", - "ark-serialize", - "ark-std", - "ark-transcript", - "digest 0.10.7", - 
"getrandom_or_panic", - "zeroize", -] - [[package]] name = "ark-serialize" version = "0.4.2" @@ -474,20 +330,6 @@ checksum = "94893f1e0c6eeab764ade8dc4c0db24caf4fe7cbbaafc0eba0a9030f447b5185" dependencies = [ "num-traits", "rand", - "rayon", -] - -[[package]] -name = "ark-transcript" -version = "0.0.2" -source = "git+https://github.com/w3f/ring-vrf?rev=0fef826#0fef8266d851932ad25d6b41bc4b34d834d1e11d" -dependencies = [ - "ark-ff", - "ark-serialize", - "ark-std", - "digest 0.10.7", - "rand_core", - "sha3", ] [[package]] @@ -560,7 +402,7 @@ checksum = "965c2d33e53cb6b267e148a4cb0760bc01f4904c1cd4bb4002a085bb016d1490" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", "synstructure 0.13.1", ] @@ -583,9 +425,15 @@ checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] +[[package]] +name = "assert_matches" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b34d609dfbaf33d6889b2b7106d3ca345eacad44200913df5ba02bfd31d2ba9" + [[package]] name = "async-channel" version = "1.9.0" @@ -629,13 +477,13 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.82" +version = "0.1.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a27b8a3a6e1a44fa4c8baf1f653e4172e81486d4941f2237e20dc2d0cf4ddff1" +checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -651,6 +499,15 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "atoi" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f28d99ec8bfea296261ca1af174f24225171fea9664ba9003cbebee704810528" +dependencies = [ + "num-traits", +] + [[package]] name = "atomic-waker" version = "1.1.2" @@ -668,11 +525,22 @@ dependencies = [ "url", ] +[[package]] +name = "auto_impl" 
+version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.79", +] + [[package]] name = "autocfg" -version = "1.3.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" +checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" [[package]] name = "backtrace" @@ -689,27 +557,6 @@ dependencies = [ "windows-targets 0.52.6", ] -[[package]] -name = "bandersnatch_vrfs" -version = "0.0.4" -source = "git+https://github.com/w3f/ring-vrf?rev=0fef826#0fef8266d851932ad25d6b41bc4b34d834d1e11d" -dependencies = [ - "ark-bls12-381", - "ark-ec", - "ark-ed-on-bls12-381-bandersnatch", - "ark-ff", - "ark-serialize", - "ark-std", - "dleq_vrf", - "rand_chacha", - "rand_core", - "ring 0.1.0", - "sha2 0.10.8", - "sp-ark-bls12-381", - "sp-ark-ed-on-bls12-381-bandersnatch", - "zeroize", -] - [[package]] name = "base-x" version = "0.2.11" @@ -773,7 +620,7 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -1017,9 +864,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.1.21" +version = "1.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07b1695e2c7e8fc85310cde85aeaab7e3097f593c91d209d3f9df76c928100f0" +checksum = "812acba72f0a070b003d3697490d2b55b837230ae7c6c6497f05cc2ddbb8d938" dependencies = [ "jobserver", "libc", @@ -1164,9 +1011,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.17" +version = "4.5.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e5a21b8495e732f1b3c364c9949b201ca7bae518c502c80256c96ad79eaf6ac" +checksum = "7be5744db7978a28d9df86a214130d106a89ce49644cbc4e3f0c22c3fba30615" dependencies = [ "clap_builder", "clap_derive", @@ -1174,9 +1021,9 @@ 
dependencies = [ [[package]] name = "clap_builder" -version = "4.5.17" +version = "4.5.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8cf2dd12af7a047ad9d6da2b6b249759a22a7abc0f474c1dae1777afa4b21a73" +checksum = "a5fbc17d3ef8278f55b282b2a2e75ae6f6c7d4bb70ed3d0382375104bfafdb4b" dependencies = [ "anstream", "anstyle", @@ -1187,14 +1034,14 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.13" +version = "4.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "501d359d5f3dcaf6ecdeee48833ae73ec6e42723a1e52419c79abf9507eec0a0" +checksum = "4ac6a0c7b1a9e9a5186361f67dfa1b88213572f427fb9ab038efb2bd8c582dab" dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -1240,22 +1087,6 @@ dependencies = [ "unicode-width", ] -[[package]] -name = "common" -version = "0.1.0" -source = "git+https://github.com/w3f/ring-proof?rev=665f5f5#665f5f51af5734c7b6d90b985dd6861d4c5b4752" -dependencies = [ - "ark-ec", - "ark-ff", - "ark-poly", - "ark-serialize", - "ark-std", - "fflonk", - "getrandom_or_panic", - "merlin", - "rand_chacha", -] - [[package]] name = "common-path" version = "1.0.0" @@ -1512,6 +1343,15 @@ dependencies = [ "crossbeam-utils", ] +[[package]] +name = "crossbeam-queue" +version = "0.3.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df0346b5d5e76ac2fe4e327c5fd1118d6be7c51dfb18f9b7922923f287471e35" +dependencies = [ + "crossbeam-utils", +] + [[package]] name = "crossbeam-utils" version = "0.8.20" @@ -1600,7 +1440,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -1627,7 +1467,7 @@ dependencies = [ "proc-macro2", "quote", "scratch", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -1644,7 +1484,7 @@ checksum = "98532a60dedaebc4848cb2cba5023337cc9ea3af16a5b062633fabfd9f18fb60" dependencies = 
[ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -1668,7 +1508,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -1679,7 +1519,7 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -1788,7 +1628,7 @@ checksum = "d65d7ce8132b7c0e54497a4d9a55a1c2a0912a0d786cf894472ba818fba45762" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -1801,7 +1641,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version 0.4.1", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -1890,23 +1730,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", -] - -[[package]] -name = "dleq_vrf" -version = "0.0.2" -source = "git+https://github.com/w3f/ring-vrf?rev=0fef826#0fef8266d851932ad25d6b41bc4b34d834d1e11d" -dependencies = [ - "ark-ec", - "ark-ff", - "ark-scale", - "ark-secret-scalar", - "ark-serialize", - "ark-std", - "ark-transcript", - "arrayvec", - "zeroize", + "syn 2.0.79", ] [[package]] @@ -1930,12 +1754,18 @@ dependencies = [ "proc-macro2", "quote", "regex", - "syn 2.0.77", + "syn 2.0.79", "termcolor", "toml 0.8.19", "walkdir", ] +[[package]] +name = "dotenvy" +version = "0.15.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1aaf95b3e5c8f23aa320147307562d361db0ae0d51242340f558153b4eb2439b" + [[package]] name = "downcast" version = "0.11.0" @@ -2035,6 +1865,9 @@ name = "either" version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0" +dependencies = [ + "serde", +] [[package]] name = "elliptic-curve" @@ -2083,7 +1916,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -2103,7 
+1936,7 @@ checksum = "de0d48a183585823424a4ce1aa132d174a6a81bd540895822eb4c8373a8e49e8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -2149,11 +1982,31 @@ checksum = "c22d4b5885b6aa2fe5e8b9329fb8d232bf739e434e6b87347c63bdd00c120f60" dependencies = [ "crunchy", "fixed-hash", + "impl-codec", "impl-rlp", "impl-serde", + "scale-info", "tiny-keccak", ] +[[package]] +name = "ethereum" +version = "0.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e04d24d20b8ff2235cffbf242d5092de3aa45f77c5270ddbfadd2778ca13fea" +dependencies = [ + "bytes", + "ethereum-types", + "hash-db", + "hash256-std-hasher", + "parity-scale-codec", + "rlp", + "scale-info", + "serde", + "sha3", + "trie-root", +] + [[package]] name = "ethereum-types" version = "0.14.1" @@ -2162,9 +2015,11 @@ checksum = "02d215cbf040552efcbe99a38372fe80ab9d00268e20012b79fcd0f073edd8ee" dependencies = [ "ethbloom", "fixed-hash", + "impl-codec", "impl-rlp", "impl-serde", "primitive-types", + "scale-info", "uint", ] @@ -2195,6 +2050,64 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "evm" +version = "0.41.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "767f43e9630cc36cf8ff2777cbb0121b055f0d1fd6eaaa13b46a1808f0d0e7e9" +dependencies = [ + "auto_impl", + "environmental", + "ethereum", + "evm-core", + "evm-gasometer", + "evm-runtime", + "log", + "parity-scale-codec", + "primitive-types", + "rlp", + "scale-info", + "serde", + "sha3", +] + +[[package]] +name = "evm-core" +version = "0.41.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1da6cedc5cedb4208e59467106db0d1f50db01b920920589f8e672c02fdc04f" +dependencies = [ + "parity-scale-codec", + "primitive-types", + "scale-info", + "serde", +] + +[[package]] +name = "evm-gasometer" +version = "0.41.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"1dc0eb591abc5cd7b05bef6a036c2bb6c66ab6c5e0c5ce94bfe377ab670b1fd7" +dependencies = [ + "environmental", + "evm-core", + "evm-runtime", + "primitive-types", +] + +[[package]] +name = "evm-runtime" +version = "0.41.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "84bbe09b64ae13a29514048c1bb6fda6374ac0b4f6a1f15a443348ab88ef42cd" +dependencies = [ + "auto_impl", + "environmental", + "evm-core", + "primitive-types", + "sha3", +] + [[package]] name = "exit-future" version = "0.2.0" @@ -2216,7 +2129,7 @@ dependencies = [ "prettyplease 0.2.22", "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -2237,6 +2150,173 @@ version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e8c02a5121d4ea3eb16a80748c74f5549a5665e4c21333c6098f283870fbdea6" +[[package]] +name = "fc-api" +version = "1.0.0-dev" +source = "git+https://github.com/gztensor/frontier?rev=b8e3025#b8e3025aa30ea65144372bd68d26090c0f31bea2" +dependencies = [ + "async-trait", + "fp-storage", + "parity-scale-codec", + "sp-core", + "sp-runtime", +] + +[[package]] +name = "fc-consensus" +version = "2.0.0-dev" +source = "git+https://github.com/gztensor/frontier?rev=b8e3025#b8e3025aa30ea65144372bd68d26090c0f31bea2" +dependencies = [ + "async-trait", + "fp-consensus", + "fp-rpc", + "sc-consensus", + "sp-api", + "sp-block-builder", + "sp-consensus", + "sp-runtime", + "thiserror", +] + +[[package]] +name = "fc-db" +version = "2.0.0-dev" +source = "git+https://github.com/gztensor/frontier?rev=b8e3025#b8e3025aa30ea65144372bd68d26090c0f31bea2" +dependencies = [ + "async-trait", + "ethereum", + "fc-api", + "fc-storage", + "fp-consensus", + "fp-rpc", + "fp-storage", + "futures", + "kvdb-rocksdb", + "log", + "parity-db", + "parity-scale-codec", + "parking_lot 0.12.3", + "sc-client-api", + "sc-client-db", + "smallvec", + "sp-api", + "sp-blockchain", + "sp-core", + "sp-database", + "sp-runtime", + "sqlx", + "tokio", +] + +[[package]] +name = 
"fc-mapping-sync" +version = "2.0.0-dev" +source = "git+https://github.com/gztensor/frontier?rev=b8e3025#b8e3025aa30ea65144372bd68d26090c0f31bea2" +dependencies = [ + "fc-db", + "fc-storage", + "fp-consensus", + "fp-rpc", + "futures", + "futures-timer", + "log", + "parking_lot 0.12.3", + "sc-client-api", + "sc-utils", + "sp-api", + "sp-blockchain", + "sp-consensus", + "sp-core", + "sp-runtime", + "tokio", +] + +[[package]] +name = "fc-rpc" +version = "2.0.0-dev" +source = "git+https://github.com/gztensor/frontier?rev=b8e3025#b8e3025aa30ea65144372bd68d26090c0f31bea2" +dependencies = [ + "ethereum", + "ethereum-types", + "evm", + "fc-api", + "fc-mapping-sync", + "fc-rpc-core", + "fc-storage", + "fp-evm", + "fp-rpc", + "fp-storage", + "futures", + "hex", + "jsonrpsee", + "libsecp256k1", + "log", + "pallet-evm", + "parity-scale-codec", + "prometheus", + "rand", + "rlp", + "sc-client-api", + "sc-consensus-aura", + "sc-network", + "sc-network-sync", + "sc-rpc", + "sc-service", + "sc-transaction-pool", + "sc-transaction-pool-api", + "sc-utils", + "schnellru", + "serde", + "sp-api", + "sp-block-builder", + "sp-blockchain", + "sp-consensus", + "sp-consensus-aura", + "sp-core", + "sp-externalities", + "sp-inherents", + "sp-io", + "sp-runtime", + "sp-state-machine", + "sp-storage", + "sp-timestamp", + "substrate-prometheus-endpoint", + "thiserror", + "tokio", +] + +[[package]] +name = "fc-rpc-core" +version = "1.1.0-dev" +source = "git+https://github.com/gztensor/frontier?rev=b8e3025#b8e3025aa30ea65144372bd68d26090c0f31bea2" +dependencies = [ + "ethereum", + "ethereum-types", + "jsonrpsee", + "rlp", + "rustc-hex", + "serde", + "serde_json", + "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", +] + +[[package]] +name = "fc-storage" +version = "1.0.0-dev" +source = "git+https://github.com/gztensor/frontier?rev=b8e3025#b8e3025aa30ea65144372bd68d26090c0f31bea2" +dependencies = [ + "ethereum", + "ethereum-types", + "fp-rpc", + 
"fp-storage", + "parity-scale-codec", + "sc-client-api", + "sp-api", + "sp-io", + "sp-runtime", + "sp-storage", +] + [[package]] name = "fdlimit" version = "0.3.0" @@ -2257,19 +2337,6 @@ dependencies = [ "subtle 2.6.1", ] -[[package]] -name = "fflonk" -version = "0.1.0" -source = "git+https://github.com/w3f/fflonk#1e854f35e9a65d08b11a86291405cdc95baa0a35" -dependencies = [ - "ark-ec", - "ark-ff", - "ark-poly", - "ark-serialize", - "ark-std", - "merlin", -] - [[package]] name = "fiat-crypto" version = "0.2.9" @@ -2337,66 +2404,181 @@ dependencies = [ ] [[package]] -name = "fixedbitset" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" - -[[package]] -name = "float-cmp" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98de4bbd547a563b716d8dfa9aad1cb19bfab00f4fa09a6a4ed21dbcf44ce9c4" +name = "fixedbitset" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" + +[[package]] +name = "float-cmp" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "98de4bbd547a563b716d8dfa9aad1cb19bfab00f4fa09a6a4ed21dbcf44ce9c4" +dependencies = [ + "num-traits", +] + +[[package]] +name = "flume" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da0e4dd2a88388a1f4ccc7c9ce104604dab68d9f408dc34cd45823d5a9069095" +dependencies = [ + "futures-core", + "futures-sink", + "spin 0.9.8", +] + +[[package]] +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + +[[package]] +name = "foreign-types" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" +dependencies = [ + "foreign-types-shared", +] + +[[package]] +name = "foreign-types-shared" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" + +[[package]] +name = "fork-tree" +version = "13.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +dependencies = [ + "parity-scale-codec", +] + +[[package]] +name = "form_urlencoded" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" +dependencies = [ + "percent-encoding", +] + +[[package]] +name = "forwarded-header-value" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8835f84f38484cc86f110a805655697908257fb9a7af005234060891557198e9" +dependencies = [ + "nonempty", + "thiserror", +] + +[[package]] +name = "fp-account" +version = "1.0.0-dev" +source = "git+https://github.com/gztensor/frontier?rev=b8e3025#b8e3025aa30ea65144372bd68d26090c0f31bea2" +dependencies = [ + "hex", + "impl-serde", + "libsecp256k1", + "log", + "parity-scale-codec", + "scale-info", + "serde", + "sp-core", + "sp-io", + "sp-runtime", + "sp-runtime-interface", + "staging-xcm", +] + +[[package]] +name = "fp-consensus" +version = "2.0.0-dev" +source = "git+https://github.com/gztensor/frontier?rev=b8e3025#b8e3025aa30ea65144372bd68d26090c0f31bea2" dependencies = [ - "num-traits", + "ethereum", + "parity-scale-codec", + "sp-core", + "sp-runtime", ] [[package]] -name = "fnv" -version = "1.0.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" +name = "fp-dynamic-fee" +version = "1.0.0" +source = 
"git+https://github.com/gztensor/frontier?rev=b8e3025#b8e3025aa30ea65144372bd68d26090c0f31bea2" +dependencies = [ + "async-trait", + "sp-core", + "sp-inherents", +] [[package]] -name = "foreign-types" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" +name = "fp-ethereum" +version = "1.0.0-dev" +source = "git+https://github.com/gztensor/frontier?rev=b8e3025#b8e3025aa30ea65144372bd68d26090c0f31bea2" dependencies = [ - "foreign-types-shared", + "ethereum", + "ethereum-types", + "fp-evm", + "frame-support", + "parity-scale-codec", ] [[package]] -name = "foreign-types-shared" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" +name = "fp-evm" +version = "3.0.0-dev" +source = "git+https://github.com/gztensor/frontier?rev=b8e3025#b8e3025aa30ea65144372bd68d26090c0f31bea2" +dependencies = [ + "evm", + "frame-support", + "num_enum", + "parity-scale-codec", + "scale-info", + "serde", + "sp-core", + "sp-runtime", +] [[package]] -name = "fork-tree" -version = "12.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +name = "fp-rpc" +version = "3.0.0-dev" +source = "git+https://github.com/gztensor/frontier?rev=b8e3025#b8e3025aa30ea65144372bd68d26090c0f31bea2" dependencies = [ + "ethereum", + "ethereum-types", + "fp-evm", "parity-scale-codec", + "scale-info", + "sp-api", + "sp-core", + "sp-runtime", + "sp-state-machine", ] [[package]] -name = "form_urlencoded" -version = "1.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" +name = "fp-self-contained" +version = "1.0.0-dev" +source = "git+https://github.com/gztensor/frontier?rev=b8e3025#b8e3025aa30ea65144372bd68d26090c0f31bea2" 
dependencies = [ - "percent-encoding", + "frame-support", + "parity-scale-codec", + "scale-info", + "serde", + "sp-runtime", ] [[package]] -name = "forwarded-header-value" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8835f84f38484cc86f110a805655697908257fb9a7af005234060891557198e9" +name = "fp-storage" +version = "2.0.0" +source = "git+https://github.com/gztensor/frontier?rev=b8e3025#b8e3025aa30ea65144372bd68d26090c0f31bea2" dependencies = [ - "nonempty", - "thiserror", + "parity-scale-codec", + "serde", ] [[package]] @@ -2407,8 +2589,8 @@ checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa" [[package]] name = "frame-benchmarking" -version = "28.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "38.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "frame-support", "frame-support-procedural", @@ -2424,15 +2606,15 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-runtime-interface 24.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1)", - "sp-storage 19.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1)", + "sp-runtime-interface", + "sp-storage", "static_assertions", ] [[package]] name = "frame-benchmarking-cli" -version = "32.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "43.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "Inflector", "array-bytes", @@ -2465,24 +2647,24 @@ dependencies = [ "sp-blockchain", "sp-core", "sp-database", - "sp-externalities 0.25.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1)", + "sp-externalities", 
"sp-genesis-builder", "sp-inherents", "sp-io", "sp-keystore", "sp-runtime", "sp-state-machine", - "sp-storage 19.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1)", + "sp-storage", "sp-trie", - "sp-wasm-interface 20.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1)", + "sp-wasm-interface", "thiserror", "thousands", ] [[package]] name = "frame-executive" -version = "28.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "38.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "aquamarine", "frame-support", @@ -2494,7 +2676,7 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-tracing 16.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1)", + "sp-tracing", ] [[package]] @@ -2511,8 +2693,8 @@ dependencies = [ [[package]] name = "frame-metadata-hash-extension" -version = "0.1.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "0.6.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "array-bytes", "docify", @@ -2526,8 +2708,8 @@ dependencies = [ [[package]] name = "frame-support" -version = "28.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "38.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "aquamarine", "array-bytes", @@ -2550,7 +2732,7 @@ dependencies = [ "sp-arithmetic", "sp-core", "sp-crypto-hashing-proc-macro", - "sp-debug-derive 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1)", + "sp-debug-derive", "sp-genesis-builder", 
"sp-inherents", "sp-io", @@ -2558,8 +2740,8 @@ dependencies = [ "sp-runtime", "sp-staking", "sp-state-machine", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1)", - "sp-tracing 16.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1)", + "sp-std", + "sp-tracing", "sp-weights", "static_assertions", "tt-call", @@ -2567,22 +2749,22 @@ dependencies = [ [[package]] name = "frame-support-procedural" -version = "23.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "30.0.3" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "Inflector", "cfg-expr", "derive-syn-parse", "docify", "expander", - "frame-support-procedural-tools 10.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1)", + "frame-support-procedural-tools 13.0.0", "itertools 0.11.0", "macro_magic", "proc-macro-warning 1.0.2", "proc-macro2", "quote", - "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1)", - "syn 2.0.77", + "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", + "syn 2.0.79", ] [[package]] @@ -2591,23 +2773,23 @@ version = "10.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3363df38464c47a73eb521a4f648bfcc7537a82d70347ef8af3f73b6d019e910" dependencies = [ - "frame-support-procedural-tools-derive 11.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "frame-support-procedural-tools-derive 11.0.0", "proc-macro-crate 3.2.0", "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] name = "frame-support-procedural-tools" -version = "10.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "13.0.0" +source = 
"git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ - "frame-support-procedural-tools-derive 11.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1)", + "frame-support-procedural-tools-derive 12.0.0", "proc-macro-crate 3.2.0", "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -2618,23 +2800,23 @@ checksum = "68672b9ec6fe72d259d3879dc212c5e42e977588cdac830c76f54d9f492aeb58" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] name = "frame-support-procedural-tools-derive" -version = "11.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "12.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] name = "frame-system" -version = "28.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "38.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "cfg-if", "docify", @@ -2646,15 +2828,15 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1)", + "sp-std", "sp-version", "sp-weights", ] [[package]] name = "frame-system-benchmarking" -version = "28.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "38.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "frame-benchmarking", "frame-support", @@ -2667,8 +2849,8 @@ dependencies = [ 
[[package]] name = "frame-system-rpc-runtime-api" -version = "26.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "34.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "docify", "parity-scale-codec", @@ -2677,8 +2859,8 @@ dependencies = [ [[package]] name = "frame-try-runtime" -version = "0.34.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "0.44.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "frame-support", "parity-scale-codec", @@ -2764,6 +2946,17 @@ dependencies = [ "num_cpus", ] +[[package]] +name = "futures-intrusive" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d930c203dd0b6ff06e0201a4a2fe9149b43c684fd4420555b26d21b1a02956f" +dependencies = [ + "futures-core", + "lock_api", + "parking_lot 0.12.3", +] + [[package]] name = "futures-io" version = "0.3.30" @@ -2788,7 +2981,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -2983,7 +3176,7 @@ dependencies = [ "futures-sink", "futures-util", "http 0.2.12", - "indexmap 2.5.0", + "indexmap 2.6.0", "slab", "tokio", "tokio-util", @@ -3002,7 +3195,7 @@ dependencies = [ "futures-core", "futures-sink", "http 1.1.0", - "indexmap 2.5.0", + "indexmap 2.6.0", "slab", "tokio", "tokio-util", @@ -3066,6 +3259,12 @@ dependencies = [ "allocator-api2", ] +[[package]] +name = "hashbrown" +version = "0.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e087f84d4f86bf4b218b927129862374b72199ae7d8657835f1e89000eea4fb" + [[package]] name = "hashlink" version = 
"0.8.4" @@ -3075,6 +3274,15 @@ dependencies = [ "hashbrown 0.14.5", ] +[[package]] +name = "hashlink" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ba4ff7128dee98c7dc9794b6a411377e1404dba1c97deb8d1a55297bd25d8af" +dependencies = [ + "hashbrown 0.14.5", +] + [[package]] name = "heck" version = "0.4.1" @@ -3234,9 +3442,9 @@ dependencies = [ [[package]] name = "httparse" -version = "1.9.4" +version = "1.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fcc0b4a115bf80b728eb8ea024ad5bd707b615bfed49e0665b6e0f86fd082d9" +checksum = "7d71d3574edd2771538b901e6549113b4006ece66150fb69c0fb6d9a2adae946" [[package]] name = "httpdate" @@ -3312,9 +3520,9 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.8" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da62f120a8a37763efb0cf8fdf264b884c7b8b9ac8660b900c8661030c00e6ba" +checksum = "41296eb09f183ac68eec06e03cdbea2e759633d4067b2f6552fc2e009bcad08b" dependencies = [ "bytes", "futures-util", @@ -3323,7 +3531,6 @@ dependencies = [ "hyper 1.4.1", "pin-project-lite", "tokio", - "tower", "tower-service", ] @@ -3505,12 +3712,12 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.5.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68b900aa2f7301e21c36462b170ee99994de34dff39a4a6a528e80e7376d07e5" +checksum = "707907fe3c25f5424cce2cb7e1cbcafee6bdbe735ca90ef77c29e84591e5b9da" dependencies = [ "equivalent", - "hashbrown 0.14.5", + "hashbrown 0.15.0", ] [[package]] @@ -3645,9 +3852,9 @@ dependencies = [ [[package]] name = "jsonrpsee" -version = "0.24.4" +version = "0.24.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fd1ead9fb95614e8dc5556d12a8681c2f6d352d0c1d3efc8708c7ccbba47bc6" +checksum = "126b48a5acc3c52fbd5381a77898cb60e145123179588a29e7ac48f9c06e401b" dependencies = [ "jsonrpsee-core", 
"jsonrpsee-proc-macros", @@ -3659,9 +3866,9 @@ dependencies = [ [[package]] name = "jsonrpsee-core" -version = "0.24.4" +version = "0.24.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff79651479f69ada7bda604ef2acf3f1aa50755d97cc36d25ff04c2664f9d96f" +checksum = "b0e503369a76e195b65af35058add0e6900b794a4e9a9316900ddd3a87a80477" dependencies = [ "async-trait", "bytes", @@ -3682,22 +3889,22 @@ dependencies = [ [[package]] name = "jsonrpsee-proc-macros" -version = "0.24.4" +version = "0.24.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0d4c6bec4909c966f59f52db3655c0e9d4685faae8b49185973d9d7389bb884" +checksum = "fc660a9389e2748e794a40673a4155d501f32db667757cdb80edeff0306b489b" dependencies = [ "heck 0.5.0", "proc-macro-crate 3.2.0", "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] name = "jsonrpsee-server" -version = "0.24.4" +version = "0.24.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebe2198e5fd96cf2153ecc123364f699b6e2151317ea09c7bf799c43c2fe1415" +checksum = "af6e6c9b6d975edcb443565d648b605f3e85a04ec63aa6941811a8894cc9cded" dependencies = [ "futures-util", "http 1.1.0", @@ -3722,9 +3929,9 @@ dependencies = [ [[package]] name = "jsonrpsee-types" -version = "0.24.4" +version = "0.24.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "531e386460425e49679587871a056f2895a47dade21457324ad1262cd78ef6d9" +checksum = "d8fb16314327cbc94fdf7965ef7e4422509cd5597f76d137bd104eb34aeede67" dependencies = [ "http 1.1.0", "serde", @@ -3734,9 +3941,9 @@ dependencies = [ [[package]] name = "k256" -version = "0.13.3" +version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "956ff9b67e26e1a6a866cb758f12c6f8746208489e3e4a4b5580802f2f0a587b" +checksum = "f6e3919bbaa2945715f0bb6d3934a173d1e9a59ac23767fbaaef277265a7411b" dependencies = [ "cfg-if", "ecdsa", @@ -3808,9 +4015,9 @@ checksum = 
"830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.158" +version = "0.2.159" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8adc4bb1803a324070e64a98ae98f38934d91957a99cfb3a43dcbc01bc56439" +checksum = "561d97a539a36e26a9a5fad1ea11a3039a67714694aaa379433e580854bc3dc5" [[package]] name = "libloading" @@ -3859,7 +4066,7 @@ dependencies = [ "libp2p-wasm-ext", "libp2p-websocket", "libp2p-yamux", - "multiaddr 0.18.1", + "multiaddr 0.18.2", "pin-project", "rw-stream-sink", "thiserror", @@ -3902,7 +4109,7 @@ dependencies = [ "instant", "libp2p-identity", "log", - "multiaddr 0.18.1", + "multiaddr 0.18.2", "multihash 0.19.1", "multistream-select", "once_cell", @@ -4053,7 +4260,7 @@ dependencies = [ "libp2p-core", "libp2p-identity", "log", - "multiaddr 0.18.1", + "multiaddr 0.18.2", "multihash 0.19.1", "once_cell", "quick-protobuf", @@ -4159,7 +4366,7 @@ dependencies = [ "proc-macro-warning 0.4.2", "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -4270,7 +4477,7 @@ checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" dependencies = [ "bitflags 2.6.0", "libc", - "redox_syscall 0.5.4", + "redox_syscall 0.5.7", ] [[package]] @@ -4336,6 +4543,17 @@ dependencies = [ "libsecp256k1-core", ] +[[package]] +name = "libsqlite3-sys" +version = "0.30.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e99fb7a497b1e3339bc746195567ed8d3e24945ecd636e3619d20b9de9e9149" +dependencies = [ + "cc", + "pkg-config", + "vcpkg", +] + [[package]] name = "libz-sys" version = "1.1.20" @@ -4418,7 +4636,7 @@ dependencies = [ "futures", "futures-timer", "hex-literal", - "indexmap 2.5.0", + "indexmap 2.6.0", "libc", "mockall 0.12.1", "multiaddr 0.17.1", @@ -4504,18 +4722,18 @@ dependencies = [ [[package]] name = "lz4" -version = "1.27.0" +version = "1.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a231296ca742e418c43660cb68e082486ff2538e8db432bc818580f3965025ed" +checksum = "4d1febb2b4a79ddd1980eede06a8f7902197960aa0383ffcfdd62fe723036725" dependencies = [ "lz4-sys", ] [[package]] name = "lz4-sys" -version = "1.11.0" +version = "1.11.1+lz4-1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcb44a01837a858d47e5a630d2ccf304c8efcc4b83b8f9f75b7a9ee4fcc6e57d" +checksum = "6bd8c0d6c6ed0cd30b3652886bb8711dc4bb01d637a68105a3d5158039b418e6" dependencies = [ "cc", "libc", @@ -4539,7 +4757,7 @@ dependencies = [ "macro_magic_core", "macro_magic_macros", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -4553,7 +4771,7 @@ dependencies = [ "macro_magic_core_macros", "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -4564,7 +4782,7 @@ checksum = "b02abfe41815b5bd98dbd4260173db2c116dda171dc0fe7838cb206333b83308" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -4575,7 +4793,7 @@ checksum = "73ea28ee64b88876bf45277ed9a5817c1817df061a74f2b988971a12570e5869" dependencies = [ "macro_magic_core", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -4726,7 +4944,7 @@ dependencies = [ "c2-chacha", "curve25519-dalek", "either", - "hashlink", + "hashlink 0.8.4", "lioness", "log", "parking_lot 0.12.3", @@ -4789,7 +5007,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -4813,9 +5031,9 @@ dependencies = [ [[package]] name = "multiaddr" -version = "0.18.1" +version = "0.18.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b852bc02a2da5feed68cd14fa50d0774b92790a5bdbfa932a813926c8472070" +checksum = "fe6351f60b488e04c1d21bc69e56b89cb3f5e8f5d22557d6e8031bdfd79b6961" dependencies = [ "arrayref", "byteorder", @@ -4826,7 +5044,7 @@ dependencies = [ "percent-encoding", "serde", "static_assertions", - "unsigned-varint 0.7.2", + "unsigned-varint 0.8.0", "url", ] @@ -4949,7 +5167,7 @@ checksum = 
"254a5372af8fc138e36684761d3c0cdb758a4410e938babcff1c860ce14ddbfc" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -4961,6 +5179,23 @@ dependencies = [ "rand", ] +[[package]] +name = "native-tls" +version = "0.2.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8614eb2c83d59d1c8cc974dd3f920198647674a0a035e1af1fa58707e317466" +dependencies = [ + "libc", + "log", + "openssl", + "openssl-probe", + "openssl-sys", + "schannel", + "security-framework", + "security-framework-sys", + "tempfile", +] + [[package]] name = "ndarray" version = "0.15.6" @@ -5073,11 +5308,22 @@ checksum = "b93853da6d84c2e3c7d730d6473e8817692dd89be387eb01b94d7f108ecb5b8c" name = "node-subtensor" version = "4.0.0-dev" dependencies = [ + "async-trait", "clap", + "fc-api", + "fc-consensus", + "fc-db", + "fc-mapping-sync", + "fc-rpc", + "fc-rpc-core", + "fc-storage", + "fp-dynamic-fee", + "fp-rpc", "frame-benchmarking", "frame-benchmarking-cli", "frame-metadata-hash-extension", "frame-system", + "frame-system-rpc-runtime-api", "futures", "jsonrpsee", "memmap2 0.9.5", @@ -5085,6 +5331,8 @@ dependencies = [ "pallet-commitments", "pallet-transaction-payment", "pallet-transaction-payment-rpc", + "pallet-transaction-payment-rpc-runtime-api", + "parity-scale-codec", "sc-basic-authorship", "sc-chain-spec", "sc-chain-spec-derive", @@ -5094,10 +5342,12 @@ dependencies = [ "sc-consensus-aura", "sc-consensus-grandpa", "sc-consensus-grandpa-rpc", + "sc-consensus-manual-seal", "sc-consensus-slots", "sc-executor", "sc-keystore", "sc-network", + "sc-network-sync", "sc-offchain", "sc-rpc", "sc-rpc-api", @@ -5117,10 +5367,14 @@ dependencies = [ "sp-inherents", "sp-io", "sp-keyring", + "sp-offchain", "sp-runtime", + "sp-session", "sp-timestamp", + "sp-transaction-pool", "substrate-build-script-utils", "substrate-frame-rpc-system", + "substrate-prometheus-endpoint", "subtensor-custom-rpc", "subtensor-custom-rpc-runtime-api", ] @@ -5129,6 
+5383,11 @@ dependencies = [ name = "node-subtensor-runtime" version = "4.0.0-dev" dependencies = [ + "ed25519-dalek", + "fp-account", + "fp-evm", + "fp-rpc", + "fp-self-contained", "frame-benchmarking", "frame-executive", "frame-metadata", @@ -5142,9 +5401,18 @@ dependencies = [ "pallet-admin-utils", "pallet-aura", "pallet-balances", + "pallet-base-fee", "pallet-collective", "pallet-commitments", + "pallet-dynamic-fee", + "pallet-ethereum", + "pallet-evm", + "pallet-evm-chain-id", + "pallet-evm-precompile-modexp", + "pallet-evm-precompile-sha3fips", + "pallet-evm-precompile-simple", "pallet-grandpa", + "pallet-hotfix-sufficients", "pallet-insecure-randomness-collective-flip", "pallet-membership", "pallet-multisig", @@ -5173,9 +5441,9 @@ dependencies = [ "sp-offchain", "sp-runtime", "sp-session", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1)", - "sp-storage 19.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1)", - "sp-tracing 16.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1)", + "sp-std", + "sp-storage", + "sp-tracing", "sp-transaction-pool", "sp-version", "substrate-wasm-builder", @@ -5227,6 +5495,20 @@ dependencies = [ "winapi", ] +[[package]] +name = "num" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "35bd024e8b2ff75562e5f34e7f4905839deb4b22955ef5e73d2fea1b9813cb23" +dependencies = [ + "num-bigint", + "num-complex", + "num-integer", + "num-iter", + "num-rational", + "num-traits", +] + [[package]] name = "num-bigint" version = "0.4.6" @@ -5271,12 +5553,24 @@ dependencies = [ "num-traits", ] +[[package]] +name = "num-iter" +version = "0.1.45" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1429034a0490724d0075ebb2bc9e875d6503c3cf69e235a8941aa757d83ef5bf" +dependencies = [ + "autocfg", + "num-integer", + "num-traits", +] + [[package]] name = "num-rational" version = "0.4.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "f83d14da390562dca69fc84082e73e548e1ad308d24accdedd2720017cb37824" dependencies = [ + "num-bigint", "num-integer", "num-traits", ] @@ -5301,6 +5595,27 @@ dependencies = [ "libc", ] +[[package]] +name = "num_enum" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4e613fc340b2220f734a8595782c551f1250e969d87d3be1ae0579e8d4065179" +dependencies = [ + "num_enum_derive", +] + +[[package]] +name = "num_enum_derive" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af1844ef2428cc3e1cb900be36181049ef3d3193c63e43026cfe202983b27a56" +dependencies = [ + "proc-macro-crate 1.1.3", + "proc-macro2", + "quote", + "syn 2.0.79", +] + [[package]] name = "object" version = "0.30.4" @@ -5351,9 +5666,12 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.19.0" +version = "1.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" +checksum = "82881c4be219ab5faaf2ad5e5e5ecdff8c66bd7402ca3160975c93b24961afd1" +dependencies = [ + "portable-atomic", +] [[package]] name = "opaque-debug" @@ -5390,7 +5708,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -5450,8 +5768,8 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1)", - "sp-tracing 16.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1)", + "sp-std", + "sp-tracing", "sp-weights", "substrate-fixed", "subtensor-macros", @@ -5459,8 +5777,8 @@ dependencies = [ [[package]] name = "pallet-aura" -version = "27.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "37.0.0" 
+source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "frame-support", "frame-system", @@ -5475,8 +5793,8 @@ dependencies = [ [[package]] name = "pallet-authorship" -version = "28.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "38.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "frame-support", "frame-system", @@ -5488,58 +5806,171 @@ dependencies = [ [[package]] name = "pallet-balances" -version = "28.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "39.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "docify", "frame-benchmarking", "frame-support", "frame-system", - "log", + "log", + "parity-scale-codec", + "scale-info", + "sp-runtime", +] + +[[package]] +name = "pallet-base-fee" +version = "1.0.0" +source = "git+https://github.com/gztensor/frontier?rev=b8e3025#b8e3025aa30ea65144372bd68d26090c0f31bea2" +dependencies = [ + "fp-evm", + "frame-support", + "frame-system", + "parity-scale-codec", + "scale-info", + "sp-core", + "sp-runtime", +] + +[[package]] +name = "pallet-collective" +version = "4.0.0-dev" +dependencies = [ + "frame-benchmarking", + "frame-support", + "frame-system", + "log", + "parity-scale-codec", + "scale-info", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std", + "subtensor-macros", +] + +[[package]] +name = "pallet-commitments" +version = "4.0.0-dev" +dependencies = [ + "enumflags2", + "frame-benchmarking", + "frame-support", + "frame-system", + "pallet-balances", + "parity-scale-codec", + "scale-info", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std", + "subtensor-macros", +] + 
+[[package]] +name = "pallet-dynamic-fee" +version = "4.0.0-dev" +source = "git+https://github.com/gztensor/frontier?rev=b8e3025#b8e3025aa30ea65144372bd68d26090c0f31bea2" +dependencies = [ + "fp-dynamic-fee", + "fp-evm", + "frame-support", + "frame-system", + "parity-scale-codec", + "scale-info", + "sp-core", + "sp-inherents", +] + +[[package]] +name = "pallet-ethereum" +version = "4.0.0-dev" +source = "git+https://github.com/gztensor/frontier?rev=b8e3025#b8e3025aa30ea65144372bd68d26090c0f31bea2" +dependencies = [ + "ethereum", + "ethereum-types", + "evm", + "fp-consensus", + "fp-ethereum", + "fp-evm", + "fp-rpc", + "fp-storage", + "frame-support", + "frame-system", + "pallet-evm", "parity-scale-codec", "scale-info", + "sp-io", "sp-runtime", ] [[package]] -name = "pallet-collective" -version = "4.0.0-dev" +name = "pallet-evm" +version = "6.0.0-dev" +source = "git+https://github.com/gztensor/frontier?rev=b8e3025#b8e3025aa30ea65144372bd68d26090c0f31bea2" dependencies = [ + "environmental", + "evm", + "fp-account", + "fp-evm", "frame-benchmarking", "frame-support", "frame-system", + "hash-db", + "hex-literal", + "impl-trait-for-tuples", "log", "parity-scale-codec", "scale-info", "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1)", - "subtensor-macros", ] [[package]] -name = "pallet-commitments" -version = "4.0.0-dev" +name = "pallet-evm-chain-id" +version = "1.0.0-dev" +source = "git+https://github.com/gztensor/frontier?rev=b8e3025#b8e3025aa30ea65144372bd68d26090c0f31bea2" dependencies = [ - "enumflags2", - "frame-benchmarking", "frame-support", "frame-system", - "pallet-balances", "parity-scale-codec", "scale-info", - "sp-core", +] + +[[package]] +name = "pallet-evm-precompile-modexp" +version = "2.0.0-dev" +source = "git+https://github.com/gztensor/frontier?rev=b8e3025#b8e3025aa30ea65144372bd68d26090c0f31bea2" +dependencies = [ + "fp-evm", + "num", +] + +[[package]] +name = 
"pallet-evm-precompile-sha3fips" +version = "2.0.0-dev" +source = "git+https://github.com/gztensor/frontier?rev=b8e3025#b8e3025aa30ea65144372bd68d26090c0f31bea2" +dependencies = [ + "fp-evm", + "tiny-keccak", +] + +[[package]] +name = "pallet-evm-precompile-simple" +version = "2.0.0-dev" +source = "git+https://github.com/gztensor/frontier?rev=b8e3025#b8e3025aa30ea65144372bd68d26090c0f31bea2" +dependencies = [ + "fp-evm", + "ripemd", "sp-io", - "sp-runtime", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1)", - "subtensor-macros", ] [[package]] name = "pallet-grandpa" -version = "28.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "38.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "frame-benchmarking", "frame-support", @@ -5558,10 +5989,25 @@ dependencies = [ "sp-staking", ] +[[package]] +name = "pallet-hotfix-sufficients" +version = "1.0.0" +source = "git+https://github.com/gztensor/frontier?rev=b8e3025#b8e3025aa30ea65144372bd68d26090c0f31bea2" +dependencies = [ + "frame-benchmarking", + "frame-support", + "frame-system", + "pallet-evm", + "parity-scale-codec", + "scale-info", + "sp-core", + "sp-runtime", +] + [[package]] name = "pallet-insecure-randomness-collective-flip" -version = "16.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "26.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "frame-support", "frame-system", @@ -5573,8 +6019,8 @@ dependencies = [ [[package]] name = "pallet-membership" -version = "28.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "38.0.0" 
+source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "frame-benchmarking", "frame-support", @@ -5589,8 +6035,8 @@ dependencies = [ [[package]] name = "pallet-multisig" -version = "28.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "38.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "frame-benchmarking", "frame-support", @@ -5604,8 +6050,8 @@ dependencies = [ [[package]] name = "pallet-preimage" -version = "28.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "38.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "frame-benchmarking", "frame-support", @@ -5620,8 +6066,8 @@ dependencies = [ [[package]] name = "pallet-proxy" -version = "28.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "38.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "frame-benchmarking", "frame-support", @@ -5645,14 +6091,14 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1)", + "sp-std", "subtensor-macros", ] [[package]] name = "pallet-safe-mode" -version = "9.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "19.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "docify", 
"frame-benchmarking", @@ -5669,8 +6115,8 @@ dependencies = [ [[package]] name = "pallet-scheduler" -version = "29.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "39.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "docify", "frame-benchmarking", @@ -5686,8 +6132,8 @@ dependencies = [ [[package]] name = "pallet-session" -version = "28.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "38.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "frame-support", "frame-system", @@ -5736,8 +6182,8 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1)", - "sp-tracing 16.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1)", + "sp-std", + "sp-tracing", "sp-version", "substrate-fixed", "subtensor-macros", @@ -5745,8 +6191,8 @@ dependencies = [ [[package]] name = "pallet-sudo" -version = "28.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "38.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "docify", "frame-benchmarking", @@ -5760,8 +6206,8 @@ dependencies = [ [[package]] name = "pallet-timestamp" -version = "27.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "37.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "docify", 
"frame-benchmarking", @@ -5773,14 +6219,14 @@ dependencies = [ "sp-inherents", "sp-io", "sp-runtime", - "sp-storage 19.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1)", + "sp-storage", "sp-timestamp", ] [[package]] name = "pallet-transaction-payment" -version = "28.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "38.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "frame-support", "frame-system", @@ -5794,8 +6240,8 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc" -version = "30.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "41.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "jsonrpsee", "pallet-transaction-payment-rpc-runtime-api", @@ -5810,8 +6256,8 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc-runtime-api" -version = "28.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "38.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "pallet-transaction-payment", "parity-scale-codec", @@ -5822,8 +6268,8 @@ dependencies = [ [[package]] name = "pallet-utility" -version = "28.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "38.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "frame-benchmarking", "frame-support", @@ -5980,7 +6426,7 @@ checksum = 
"1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ "cfg-if", "libc", - "redox_syscall 0.5.4", + "redox_syscall 0.5.7", "smallvec", "windows-targets 0.52.6", ] @@ -6041,9 +6487,9 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" -version = "2.7.12" +version = "2.7.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c73c26c01b8c87956cea613c907c9d6ecffd8d18a2a5908e5de0adfaa185cea" +checksum = "fdbef9d1d47087a895abd220ed25eb4ad973a5e26f6a4367b038c25e28dfc2d9" dependencies = [ "memchr", "thiserror", @@ -6052,9 +6498,9 @@ dependencies = [ [[package]] name = "pest_derive" -version = "2.7.12" +version = "2.7.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "664d22978e2815783adbdd2c588b455b1bd625299ce36b2a99881ac9627e6d8d" +checksum = "4d3a6e3394ec80feb3b6393c725571754c6188490265c61aaf260810d6b95aa0" dependencies = [ "pest", "pest_generator", @@ -6062,22 +6508,22 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.7.12" +version = "2.7.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2d5487022d5d33f4c30d91c22afa240ce2a644e87fe08caad974d4eab6badbe" +checksum = "94429506bde1ca69d1b5601962c73f4172ab4726571a59ea95931218cb0e930e" dependencies = [ "pest", "pest_meta", "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] name = "pest_meta" -version = "2.7.12" +version = "2.7.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0091754bbd0ea592c4deb3a122ce8ecbb0753b738aa82bc055fcc2eccc8d8174" +checksum = "ac8a071862e93690b6e34e9a5fb8e33ff3734473ac0245b27232222c4906a33f" dependencies = [ "once_cell", "pest", @@ -6091,7 +6537,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" dependencies = [ "fixedbitset", - "indexmap 2.5.0", + "indexmap 
2.6.0", ] [[package]] @@ -6111,7 +6557,7 @@ checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -6138,9 +6584,9 @@ dependencies = [ [[package]] name = "pkg-config" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d231b230927b5e4ad203db57bbcbee2802f6bce620b1e4a9024a07d94e2907ec" +checksum = "953ec861398dccce10c670dfeaf3ec4911ca479e9c02154b3a215178c5f566f2" [[package]] name = "polkavm" @@ -6191,7 +6637,7 @@ dependencies = [ "polkavm-common", "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -6201,7 +6647,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ba81f7b5faac81e528eb6158a6f3c9e0bb1008e0ffa19653bc8dea925ecb429" dependencies = [ "polkavm-derive-impl", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -6265,9 +6711,9 @@ dependencies = [ [[package]] name = "portable-atomic" -version = "1.7.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da544ee218f0d287a911e9c99a39a8c9bc8fcad3cb8db5959940044ecfc67265" +checksum = "cc9c68a3f6da06753e9335d63e27f6b9754dd1920d941135b7ea8224f141adb2" [[package]] name = "powerfmt" @@ -6341,7 +6787,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "479cf940fbbb3426c32c5d5176f62ad57549a0bb84773423ba8be9d089f5faba" dependencies = [ "proc-macro2", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -6409,7 +6855,7 @@ checksum = "3d1eaa7fa0aa1929ffdf7eeb6eac234dde6268914a14ad44d23521ab6a9b258e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -6420,7 +6866,7 @@ checksum = "834da187cfe638ae8abb0203f0b33e5ccdb02a28e7199f2f47b3e2754f50edca" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -6440,7 +6886,7 @@ dependencies = [ "cfg-expr", 
"derive-syn-parse", "expander", - "frame-support-procedural-tools 10.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "frame-support-procedural-tools 10.0.0", "itertools 0.10.5", "macro_magic", "proc-macro-warning 1.0.2", @@ -6448,7 +6894,7 @@ dependencies = [ "quote", "regex", "sp-crypto-hashing 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -6485,7 +6931,7 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -6547,7 +6993,7 @@ dependencies = [ "prost 0.12.6", "prost-types 0.12.6", "regex", - "syn 2.0.77", + "syn 2.0.79", "tempfile", ] @@ -6574,7 +7020,7 @@ dependencies = [ "itertools 0.12.1", "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -6810,9 +7256,9 @@ dependencies = [ [[package]] name = "raw-cpuid" -version = "11.1.0" +version = "11.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb9ee317cfe3fbd54b36a511efc1edd42e216903c9cd575e686dd68a2ba90d8d" +checksum = "1ab240315c661615f2ee9f0f2cd32d5a7343a84d5ebcccb99d46e6637565e7b0" dependencies = [ "bitflags 2.6.0", ] @@ -6866,9 +7312,9 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.5.4" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0884ad60e090bf1345b93da0a5de8923c93884cd03f40dfcfddd3b4bee661853" +checksum = "9b6dfecf2c74bce2466cabf93f6664d6998a69eb21e39f4207930065b27b771f" dependencies = [ "bitflags 2.6.0", ] @@ -6901,7 +7347,7 @@ checksum = "bcc303e793d3734489387d205e9b186fac9c6cfacedd98cbb2e8a5943595f3e6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -6931,14 +7377,14 @@ dependencies = [ [[package]] name = "regex" -version = "1.10.6" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"4219d74c6b67a3654a9fbebc4b419e22126d13d2f3c4a07ee0cb61ff79a79619" +checksum = "38200e5ee88914975b69f657f0801b6f6dccafd44fd9326302a4aaeecfacb1d8" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.7", - "regex-syntax 0.8.4", + "regex-automata 0.4.8", + "regex-syntax 0.8.5", ] [[package]] @@ -6952,13 +7398,13 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.7" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38caf58cc5ef2fed281f89292ef23f6365465ed9a41b7a7754eb4e26496c92df" +checksum = "368758f23274712b504848e9d5a6f010445cc8b87a7cdb4d7cbee666c1288da3" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.8.4", + "regex-syntax 0.8.5", ] [[package]] @@ -6969,9 +7415,9 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.8.4" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a66a03ae7c801facd77a29370b4faec201768915ac14a721ba36f20bc9c209b" +checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" [[package]] name = "resolv-conf" @@ -6993,23 +7439,6 @@ dependencies = [ "subtle 2.6.1", ] -[[package]] -name = "ring" -version = "0.1.0" -source = "git+https://github.com/w3f/ring-proof?rev=665f5f5#665f5f51af5734c7b6d90b985dd6861d4c5b4752" -dependencies = [ - "ark-ec", - "ark-ff", - "ark-poly", - "ark-serialize", - "ark-std", - "arrayvec", - "blake2 0.10.6", - "common", - "fflonk", - "merlin", -] - [[package]] name = "ring" version = "0.16.20" @@ -7040,6 +7469,15 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "ripemd" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd124222d17ad93a644ed9d011a40f4fb64aa54275c08cc216524a9ea82fb09f" +dependencies = [ + "digest 0.10.7", +] + [[package]] name = "rlp" version = "0.5.2" @@ -7047,9 +7485,21 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "bb919243f34364b6bd2fc10ef797edbfa75f33c252e7998527479c6d6b47e1ec" dependencies = [ "bytes", + "rlp-derive", "rustc-hex", ] +[[package]] +name = "rlp-derive" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e33d7b2abe0c340d8797fe2907d3f20d3b5ea5908683618bfe80df7f621f672a" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "rocksdb" version = "0.21.0" @@ -7286,19 +7736,19 @@ dependencies = [ [[package]] name = "sc-allocator" -version = "23.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "29.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "log", "sp-core", - "sp-wasm-interface 20.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1)", + "sp-wasm-interface", "thiserror", ] [[package]] name = "sc-basic-authorship" -version = "0.34.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "0.45.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "futures", "futures-timer", @@ -7319,8 +7769,8 @@ dependencies = [ [[package]] name = "sc-block-builder" -version = "0.33.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "0.42.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "parity-scale-codec", "sp-api", @@ -7334,8 +7784,8 @@ dependencies = [ [[package]] name = "sc-chain-spec" -version = "28.0.0" -source = 
"git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "38.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "array-bytes", "docify", @@ -7351,29 +7801,29 @@ dependencies = [ "serde_json", "sp-blockchain", "sp-core", - "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1)", + "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", "sp-genesis-builder", "sp-io", "sp-runtime", "sp-state-machine", - "sp-tracing 16.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1)", + "sp-tracing", ] [[package]] name = "sc-chain-spec-derive" -version = "11.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "12.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "proc-macro-crate 3.2.0", "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] name = "sc-cli" -version = "0.36.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "0.47.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "array-bytes", "chrono", @@ -7413,8 +7863,8 @@ dependencies = [ [[package]] name = "sc-client-api" -version = "28.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "37.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "fnv", "futures", @@ -7429,19 +7879,19 @@ dependencies = [ 
"sp-consensus", "sp-core", "sp-database", - "sp-externalities 0.25.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1)", + "sp-externalities", "sp-runtime", "sp-state-machine", "sp-statement-store", - "sp-storage 19.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1)", + "sp-storage", "sp-trie", "substrate-prometheus-endpoint", ] [[package]] name = "sc-client-db" -version = "0.35.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "0.44.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "hash-db", "kvdb", @@ -7466,8 +7916,8 @@ dependencies = [ [[package]] name = "sc-consensus" -version = "0.33.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "0.44.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "async-trait", "futures", @@ -7490,8 +7940,8 @@ dependencies = [ [[package]] name = "sc-consensus-aura" -version = "0.34.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "0.45.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "async-trait", "futures", @@ -7517,10 +7967,59 @@ dependencies = [ "thiserror", ] +[[package]] +name = "sc-consensus-babe" +version = "0.45.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +dependencies = [ + "async-trait", + "fork-tree", + "futures", + "log", + "num-bigint", + "num-rational", + "num-traits", + "parity-scale-codec", + "parking_lot 0.12.3", + "sc-client-api", + "sc-consensus", + 
"sc-consensus-epochs", + "sc-consensus-slots", + "sc-telemetry", + "sc-transaction-pool-api", + "sp-api", + "sp-application-crypto", + "sp-block-builder", + "sp-blockchain", + "sp-consensus", + "sp-consensus-babe", + "sp-consensus-slots", + "sp-core", + "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", + "sp-inherents", + "sp-keystore", + "sp-runtime", + "substrate-prometheus-endpoint", + "thiserror", +] + +[[package]] +name = "sc-consensus-epochs" +version = "0.44.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +dependencies = [ + "fork-tree", + "parity-scale-codec", + "sc-client-api", + "sc-consensus", + "sp-blockchain", + "sp-runtime", +] + [[package]] name = "sc-consensus-grandpa" -version = "0.19.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "0.30.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "ahash 0.8.11", "array-bytes", @@ -7554,7 +8053,7 @@ dependencies = [ "sp-consensus", "sp-consensus-grandpa", "sp-core", - "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1)", + "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", "sp-keystore", "sp-runtime", "substrate-prometheus-endpoint", @@ -7563,8 +8062,8 @@ dependencies = [ [[package]] name = "sc-consensus-grandpa-rpc" -version = "0.19.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "0.30.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "finality-grandpa", "futures", @@ -7581,10 +8080,45 @@ dependencies = [ "thiserror", ] 
+[[package]] +name = "sc-consensus-manual-seal" +version = "0.46.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +dependencies = [ + "assert_matches", + "async-trait", + "futures", + "futures-timer", + "jsonrpsee", + "log", + "parity-scale-codec", + "sc-client-api", + "sc-consensus", + "sc-consensus-aura", + "sc-consensus-babe", + "sc-consensus-epochs", + "sc-transaction-pool", + "sc-transaction-pool-api", + "serde", + "sp-api", + "sp-blockchain", + "sp-consensus", + "sp-consensus-aura", + "sp-consensus-babe", + "sp-consensus-slots", + "sp-core", + "sp-inherents", + "sp-keystore", + "sp-runtime", + "sp-timestamp", + "substrate-prometheus-endpoint", + "thiserror", +] + [[package]] name = "sc-consensus-slots" -version = "0.33.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "0.44.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "async-trait", "futures", @@ -7606,8 +8140,8 @@ dependencies = [ [[package]] name = "sc-executor" -version = "0.32.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "0.40.1" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "parity-scale-codec", "parking_lot 0.12.3", @@ -7617,44 +8151,44 @@ dependencies = [ "schnellru", "sp-api", "sp-core", - "sp-externalities 0.25.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1)", + "sp-externalities", "sp-io", "sp-panic-handler", - "sp-runtime-interface 24.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1)", + "sp-runtime-interface", "sp-trie", "sp-version", - "sp-wasm-interface 20.0.0 
(git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1)", + "sp-wasm-interface", "tracing", ] [[package]] name = "sc-executor-common" -version = "0.29.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "0.35.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "polkavm", "sc-allocator", "sp-maybe-compressed-blob", - "sp-wasm-interface 20.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1)", + "sp-wasm-interface", "thiserror", "wasm-instrument", ] [[package]] name = "sc-executor-polkavm" -version = "0.29.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "0.32.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "log", "polkavm", "sc-executor-common", - "sp-wasm-interface 20.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1)", + "sp-wasm-interface", ] [[package]] name = "sc-executor-wasmtime" -version = "0.29.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "0.35.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "anyhow", "cfg-if", @@ -7664,15 +8198,15 @@ dependencies = [ "rustix 0.36.17", "sc-allocator", "sc-executor-common", - "sp-runtime-interface 24.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1)", - "sp-wasm-interface 20.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1)", + "sp-runtime-interface", + "sp-wasm-interface", "wasmtime", ] [[package]] name = "sc-informant" -version = "0.33.0" -source = 
"git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "0.44.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "console", "futures", @@ -7688,8 +8222,8 @@ dependencies = [ [[package]] name = "sc-keystore" -version = "25.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "33.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "array-bytes", "parking_lot 0.12.3", @@ -7702,8 +8236,8 @@ dependencies = [ [[package]] name = "sc-mixnet" -version = "0.4.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "0.15.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "array-bytes", "arrayvec", @@ -7713,7 +8247,7 @@ dependencies = [ "futures-timer", "log", "mixnet", - "multiaddr 0.18.1", + "multiaddr 0.18.2", "parity-scale-codec", "parking_lot 0.12.3", "sc-client-api", @@ -7731,8 +8265,8 @@ dependencies = [ [[package]] name = "sc-network" -version = "0.34.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "0.45.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "array-bytes", "async-channel", @@ -7782,8 +8316,8 @@ dependencies = [ [[package]] name = "sc-network-common" -version = "0.33.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "0.44.0" +source = 
"git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "async-trait", "bitflags 1.3.2", @@ -7800,8 +8334,8 @@ dependencies = [ [[package]] name = "sc-network-gossip" -version = "0.34.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "0.45.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "ahash 0.8.11", "futures", @@ -7819,8 +8353,8 @@ dependencies = [ [[package]] name = "sc-network-light" -version = "0.33.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "0.44.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "array-bytes", "async-channel", @@ -7840,8 +8374,8 @@ dependencies = [ [[package]] name = "sc-network-sync" -version = "0.33.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "0.44.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "array-bytes", "async-channel", @@ -7877,8 +8411,8 @@ dependencies = [ [[package]] name = "sc-network-transactions" -version = "0.33.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "0.44.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "array-bytes", "futures", @@ -7896,15 +8430,15 @@ dependencies = [ [[package]] name = "sc-network-types" -version = "0.10.0" -source = 
"git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "0.12.1" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "bs58 0.5.1", "ed25519-dalek", "libp2p-identity", "litep2p", "log", - "multiaddr 0.18.1", + "multiaddr 0.18.2", "multihash 0.19.1", "rand", "thiserror", @@ -7913,8 +8447,8 @@ dependencies = [ [[package]] name = "sc-offchain" -version = "29.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "40.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "array-bytes", "bytes", @@ -7937,7 +8471,7 @@ dependencies = [ "sc-utils", "sp-api", "sp-core", - "sp-externalities 0.25.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1)", + "sp-externalities", "sp-keystore", "sp-offchain", "sp-runtime", @@ -7947,8 +8481,8 @@ dependencies = [ [[package]] name = "sc-proposer-metrics" -version = "0.17.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "0.18.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "log", "substrate-prometheus-endpoint", @@ -7956,8 +8490,8 @@ dependencies = [ [[package]] name = "sc-rpc" -version = "29.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "40.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "futures", "jsonrpsee", @@ -7988,8 +8522,8 @@ dependencies = [ [[package]] name = "sc-rpc-api" -version = "0.33.0" -source = 
"git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "0.44.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "jsonrpsee", "parity-scale-codec", @@ -8008,8 +8542,8 @@ dependencies = [ [[package]] name = "sc-rpc-server" -version = "11.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "17.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "dyn-clone", "forwarded-header-value", @@ -8032,8 +8566,8 @@ dependencies = [ [[package]] name = "sc-rpc-spec-v2" -version = "0.34.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "0.45.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "array-bytes", "futures", @@ -8064,8 +8598,8 @@ dependencies = [ [[package]] name = "sc-service" -version = "0.35.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "0.46.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "async-trait", "directories", @@ -8107,12 +8641,12 @@ dependencies = [ "sp-blockchain", "sp-consensus", "sp-core", - "sp-externalities 0.25.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1)", + "sp-externalities", "sp-keystore", "sp-runtime", "sp-session", "sp-state-machine", - "sp-storage 19.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1)", + "sp-storage", "sp-transaction-pool", "sp-transaction-storage-proof", "sp-trie", @@ -8128,8 
+8662,8 @@ dependencies = [ [[package]] name = "sc-state-db" -version = "0.30.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "0.36.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "log", "parity-scale-codec", @@ -8139,8 +8673,8 @@ dependencies = [ [[package]] name = "sc-sysinfo" -version = "27.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "38.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "derive_more", "futures", @@ -8153,15 +8687,15 @@ dependencies = [ "serde", "serde_json", "sp-core", - "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1)", + "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", "sp-io", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1)", + "sp-std", ] [[package]] name = "sc-telemetry" -version = "15.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "25.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "chrono", "futures", @@ -8180,8 +8714,8 @@ dependencies = [ [[package]] name = "sc-tracing" -version = "28.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "37.0.1" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "chrono", "console", @@ -8200,7 +8734,7 @@ dependencies = [ "sp-core", 
"sp-rpc", "sp-runtime", - "sp-tracing 16.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1)", + "sp-tracing", "thiserror", "tracing", "tracing-log", @@ -8210,18 +8744,18 @@ dependencies = [ [[package]] name = "sc-tracing-proc-macro" version = "11.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "proc-macro-crate 3.2.0", "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] name = "sc-transaction-pool" -version = "28.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "37.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "async-trait", "futures", @@ -8237,9 +8771,9 @@ dependencies = [ "sp-api", "sp-blockchain", "sp-core", - "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1)", + "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", "sp-runtime", - "sp-tracing 16.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1)", + "sp-tracing", "sp-transaction-pool", "substrate-prometheus-endpoint", "thiserror", @@ -8247,8 +8781,8 @@ dependencies = [ [[package]] name = "sc-transaction-pool-api" -version = "28.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "37.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "async-trait", "futures", @@ -8263,8 +8797,8 @@ dependencies = [ [[package]] name = "sc-utils" -version = "14.0.0" -source = 
"git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "17.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "async-channel", "futures", @@ -8464,9 +8998,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.11.1" +version = "2.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75da29fe9b9b08fe9d6b22b5b4bcbc75d8db3aa31e639aa56bb62e9d46bfceaf" +checksum = "ea4a292869320c0272d7bc55a5a6aafaff59b4f63404a003887b679a2e05b4b6" dependencies = [ "core-foundation-sys", "libc", @@ -8546,7 +9080,7 @@ checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -8563,10 +9097,22 @@ dependencies = [ [[package]] name = "serde_spanned" -version = "0.6.7" +version = "0.6.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87607cb1398ed59d48732e575a4c28a7a8ebf2454b964fe3f224f2afc07909e1" +dependencies = [ + "serde", +] + +[[package]] +name = "serde_urlencoded" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb5b1b31579f3811bf615c144393417496f152e12ac8b7663bf664f4a815306d" +checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" dependencies = [ + "form_urlencoded", + "itoa", + "ryu", "serde", ] @@ -8595,7 +9141,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -8824,8 +9370,8 @@ dependencies = [ [[package]] name = "sp-api" -version = "26.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "34.0.0" +source = 
"git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "docify", "hash-db", @@ -8834,10 +9380,10 @@ dependencies = [ "scale-info", "sp-api-proc-macro", "sp-core", - "sp-externalities 0.25.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1)", + "sp-externalities", "sp-metadata-ir", "sp-runtime", - "sp-runtime-interface 24.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1)", + "sp-runtime-interface", "sp-state-machine", "sp-trie", "sp-version", @@ -8846,8 +9392,8 @@ dependencies = [ [[package]] name = "sp-api-proc-macro" -version = "15.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "20.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "Inflector", "blake2 0.10.6", @@ -8855,13 +9401,13 @@ dependencies = [ "proc-macro-crate 3.2.0", "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] name = "sp-application-crypto" -version = "30.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "38.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "parity-scale-codec", "scale-info", @@ -8872,40 +9418,22 @@ dependencies = [ [[package]] name = "sp-arithmetic" -version = "23.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "26.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "docify", "integer-sqrt", "num-traits", "parity-scale-codec", - "scale-info", - "serde", - "static_assertions", -] - 
-[[package]] -name = "sp-ark-bls12-381" -version = "0.4.2" -source = "git+https://github.com/paritytech/arkworks-substrate#caa2eed74beb885dd07c7db5f916f2281dad818f" -dependencies = [ - "ark-bls12-381-ext", - "sp-crypto-ec-utils", -] - -[[package]] -name = "sp-ark-ed-on-bls12-381-bandersnatch" -version = "0.4.2" -source = "git+https://github.com/paritytech/arkworks-substrate#caa2eed74beb885dd07c7db5f916f2281dad818f" -dependencies = [ - "ark-ed-on-bls12-381-bandersnatch-ext", - "sp-crypto-ec-utils", + "scale-info", + "serde", + "static_assertions", ] [[package]] name = "sp-block-builder" -version = "26.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "34.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "sp-api", "sp-inherents", @@ -8914,8 +9442,8 @@ dependencies = [ [[package]] name = "sp-blockchain" -version = "28.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "37.0.1" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "futures", "parity-scale-codec", @@ -8933,8 +9461,8 @@ dependencies = [ [[package]] name = "sp-consensus" -version = "0.32.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "0.40.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "async-trait", "futures", @@ -8948,15 +9476,33 @@ dependencies = [ [[package]] name = "sp-consensus-aura" -version = "0.32.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "0.40.0" +source = 
"git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +dependencies = [ + "async-trait", + "parity-scale-codec", + "scale-info", + "sp-api", + "sp-application-crypto", + "sp-consensus-slots", + "sp-inherents", + "sp-runtime", + "sp-timestamp", +] + +[[package]] +name = "sp-consensus-babe" +version = "0.40.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "async-trait", "parity-scale-codec", "scale-info", + "serde", "sp-api", "sp-application-crypto", "sp-consensus-slots", + "sp-core", "sp-inherents", "sp-runtime", "sp-timestamp", @@ -8964,8 +9510,8 @@ dependencies = [ [[package]] name = "sp-consensus-grandpa" -version = "13.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "21.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "finality-grandpa", "log", @@ -8981,8 +9527,8 @@ dependencies = [ [[package]] name = "sp-consensus-slots" -version = "0.32.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "0.40.1" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "parity-scale-codec", "scale-info", @@ -8992,11 +9538,10 @@ dependencies = [ [[package]] name = "sp-core" -version = "28.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "34.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "array-bytes", - "bandersnatch_vrfs", "bitflags 1.3.2", "blake2 0.10.6", 
"bounded-collections", @@ -9023,12 +9568,12 @@ dependencies = [ "secp256k1", "secrecy", "serde", - "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1)", - "sp-debug-derive 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1)", - "sp-externalities 0.25.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1)", - "sp-runtime-interface 24.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1)", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1)", - "sp-storage 19.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1)", + "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", + "sp-debug-derive", + "sp-externalities", + "sp-runtime-interface", + "sp-std", + "sp-storage", "ss58-registry", "substrate-bip39", "thiserror", @@ -9037,26 +9582,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "sp-crypto-ec-utils" -version = "0.10.0" -source = "git+https://github.com/paritytech/polkadot-sdk#310ef5ce1086affdc522c4d1736211de2a7dd99e" -dependencies = [ - "ark-bls12-377", - "ark-bls12-377-ext", - "ark-bls12-381", - "ark-bls12-381-ext", - "ark-bw6-761", - "ark-bw6-761-ext", - "ark-ec", - "ark-ed-on-bls12-377", - "ark-ed-on-bls12-377-ext", - "ark-ed-on-bls12-381-bandersnatch", - "ark-ed-on-bls12-381-bandersnatch-ext", - "ark-scale", - "sp-runtime-interface 24.0.0 (git+https://github.com/paritytech/polkadot-sdk)", -] - [[package]] name = "sp-crypto-hashing" version = "0.1.0" @@ -9074,7 +9599,7 @@ dependencies = [ [[package]] name = "sp-crypto-hashing" version = "0.1.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "blake2b_simd", "byteorder", @@ -9087,17 +9612,17 @@ 
dependencies = [ [[package]] name = "sp-crypto-hashing-proc-macro" version = "0.1.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "quote", - "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1)", - "syn 2.0.77", + "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", + "syn 2.0.79", ] [[package]] name = "sp-database" version = "10.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "kvdb", "parking_lot 0.12.3", @@ -9106,47 +9631,27 @@ dependencies = [ [[package]] name = "sp-debug-derive" version = "14.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.77", -] - -[[package]] -name = "sp-debug-derive" -version = "14.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk#310ef5ce1086affdc522c4d1736211de2a7dd99e" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", -] - -[[package]] -name = "sp-externalities" -version = "0.25.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" -dependencies = [ - "environmental", - "parity-scale-codec", - "sp-storage 19.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1)", + "syn 2.0.79", ] [[package]] name = "sp-externalities" -version = "0.25.0" -source = 
"git+https://github.com/paritytech/polkadot-sdk#310ef5ce1086affdc522c4d1736211de2a7dd99e" +version = "0.29.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "environmental", "parity-scale-codec", - "sp-storage 19.0.0 (git+https://github.com/paritytech/polkadot-sdk)", + "sp-storage", ] [[package]] name = "sp-genesis-builder" -version = "0.8.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "0.15.1" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "parity-scale-codec", "scale-info", @@ -9157,8 +9662,8 @@ dependencies = [ [[package]] name = "sp-inherents" -version = "26.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "34.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "async-trait", "impl-trait-for-tuples", @@ -9170,8 +9675,8 @@ dependencies = [ [[package]] name = "sp-io" -version = "30.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "38.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "bytes", "docify", @@ -9183,12 +9688,12 @@ dependencies = [ "rustversion", "secp256k1", "sp-core", - "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1)", - "sp-externalities 0.25.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1)", + "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", + "sp-externalities", "sp-keystore", - 
"sp-runtime-interface 24.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1)", + "sp-runtime-interface", "sp-state-machine", - "sp-tracing 16.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1)", + "sp-tracing", "sp-trie", "tracing", "tracing-core", @@ -9196,8 +9701,8 @@ dependencies = [ [[package]] name = "sp-keyring" -version = "31.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "39.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "sp-core", "sp-runtime", @@ -9206,19 +9711,19 @@ dependencies = [ [[package]] name = "sp-keystore" -version = "0.34.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "0.40.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "parity-scale-codec", "parking_lot 0.12.3", "sp-core", - "sp-externalities 0.25.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1)", + "sp-externalities", ] [[package]] name = "sp-maybe-compressed-blob" version = "11.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "thiserror", "zstd 0.12.4", @@ -9226,8 +9731,8 @@ dependencies = [ [[package]] name = "sp-metadata-ir" -version = "0.6.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "0.7.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ 
"frame-metadata", "parity-scale-codec", @@ -9236,8 +9741,8 @@ dependencies = [ [[package]] name = "sp-mixnet" -version = "0.4.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "0.12.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "parity-scale-codec", "scale-info", @@ -9247,8 +9752,8 @@ dependencies = [ [[package]] name = "sp-offchain" -version = "26.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "34.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "sp-api", "sp-core", @@ -9258,7 +9763,7 @@ dependencies = [ [[package]] name = "sp-panic-handler" version = "13.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "backtrace", "lazy_static", @@ -9267,8 +9772,8 @@ dependencies = [ [[package]] name = "sp-rpc" -version = "26.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "32.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "rustc-hash 1.1.0", "serde", @@ -9277,8 +9782,8 @@ dependencies = [ [[package]] name = "sp-runtime" -version = "31.0.1" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "39.0.1" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" 
dependencies = [ "docify", "either", @@ -9296,79 +9801,47 @@ dependencies = [ "sp-arithmetic", "sp-core", "sp-io", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1)", + "sp-std", "sp-weights", "tracing", ] [[package]] name = "sp-runtime-interface" -version = "24.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" -dependencies = [ - "bytes", - "impl-trait-for-tuples", - "parity-scale-codec", - "polkavm-derive", - "primitive-types", - "sp-externalities 0.25.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1)", - "sp-runtime-interface-proc-macro 17.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1)", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1)", - "sp-storage 19.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1)", - "sp-tracing 16.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1)", - "sp-wasm-interface 20.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1)", - "static_assertions", -] - -[[package]] -name = "sp-runtime-interface" -version = "24.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk#310ef5ce1086affdc522c4d1736211de2a7dd99e" +version = "28.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "bytes", "impl-trait-for-tuples", "parity-scale-codec", "polkavm-derive", "primitive-types", - "sp-externalities 0.25.0 (git+https://github.com/paritytech/polkadot-sdk)", - "sp-runtime-interface-proc-macro 17.0.0 (git+https://github.com/paritytech/polkadot-sdk)", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk)", - "sp-storage 19.0.0 (git+https://github.com/paritytech/polkadot-sdk)", - "sp-tracing 16.0.0 (git+https://github.com/paritytech/polkadot-sdk)", - "sp-wasm-interface 20.0.0 
(git+https://github.com/paritytech/polkadot-sdk)", + "sp-externalities", + "sp-runtime-interface-proc-macro", + "sp-std", + "sp-storage", + "sp-tracing", + "sp-wasm-interface", "static_assertions", ] [[package]] name = "sp-runtime-interface-proc-macro" -version = "17.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" -dependencies = [ - "Inflector", - "expander", - "proc-macro-crate 3.2.0", - "proc-macro2", - "quote", - "syn 2.0.77", -] - -[[package]] -name = "sp-runtime-interface-proc-macro" -version = "17.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk#310ef5ce1086affdc522c4d1736211de2a7dd99e" +version = "18.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "Inflector", "expander", "proc-macro-crate 3.2.0", "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] name = "sp-session" -version = "27.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "36.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "parity-scale-codec", "scale-info", @@ -9381,8 +9854,8 @@ dependencies = [ [[package]] name = "sp-staking" -version = "26.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "36.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "impl-trait-for-tuples", "parity-scale-codec", @@ -9394,8 +9867,8 @@ dependencies = [ [[package]] name = "sp-state-machine" -version = "0.35.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "0.43.0" 
+source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "hash-db", "log", @@ -9404,7 +9877,7 @@ dependencies = [ "rand", "smallvec", "sp-core", - "sp-externalities 0.25.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1)", + "sp-externalities", "sp-panic-handler", "sp-trie", "thiserror", @@ -9414,8 +9887,8 @@ dependencies = [ [[package]] name = "sp-statement-store" -version = "10.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "18.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "aes-gcm", "curve25519-dalek", @@ -9428,10 +9901,10 @@ dependencies = [ "sp-api", "sp-application-crypto", "sp-core", - "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1)", - "sp-externalities 0.25.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1)", + "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409)", + "sp-externalities", "sp-runtime", - "sp-runtime-interface 24.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1)", + "sp-runtime-interface", "thiserror", "x25519-dalek", ] @@ -9439,41 +9912,24 @@ dependencies = [ [[package]] name = "sp-std" version = "14.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" - -[[package]] -name = "sp-std" -version = "14.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk#310ef5ce1086affdc522c4d1736211de2a7dd99e" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" [[package]] name = "sp-storage" -version = "19.0.0" -source = 
"git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" -dependencies = [ - "impl-serde", - "parity-scale-codec", - "ref-cast", - "serde", - "sp-debug-derive 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1)", -] - -[[package]] -name = "sp-storage" -version = "19.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk#310ef5ce1086affdc522c4d1736211de2a7dd99e" +version = "21.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "impl-serde", "parity-scale-codec", "ref-cast", "serde", - "sp-debug-derive 14.0.0 (git+https://github.com/paritytech/polkadot-sdk)", + "sp-debug-derive", ] [[package]] name = "sp-timestamp" -version = "26.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "34.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "async-trait", "parity-scale-codec", @@ -9484,19 +9940,8 @@ dependencies = [ [[package]] name = "sp-tracing" -version = "16.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" -dependencies = [ - "parity-scale-codec", - "tracing", - "tracing-core", - "tracing-subscriber", -] - -[[package]] -name = "sp-tracing" -version = "16.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk#310ef5ce1086affdc522c4d1736211de2a7dd99e" +version = "17.0.1" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "parity-scale-codec", "tracing", @@ -9506,8 +9951,8 @@ dependencies = [ [[package]] name = "sp-transaction-pool" -version = "26.0.0" -source = 
"git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "34.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "sp-api", "sp-runtime", @@ -9515,8 +9960,8 @@ dependencies = [ [[package]] name = "sp-transaction-storage-proof" -version = "26.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "34.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "async-trait", "parity-scale-codec", @@ -9529,8 +9974,8 @@ dependencies = [ [[package]] name = "sp-trie" -version = "29.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "37.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "ahash 0.8.11", "hash-db", @@ -9543,7 +9988,7 @@ dependencies = [ "scale-info", "schnellru", "sp-core", - "sp-externalities 0.25.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1)", + "sp-externalities", "thiserror", "tracing", "trie-db", @@ -9552,8 +9997,8 @@ dependencies = [ [[package]] name = "sp-version" -version = "29.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "37.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "impl-serde", "parity-scale-codec", @@ -9562,26 +10007,26 @@ dependencies = [ "serde", "sp-crypto-hashing-proc-macro", "sp-runtime", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1)", + "sp-std", "sp-version-proc-macro", 
"thiserror", ] [[package]] name = "sp-version-proc-macro" -version = "13.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "14.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "parity-scale-codec", "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] name = "sp-wasm-interface" -version = "20.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "21.0.1" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "anyhow", "impl-trait-for-tuples", @@ -9590,21 +10035,10 @@ dependencies = [ "wasmtime", ] -[[package]] -name = "sp-wasm-interface" -version = "20.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk#310ef5ce1086affdc522c4d1736211de2a7dd99e" -dependencies = [ - "anyhow", - "impl-trait-for-tuples", - "log", - "parity-scale-codec", -] - [[package]] name = "sp-weights" -version = "27.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "31.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "bounded-collections", "parity-scale-codec", @@ -9612,7 +10046,7 @@ dependencies = [ "serde", "smallvec", "sp-arithmetic", - "sp-debug-derive 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1)", + "sp-debug-derive", ] [[package]] @@ -9626,6 +10060,9 @@ name = "spin" version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" +dependencies = [ + "lock_api", +] [[package]] name = "spinning_top" @@ 
-9646,6 +10083,126 @@ dependencies = [ "der", ] +[[package]] +name = "sqlformat" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7bba3a93db0cc4f7bdece8bb09e77e2e785c20bfebf79eb8340ed80708048790" +dependencies = [ + "nom", + "unicode_categories", +] + +[[package]] +name = "sqlx" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93334716a037193fac19df402f8571269c84a00852f6a7066b5d2616dcd64d3e" +dependencies = [ + "sqlx-core", + "sqlx-macros", + "sqlx-sqlite", +] + +[[package]] +name = "sqlx-core" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4d8060b456358185f7d50c55d9b5066ad956956fddec42ee2e8567134a8936e" +dependencies = [ + "atoi", + "byteorder", + "bytes", + "crc", + "crossbeam-queue", + "either", + "event-listener 5.3.1", + "futures-channel", + "futures-core", + "futures-intrusive", + "futures-io", + "futures-util", + "hashbrown 0.14.5", + "hashlink 0.9.1", + "hex", + "indexmap 2.6.0", + "log", + "memchr", + "native-tls", + "once_cell", + "paste", + "percent-encoding", + "serde", + "sha2 0.10.8", + "smallvec", + "sqlformat", + "thiserror", + "tokio", + "tokio-stream", + "tracing", + "url", +] + +[[package]] +name = "sqlx-macros" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cac0692bcc9de3b073e8d747391827297e075c7710ff6276d9f7a1f3d58c6657" +dependencies = [ + "proc-macro2", + "quote", + "sqlx-core", + "sqlx-macros-core", + "syn 2.0.79", +] + +[[package]] +name = "sqlx-macros-core" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1804e8a7c7865599c9c79be146dc8a9fd8cc86935fa641d3ea58e5f0688abaa5" +dependencies = [ + "dotenvy", + "either", + "heck 0.5.0", + "hex", + "once_cell", + "proc-macro2", + "quote", + "serde", + "serde_json", + "sha2 0.10.8", + "sqlx-core", + "sqlx-sqlite", + "syn 2.0.79", + "tempfile", + "tokio", 
+ "url", +] + +[[package]] +name = "sqlx-sqlite" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d5b2cf34a45953bfd3daaf3db0f7a7878ab9b7a6b91b422d24a7a9e4c857b680" +dependencies = [ + "atoi", + "flume", + "futures-channel", + "futures-core", + "futures-executor", + "futures-intrusive", + "futures-util", + "libsqlite3-sys", + "log", + "percent-encoding", + "serde", + "serde_urlencoded", + "sqlx-core", + "tracing", + "url", +] + [[package]] name = "ss58-registry" version = "1.50.0" @@ -9667,6 +10224,25 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" +[[package]] +name = "staging-xcm" +version = "14.2.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" +dependencies = [ + "array-bytes", + "bounded-collections", + "derivative", + "environmental", + "impl-trait-for-tuples", + "log", + "parity-scale-codec", + "scale-info", + "serde", + "sp-runtime", + "sp-weights", + "xcm-procedural", +] + [[package]] name = "static_assertions" version = "1.1.0" @@ -9765,13 +10341,13 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] name = "substrate-bip39" -version = "0.4.7" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "0.6.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "hmac 0.12.1", "pbkdf2", @@ -9783,7 +10359,7 @@ dependencies = [ [[package]] name = "substrate-build-script-utils" version = "11.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +source = 
"git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" [[package]] name = "substrate-fixed" @@ -9798,8 +10374,8 @@ dependencies = [ [[package]] name = "substrate-frame-rpc-system" -version = "28.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "39.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "docify", "frame-system-rpc-runtime-api", @@ -9819,7 +10395,7 @@ dependencies = [ [[package]] name = "substrate-prometheus-endpoint" version = "0.17.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "http-body-util", "hyper 1.4.1", @@ -9832,8 +10408,8 @@ dependencies = [ [[package]] name = "substrate-wasm-builder" -version = "17.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1#a427d8fb677b62635dfb78a6e530facdd2c362ec" +version = "24.0.1" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" dependencies = [ "array-bytes", "build-helper", @@ -9850,7 +10426,7 @@ dependencies = [ "sp-core", "sp-io", "sp-maybe-compressed-blob", - "sp-tracing 16.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=v1.16.0-rc1)", + "sp-tracing", "sp-version", "strum 0.26.3", "tempfile", @@ -9869,7 +10445,7 @@ dependencies = [ "quote", "rayon", "subtensor-linting", - "syn 2.0.77", + "syn 2.0.79", "walkdir", ] @@ -9905,7 +10481,7 @@ dependencies = [ "proc-macro2", "procedural-fork", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -9915,7 +10491,7 @@ dependencies = [ "ahash 0.8.11", "proc-macro2", "quote", - "syn 
2.0.77", + "syn 2.0.79", ] [[package]] @@ -9953,9 +10529,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.77" +version = "2.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f35bcdf61fd8e7be6caf75f429fdca8beb3ed76584befb503b1569faee373ed" +checksum = "89132cd0bf050864e1d38dc3bbc07a0eb8e7530af26344d3d2bbbef83499f590" dependencies = [ "proc-macro2", "quote", @@ -9982,7 +10558,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -10020,9 +10596,9 @@ checksum = "61c41af27dd6d1e27b1b16b489db798443478cef1f06a660c96db617ba5de3b1" [[package]] name = "tempfile" -version = "3.12.0" +version = "3.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04cbcdd0c794ebb0d4cf35e88edd2f7d2c4c3e9a5a6dab322839b321c6a87a64" +checksum = "f0f2c9fc62d0beef6951ccffd757e241266a2c833136efbe35af6cd2567dca5b" dependencies = [ "cfg-if", "fastrand", @@ -10042,12 +10618,12 @@ dependencies = [ [[package]] name = "terminal_size" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21bebf2b7c9e0a515f6e0f8c51dc0f8e4696391e6f1ff30379559f8365fb0df7" +checksum = "4f599bd7ca042cfdf8f4512b277c02ba102247820f9d9d4a9f521f496751a6ef" dependencies = [ "rustix 0.38.37", - "windows-sys 0.48.0", + "windows-sys 0.59.0", ] [[package]] @@ -10058,22 +10634,22 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" [[package]] name = "thiserror" -version = "1.0.63" +version = "1.0.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0342370b38b6a11b6cc11d6a805569958d54cfa061a29969c3b5ce2ea405724" +checksum = "d50af8abc119fb8bb6dbabcfa89656f46f84aa0ac7688088608076ad2b459a84" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.63" +version = "1.0.64" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4558b58466b9ad7ca0f102865eccc95938dca1a74a856f2b57b6629050da261" +checksum = "08904e7672f5eb876eaaf87e0ce17857500934f4981c4a0ab2b4aa98baac7fc3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -10192,7 +10768,7 @@ checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -10278,11 +10854,11 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.22.21" +version = "0.22.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b072cee73c449a636ffd6f32bd8de3a9f7119139aff882f44943ce2986dc5cf" +checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" dependencies = [ - "indexmap 2.5.0", + "indexmap 2.6.0", "serde", "serde_spanned", "toml_datetime", @@ -10299,7 +10875,6 @@ dependencies = [ "futures-util", "pin-project", "pin-project-lite", - "tokio", "tower-layer", "tower-service", "tracing", @@ -10353,7 +10928,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -10561,9 +11136,9 @@ checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" [[package]] name = "ucd-trie" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed646292ffc8188ef8ea4d1e0e0150fb15a5c2e12ad9b8fc191ae7a8a7f3c4b9" +checksum = "2896d95c02a80c6d6a5d6e953d479f5ddf2dfdb6a244441010e373ac0fb88971" [[package]] name = "uint" @@ -10579,9 +11154,9 @@ dependencies = [ [[package]] name = "unicode-bidi" -version = "0.3.15" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" +checksum = 
"5ab17db44d7388991a428b2ee655ce0c212e862eff1768a455c58f9aad6e7893" [[package]] name = "unicode-ident" @@ -10600,15 +11175,21 @@ dependencies = [ [[package]] name = "unicode-width" -version = "0.1.13" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0336d538f7abc86d282a4189614dfaa90810dfc2c6f6427eaf88e16311dd225d" +checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af" [[package]] name = "unicode-xid" -version = "0.2.5" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" + +[[package]] +name = "unicode_categories" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "229730647fbc343e3a80e463c1db7f78f3855d3f3739bee0dda773c9a037c90a" +checksum = "39ec24b3121d976906ece63c9daad25b85969647682eee313cb5779fdd69e14e" [[package]] name = "universal-hash" @@ -10772,7 +11353,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", "wasm-bindgen-shared", ] @@ -10806,7 +11387,7 @@ checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -11430,9 +12011,9 @@ checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "winnow" -version = "0.6.18" +version = "0.6.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68a9bda4691f099d435ad181000724da8e5899daa10713c2d432552b9ccd3a6f" +checksum = "36c1fec1a2bb5866f07c25f68c26e565c4c200aebb96d7e55710c19d3e8ac49b" dependencies = [ "memchr", ] @@ -11502,6 +12083,17 @@ dependencies = [ "time", ] +[[package]] +name = "xcm-procedural" +version = "10.1.0" +source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2409#87971b3e92721bdf10bf40b410eaae779d494ca0" 
+dependencies = [ + "Inflector", + "proc-macro2", + "quote", + "syn 2.0.79", +] + [[package]] name = "xml-rs" version = "0.8.22" @@ -11559,7 +12151,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -11579,7 +12171,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index e2af5f69f..8f3e35e03 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -46,9 +46,11 @@ unwrap-used = "deny" manual_inspect = "allow" [workspace.dependencies] +async-trait = "0.1" cargo-husky = { version = "1", default-features = false } clap = "4.5.4" codec = { version = "3.2.2", default-features = false } +ed25519-dalek = { version = "2.1.0", default-features = false, features = ["alloc"] } enumflags2 = "0.7.9" futures = "0.3.30" hex = { version = "0.4", default-features = false } @@ -59,14 +61,15 @@ memmap2 = "0.9.4" ndarray = { version = "0.15.6", default-features = false } parity-util-mem = "0.12.0" rand = "0.8.5" +scale-codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.2", default-features = false } serde = { version = "1.0.199", default-features = false } serde-tuple-vec-map = { version = "1.0.1", default-features = false } serde_bytes = { version = "0.11.14", default-features = false } -serde_json = { version = "1.0.116", default-features = false } +serde_json = { version = "1.0.121", default-features = false } serde_with = { version = "=2.0.0", default-features = false } smallvec = "1.13.2" -litep2p = { git = "https://github.com/paritytech/litep2p", branch = "master" } +litep2p = { git = "https://github.com/paritytech/litep2p", tag = "v0.7.0" } syn = { version = "2", features = [ "full", "visit-mut", @@ -80,81 +83,113 @@ walkdir = 
"2" subtensor-macros = { path = "support/macros" } -frame-benchmarking = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.16.0-rc1", default-features = false } -frame-benchmarking-cli = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.16.0-rc1" } -frame-executive = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.16.0-rc1", default-features = false } -frame-metadata-hash-extension = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.16.0-rc1", default-features = false } -frame-support = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.16.0-rc1", default-features = false } -frame-system = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.16.0-rc1", default-features = false } -frame-system-benchmarking = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.16.0-rc1", default-features = false } -frame-system-rpc-runtime-api = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.16.0-rc1", default-features = false } -frame-try-runtime = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.16.0-rc1", default-features = false } - -pallet-aura = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.16.0-rc1", default-features = false } -pallet-balances = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.16.0-rc1", default-features = false } -pallet-grandpa = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.16.0-rc1", default-features = false } -pallet-insecure-randomness-collective-flip = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.16.0-rc1", default-features = false } -pallet-membership = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.16.0-rc1", default-features = false } -pallet-multisig = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.16.0-rc1", default-features = false } -pallet-preimage = { git = 
"https://github.com/paritytech/polkadot-sdk.git", tag = "v1.16.0-rc1", default-features = false } -pallet-proxy = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.16.0-rc1", default-features = false } -pallet-safe-mode = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.16.0-rc1", default-features = false } -pallet-scheduler = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.16.0-rc1", default-features = false } -pallet-sudo = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.16.0-rc1", default-features = false } -pallet-timestamp = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.16.0-rc1", default-features = false } -pallet-transaction-payment = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.16.0-rc1", default-features = false } -pallet-transaction-payment-rpc = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.16.0-rc1" } -pallet-transaction-payment-rpc-runtime-api = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.16.0-rc1", default-features = false } -pallet-utility = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.16.0-rc1", default-features = false } - -sc-basic-authorship = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.16.0-rc1" } -sc-cli = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.16.0-rc1" } -sc-client-api = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.16.0-rc1" } -sc-consensus = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.16.0-rc1" } -sc-consensus-aura = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.16.0-rc1" } -sc-consensus-grandpa = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.16.0-rc1" } -sc-consensus-grandpa-rpc = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.16.0-rc1" } -sc-chain-spec-derive = { git = 
"https://github.com/paritytech/polkadot-sdk.git", tag = "v1.16.0-rc1" } -sc-chain-spec = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.16.0-rc1" } -sc-consensus-slots = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.16.0-rc1" } -sc-executor = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.16.0-rc1" } -sc-keystore = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.16.0-rc1" } -sc-network = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.16.0-rc1" } -sc-offchain = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.16.0-rc1" } -sc-rpc = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.16.0-rc1" } -sc-rpc-api = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.16.0-rc1" } -sc-service = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.16.0-rc1" } -sc-telemetry = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.16.0-rc1" } -sc-transaction-pool = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.16.0-rc1" } -sc-transaction-pool-api = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.16.0-rc1" } - -sp-api = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.16.0-rc1", default-features = false } -sp-block-builder = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.16.0-rc1", default-features = false } -sp-blockchain = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.16.0-rc1", default-features = false } -sp-consensus = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.16.0-rc1" } -sp-consensus-aura = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.16.0-rc1", default-features = false } -sp-consensus-grandpa = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.16.0-rc1" } -sp-genesis-builder = { git = 
"https://github.com/paritytech/polkadot-sdk.git", tag = "v1.16.0-rc1", default-features = false } -sp-core = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.16.0-rc1", default-features = false } -sp-inherents = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.16.0-rc1", default-features = false } -sp-io = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.16.0-rc1", default-features = false } -sp-keyring = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.16.0-rc1" } -sp-offchain = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.16.0-rc1", default-features = false } -sp-rpc = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.16.0-rc1", default-features = false } -sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.16.0-rc1", default-features = false } -sp-session = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.16.0-rc1", default-features = false } -sp-std = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.16.0-rc1", default-features = false } -sp-storage = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.16.0-rc1", default-features = false } -sp-timestamp = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.16.0-rc1" } -sp-tracing = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.16.0-rc1", default-features = false } -sp-transaction-pool = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.16.0-rc1", default-features = false } -sp-version = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.16.0-rc1", default-features = false } -sp-weights = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.16.0-rc1", default-features = false } - -substrate-build-script-utils = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.16.0-rc1" } +frame-benchmarking = { git = 
"https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } +frame-benchmarking-cli = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } +frame-executive = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } +frame-metadata-hash-extension = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } +frame-support = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } +frame-system = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } +frame-system-benchmarking = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } +frame-system-rpc-runtime-api = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } +frame-try-runtime = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } + +pallet-aura = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } +pallet-balances = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } +pallet-grandpa = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } +pallet-insecure-randomness-collective-flip = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } +pallet-membership = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } +pallet-multisig = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features 
= false } +pallet-preimage = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } +pallet-proxy = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } +pallet-safe-mode = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } +pallet-scheduler = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } +pallet-sudo = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } +pallet-timestamp = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } +pallet-transaction-payment = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } +pallet-transaction-payment-rpc = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } +pallet-transaction-payment-rpc-runtime-api = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } +pallet-utility = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } + +sc-basic-authorship = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } +sc-cli = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } +sc-client-api = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } +sc-consensus = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } +sc-consensus-aura = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } +sc-consensus-grandpa = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = 
"polkadot-stable2409" } +sc-consensus-grandpa-rpc = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } +sc-chain-spec-derive = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } +sc-chain-spec = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } +sc-consensus-slots = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } +sc-executor = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } +sc-keystore = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } +sc-network = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } +sc-offchain = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } +sc-rpc = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } +sc-rpc-api = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } +sc-service = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } +sc-telemetry = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } +sc-transaction-pool = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } +sc-transaction-pool-api = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } + +sp-api = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } +sp-block-builder = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } +sp-blockchain = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } +sp-consensus = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = 
"polkadot-stable2409" } +sp-consensus-aura = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } +sp-consensus-grandpa = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } +sp-genesis-builder = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } +sp-core = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } +sp-inherents = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } +sp-io = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } +sp-keyring = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } +sp-offchain = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } +sp-rpc = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } +sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } +sp-session = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } +sp-std = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } +sp-storage = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } +sp-timestamp = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } +sp-tracing = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } +sp-transaction-pool = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", 
default-features = false } +sp-version = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } +sp-weights = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } + +substrate-build-script-utils = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } substrate-fixed = { git = "https://github.com/opentensor/substrate-fixed.git", tag = "v0.5.9" } -substrate-frame-rpc-system = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.16.0-rc1" } -substrate-wasm-builder = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "v1.16.0-rc1" } +substrate-frame-rpc-system = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } +substrate-wasm-builder = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409" } + +sc-consensus-manual-seal = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } +sc-network-sync = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } +substrate-prometheus-endpoint = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2409", default-features = false } + +# Frontier +fp-evm = { git = "https://github.com/gztensor/frontier", rev = "b8e3025", default-features = false } +fp-rpc = { git = "https://github.com/gztensor/frontier", rev = "b8e3025", default-features = false } +fp-self-contained = { git = "https://github.com/gztensor/frontier", rev = "b8e3025", default-features = false, features = [ + "serde", +] } +fp-account = { git = "https://github.com/gztensor/frontier", rev = "b8e3025", default-features = false } +fc-storage = { git = "https://github.com/gztensor/frontier", rev = "b8e3025", default-features = false } +fc-db = { git = 
"https://github.com/gztensor/frontier", rev = "b8e3025", default-features = false } +fc-consensus = { git = "https://github.com/gztensor/frontier", rev = "b8e3025", default-features = false } +fp-dynamic-fee = { git = "https://github.com/gztensor/frontier", rev = "b8e3025", default-features = false } +fc-api = { git = "https://github.com/gztensor/frontier", rev = "b8e3025", default-features = false } +fc-rpc = { git = "https://github.com/gztensor/frontier", rev = "b8e3025", default-features = false } +fc-rpc-core = { git = "https://github.com/gztensor/frontier", rev = "b8e3025", default-features = false } +fc-mapping-sync = { git = "https://github.com/gztensor/frontier", rev = "b8e3025", default-features = false } + +# Frontier FRAME +pallet-base-fee = { git = "https://github.com/gztensor/frontier", rev = "b8e3025", default-features = false } +pallet-dynamic-fee = { git = "https://github.com/gztensor/frontier", rev = "b8e3025", default-features = false } +pallet-ethereum = { git = "https://github.com/gztensor/frontier", rev = "b8e3025", default-features = false } +pallet-evm = { git = "https://github.com/gztensor/frontier", rev = "b8e3025", default-features = false } +pallet-evm-chain-id = { git = "https://github.com/gztensor/frontier", rev = "b8e3025", default-features = false } +pallet-evm-precompile-modexp = { git = "https://github.com/gztensor/frontier", rev = "b8e3025", default-features = false } +pallet-evm-precompile-sha3fips = { git = "https://github.com/gztensor/frontier", rev = "b8e3025", default-features = false } +pallet-evm-precompile-simple = { git = "https://github.com/gztensor/frontier", rev = "b8e3025", default-features = false } +pallet-hotfix-sufficients = { git = "https://github.com/gztensor/frontier", rev = "b8e3025", default-features = false } + frame-metadata = "16" [profile.release] diff --git a/build.rs b/build.rs index 85388fd6c..7261a28e1 100644 --- a/build.rs +++ b/build.rs @@ -59,6 +59,7 @@ fn main() { } }; + 
track_lint(ForbidAsPrimitiveConversion::lint(&parsed_file)); track_lint(RequireFreezeStruct::lint(&parsed_file)); track_lint(RequireExplicitPalletIndex::lint(&parsed_file)); }); diff --git a/node/Cargo.toml b/node/Cargo.toml index 3c5c91b92..2aba49c96 100644 --- a/node/Cargo.toml +++ b/node/Cargo.toml @@ -20,8 +20,10 @@ targets = ["x86_64-unknown-linux-gnu"] name = "node-subtensor" [dependencies] +async-trait = { workspace = true } clap = { workspace = true, features = ["derive"] } futures = { workspace = true, features = ["thread-pool"] } +scale-codec = { workspace = true } serde = { workspace = true, features = ["derive"] } # Storage import @@ -52,8 +54,11 @@ sc-client-api = { workspace = true } sp-runtime = { workspace = true } sp-io = { workspace = true } sp-timestamp = { workspace = true } +sp-transaction-pool = { workspace = true, features = ["default"] } sp-inherents = { workspace = true } sp-keyring = { workspace = true } +sp-offchain = { workspace = true } +sp-session = { workspace = true } frame-metadata-hash-extension = { workspace = true } frame-system = { workspace = true } pallet-transaction-payment = { workspace = true } @@ -69,11 +74,29 @@ sp-block-builder = { workspace = true } sc-basic-authorship = { workspace = true } substrate-frame-rpc-system = { workspace = true } pallet-transaction-payment-rpc = { workspace = true } +frame-system-rpc-runtime-api = { workspace = true } +pallet-transaction-payment-rpc-runtime-api = { workspace = true } # These dependencies are used for runtime benchmarking frame-benchmarking = { workspace = true } frame-benchmarking-cli = { workspace = true } +# Needed for Frontier +sc-consensus-manual-seal = { workspace = true } +sc-network-sync = { workspace = true } +substrate-prometheus-endpoint = { workspace = true } + +# Frontier +fc-storage = { workspace = true } +fc-db = { workspace = true } +fc-consensus = { workspace = true } +fp-dynamic-fee = { workspace = true } +fc-api = { workspace = true } +fc-rpc = { workspace = 
true } +fc-rpc-core = { workspace = true } +fp-rpc = { workspace = true } +fc-mapping-sync = { workspace = true } + # Local Dependencies node-subtensor-runtime = { path = "../runtime" } subtensor-custom-rpc = { path = "../pallets/subtensor/rpc" } @@ -83,7 +106,26 @@ subtensor-custom-rpc-runtime-api = { path = "../pallets/subtensor/runtime-api" } substrate-build-script-utils = { workspace = true } [features] -default = [] +default = [ + "rocksdb", + "sql", + "txpool", +] +sql = [ + "fc-db/sql", + "fc-mapping-sync/sql", +] +rocksdb = [ + "sc-service/rocksdb", + "fc-db/rocksdb", + "fc-mapping-sync/rocksdb", + "fc-rpc/rocksdb" +] +txpool = [ + "fc-rpc/txpool", + "fc-rpc-core/txpool" +] + # Dependencies that are only required if runtime benchmarking should be build. runtime-benchmarks = [ "node-subtensor-runtime/runtime-benchmarks", diff --git a/node/src/benchmarking.rs b/node/src/benchmarking.rs index cf48df62f..a97550071 100644 --- a/node/src/benchmarking.rs +++ b/node/src/benchmarking.rs @@ -2,7 +2,7 @@ //! //! Should only be used for benchmarking as it may break in other contexts. -use crate::service::FullClient; +use crate::service::Client; use node_subtensor_runtime as runtime; use node_subtensor_runtime::check_nonce; @@ -21,12 +21,12 @@ use std::{sync::Arc, time::Duration}; // // Note: Should only be used for benchmarking. pub struct RemarkBuilder { - client: Arc, + client: Arc, } impl RemarkBuilder { // Creates a new [`Self`] from the given client. - pub fn new(client: Arc) -> Self { + pub fn new(client: Arc) -> Self { Self { client } } } @@ -58,14 +58,14 @@ impl frame_benchmarking_cli::ExtrinsicBuilder for RemarkBuilder { // // Note: Should only be used for benchmarking. pub struct TransferKeepAliveBuilder { - client: Arc, + client: Arc, dest: AccountId, value: Balance, } impl TransferKeepAliveBuilder { // Creates a new [`Self`] from the given client. 
- pub fn new(client: Arc, dest: AccountId, value: Balance) -> Self { + pub fn new(client: Arc, dest: AccountId, value: Balance) -> Self { Self { client, dest, @@ -105,7 +105,7 @@ impl frame_benchmarking_cli::ExtrinsicBuilder for TransferKeepAliveBuilder { // // Note: Should only be used for benchmarking. pub fn create_benchmark_extrinsic( - client: &FullClient, + client: &Client, sender: sp_core::sr25519::Pair, call: runtime::RuntimeCall, nonce: u32, diff --git a/node/src/cli.rs b/node/src/cli.rs index 2c9c4c9fd..d9744d44d 100644 --- a/node/src/cli.rs +++ b/node/src/cli.rs @@ -1,3 +1,4 @@ +use crate::ethereum::EthConfiguration; use sc_cli::RunCmd; #[derive(Debug, clap::Parser)] @@ -7,6 +8,13 @@ pub struct Cli { #[clap(flatten)] pub run: RunCmd, + + /// Choose sealing method. + #[arg(long, value_enum, ignore_case = true)] + pub sealing: Option, + + #[command(flatten)] + pub eth: EthConfiguration, } #[allow(clippy::large_enum_variant)] @@ -45,3 +53,13 @@ pub enum Subcommand { // Db meta columns information. ChainInfo(sc_cli::ChainInfoCmd), } + +/// Available Sealing methods. +#[derive(Copy, Clone, Debug, Default, clap::ValueEnum)] +pub enum Sealing { + /// Seal using rpc method. + #[default] + Manual, + /// Seal when transaction is executed. + Instant, +} diff --git a/node/src/client.rs b/node/src/client.rs new file mode 100644 index 000000000..c7196b5a9 --- /dev/null +++ b/node/src/client.rs @@ -0,0 +1,76 @@ +use scale_codec::Codec; +// Substrate +use sc_executor::WasmExecutor; +use sp_runtime::traits::{Block as BlockT, MaybeDisplay}; + +use crate::ethereum::EthCompatRuntimeApiCollection; + +/// Full backend. +pub type FullBackend = sc_service::TFullBackend; +/// Full client. +pub type FullClient = sc_service::TFullClient>; + +/// A set of APIs that every runtime must implement. 
+pub trait BaseRuntimeApiCollection: + sp_api::ApiExt + + sp_api::Metadata + + sp_block_builder::BlockBuilder + + sp_offchain::OffchainWorkerApi + + sp_session::SessionKeys + + sp_transaction_pool::runtime_api::TaggedTransactionQueue +{ +} + +impl BaseRuntimeApiCollection for Api +where + Block: BlockT, + Api: sp_api::ApiExt + + sp_api::Metadata + + sp_block_builder::BlockBuilder + + sp_offchain::OffchainWorkerApi + + sp_session::SessionKeys + + sp_transaction_pool::runtime_api::TaggedTransactionQueue, +{ +} + +/// A set of APIs that Subtensor runtime must implement. +pub trait RuntimeApiCollection< + Block: BlockT, + AuraId: Codec, + AccountId: Codec, + Nonce: Codec, + Balance: Codec + MaybeDisplay, +>: + BaseRuntimeApiCollection + + EthCompatRuntimeApiCollection + + sp_consensus_aura::AuraApi + + sp_consensus_grandpa::GrandpaApi + + frame_system_rpc_runtime_api::AccountNonceApi + + pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi + + subtensor_custom_rpc_runtime_api::DelegateInfoRuntimeApi + + subtensor_custom_rpc_runtime_api::NeuronInfoRuntimeApi + + subtensor_custom_rpc_runtime_api::SubnetInfoRuntimeApi + + subtensor_custom_rpc_runtime_api::SubnetRegistrationRuntimeApi +{ +} + +impl + RuntimeApiCollection for Api +where + Block: BlockT, + AuraId: Codec, + AccountId: Codec, + Nonce: Codec, + Balance: Codec + MaybeDisplay, + Api: BaseRuntimeApiCollection + + EthCompatRuntimeApiCollection + + sp_consensus_aura::AuraApi + + sp_consensus_grandpa::GrandpaApi + + frame_system_rpc_runtime_api::AccountNonceApi + + pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi + + subtensor_custom_rpc_runtime_api::DelegateInfoRuntimeApi + + subtensor_custom_rpc_runtime_api::NeuronInfoRuntimeApi + + subtensor_custom_rpc_runtime_api::SubnetInfoRuntimeApi + + subtensor_custom_rpc_runtime_api::SubnetRegistrationRuntimeApi, +{ +} diff --git a/node/src/command.rs b/node/src/command.rs index 0f3914239..0839649bd 100644 --- a/node/src/command.rs +++ 
b/node/src/command.rs @@ -1,23 +1,17 @@ use crate::{ chain_spec, cli::{Cli, Subcommand}, + ethereum::db_config_dir, service, }; +use fc_db::{kv::frontier_database_dir, DatabaseSource}; -#[cfg(feature = "runtime-benchmarks")] -pub use crate::benchmarking::{inherent_benchmark_data, RemarkBuilder, TransferKeepAliveBuilder}; -#[cfg(feature = "runtime-benchmarks")] -pub use frame_benchmarking_cli::{BenchmarkCmd, ExtrinsicFactory, SUBSTRATE_REFERENCE_HARDWARE}; -#[cfg(feature = "runtime-benchmarks")] -pub use node_subtensor_runtime::EXISTENTIAL_DEPOSIT; -#[cfg(feature = "runtime-benchmarks")] -pub use sp_keyring::Sr25519Keyring; - +use futures::TryFutureExt; use node_subtensor_runtime::Block; use sc_cli::SubstrateCli; use sc_service::{ config::{ExecutorConfiguration, RpcConfiguration}, - Configuration, PartialComponents, + Configuration, }; impl SubstrateCli for Cli { @@ -69,64 +63,89 @@ pub fn run() -> sc_cli::Result<()> { } Some(Subcommand::CheckBlock(cmd)) => { let runner = cli.create_runner(cmd)?; - runner.async_run(|config| { - let PartialComponents { - client, - task_manager, - import_queue, - .. - } = service::new_partial(&config)?; + runner.async_run(|mut config| { + let (client, _, import_queue, task_manager, _) = + service::new_chain_ops(&mut config, &cli.eth)?; Ok((cmd.run(client, import_queue), task_manager)) }) } Some(Subcommand::ExportBlocks(cmd)) => { let runner = cli.create_runner(cmd)?; - runner.async_run(|config| { - let PartialComponents { - client, - task_manager, - .. - } = service::new_partial(&config)?; + runner.async_run(|mut config| { + let (client, _, _, task_manager, _) = + service::new_chain_ops(&mut config, &cli.eth)?; Ok((cmd.run(client, config.database), task_manager)) }) } Some(Subcommand::ExportState(cmd)) => { let runner = cli.create_runner(cmd)?; - runner.async_run(|config| { - let PartialComponents { - client, - task_manager, - .. 
- } = service::new_partial(&config)?; + runner.async_run(|mut config| { + let (client, _, _, task_manager, _) = + service::new_chain_ops(&mut config, &cli.eth)?; Ok((cmd.run(client, config.chain_spec), task_manager)) }) } Some(Subcommand::ImportBlocks(cmd)) => { let runner = cli.create_runner(cmd)?; - runner.async_run(|config| { - let PartialComponents { - client, - task_manager, - import_queue, - .. - } = service::new_partial(&config)?; + runner.async_run(|mut config| { + let (client, _, import_queue, task_manager, _) = + service::new_chain_ops(&mut config, &cli.eth)?; Ok((cmd.run(client, import_queue), task_manager)) }) } Some(Subcommand::PurgeChain(cmd)) => { let runner = cli.create_runner(cmd)?; - runner.sync_run(|config| cmd.run(config.database)) + runner.sync_run(|config| { + // Remove Frontier offchain db + let db_config_dir = db_config_dir(&config); + match cli.eth.frontier_backend_type { + crate::ethereum::BackendType::KeyValue => { + let frontier_database_config = match config.database { + DatabaseSource::RocksDb { .. } => DatabaseSource::RocksDb { + path: frontier_database_dir(&db_config_dir, "db"), + cache_size: 0, + }, + DatabaseSource::ParityDb { .. 
} => DatabaseSource::ParityDb { + path: frontier_database_dir(&db_config_dir, "paritydb"), + }, + _ => { + return Err(format!( + "Cannot purge `{:?}` database", + config.database + ) + .into()) + } + }; + cmd.run(frontier_database_config)?; + } + crate::ethereum::BackendType::Sql => { + let db_path = db_config_dir.join("sql"); + match std::fs::remove_dir_all(&db_path) { + Ok(_) => { + println!("{:?} removed.", &db_path); + } + Err(ref err) if err.kind() == std::io::ErrorKind::NotFound => { + eprintln!("{:?} did not exist.", &db_path); + } + Err(err) => { + return Err(format!( + "Cannot purge `{:?}` database: {:?}", + db_path, err, + ) + .into()) + } + }; + } + }; + cmd.run(config.database) + }) } Some(Subcommand::Revert(cmd)) => { let runner = cli.create_runner(cmd)?; - runner.async_run(|config| { - let PartialComponents { - client, - task_manager, - backend, - .. - } = service::new_partial(&config)?; - let aux_revert = Box::new(|client, _, blocks| { + runner.async_run(|mut config| { + let (client, backend, _, task_manager, _) = + service::new_chain_ops(&mut config, &cli.eth)?; + let aux_revert = Box::new(move |client, _, blocks| { sc_consensus_grandpa::revert(client, blocks)?; Ok(()) }); @@ -135,46 +154,42 @@ pub fn run() -> sc_cli::Result<()> { } #[cfg(feature = "runtime-benchmarks")] Some(Subcommand::Benchmark(cmd)) => { + use crate::benchmarking::{ + inherent_benchmark_data, RemarkBuilder, TransferKeepAliveBuilder, + }; + use frame_benchmarking_cli::{ + BenchmarkCmd, ExtrinsicFactory, SUBSTRATE_REFERENCE_HARDWARE, + }; + use node_subtensor_runtime::EXISTENTIAL_DEPOSIT; + use sc_service::PartialComponents; + use sp_keyring::Sr25519Keyring; + use sp_runtime::traits::HashingFor; + let runner = cli.create_runner(cmd)?; runner.sync_run(|config| { + let PartialComponents { + client, backend, .. 
+ } = crate::service::new_partial( + &config, + &cli.eth, + crate::service::build_manual_seal_import_queue, + )?; + // This switch needs to be in the client, since the client decides // which sub-commands it wants to support. match cmd { BenchmarkCmd::Pallet(cmd) => { - if !cfg!(feature = "runtime-benchmarks") { - return Err( - "Runtime benchmarking wasn't enabled when building the node. \ - You can enable it with `--features runtime-benchmarks`." - .into(), - ); - } - - cmd.run_with_spec::, ()>(Some( - config.chain_spec, - )) - } - BenchmarkCmd::Block(cmd) => { - let PartialComponents { client, .. } = service::new_partial(&config)?; - cmd.run(client) + cmd.run_with_spec::, ()>(Some(config.chain_spec)) } - #[cfg(not(feature = "runtime-benchmarks"))] - BenchmarkCmd::Storage(_) => Err( - "Storage benchmarking can be enabled with `--features runtime-benchmarks`." - .into(), - ), - #[cfg(feature = "runtime-benchmarks")] + BenchmarkCmd::Block(cmd) => cmd.run(client), BenchmarkCmd::Storage(cmd) => { - let PartialComponents { - client, backend, .. - } = service::new_partial(&config)?; let db = backend.expose_db(); let storage = backend.expose_storage(); cmd.run(config, client, db, storage) } BenchmarkCmd::Overhead(cmd) => { - let PartialComponents { client, .. } = service::new_partial(&config)?; let ext_builder = RemarkBuilder::new(client.clone()); cmd.run( @@ -186,7 +201,6 @@ pub fn run() -> sc_cli::Result<()> { ) } BenchmarkCmd::Extrinsic(cmd) => { - let PartialComponents { client, .. } = service::new_partial(&config)?; // Register the *Remark* and *TKA* builders. 
let ext_factory = ExtrinsicFactory(vec![ Box::new(RemarkBuilder::new(client.clone())), @@ -213,18 +227,9 @@ pub fn run() -> sc_cli::Result<()> { let runner = cli.create_runner(&cli.run)?; runner.run_node_until_exit(|config| async move { let config = override_default_heap_pages(config, 60_000); - match config.network.network_backend { - sc_network::config::NetworkBackendType::Libp2p => service::new_full::< - sc_network::NetworkWorker< - node_subtensor_runtime::opaque::Block, - ::Hash, - >, - >(config) - .map_err(sc_cli::Error::Service), - sc_network::config::NetworkBackendType::Litep2p => - service::new_full::(config) - .map_err(sc_cli::Error::Service), - } + service::build_full(config, cli.eth, cli.sealing) + .map_err(Into::into) + .await }) } } diff --git a/node/src/ethereum.rs b/node/src/ethereum.rs new file mode 100644 index 000000000..337013a56 --- /dev/null +++ b/node/src/ethereum.rs @@ -0,0 +1,488 @@ +use crate::rpc::EthDeps; +use fc_rpc::{ + pending::AuraConsensusDataProvider, Debug, DebugApiServer, Eth, EthApiServer, EthConfig, + EthDevSigner, EthFilter, EthFilterApiServer, EthPubSub, EthPubSubApiServer, EthSigner, EthTask, + Net, NetApiServer, Web3, Web3ApiServer, +}; +use fp_rpc::{ConvertTransaction, ConvertTransactionRuntimeApi, EthereumRuntimeRPCApi}; +use futures::future; +use futures::StreamExt; +use jsonrpsee::RpcModule; +use sc_client_api::{ + backend::{Backend, StorageProvider}, + client::BlockchainEvents, + AuxStore, UsageProvider, +}; +use sc_executor::HostFunctions; +use sc_network_sync::SyncingService; +use sc_rpc::SubscriptionTaskExecutor; +use sc_service::{error::Error as ServiceError, Configuration, TaskManager}; +use sc_transaction_pool::ChainApi; +use sc_transaction_pool_api::TransactionPool; +use sp_api::{CallApiAt, ConstructRuntimeApi, ProvideRuntimeApi}; +use sp_block_builder::BlockBuilder as BlockBuilderApi; +use sp_blockchain::{Error as BlockChainError, HeaderBackend, HeaderMetadata}; +use sp_consensus_aura::sr25519::AuthorityId as 
AuraId; +use sp_consensus_aura::AuraApi; +use sp_core::H256; +use sp_inherents::CreateInherentDataProviders; +use sp_runtime::traits::Block as BlockT; +use std::path::PathBuf; +use std::time::Duration; +use std::{ + collections::BTreeMap, + sync::{Arc, Mutex}, +}; + +pub use fc_consensus::FrontierBlockImport; +pub use fc_rpc_core::types::{FeeHistoryCache, FeeHistoryCacheLimit, FilterPool}; +/// Frontier DB backend type. +pub use fc_storage::{StorageOverride, StorageOverrideHandler}; + +use crate::client::{FullBackend, FullClient}; + +pub type FrontierBackend = fc_db::Backend; + +/// Available frontier backend types. +#[derive(Debug, Copy, Clone, Default, clap::ValueEnum)] +pub enum BackendType { + /// Either RocksDb or ParityDb, as inherited from the global backend settings. + #[default] + KeyValue, + /// Sql database with custom log indexing. + Sql, +} + +/// The ethereum-compatibility configuration used to run a node. +#[derive(Clone, Debug, clap::Parser)] +pub struct EthConfiguration { + /// Maximum number of logs in a query. + #[arg(long, default_value = "10000")] + pub max_past_logs: u32, + + /// Maximum fee history cache size. + #[arg(long, default_value = "2048")] + pub fee_history_limit: u64, + + #[arg(long)] + pub enable_dev_signer: bool, + + /// The dynamic-fee pallet target gas price set by block author + #[arg(long, default_value = "1")] + pub target_gas_price: u64, + + /// Maximum allowed gas limit will be `block.gas_limit * execute_gas_limit_multiplier` + /// when using eth_call/eth_estimateGas. + #[arg(long, default_value = "10")] + pub execute_gas_limit_multiplier: u64, + + /// Size in bytes of the LRU cache for block data. + #[arg(long, default_value = "50")] + pub eth_log_block_cache: usize, + + /// Size in bytes of the LRU cache for transactions statuses data.
+ #[arg(long, default_value = "50")] + pub eth_statuses_cache: usize, + + /// Sets the frontier backend type (KeyValue or Sql) + #[arg(long, value_enum, ignore_case = true, default_value_t = BackendType::default())] + pub frontier_backend_type: BackendType, + + // Sets the SQL backend's pool size. + #[arg(long, default_value = "100")] + pub frontier_sql_backend_pool_size: u32, + + /// Sets the SQL backend's query timeout in number of VM ops. + #[arg(long, default_value = "10000000")] + pub frontier_sql_backend_num_ops_timeout: u32, + + /// Sets the SQL backend's auxiliary thread limit. + #[arg(long, default_value = "4")] + pub frontier_sql_backend_thread_count: u32, + + /// Sets the SQL backend's cache size. + /// Default value is 200MB. + #[arg(long, default_value = "209715200")] + pub frontier_sql_backend_cache_size: u64, +} + +pub fn db_config_dir(config: &Configuration) -> PathBuf { + config.base_path.config_dir(config.chain_spec.id()) +} + +pub struct FrontierPartialComponents { + pub filter_pool: Option, + pub fee_history_cache: FeeHistoryCache, + pub fee_history_cache_limit: FeeHistoryCacheLimit, +} + +pub fn new_frontier_partial( + config: &EthConfiguration, +) -> Result { + Ok(FrontierPartialComponents { + filter_pool: Some(Arc::new(Mutex::new(BTreeMap::new()))), + fee_history_cache: Arc::new(Mutex::new(BTreeMap::new())), + fee_history_cache_limit: config.fee_history_limit, + }) +} + +/// A set of APIs that ethereum-compatible runtimes must implement.
+pub trait EthCompatRuntimeApiCollection: + sp_api::ApiExt + + fp_rpc::ConvertTransactionRuntimeApi + + fp_rpc::EthereumRuntimeRPCApi +{ +} + +impl EthCompatRuntimeApiCollection for Api +where + Block: BlockT, + Api: sp_api::ApiExt + + fp_rpc::ConvertTransactionRuntimeApi + + fp_rpc::EthereumRuntimeRPCApi, +{ +} + +#[allow(clippy::too_many_arguments)] +pub async fn spawn_frontier_tasks( + task_manager: &TaskManager, + client: Arc>, + backend: Arc>, + frontier_backend: Arc>>, + filter_pool: Option, + storage_override: Arc>, + fee_history_cache: FeeHistoryCache, + fee_history_cache_limit: FeeHistoryCacheLimit, + sync: Arc>, + pubsub_notification_sinks: Arc< + fc_mapping_sync::EthereumBlockNotificationSinks< + fc_mapping_sync::EthereumBlockNotification, + >, + >, +) where + B: BlockT, + RA: ConstructRuntimeApi>, + RA: Send + Sync + 'static, + RA::RuntimeApi: EthCompatRuntimeApiCollection, + HF: HostFunctions + 'static, +{ + // Spawn main mapping sync worker background task. + match &*frontier_backend { + fc_db::Backend::KeyValue(b) => { + task_manager.spawn_essential_handle().spawn( + "frontier-mapping-sync-worker", + Some("frontier"), + fc_mapping_sync::kv::MappingSyncWorker::new( + client.import_notification_stream(), + Duration::new(6, 0), + client.clone(), + backend, + storage_override.clone(), + b.clone(), + 3, + 0u32.into(), + fc_mapping_sync::SyncStrategy::Normal, + sync, + pubsub_notification_sinks, + ) + .for_each(|()| future::ready(())), + ); + } + fc_db::Backend::Sql(b) => { + task_manager.spawn_essential_handle().spawn_blocking( + "frontier-mapping-sync-worker", + Some("frontier"), + fc_mapping_sync::sql::SyncWorker::run( + client.clone(), + backend, + b.clone(), + client.import_notification_stream(), + fc_mapping_sync::sql::SyncWorkerConfig { + read_notification_timeout: Duration::from_secs(30), + check_indexed_blocks_interval: Duration::from_secs(60), + }, + fc_mapping_sync::SyncStrategy::Parachain, + sync, + pubsub_notification_sinks, + ), + ); + } + } 
+ + // Spawn Frontier EthFilterApi maintenance task. + if let Some(filter_pool) = filter_pool { + // Each filter is allowed to stay in the pool for 100 blocks. + const FILTER_RETAIN_THRESHOLD: u64 = 100; + task_manager.spawn_essential_handle().spawn( + "frontier-filter-pool", + Some("frontier"), + EthTask::filter_pool_task(client.clone(), filter_pool, FILTER_RETAIN_THRESHOLD), + ); + } + + // Spawn Frontier FeeHistory cache maintenance task. + task_manager.spawn_essential_handle().spawn( + "frontier-fee-history", + Some("frontier"), + EthTask::fee_history_task( + client, + storage_override, + fee_history_cache, + fee_history_cache_limit, + ), + ); +} + +fn extend_rpc_aet_api( + io: &mut RpcModule<()>, + deps: &EthDeps, +) -> Result<(), Box> +where + B: BlockT, + C: CallApiAt + ProvideRuntimeApi, + C::Api: AuraApi + + BlockBuilderApi + + ConvertTransactionRuntimeApi + + EthereumRuntimeRPCApi, + C: HeaderBackend + HeaderMetadata, + C: BlockchainEvents + AuxStore + UsageProvider + StorageProvider + 'static, + BE: Backend + 'static, + P: TransactionPool + 'static, + A: ChainApi + 'static, + CT: ConvertTransaction<::Extrinsic> + Send + Sync + Clone + 'static, + CIDP: CreateInherentDataProviders + Send + Clone + 'static, + EC: EthConfig, +{ + let mut signers = Vec::new(); + if deps.enable_dev_signer { + signers.push(Box::new(EthDevSigner::new()) as Box); + } + + io.merge( + Eth::::new( + deps.client.clone(), + deps.pool.clone(), + deps.graph.clone(), + deps.converter.clone(), + deps.sync.clone(), + signers, + deps.storage_override.clone(), + deps.frontier_backend.clone(), + deps.is_authority, + deps.block_data_cache.clone(), + deps.fee_history_cache.clone(), + deps.fee_history_cache_limit, + deps.execute_gas_limit_multiplier, + deps.forced_parent_hashes.clone(), + deps.pending_create_inherent_data_providers.clone(), + Some(Box::new(AuraConsensusDataProvider::new( + deps.client.clone(), + ))), + ) + .replace_config::() + .into_rpc(), + )?; + Ok(()) +} + +fn 
extend_rpc_eth_filter( + io: &mut RpcModule<()>, + deps: &EthDeps, +) -> Result<(), Box> +where + B: BlockT, + C: CallApiAt + ProvideRuntimeApi, + C::Api: AuraApi + + BlockBuilderApi + + ConvertTransactionRuntimeApi + + EthereumRuntimeRPCApi, + C: HeaderBackend + HeaderMetadata, + C: BlockchainEvents + AuxStore + UsageProvider + StorageProvider + 'static, + BE: Backend + 'static, + P: TransactionPool + 'static, + A: ChainApi + 'static, + CT: ConvertTransaction<::Extrinsic> + Send + Sync + Clone + 'static, + CIDP: CreateInherentDataProviders + Send + Clone + 'static, +{ + if let Some(filter_pool) = deps.filter_pool.clone() { + io.merge( + EthFilter::new( + deps.client.clone(), + deps.frontier_backend.clone(), + deps.graph.clone(), + filter_pool, + 500_usize, // max stored filters + deps.max_past_logs, + deps.block_data_cache.clone(), + ) + .into_rpc(), + )?; + } + Ok(()) +} + +// Function for EthPubSub merge +fn extend_rpc_eth_pubsub( + io: &mut RpcModule<()>, + deps: &EthDeps, + subscription_task_executor: SubscriptionTaskExecutor, + pubsub_notification_sinks: Arc< + fc_mapping_sync::EthereumBlockNotificationSinks< + fc_mapping_sync::EthereumBlockNotification, + >, + >, +) -> Result<(), Box> +where + B: BlockT, + C: CallApiAt + ProvideRuntimeApi, + C::Api: AuraApi + + BlockBuilderApi + + ConvertTransactionRuntimeApi + + EthereumRuntimeRPCApi, + C: HeaderBackend + HeaderMetadata, + C: BlockchainEvents + AuxStore + UsageProvider + StorageProvider + 'static, + BE: Backend + 'static, + P: TransactionPool + 'static, + A: ChainApi + 'static, + CT: ConvertTransaction<::Extrinsic> + Send + Sync + 'static, + CIDP: CreateInherentDataProviders + Send + 'static, +{ + io.merge( + EthPubSub::new( + deps.pool.clone(), + deps.client.clone(), + deps.sync.clone(), + subscription_task_executor, + deps.storage_override.clone(), + pubsub_notification_sinks, + ) + .into_rpc(), + )?; + Ok(()) +} + +fn extend_rpc_net( + io: &mut RpcModule<()>, + deps: &EthDeps, +) -> Result<(), Box> 
+where + B: BlockT, + C: CallApiAt + ProvideRuntimeApi, + C::Api: AuraApi + + BlockBuilderApi + + ConvertTransactionRuntimeApi + + EthereumRuntimeRPCApi, + C: HeaderBackend + HeaderMetadata, + C: BlockchainEvents + AuxStore + UsageProvider + StorageProvider + 'static, + BE: Backend + 'static, + P: TransactionPool + 'static, + A: ChainApi + 'static, + CT: ConvertTransaction<::Extrinsic> + Send + Sync + 'static, + CIDP: CreateInherentDataProviders + Send + 'static, +{ + io.merge( + Net::new( + deps.client.clone(), + deps.network.clone(), + true, // Whether to format the `peer_count` response as Hex (default) or not. + ) + .into_rpc(), + )?; + Ok(()) +} + +fn extend_rpc_web3( + io: &mut RpcModule<()>, + deps: &EthDeps, +) -> Result<(), Box> +where + B: BlockT, + C: CallApiAt + ProvideRuntimeApi, + C::Api: AuraApi + + BlockBuilderApi + + ConvertTransactionRuntimeApi + + EthereumRuntimeRPCApi, + C: HeaderBackend + HeaderMetadata, + C: BlockchainEvents + AuxStore + UsageProvider + StorageProvider + 'static, + BE: Backend + 'static, + P: TransactionPool + 'static, + A: ChainApi + 'static, + CT: ConvertTransaction<::Extrinsic> + Send + Sync + 'static, + CIDP: CreateInherentDataProviders + Send + 'static, +{ + io.merge(Web3::new(deps.client.clone()).into_rpc())?; + Ok(()) +} + +fn extend_rpc_debug( + io: &mut RpcModule<()>, + deps: &EthDeps, +) -> Result<(), Box> +where + B: BlockT, + C: CallApiAt + ProvideRuntimeApi, + C::Api: AuraApi + + BlockBuilderApi + + ConvertTransactionRuntimeApi + + EthereumRuntimeRPCApi, + C: HeaderBackend + HeaderMetadata, + C: BlockchainEvents + AuxStore + UsageProvider + StorageProvider + 'static, + BE: Backend + 'static, + P: TransactionPool + 'static, + A: ChainApi + 'static, + CT: ConvertTransaction<::Extrinsic> + Send + Sync + 'static, + CIDP: CreateInherentDataProviders + Send + 'static, +{ + io.merge( + Debug::new( + deps.client.clone(), + deps.frontier_backend.clone(), + deps.storage_override.clone(), + deps.block_data_cache.clone(), + ) 
+ .into_rpc(), + )?; + Ok(()) +} + +/// Extend RpcModule with Eth RPCs +pub fn create_eth( + mut io: RpcModule<()>, + deps: EthDeps, + subscription_task_executor: SubscriptionTaskExecutor, + pubsub_notification_sinks: Arc< + fc_mapping_sync::EthereumBlockNotificationSinks< + fc_mapping_sync::EthereumBlockNotification, + >, + >, +) -> Result, Box> +where + B: BlockT, + C: CallApiAt + ProvideRuntimeApi, + C::Api: AuraApi + + BlockBuilderApi + + ConvertTransactionRuntimeApi + + EthereumRuntimeRPCApi, + C: HeaderBackend + HeaderMetadata, + C: BlockchainEvents + AuxStore + UsageProvider + StorageProvider + 'static, + BE: Backend + 'static, + P: TransactionPool + 'static, + A: ChainApi + 'static, + CT: ConvertTransaction<::Extrinsic> + Send + Sync + Clone + 'static, + CIDP: CreateInherentDataProviders + Send + Clone + 'static, + EC: EthConfig, +{ + extend_rpc_aet_api::(&mut io, &deps)?; + extend_rpc_eth_filter::(&mut io, &deps)?; + extend_rpc_eth_pubsub::( + &mut io, + &deps, + subscription_task_executor, + pubsub_notification_sinks, + )?; + extend_rpc_net::(&mut io, &deps)?; + extend_rpc_web3::(&mut io, &deps)?; + extend_rpc_debug::(&mut io, &deps)?; + + Ok(io) +} diff --git a/node/src/lib.rs b/node/src/lib.rs index f117b8aae..81cae5145 100644 --- a/node/src/lib.rs +++ b/node/src/lib.rs @@ -1,3 +1,6 @@ pub mod chain_spec; +pub mod cli; +pub mod client; +pub mod ethereum; pub mod rpc; pub mod service; diff --git a/node/src/main.rs b/node/src/main.rs index a79d48b1b..bb8cd8e8b 100644 --- a/node/src/main.rs +++ b/node/src/main.rs @@ -7,7 +7,9 @@ mod service; #[cfg(feature = "runtime-benchmarks")] mod benchmarking; mod cli; +mod client; mod command; +mod ethereum; mod rpc; fn main() -> sc_cli::Result<()> { diff --git a/node/src/rpc.rs b/node/src/rpc.rs index 7563bf834..4f2063215 100644 --- a/node/src/rpc.rs +++ b/node/src/rpc.rs @@ -5,62 +5,125 @@ #![warn(missing_docs)] -use std::sync::Arc; +use std::{collections::BTreeMap, sync::Arc}; +use futures::channel::mpsc; + +use 
crate::{client::RuntimeApiCollection, ethereum::create_eth}; +pub use fc_rpc::EthBlockDataCacheTask; +pub use fc_rpc_core::types::{FeeHistoryCache, FeeHistoryCacheLimit, FilterPool}; +use fc_storage::StorageOverride; use jsonrpsee::RpcModule; -use node_subtensor_runtime::{opaque::Block, AccountId, Balance, BlockNumber, Hash, Index}; -use sc_consensus_grandpa::FinalityProofProvider; +use node_subtensor_runtime::{AccountId, Balance, Hash, Nonce}; +use sc_client_api::{ + backend::{Backend, StorageProvider}, + client::BlockchainEvents, + AuxStore, UsageProvider, +}; +use sc_consensus_manual_seal::EngineCommand; +use sc_network::service::traits::NetworkService; +use sc_network_sync::SyncingService; +use sc_rpc::SubscriptionTaskExecutor; +use sc_transaction_pool::{ChainApi, Pool}; use sc_transaction_pool_api::TransactionPool; -use sp_api::ProvideRuntimeApi; -use sp_block_builder::BlockBuilder; +use sp_api::{CallApiAt, ProvideRuntimeApi}; use sp_blockchain::{Error as BlockChainError, HeaderBackend, HeaderMetadata}; +use sp_consensus_aura::sr25519::AuthorityId as AuraId; +use sp_core::H256; +use sp_inherents::CreateInherentDataProviders; +use sp_runtime::traits::Block as BlockT; -/// Dependencies for GRANDPA -pub struct GrandpaDeps { - /// Voting round info. - pub shared_voter_state: sc_consensus_grandpa::SharedVoterState, - /// Authority set info. - pub shared_authority_set: sc_consensus_grandpa::SharedAuthoritySet, - /// Receives notifications about justification events from Grandpa. - pub justification_stream: sc_consensus_grandpa::GrandpaJustificationStream, - /// Executor to drive the subscription manager in the Grandpa RPC handler. - pub subscription_executor: sc_rpc::SubscriptionTaskExecutor, - /// Finality proof provider. - pub finality_provider: Arc>, +/// Extra dependencies for Ethereum compatibility. +pub struct EthDeps { + /// The client instance to use. + pub client: Arc, + /// Transaction pool instance. + pub pool: Arc

, + /// Graph pool instance. + pub graph: Arc>, + /// Ethereum transaction converter. + pub converter: Option, + /// The Node authority flag + pub is_authority: bool, + /// Whether to enable dev signer + pub enable_dev_signer: bool, + /// Network service + pub network: Arc, + /// Chain syncing service + pub sync: Arc>, + /// Frontier Backend. + pub frontier_backend: Arc>, + /// Ethereum data access overrides. + pub storage_override: Arc>, + /// Cache for Ethereum block data. + pub block_data_cache: Arc>, + /// EthFilterApi pool. + pub filter_pool: Option, + /// Maximum number of logs in a query. + pub max_past_logs: u32, + /// Fee history cache. + pub fee_history_cache: FeeHistoryCache, + /// Maximum fee history cache size. + pub fee_history_cache_limit: FeeHistoryCacheLimit, + /// Maximum allowed gas limit will be ` block.gas_limit * execute_gas_limit_multiplier` when + /// using eth_call/eth_estimateGas. + pub execute_gas_limit_multiplier: u64, + /// Mandated parent hashes for a given block hash. + pub forced_parent_hashes: Option>, + /// Something that can create the inherent data providers for pending state + pub pending_create_inherent_data_providers: CIDP, +} + +/// Default Eth RPC configuration +pub struct DefaultEthConfig(std::marker::PhantomData<(C, BE)>); + +impl fc_rpc::EthConfig for DefaultEthConfig +where + B: BlockT, + C: StorageProvider + Sync + Send + 'static, + BE: Backend + 'static, +{ + type EstimateGasAdapter = (); + type RuntimeStorageOverride = + fc_rpc::frontier_backend_client::SystemAccountId20StorageOverride; } /// Full client dependencies. -pub struct FullDeps { +pub struct FullDeps { /// The client instance to use. pub client: Arc, /// Transaction pool instance. pub pool: Arc

, - /// Grandpa block import setup. - pub grandpa: GrandpaDeps, - /// Backend used by the node. - pub _backend: Arc, + /// Manual seal command sink + pub command_sink: Option>>, + /// Ethereum-compatibility specific dependencies. + pub eth: EthDeps, } /// Instantiate all full RPC extensions. -pub fn create_full( - deps: FullDeps, +pub fn create_full( + deps: FullDeps, + subscription_task_executor: SubscriptionTaskExecutor, + pubsub_notification_sinks: Arc< + fc_mapping_sync::EthereumBlockNotificationSinks< + fc_mapping_sync::EthereumBlockNotification, + >, + >, ) -> Result, Box> where - C: ProvideRuntimeApi, - C: HeaderBackend + HeaderMetadata + 'static, - C: Send + Sync + 'static, - C::Api: substrate_frame_rpc_system::AccountNonceApi, - C::Api: pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi, - C::Api: BlockBuilder, - C::Api: subtensor_custom_rpc_runtime_api::DelegateInfoRuntimeApi, - C::Api: subtensor_custom_rpc_runtime_api::NeuronInfoRuntimeApi, - C::Api: subtensor_custom_rpc_runtime_api::SubnetInfoRuntimeApi, - C::Api: subtensor_custom_rpc_runtime_api::SubnetRegistrationRuntimeApi, - B: sc_client_api::Backend + Send + Sync + 'static, - P: TransactionPool + 'static, + B: BlockT, + C: CallApiAt + ProvideRuntimeApi, + C::Api: RuntimeApiCollection, + C: HeaderBackend + HeaderMetadata + 'static, + C: BlockchainEvents + AuxStore + UsageProvider + StorageProvider, + BE: Backend + 'static, + P: TransactionPool + 'static, + A: ChainApi + 'static, + CIDP: CreateInherentDataProviders + Send + Clone + 'static, + CT: fp_rpc::ConvertTransaction<::Extrinsic> + Send + Sync + Clone + 'static, { use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApiServer}; - use sc_consensus_grandpa_rpc::{Grandpa, GrandpaApiServer}; + use sc_consensus_manual_seal::rpc::{ManualSeal, ManualSealApiServer}; use substrate_frame_rpc_system::{System, SystemApiServer}; use subtensor_custom_rpc::{SubtensorCustom, SubtensorCustomApiServer}; @@ -68,8 +131,8 @@ where 
let FullDeps { client, pool, - grandpa, - _backend: _, + command_sink, + eth, } = deps; // Custom RPC methods for Paratensor @@ -78,29 +141,26 @@ where module.merge(System::new(client.clone(), pool.clone()).into_rpc())?; module.merge(TransactionPayment::new(client).into_rpc())?; - let GrandpaDeps { - shared_voter_state, - shared_authority_set, - justification_stream, - subscription_executor, - finality_provider, - } = grandpa; - - module.merge( - Grandpa::new( - subscription_executor, - shared_authority_set.clone(), - shared_voter_state, - justification_stream, - finality_provider, - ) - .into_rpc(), - )?; - // Extend this RPC with a custom API by using the following syntax. // `YourRpcStruct` should have a reference to a client, which is needed // to call into the runtime. // `module.merge(YourRpcTrait::into_rpc(YourRpcStruct::new(ReferenceToClient, ...)))?;` + if let Some(command_sink) = command_sink { + module.merge( + // We provide the rpc handler with the sending end of the channel to allow the rpc + // send EngineCommands to the background block authorship task. + ManualSeal::new(command_sink).into_rpc(), + )?; + } + + // Ethereum compatibility RPCs + let module = create_eth::<_, _, _, _, _, _, _, DefaultEthConfig>( + module, + eth, + subscription_task_executor, + pubsub_notification_sinks, + )?; + Ok(module) } diff --git a/node/src/service.rs b/node/src/service.rs index cc0bf2862..fef86dee6 100644 --- a/node/src/service.rs +++ b/node/src/service.rs @@ -1,49 +1,94 @@ //! Service and ServiceFactory implementation. Specialized wrapper over substrate service. 
-use futures::FutureExt; -use node_subtensor_runtime::{opaque::Block, RuntimeApi}; -use sc_client_api::{Backend, BlockBackend}; -use sc_consensus_aura::{ImportQueueParams, SlotProportion, StartAuraParams}; -use sc_consensus_grandpa::SharedVoterState; +use crate::cli::Sealing; +use crate::client::{FullBackend, FullClient, RuntimeApiCollection}; +use crate::ethereum::{ + db_config_dir, new_frontier_partial, spawn_frontier_tasks, BackendType, EthConfiguration, + FrontierBackend, FrontierBlockImport, FrontierPartialComponents, StorageOverride, + StorageOverrideHandler, +}; +use futures::{channel::mpsc, future, FutureExt}; +use sc_client_api::{Backend as BackendT, BlockBackend}; +use sc_consensus::{BasicQueue, BoxBlockImport}; +use sc_consensus_grandpa::BlockNumberOps; use sc_consensus_slots::BackoffAuthoringOnFinalizedHeadLagging; -pub use sc_executor::WasmExecutor; -use sc_service::{error::Error as ServiceError, Configuration, TaskManager, WarpSyncConfig}; -use sc_telemetry::{Telemetry, TelemetryWorker}; +use sc_executor::HostFunctions as HostFunctionsT; +use sc_network_sync::strategy::warp::{WarpSyncConfig, WarpSyncProvider}; +use sc_service::{error::Error as ServiceError, Configuration, PartialComponents, TaskManager}; +use sc_telemetry::{log, Telemetry, TelemetryHandle, TelemetryWorker}; +use sc_transaction_pool::FullPool; use sc_transaction_pool_api::OffchainTransactionPoolFactory; -use sp_consensus_aura::sr25519::AuthorityPair as AuraPair; +use sp_api::ConstructRuntimeApi; +use sp_consensus_aura::sr25519::{AuthorityId as AuraId, AuthorityPair as AuraPair}; +use sp_core::{H256, U256}; +use sp_runtime::traits::{Block as BlockT, NumberFor}; +use std::{cell::RefCell, path::Path}; use std::{sync::Arc, time::Duration}; +use substrate_prometheus_endpoint::Registry; + +// Runtime +use node_subtensor_runtime::{ + opaque::Block, AccountId, Balance, Nonce, RuntimeApi, TransactionConverter, +}; /// The minimum period of blocks on which justifications will be /// imported and 
generated. const GRANDPA_JUSTIFICATION_PERIOD: u32 = 512; -pub(crate) type FullClient = - sc_service::TFullClient>; -type FullBackend = sc_service::TFullBackend; -type FullSelectChain = sc_consensus::LongestChain; - -pub fn new_partial( +/// Only enable the benchmarking host functions when we actually want to benchmark. +#[cfg(feature = "runtime-benchmarks")] +pub type HostFunctions = ( + sp_io::SubstrateHostFunctions, + frame_benchmarking::benchmarking::HostFunctions, +); +/// Otherwise we use empty host functions for ext host functions. +#[cfg(not(feature = "runtime-benchmarks"))] +pub type HostFunctions = sp_io::SubstrateHostFunctions; + +pub type Backend = FullBackend; +pub type Client = FullClient; + +type FullSelectChain = sc_consensus::LongestChain, B>; +type GrandpaBlockImport = + sc_consensus_grandpa::GrandpaBlockImport, B, C, FullSelectChain>; +type GrandpaLinkHalf = sc_consensus_grandpa::LinkHalf>; + +pub fn new_partial( config: &Configuration, + eth_config: &EthConfiguration, + build_import_queue: BIQ, ) -> Result< - sc_service::PartialComponents< - FullClient, - FullBackend, - FullSelectChain, - sc_consensus::DefaultImportQueue, - sc_transaction_pool::FullPool, + PartialComponents< + FullClient, + FullBackend, + FullSelectChain, + BasicQueue, + FullPool>, ( - sc_consensus_grandpa::GrandpaBlockImport< - FullBackend, - Block, - FullClient, - FullSelectChain, - >, - sc_consensus_grandpa::LinkHalf, Option, + BoxBlockImport, + GrandpaLinkHalf>, + FrontierBackend>, + Arc>, ), >, ServiceError, -> { +> +where + B: BlockT, + RA: ConstructRuntimeApi>, + RA: Send + Sync + 'static, + RA::RuntimeApi: RuntimeApiCollection, + HF: HostFunctionsT + 'static, + BIQ: FnOnce( + Arc>, + &Configuration, + &EthConfiguration, + &TaskManager, + Option, + GrandpaBlockImport>, + ) -> Result<(BasicQueue, BoxBlockImport), ServiceError>, +{ let telemetry = config .telemetry_endpoints .clone() @@ -55,14 +100,13 @@ pub fn new_partial( }) .transpose()?; - let executor = 
sc_service::new_wasm_executor::(&config.executor); + let executor = sc_service::new_wasm_executor(&config.executor); - let (client, backend, keystore_container, task_manager) = - sc_service::new_full_parts::( - config, - telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()), - executor, - )?; + let (client, backend, keystore_container, task_manager) = sc_service::new_full_parts::( + config, + telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()), + executor, + )?; let client = Arc::new(client); let telemetry = telemetry.map(|(worker, telemetry)| { @@ -73,67 +117,180 @@ pub fn new_partial( }); let select_chain = sc_consensus::LongestChain::new(backend.clone()); - - let transaction_pool = sc_transaction_pool::BasicPool::new_full( - config.transaction_pool.clone(), - config.role.is_authority().into(), - config.prometheus_registry(), - task_manager.spawn_essential_handle(), - client.clone(), - ); - let (grandpa_block_import, grandpa_link) = sc_consensus_grandpa::block_import( client.clone(), GRANDPA_JUSTIFICATION_PERIOD, - &(client.clone() as Arc<_>), + &client, select_chain.clone(), telemetry.as_ref().map(|x| x.handle()), )?; - let slot_duration = sc_consensus_aura::slot_duration(&*client)?; - - let import_queue = - sc_consensus_aura::import_queue::(ImportQueueParams { - block_import: grandpa_block_import.clone(), - justification_import: Some(Box::new(grandpa_block_import.clone())), - client: client.clone(), - create_inherent_data_providers: move |_, ()| async move { - let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); + let storage_override = Arc::new(StorageOverrideHandler::::new(client.clone())); + let frontier_backend = match eth_config.frontier_backend_type { + BackendType::KeyValue => FrontierBackend::KeyValue(Arc::new(fc_db::kv::Backend::open( + Arc::clone(&client), + &config.database, + &db_config_dir(config), + )?)), + BackendType::Sql => { + let db_path = db_config_dir(config).join("sql"); + 
std::fs::create_dir_all(&db_path).expect("failed creating sql db directory"); + let backend = futures::executor::block_on(fc_db::sql::Backend::new( + fc_db::sql::BackendConfig::Sqlite(fc_db::sql::SqliteBackendConfig { + path: Path::new("sqlite:///") + .join(db_path) + .join("frontier.db3") + .to_str() + .unwrap_or(""), + create_if_missing: true, + thread_count: eth_config.frontier_sql_backend_thread_count, + cache_size: eth_config.frontier_sql_backend_cache_size, + }), + eth_config.frontier_sql_backend_pool_size, + std::num::NonZeroU32::new(eth_config.frontier_sql_backend_num_ops_timeout), + storage_override.clone(), + )) + .unwrap_or_else(|err| panic!("failed creating sql backend: {:?}", err)); + FrontierBackend::Sql(Arc::new(backend)) + } + }; - let slot = - sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration( - *timestamp, - slot_duration, - ); + let (import_queue, block_import) = build_import_queue( + client.clone(), + config, + eth_config, + &task_manager, + telemetry.as_ref().map(|x| x.handle()), + grandpa_block_import, + )?; - Ok((slot, timestamp)) - }, - spawner: &task_manager.spawn_essential_handle(), - registry: config.prometheus_registry(), - check_for_equivocation: Default::default(), - telemetry: telemetry.as_ref().map(|x| x.handle()), - compatibility_mode: Default::default(), - })?; + let transaction_pool = sc_transaction_pool::BasicPool::new_full( + config.transaction_pool.clone(), + config.role.is_authority().into(), + config.prometheus_registry(), + task_manager.spawn_essential_handle(), + client.clone(), + ); - Ok(sc_service::PartialComponents { + Ok(PartialComponents { client, backend, - task_manager, - import_queue, keystore_container, + task_manager, select_chain, + import_queue, transaction_pool, - other: (grandpa_block_import, grandpa_link, telemetry), + other: ( + telemetry, + block_import, + grandpa_link, + frontier_backend, + storage_override, + ), }) } -// Builds a new service for a full client. 
-pub fn new_full< - N: sc_network::NetworkBackend::Hash>, ->( - config: Configuration, -) -> Result { - let sc_service::PartialComponents { +/// Build the import queue for the template runtime (aura + grandpa). +pub fn build_aura_grandpa_import_queue( + client: Arc>, + config: &Configuration, + eth_config: &EthConfiguration, + task_manager: &TaskManager, + telemetry: Option, + grandpa_block_import: GrandpaBlockImport>, +) -> Result<(BasicQueue, BoxBlockImport), ServiceError> +where + B: BlockT, + NumberFor: BlockNumberOps, + RA: ConstructRuntimeApi>, + RA: Send + Sync + 'static, + RA::RuntimeApi: RuntimeApiCollection, + HF: HostFunctionsT + 'static, +{ + let frontier_block_import = + FrontierBlockImport::new(grandpa_block_import.clone(), client.clone()); + + let slot_duration = sc_consensus_aura::slot_duration(&*client)?; + let target_gas_price = eth_config.target_gas_price; + let create_inherent_data_providers = move |_, ()| async move { + let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); + let slot = + sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration( + *timestamp, + slot_duration, + ); + let dynamic_fee = fp_dynamic_fee::InherentDataProvider(U256::from(target_gas_price)); + Ok((slot, timestamp, dynamic_fee)) + }; + + let import_queue = sc_consensus_aura::import_queue::( + sc_consensus_aura::ImportQueueParams { + block_import: frontier_block_import.clone(), + justification_import: Some(Box::new(grandpa_block_import)), + client, + create_inherent_data_providers, + spawner: &task_manager.spawn_essential_handle(), + registry: config.prometheus_registry(), + check_for_equivocation: Default::default(), + telemetry, + compatibility_mode: sc_consensus_aura::CompatibilityMode::None, + }, + ) + .map_err::(Into::into)?; + + Ok((import_queue, Box::new(frontier_block_import))) +} + +/// Build the import queue for the template runtime (manual seal). 
+pub fn build_manual_seal_import_queue( + client: Arc>, + config: &Configuration, + _eth_config: &EthConfiguration, + task_manager: &TaskManager, + _telemetry: Option, + _grandpa_block_import: GrandpaBlockImport>, +) -> Result<(BasicQueue, BoxBlockImport), ServiceError> +where + B: BlockT, + RA: ConstructRuntimeApi>, + RA: Send + Sync + 'static, + RA::RuntimeApi: RuntimeApiCollection, + HF: HostFunctionsT + 'static, +{ + let frontier_block_import = FrontierBlockImport::new(client.clone(), client); + Ok(( + sc_consensus_manual_seal::import_queue( + Box::new(frontier_block_import.clone()), + &task_manager.spawn_essential_handle(), + config.prometheus_registry(), + ), + Box::new(frontier_block_import), + )) +} + +/// Builds a new service for a full client. +pub async fn new_full( + mut config: Configuration, + eth_config: EthConfiguration, + sealing: Option, +) -> Result +where + B: BlockT, + NumberFor: BlockNumberOps, + ::Header: Unpin, + RA: ConstructRuntimeApi>, + RA: Send + Sync + 'static, + RA::RuntimeApi: RuntimeApiCollection, + HF: HostFunctionsT + 'static, + NB: sc_network::NetworkBackend::Hash>, +{ + let build_import_queue = if sealing.is_some() { + build_manual_seal_import_queue:: + } else { + build_aura_grandpa_import_queue:: + }; + + let PartialComponents { client, backend, mut task_manager, @@ -141,39 +298,49 @@ pub fn new_full< keystore_container, select_chain, transaction_pool, - other: (block_import, grandpa_link, mut telemetry), - } = new_partial(&config)?; - - let mut net_config = sc_network::config::FullNetworkConfiguration::< - Block, - ::Hash, - N, - >::new(&config.network, config.prometheus_registry().cloned()); - let metrics = N::register_notification_metrics(config.prometheus_registry()); + other: (mut telemetry, block_import, grandpa_link, frontier_backend, storage_override), + } = new_partial(&config, ð_config, build_import_queue)?; + + let FrontierPartialComponents { + filter_pool, + fee_history_cache, + fee_history_cache_limit, + } = 
new_frontier_partial(ð_config)?; + + let maybe_registry = config.prometheus_config.as_ref().map(|cfg| &cfg.registry); + let mut net_config = sc_network::config::FullNetworkConfiguration::<_, _, NB>::new( + &config.network, + maybe_registry.cloned(), + ); + let peer_store_handle = net_config.peer_store_handle(); + let metrics = NB::register_notification_metrics(maybe_registry); let grandpa_protocol_name = sc_consensus_grandpa::protocol_standard_name( &client - .block_hash(0) - .ok() - .flatten() + .block_hash(0u32.into())? .expect("Genesis block exists; qed"), &config.chain_spec, ); - let peer_store_handle = net_config.peer_store_handle(); let (grandpa_protocol_config, grandpa_notification_service) = - sc_consensus_grandpa::grandpa_peers_set_config::<_, N>( + sc_consensus_grandpa::grandpa_peers_set_config::<_, NB>( grandpa_protocol_name.clone(), metrics.clone(), peer_store_handle, ); - net_config.add_notification_protocol(grandpa_protocol_config); - let warp_sync = Arc::new(sc_consensus_grandpa::warp_proof::NetworkProvider::new( - backend.clone(), - grandpa_link.shared_authority_set().clone(), - Vec::default(), - )); + let warp_sync_config = if sealing.is_some() { + None + } else { + net_config.add_notification_protocol(grandpa_protocol_config); + let warp_sync: Arc> = + Arc::new(sc_consensus_grandpa::warp_proof::NetworkProvider::new( + backend.clone(), + grandpa_link.shared_authority_set().clone(), + Vec::new(), + )); + Some(WarpSyncConfig::WithProvider(warp_sync)) + }; let (network, system_rpc_tx, tx_handler_controller, network_starter, sync_service) = sc_service::build_network(sc_service::BuildNetworkParams { @@ -184,7 +351,7 @@ pub fn new_full< spawn_handle: task_manager.spawn_handle(), import_queue, block_announce_validator_builder: None, - warp_sync_config: Some(WarpSyncConfig::WithProvider(warp_sync)), + warp_sync_config, block_relay: None, metrics, })?; @@ -210,64 +377,165 @@ pub fn new_full< ); } - let finality_proof_provider = 
sc_consensus_grandpa::FinalityProofProvider::new_for_service( - backend.clone(), - Some(grandpa_link.shared_authority_set().clone()), - ); - let rpc_backend = backend.clone(); - let justification_stream = grandpa_link.justification_stream(); - let shared_authority_set = grandpa_link.shared_authority_set().clone(); - let shared_voter_state = SharedVoterState::empty(); - let role = config.role; let force_authoring = config.force_authoring; - let backoff_authoring_blocks = Some(BackoffAuthoringOnFinalizedHeadLagging { - unfinalized_slack: 6, + let backoff_authoring_blocks = Some(BackoffAuthoringOnFinalizedHeadLagging::> { + unfinalized_slack: 6u32.into(), ..Default::default() }); let name = config.network.node_name.clone(); - let enable_grandpa = !config.disable_grandpa; + let frontier_backend = Arc::new(frontier_backend); + let enable_grandpa = !config.disable_grandpa && sealing.is_none(); let prometheus_registry = config.prometheus_registry().cloned(); - let rpc_extensions_builder = { + // Channel for the rpc handler to communicate with the authorship task. + let (command_sink, commands_stream) = mpsc::channel(1000); + + // Sinks for pubsub notifications. + // Everytime a new subscription is created, a new mpsc channel is added to the sink pool. + // The MappingSyncWorker sends through the channel on block import and the subscription emits a notification to the subscriber on receiving a message through this channel. + // This way we avoid race conditions when using native substrate block import notification stream. + let pubsub_notification_sinks: fc_mapping_sync::EthereumBlockNotificationSinks< + fc_mapping_sync::EthereumBlockNotification, + > = Default::default(); + let pubsub_notification_sinks = Arc::new(pubsub_notification_sinks); + + // for ethereum-compatibility rpc. 
+ config.rpc.id_provider = Some(Box::new(fc_rpc::EthereumSubIdProvider)); + + let rpc_builder = { let client = client.clone(); let pool = transaction_pool.clone(); + let network = network.clone(); + let sync_service = sync_service.clone(); + + let is_authority = role.is_authority(); + let enable_dev_signer = eth_config.enable_dev_signer; + let max_past_logs = eth_config.max_past_logs; + let execute_gas_limit_multiplier = eth_config.execute_gas_limit_multiplier; + let filter_pool = filter_pool.clone(); + let frontier_backend = frontier_backend.clone(); + let pubsub_notification_sinks = pubsub_notification_sinks.clone(); + let storage_override = storage_override.clone(); + let fee_history_cache = fee_history_cache.clone(); + let block_data_cache = Arc::new(fc_rpc::EthBlockDataCacheTask::new( + task_manager.spawn_handle(), + storage_override.clone(), + eth_config.eth_log_block_cache, + eth_config.eth_statuses_cache, + prometheus_registry.clone(), + )); - Box::new( - move |subscription_executor: sc_rpc::SubscriptionTaskExecutor| { - let deps = crate::rpc::FullDeps { - client: client.clone(), - pool: pool.clone(), - grandpa: crate::rpc::GrandpaDeps { - shared_voter_state: shared_voter_state.clone(), - shared_authority_set: shared_authority_set.clone(), - justification_stream: justification_stream.clone(), - subscription_executor: subscription_executor.clone(), - finality_provider: finality_proof_provider.clone(), - }, - _backend: rpc_backend.clone(), - }; - crate::rpc::create_full(deps).map_err(Into::into) - }, - ) + let slot_duration = sc_consensus_aura::slot_duration(&*client)?; + let target_gas_price = eth_config.target_gas_price; + let pending_create_inherent_data_providers = move |_, ()| async move { + let current = sp_timestamp::InherentDataProvider::from_system_time(); + let next_slot = current + .timestamp() + .as_millis() + .saturating_add(slot_duration.as_millis()); + let timestamp = sp_timestamp::InherentDataProvider::new(next_slot.into()); + let slot = 
sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration( + *timestamp, + slot_duration, + ); + let dynamic_fee = fp_dynamic_fee::InherentDataProvider(U256::from(target_gas_price)); + Ok((slot, timestamp, dynamic_fee)) + }; + + Box::new(move |subscription_task_executor| { + let eth_deps = crate::rpc::EthDeps { + client: client.clone(), + pool: pool.clone(), + graph: pool.pool().clone(), + converter: Some(TransactionConverter::::default()), + is_authority, + enable_dev_signer, + network: network.clone(), + sync: sync_service.clone(), + frontier_backend: match &*frontier_backend { + fc_db::Backend::KeyValue(b) => b.clone(), + fc_db::Backend::Sql(b) => b.clone(), + }, + storage_override: storage_override.clone(), + block_data_cache: block_data_cache.clone(), + filter_pool: filter_pool.clone(), + max_past_logs, + fee_history_cache: fee_history_cache.clone(), + fee_history_cache_limit, + execute_gas_limit_multiplier, + forced_parent_hashes: None, + pending_create_inherent_data_providers, + }; + let deps = crate::rpc::FullDeps { + client: client.clone(), + pool: pool.clone(), + command_sink: if sealing.is_some() { + Some(command_sink.clone()) + } else { + None + }, + eth: eth_deps, + }; + crate::rpc::create_full( + deps, + subscription_task_executor, + pubsub_notification_sinks.clone(), + ) + .map_err(Into::into) + }) }; let _rpc_handlers = sc_service::spawn_tasks(sc_service::SpawnTasksParams { - network: network.clone(), + config, client: client.clone(), - keystore: keystore_container.keystore(), + backend: backend.clone(), task_manager: &mut task_manager, + keystore: keystore_container.keystore(), transaction_pool: transaction_pool.clone(), - rpc_builder: rpc_extensions_builder, - backend, + rpc_builder, + network: network.clone(), system_rpc_tx, tx_handler_controller, sync_service: sync_service.clone(), - config, telemetry: telemetry.as_mut(), })?; + spawn_frontier_tasks( + &task_manager, + client.clone(), + backend, + frontier_backend, + 
filter_pool, + storage_override, + fee_history_cache, + fee_history_cache_limit, + sync_service.clone(), + pubsub_notification_sinks, + ) + .await; + if role.is_authority() { + // manual-seal authorship + if let Some(sealing) = sealing { + run_manual_seal_authorship( + ð_config, + sealing, + client, + transaction_pool, + select_chain, + block_import, + &task_manager, + prometheus_registry.as_ref(), + telemetry.as_ref(), + commands_stream, + )?; + + network_starter.start_network(); + log::info!("Manual Seal Ready"); + return Ok(task_manager); + } + let proposer_factory = sc_basic_authorship::ProposerFactory::new( task_manager.spawn_handle(), client.clone(), @@ -277,37 +545,36 @@ pub fn new_full< ); let slot_duration = sc_consensus_aura::slot_duration(&*client)?; + let target_gas_price = eth_config.target_gas_price; + let create_inherent_data_providers = move |_, ()| async move { + let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); + let slot = sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration( + *timestamp, + slot_duration, + ); + let dynamic_fee = fp_dynamic_fee::InherentDataProvider(U256::from(target_gas_price)); + Ok((slot, timestamp, dynamic_fee)) + }; let aura = sc_consensus_aura::start_aura::( - StartAuraParams { + sc_consensus_aura::StartAuraParams { slot_duration, client, select_chain, block_import, proposer_factory, - create_inherent_data_providers: move |_, ()| async move { - let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); - - let slot = - sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration( - *timestamp, - slot_duration, - ); - - Ok((slot, timestamp)) - }, + sync_oracle: sync_service.clone(), + justification_sync_link: sync_service.clone(), + create_inherent_data_providers, force_authoring, backoff_authoring_blocks, keystore: keystore_container.keystore(), - sync_oracle: sync_service.clone(), - justification_sync_link: sync_service.clone(), - 
block_proposal_slot_portion: SlotProportion::new(2f32 / 3f32), + block_proposal_slot_portion: sc_consensus_aura::SlotProportion::new(2f32 / 3f32), max_block_proposal_slot_portion: None, telemetry: telemetry.as_ref().map(|x| x.handle()), - compatibility_mode: Default::default(), + compatibility_mode: sc_consensus_aura::CompatibilityMode::None, }, )?; - // the AURA authoring task is considered essential, i.e. if it // fails we take down the service with it. task_manager @@ -342,28 +609,176 @@ pub fn new_full< // and vote data availability than the observer. The observer has not // been tested extensively yet and having most nodes in a network run it // could lead to finality stalls. - let grandpa_config = sc_consensus_grandpa::GrandpaParams { - config: grandpa_config, - link: grandpa_link, - network, - voting_rule: sc_consensus_grandpa::VotingRulesBuilder::default().build(), - prometheus_registry, - shared_voter_state: SharedVoterState::empty(), - telemetry: telemetry.as_ref().map(|x| x.handle()), - offchain_tx_pool_factory: OffchainTransactionPoolFactory::new(transaction_pool), - sync: Arc::new(sync_service), - notification_service: grandpa_notification_service, - }; + let grandpa_voter = + sc_consensus_grandpa::run_grandpa_voter(sc_consensus_grandpa::GrandpaParams { + config: grandpa_config, + link: grandpa_link, + network, + sync: sync_service, + notification_service: grandpa_notification_service, + voting_rule: sc_consensus_grandpa::VotingRulesBuilder::default().build(), + prometheus_registry, + shared_voter_state: sc_consensus_grandpa::SharedVoterState::empty(), + telemetry: telemetry.as_ref().map(|x| x.handle()), + offchain_tx_pool_factory: OffchainTransactionPoolFactory::new(transaction_pool), + })?; // the GRANDPA voter task is considered infallible, i.e. // if it fails we take down the service with it. 
- task_manager.spawn_essential_handle().spawn_blocking( - "grandpa-voter", - None, - sc_consensus_grandpa::run_grandpa_voter(grandpa_config)?, - ); + task_manager + .spawn_essential_handle() + .spawn_blocking("grandpa-voter", None, grandpa_voter); } network_starter.start_network(); Ok(task_manager) } + +pub async fn build_full( + config: Configuration, + eth_config: EthConfiguration, + sealing: Option, +) -> Result { + match config.network.network_backend { + sc_network::config::NetworkBackendType::Libp2p => { + new_full::>( + config, eth_config, sealing, + ) + .await + } + sc_network::config::NetworkBackendType::Litep2p => { + new_full::( + config, eth_config, sealing, + ) + .await + } + } +} + +pub fn new_chain_ops( + config: &mut Configuration, + eth_config: &EthConfiguration, +) -> Result< + ( + Arc, + Arc, + BasicQueue, + TaskManager, + FrontierBackend, + ), + ServiceError, +> { + config.keystore = sc_service::config::KeystoreConfig::InMemory; + let PartialComponents { + client, + backend, + import_queue, + task_manager, + other, + .. 
+ } = new_partial::( + config, + eth_config, + build_aura_grandpa_import_queue, + )?; + Ok((client, backend, import_queue, task_manager, other.3)) +} + +#[allow(clippy::too_many_arguments)] +fn run_manual_seal_authorship( + eth_config: &EthConfiguration, + sealing: Sealing, + client: Arc>, + transaction_pool: Arc>>, + select_chain: FullSelectChain, + block_import: BoxBlockImport, + task_manager: &TaskManager, + prometheus_registry: Option<&Registry>, + telemetry: Option<&Telemetry>, + commands_stream: mpsc::Receiver< + sc_consensus_manual_seal::rpc::EngineCommand<::Hash>, + >, +) -> Result<(), ServiceError> +where + B: BlockT, + RA: ConstructRuntimeApi>, + RA: Send + Sync + 'static, + RA::RuntimeApi: RuntimeApiCollection, + HF: HostFunctionsT + 'static, +{ + let proposer_factory = sc_basic_authorship::ProposerFactory::new( + task_manager.spawn_handle(), + client.clone(), + transaction_pool.clone(), + prometheus_registry, + telemetry.as_ref().map(|x| x.handle()), + ); + + thread_local!(static TIMESTAMP: RefCell = const { RefCell::new(0) }); + + /// Provide a mock duration starting at 0 in millisecond for timestamp inherent. + /// Each call will increment timestamp by slot_duration making Aura think time has passed. + struct MockTimestampInherentDataProvider; + + #[async_trait::async_trait] + impl sp_inherents::InherentDataProvider for MockTimestampInherentDataProvider { + async fn provide_inherent_data( + &self, + inherent_data: &mut sp_inherents::InherentData, + ) -> Result<(), sp_inherents::Error> { + TIMESTAMP.with(|x| { + let mut x_ref = x.borrow_mut(); + *x_ref = x_ref.saturating_add(node_subtensor_runtime::SLOT_DURATION); + inherent_data.put_data(sp_timestamp::INHERENT_IDENTIFIER, &*x.borrow()) + }) + } + + async fn try_handle_error( + &self, + _identifier: &sp_inherents::InherentIdentifier, + _error: &[u8], + ) -> Option> { + // The pallet never reports error. 
+ None + } + } + + let target_gas_price = eth_config.target_gas_price; + let create_inherent_data_providers = move |_, ()| async move { + let timestamp = MockTimestampInherentDataProvider; + let dynamic_fee = fp_dynamic_fee::InherentDataProvider(U256::from(target_gas_price)); + Ok((timestamp, dynamic_fee)) + }; + + let manual_seal = match sealing { + Sealing::Manual => future::Either::Left(sc_consensus_manual_seal::run_manual_seal( + sc_consensus_manual_seal::ManualSealParams { + block_import, + env: proposer_factory, + client, + pool: transaction_pool, + commands_stream, + select_chain, + consensus_data_provider: None, + create_inherent_data_providers, + }, + )), + Sealing::Instant => future::Either::Right(sc_consensus_manual_seal::run_instant_seal( + sc_consensus_manual_seal::InstantSealParams { + block_import, + env: proposer_factory, + client, + pool: transaction_pool, + select_chain, + consensus_data_provider: None, + create_inherent_data_providers, + }, + )), + }; + + // we spawn the future on a background thread managed by service. + task_manager + .spawn_essential_handle() + .spawn_blocking("manual-seal", None, manual_seal); + Ok(()) +} diff --git a/pallets/admin-utils/src/benchmarking.rs b/pallets/admin-utils/src/benchmarking.rs index 7515525f0..65ccf629e 100644 --- a/pallets/admin-utils/src/benchmarking.rs +++ b/pallets/admin-utils/src/benchmarking.rs @@ -235,7 +235,7 @@ mod benchmarks { ); #[extrinsic_call] - _(RawOrigin::Root, 1u16/*netuid*/, 3u64/*interval*/)/*set_commit_reveal_weights_interval()*/; + _(RawOrigin::Root, 1u16/*netuid*/, 3u64/*interval*/)/*sudo_set_commit_reveal_weights_interval()*/; } #[benchmark] diff --git a/pallets/admin-utils/src/lib.rs b/pallets/admin-utils/src/lib.rs index 3e06b822e..85c7ef62c 100644 --- a/pallets/admin-utils/src/lib.rs +++ b/pallets/admin-utils/src/lib.rs @@ -960,32 +960,6 @@ pub mod pallet { Ok(()) } - /// The extrinsic sets the commit/reveal interval for a subnet. 
- /// It is only callable by the root account or subnet owner. - /// The extrinsic will call the Subtensor pallet to set the interval. - #[pallet::call_index(48)] - #[pallet::weight(T::WeightInfo::sudo_set_commit_reveal_weights_interval())] - pub fn sudo_set_commit_reveal_weights_interval( - origin: OriginFor, - netuid: u16, - interval: u64, - ) -> DispatchResult { - pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; - - ensure!( - pallet_subtensor::Pallet::::if_subnet_exist(netuid), - Error::::SubnetDoesNotExist - ); - - pallet_subtensor::Pallet::::set_commit_reveal_weights_interval(netuid, interval); - log::debug!( - "SetWeightCommitInterval( netuid: {:?}, interval: {:?} ) ", - netuid, - interval - ); - Ok(()) - } - /// The extrinsic enabled/disables commit/reaveal for a given subnet. /// It is only callable by the root account or subnet owner. /// The extrinsic will call the Subtensor pallet to set the value. @@ -1196,6 +1170,45 @@ pub mod pallet { Ok(()) } + + /// Sets the commit-reveal weights periods for a specific subnet. + /// + /// This extrinsic allows the subnet owner or root account to set the duration (in epochs) during which committed weights must be revealed. + /// The commit-reveal mechanism ensures that users commit weights in advance and reveal them only within a specified period. + /// + /// # Arguments + /// * `origin` - The origin of the call, which must be the subnet owner or the root account. + /// * `netuid` - The unique identifier of the subnet for which the periods are being set. + /// * `periods` - The number of epochs that define the commit-reveal period. + /// + /// # Errors + /// * `BadOrigin` - If the caller is neither the subnet owner nor the root account. + /// * `SubnetDoesNotExist` - If the specified subnet does not exist. + /// + /// # Weight + /// Weight is handled by the `#[pallet::weight]` attribute. 
+ #[pallet::call_index(57)] + #[pallet::weight(T::WeightInfo::sudo_set_commit_reveal_weights_interval())] + pub fn sudo_set_commit_reveal_weights_interval( + origin: OriginFor, + netuid: u16, + interval: u64, + ) -> DispatchResult { + pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; + + ensure!( + pallet_subtensor::Pallet::::if_subnet_exist(netuid), + Error::::SubnetDoesNotExist + ); + + pallet_subtensor::Pallet::::set_reveal_period(netuid, interval); + log::debug!( + "SetWeightCommitInterval( netuid: {:?}, interval: {:?} ) ", + netuid, + interval + ); + Ok(()) + } } } diff --git a/pallets/admin-utils/src/weights.rs b/pallets/admin-utils/src/weights.rs index 84fe058f8..bda9c7916 100644 --- a/pallets/admin-utils/src/weights.rs +++ b/pallets/admin-utils/src/weights.rs @@ -415,10 +415,10 @@ impl WeightInfo for SubstrateWeight { } fn sudo_set_commit_reveal_weights_interval() -> Weight { // Proof Size summary in bytes: - // Measured: `1111` - // Estimated: `4697` - // Minimum execution time: 46_450_000 picoseconds. - Weight::from_parts(47_279_000, 4697) + // Measured: `456` + // Estimated: `3921` + // Minimum execution time: 19_070_000 picoseconds. 
+ Weight::from_parts(19_380_000, 456) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -784,14 +784,14 @@ impl WeightInfo for () { fn sudo_set_commit_reveal_weights_interval() -> Weight { // -- Extrinsic Time -- // Model: - // Time ~= 20.42 - // µs + // Time ~= 19.38 + // µs // Reads = 1 // Writes = 1 // Recorded proof Size = 456 - Weight::from_parts(20_420_000, 456) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) + Weight::from_parts(19_380_000, 456) + .saturating_add(RocksDbWeight::get().reads(1)) + .saturating_add(RocksDbWeight::get().writes(1)) } fn sudo_set_commit_reveal_weights_enabled() -> Weight { // -- Extrinsic Time -- diff --git a/pallets/admin-utils/tests/tests.rs b/pallets/admin-utils/tests/tests.rs index 8ab85f177..442275052 100644 --- a/pallets/admin-utils/tests/tests.rs +++ b/pallets/admin-utils/tests/tests.rs @@ -1113,29 +1113,6 @@ fn test_sudo_set_min_delegate_take() { }); } -#[test] -fn test_sudo_set_weight_commit_interval() { - new_test_ext().execute_with(|| { - let netuid: u16 = 1; - add_network(netuid, 10); - - let to_be_set = 55; - let init_value = SubtensorModule::get_commit_reveal_weights_interval(netuid); - - assert_ok!(AdminUtils::sudo_set_commit_reveal_weights_interval( - <::RuntimeOrigin>::root(), - netuid, - to_be_set - )); - - assert!(init_value != to_be_set); - assert_eq!( - SubtensorModule::get_commit_reveal_weights_interval(netuid), - to_be_set - ); - }); -} - #[test] fn test_sudo_set_commit_reveal_weights_enabled() { new_test_ext().execute_with(|| { @@ -1435,3 +1412,23 @@ fn test_sudo_set_dissolve_network_schedule_duration() { System::assert_last_event(Event::DissolveNetworkScheduleDurationSet(new_duration).into()); }); } + +#[test] +fn sudo_set_commit_reveal_weights_interval() { + new_test_ext().execute_with(|| { + let netuid: u16 = 1; + add_network(netuid, 10); + + let to_be_set = 55; + let init_value = 
SubtensorModule::get_reveal_period(netuid); + + assert_ok!(AdminUtils::sudo_set_commit_reveal_weights_interval( + <::RuntimeOrigin>::root(), + netuid, + to_be_set + )); + + assert!(init_value != to_be_set); + assert_eq!(SubtensorModule::get_reveal_period(netuid), to_be_set); + }); +} diff --git a/pallets/subtensor/rpc/src/lib.rs b/pallets/subtensor/rpc/src/lib.rs index 2445a5eda..d99388193 100644 --- a/pallets/subtensor/rpc/src/lib.rs +++ b/pallets/subtensor/rpc/src/lib.rs @@ -41,7 +41,6 @@ pub trait SubtensorCustomApi { fn get_neurons(&self, netuid: u16, at: Option) -> RpcResult>; #[method(name = "neuronInfo_getNeuron")] fn get_neuron(&self, netuid: u16, uid: u16, at: Option) -> RpcResult>; - #[method(name = "subnetInfo_getSubnetInfo")] fn get_subnet_info(&self, netuid: u16, at: Option) -> RpcResult>; #[method(name = "subnetInfo_getSubnetsInfo")] diff --git a/pallets/subtensor/src/benchmarks.rs b/pallets/subtensor/src/benchmarks.rs index 4915bb3ac..6fd1cbf8b 100644 --- a/pallets/subtensor/src/benchmarks.rs +++ b/pallets/subtensor/src/benchmarks.rs @@ -416,7 +416,6 @@ reveal_weights { ); Subtensor::::set_validator_permit_for_uid(netuid, 0, true); - Subtensor::::set_commit_reveal_weights_interval(netuid, 0); let commit_hash: H256 = BlakeTwo256::hash_of(&( hotkey.clone(), @@ -521,4 +520,82 @@ reveal_weights { // Benchmark setup complete, now execute the extrinsic }: swap_coldkey(RawOrigin::Root, old_coldkey.clone(), new_coldkey.clone()) +batch_reveal_weights { + let tempo: u16 = 0; + let netuid: u16 = 1; + let num_commits: usize = 10; + + let hotkey: T::AccountId = account("hot", 0, 1); + let coldkey: T::AccountId = account("cold", 0, 2); + + Subtensor::::init_new_network(netuid, tempo); + Subtensor::::set_network_registration_allowed(netuid, true); + Subtensor::::set_network_pow_registration_allowed(netuid, true); + Subtensor::::set_commit_reveal_weights_enabled(netuid, true); + Subtensor::::set_weights_set_rate_limit(netuid, 0); // Disable rate limiting for 
benchmarking + + let block_number: u64 = Subtensor::::get_current_block_as_u64(); + let (nonce, work): (u64, Vec) = Subtensor::::create_work_for_block_number( + netuid, + block_number, + 3, + &hotkey, + ); + + let origin = T::RuntimeOrigin::from(RawOrigin::Signed(hotkey.clone())); + assert_ok!(Subtensor::::register( + origin.clone(), + netuid, + block_number, + nonce, + work.clone(), + hotkey.clone(), + coldkey.clone(), + )); + + let uid: u16 = 0; + + Subtensor::::set_validator_permit_for_uid(netuid, uid, true); + + let mut uids_list = Vec::new(); + let mut values_list = Vec::new(); + let mut salts_list = Vec::new(); + let mut version_keys = Vec::new(); + + for i in 0..num_commits { + let uids: Vec = vec![uid]; + let values: Vec = vec![i as u16]; + let salt: Vec = vec![i as u16]; + let version_key_i: u64 = i as u64; + + let commit_hash: H256 = BlakeTwo256::hash_of(&( + hotkey.clone(), + netuid, + uids.clone(), + values.clone(), + salt.clone(), + version_key_i, + )); + + assert_ok!(Subtensor::::commit_weights( + T::RuntimeOrigin::from(RawOrigin::Signed(hotkey.clone())), + netuid, + commit_hash, + )); + + uids_list.push(uids); + values_list.push(values); + salts_list.push(salt); + version_keys.push(version_key_i); + } +}: batch_reveal_weights( + RawOrigin::Signed(hotkey.clone()), + netuid, + uids_list, + values_list, + salts_list, + version_keys +) + + } diff --git a/pallets/subtensor/src/coinbase/run_coinbase.rs b/pallets/subtensor/src/coinbase/run_coinbase.rs index 9974087b0..6689b7060 100644 --- a/pallets/subtensor/src/coinbase/run_coinbase.rs +++ b/pallets/subtensor/src/coinbase/run_coinbase.rs @@ -285,13 +285,10 @@ impl Pallet { PendingdHotkeyEmission::::insert(hotkey, 0); PendingdHotkeyEmissionUntouchable::::insert(hotkey, 0); - // --- 2 Retrieve the last time this hotkey's emissions were drained. - let last_emission_drain: u64 = LastHotkeyEmissionDrain::::get(hotkey); - - // --- 3 Update the block value to the current block number. 
+ // --- 2 Update the block value to the current block number. LastHotkeyEmissionDrain::::insert(hotkey, block_number); - // --- 4 Retrieve the total stake for the hotkey from all nominations. + // --- 3 Retrieve the total stake for the hotkey from all nominations. let total_hotkey_stake: u64 = Self::get_total_stake_for_hotkey(hotkey); // --- 4 Calculate the emission take for the hotkey. @@ -306,41 +303,40 @@ impl Pallet { // --- 5 Compute the remaining emission after deducting the hotkey's take and untouchable_emission. let emission_minus_take: u64 = emission_to_distribute.saturating_sub(hotkey_take); - // --- 7 Calculate the remaining emission after the hotkey's take. + // --- 6 Calculate the remaining emission after the hotkey's take. let mut remainder: u64 = emission_minus_take; - // --- 8 Iterate over each nominator and get all viable stake. + // --- 7 Iterate over each nominator and get all viable stake. let mut total_viable_nominator_stake: u64 = total_hotkey_stake; - for (nominator, nominator_stake) in Stake::::iter_prefix(hotkey) { - if LastAddStakeIncrease::::get(hotkey, nominator) > last_emission_drain { - total_viable_nominator_stake = - total_viable_nominator_stake.saturating_sub(nominator_stake); - } + for (nominator, _) in Stake::::iter_prefix(hotkey) { + let nonviable_nomintaor_stake = Self::get_nonviable_stake(hotkey, &nominator); + + total_viable_nominator_stake = + total_viable_nominator_stake.saturating_sub(nonviable_nomintaor_stake); } - // --- 9 Iterate over each nominator. + // --- 8 Iterate over each nominator. if total_viable_nominator_stake != 0 { for (nominator, nominator_stake) in Stake::::iter_prefix(hotkey) { - // --- 10 Check if the stake was manually increased by the user since the last emission drain for this hotkey. + // --- 9 Check if the stake was manually increased by the user since the last emission drain for this hotkey. // If it was, skip this nominator as they will not receive their proportion of the emission. 
- if LastAddStakeIncrease::::get(hotkey, nominator.clone()) > last_emission_drain { - continue; - } + let viable_nominator_stake = + nominator_stake.saturating_sub(Self::get_nonviable_stake(hotkey, &nominator)); - // --- 11 Calculate this nominator's share of the emission. - let nominator_emission: I64F64 = I64F64::from_num(emission_minus_take) - .saturating_mul(I64F64::from_num(nominator_stake)) + // --- 10 Calculate this nominator's share of the emission. + let nominator_emission: I64F64 = I64F64::from_num(viable_nominator_stake) .checked_div(I64F64::from_num(total_viable_nominator_stake)) - .unwrap_or(I64F64::from_num(0)); + .unwrap_or(I64F64::from_num(0)) + .saturating_mul(I64F64::from_num(emission_minus_take)); - // --- 12 Increase the stake for the nominator. + // --- 11 Increase the stake for the nominator. Self::increase_stake_on_coldkey_hotkey_account( &nominator, hotkey, nominator_emission.to_num::(), ); - // --- 13* Record event and Subtract the nominator's emission from the remainder. + // --- 12* Record event and Subtract the nominator's emission from the remainder. total_new_tao = total_new_tao.saturating_add(nominator_emission.to_num::()); remainder = remainder.saturating_sub(nominator_emission.to_num::()); } @@ -353,7 +349,7 @@ impl Pallet { .saturating_add(untouchable_emission); Self::increase_stake_on_hotkey_account(hotkey, hotkey_new_tao); - // --- 15 Record new tao creation event and return the amount created. + // --- 14 Record new tao creation event and return the amount created. total_new_tao = total_new_tao.saturating_add(hotkey_new_tao); total_new_tao } @@ -408,4 +404,18 @@ impl Pallet { let remainder = block_plus_netuid.rem_euclid(tempo_plus_one); (tempo as u64).saturating_sub(remainder) } + + /// Calculates the nonviable stake for a nominator. + /// The nonviable stake is the stake that was added by the nominator since the last emission drain. + /// This stake will not receive emission until the next emission drain. 
+ /// Note: if the stake delta is below zero, we return zero. We don't allow more stake than the nominator has. + pub fn get_nonviable_stake(hotkey: &T::AccountId, nominator: &T::AccountId) -> u64 { + let stake_delta = StakeDeltaSinceLastEmissionDrain::::get(hotkey, nominator); + if stake_delta.is_negative() { + 0 + } else { + // Should never fail the into, but we handle it anyway. + stake_delta.try_into().unwrap_or(u64::MAX) + } + } } diff --git a/pallets/subtensor/src/lib.rs b/pallets/subtensor/src/lib.rs index bccf2c1f7..3453bf4b7 100644 --- a/pallets/subtensor/src/lib.rs +++ b/pallets/subtensor/src/lib.rs @@ -67,12 +67,15 @@ pub mod pallet { traits::{ tokens::fungible, OriginTrait, QueryPreimage, StorePreimage, UnfilteredDispatchable, }, + BoundedVec, }; use frame_system::pallet_prelude::*; use sp_core::H256; use sp_runtime::traits::{Dispatchable, TrailingZeroInput}; + use sp_std::collections::vec_deque::VecDeque; use sp_std::vec; use sp_std::vec::Vec; + use subtensor_macros::freeze_struct; #[cfg(not(feature = "std"))] use alloc::boxed::Box; @@ -129,6 +132,36 @@ pub mod pallet { pub placeholder2: u8, } + /// Struct for NeuronCertificate. + pub type NeuronCertificateOf = NeuronCertificate; + /// Data structure for NeuronCertificate information. 
+ #[freeze_struct("1c232be200d9ec6c")] + #[derive(Decode, Encode, Default, TypeInfo, PartialEq, Eq, Clone, Debug)] + pub struct NeuronCertificate { + /// The neuron TLS public key + pub public_key: BoundedVec>, + /// The algorithm used to generate the public key + pub algorithm: u8, + } + + impl TryFrom> for NeuronCertificate { + type Error = (); + + fn try_from(value: Vec) -> Result { + if value.len() > 65 { + return Err(()); + } + // take the first byte as the algorithm + let algorithm = value.first().ok_or(())?; + // and the rest as the public_key + let certificate = value.get(1..).ok_or(())?.to_vec(); + Ok(Self { + public_key: BoundedVec::try_from(certificate).map_err(|_| ())?, + algorithm: *algorithm, + }) + } + } + /// Struct for Prometheus. pub type PrometheusInfoOf = PrometheusInfo; @@ -226,6 +259,11 @@ pub mod pallet { 0 } #[pallet::type_value] + /// Default stake delta. + pub fn DefaultStakeDelta() -> i128 { + 0 + } + #[pallet::type_value] /// Default stakes per interval. pub fn DefaultStakesPerInterval() -> (u64, u64) { (0, 0) @@ -536,6 +574,11 @@ pub mod pallet { 0 } #[pallet::type_value] + /// Default Reveal Period Epochs + pub fn DefaultRevealPeriodEpochs() -> u64 { + 1 + } + #[pallet::type_value] /// Value definition for vector of u16. pub fn EmptyU16Vec() -> Vec { vec![] @@ -597,11 +640,6 @@ pub mod pallet { T::InitialServingRateLimit::get() } #[pallet::type_value] - /// Default value for weight commit reveal interval. - pub fn DefaultWeightCommitRevealInterval() -> u64 { - 1000 - } - #[pallet::type_value] /// Default value for weight commit/reveal enabled. pub fn DefaultCommitRevealWeightsEnabled() -> bool { false @@ -768,16 +806,16 @@ pub mod pallet { DefaultAccumulatedEmission, >; #[pallet::storage] - /// Map ( hot, cold ) --> block_number | Last add stake increase. - pub type LastAddStakeIncrease = StorageDoubleMap< + /// Map ( hot, cold ) --> stake: i128 | Stake added/removed since last emission drain. 
+ pub type StakeDeltaSinceLastEmissionDrain = StorageDoubleMap< _, Blake2_128Concat, T::AccountId, Identity, T::AccountId, - u64, + i128, ValueQuery, - DefaultAccountTake, + DefaultStakeDelta, >; #[pallet::storage] /// DMAP ( parent, netuid ) --> Vec<(proportion,child)> @@ -1010,10 +1048,6 @@ pub mod pallet { StorageMap<_, Identity, u16, u64, ValueQuery, DefaultAdjustmentAlpha>; #[pallet::storage] /// --- MAP ( netuid ) --> interval - pub type WeightCommitRevealInterval = - StorageMap<_, Identity, u16, u64, ValueQuery, DefaultWeightCommitRevealInterval>; - #[pallet::storage] - /// --- MAP ( netuid ) --> interval pub type CommitRevealWeightsEnabled = StorageMap<_, Identity, u16, bool, ValueQuery, DefaultCommitRevealWeightsEnabled>; #[pallet::storage] @@ -1172,6 +1206,17 @@ pub mod pallet { /// --- MAP ( netuid, hotkey ) --> axon_info pub type Axons = StorageDoubleMap<_, Identity, u16, Blake2_128Concat, T::AccountId, AxonInfoOf, OptionQuery>; + /// --- MAP ( netuid, hotkey ) --> certificate + #[pallet::storage] + pub type NeuronCertificates = StorageDoubleMap< + _, + Identity, + u16, + Blake2_128Concat, + T::AccountId, + NeuronCertificateOf, + OptionQuery, + >; #[pallet::storage] /// --- MAP ( netuid, hotkey ) --> prometheus_info pub type Prometheus = StorageDoubleMap< @@ -1221,16 +1266,20 @@ pub mod pallet { /// ITEM( weights_min_stake ) pub type WeightsMinStake = StorageValue<_, u64, ValueQuery, DefaultWeightsMinStake>; #[pallet::storage] - /// --- MAP (netuid, who) --> (hash, weight) | Returns the hash and weight committed by an account for a given netuid. + /// --- MAP (netuid, who) --> VecDeque<(hash, commit_block, first_reveal_block, last_reveal_block)> | Stores a queue of commits for an account on a given netuid. 
pub type WeightCommits = StorageDoubleMap< _, Twox64Concat, u16, Twox64Concat, T::AccountId, - (H256, u64), + VecDeque<(H256, u64, u64, u64)>, OptionQuery, >; + #[pallet::storage] + /// --- Map (netuid) --> Number of epochs allowed for commit reveal periods + pub type RevealPeriodEpochs = + StorageMap<_, Twox64Concat, u16, u64, ValueQuery, DefaultRevealPeriodEpochs>; /// ================== /// ==== Genesis ===== @@ -1427,6 +1476,18 @@ where Err(InvalidTransaction::Custom(2).into()) } } + Some(Call::batch_reveal_weights { netuid, .. }) => { + if Self::check_weights_min_stake(who) { + let priority: u64 = Self::get_priority_set_weights(who, *netuid); + Ok(ValidTransaction { + priority, + longevity: 1, + ..Default::default() + }) + } else { + Err(InvalidTransaction::Custom(6).into()) + } + } Some(Call::set_weights { netuid, .. }) => { if Self::check_weights_min_stake(who) { let priority: u64 = Self::get_priority_set_weights(who, *netuid); @@ -1548,6 +1609,10 @@ where let transaction_fee = 0; Ok((CallType::Serve, transaction_fee, who.clone())) } + Some(Call::serve_axon_tls { .. }) => { + let transaction_fee = 0; + Ok((CallType::Serve, transaction_fee, who.clone())) + } Some(Call::register_network { .. }) => { let transaction_fee = 0; Ok((CallType::RegisterNetwork, transaction_fee, who.clone())) diff --git a/pallets/subtensor/src/macros/dispatches.rs b/pallets/subtensor/src/macros/dispatches.rs index a97e4494d..e98ecbd6a 100644 --- a/pallets/subtensor/src/macros/dispatches.rs +++ b/pallets/subtensor/src/macros/dispatches.rs @@ -102,8 +102,11 @@ mod dispatches { /// - The hash representing the committed weights. /// /// # Raises: - /// * `WeightsCommitNotAllowed`: - /// - Attempting to commit when it is not allowed. + /// * `CommitRevealDisabled`: + /// - Attempting to commit when the commit-reveal mechanism is disabled. + /// + /// * `TooManyUnrevealedCommits`: + /// - Attempting to commit when the user has more than the allowed limit of unrevealed commits. 
/// #[pallet::call_index(96)] #[pallet::weight((Weight::from_parts(46_000_000, 0) @@ -132,21 +135,27 @@ mod dispatches { /// * `values` (`Vec`): /// - The values of the weights being revealed. /// - /// * `salt` (`Vec`): - /// - The random salt to protect from brute-force guessing attack in case of small weight changes bit-wise. + /// * `salt` (`Vec`): + /// - The salt used to generate the commit hash. /// /// * `version_key` (`u64`): /// - The network version key. /// /// # Raises: + /// * `CommitRevealDisabled`: + /// - Attempting to reveal weights when the commit-reveal mechanism is disabled. + /// /// * `NoWeightsCommitFound`: /// - Attempting to reveal weights without an existing commit. /// - /// * `InvalidRevealCommitHashNotMatchTempo`: - /// - Attempting to reveal weights outside the valid tempo. + /// * `ExpiredWeightCommit`: + /// - Attempting to reveal a weight commit that has expired. + /// + /// * `RevealTooEarly`: + /// - Attempting to reveal weights outside the valid reveal period. /// /// * `InvalidRevealCommitHashNotMatch`: - /// - The revealed hash does not match the committed hash. + /// - The revealed hash does not match any committed hash. /// #[pallet::call_index(97)] #[pallet::weight((Weight::from_parts(103_000_000, 0) @@ -163,6 +172,67 @@ mod dispatches { Self::do_reveal_weights(origin, netuid, uids, values, salt, version_key) } + /// ---- The implementation for batch revealing committed weights. + /// + /// # Args: + /// * `origin`: (`::RuntimeOrigin`): + /// - The signature of the revealing hotkey. + /// + /// * `netuid` (`u16`): + /// - The u16 network identifier. + /// + /// * `uids_list` (`Vec>`): + /// - A list of uids for each set of weights being revealed. + /// + /// * `values_list` (`Vec>`): + /// - A list of values for each set of weights being revealed. + /// + /// * `salts_list` (`Vec>`): + /// - A list of salts used to generate the commit hashes. + /// + /// * `version_keys` (`Vec`): + /// - A list of network version keys. 
+ /// + /// # Raises: + /// * `CommitRevealDisabled`: + /// - Attempting to reveal weights when the commit-reveal mechanism is disabled. + /// + /// * `NoWeightsCommitFound`: + /// - Attempting to reveal weights without an existing commit. + /// + /// * `ExpiredWeightCommit`: + /// - Attempting to reveal a weight commit that has expired. + /// + /// * `RevealTooEarly`: + /// - Attempting to reveal weights outside the valid reveal period. + /// + /// * `InvalidRevealCommitHashNotMatch`: + /// - The revealed hash does not match any committed hash. + /// + /// * `InvalidInputLengths`: + /// - The input vectors are of mismatched lengths. + #[pallet::call_index(98)] + #[pallet::weight((Weight::from_parts(367_612_000, 0) + .saturating_add(T::DbWeight::get().reads(14)) + .saturating_add(T::DbWeight::get().writes(3)), DispatchClass::Normal, Pays::No))] + pub fn batch_reveal_weights( + origin: T::RuntimeOrigin, + netuid: u16, + uids_list: Vec>, + values_list: Vec>, + salts_list: Vec>, + version_keys: Vec, + ) -> DispatchResult { + Self::do_batch_reveal_weights( + origin, + netuid, + uids_list, + values_list, + salts_list, + version_keys, + ) + } + /// # Args: /// * `origin`: (Origin): /// - The caller, a hotkey who wishes to set their weights. @@ -435,7 +505,7 @@ mod dispatches { Self::do_remove_stake(origin, hotkey, amount_unstaked) } - /// Serves or updates axon /promethteus information for the neuron associated with the caller. If the caller is + /// Serves or updates axon /prometheus information for the neuron associated with the caller. If the caller is /// already registered the metadata is updated. If the caller is not registered this call throws NotRegistered. /// /// # Args: @@ -511,6 +581,92 @@ mod dispatches { protocol, placeholder1, placeholder2, + None, + ) + } + + /// Same as `serve_axon` but takes a certificate as an extra optional argument. + /// Serves or updates axon /prometheus information for the neuron associated with the caller. 
If the caller is + /// already registered the metadata is updated. If the caller is not registered this call throws NotRegistered. + /// + /// # Args: + /// * 'origin': (Origin): + /// - The signature of the caller. + /// + /// * 'netuid' (u16): + /// - The u16 network identifier. + /// + /// * 'version' (u64): + /// - The bittensor version identifier. + /// + /// * 'ip' (u64): + /// - The endpoint ip information as a u128 encoded integer. + /// + /// * 'port' (u16): + /// - The endpoint port information as a u16 encoded integer. + /// + /// * 'ip_type' (u8): + /// - The endpoint ip version as a u8, 4 or 6. + /// + /// * 'protocol' (u8): + /// - UDP:1 or TCP:0 + /// + /// * 'placeholder1' (u8): + /// - Placeholder for further extra params. + /// + /// * 'placeholder2' (u8): + /// - Placeholder for further extra params. + /// + /// * 'certificate' (Vec): + /// - TLS certificate for inter neuron communitation. + /// + /// # Event: + /// * AxonServed; + /// - On successfully serving the axon info. + /// + /// # Raises: + /// * 'SubNetworkDoesNotExist': + /// - Attempting to set weights on a non-existent network. + /// + /// * 'NotRegistered': + /// - Attempting to set weights from a non registered account. + /// + /// * 'InvalidIpType': + /// - The ip type is not 4 or 6. + /// + /// * 'InvalidIpAddress': + /// - The numerically encoded ip address does not resolve to a proper ip. + /// + /// * 'ServingRateLimitExceeded': + /// - Attempting to set prometheus information withing the rate limit min. 
+ /// + #[pallet::call_index(40)] + #[pallet::weight((Weight::from_parts(46_000_000, 0) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(1)), DispatchClass::Normal, Pays::No))] + pub fn serve_axon_tls( + origin: OriginFor, + netuid: u16, + version: u32, + ip: u128, + port: u16, + ip_type: u8, + protocol: u8, + placeholder1: u8, + placeholder2: u8, + certificate: Vec, + ) -> DispatchResult { + Self::do_serve_axon( + origin, + netuid, + version, + ip, + port, + ip_type, + protocol, + placeholder1, + placeholder2, + Some(certificate), ) } diff --git a/pallets/subtensor/src/macros/errors.rs b/pallets/subtensor/src/macros/errors.rs index 22a0a6f89..aab849994 100644 --- a/pallets/subtensor/src/macros/errors.rs +++ b/pallets/subtensor/src/macros/errors.rs @@ -114,12 +114,8 @@ mod errors { DelegateTakeTooLow, /// Delegate take is too high. DelegateTakeTooHigh, - /// Not allowed to commit weights. - WeightsCommitNotAllowed, /// No commit found for the provided hotkey+netuid combination when attempting to reveal the weights. NoWeightsCommitFound, - /// Not the correct block/range to reveal weights. - InvalidRevealCommitTempo, /// Committed hash does not equal the hashed reveal data. InvalidRevealCommitHashNotMatch, /// Attempting to call set_weights when commit/reveal is enabled @@ -184,5 +180,15 @@ mod errors { TxChildkeyTakeRateLimitExceeded, /// Invalid identity. InvalidIdentity, + /// Maximum commit limit reached + TooManyUnrevealedCommits, + /// Attempted to reveal weights that are expired. + ExpiredWeightCommit, + /// Attempted to reveal weights too early. + RevealTooEarly, + /// Attempted to batch reveal weights with mismatched vector input lenghts. + InputLengthsUnequal, + /// A transactor exceeded the rate limit for setting weights. 
+ CommittingWeightsTooFast, } } diff --git a/pallets/subtensor/src/macros/events.rs b/pallets/subtensor/src/macros/events.rs index ac6b69012..f3b03684d 100644 --- a/pallets/subtensor/src/macros/events.rs +++ b/pallets/subtensor/src/macros/events.rs @@ -204,5 +204,25 @@ mod events { ColdkeySwapScheduleDurationSet(BlockNumberFor), /// The duration of dissolve network has been set DissolveNetworkScheduleDurationSet(BlockNumberFor), + /// Weights have been successfully committed. + /// + /// - **who**: The account ID of the user committing the weights. + /// - **netuid**: The network identifier. + /// - **commit_hash**: The hash representing the committed weights. + WeightsCommitted(T::AccountId, u16, H256), + + /// Weights have been successfully revealed. + /// + /// - **who**: The account ID of the user revealing the weights. + /// - **netuid**: The network identifier. + /// - **commit_hash**: The hash of the revealed weights. + WeightsRevealed(T::AccountId, u16, H256), + + /// Weights have been successfully batch revealed. + /// + /// - **who**: The account ID of the user revealing the weights. + /// - **netuid**: The network identifier. + /// - **revealed_hashes**: A vector of hashes representing each revealed weight set. 
+ WeightsBatchRevealed(T::AccountId, u16, Vec), } } diff --git a/pallets/subtensor/src/macros/hooks.rs b/pallets/subtensor/src/macros/hooks.rs index 76f140002..1077acb76 100644 --- a/pallets/subtensor/src/macros/hooks.rs +++ b/pallets/subtensor/src/macros/hooks.rs @@ -70,7 +70,9 @@ mod hooks { // Storage version v8 -> v9 .saturating_add(migrations::migrate_fix_total_coldkey_stake::migrate_fix_total_coldkey_stake::()) // Migrate Delegate Ids on chain - .saturating_add(migrations::migrate_chain_identity::migrate_set_hotkey_identities::()); + .saturating_add(migrations::migrate_chain_identity::migrate_set_hotkey_identities::()) + // Migrate Commit-Reval 2.0 + .saturating_add(migrations::migrate_commit_reveal_v2::migrate_commit_reveal_2::()); weight } diff --git a/pallets/subtensor/src/migrations/migrate_commit_reveal_v2.rs b/pallets/subtensor/src/migrations/migrate_commit_reveal_v2.rs new file mode 100644 index 000000000..b8b831b61 --- /dev/null +++ b/pallets/subtensor/src/migrations/migrate_commit_reveal_v2.rs @@ -0,0 +1,88 @@ +use super::*; +use crate::HasMigrationRun; +use frame_support::{traits::Get, weights::Weight}; +use scale_info::prelude::string::String; +use sp_io::{hashing::twox_128, storage::clear_prefix, KillStorageResult}; + +pub fn migrate_commit_reveal_2() -> Weight { + let migration_name = b"migrate_commit_reveal_2_v2".to_vec(); + let mut weight = T::DbWeight::get().reads(1); + + if HasMigrationRun::::get(&migration_name) { + log::info!( + "Migration '{:?}' has already run. 
Skipping.", + migration_name + ); + return weight; + } + + log::info!( + "Running migration '{}'", + String::from_utf8_lossy(&migration_name) + ); + + // ------------------------------ + // Step 1: Remove WeightCommitRevealInterval entries + // ------------------------------ + + let mut weight_commit_reveal_interval_prefix = Vec::new(); + weight_commit_reveal_interval_prefix.extend_from_slice(&twox_128("SubtensorModule".as_bytes())); + weight_commit_reveal_interval_prefix + .extend_from_slice(&twox_128("WeightCommitRevealInterval".as_bytes())); + + let removal_results = clear_prefix(&weight_commit_reveal_interval_prefix, Some(u32::MAX)); + + let removed_entries_count = match removal_results { + KillStorageResult::AllRemoved(removed) => removed as u64, + KillStorageResult::SomeRemaining(removed) => { + log::info!("Failed To Remove Some Items During migrate_commit_reveal_v2"); + removed as u64 + } + }; + + weight = weight.saturating_add(T::DbWeight::get().writes(removed_entries_count)); + + log::info!( + "Removed {:?} entries from WeightCommitRevealInterval.", + removed_entries_count + ); + + // ------------------------------ + // Step 2: Remove WeightCommits entries + // ------------------------------ + + let mut weight_commits_prefix = Vec::new(); + weight_commits_prefix.extend_from_slice(&twox_128("SubtensorModule".as_bytes())); + weight_commits_prefix.extend_from_slice(&twox_128("WeightCommits".as_bytes())); + + let removal_results_commits = clear_prefix(&weight_commits_prefix, Some(u32::MAX)); + + let removed_commits_entries = match removal_results_commits { + KillStorageResult::AllRemoved(removed) => removed as u64, + KillStorageResult::SomeRemaining(removed) => { + log::info!("Failed To Remove Some Items During migrate_commit_reveal_v2"); + removed as u64 + } + }; + + weight = weight.saturating_add(T::DbWeight::get().writes(removed_commits_entries)); + + log::info!( + "Removed {} entries from WeightCommits.", + removed_commits_entries + ); + + // 
------------------------------ + // Step 3: Mark Migration as Completed + // ------------------------------ + + HasMigrationRun::::insert(&migration_name, true); + weight = weight.saturating_add(T::DbWeight::get().writes(1)); + + log::info!( + "Migration '{:?}' completed successfully.", + String::from_utf8_lossy(&migration_name) + ); + + weight +} diff --git a/pallets/subtensor/src/migrations/mod.rs b/pallets/subtensor/src/migrations/mod.rs index 6036b23e0..a0ee65998 100644 --- a/pallets/subtensor/src/migrations/mod.rs +++ b/pallets/subtensor/src/migrations/mod.rs @@ -1,5 +1,6 @@ use super::*; pub mod migrate_chain_identity; +pub mod migrate_commit_reveal_v2; pub mod migrate_create_root_network; pub mod migrate_delete_subnet_21; pub mod migrate_delete_subnet_3; diff --git a/pallets/subtensor/src/rpc_info/subnet_info.rs b/pallets/subtensor/src/rpc_info/subnet_info.rs index 9b22e0401..bdd420821 100644 --- a/pallets/subtensor/src/rpc_info/subnet_info.rs +++ b/pallets/subtensor/src/rpc_info/subnet_info.rs @@ -252,7 +252,7 @@ impl Pallet { let max_validators = Self::get_max_allowed_validators(netuid); let adjustment_alpha = Self::get_adjustment_alpha(netuid); let difficulty = Self::get_difficulty_as_u64(netuid); - let commit_reveal_weights_interval = Self::get_commit_reveal_weights_interval(netuid); + let commit_reveal_periods = Self::get_reveal_period(netuid); let commit_reveal_weights_enabled = Self::get_commit_reveal_weights_enabled(netuid); let liquid_alpha_enabled = Self::get_liquid_alpha_enabled(netuid); let (alpha_low, alpha_high): (u16, u16) = Self::get_alpha_values(netuid); @@ -280,7 +280,7 @@ impl Pallet { max_validators: max_validators.into(), adjustment_alpha: adjustment_alpha.into(), difficulty: difficulty.into(), - commit_reveal_weights_interval: commit_reveal_weights_interval.into(), + commit_reveal_weights_interval: commit_reveal_periods.into(), commit_reveal_weights_enabled, alpha_high: alpha_high.into(), alpha_low: alpha_low.into(), diff --git 
a/pallets/subtensor/src/staking/add_stake.rs b/pallets/subtensor/src/staking/add_stake.rs index c9cbd7e04..72d8374bc 100644 --- a/pallets/subtensor/src/staking/add_stake.rs +++ b/pallets/subtensor/src/staking/add_stake.rs @@ -70,8 +70,10 @@ impl Pallet { Error::::StakeRateLimitExceeded ); - // Set the last time the stake increased for nominator drain protection. - LastAddStakeIncrease::::insert(&hotkey, &coldkey, Self::get_current_block_as_u64()); + // Track this addition in the stake delta. + StakeDeltaSinceLastEmissionDrain::::mutate(&hotkey, &coldkey, |stake_delta| { + *stake_delta = stake_delta.saturating_add_unsigned(stake_to_be_added as u128); + }); // If coldkey is not owner of the hotkey, it's a nomination stake. if !Self::coldkey_owns_hotkey(&coldkey, &hotkey) { diff --git a/pallets/subtensor/src/subnets/serving.rs b/pallets/subtensor/src/subnets/serving.rs index 1a9240c36..7e2b9a0f0 100644 --- a/pallets/subtensor/src/subnets/serving.rs +++ b/pallets/subtensor/src/subnets/serving.rs @@ -31,6 +31,9 @@ impl Pallet { /// * 'placeholder2' (u8): /// - Placeholder for further extra params. /// + /// * 'certificate' (Option>): + /// - Certificate for mutual Tls connection between neurons + /// /// # Event: /// * AxonServed; /// - On successfully serving the axon info. @@ -61,6 +64,7 @@ impl Pallet { protocol: u8, placeholder1: u8, placeholder2: u8, + certificate: Option>, ) -> dispatch::DispatchResult { // We check the callers (hotkey) signature. let hotkey_id = ensure_signed(origin)?; @@ -86,6 +90,13 @@ impl Pallet { Error::::ServingRateLimitExceeded ); + // Check certificate + if let Some(certificate) = certificate { + if let Ok(certificate) = NeuronCertificateOf::try_from(certificate) { + NeuronCertificates::::insert(netuid, hotkey_id.clone(), certificate) + } + } + // We insert the axon meta. 
prev_axon.block = Self::get_current_block_as_u64(); prev_axon.version = version; diff --git a/pallets/subtensor/src/subnets/uids.rs b/pallets/subtensor/src/subnets/uids.rs index fff358f1c..2a5ceedb4 100644 --- a/pallets/subtensor/src/subnets/uids.rs +++ b/pallets/subtensor/src/subnets/uids.rs @@ -45,6 +45,9 @@ impl Pallet { Uids::::insert(netuid, new_hotkey.clone(), uid_to_replace); // Make uid - hotkey association. BlockAtRegistration::::insert(netuid, uid_to_replace, block_number); // Fill block at registration. IsNetworkMember::::insert(new_hotkey.clone(), netuid, true); // Fill network is member. + + // 4. Clear neuron certificates + NeuronCertificates::::remove(netuid, old_hotkey.clone()); } /// Appends the uid to the network. diff --git a/pallets/subtensor/src/subnets/weights.rs b/pallets/subtensor/src/subnets/weights.rs index 1a53e44cc..87042f456 100644 --- a/pallets/subtensor/src/subnets/weights.rs +++ b/pallets/subtensor/src/subnets/weights.rs @@ -2,7 +2,7 @@ use super::*; use crate::epoch::math::*; use sp_core::H256; use sp_runtime::traits::{BlakeTwo256, Hash}; -use sp_std::vec; +use sp_std::{collections::vec_deque::VecDeque, vec}; impl Pallet { /// ---- The implementation for committing weight hashes. @@ -18,34 +18,91 @@ impl Pallet { /// - The hash representing the committed weights. /// /// # Raises: - /// * `WeightsCommitNotAllowed`: - /// - Attempting to commit when it is not allowed. + /// * `CommitRevealDisabled`: + /// - Raised if commit-reveal is disabled for the specified network. /// + /// * `HotKeyNotRegisteredInSubNet`: + /// - Raised if the hotkey is not registered on the specified network. + /// + /// * `CommittingWeightsTooFast`: + /// - Raised if the hotkey's commit rate exceeds the permitted limit. + /// + /// * `TooManyUnrevealedCommits`: + /// - Raised if the hotkey has reached the maximum number of unrevealed commits. + /// + /// # Events: + /// * `WeightsCommitted`: + /// - Emitted upon successfully storing the weight hash. 
pub fn do_commit_weights( origin: T::RuntimeOrigin, netuid: u16, commit_hash: H256, ) -> DispatchResult { + // 1. Verify the caller's signature (hotkey). let who = ensure_signed(origin)?; - log::debug!("do_commit_weights( hotkey:{:?} netuid:{:?})", who, netuid); + log::debug!("do_commit_weights(hotkey: {:?}, netuid: {:?})", who, netuid); + // 2. Ensure commit-reveal is enabled. ensure!( Self::get_commit_reveal_weights_enabled(netuid), Error::::CommitRevealDisabled ); + // 3. Ensure the hotkey is registered on the network. ensure!( - Self::can_commit(netuid, &who), - Error::::WeightsCommitNotAllowed + Self::is_hotkey_registered_on_network(netuid, &who), + Error::::HotKeyNotRegisteredInSubNet ); - WeightCommits::::insert( - netuid, - &who, - (commit_hash, Self::get_current_block_as_u64()), + // 4. Check that the commit rate does not exceed the allowed frequency. + let commit_block = Self::get_current_block_as_u64(); + let neuron_uid = Self::get_uid_for_net_and_hotkey(netuid, &who)?; + ensure!( + Self::check_rate_limit(netuid, neuron_uid, commit_block), + Error::::CommittingWeightsTooFast ); - Ok(()) + + // 5. Calculate the reveal blocks based on network tempo and reveal period. + let (first_reveal_block, last_reveal_block) = Self::get_reveal_blocks(netuid, commit_block); + + // 6. Retrieve or initialize the VecDeque of commits for the hotkey. + WeightCommits::::try_mutate(netuid, &who, |maybe_commits| -> DispatchResult { + let mut commits: VecDeque<(H256, u64, u64, u64)> = + maybe_commits.take().unwrap_or_default(); + + // 7. Remove any expired commits from the front of the queue. + while let Some((_, commit_block_existing, _, _)) = commits.front() { + if Self::is_commit_expired(netuid, *commit_block_existing) { + commits.pop_front(); + } else { + break; + } + } + + // 8. Verify that the number of unrevealed commits is within the allowed limit. + ensure!(commits.len() < 10, Error::::TooManyUnrevealedCommits); + + // 9. 
Append the new commit with calculated reveal blocks. + commits.push_back(( + commit_hash, + commit_block, + first_reveal_block, + last_reveal_block, + )); + + // 10. Store the updated commits queue back to storage. + *maybe_commits = Some(commits); + + // 11. Emit the WeightsCommitted event + Self::deposit_event(Event::WeightsCommitted(who.clone(), netuid, commit_hash)); + + // 12. Update the last commit block for the hotkey's UID. + Self::set_last_update_for_uid(netuid, neuron_uid, commit_block); + + // 13. Return success. + Ok(()) + }) } /// ---- The implementation for revealing committed weights. @@ -63,22 +120,27 @@ impl Pallet { /// * `values` (`Vec`): /// - The values of the weights being revealed. /// - /// * `salt` (`Vec`): - /// - The values of the weights being revealed. + /// * `salt` (`Vec`): + /// - The salt used to generate the commit hash. /// /// * `version_key` (`u64`): /// - The network version key. /// /// # Raises: + /// * `CommitRevealDisabled`: + /// - Attempting to reveal weights when the commit-reveal mechanism is disabled. + /// /// * `NoWeightsCommitFound`: /// - Attempting to reveal weights without an existing commit. /// - /// * `InvalidRevealCommitHashNotMatchTempo`: - /// - Attempting to reveal weights outside the valid tempo. + /// * `ExpiredWeightCommit`: + /// - Attempting to reveal a weight commit that has expired. /// - /// * `InvalidRevealCommitHashNotMatch`: - /// - The revealed hash does not match the committed hash. + /// * `RevealTooEarly`: + /// - Attempting to reveal weights outside the valid reveal period. /// + /// * `InvalidRevealCommitHashNotMatch`: + /// - The revealed hash does not match any committed hash. pub fn do_reveal_weights( origin: T::RuntimeOrigin, netuid: u16, @@ -87,25 +149,36 @@ impl Pallet { salt: Vec, version_key: u64, ) -> DispatchResult { + // --- 1. Check the caller's signature (hotkey). 
let who = ensure_signed(origin.clone())?; log::debug!("do_reveal_weights( hotkey:{:?} netuid:{:?})", who, netuid); + // --- 2. Ensure commit-reveal is enabled for the network. ensure!( Self::get_commit_reveal_weights_enabled(netuid), Error::::CommitRevealDisabled ); - WeightCommits::::try_mutate_exists(netuid, &who, |maybe_commit| -> DispatchResult { - let (commit_hash, commit_block) = maybe_commit - .as_ref() + // --- 3. Mutate the WeightCommits to retrieve existing commits for the user. + WeightCommits::::try_mutate_exists(netuid, &who, |maybe_commits| -> DispatchResult { + let commits = maybe_commits + .as_mut() .ok_or(Error::::NoWeightsCommitFound)?; - ensure!( - Self::is_reveal_block_range(netuid, *commit_block), - Error::::InvalidRevealCommitTempo - ); + // --- 4. Remove any expired commits from the front of the queue, collecting their hashes. + let mut expired_hashes = Vec::new(); + while let Some((hash, commit_block, _, _)) = commits.front() { + if Self::is_commit_expired(netuid, *commit_block) { + // Collect the expired commit hash + expired_hashes.push(*hash); + commits.pop_front(); + } else { + break; + } + } + // --- 5. Hash the provided data. let provided_hash: H256 = BlakeTwo256::hash_of(&( who.clone(), netuid, @@ -114,12 +187,238 @@ impl Pallet { salt.clone(), version_key, )); - ensure!( - provided_hash == *commit_hash, - Error::::InvalidRevealCommitHashNotMatch - ); - Self::do_set_weights(origin, netuid, uids, values, version_key) + // --- 6. After removing expired commits, check if any commits are left. + if commits.is_empty() { + // Check if provided_hash matches any expired commits + if expired_hashes.contains(&provided_hash) { + return Err(Error::::ExpiredWeightCommit.into()); + } else { + return Err(Error::::NoWeightsCommitFound.into()); + } + } + + // --- 7. Search for the provided_hash in the non-expired commits. + if let Some(position) = commits + .iter() + .position(|(hash, _, _, _)| *hash == provided_hash) + { + // --- 8. 
Get the commit block for the commit being revealed. + let (_, commit_block, _, _) = commits + .get(position) + .ok_or(Error::::NoWeightsCommitFound)?; + + // --- 9. Ensure the commit is ready to be revealed in the current block range. + ensure!( + Self::is_reveal_block_range(netuid, *commit_block), + Error::::RevealTooEarly + ); + + // --- 10. Remove all commits up to and including the one being revealed. + for _ in 0..=position { + commits.pop_front(); + } + + // --- 11. If the queue is now empty, remove the storage entry for the user. + if commits.is_empty() { + *maybe_commits = None; + } + + // --- 12. Proceed to set the revealed weights. + Self::do_set_weights(origin, netuid, uids.clone(), values.clone(), version_key)?; + + // --- 13. Emit the WeightsRevealed event. + Self::deposit_event(Event::WeightsRevealed(who.clone(), netuid, provided_hash)); + + // --- 14. Return ok. + Ok(()) + } else { + // --- 15. The provided_hash does not match any non-expired commits. + if expired_hashes.contains(&provided_hash) { + Err(Error::::ExpiredWeightCommit.into()) + } else { + Err(Error::::InvalidRevealCommitHashNotMatch.into()) + } + } + }) + } + + /// ---- The implementation for batch revealing committed weights. + /// + /// # Args: + /// * `origin`: (`::RuntimeOrigin`): + /// - The signature of the revealing hotkey. + /// + /// * `netuid` (`u16`): + /// - The u16 network identifier. + /// + /// * `uids_list` (`Vec>`): + /// - A list of uids for each set of weights being revealed. + /// + /// * `values_list` (`Vec>`): + /// - A list of values for each set of weights being revealed. + /// + /// * `salts_list` (`Vec>`): + /// - A list of salts used to generate the commit hashes. + /// + /// * `version_keys` (`Vec`): + /// - A list of network version keys. + /// + /// # Raises: + /// * `CommitRevealDisabled`: + /// - Attempting to reveal weights when the commit-reveal mechanism is disabled. 
+ /// + /// * `NoWeightsCommitFound`: + /// - Attempting to reveal weights without an existing commit. + /// + /// * `ExpiredWeightCommit`: + /// - Attempting to reveal a weight commit that has expired. + /// + /// * `RevealTooEarly`: + /// - Attempting to reveal weights outside the valid reveal period. + /// + /// * `InvalidRevealCommitHashNotMatch`: + /// - The revealed hash does not match any committed hash. + /// + /// * `InputLengthsUnequal`: + /// - The input vectors are of mismatched lengths. + pub fn do_batch_reveal_weights( + origin: T::RuntimeOrigin, + netuid: u16, + uids_list: Vec>, + values_list: Vec>, + salts_list: Vec>, + version_keys: Vec, + ) -> DispatchResult { + // --- 1. Check that the input lists are of the same length. + let num_reveals = uids_list.len(); + ensure!( + num_reveals == values_list.len() + && num_reveals == salts_list.len() + && num_reveals == version_keys.len(), + Error::::InputLengthsUnequal + ); + + // --- 2. Check the caller's signature (hotkey). + let who = ensure_signed(origin.clone())?; + + log::debug!( + "do_batch_reveal_weights( hotkey:{:?} netuid:{:?})", + who, + netuid + ); + + // --- 3. Ensure commit-reveal is enabled for the network. + ensure!( + Self::get_commit_reveal_weights_enabled(netuid), + Error::::CommitRevealDisabled + ); + + // --- 4. Mutate the WeightCommits to retrieve existing commits for the user. + WeightCommits::::try_mutate_exists(netuid, &who, |maybe_commits| -> DispatchResult { + let commits = maybe_commits + .as_mut() + .ok_or(Error::::NoWeightsCommitFound)?; + + // --- 5. Remove any expired commits from the front of the queue, collecting their hashes. + let mut expired_hashes = Vec::new(); + while let Some((hash, commit_block, _, _)) = commits.front() { + if Self::is_commit_expired(netuid, *commit_block) { + // Collect the expired commit hash + expired_hashes.push(*hash); + commits.pop_front(); + } else { + break; + } + } + + // --- 6. 
Prepare to collect all provided hashes and their corresponding reveals. + let mut provided_hashes = Vec::new(); + let mut reveals = Vec::new(); + let mut revealed_hashes: Vec = Vec::with_capacity(num_reveals); + + for ((uids, values), (salt, version_key)) in uids_list + .into_iter() + .zip(values_list) + .zip(salts_list.into_iter().zip(version_keys)) + { + // --- 6a. Hash the provided data. + let provided_hash: H256 = BlakeTwo256::hash_of(&( + who.clone(), + netuid, + uids.clone(), + values.clone(), + salt.clone(), + version_key, + )); + provided_hashes.push(provided_hash); + reveals.push((uids, values, version_key, provided_hash)); + } + + // --- 7. Validate all reveals first to ensure atomicity. + for (_uids, _values, _version_key, provided_hash) in &reveals { + // --- 7a. Check if the provided_hash is in the non-expired commits. + if !commits + .iter() + .any(|(hash, _, _, _)| *hash == *provided_hash) + { + // --- 7b. If not found, check if it matches any expired commits. + if expired_hashes.contains(provided_hash) { + return Err(Error::::ExpiredWeightCommit.into()); + } else { + return Err(Error::::InvalidRevealCommitHashNotMatch.into()); + } + } + + // --- 7c. Find the commit corresponding to the provided_hash. + let commit = commits + .iter() + .find(|(hash, _, _, _)| *hash == *provided_hash) + .ok_or(Error::::NoWeightsCommitFound)?; + + // --- 7d. Check if the commit is within the reveal window. + ensure!( + Self::is_reveal_block_range(netuid, commit.1), + Error::::RevealTooEarly + ); + } + + // --- 8. All reveals are valid. Proceed to remove and process each reveal. + for (uids, values, version_key, provided_hash) in reveals { + // --- 8a. Find the position of the provided_hash. + if let Some(position) = commits + .iter() + .position(|(hash, _, _, _)| *hash == provided_hash) + { + // --- 8b. Remove the commit from the queue. + commits.remove(position); + + // --- 8c. Proceed to set the revealed weights. 
+ Self::do_set_weights(origin.clone(), netuid, uids, values, version_key)?; + + // --- 8d. Collect the revealed hash. + revealed_hashes.push(provided_hash); + } else if expired_hashes.contains(&provided_hash) { + return Err(Error::::ExpiredWeightCommit.into()); + } else { + return Err(Error::::InvalidRevealCommitHashNotMatch.into()); + } + } + + // --- 9. If the queue is now empty, remove the storage entry for the user. + if commits.is_empty() { + *maybe_commits = None; + } + + // --- 10. Emit the WeightsBatchRevealed event with all revealed hashes. + Self::deposit_event(Event::WeightsBatchRevealed( + who.clone(), + netuid, + revealed_hashes, + )); + + // --- 11. Return ok. + Ok(()) }) } @@ -241,10 +540,12 @@ impl Pallet { // --- 9. Ensure the uid is not setting weights faster than the weights_set_rate_limit. let neuron_uid = Self::get_uid_for_net_and_hotkey(netuid, &hotkey)?; let current_block: u64 = Self::get_current_block_as_u64(); - ensure!( - Self::check_rate_limit(netuid, neuron_uid, current_block), - Error::::SettingWeightsTooFast - ); + if !Self::get_commit_reveal_weights_enabled(netuid) { + ensure!( + Self::check_rate_limit(netuid, neuron_uid, current_block), + Error::::SettingWeightsTooFast + ); + } // --- 10. Check that the neuron uid is an allowed validator permitted to set non-self weights. ensure!( @@ -286,7 +587,9 @@ impl Pallet { Weights::::insert(netuid, neuron_uid, zipped_weights); // --- 18. Set the activity for the weights on this network. - Self::set_last_update_for_uid(netuid, neuron_uid, current_block); + if !Self::get_commit_reveal_weights_enabled(netuid) { + Self::set_last_update_for_uid(netuid, neuron_uid, current_block); + } // --- 19. Emit the tracking event. 
log::debug!( @@ -452,50 +755,55 @@ impl Pallet { uids.len() <= subnetwork_n as usize } - #[allow(clippy::arithmetic_side_effects)] - pub fn can_commit(netuid: u16, who: &T::AccountId) -> bool { - if let Some((_hash, commit_block)) = WeightCommits::::get(netuid, who) { - let interval: u64 = Self::get_commit_reveal_weights_interval(netuid); - if interval == 0 { - return true; //prevent division by 0 - } + pub fn is_reveal_block_range(netuid: u16, commit_block: u64) -> bool { + let current_block: u64 = Self::get_current_block_as_u64(); + let commit_epoch: u64 = Self::get_epoch_index(netuid, commit_block); + let current_epoch: u64 = Self::get_epoch_index(netuid, current_block); + let reveal_period: u64 = Self::get_reveal_period(netuid); - let current_block: u64 = Self::get_current_block_as_u64(); - let interval_start: u64 = current_block.saturating_sub(current_block % interval); - let last_commit_interval_start: u64 = - commit_block.saturating_sub(commit_block % interval); + // Reveal is allowed only in the exact epoch `commit_epoch + reveal_period` + current_epoch == commit_epoch.saturating_add(reveal_period) + } - // Allow commit if we're within the interval bounds - if current_block <= interval_start.saturating_add(interval) - && interval_start > last_commit_interval_start - { - return true; - } + pub fn get_epoch_index(netuid: u16, block_number: u64) -> u64 { + let tempo: u64 = Self::get_tempo(netuid) as u64; + let tempo_plus_one: u64 = tempo.saturating_add(1); + let netuid_plus_one: u64 = (netuid as u64).saturating_add(1); + let block_with_offset: u64 = block_number.saturating_add(netuid_plus_one); - false - } else { - true - } + block_with_offset.checked_div(tempo_plus_one).unwrap_or(0) } - #[allow(clippy::arithmetic_side_effects)] - pub fn is_reveal_block_range(netuid: u16, commit_block: u64) -> bool { - let interval: u64 = Self::get_commit_reveal_weights_interval(netuid); - if interval == 0 { - return true; //prevent division by 0 - } - - let 
commit_interval_start: u64 = commit_block.saturating_sub(commit_block % interval); // Find the start of the interval in which the commit occurred - let reveal_interval_start: u64 = commit_interval_start.saturating_add(interval); // Start of the next interval after the commit interval + pub fn is_commit_expired(netuid: u16, commit_block: u64) -> bool { let current_block: u64 = Self::get_current_block_as_u64(); + let current_epoch: u64 = Self::get_epoch_index(netuid, current_block); + let commit_epoch: u64 = Self::get_epoch_index(netuid, commit_block); + let reveal_period: u64 = Self::get_reveal_period(netuid); - // Allow reveal if the current block is within the interval following the commit's interval - if current_block >= reveal_interval_start - && current_block < reveal_interval_start.saturating_add(interval) - { - return true; - } + current_epoch > commit_epoch.saturating_add(reveal_period) + } - false + pub fn get_reveal_blocks(netuid: u16, commit_block: u64) -> (u64, u64) { + let reveal_period: u64 = Self::get_reveal_period(netuid); + let tempo: u64 = Self::get_tempo(netuid) as u64; + let tempo_plus_one: u64 = tempo.saturating_add(1); + let netuid_plus_one: u64 = (netuid as u64).saturating_add(1); + + let commit_epoch: u64 = Self::get_epoch_index(netuid, commit_block); + let reveal_epoch: u64 = commit_epoch.saturating_add(reveal_period); + + let first_reveal_block = reveal_epoch + .saturating_mul(tempo_plus_one) + .saturating_sub(netuid_plus_one); + let last_reveal_block = first_reveal_block.saturating_add(tempo); + + (first_reveal_block, last_reveal_block) + } + + pub fn set_reveal_period(netuid: u16, reveal_period: u64) { + RevealPeriodEpochs::::insert(netuid, reveal_period); + } + pub fn get_reveal_period(netuid: u16) -> u64 { + RevealPeriodEpochs::::get(netuid) } } diff --git a/pallets/subtensor/src/swap/swap_hotkey.rs b/pallets/subtensor/src/swap/swap_hotkey.rs index e872b038e..b16c180c4 100644 --- a/pallets/subtensor/src/swap/swap_hotkey.rs +++ 
b/pallets/subtensor/src/swap/swap_hotkey.rs @@ -206,33 +206,42 @@ impl Pallet { Delegates::::insert(new_hotkey, old_delegate_take); weight.saturating_accrue(T::DbWeight::get().reads_writes(2, 2)); } - // 9. Swap all subnet specific info. + + // 9. swap PendingdHotkeyEmission + if PendingdHotkeyEmission::::contains_key(old_hotkey) { + let old_pending_hotkey_emission = PendingdHotkeyEmission::::get(old_hotkey); + PendingdHotkeyEmission::::remove(old_hotkey); + PendingdHotkeyEmission::::insert(new_hotkey, old_pending_hotkey_emission); + weight.saturating_accrue(T::DbWeight::get().reads_writes(2, 2)); + } + + // 10. Swap all subnet specific info. let all_netuids: Vec = Self::get_all_subnet_netuids(); for netuid in all_netuids { - // 9.1 Remove the previous hotkey and insert the new hotkey from membership. + // 10.1 Remove the previous hotkey and insert the new hotkey from membership. // IsNetworkMember( hotkey, netuid ) -> bool -- is the hotkey a subnet member. let is_network_member: bool = IsNetworkMember::::get(old_hotkey, netuid); IsNetworkMember::::remove(old_hotkey, netuid); IsNetworkMember::::insert(new_hotkey, netuid, is_network_member); weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 2)); - // 9.2 Swap Uids + Keys. + // 10.2 Swap Uids + Keys. // Keys( netuid, hotkey ) -> uid -- the uid the hotkey has in the network if it is a member. // Uids( netuid, hotkey ) -> uid -- the uids that the hotkey has. if is_network_member { - // 9.2.1 Swap the UIDS + // 10.2.1 Swap the UIDS if let Ok(old_uid) = Uids::::try_get(netuid, old_hotkey) { Uids::::remove(netuid, old_hotkey); Uids::::insert(netuid, new_hotkey, old_uid); weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 2)); - // 9.2.2 Swap the keys. + // 10.2.2 Swap the keys. Keys::::insert(netuid, old_uid, new_hotkey.clone()); weight.saturating_accrue(T::DbWeight::get().reads_writes(0, 1)); } } - // 9.3 Swap Prometheus. + // 10.3 Swap Prometheus. 
// Prometheus( netuid, hotkey ) -> prometheus -- the prometheus data that a hotkey has in the network. if is_network_member { if let Ok(old_prometheus_info) = Prometheus::::try_get(netuid, old_hotkey) { @@ -242,7 +251,7 @@ impl Pallet { } } - // 9.4. Swap axons. + // 10.4. Swap axons. // Axons( netuid, hotkey ) -> axon -- the axon that the hotkey has. if is_network_member { if let Ok(old_axon_info) = Axons::::try_get(netuid, old_hotkey) { @@ -252,7 +261,7 @@ impl Pallet { } } - // 9.5 Swap WeightCommits + // 10.5 Swap WeightCommits // WeightCommits( hotkey ) --> Vec -- the weight commits for the hotkey. if is_network_member { if let Ok(old_weight_commits) = WeightCommits::::try_get(netuid, old_hotkey) { @@ -262,7 +271,7 @@ impl Pallet { } } - // 9.6. Swap the subnet loaded emission. + // 10.6. Swap the subnet loaded emission. // LoadedEmission( netuid ) --> Vec<(hotkey, u64)> -- the loaded emission for the subnet. if is_network_member { if let Some(mut old_loaded_emission) = LoadedEmission::::get(netuid) { @@ -276,9 +285,21 @@ impl Pallet { weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 2)); } } + + // 10.7. Swap neuron TLS certificates. + // NeuronCertificates( netuid, hotkey ) -> Vec -- the neuron certificate for the hotkey. + if is_network_member { + if let Ok(old_neuron_certificates) = + NeuronCertificates::::try_get(netuid, old_hotkey) + { + NeuronCertificates::::remove(netuid, old_hotkey); + NeuronCertificates::::insert(netuid, new_hotkey, old_neuron_certificates); + weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 2)); + } + } } - // 10. Swap Stake. + // 11. Swap Stake. // Stake( hotkey, coldkey ) -> stake -- the stake that the hotkey controls on behalf of the coldkey. let stakes: Vec<(T::AccountId, u64)> = Stake::::iter_prefix(old_hotkey).collect(); // Clear the entire old prefix here. @@ -308,7 +329,7 @@ impl Pallet { weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); } - // 11. Swap ChildKeys. + // 12. Swap ChildKeys. 
// ChildKeys( parent, netuid ) --> Vec<(proportion,child)> -- the child keys of the parent. for netuid in Self::get_all_subnet_netuids() { // Get the children of the old hotkey for this subnet @@ -332,7 +353,7 @@ impl Pallet { } } - // 12. Swap ParentKeys. + // 13. Swap ParentKeys. // ParentKeys( child, netuid ) --> Vec<(proportion,parent)> -- the parent keys of the child. for netuid in Self::get_all_subnet_netuids() { // Get the parents of the old hotkey for this subnet @@ -356,6 +377,19 @@ impl Pallet { } } + // 14. Swap Stake Delta for all coldkeys. + for (coldkey, stake_delta) in StakeDeltaSinceLastEmissionDrain::::iter_prefix(old_hotkey) + { + let new_stake_delta = StakeDeltaSinceLastEmissionDrain::::get(new_hotkey, &coldkey); + StakeDeltaSinceLastEmissionDrain::::insert( + new_hotkey, + &coldkey, + new_stake_delta.saturating_add(stake_delta), + ); + StakeDeltaSinceLastEmissionDrain::::remove(old_hotkey, &coldkey); + weight.saturating_accrue(T::DbWeight::get().reads_writes(2, 2)); + } + // Return successful after swapping all the relevant terms. 
Ok(()) } diff --git a/pallets/subtensor/src/utils/misc.rs b/pallets/subtensor/src/utils/misc.rs index 76546a1a2..57cc38786 100644 --- a/pallets/subtensor/src/utils/misc.rs +++ b/pallets/subtensor/src/utils/misc.rs @@ -486,13 +486,6 @@ impl Pallet { Kappa::::insert(netuid, kappa); Self::deposit_event(Event::KappaSet(netuid, kappa)); } - - pub fn get_commit_reveal_weights_interval(netuid: u16) -> u64 { - WeightCommitRevealInterval::::get(netuid) - } - pub fn set_commit_reveal_weights_interval(netuid: u16, interval: u64) { - WeightCommitRevealInterval::::set(netuid, interval); - } pub fn get_commit_reveal_weights_enabled(netuid: u16) -> bool { CommitRevealWeightsEnabled::::get(netuid) } diff --git a/pallets/subtensor/tests/migration.rs b/pallets/subtensor/tests/migration.rs index 6c40d7d78..4ddef882c 100644 --- a/pallets/subtensor/tests/migration.rs +++ b/pallets/subtensor/tests/migration.rs @@ -1,10 +1,19 @@ #![allow(unused, clippy::indexing_slicing, clippy::panic, clippy::unwrap_used)] mod mock; -use frame_support::{assert_ok, weights::Weight}; +use codec::{Decode, Encode}; +use frame_support::{ + assert_ok, + storage::unhashed::{get_raw, put_raw}, + traits::{StorageInstance, StoredMap}, + weights::Weight, + StorageHasher, Twox64Concat, +}; use frame_system::Config; use mock::*; use pallet_subtensor::*; -use sp_core::U256; +use sp_core::{H256, U256}; +use sp_io::hashing::twox_128; +use sp_runtime::traits::Zero; #[test] fn test_initialise_ti() { @@ -430,3 +439,92 @@ fn run_migration_and_check(migration_name: &'static str) -> frame_support::weigh // Return the weight of the executed migration weight } + +#[test] +fn test_migrate_commit_reveal_2() { + new_test_ext(1).execute_with(|| { + // ------------------------------ + // Step 1: Simulate Old Storage Entries + // ------------------------------ + const MIGRATION_NAME: &str = "migrate_commit_reveal_2_v2"; + + let pallet_prefix = twox_128("SubtensorModule".as_bytes()); + let storage_prefix_interval = 
twox_128("WeightCommitRevealInterval".as_bytes()); + let storage_prefix_commits = twox_128("WeightCommits".as_bytes()); + + let netuid: u16 = 1; + let interval_value: u64 = 50u64; + + // Construct the full key for WeightCommitRevealInterval + let mut interval_key = Vec::new(); + interval_key.extend_from_slice(&pallet_prefix); + interval_key.extend_from_slice(&storage_prefix_interval); + interval_key.extend_from_slice(&netuid.encode()); + + put_raw(&interval_key, &interval_value.encode()); + + let test_account: U256 = U256::from(1); + + // Construct the full key for WeightCommits (DoubleMap) + let mut commit_key = Vec::new(); + commit_key.extend_from_slice(&pallet_prefix); + commit_key.extend_from_slice(&storage_prefix_commits); + + // First key (netuid) hashed with Twox64Concat + let netuid_hashed = Twox64Concat::hash(&netuid.encode()); + commit_key.extend_from_slice(&netuid_hashed); + + // Second key (account) hashed with Twox64Concat + let account_hashed = Twox64Concat::hash(&test_account.encode()); + commit_key.extend_from_slice(&account_hashed); + + let commit_value: (H256, u64) = (H256::from_low_u64_be(42), 100); + put_raw(&commit_key, &commit_value.encode()); + + let stored_interval = get_raw(&interval_key).expect("Expected to get a value"); + assert_eq!( + u64::decode(&mut &stored_interval[..]).expect("Failed to decode interval value"), + interval_value + ); + + let stored_commit = get_raw(&commit_key).expect("Expected to get a value"); + assert_eq!( + <(H256, u64)>::decode(&mut &stored_commit[..]).expect("Failed to decode commit value"), + commit_value + ); + + assert!( + !HasMigrationRun::::get(MIGRATION_NAME.as_bytes().to_vec()), + "Migration should not have run yet" + ); + + // ------------------------------ + // Step 2: Run the Migration + // ------------------------------ + let weight = + pallet_subtensor::migrations::migrate_commit_reveal_v2::migrate_commit_reveal_2::( + ); + + assert!( + HasMigrationRun::::get(MIGRATION_NAME.as_bytes().to_vec()), + 
"Migration should be marked as run" + ); + + // ------------------------------ + // Step 3: Verify Migration Effects + // ------------------------------ + let stored_interval_after = get_raw(&interval_key); + assert!( + stored_interval_after.is_none(), + "WeightCommitRevealInterval should be cleared" + ); + + let stored_commit_after = get_raw(&commit_key); + assert!( + stored_commit_after.is_none(), + "WeightCommits entry should be cleared" + ); + + assert!(!weight.is_zero(), "Migration weight should be non-zero"); + }); +} diff --git a/pallets/subtensor/tests/mock.rs b/pallets/subtensor/tests/mock.rs index 6f3b44383..7a2967e8c 100644 --- a/pallets/subtensor/tests/mock.rs +++ b/pallets/subtensor/tests/mock.rs @@ -513,6 +513,24 @@ pub(crate) fn run_to_block(n: u64) { } } +#[allow(dead_code)] +pub(crate) fn step_epochs(count: u16, netuid: u16) { + for _ in 0..count { + let blocks_to_next_epoch = SubtensorModule::blocks_until_next_epoch( + netuid, + SubtensorModule::get_tempo(netuid), + SubtensorModule::get_current_block_as_u64(), + ); + step_block(blocks_to_next_epoch as u16); + + assert!(SubtensorModule::should_run_epoch( + netuid, + SubtensorModule::get_current_block_as_u64() + )); + step_block(1); + } +} + /// Increments current block by `1`, running all hooks associated with doing so, and asserts /// that the block number was in fact incremented. 
/// diff --git a/pallets/subtensor/tests/serving.rs b/pallets/subtensor/tests/serving.rs index 49a963951..6bc30c76f 100644 --- a/pallets/subtensor/tests/serving.rs +++ b/pallets/subtensor/tests/serving.rs @@ -99,6 +99,64 @@ fn test_serving_ok() { }); } +#[test] +fn test_serving_tls_ok() { + new_test_ext(1).execute_with(|| { + let hotkey_account_id = U256::from(1); + let netuid: u16 = 1; + let tempo: u16 = 13; + let version: u32 = 2; + let ip: u128 = 1676056785; + let port: u16 = 128; + let ip_type: u8 = 4; + let modality: u16 = 0; + let protocol: u8 = 0; + let placeholder1: u8 = 0; + let placeholder2: u8 = 0; + let certificate: Vec = "CERT".as_bytes().to_vec(); + add_network(netuid, tempo, modality); + register_ok_neuron(netuid, hotkey_account_id, U256::from(66), 0); + assert_ok!(SubtensorModule::serve_axon_tls( + <::RuntimeOrigin>::signed(hotkey_account_id), + netuid, + version, + ip, + port, + ip_type, + protocol, + placeholder1, + placeholder2, + certificate.clone() + )); + + let stored_certificate = NeuronCertificates::::get(netuid, hotkey_account_id) + .expect("Certificate should exist"); + assert_eq!( + stored_certificate.public_key.clone().into_inner(), + certificate.get(1..).expect("Certificate should exist") + ); + let new_certificate = "UPDATED_CERT".as_bytes().to_vec(); + assert_ok!(SubtensorModule::serve_axon_tls( + <::RuntimeOrigin>::signed(hotkey_account_id), + netuid, + version, + ip, + port, + ip_type, + protocol, + placeholder1, + placeholder2, + new_certificate.clone() + )); + let stored_certificate = NeuronCertificates::::get(netuid, hotkey_account_id) + .expect("Certificate should exist"); + assert_eq!( + stored_certificate.public_key.clone().into_inner(), + new_certificate.get(1..).expect("Certificate should exist") + ); + }); +} + #[test] fn test_serving_set_metadata_update() { new_test_ext(1).execute_with(|| { diff --git a/pallets/subtensor/tests/swap_hotkey.rs b/pallets/subtensor/tests/swap_hotkey.rs index c51a8c550..c3b81629e 100644 --- 
a/pallets/subtensor/tests/swap_hotkey.rs +++ b/pallets/subtensor/tests/swap_hotkey.rs @@ -311,6 +311,38 @@ fn test_swap_axons() { }); } +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test swap_hotkey -- test_swap_certificates --exact --nocapture +#[test] +fn test_swap_certificates() { + new_test_ext(1).execute_with(|| { + let old_hotkey = U256::from(1); + let new_hotkey = U256::from(2); + let coldkey = U256::from(3); + let netuid = 0u16; + let certificate = NeuronCertificate::try_from(vec![1, 2, 3]).unwrap(); + let mut weight = Weight::zero(); + + add_network(netuid, 0, 1); + IsNetworkMember::::insert(old_hotkey, netuid, true); + NeuronCertificates::::insert(netuid, old_hotkey, certificate.clone()); + + assert_ok!(SubtensorModule::perform_hotkey_swap( + &old_hotkey, + &new_hotkey, + &coldkey, + &mut weight + )); + + assert!(!NeuronCertificates::::contains_key( + netuid, old_hotkey + )); + assert_eq!( + NeuronCertificates::::get(netuid, new_hotkey), + Some(certificate) + ); + }); +} +use sp_std::collections::vec_deque::VecDeque; // SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --test swap_hotkey -- test_swap_weight_commits --exact --nocapture #[test] fn test_swap_weight_commits() { @@ -319,12 +351,13 @@ fn test_swap_weight_commits() { let new_hotkey = U256::from(2); let coldkey = U256::from(3); let netuid = 0u16; - let weight_commits = (H256::from_low_u64_be(100), 200); + let mut weight_commits: VecDeque<(H256, u64, u64, u64)> = VecDeque::new(); + weight_commits.push_back((H256::from_low_u64_be(100), 200, 1, 1)); let mut weight = Weight::zero(); add_network(netuid, 0, 1); IsNetworkMember::::insert(old_hotkey, netuid, true); - WeightCommits::::insert(netuid, old_hotkey, weight_commits); + WeightCommits::::insert(netuid, old_hotkey, weight_commits.clone()); assert_ok!(SubtensorModule::perform_hotkey_swap( &old_hotkey, diff --git a/pallets/subtensor/tests/uids.rs b/pallets/subtensor/tests/uids.rs index 82adc6b8a..6b4c00328 100644 --- a/pallets/subtensor/tests/uids.rs 
+++ b/pallets/subtensor/tests/uids.rs @@ -1,8 +1,9 @@ #![allow(clippy::unwrap_used)] use crate::mock::*; -use frame_support::assert_ok; +use frame_support::{assert_err, assert_ok}; use frame_system::Config; +use pallet_subtensor::*; use sp_core::U256; mod mock; @@ -32,6 +33,7 @@ fn test_replace_neuron() { let new_hotkey_account_id = U256::from(2); let _new_colkey_account_id = U256::from(12345); + let certificate = NeuronCertificate::try_from(vec![1, 2, 3]).unwrap(); //add network add_network(netuid, tempo, 0); @@ -51,6 +53,9 @@ fn test_replace_neuron() { let neuron_uid = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkey_account_id); assert_ok!(neuron_uid); + // Set a neuron certificate for it + NeuronCertificates::::insert(netuid, hotkey_account_id, certificate); + // Replace the neuron. SubtensorModule::replace_neuron( netuid, @@ -77,6 +82,10 @@ fn test_replace_neuron() { &new_hotkey_account_id )); assert_eq!(curr_hotkey.unwrap(), new_hotkey_account_id); + + // Check neuron certificate was reset + let certificate = NeuronCertificates::::get(netuid, hotkey_account_id); + assert_eq!(certificate, None); }); } @@ -371,3 +380,24 @@ fn test_replace_neuron_multiple_subnets_unstake_all() { ); }); } + +#[test] +fn test_neuron_certificate() { + new_test_ext(1).execute_with(|| { + // 512 bits key + let mut data = [0; 65].to_vec(); + assert_ok!(NeuronCertificate::try_from(data)); + + // 256 bits key + data = [1; 33].to_vec(); + assert_ok!(NeuronCertificate::try_from(data)); + + // too much data + data = [8; 88].to_vec(); + assert_err!(NeuronCertificate::try_from(data), ()); + + // no data + data = vec![]; + assert_err!(NeuronCertificate::try_from(data), ()); + }); +} diff --git a/pallets/subtensor/tests/weights.rs b/pallets/subtensor/tests/weights.rs index 214e3add0..7dbeba288 100644 --- a/pallets/subtensor/tests/weights.rs +++ b/pallets/subtensor/tests/weights.rs @@ -8,11 +8,13 @@ use frame_support::{ }; use mock::*; use pallet_subtensor::{Error, Owner}; +use 
scale_info::prelude::collections::HashMap; use sp_core::{H256, U256}; use sp_runtime::{ traits::{BlakeTwo256, DispatchInfoOf, Hash, SignedExtension}, DispatchError, }; +use sp_std::collections::vec_deque::VecDeque; use substrate_fixed::types::I32F32; /*************************** @@ -402,12 +404,17 @@ fn test_set_weights_is_root_error() { let uids = vec![0]; let weights = vec![1]; - let salt: Vec = vec![1, 2, 3, 4, 5, 6, 7, 8]; let version_key: u64 = 0; let hotkey = U256::from(1); assert_err!( - commit_reveal_set_weights(hotkey, root_netuid, uids, weights, salt, version_key), + SubtensorModule::set_weights( + RuntimeOrigin::signed(hotkey), + root_netuid, + uids.clone(), + weights.clone(), + version_key, + ), Error::::CanNotSetRootNetworkWeights ); }); @@ -430,14 +437,12 @@ fn test_weights_err_no_validator_permit() { let weights_keys: Vec = vec![1, 2]; let weight_values: Vec = vec![1, 2]; - let salt: Vec = vec![1, 2, 3, 4, 5, 6, 7, 8]; - let result = commit_reveal_set_weights( - hotkey_account_id, + let result = SubtensorModule::set_weights( + RuntimeOrigin::signed(hotkey_account_id), netuid, weights_keys, weight_values, - salt.clone(), 0, ); assert_eq!(result, Err(Error::::NeuronNoValidatorPermit.into())); @@ -448,12 +453,11 @@ fn test_weights_err_no_validator_permit() { SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkey_account_id) .expect("Not registered."); SubtensorModule::set_validator_permit_for_uid(netuid, neuron_uid, true); - let result = commit_reveal_set_weights( - hotkey_account_id, + let result = SubtensorModule::set_weights( + RuntimeOrigin::signed(hotkey_account_id), netuid, weights_keys, weight_values, - salt, 0, ); assert_ok!(result); @@ -470,7 +474,7 @@ fn test_set_weights_min_stake_failed() { let version_key: u64 = 0; let hotkey = U256::from(0); let coldkey = U256::from(0); - let salt: Vec = vec![1, 2, 3, 4, 5, 6, 7, 8]; + add_network(netuid, 0, 0); register_ok_neuron(netuid, hotkey, coldkey, 2143124); 
SubtensorModule::set_weights_min_stake(20_000_000_000_000); @@ -486,24 +490,22 @@ fn test_set_weights_min_stake_failed() { // Check that it fails at the pallet level. SubtensorModule::set_weights_min_stake(100_000_000_000_000); assert_eq!( - commit_reveal_set_weights( - hotkey, + SubtensorModule::set_weights( + RuntimeOrigin::signed(hotkey), netuid, dests.clone(), weights.clone(), - salt.clone(), - version_key + version_key, ), Err(Error::::NotEnoughStakeToSetWeights.into()) ); // Now passes SubtensorModule::increase_stake_on_hotkey_account(&hotkey, 100_000_000_000_000); - assert_ok!(commit_reveal_set_weights( - hotkey, + assert_ok!(SubtensorModule::set_weights( + RuntimeOrigin::signed(hotkey), netuid, dests.clone(), weights.clone(), - salt.clone(), version_key )); }); @@ -517,7 +519,7 @@ fn test_weights_version_key() { let coldkey = U256::from(66); let netuid0: u16 = 1; let netuid1: u16 = 2; - let salt: Vec = vec![1, 2, 3, 4, 5, 6, 7, 8]; + add_network(netuid0, 0, 0); add_network(netuid1, 0, 0); register_ok_neuron(netuid0, hotkey, coldkey, 2143124); @@ -525,20 +527,18 @@ fn test_weights_version_key() { let weights_keys: Vec = vec![0]; let weight_values: Vec = vec![1]; - assert_ok!(commit_reveal_set_weights( - hotkey, + assert_ok!(SubtensorModule::set_weights( + RuntimeOrigin::signed(hotkey), netuid0, weights_keys.clone(), weight_values.clone(), - salt.clone(), 0 )); - assert_ok!(commit_reveal_set_weights( - hotkey, + assert_ok!(SubtensorModule::set_weights( + RuntimeOrigin::signed(hotkey), netuid1, weights_keys.clone(), weight_values.clone(), - salt.clone(), 0 )); @@ -549,42 +549,38 @@ fn test_weights_version_key() { SubtensorModule::set_weights_version_key(netuid1, key1); // Setting works with version key. 
- assert_ok!(commit_reveal_set_weights( - hotkey, + assert_ok!(SubtensorModule::set_weights( + RuntimeOrigin::signed(hotkey), netuid0, weights_keys.clone(), weight_values.clone(), - salt.clone(), key0 )); - assert_ok!(commit_reveal_set_weights( - hotkey, + assert_ok!(SubtensorModule::set_weights( + RuntimeOrigin::signed(hotkey), netuid1, weights_keys.clone(), weight_values.clone(), - salt.clone(), key1 )); // validator:20313 >= network:12312 (accepted: validator newer) - assert_ok!(commit_reveal_set_weights( - hotkey, + assert_ok!(SubtensorModule::set_weights( + RuntimeOrigin::signed(hotkey), netuid0, weights_keys.clone(), weight_values.clone(), - salt.clone(), key1 )); // Setting fails with incorrect keys. // validator:12312 < network:20313 (rejected: validator not updated) assert_eq!( - commit_reveal_set_weights( - hotkey, + SubtensorModule::set_weights( + RuntimeOrigin::signed(hotkey), netuid1, weights_keys.clone(), weight_values.clone(), - salt.clone(), key0 ), Err(Error::::IncorrectWeightVersionKey.into()) @@ -734,7 +730,6 @@ fn test_weights_err_max_weight_limit() { // Add network. let netuid: u16 = 1; let tempo: u16 = 100; - let salt: Vec = vec![1, 2, 3, 4, 5, 6, 7, 8]; add_network(netuid, tempo, 0); // Set params. @@ -795,18 +790,18 @@ fn test_weights_err_max_weight_limit() { // Non self-weight fails. let uids: Vec = vec![1, 2, 3, 4]; let values: Vec = vec![u16::MAX / 4, u16::MAX / 4, u16::MAX / 54, u16::MAX / 4]; - let result = commit_reveal_set_weights(U256::from(0), 1, uids, values, salt.clone(), 0); + let result = + SubtensorModule::set_weights(RuntimeOrigin::signed(U256::from(0)), 1, uids, values, 0); assert_eq!(result, Err(Error::::MaxWeightExceeded.into())); // Self-weight is a success. let uids: Vec = vec![0]; // Self. 
let values: Vec = vec![u16::MAX]; // normalizes to u32::MAX - assert_ok!(commit_reveal_set_weights( - U256::from(0), + assert_ok!(SubtensorModule::set_weights( + RuntimeOrigin::signed(U256::from(0)), 1, uids, values, - salt.clone(), 0 )); }); @@ -893,19 +888,23 @@ fn test_set_weight_not_enough_values() { // Should fail because we are only setting a single value and its not the self weight. let weight_keys: Vec = vec![1]; // not weight. let weight_values: Vec = vec![88]; // random value. - let result = - commit_reveal_set_weights(account_id, 1, weight_keys, weight_values, salt.clone(), 0); + let result = SubtensorModule::set_weights( + RuntimeOrigin::signed(account_id), + 1, + weight_keys, + weight_values, + 0, + ); assert_eq!(result, Err(Error::::WeightVecLengthIsLow.into())); // Shouldnt fail because we setting a single value but it is the self weight. let weight_keys: Vec = vec![0]; // self weight. let weight_values: Vec = vec![88]; // random value. - assert_ok!(commit_reveal_set_weights( - account_id, + assert_ok!(SubtensorModule::set_weights( + RuntimeOrigin::signed(account_id), 1, weight_keys, weight_values, - salt.clone(), 0 )); @@ -930,7 +929,6 @@ fn test_set_weight_too_many_uids() { new_test_ext(0).execute_with(|| { let netuid: u16 = 1; let tempo: u16 = 13; - let salt: Vec = vec![1, 2, 3, 4, 5, 6, 7, 8]; add_network(netuid, tempo, 0); register_ok_neuron(1, U256::from(1), U256::from(2), 100_000); @@ -945,12 +943,11 @@ fn test_set_weight_too_many_uids() { // Should fail because we are setting more weights than there are neurons. let weight_keys: Vec = vec![0, 1, 2, 3, 4]; // more uids than neurons in subnet. let weight_values: Vec = vec![88, 102, 303, 1212, 11]; // random value. 
- let result = commit_reveal_set_weights( - U256::from(1), + let result = SubtensorModule::set_weights( + RuntimeOrigin::signed(U256::from(1)), 1, weight_keys, weight_values, - salt.clone(), 0, ); assert_eq!( @@ -961,12 +958,11 @@ fn test_set_weight_too_many_uids() { // Shouldnt fail because we are setting less weights than there are neurons. let weight_keys: Vec = vec![0, 1]; // Only on neurons that exist. let weight_values: Vec = vec![10, 10]; // random value. - assert_ok!(commit_reveal_set_weights( - U256::from(1), + assert_ok!(SubtensorModule::set_weights( + RuntimeOrigin::signed(U256::from(1)), 1, weight_keys, weight_values, - salt, 0 )); }); @@ -1372,7 +1368,7 @@ fn test_set_weights_commit_reveal_enabled_error() { } #[test] -fn test_commit_reveal_weights_ok() { +fn test_reveal_weights_when_commit_reveal_disabled() { new_test_ext(1).execute_with(|| { let netuid: u16 = 1; let uids: Vec = vec![0, 1]; @@ -1390,24 +1386,88 @@ fn test_commit_reveal_weights_ok() { version_key, )); - add_network(netuid, 0, 0); - register_ok_neuron(netuid, U256::from(3), U256::from(4), 300000); - register_ok_neuron(netuid, U256::from(1), U256::from(2), 100000); + System::set_block_number(0); + + let tempo: u16 = 5; + add_network(netuid, tempo, 0); + + // Register neurons and set up configurations + register_ok_neuron(netuid, U256::from(3), U256::from(4), 300_000); + register_ok_neuron(netuid, U256::from(1), U256::from(2), 100_000); SubtensorModule::set_weights_set_rate_limit(netuid, 5); SubtensorModule::set_validator_permit_for_uid(netuid, 0, true); SubtensorModule::set_validator_permit_for_uid(netuid, 1, true); - SubtensorModule::set_commit_reveal_weights_interval(netuid, 5); + // Enable commit-reveal and commit + SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); + assert_ok!(SubtensorModule::commit_weights( + RuntimeOrigin::signed(hotkey), + netuid, + commit_hash + )); + + step_epochs(1, netuid); + + // Disable commit-reveal before reveal + 
SubtensorModule::set_commit_reveal_weights_enabled(netuid, false); + + // Attempt to reveal, should fail with CommitRevealDisabled + assert_err!( + SubtensorModule::reveal_weights( + RuntimeOrigin::signed(hotkey), + netuid, + uids, + weight_values, + salt, + version_key, + ), + Error::::CommitRevealDisabled + ); + }); +} + +#[test] +fn test_commit_reveal_weights_ok() { + new_test_ext(1).execute_with(|| { + let netuid: u16 = 1; + let uids: Vec = vec![0, 1]; + let weight_values: Vec = vec![10, 10]; + let salt: Vec = vec![1, 2, 3, 4, 5, 6, 7, 8]; + let version_key: u64 = 0; + let hotkey: U256 = U256::from(1); + + let commit_hash: H256 = BlakeTwo256::hash_of(&( + hotkey, + netuid, + uids.clone(), + weight_values.clone(), + salt.clone(), + version_key, + )); + + System::set_block_number(0); + + let tempo: u16 = 5; + add_network(netuid, tempo, 0); + + // Register neurons and set up configurations + register_ok_neuron(netuid, U256::from(3), U256::from(4), 300_000); + register_ok_neuron(netuid, U256::from(1), U256::from(2), 100_000); + SubtensorModule::set_weights_set_rate_limit(netuid, 5); + SubtensorModule::set_validator_permit_for_uid(netuid, 0, true); + SubtensorModule::set_validator_permit_for_uid(netuid, 1, true); SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); + // Commit at block 0 assert_ok!(SubtensorModule::commit_weights( RuntimeOrigin::signed(hotkey), netuid, commit_hash )); - step_block(5); + step_epochs(1, netuid); + // Reveal in the next epoch assert_ok!(SubtensorModule::reveal_weights( RuntimeOrigin::signed(hotkey), netuid, @@ -1420,7 +1480,7 @@ fn test_commit_reveal_weights_ok() { } #[test] -fn test_commit_reveal_interval() { +fn test_commit_reveal_tempo_interval() { new_test_ext(1).execute_with(|| { let netuid: u16 = 1; let uids: Vec = vec![0, 1]; @@ -1438,42 +1498,26 @@ fn test_commit_reveal_interval() { version_key, )); - add_network(netuid, 0, 0); - register_ok_neuron(netuid, U256::from(3), U256::from(4), 300000); - 
register_ok_neuron(netuid, U256::from(1), U256::from(2), 100000); + System::set_block_number(0); + + let tempo: u16 = 100; + add_network(netuid, tempo, 0); + + register_ok_neuron(netuid, U256::from(3), U256::from(4), 300_000); + register_ok_neuron(netuid, U256::from(1), U256::from(2), 100_000); SubtensorModule::set_weights_set_rate_limit(netuid, 5); SubtensorModule::set_validator_permit_for_uid(netuid, 0, true); SubtensorModule::set_validator_permit_for_uid(netuid, 1, true); - - SubtensorModule::set_commit_reveal_weights_interval(netuid, 100); SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); - System::set_block_number(0); + // Commit at block 0 assert_ok!(SubtensorModule::commit_weights( RuntimeOrigin::signed(hotkey), netuid, commit_hash )); - assert_err!( - SubtensorModule::commit_weights(RuntimeOrigin::signed(hotkey), netuid, commit_hash), - Error::::WeightsCommitNotAllowed - ); - assert_err!( - SubtensorModule::reveal_weights( - RuntimeOrigin::signed(hotkey), - netuid, - uids.clone(), - weight_values.clone(), - salt.clone(), - version_key, - ), - Error::::InvalidRevealCommitTempo - ); - step_block(99); - assert_err!( - SubtensorModule::commit_weights(RuntimeOrigin::signed(hotkey), netuid, commit_hash), - Error::::WeightsCommitNotAllowed - ); + + // Attempt to reveal in the same epoch, should fail assert_err!( SubtensorModule::reveal_weights( RuntimeOrigin::signed(hotkey), @@ -1483,9 +1527,11 @@ fn test_commit_reveal_interval() { salt.clone(), version_key, ), - Error::::InvalidRevealCommitTempo + Error::::RevealTooEarly ); - step_block(1); + + step_epochs(1, netuid); + assert_ok!(SubtensorModule::reveal_weights( RuntimeOrigin::signed(hotkey), netuid, @@ -1494,11 +1540,9 @@ fn test_commit_reveal_interval() { salt.clone(), version_key, )); - assert_ok!(SubtensorModule::commit_weights( - RuntimeOrigin::signed(hotkey), - netuid, - commit_hash - )); + + step_block(6); + assert_err!( SubtensorModule::reveal_weights( RuntimeOrigin::signed(hotkey), @@ 
-1508,25 +1552,18 @@ fn test_commit_reveal_interval() { salt.clone(), version_key, ), - Error::::InvalidRevealCommitTempo + Error::::NoWeightsCommitFound ); - step_block(100); - assert_ok!(SubtensorModule::reveal_weights( - RuntimeOrigin::signed(hotkey), - netuid, - uids.clone(), - weight_values.clone(), - salt.clone(), - version_key, - )); - // Testing that if you miss the next tempo you cannot reveal it. assert_ok!(SubtensorModule::commit_weights( RuntimeOrigin::signed(hotkey), netuid, commit_hash )); - step_block(205); + + // step two epochs + step_epochs(2, netuid); + assert_err!( SubtensorModule::reveal_weights( RuntimeOrigin::signed(hotkey), @@ -1536,30 +1573,17 @@ fn test_commit_reveal_interval() { salt.clone(), version_key, ), - Error::::InvalidRevealCommitTempo + Error::::ExpiredWeightCommit ); - // Testing when you commit but do not reveal until later intervals assert_ok!(SubtensorModule::commit_weights( RuntimeOrigin::signed(hotkey), netuid, commit_hash )); - step_block(425); - let commit_hash_2: H256 = BlakeTwo256::hash_of(&( - hotkey, - netuid, - uids.clone(), - weight_values.clone(), - salt.clone(), - version_key + 1, - )); - assert_ok!(SubtensorModule::commit_weights( - RuntimeOrigin::signed(hotkey), - netuid, - commit_hash_2 - )); - step_block(100); + + step_block(50); + assert_err!( SubtensorModule::reveal_weights( RuntimeOrigin::signed(hotkey), @@ -1569,15 +1593,18 @@ fn test_commit_reveal_interval() { salt.clone(), version_key, ), - Error::::InvalidRevealCommitHashNotMatch + Error::::RevealTooEarly ); + + step_epochs(1, netuid); + assert_ok!(SubtensorModule::reveal_weights( RuntimeOrigin::signed(hotkey), netuid, - uids.clone(), - weight_values.clone(), - salt.clone(), - version_key + 1, + uids, + weight_values, + salt, + version_key, )); }); } @@ -1589,17 +1616,19 @@ fn test_commit_reveal_hash() { let uids: Vec = vec![0, 1]; let weight_values: Vec = vec![10, 10]; let salt: Vec = vec![1, 2, 3, 4, 5, 6, 7, 8]; + let bad_salt: Vec = vec![0, 2, 3, 4, 
5, 6, 7, 8]; let version_key: u64 = 0; let hotkey: U256 = U256::from(1); - add_network(netuid, 0, 0); - register_ok_neuron(netuid, U256::from(3), U256::from(4), 300000); - register_ok_neuron(netuid, U256::from(1), U256::from(2), 100000); + add_network(netuid, 5, 0); + System::set_block_number(0); + + register_ok_neuron(netuid, U256::from(3), U256::from(4), 300_000); + register_ok_neuron(netuid, U256::from(1), U256::from(2), 100_000); SubtensorModule::set_weights_set_rate_limit(netuid, 5); SubtensorModule::set_validator_permit_for_uid(netuid, 0, true); SubtensorModule::set_validator_permit_for_uid(netuid, 1, true); - SubtensorModule::set_commit_reveal_weights_interval(netuid, 5); SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); let commit_hash: H256 = BlakeTwo256::hash_of(&( @@ -1617,8 +1646,9 @@ fn test_commit_reveal_hash() { commit_hash )); - step_block(5); + step_epochs(1, netuid); + // Attempt to reveal with incorrect data, should fail assert_err!( SubtensorModule::reveal_weights( RuntimeOrigin::signed(hotkey), @@ -1630,46 +1660,26 @@ fn test_commit_reveal_hash() { ), Error::::InvalidRevealCommitHashNotMatch ); + assert_err!( SubtensorModule::reveal_weights( RuntimeOrigin::signed(hotkey), netuid, uids.clone(), weight_values.clone(), - salt.clone(), - 7, - ), - Error::::InvalidRevealCommitHashNotMatch - ); - assert_err!( - SubtensorModule::reveal_weights( - RuntimeOrigin::signed(hotkey), - netuid, - uids.clone(), - vec![10, 9], - salt.clone(), + bad_salt.clone(), version_key, ), Error::::InvalidRevealCommitHashNotMatch ); - assert_err!( - SubtensorModule::reveal_weights( - RuntimeOrigin::signed(hotkey), - netuid, - vec![0, 1, 2], - vec![10, 10, 33], - salt.clone(), - 9, - ), - Error::::InvalidRevealCommitHashNotMatch - ); + // Correct reveal, should succeed assert_ok!(SubtensorModule::reveal_weights( RuntimeOrigin::signed(hotkey), netuid, uids, weight_values, - salt.clone(), + salt, version_key, )); }); @@ -1694,82 +1704,118 @@ fn 
test_commit_reveal_disabled_or_enabled() { version_key, )); - add_network(netuid, 0, 0); - register_ok_neuron(netuid, U256::from(3), U256::from(4), 300000); - register_ok_neuron(netuid, U256::from(1), U256::from(2), 100000); + add_network(netuid, 5, 0); + System::set_block_number(0); + + register_ok_neuron(netuid, U256::from(3), U256::from(4), 300_000); + register_ok_neuron(netuid, U256::from(1), U256::from(2), 100_000); SubtensorModule::set_weights_set_rate_limit(netuid, 5); SubtensorModule::set_validator_permit_for_uid(netuid, 0, true); SubtensorModule::set_validator_permit_for_uid(netuid, 1, true); - SubtensorModule::set_commit_reveal_weights_interval(netuid, 5); + // Disable commit/reveal SubtensorModule::set_commit_reveal_weights_enabled(netuid, false); + // Attempt to commit, should fail assert_err!( SubtensorModule::commit_weights(RuntimeOrigin::signed(hotkey), netuid, commit_hash), Error::::CommitRevealDisabled ); - step_block(5); + // Enable commit/reveal + SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); - assert_err!( - SubtensorModule::reveal_weights( - RuntimeOrigin::signed(hotkey), - netuid, - uids.clone(), - weight_values.clone(), - salt.clone(), - version_key, - ), - Error::::CommitRevealDisabled - ); + // Commit should now succeed + assert_ok!(SubtensorModule::commit_weights( + RuntimeOrigin::signed(hotkey), + netuid, + commit_hash + )); - SubtensorModule::set_commit_reveal_weights_enabled(netuid + 1, true); + step_epochs(1, netuid); - //Should still fail because bad netuid - assert_err!( - SubtensorModule::commit_weights(RuntimeOrigin::signed(hotkey), netuid, commit_hash), - Error::::CommitRevealDisabled - ); + // Reveal should succeed + assert_ok!(SubtensorModule::reveal_weights( + RuntimeOrigin::signed(hotkey), + netuid, + uids, + weight_values, + salt, + version_key, + )); + }); +} - step_block(5); +#[test] +fn test_toggle_commit_reveal_weights_and_set_weights() { + new_test_ext(1).execute_with(|| { + let netuid: u16 = 1; + let 
uids: Vec = vec![0, 1]; + let weight_values: Vec = vec![10, 10]; + let salt: Vec = vec![1, 2, 3, 4, 5, 6, 7, 8]; + let version_key: u64 = 0; + let hotkey: U256 = U256::from(1); - assert_err!( - SubtensorModule::reveal_weights( - RuntimeOrigin::signed(hotkey), - netuid, - uids.clone(), - weight_values.clone(), - salt.clone(), - version_key, - ), - Error::::CommitRevealDisabled - ); + let commit_hash: H256 = BlakeTwo256::hash_of(&( + hotkey, + netuid, + uids.clone(), + weight_values.clone(), + salt.clone(), + version_key, + )); + + add_network(netuid, 5, 0); + System::set_block_number(0); + + register_ok_neuron(netuid, U256::from(3), U256::from(4), 300_000); + register_ok_neuron(netuid, U256::from(1), U256::from(2), 100_000); + SubtensorModule::set_validator_permit_for_uid(netuid, 0, true); + SubtensorModule::set_validator_permit_for_uid(netuid, 1, true); + SubtensorModule::set_weights_set_rate_limit(netuid, 5); - // Enable and should pass + // Enable commit/reveal SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); + // Commit at block 0 assert_ok!(SubtensorModule::commit_weights( RuntimeOrigin::signed(hotkey), netuid, commit_hash )); - step_block(5); + step_epochs(1, netuid); + // Reveal in the next epoch assert_ok!(SubtensorModule::reveal_weights( + RuntimeOrigin::signed(hotkey), + netuid, + uids.clone(), + weight_values.clone(), + salt.clone(), + version_key, + )); + + // Disable commit/reveal + SubtensorModule::set_commit_reveal_weights_enabled(netuid, false); + + // Advance to allow setting weights (due to rate limit) + step_block(5); + + // Set weights directly + assert_ok!(SubtensorModule::set_weights( RuntimeOrigin::signed(hotkey), netuid, uids, weight_values, - salt.clone(), version_key, )); }); } #[test] -fn test_toggle_commit_reveal_weights_and_set_weights() { - new_test_ext(1).execute_with(|| { +fn test_tempo_change_during_commit_reveal_process() { + new_test_ext(0).execute_with(|| { let netuid: u16 = 1; let uids: Vec = vec![0, 1]; let 
weight_values: Vec = vec![10, 10]; @@ -1786,40 +1832,77 @@ fn test_toggle_commit_reveal_weights_and_set_weights() { version_key, )); - add_network(netuid, 0, 0); - register_ok_neuron(netuid, U256::from(3), U256::from(4), 300000); - register_ok_neuron(netuid, U256::from(1), U256::from(2), 100000); + System::set_block_number(0); + + let tempo: u16 = 100; + add_network(netuid, tempo, 0); + + register_ok_neuron(netuid, U256::from(3), U256::from(4), 300_000); + register_ok_neuron(netuid, U256::from(1), U256::from(2), 100_000); + SubtensorModule::set_weights_set_rate_limit(netuid, 5); SubtensorModule::set_validator_permit_for_uid(netuid, 0, true); SubtensorModule::set_validator_permit_for_uid(netuid, 1, true); + SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); - SubtensorModule::set_weights_set_rate_limit(netuid, 5); - SubtensorModule::set_commit_reveal_weights_interval(netuid, 5); + assert_ok!(SubtensorModule::commit_weights( + RuntimeOrigin::signed(hotkey), + netuid, + commit_hash + )); + log::info!( + "Commit successful at block {}", + SubtensorModule::get_current_block_as_u64() + ); - step_block(5); + step_block(9); + log::info!( + "Advanced to block {}", + SubtensorModule::get_current_block_as_u64() + ); - // Set weights OK - let result = SubtensorModule::set_weights( + let tempo_before_next_reveal: u16 = 200; + log::info!("Changing tempo to {}", tempo_before_next_reveal); + SubtensorModule::set_tempo(netuid, tempo_before_next_reveal); + + step_epochs(1, netuid); + log::info!( + "Advanced to block {}", + SubtensorModule::get_current_block_as_u64() + ); + + assert_ok!(SubtensorModule::reveal_weights( RuntimeOrigin::signed(hotkey), netuid, uids.clone(), weight_values.clone(), - 0, + salt.clone(), + version_key, + )); + log::info!( + "Revealed at block {}", + SubtensorModule::get_current_block_as_u64() ); - assert_ok!(result); - // Enable Commit/Reveal - SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); - - // Commit is enabled the same 
block assert_ok!(SubtensorModule::commit_weights( RuntimeOrigin::signed(hotkey), netuid, commit_hash )); + log::info!( + "Commit successful at block {}", + SubtensorModule::get_current_block_as_u64() + ); + + let tempo: u16 = 150; + log::info!("Changing tempo to {}", tempo); + SubtensorModule::set_tempo(netuid, tempo); - step_block(5); //Step to the next commit/reveal tempo + step_epochs(1, netuid); + log::info!( + "Advanced to block {}", + SubtensorModule::get_current_block_as_u64() + ); - // Reveal OK assert_ok!(SubtensorModule::reveal_weights( RuntimeOrigin::signed(hotkey), netuid, @@ -1828,109 +1911,2280 @@ fn test_toggle_commit_reveal_weights_and_set_weights() { salt.clone(), version_key, )); + log::info!( + "Revealed at block {}", + SubtensorModule::get_current_block_as_u64() + ); - // Disable Commit/Reveal - SubtensorModule::set_commit_reveal_weights_enabled(netuid, false); + let tempo: u16 = 1050; + log::info!("Changing tempo to {}", tempo); + SubtensorModule::set_tempo(netuid, tempo); + + assert_ok!(SubtensorModule::commit_weights( + RuntimeOrigin::signed(hotkey), + netuid, + commit_hash + )); + log::info!( + "Commit successful at block {}", + SubtensorModule::get_current_block_as_u64() + ); - // Cannot set weights the same block due to WeightsRateLimit - step_block(5); //step to avoid settingweightstofast + let tempo: u16 = 805; + log::info!("Changing tempo to {}", tempo); + SubtensorModule::set_tempo(netuid, tempo); - let result = SubtensorModule::set_weights( + step_epochs(1, netuid); + log::info!( + "Advanced to block {}", + SubtensorModule::get_current_block_as_u64() + ); + + assert_ok!(SubtensorModule::reveal_weights( RuntimeOrigin::signed(hotkey), netuid, uids.clone(), weight_values.clone(), - 0, + salt.clone(), + version_key, + )); + log::info!( + "Revealed at block {}", + SubtensorModule::get_current_block_as_u64() ); - assert_ok!(result); }); } #[test] -fn test_commit_reveal_bad_salt_fail() { +fn test_commit_reveal_multiple_commits() { 
new_test_ext(1).execute_with(|| { let netuid: u16 = 1; let uids: Vec = vec![0, 1]; let weight_values: Vec = vec![10, 10]; - let salt: Vec = vec![1, 2, 3, 4, 5, 6, 7, 8]; - let bad_salt: Vec = vec![0, 2, 3, 4, 5, 6, 7, 8]; let version_key: u64 = 0; let hotkey: U256 = U256::from(1); - let commit_hash: H256 = BlakeTwo256::hash_of(&( + System::set_block_number(0); + + let tempo: u16 = 7200; + add_network(netuid, tempo, 0); + + // Setup the network and neurons + register_ok_neuron(netuid, U256::from(3), U256::from(4), 300_000); + register_ok_neuron(netuid, U256::from(1), U256::from(2), 100_000); + SubtensorModule::set_weights_set_rate_limit(netuid, 0); + SubtensorModule::set_validator_permit_for_uid(netuid, 0, true); + SubtensorModule::set_validator_permit_for_uid(netuid, 1, true); + SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); + + // 1. Commit 10 times successfully + let mut commit_info = Vec::new(); + for i in 0..10 { + let salt_i: Vec = vec![i; 8]; // Unique salt for each commit + let commit_hash: H256 = BlakeTwo256::hash_of(&( + hotkey, + netuid, + uids.clone(), + weight_values.clone(), + salt_i.clone(), + version_key, + )); + commit_info.push((commit_hash, salt_i)); + assert_ok!(SubtensorModule::commit_weights( + RuntimeOrigin::signed(hotkey), + netuid, + commit_hash + )); + } + + // 2. 
Attempt to commit an 11th time, should fail + let salt_11: Vec = vec![11; 8]; + let commit_hash_11: H256 = BlakeTwo256::hash_of(&( hotkey, netuid, uids.clone(), weight_values.clone(), - salt.clone(), + salt_11.clone(), version_key, )); + assert_err!( + SubtensorModule::commit_weights(RuntimeOrigin::signed(hotkey), netuid, commit_hash_11), + Error::::TooManyUnrevealedCommits + ); - add_network(netuid, 0, 0); - register_ok_neuron(netuid, U256::from(3), U256::from(4), 300000); - register_ok_neuron(netuid, U256::from(1), U256::from(2), 100000); - SubtensorModule::set_weights_set_rate_limit(netuid, 5); - SubtensorModule::set_validator_permit_for_uid(netuid, 0, true); - SubtensorModule::set_validator_permit_for_uid(netuid, 1, true); + // 3. Attempt to reveal out of order (reveal the second commit first) + // Advance to the next epoch for reveals to be valid + step_epochs(1, netuid); - SubtensorModule::set_commit_reveal_weights_interval(netuid, 5); - SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); + // Try to reveal the second commit first + let (_commit_hash_2, salt_2) = &commit_info[1]; + assert_ok!(SubtensorModule::reveal_weights( + RuntimeOrigin::signed(hotkey), + netuid, + uids.clone(), + weight_values.clone(), + salt_2.clone(), + version_key, + )); + + // Check that commits before the revealed one are removed + let remaining_commits = pallet_subtensor::WeightCommits::::get(netuid, hotkey) + .expect("expected 8 remaining commits"); + assert_eq!(remaining_commits.len(), 8); // 10 commits - 2 removed (index 0 and 1) + + // 4. 
Reveal the last commit next + let (_commit_hash_10, salt_10) = &commit_info[9]; + assert_ok!(SubtensorModule::reveal_weights( + RuntimeOrigin::signed(hotkey), + netuid, + uids.clone(), + weight_values.clone(), + salt_10.clone(), + version_key, + )); + + // Remaining commits should have removed up to index 9 + let remaining_commits = pallet_subtensor::WeightCommits::::get(netuid, hotkey); + assert!(remaining_commits.is_none()); // All commits removed + // After revealing all commits, attempt to commit again should now succeed assert_ok!(SubtensorModule::commit_weights( RuntimeOrigin::signed(hotkey), netuid, - commit_hash + commit_hash_11 )); - step_block(5); + // 5. Test expired commits are removed and do not block reveals + // Commit again and let the commit expire + let salt_12: Vec = vec![12; 8]; + let commit_hash_12: H256 = BlakeTwo256::hash_of(&( + hotkey, + netuid, + uids.clone(), + weight_values.clone(), + salt_12.clone(), + version_key, + )); + assert_ok!(SubtensorModule::commit_weights( + RuntimeOrigin::signed(hotkey), + netuid, + commit_hash_12 + )); + // Advance two epochs so the commit expires + step_epochs(2, netuid); + + // Attempt to reveal the expired commit, should fail assert_err!( SubtensorModule::reveal_weights( RuntimeOrigin::signed(hotkey), netuid, uids.clone(), weight_values.clone(), - bad_salt.clone(), + salt_12.clone(), version_key, ), - Error::::InvalidRevealCommitHashNotMatch + Error::::ExpiredWeightCommit ); - }); -} -fn commit_reveal_set_weights( - hotkey: U256, - netuid: u16, - uids: Vec, - weights: Vec, - salt: Vec, - version_key: u64, -) -> DispatchResult { - SubtensorModule::set_commit_reveal_weights_interval(netuid, 5); - SubtensorModule::set_weights_set_rate_limit(netuid, 5); - SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); + // Commit again and reveal after advancing to next epoch + let salt_13: Vec = vec![13; 8]; + let commit_hash_13: H256 = BlakeTwo256::hash_of(&( + hotkey, + netuid, + uids.clone(), + 
weight_values.clone(), + salt_13.clone(), + version_key, + )); + assert_ok!(SubtensorModule::commit_weights( + RuntimeOrigin::signed(hotkey), + netuid, + commit_hash_13 + )); - let commit_hash: H256 = BlakeTwo256::hash_of(&( - hotkey, - netuid, - uids.clone(), - weights.clone(), - salt.clone(), - version_key, - )); + step_epochs(1, netuid); - SubtensorModule::commit_weights(RuntimeOrigin::signed(hotkey), netuid, commit_hash)?; + assert_ok!(SubtensorModule::reveal_weights( + RuntimeOrigin::signed(hotkey), + netuid, + uids.clone(), + weight_values.clone(), + salt_13.clone(), + version_key, + )); - step_block(5); + // 6. Ensure that attempting to reveal after the valid reveal period fails + // Commit again + let salt_14: Vec = vec![14; 8]; + let commit_hash_14: H256 = BlakeTwo256::hash_of(&( + hotkey, + netuid, + uids.clone(), + weight_values.clone(), + salt_14.clone(), + version_key, + )); + assert_ok!(SubtensorModule::commit_weights( + RuntimeOrigin::signed(hotkey), + netuid, + commit_hash_14 + )); - SubtensorModule::reveal_weights( - RuntimeOrigin::signed(hotkey), - netuid, - uids, - weights, - salt, - version_key, - )?; + // Advance beyond the valid reveal period (more than one epoch) + step_epochs(2, netuid); - Ok(()) + // Attempt to reveal, should fail + assert_err!( + SubtensorModule::reveal_weights( + RuntimeOrigin::signed(hotkey), + netuid, + uids.clone(), + weight_values.clone(), + salt_14.clone(), + version_key, + ), + Error::::ExpiredWeightCommit + ); + + // 7. 
Attempt to reveal a commit that is not ready yet (before the reveal period) + // Commit again + let salt_15: Vec = vec![15; 8]; + let commit_hash_15: H256 = BlakeTwo256::hash_of(&( + hotkey, + netuid, + uids.clone(), + weight_values.clone(), + salt_15.clone(), + version_key, + )); + assert_ok!(SubtensorModule::commit_weights( + RuntimeOrigin::signed(hotkey), + netuid, + commit_hash_15 + )); + + // Attempt to reveal immediately, should fail + assert_err!( + SubtensorModule::reveal_weights( + RuntimeOrigin::signed(hotkey), + netuid, + uids.clone(), + weight_values.clone(), + salt_15.clone(), + version_key, + ), + Error::::RevealTooEarly + ); + + step_epochs(1, netuid); + + // Now reveal should succeed + assert_ok!(SubtensorModule::reveal_weights( + RuntimeOrigin::signed(hotkey), + netuid, + uids.clone(), + weight_values.clone(), + salt_15.clone(), + version_key, + )); + + // 8. Test that revealing with incorrect data (salt) fails + // Commit again + let salt_16: Vec = vec![16; 8]; + let commit_hash_16: H256 = BlakeTwo256::hash_of(&( + hotkey, + netuid, + uids.clone(), + weight_values.clone(), + salt_16.clone(), + version_key, + )); + assert_ok!(SubtensorModule::commit_weights( + RuntimeOrigin::signed(hotkey), + netuid, + commit_hash_16 + )); + + step_epochs(1, netuid); + + // Attempt to reveal with incorrect salt + let wrong_salt: Vec = vec![99; 8]; + assert_err!( + SubtensorModule::reveal_weights( + RuntimeOrigin::signed(hotkey), + netuid, + uids.clone(), + weight_values.clone(), + wrong_salt.clone(), + version_key, + ), + Error::::InvalidRevealCommitHashNotMatch + ); + + // Reveal with correct data should succeed + assert_ok!(SubtensorModule::reveal_weights( + RuntimeOrigin::signed(hotkey), + netuid, + uids.clone(), + weight_values.clone(), + salt_16.clone(), + version_key, + )); + + // 9. 
Test that attempting to reveal when there are no commits fails + assert_err!( + SubtensorModule::reveal_weights( + RuntimeOrigin::signed(hotkey), + netuid, + uids.clone(), + weight_values.clone(), + salt_16.clone(), + version_key, + ), + Error::::NoWeightsCommitFound + ); + + // 10. Commit twice and attempt to reveal out of sequence (which is now allowed) + let salt_a: Vec = vec![21; 8]; + let commit_hash_a: H256 = BlakeTwo256::hash_of(&( + hotkey, + netuid, + uids.clone(), + weight_values.clone(), + salt_a.clone(), + version_key, + )); + assert_ok!(SubtensorModule::commit_weights( + RuntimeOrigin::signed(hotkey), + netuid, + commit_hash_a + )); + + let salt_b: Vec = vec![22; 8]; + let commit_hash_b: H256 = BlakeTwo256::hash_of(&( + hotkey, + netuid, + uids.clone(), + weight_values.clone(), + salt_b.clone(), + version_key, + )); + assert_ok!(SubtensorModule::commit_weights( + RuntimeOrigin::signed(hotkey), + netuid, + commit_hash_b + )); + + step_epochs(1, netuid); + + // Reveal the second commit first, should now succeed + assert_ok!(SubtensorModule::reveal_weights( + RuntimeOrigin::signed(hotkey), + netuid, + uids.clone(), + weight_values.clone(), + salt_b.clone(), + version_key, + )); + + // Check that the first commit has been removed + let remaining_commits = pallet_subtensor::WeightCommits::::get(netuid, hotkey); + assert!(remaining_commits.is_none()); + + // Attempting to reveal the first commit should fail as it was removed + assert_err!( + SubtensorModule::reveal_weights( + RuntimeOrigin::signed(hotkey), + netuid, + uids, + weight_values, + salt_a, + version_key, + ), + Error::::NoWeightsCommitFound + ); + }); +} + +fn commit_reveal_set_weights( + hotkey: U256, + netuid: u16, + uids: Vec, + weights: Vec, + salt: Vec, + version_key: u64, +) -> DispatchResult { + SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); + + let commit_hash: H256 = BlakeTwo256::hash_of(&( + hotkey, + netuid, + uids.clone(), + weights.clone(), + salt.clone(), + 
version_key, + )); + + SubtensorModule::commit_weights(RuntimeOrigin::signed(hotkey), netuid, commit_hash)?; + + step_epochs(1, netuid); + + SubtensorModule::reveal_weights( + RuntimeOrigin::signed(hotkey), + netuid, + uids, + weights, + salt, + version_key, + )?; + + Ok(()) +} + +#[test] +fn test_expired_commits_handling_in_commit_and_reveal() { + new_test_ext(1).execute_with(|| { + let netuid: u16 = 1; + let hotkey: ::AccountId = U256::from(1); + let version_key: u64 = 0; + let uids: Vec = vec![0, 1]; + let weight_values: Vec = vec![10, 10]; + let tempo: u16 = 100; + + System::set_block_number(0); + add_network(netuid, tempo, 0); + + SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); + SubtensorModule::set_weights_set_rate_limit(netuid, 0); + + // Register neurons + register_ok_neuron(netuid, U256::from(3), U256::from(4), 300_000); + register_ok_neuron(netuid, U256::from(1), U256::from(2), 100_000); + SubtensorModule::set_validator_permit_for_uid(netuid, 0, true); + SubtensorModule::set_validator_permit_for_uid(netuid, 1, true); + + // 1. Commit 5 times in epoch 0 + let mut commit_info = Vec::new(); + for i in 0..5 { + let salt: Vec = vec![i; 8]; + let commit_hash: H256 = BlakeTwo256::hash_of(&( + hotkey, + netuid, + uids.clone(), + weight_values.clone(), + salt.clone(), + version_key, + )); + commit_info.push((commit_hash, salt)); + assert_ok!(SubtensorModule::commit_weights( + RuntimeOrigin::signed(hotkey), + netuid, + commit_hash + )); + } + + // Advance to epoch 1 + step_epochs(1, netuid); + + // 2. Commit another 5 times in epoch 1 + for i in 5..10 { + let salt: Vec = vec![i; 8]; + let commit_hash: H256 = BlakeTwo256::hash_of(&( + hotkey, + netuid, + uids.clone(), + weight_values.clone(), + salt.clone(), + version_key, + )); + commit_info.push((commit_hash, salt)); + assert_ok!(SubtensorModule::commit_weights( + RuntimeOrigin::signed(hotkey), + netuid, + commit_hash + )); + } + + // 3. 
Attempt to commit an 11th time, should fail with TooManyUnrevealedCommits + let salt_11: Vec = vec![11; 8]; + let commit_hash_11: H256 = BlakeTwo256::hash_of(&( + hotkey, + netuid, + uids.clone(), + weight_values.clone(), + salt_11.clone(), + version_key, + )); + assert_err!( + SubtensorModule::commit_weights(RuntimeOrigin::signed(hotkey), netuid, commit_hash_11), + Error::::TooManyUnrevealedCommits + ); + + // 4. Advance to epoch 2 to expire the commits from epoch 0 + step_epochs(1, netuid); // Now at epoch 2 + + // 5. Attempt to commit again; should succeed after expired commits are removed + assert_ok!(SubtensorModule::commit_weights( + RuntimeOrigin::signed(hotkey), + netuid, + commit_hash_11 + )); + + // 6. Verify that the number of unrevealed, non-expired commits is now 6 + let commits: VecDeque<(H256, u64, u64, u64)> = + pallet_subtensor::WeightCommits::::get(netuid, hotkey) + .expect("Expected a commit"); + assert_eq!(commits.len(), 6); // 5 non-expired commits from epoch 1 + new commit + + // 7. Attempt to reveal an expired commit (from epoch 0) + // Previous commit removed expired commits + let (_, expired_salt) = &commit_info[0]; + assert_err!( + SubtensorModule::reveal_weights( + RuntimeOrigin::signed(hotkey), + netuid, + uids.clone(), + weight_values.clone(), + expired_salt.clone(), + version_key, + ), + Error::::InvalidRevealCommitHashNotMatch + ); + + // 8. Reveal commits from epoch 1 at current_epoch = 2 + for (_, salt) in commit_info.iter().skip(5).take(5) { + let salt = salt.clone(); + + assert_ok!(SubtensorModule::reveal_weights( + RuntimeOrigin::signed(hotkey), + netuid, + uids.clone(), + weight_values.clone(), + salt.clone(), + version_key, + )); + } + + // 9. Advance to epoch 3 to reveal the new commit + step_epochs(1, netuid); + + // 10. 
Reveal the new commit from epoch 2 + assert_ok!(SubtensorModule::reveal_weights( + RuntimeOrigin::signed(hotkey), + netuid, + uids.clone(), + weight_values.clone(), + salt_11.clone(), + version_key, + )); + + // 10. Verify that all commits have been revealed and the queue is empty + let commits = pallet_subtensor::WeightCommits::::get(netuid, hotkey); + assert!(commits.is_none()); + + // 11. Attempt to reveal again, should fail with NoWeightsCommitFound + assert_err!( + SubtensorModule::reveal_weights( + RuntimeOrigin::signed(hotkey), + netuid, + uids.clone(), + weight_values.clone(), + salt_11.clone(), + version_key, + ), + Error::::NoWeightsCommitFound + ); + + // 12. Commit again to ensure we can continue after previous commits + let salt_12: Vec = vec![12; 8]; + let commit_hash_12: H256 = BlakeTwo256::hash_of(&( + hotkey, + netuid, + uids.clone(), + weight_values.clone(), + salt_12.clone(), + version_key, + )); + assert_ok!(SubtensorModule::commit_weights( + RuntimeOrigin::signed(hotkey), + netuid, + commit_hash_12 + )); + + // Advance to next epoch (epoch 4) and reveal + step_epochs(1, netuid); + + assert_ok!(SubtensorModule::reveal_weights( + RuntimeOrigin::signed(hotkey), + netuid, + uids, + weight_values, + salt_12, + version_key, + )); + }); +} + +#[test] +fn test_reveal_at_exact_epoch() { + new_test_ext(1).execute_with(|| { + let netuid: u16 = 1; + let hotkey: ::AccountId = U256::from(1); + let version_key: u64 = 0; + let uids: Vec = vec![0, 1]; + let weight_values: Vec = vec![10, 10]; + let tempo: u16 = 100; + + System::set_block_number(0); + add_network(netuid, tempo, 0); + + SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); + SubtensorModule::set_weights_set_rate_limit(netuid, 0); + + register_ok_neuron(netuid, U256::from(3), U256::from(4), 300_000); + register_ok_neuron(netuid, U256::from(1), U256::from(2), 100_000); + SubtensorModule::set_validator_permit_for_uid(netuid, 0, true); + 
SubtensorModule::set_validator_permit_for_uid(netuid, 1, true); + + let reveal_periods: Vec = vec![0, 1, 2, 7, 40, 86, 100]; + + for &reveal_period in &reveal_periods { + SubtensorModule::set_reveal_period(netuid, reveal_period); + + let salt: Vec = vec![42; 8]; + let commit_hash: H256 = BlakeTwo256::hash_of(&( + hotkey, + netuid, + uids.clone(), + weight_values.clone(), + salt.clone(), + version_key, + )); + assert_ok!(SubtensorModule::commit_weights( + RuntimeOrigin::signed(hotkey), + netuid, + commit_hash + )); + + // Retrieve commit information + let commit_block = SubtensorModule::get_current_block_as_u64(); + let commit_epoch = SubtensorModule::get_epoch_index(netuid, commit_block); + let reveal_epoch = commit_epoch.saturating_add(reveal_period); + + // Attempt to reveal before the allowed epoch + if reveal_period > 0 { + // Advance to epoch before the reveal epoch + if reveal_period >= 1 { + step_epochs((reveal_period - 1) as u16, netuid); + } + + // Attempt to reveal too early + assert_err!( + SubtensorModule::reveal_weights( + RuntimeOrigin::signed(hotkey), + netuid, + uids.clone(), + weight_values.clone(), + salt.clone(), + version_key, + ), + Error::::RevealTooEarly + ); + } + + // Advance to the exact reveal epoch + let current_epoch = SubtensorModule::get_epoch_index( + netuid, + SubtensorModule::get_current_block_as_u64(), + ); + if current_epoch < reveal_epoch { + step_epochs((reveal_epoch - current_epoch) as u16, netuid); + } + + // Reveal at the exact allowed epoch + assert_ok!(SubtensorModule::reveal_weights( + RuntimeOrigin::signed(hotkey), + netuid, + uids.clone(), + weight_values.clone(), + salt.clone(), + version_key, + )); + + assert_err!( + SubtensorModule::reveal_weights( + RuntimeOrigin::signed(hotkey), + netuid, + uids.clone(), + weight_values.clone(), + salt.clone(), + version_key, + ), + Error::::NoWeightsCommitFound + ); + + let new_salt: Vec = vec![43; 8]; + let new_commit_hash: H256 = BlakeTwo256::hash_of(&( + hotkey, + netuid, + 
uids.clone(), + weight_values.clone(), + new_salt.clone(), + version_key, + )); + assert_ok!(SubtensorModule::commit_weights( + RuntimeOrigin::signed(hotkey), + netuid, + new_commit_hash + )); + + // Advance past the reveal epoch to ensure commit expiration + step_epochs((reveal_period + 1) as u16, netuid); + + // Attempt to reveal after the allowed epoch + assert_err!( + SubtensorModule::reveal_weights( + RuntimeOrigin::signed(hotkey), + netuid, + uids.clone(), + weight_values.clone(), + new_salt.clone(), + version_key, + ), + Error::::ExpiredWeightCommit + ); + + pallet_subtensor::WeightCommits::::remove(netuid, hotkey); + } + }); +} + +#[test] +fn test_tempo_and_reveal_period_change_during_commit_reveal_process() { + new_test_ext(1).execute_with(|| { + let netuid: u16 = 1; + let uids: Vec = vec![0, 1]; + let weight_values: Vec = vec![10, 10]; + let salt: Vec = vec![42; 8]; + let version_key: u64 = 0; + let hotkey: ::AccountId = U256::from(1); + + // Compute initial commit hash + let commit_hash: H256 = BlakeTwo256::hash_of(&( + hotkey, + netuid, + uids.clone(), + weight_values.clone(), + salt.clone(), + version_key, + )); + + System::set_block_number(0); + + let initial_tempo: u16 = 100; + let initial_reveal_period: u64 = 1; + add_network(netuid, initial_tempo, 0); + SubtensorModule::set_reveal_period(netuid, initial_reveal_period); + SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); + + SubtensorModule::set_weights_set_rate_limit(netuid, 0); + + register_ok_neuron(netuid, U256::from(3), U256::from(4), 300_000); + register_ok_neuron(netuid, U256::from(1), U256::from(2), 100_000); + SubtensorModule::set_validator_permit_for_uid(netuid, 0, true); + SubtensorModule::set_validator_permit_for_uid(netuid, 1, true); + + // Step 1: Commit weights + assert_ok!(SubtensorModule::commit_weights( + RuntimeOrigin::signed(hotkey), + netuid, + commit_hash + )); + log::info!( + "Commit successful at block {}", + SubtensorModule::get_current_block_as_u64() + ); + 
+ // Retrieve commit block and epoch + let commit_block = SubtensorModule::get_current_block_as_u64(); + let commit_epoch = SubtensorModule::get_epoch_index(netuid, commit_block); + + // Step 2: Change tempo and reveal period after commit + let new_tempo: u16 = 50; + let new_reveal_period: u64 = 2; + SubtensorModule::set_tempo(netuid, new_tempo); + SubtensorModule::set_reveal_period(netuid, new_reveal_period); + log::info!( + "Changed tempo to {} and reveal period to {}", + new_tempo, + new_reveal_period + ); + + // Step 3: Advance blocks to reach the reveal epoch according to new tempo and reveal period + let current_block = SubtensorModule::get_current_block_as_u64(); + let current_epoch = SubtensorModule::get_epoch_index(netuid, current_block); + let reveal_epoch = commit_epoch.saturating_add(new_reveal_period); + + // Advance to one epoch before reveal epoch + if current_epoch < reveal_epoch { + let epochs_to_advance = reveal_epoch - current_epoch - 1; + step_epochs(epochs_to_advance as u16, netuid); + } + + // Attempt to reveal too early + assert_err!( + SubtensorModule::reveal_weights( + RuntimeOrigin::signed(hotkey), + netuid, + uids.clone(), + weight_values.clone(), + salt.clone(), + version_key + ), + Error::::RevealTooEarly + ); + log::info!( + "Attempted to reveal too early at block {}", + SubtensorModule::get_current_block_as_u64() + ); + + // Advance to reveal epoch + step_epochs(1, netuid); + + // Attempt to reveal at the correct epoch + assert_ok!(SubtensorModule::reveal_weights( + RuntimeOrigin::signed(hotkey), + netuid, + uids.clone(), + weight_values.clone(), + salt.clone(), + version_key + )); + log::info!( + "Revealed weights at block {}", + SubtensorModule::get_current_block_as_u64() + ); + + // Step 4: Change tempo and reveal period again after reveal + let new_tempo_after_reveal: u16 = 200; + let new_reveal_period_after_reveal: u64 = 1; + SubtensorModule::set_tempo(netuid, new_tempo_after_reveal); + SubtensorModule::set_reveal_period(netuid, 
new_reveal_period_after_reveal); + log::info!( + "Changed tempo to {} and reveal period to {} after reveal", + new_tempo_after_reveal, + new_reveal_period_after_reveal + ); + + // Step 5: Commit again + let new_salt: Vec = vec![43; 8]; + let new_commit_hash: H256 = BlakeTwo256::hash_of(&( + hotkey, + netuid, + uids.clone(), + weight_values.clone(), + new_salt.clone(), + version_key, + )); + assert_ok!(SubtensorModule::commit_weights( + RuntimeOrigin::signed(hotkey), + netuid, + new_commit_hash + )); + log::info!( + "Commit successful at block {}", + SubtensorModule::get_current_block_as_u64() + ); + + // Retrieve new commit block and epoch + let new_commit_block = SubtensorModule::get_current_block_as_u64(); + let new_commit_epoch = SubtensorModule::get_epoch_index(netuid, new_commit_block); + let new_reveal_epoch = new_commit_epoch.saturating_add(new_reveal_period_after_reveal); + + // Advance to reveal epoch + let current_block = SubtensorModule::get_current_block_as_u64(); + let current_epoch = SubtensorModule::get_epoch_index(netuid, current_block); + if current_epoch < new_reveal_epoch { + let epochs_to_advance = new_reveal_epoch - current_epoch; + step_epochs(epochs_to_advance as u16, netuid); + } + + // Attempt to reveal at the correct epoch + assert_ok!(SubtensorModule::reveal_weights( + RuntimeOrigin::signed(hotkey), + netuid, + uids.clone(), + weight_values.clone(), + new_salt.clone(), + version_key + )); + log::info!( + "Revealed weights at block {}", + SubtensorModule::get_current_block_as_u64() + ); + + // Step 6: Attempt to reveal after the allowed epoch (commit expires) + // Advance past the reveal epoch + let expiration_epochs = 1; + step_epochs(expiration_epochs as u16, netuid); + + // Attempt to reveal again (should fail due to expired commit) + assert_err!( + SubtensorModule::reveal_weights( + RuntimeOrigin::signed(hotkey), + netuid, + uids.clone(), + weight_values.clone(), + new_salt.clone(), + version_key + ), + Error::::NoWeightsCommitFound + 
); + log::info!( + "Attempted to reveal after expiration at block {}", + SubtensorModule::get_current_block_as_u64() + ); + }); +} + +#[test] +fn test_commit_reveal_order_enforcement() { + new_test_ext(1).execute_with(|| { + let netuid: u16 = 1; + let hotkey: ::AccountId = U256::from(1); + let version_key: u64 = 0; + let uids: Vec = vec![0, 1]; + let weight_values: Vec = vec![10, 10]; + let tempo: u16 = 100; + + System::set_block_number(0); + add_network(netuid, tempo, 0); + + SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); + SubtensorModule::set_weights_set_rate_limit(netuid, 0); + + register_ok_neuron(netuid, U256::from(3), U256::from(4), 300_000); + register_ok_neuron(netuid, U256::from(1), U256::from(2), 100_000); + SubtensorModule::set_validator_permit_for_uid(netuid, 0, true); + SubtensorModule::set_validator_permit_for_uid(netuid, 1, true); + + // Commit three times: A, B, C + let mut commit_info = Vec::new(); + for i in 0..3 { + let salt: Vec = vec![i; 8]; + let commit_hash: H256 = BlakeTwo256::hash_of(&( + hotkey, + netuid, + uids.clone(), + weight_values.clone(), + salt.clone(), + version_key, + )); + commit_info.push((commit_hash, salt)); + assert_ok!(SubtensorModule::commit_weights( + RuntimeOrigin::signed(hotkey), + netuid, + commit_hash + )); + } + + step_epochs(1, netuid); + + // Attempt to reveal B first (index 1), should now succeed + let (_commit_hash_b, salt_b) = &commit_info[1]; + assert_ok!(SubtensorModule::reveal_weights( + RuntimeOrigin::signed(hotkey), + netuid, + uids.clone(), + weight_values.clone(), + salt_b.clone(), + version_key, + )); + + // Check that commits A and B are removed + let remaining_commits = pallet_subtensor::WeightCommits::::get(netuid, hotkey) + .expect("expected 1 remaining commit"); + assert_eq!(remaining_commits.len(), 1); // Only commit C should remain + + // Attempt to reveal C (index 2), should succeed + let (_commit_hash_c, salt_c) = &commit_info[2]; + assert_ok!(SubtensorModule::reveal_weights( 
+ RuntimeOrigin::signed(hotkey), + netuid, + uids.clone(), + weight_values.clone(), + salt_c.clone(), + version_key, + )); + + // Attempting to reveal A (index 0) should fail as it's been removed + let (_commit_hash_a, salt_a) = &commit_info[0]; + assert_err!( + SubtensorModule::reveal_weights( + RuntimeOrigin::signed(hotkey), + netuid, + uids, + weight_values, + salt_a.clone(), + version_key, + ), + Error::::NoWeightsCommitFound + ); + }); +} + +#[test] +fn test_reveal_at_exact_block() { + new_test_ext(1).execute_with(|| { + let netuid: u16 = 1; + let hotkey: ::AccountId = U256::from(1); + let version_key: u64 = 0; + let uids: Vec = vec![0, 1]; + let weight_values: Vec = vec![10, 10]; + let tempo: u16 = 360; + + System::set_block_number(0); + add_network(netuid, tempo, 0); + + SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); + SubtensorModule::set_weights_set_rate_limit(netuid, 0); + + register_ok_neuron(netuid, U256::from(3), U256::from(4), 300_000); + register_ok_neuron(netuid, U256::from(1), U256::from(2), 100_000); + SubtensorModule::set_validator_permit_for_uid(netuid, 0, true); + SubtensorModule::set_validator_permit_for_uid(netuid, 1, true); + + let reveal_periods: Vec = vec![ + 0, + 1, + 2, + 5, + 19, + 21, + 30, + 77, + 104, + 833, + 1999, + 36398, + u32::MAX as u64, + ]; + + for &reveal_period in &reveal_periods { + SubtensorModule::set_reveal_period(netuid, reveal_period); + + // Step 1: Commit weights + let salt: Vec = vec![42 + (reveal_period % 100) as u16; 8]; + let commit_hash: H256 = BlakeTwo256::hash_of(&( + hotkey, + netuid, + uids.clone(), + weight_values.clone(), + salt.clone(), + version_key, + )); + assert_ok!(SubtensorModule::commit_weights( + RuntimeOrigin::signed(hotkey), + netuid, + commit_hash + )); + + let commit_block = SubtensorModule::get_current_block_as_u64(); + let commit_epoch = SubtensorModule::get_epoch_index(netuid, commit_block); + let reveal_epoch = commit_epoch.saturating_add(reveal_period); + + // 
Calculate the block number where the reveal epoch starts + let tempo_plus_one = (tempo as u64).saturating_add(1); + let netuid_plus_one = (netuid as u64).saturating_add(1); + let reveal_epoch_start_block = reveal_epoch + .saturating_mul(tempo_plus_one) + .saturating_sub(netuid_plus_one); + + // Attempt to reveal before the reveal epoch starts + let current_block = SubtensorModule::get_current_block_as_u64(); + if current_block < reveal_epoch_start_block { + // Advance to one block before the reveal epoch starts + let blocks_to_advance = reveal_epoch_start_block.saturating_sub(current_block); + if blocks_to_advance > 1 { + // Advance to one block before the reveal epoch + let new_block_number = current_block + blocks_to_advance - 1; + System::set_block_number(new_block_number); + } + + // Attempt to reveal too early + assert_err!( + SubtensorModule::reveal_weights( + RuntimeOrigin::signed(hotkey), + netuid, + uids.clone(), + weight_values.clone(), + salt.clone(), + version_key + ), + Error::::RevealTooEarly + ); + + // Advance one more block to reach the exact reveal epoch start block + System::set_block_number(reveal_epoch_start_block); + } else { + // If we're already at or past the reveal epoch start block + System::set_block_number(reveal_epoch_start_block); + } + + // Reveal at the exact allowed block + assert_ok!(SubtensorModule::reveal_weights( + RuntimeOrigin::signed(hotkey), + netuid, + uids.clone(), + weight_values.clone(), + salt.clone(), + version_key + )); + + // Attempt to reveal again; should fail with NoWeightsCommitFound + assert_err!( + SubtensorModule::reveal_weights( + RuntimeOrigin::signed(hotkey), + netuid, + uids.clone(), + weight_values.clone(), + salt.clone(), + version_key + ), + Error::::NoWeightsCommitFound + ); + + // Commit again with new salt + let new_salt: Vec = vec![43 + (reveal_period % 100) as u16; 8]; + let new_commit_hash: H256 = BlakeTwo256::hash_of(&( + hotkey, + netuid, + uids.clone(), + weight_values.clone(), + 
new_salt.clone(), + version_key, + )); + assert_ok!(SubtensorModule::commit_weights( + RuntimeOrigin::signed(hotkey), + netuid, + new_commit_hash + )); + + // Advance blocks to after the commit expires + let commit_block = SubtensorModule::get_current_block_as_u64(); + let commit_epoch = SubtensorModule::get_epoch_index(netuid, commit_block); + let reveal_epoch = commit_epoch.saturating_add(reveal_period); + let expiration_epoch = reveal_epoch.saturating_add(1); + let expiration_epoch_start_block = expiration_epoch + .saturating_mul(tempo_plus_one) + .saturating_sub(netuid_plus_one); + + let current_block = SubtensorModule::get_current_block_as_u64(); + if current_block < expiration_epoch_start_block { + // Advance to the block where the commit expires + System::set_block_number(expiration_epoch_start_block); + } + + // Attempt to reveal after the commit has expired + assert_err!( + SubtensorModule::reveal_weights( + RuntimeOrigin::signed(hotkey), + netuid, + uids.clone(), + weight_values.clone(), + new_salt.clone(), + version_key + ), + Error::::ExpiredWeightCommit + ); + + // Clean up for next iteration + pallet_subtensor::WeightCommits::::remove(netuid, hotkey); + } + }); +} + +#[test] +fn test_successful_batch_reveal() { + new_test_ext(1).execute_with(|| { + let netuid: u16 = 1; + let hotkey = U256::from(1); + let version_keys: Vec = vec![0, 0, 0]; + let uids_list: Vec> = vec![vec![0, 1], vec![1, 0], vec![0, 1]]; + let weight_values_list: Vec> = vec![vec![10, 20], vec![30, 40], vec![50, 60]]; + let tempo: u16 = 100; + + System::set_block_number(0); + add_network(netuid, tempo, 0); + + SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); + SubtensorModule::set_weights_set_rate_limit(netuid, 0); + + register_ok_neuron(netuid, U256::from(3), U256::from(4), 300_000); + register_ok_neuron(netuid, hotkey, U256::from(2), 100_000); + SubtensorModule::set_validator_permit_for_uid(netuid, 0, true); + SubtensorModule::set_validator_permit_for_uid(netuid, 1, 
true); + + // 1. Commit multiple times + let mut commit_info = Vec::new(); + for i in 0..3 { + let salt: Vec = vec![i as u16; 8]; + let commit_hash: H256 = BlakeTwo256::hash_of(&( + hotkey, + netuid, + uids_list[i].clone(), + weight_values_list[i].clone(), + salt.clone(), + version_keys[i], + )); + commit_info.push((commit_hash, salt)); + assert_ok!(SubtensorModule::commit_weights( + RuntimeOrigin::signed(hotkey), + netuid, + commit_hash + )); + } + + step_epochs(1, netuid); + + // 2. Prepare batch reveal parameters + let salts_list: Vec> = commit_info.iter().map(|(_, salt)| salt.clone()).collect(); + + // 3. Perform batch reveal + assert_ok!(SubtensorModule::do_batch_reveal_weights( + RuntimeOrigin::signed(hotkey), + netuid, + uids_list.clone(), + weight_values_list.clone(), + salts_list.clone(), + version_keys.clone(), + )); + + // 4. Ensure all commits are removed + let commits = pallet_subtensor::WeightCommits::::get(netuid, hotkey); + assert!(commits.is_none()); + }); +} + +#[test] +fn test_batch_reveal_with_expired_commits() { + new_test_ext(1).execute_with(|| { + let netuid: u16 = 1; + let hotkey = U256::from(1); + let version_keys: Vec = vec![0, 0, 0]; + let uids_list: Vec> = vec![vec![0, 1], vec![1, 0], vec![0, 1]]; + let weight_values_list: Vec> = vec![vec![10, 20], vec![30, 40], vec![50, 60]]; + let tempo: u16 = 100; + + System::set_block_number(0); + add_network(netuid, tempo, 0); + + SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); + SubtensorModule::set_weights_set_rate_limit(netuid, 0); + + register_ok_neuron(netuid, U256::from(3), U256::from(4), 300_000); + register_ok_neuron(netuid, hotkey, U256::from(2), 100_000); + SubtensorModule::set_validator_permit_for_uid(netuid, 0, true); + SubtensorModule::set_validator_permit_for_uid(netuid, 1, true); + + let mut commit_info = Vec::new(); + + // 1. 
Commit the first weight in epoch 0 + let salt0: Vec = vec![0u16; 8]; + let commit_hash0: H256 = BlakeTwo256::hash_of(&( + hotkey, + netuid, + uids_list[0].clone(), + weight_values_list[0].clone(), + salt0.clone(), + version_keys[0], + )); + commit_info.push((commit_hash0, salt0)); + assert_ok!(SubtensorModule::commit_weights( + RuntimeOrigin::signed(hotkey), + netuid, + commit_hash0 + )); + + // Advance to epoch 1 + step_epochs(1, netuid); + + // 2. Commit the next two weights in epoch 1 + for i in 1..3 { + let salt: Vec = vec![i as u16; 8]; + let commit_hash: H256 = BlakeTwo256::hash_of(&( + hotkey, + netuid, + uids_list[i].clone(), + weight_values_list[i].clone(), + salt.clone(), + version_keys[i], + )); + commit_info.push((commit_hash, salt)); + assert_ok!(SubtensorModule::commit_weights( + RuntimeOrigin::signed(hotkey), + netuid, + commit_hash + )); + } + + // Advance to epoch 2 (after reveal period for first commit) + step_epochs(1, netuid); + + // 3. Prepare batch reveal parameters + let salts_list: Vec> = commit_info.iter().map(|(_, salt)| salt.clone()).collect(); + + // 4. Perform batch reveal + let result = SubtensorModule::do_batch_reveal_weights( + RuntimeOrigin::signed(hotkey), + netuid, + uids_list.clone(), + weight_values_list.clone(), + salts_list.clone(), + version_keys.clone(), + ); + assert_err!(result, Error::::ExpiredWeightCommit); + + // 5. Expired commit is not removed until a successful call + let commits = pallet_subtensor::WeightCommits::::get(netuid, hotkey) + .expect("Expected remaining commits"); + assert_eq!(commits.len(), 3); + + // 6. 
Try revealing the remaining commits + let valid_uids_list = uids_list[1..].to_vec(); + let valid_weight_values_list = weight_values_list[1..].to_vec(); + let valid_salts_list = salts_list[1..].to_vec(); + let valid_version_keys = version_keys[1..].to_vec(); + + assert_ok!(SubtensorModule::do_batch_reveal_weights( + RuntimeOrigin::signed(hotkey), + netuid, + valid_uids_list, + valid_weight_values_list, + valid_salts_list, + valid_version_keys, + )); + + // 7. Ensure all commits are removed + let commits = pallet_subtensor::WeightCommits::::get(netuid, hotkey); + assert!(commits.is_none()); + }); +} + +#[test] +fn test_batch_reveal_with_invalid_input_lengths() { + new_test_ext(1).execute_with(|| { + let netuid: u16 = 1; + let hotkey = U256::from(1); + let tempo: u16 = 100; + + System::set_block_number(0); + add_network(netuid, tempo, 0); + + SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); + + // Base data for valid inputs + let uids_list: Vec> = vec![vec![0, 1], vec![1, 0]]; + let weight_values_list: Vec> = vec![vec![10, 20], vec![30, 40]]; + let salts_list: Vec> = vec![vec![0u16; 8], vec![1u16; 8]]; + let version_keys: Vec = vec![0, 0]; + + // Test cases with mismatched input lengths + + // Case 1: uids_list has an extra element + let uids_list_case = vec![vec![0, 1], vec![1, 0], vec![2, 3]]; + let result = SubtensorModule::do_batch_reveal_weights( + RuntimeOrigin::signed(hotkey), + netuid, + uids_list_case.clone(), + weight_values_list.clone(), + salts_list.clone(), + version_keys.clone(), + ); + assert_err!(result, Error::::InputLengthsUnequal); + + // Case 2: weight_values_list has an extra element + let weight_values_list_case = vec![vec![10, 20], vec![30, 40], vec![50, 60]]; + let result = SubtensorModule::do_batch_reveal_weights( + RuntimeOrigin::signed(hotkey), + netuid, + uids_list.clone(), + weight_values_list_case.clone(), + salts_list.clone(), + version_keys.clone(), + ); + assert_err!(result, Error::::InputLengthsUnequal); + + // Case 
3: salts_list has an extra element + let salts_list_case = vec![vec![0u16; 8], vec![1u16; 8], vec![2u16; 8]]; + let result = SubtensorModule::do_batch_reveal_weights( + RuntimeOrigin::signed(hotkey), + netuid, + uids_list.clone(), + weight_values_list.clone(), + salts_list_case.clone(), + version_keys.clone(), + ); + assert_err!(result, Error::::InputLengthsUnequal); + + // Case 4: version_keys has an extra element + let version_keys_case = vec![0, 0, 0]; + let result = SubtensorModule::do_batch_reveal_weights( + RuntimeOrigin::signed(hotkey), + netuid, + uids_list.clone(), + weight_values_list.clone(), + salts_list.clone(), + version_keys_case.clone(), + ); + assert_err!(result, Error::::InputLengthsUnequal); + + // Case 5: All input vectors have mismatched lengths + let uids_list_case = vec![vec![0, 1]]; + let weight_values_list_case = vec![vec![10, 20], vec![30, 40]]; + let salts_list_case = vec![vec![0u16; 8]]; + let version_keys_case = vec![0, 0, 0]; + let result = SubtensorModule::do_batch_reveal_weights( + RuntimeOrigin::signed(hotkey), + netuid, + uids_list_case, + weight_values_list_case, + salts_list_case, + version_keys_case, + ); + assert_err!(result, Error::::InputLengthsUnequal); + + // Case 6: Valid input lengths (should not return an error) + let result = SubtensorModule::do_batch_reveal_weights( + RuntimeOrigin::signed(hotkey), + netuid, + uids_list.clone(), + weight_values_list.clone(), + salts_list.clone(), + version_keys.clone(), + ); + // We expect an error because no commits have been made, but it should not be InputLengthsUnequal + assert_err!(result, Error::::NoWeightsCommitFound); + }); +} + +#[test] +fn test_batch_reveal_with_no_commits() { + new_test_ext(1).execute_with(|| { + let netuid: u16 = 1; + let hotkey = U256::from(1); + let version_keys: Vec = vec![0]; + let uids_list: Vec> = vec![vec![0, 1]]; + let weight_values_list: Vec> = vec![vec![10, 20]]; + let salts_list: Vec> = vec![vec![0u16; 8]]; + let tempo: u16 = 100; + + 
System::set_block_number(0); + add_network(netuid, tempo, 0); + + SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); + + // 1. Attempt to perform batch reveal without any commits + let result = SubtensorModule::do_batch_reveal_weights( + RuntimeOrigin::signed(hotkey), + netuid, + uids_list, + weight_values_list, + salts_list, + version_keys, + ); + assert_err!(result, Error::::NoWeightsCommitFound); + }); +} + +#[test] +fn test_batch_reveal_before_reveal_period() { + new_test_ext(1).execute_with(|| { + let netuid: u16 = 1; + let hotkey = U256::from(1); + let version_keys: Vec = vec![0, 0]; + let uids_list: Vec> = vec![vec![0, 1], vec![1, 0]]; + let weight_values_list: Vec> = vec![vec![10, 20], vec![30, 40]]; + let tempo: u16 = 100; + + System::set_block_number(0); + add_network(netuid, tempo, 0); + + SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); + + register_ok_neuron(netuid, U256::from(3), U256::from(4), 300_000); + register_ok_neuron(netuid, hotkey, U256::from(2), 100_000); + SubtensorModule::set_weights_set_rate_limit(netuid, 0); + SubtensorModule::set_validator_permit_for_uid(netuid, 0, true); + SubtensorModule::set_validator_permit_for_uid(netuid, 1, true); + + // 1. Commit multiple times in the same epoch + let mut commit_info = Vec::new(); + for i in 0..2 { + let salt: Vec = vec![i as u16; 8]; + let commit_hash: H256 = BlakeTwo256::hash_of(&( + hotkey, + netuid, + uids_list[i].clone(), + weight_values_list[i].clone(), + salt.clone(), + version_keys[i], + )); + commit_info.push((commit_hash, salt)); + assert_ok!(SubtensorModule::commit_weights( + RuntimeOrigin::signed(hotkey), + netuid, + commit_hash + )); + } + + // 2. Prepare batch reveal parameters + let salts_list: Vec> = commit_info.iter().map(|(_, salt)| salt.clone()).collect(); + + // 3. 
Attempt to reveal before reveal period + let result = SubtensorModule::do_batch_reveal_weights( + RuntimeOrigin::signed(hotkey), + netuid, + uids_list.clone(), + weight_values_list.clone(), + salts_list.clone(), + version_keys.clone(), + ); + assert_err!(result, Error::::RevealTooEarly); + }); +} + +#[test] +fn test_batch_reveal_after_commits_expired() { + new_test_ext(1).execute_with(|| { + let netuid: u16 = 1; + let hotkey = U256::from(1); + let version_keys: Vec = vec![0, 0]; + let uids_list: Vec> = vec![vec![0, 1], vec![1, 0]]; + let weight_values_list: Vec> = vec![vec![10, 20], vec![30, 40]]; + let tempo: u16 = 100; + + System::set_block_number(0); + add_network(netuid, tempo, 0); + + SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); + + register_ok_neuron(netuid, U256::from(3), U256::from(4), 300_000); + register_ok_neuron(netuid, hotkey, U256::from(2), 100_000); + SubtensorModule::set_weights_set_rate_limit(netuid, 0); + SubtensorModule::set_validator_permit_for_uid(netuid, 0, true); + SubtensorModule::set_validator_permit_for_uid(netuid, 1, true); + + let mut commit_info = Vec::new(); + + // 1. Commit the first weight in epoch 0 + let salt0: Vec = vec![0u16; 8]; + let commit_hash0: H256 = BlakeTwo256::hash_of(&( + hotkey, + netuid, + uids_list[0].clone(), + weight_values_list[0].clone(), + salt0.clone(), + version_keys[0], + )); + commit_info.push((commit_hash0, salt0)); + assert_ok!(SubtensorModule::commit_weights( + RuntimeOrigin::signed(hotkey), + netuid, + commit_hash0 + )); + + // Advance to epoch 1 + step_epochs(1, netuid); + + // 2. 
Commit the second weight in epoch 1 + let salt1: Vec = vec![1u16; 8]; + let commit_hash1: H256 = BlakeTwo256::hash_of(&( + hotkey, + netuid, + uids_list[1].clone(), + weight_values_list[1].clone(), + salt1.clone(), + version_keys[1], + )); + commit_info.push((commit_hash1, salt1)); + assert_ok!(SubtensorModule::commit_weights( + RuntimeOrigin::signed(hotkey), + netuid, + commit_hash1 + )); + + // Advance to epoch 4 to ensure both commits have expired (assuming reveal_period is 1) + step_epochs(3, netuid); + + // 3. Prepare batch reveal parameters + let salts_list: Vec> = commit_info.iter().map(|(_, salt)| salt.clone()).collect(); + + // 4. Attempt to reveal after commits have expired + let result = SubtensorModule::do_batch_reveal_weights( + RuntimeOrigin::signed(hotkey), + netuid, + uids_list.clone(), + weight_values_list.clone(), + salts_list, + version_keys.clone(), + ); + assert_err!(result, Error::::ExpiredWeightCommit); + }); +} + +#[test] +fn test_batch_reveal_when_commit_reveal_disabled() { + new_test_ext(1).execute_with(|| { + let netuid: u16 = 1; + let hotkey = U256::from(1); + let version_keys: Vec = vec![0]; + let uids_list: Vec> = vec![vec![0, 1]]; + let weight_values_list: Vec> = vec![vec![10, 20]]; + let salts_list: Vec> = vec![vec![0u16; 8]]; + let tempo: u16 = 100; + + System::set_block_number(0); + add_network(netuid, tempo, 0); + + SubtensorModule::set_commit_reveal_weights_enabled(netuid, false); + + // 1. 
Attempt to perform batch reveal when commit-reveal is disabled + let result = SubtensorModule::do_batch_reveal_weights( + RuntimeOrigin::signed(hotkey), + netuid, + uids_list, + weight_values_list, + salts_list, + version_keys, + ); + assert_err!(result, Error::::CommitRevealDisabled); + }); +} + +#[test] +fn test_batch_reveal_with_out_of_order_commits() { + new_test_ext(1).execute_with(|| { + let netuid: u16 = 1; + let hotkey = U256::from(1); + let version_keys: Vec = vec![0, 0, 0]; + let uids_list: Vec> = vec![vec![0, 1], vec![1, 0], vec![0, 1]]; + let weight_values_list: Vec> = vec![vec![10, 20], vec![30, 40], vec![50, 60]]; + let tempo: u16 = 100; + + System::set_block_number(0); + add_network(netuid, tempo, 0); + + SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); + SubtensorModule::set_weights_set_rate_limit(netuid, 0); + + register_ok_neuron(netuid, U256::from(3), U256::from(4), 300_000); + register_ok_neuron(netuid, hotkey, U256::from(2), 100_000); + SubtensorModule::set_validator_permit_for_uid(netuid, 0, true); + SubtensorModule::set_validator_permit_for_uid(netuid, 1, true); + + // 1. Commit multiple times (A, B, C) + let mut commit_info = Vec::new(); + for i in 0..3 { + let salt: Vec = vec![i as u16; 8]; + let commit_hash: H256 = BlakeTwo256::hash_of(&( + hotkey, + netuid, + uids_list[i].clone(), + weight_values_list[i].clone(), + salt.clone(), + version_keys[i], + )); + commit_info.push((commit_hash, salt)); + assert_ok!(SubtensorModule::commit_weights( + RuntimeOrigin::signed(hotkey), + netuid, + commit_hash + )); + } + + step_epochs(1, netuid); + + // 2. 
Prepare batch reveal parameters for commits A and C (out of order) + let salts_list: Vec> = vec![ + commit_info[2].1.clone(), // Third commit (C) + commit_info[0].1.clone(), // First commit (A) + ]; + let uids_list_out_of_order = vec![ + uids_list[2].clone(), // C + uids_list[0].clone(), // A + ]; + let weight_values_list_out_of_order = vec![ + weight_values_list[2].clone(), // C + weight_values_list[0].clone(), // A + ]; + let version_keys_out_of_order = vec![ + version_keys[2], // C + version_keys[0], // A + ]; + + // 3. Attempt batch reveal of A and C out of order + let result = SubtensorModule::do_batch_reveal_weights( + RuntimeOrigin::signed(hotkey), + netuid, + uids_list_out_of_order, + weight_values_list_out_of_order, + salts_list, + version_keys_out_of_order, + ); + + // 4. Ensure the batch reveal succeeds + assert_ok!(result); + + // 5. Prepare and reveal the remaining commit (B) + let remaining_salt = commit_info[1].1.clone(); + let remaining_uids = uids_list[1].clone(); + let remaining_weights = weight_values_list[1].clone(); + let remaining_version_key = version_keys[1]; + + assert_ok!(SubtensorModule::do_batch_reveal_weights( + RuntimeOrigin::signed(hotkey), + netuid, + vec![remaining_uids], + vec![remaining_weights], + vec![remaining_salt], + vec![remaining_version_key], + )); + + // 6. 
Ensure all commits are removed + let commits = pallet_subtensor::WeightCommits::::get(netuid, hotkey); + assert!(commits.is_none()); + }); +} + +#[test] +fn test_highly_concurrent_commits_and_reveals_with_multiple_hotkeys() { + new_test_ext(1).execute_with(|| { + // ==== Test Configuration ==== + let netuid: u16 = 1; + let num_hotkeys: usize = 10; + let max_unrevealed_commits: usize = 10; + let commits_per_hotkey: usize = 20; + let initial_reveal_period: u64 = 5; + let initial_tempo: u16 = 100; + + // ==== Setup Network ==== + add_network(netuid, initial_tempo, 0); + SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); + SubtensorModule::set_weights_set_rate_limit(netuid, 0); + SubtensorModule::set_reveal_period(netuid, initial_reveal_period); + SubtensorModule::set_max_registrations_per_block(netuid, u16::MAX); + SubtensorModule::set_target_registrations_per_interval(netuid, u16::MAX); + + // ==== Register Validators ==== + for uid in 0..5 { + let validator_id = U256::from(100 + uid as u64); + register_ok_neuron(netuid, validator_id, U256::from(200 + uid as u64), 300_000); + SubtensorModule::set_validator_permit_for_uid(netuid, uid, true); + } + + // ==== Register Hotkeys ==== + let mut hotkeys: Vec<::AccountId> = Vec::new(); + for i in 0..num_hotkeys { + let hotkey_id = U256::from(1000 + i as u64); + register_ok_neuron(netuid, hotkey_id, U256::from(2000 + i as u64), 100_000); + hotkeys.push(hotkey_id); + } + + // ==== Initialize Commit Information ==== + let mut commit_info_map: HashMap< + ::AccountId, + Vec<(H256, Vec, Vec, Vec, u64)>, + > = HashMap::new(); + + // Initialize the map + for hotkey in &hotkeys { + commit_info_map.insert(*hotkey, Vec::new()); + } + + // ==== Function to Generate Unique Data ==== + fn generate_unique_data(index: usize) -> (Vec, Vec, Vec, u64) { + let uids = vec![index as u16, (index + 1) as u16]; + let values = vec![(index * 10) as u16, ((index + 1) * 10) as u16]; + let salt = vec![(index % 100) as u16; 8]; + let 
version_key = index as u64; + (uids, values, salt, version_key) + } + + // ==== Simulate Concurrent Commits and Reveals ==== + for i in 0..commits_per_hotkey { + for hotkey in &hotkeys { + + let current_commits = pallet_subtensor::WeightCommits::::get(netuid, hotkey) + .unwrap_or_default(); + if current_commits.len() >= max_unrevealed_commits { + continue; + } + + let (uids, values, salt, version_key) = generate_unique_data(i); + let commit_hash: H256 = BlakeTwo256::hash_of(&( + *hotkey, + netuid, + uids.clone(), + values.clone(), + salt.clone(), + version_key, + )); + + if let Some(commits) = commit_info_map.get_mut(hotkey) { + commits.push((commit_hash, salt.clone(), uids.clone(), values.clone(), version_key)); + } + + assert_ok!(SubtensorModule::commit_weights( + RuntimeOrigin::signed(*hotkey), + netuid, + commit_hash + )); + } + + // ==== Reveal Phase ==== + for hotkey in &hotkeys { + if let Some(commits) = commit_info_map.get_mut(hotkey) { + if commits.is_empty() { + continue; // No commits to reveal + } + + let (_commit_hash, salt, uids, values, version_key) = commits.first().expect("expected a value"); + + let reveal_result = SubtensorModule::reveal_weights( + RuntimeOrigin::signed(*hotkey), + netuid, + uids.clone(), + values.clone(), + salt.clone(), + *version_key, + ); + + match reveal_result { + Ok(_) => { + commits.remove(0); + } + Err(e) => { + if e == Error::::RevealTooEarly.into() + || e == Error::::ExpiredWeightCommit.into() + || e == Error::::InvalidRevealCommitHashNotMatch.into() + { + log::info!("Expected error during reveal after epoch advancement: {:?}", e); + } else { + panic!( + "Unexpected error during reveal: {:?}, expected RevealTooEarly, ExpiredWeightCommit, or InvalidRevealCommitHashNotMatch", + e + ); + } + } + } + } + } + } + + // ==== Modify Network Parameters During Commits ==== + SubtensorModule::set_tempo(netuid, 150); + SubtensorModule::set_reveal_period(netuid, 7); + log::info!("Changed tempo to 150 and reveal_period to 7 during 
commits."); + + step_epochs(3, netuid); + + // ==== Continue Reveals After Epoch Advancement ==== + for hotkey in &hotkeys { + if let Some(commits) = commit_info_map.get_mut(hotkey) { + while !commits.is_empty() { + let (_commit_hash, salt, uids, values, version_key) = &commits[0]; + + // Attempt to reveal + let reveal_result = SubtensorModule::reveal_weights( + RuntimeOrigin::signed(*hotkey), + netuid, + uids.clone(), + values.clone(), + salt.clone(), + *version_key, + ); + + match reveal_result { + Ok(_) => { + commits.remove(0); + } + Err(e) => { + // Check if the error is due to reveal being too early or commit expired + if e == Error::::RevealTooEarly.into() + || e == Error::::ExpiredWeightCommit.into() + || e == Error::::InvalidRevealCommitHashNotMatch.into() + { + log::info!("Expected error during reveal after epoch advancement: {:?}", e); + break; + } else { + panic!( + "Unexpected error during reveal after epoch advancement: {:?}, expected RevealTooEarly, ExpiredWeightCommit, or InvalidRevealCommitHashNotMatch", + e + ); + } + } + } + } + } + } + + // ==== Change Network Parameters Again ==== + SubtensorModule::set_tempo(netuid, 200); + SubtensorModule::set_reveal_period(netuid, 10); + log::info!("Changed tempo to 200 and reveal_period to 10 after initial reveals."); + + step_epochs(10, netuid); + + // ==== Final Reveal Attempts ==== + for (hotkey, commits) in commit_info_map.iter_mut() { + for (_commit_hash, salt, uids, values, version_key) in commits.iter() { + let reveal_result = SubtensorModule::reveal_weights( + RuntimeOrigin::signed(*hotkey), + netuid, + uids.clone(), + values.clone(), + salt.clone(), + *version_key, + ); + + assert_eq!( + reveal_result, + Err(Error::::ExpiredWeightCommit.into()), + "Expected ExpiredWeightCommit error, got {:?}", + reveal_result + ); + } + } + + for hotkey in &hotkeys { + commit_info_map.insert(*hotkey, Vec::new()); + + for i in 0..max_unrevealed_commits { + let (uids, values, salt, version_key) = 
generate_unique_data(i + commits_per_hotkey); + let commit_hash: H256 = BlakeTwo256::hash_of(&( + *hotkey, + netuid, + uids.clone(), + values.clone(), + salt.clone(), + version_key, + )); + + assert_ok!(SubtensorModule::commit_weights( + RuntimeOrigin::signed(*hotkey), + netuid, + commit_hash + )); + } + + let (uids, values, salt, version_key) = generate_unique_data(max_unrevealed_commits + commits_per_hotkey); + let commit_hash: H256 = BlakeTwo256::hash_of(&( + *hotkey, + netuid, + uids.clone(), + values.clone(), + salt.clone(), + version_key, + )); + + assert_err!( + SubtensorModule::commit_weights( + RuntimeOrigin::signed(*hotkey), + netuid, + commit_hash + ), + Error::::TooManyUnrevealedCommits + ); + } + + // Attempt unauthorized reveal + let unauthorized_hotkey = hotkeys[0]; + let target_hotkey = hotkeys[1]; + if let Some(commits) = commit_info_map.get(&target_hotkey) { + if let Some((_commit_hash, salt, uids, values, version_key)) = commits.first() { + assert_err!( + SubtensorModule::reveal_weights( + RuntimeOrigin::signed(unauthorized_hotkey), + netuid, + uids.clone(), + values.clone(), + salt.clone(), + *version_key, + ), + Error::::InvalidRevealCommitHashNotMatch + ); + } + } + + let non_committing_hotkey: ::AccountId = U256::from(9999); + assert_err!( + SubtensorModule::reveal_weights( + RuntimeOrigin::signed(non_committing_hotkey), + netuid, + vec![0, 1], + vec![10, 20], + vec![0; 8], + 0, + ), + Error::::NoWeightsCommitFound + ); + + assert_eq!(SubtensorModule::get_reveal_period(netuid), 10); + assert_eq!(SubtensorModule::get_tempo(netuid), 200); + }) +} + +#[test] +fn test_get_reveal_blocks() { + new_test_ext(1).execute_with(|| { + // **1. Define Test Parameters** + let netuid: u16 = 1; + let uids: Vec = vec![0, 1]; + let weight_values: Vec = vec![10, 10]; + let salt: Vec = vec![1, 2, 3, 4, 5, 6, 7, 8]; + let version_key: u64 = 0; + let hotkey: U256 = U256::from(1); + + // **2. 
Generate the Commit Hash** + let commit_hash: H256 = BlakeTwo256::hash_of(&( + hotkey, + netuid, + uids.clone(), + weight_values.clone(), + salt.clone(), + version_key, + )); + + // **3. Initialize the Block Number to 0** + System::set_block_number(0); + + // **4. Define Network Parameters** + let tempo: u16 = 5; + add_network(netuid, tempo, 0); + + // **5. Register Neurons and Configure the Network** + register_ok_neuron(netuid, U256::from(3), U256::from(4), 300_000); + register_ok_neuron(netuid, U256::from(1), U256::from(2), 100_000); + SubtensorModule::set_weights_set_rate_limit(netuid, 5); + SubtensorModule::set_validator_permit_for_uid(netuid, 0, true); + SubtensorModule::set_validator_permit_for_uid(netuid, 1, true); + SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); + + // **6. Commit Weights at Block 0** + assert_ok!(SubtensorModule::commit_weights( + RuntimeOrigin::signed(hotkey), + netuid, + commit_hash + )); + + // **7. Retrieve the Reveal Blocks Using `get_reveal_blocks`** + let (first_reveal_block, last_reveal_block) = SubtensorModule::get_reveal_blocks(netuid, 0); + + // **8. Assert Correct Calculation of Reveal Blocks** + // With tempo=5, netuid=1, reveal_period=1: + // commit_epoch = (0 + 2) / 6 = 0 + // reveal_epoch = 0 + 1 = 1 + // first_reveal_block = 1 * 6 - 2 = 4 + // last_reveal_block = 4 + 5 = 9 + assert_eq!(first_reveal_block, 4); + assert_eq!(last_reveal_block, 9); + + // **9. Attempt to Reveal Before `first_reveal_block` (Block 3)** + step_block(3); // Advance to block 3 + let result = SubtensorModule::reveal_weights( + RuntimeOrigin::signed(hotkey), + netuid, + uids.clone(), + weight_values.clone(), + salt.clone(), + version_key, + ); + assert_err!(result, Error::::RevealTooEarly); + + // **10. 
Advance to `first_reveal_block` (Block 4)** + step_block(1); // Advance to block 4 + let result = SubtensorModule::reveal_weights( + RuntimeOrigin::signed(hotkey), + netuid, + uids.clone(), + weight_values.clone(), + salt.clone(), + version_key, + ); + assert_ok!(result); + + // **11. Attempt to Reveal Again at Block 4 (Should Fail)** + let result = SubtensorModule::reveal_weights( + RuntimeOrigin::signed(hotkey), + netuid, + uids.clone(), + weight_values.clone(), + salt.clone(), + version_key, + ); + assert_err!(result, Error::::NoWeightsCommitFound); + + // **12. Advance to After `last_reveal_block` (Block 10)** + step_block(6); // Advance from block 4 to block 10 + + // **13. Attempt to Reveal at Block 10 (Should Fail)** + let result = SubtensorModule::reveal_weights( + RuntimeOrigin::signed(hotkey), + netuid, + uids.clone(), + weight_values.clone(), + salt.clone(), + version_key, + ); + assert_err!(result, Error::::NoWeightsCommitFound); + + // **14. Attempt to Reveal Outside of Any Reveal Window (No Commit)** + let result = SubtensorModule::reveal_weights( + RuntimeOrigin::signed(hotkey), + netuid, + uids.clone(), + weight_values.clone(), + salt.clone(), + version_key, + ); + assert_err!(result, Error::::NoWeightsCommitFound); + + // **15. 
Verify that All Commits Have Been Removed from Storage** + let commits = pallet_subtensor::WeightCommits::::get(netuid, hotkey); + assert!( + commits.is_none(), + "Commits should be cleared after successful reveal" + ); + }) +} + +#[test] +fn test_commit_weights_rate_limit() { + new_test_ext(1).execute_with(|| { + let netuid: u16 = 1; + let uids: Vec = vec![0, 1]; + let weight_values: Vec = vec![10, 10]; + let salt: Vec = vec![1, 2, 3, 4, 5, 6, 7, 8]; + let version_key: u64 = 0; + let hotkey: U256 = U256::from(1); + + let commit_hash: H256 = BlakeTwo256::hash_of(&( + hotkey, + netuid, + uids.clone(), + weight_values.clone(), + salt.clone(), + version_key, + )); + System::set_block_number(11); + + let tempo: u16 = 5; + add_network(netuid, tempo, 0); + + register_ok_neuron(netuid, U256::from(3), U256::from(4), 300_000); + register_ok_neuron(netuid, U256::from(1), U256::from(2), 100_000); + SubtensorModule::set_weights_set_rate_limit(netuid, 10); // Rate limit is 10 blocks + SubtensorModule::set_validator_permit_for_uid(netuid, 0, true); + SubtensorModule::set_validator_permit_for_uid(netuid, 1, true); + SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); + + let neuron_uid = + SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkey).expect("expected uid"); + SubtensorModule::set_last_update_for_uid(netuid, neuron_uid, 0); + + assert_ok!(SubtensorModule::commit_weights( + RuntimeOrigin::signed(hotkey), + netuid, + commit_hash + )); + + let new_salt: Vec = vec![9; 8]; + let new_commit_hash: H256 = BlakeTwo256::hash_of(&( + hotkey, + netuid, + uids.clone(), + weight_values.clone(), + new_salt.clone(), + version_key, + )); + assert_err!( + SubtensorModule::commit_weights(RuntimeOrigin::signed(hotkey), netuid, new_commit_hash), + Error::::CommittingWeightsTooFast + ); + + step_block(5); + assert_err!( + SubtensorModule::commit_weights(RuntimeOrigin::signed(hotkey), netuid, new_commit_hash), + Error::::CommittingWeightsTooFast + ); + + step_block(5); // 
Current block is now 21 + + assert_ok!(SubtensorModule::commit_weights( + RuntimeOrigin::signed(hotkey), + netuid, + new_commit_hash + )); + + SubtensorModule::set_commit_reveal_weights_enabled(netuid, false); + let weights_keys: Vec = vec![0]; + let weight_values: Vec = vec![1]; + + assert_err!( + SubtensorModule::set_weights( + RuntimeOrigin::signed(hotkey), + netuid, + weights_keys.clone(), + weight_values.clone(), + 0 + ), + Error::::SettingWeightsTooFast + ); + + step_block(10); + + assert_ok!(SubtensorModule::set_weights( + RuntimeOrigin::signed(hotkey), + netuid, + weights_keys.clone(), + weight_values.clone(), + 0 + )); + + assert_err!( + SubtensorModule::set_weights( + RuntimeOrigin::signed(hotkey), + netuid, + weights_keys.clone(), + weight_values.clone(), + 0 + ), + Error::::SettingWeightsTooFast + ); + + step_block(5); + + assert_err!( + SubtensorModule::set_weights( + RuntimeOrigin::signed(hotkey), + netuid, + weights_keys.clone(), + weight_values.clone(), + 0 + ), + Error::::SettingWeightsTooFast + ); + + step_block(5); + + assert_ok!(SubtensorModule::set_weights( + RuntimeOrigin::signed(hotkey), + netuid, + weights_keys.clone(), + weight_values.clone(), + 0 + )); + }); } diff --git a/runtime/Cargo.toml b/runtime/Cargo.toml index 8a2886eb1..c82a5aa49 100644 --- a/runtime/Cargo.toml +++ b/runtime/Cargo.toml @@ -20,6 +20,7 @@ name = "spec_version" path = "src/spec_version.rs" [dependencies] +ed25519-dalek = { workspace = true, default-features = false, features = ["alloc"] } subtensor-macros.workspace = true subtensor-custom-rpc-runtime-api = { path = "../pallets/subtensor/runtime-api", default-features = false } smallvec = { workspace = true } @@ -93,6 +94,23 @@ pallet-registry = { default-features = false, path = "../pallets/registry" } # Metadata commitment pallet pallet-commitments = { default-features = false, path = "../pallets/commitments" } +# Frontier +fp-evm = { workspace = true } +fp-rpc = { workspace = true } +fp-self-contained = { workspace 
= true } + +# Frontier FRAME +pallet-base-fee = { workspace = true } +pallet-dynamic-fee = { workspace = true } +pallet-ethereum = { workspace = true } +pallet-evm = { workspace = true } +pallet-evm-chain-id = { workspace = true } +pallet-evm-precompile-modexp = { workspace = true } +pallet-evm-precompile-sha3fips = { workspace = true } +pallet-evm-precompile-simple = { workspace = true } +pallet-hotfix-sufficients = { workspace = true } +fp-account = { workspace = true } + [dev-dependencies] frame-metadata = { workspace = true } sp-io = { workspace = true } @@ -110,6 +128,7 @@ std = [ "frame-system-benchmarking?/std", "frame-benchmarking/std", "codec/std", + "ed25519-dalek/std", "scale-info/std", "frame-executive/std", "frame-metadata-hash-extension/std", @@ -156,6 +175,21 @@ std = [ "log/std", "sp-storage/std", "sp-genesis-builder/std", + # Frontier + "fp-evm/std", + "fp-rpc/std", + "fp-self-contained/std", + # Frontier FRAME + "pallet-base-fee/std", + "pallet-dynamic-fee/std", + "pallet-ethereum/std", + "pallet-evm/std", + "pallet-evm-chain-id/std", + "pallet-evm-precompile-modexp/std", + "pallet-evm-precompile-sha3fips/std", + "pallet-evm-precompile-simple/std", + "pallet-hotfix-sufficients/std", + "fp-account/std", ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", @@ -179,6 +213,11 @@ runtime-benchmarks = [ "pallet-preimage/runtime-benchmarks", "pallet-scheduler/runtime-benchmarks", "pallet-sudo/runtime-benchmarks", + + # EVM + Frontier + "pallet-ethereum/runtime-benchmarks", + "pallet-evm/runtime-benchmarks", + "pallet-hotfix-sufficients/runtime-benchmarks", ] try-runtime = [ "frame-try-runtime/try-runtime", @@ -189,6 +228,7 @@ try-runtime = [ "pallet-sudo/try-runtime", "pallet-balances/try-runtime", "pallet-grandpa/try-runtime", + "pallet-hotfix-sufficients/try-runtime", "pallet-insecure-randomness-collective-flip/try-runtime", "pallet-timestamp/try-runtime", "pallet-transaction-payment/try-runtime", @@ -205,5 +245,13 @@ try-runtime = [ 
"pallet-admin-utils/try-runtime", "pallet-commitments/try-runtime", "pallet-registry/try-runtime", + + # EVM + Frontier + "fp-self-contained/try-runtime", + "pallet-base-fee/try-runtime", + "pallet-dynamic-fee/try-runtime", + "pallet-ethereum/try-runtime", + "pallet-evm/try-runtime", + "pallet-evm-chain-id/try-runtime", ] metadata-hash = ["substrate-wasm-builder/metadata-hash"] diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index ec5120e9a..3d1cbb582 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -34,14 +34,18 @@ use scale_info::TypeInfo; use smallvec::smallvec; use sp_api::impl_runtime_apis; use sp_consensus_aura::sr25519::AuthorityId as AuraId; -use sp_core::{crypto::KeyTypeId, OpaqueMetadata, RuntimeDebug}; +use sp_core::{ + crypto::{ByteArray, KeyTypeId}, + OpaqueMetadata, H160, H256, U256, +}; use sp_runtime::{ create_runtime_str, generic, impl_opaque_keys, traits::{ - AccountIdLookup, BlakeTwo256, Block as BlockT, IdentifyAccount, NumberFor, One, Verify, + AccountIdLookup, BlakeTwo256, Block as BlockT, DispatchInfoOf, Dispatchable, + IdentifyAccount, NumberFor, One, PostDispatchInfoOf, UniqueSaturatedInto, Verify, }, - transaction_validity::{TransactionSource, TransactionValidity}, - AccountId32, ApplyExtrinsicResult, MultiSignature, + transaction_validity::{TransactionSource, TransactionValidity, TransactionValidityError}, + AccountId32, ApplyExtrinsicResult, ConsensusEngineId, MultiSignature, }; use sp_std::cmp::Ordering; use sp_std::prelude::*; @@ -53,8 +57,8 @@ use sp_version::RuntimeVersion; pub use frame_support::{ construct_runtime, parameter_types, traits::{ - ConstBool, ConstU128, ConstU32, ConstU64, ConstU8, InstanceFilter, KeyOwnerProofSystem, - PrivilegeCmp, Randomness, StorageInfo, + ConstBool, ConstU128, ConstU32, ConstU64, ConstU8, FindAuthor, InstanceFilter, + KeyOwnerProofSystem, OnFinalize, OnTimestampSet, PrivilegeCmp, Randomness, StorageInfo, }, weights::{ constants::{ @@ -68,11 +72,21 @@ pub use frame_support::{ pub use 
frame_system::Call as SystemCall; pub use pallet_balances::Call as BalancesCall; pub use pallet_timestamp::Call as TimestampCall; -use pallet_transaction_payment::{FungibleAdapter, Multiplier}; +use pallet_transaction_payment::{ConstFeeMultiplier, FungibleAdapter, Multiplier}; #[cfg(any(feature = "std", test))] pub use sp_runtime::BuildStorage; pub use sp_runtime::{Perbill, Permill}; +use core::marker::PhantomData; + +mod precompiles; +use precompiles::FrontierPrecompiles; + +// Frontier +use fp_rpc::TransactionStatus; +use pallet_ethereum::{Call::transact, PostLogContent, Transaction as EthereumTransaction}; +use pallet_evm::{Account as EVMAccount, BalanceConverter, FeeCalculator, Runner}; + // Subtensor module pub use pallet_scheduler; pub use pallet_subtensor; @@ -175,6 +189,9 @@ pub const MINUTES: BlockNumber = 60_000 / (MILLISECS_PER_BLOCK as BlockNumber); pub const HOURS: BlockNumber = MINUTES * 60; pub const DAYS: BlockNumber = HOURS * 24; +pub const MAXIMUM_BLOCK_WEIGHT: Weight = + Weight::from_parts(4u64 * WEIGHT_REF_TIME_PER_SECOND, u64::MAX); + // The version information used to identify this runtime when compiled natively. #[cfg(feature = "std")] pub fn native_version() -> NativeVersion { @@ -192,7 +209,7 @@ parameter_types! { // We allow for 2 seconds of compute with a 6 second average block time. 
pub BlockWeights: frame_system::limits::BlockWeights = frame_system::limits::BlockWeights::with_sensible_defaults( - Weight::from_parts(4u64 * WEIGHT_REF_TIME_PER_SECOND, u64::MAX), + MAXIMUM_BLOCK_WEIGHT, NORMAL_DISPATCH_RATIO, ); pub BlockLength: frame_system::limits::BlockLength = frame_system::limits::BlockLength @@ -368,18 +385,15 @@ impl pallet_balances::Config for Runtime { type MaxFreezes = ConstU32<50>; } -pub struct LinearWeightToFee(sp_std::marker::PhantomData); +pub struct LinearWeightToFee; -impl WeightToFeePolynomial for LinearWeightToFee -where - C: Get, -{ +impl WeightToFeePolynomial for LinearWeightToFee { type Balance = Balance; fn polynomial() -> WeightToFeeCoefficients { let coefficient = WeightToFeeCoefficient { coeff_integer: 0, - coeff_frac: Perbill::from_parts(1_000_000), + coeff_frac: Perbill::from_parts(500_000), negative: false, degree: 1, }; @@ -389,9 +403,7 @@ where } parameter_types! { - // Used with LinearWeightToFee conversion. - pub const FeeWeightRatio: u64 = 1; - pub const TransactionByteFee: u128 = 1; + pub const OperationalFeeMultiplier: u8 = 5; pub FeeMultiplier: Multiplier = Multiplier::one(); } @@ -422,19 +434,12 @@ impl impl pallet_transaction_payment::Config for Runtime { type RuntimeEvent = RuntimeEvent; - - //type TransactionByteFee = TransactionByteFee; type OnChargeTransaction = FungibleAdapter; - // Convert dispatch weight to a chargeable fee. - type WeightToFee = LinearWeightToFee; - - type FeeMultiplierUpdate = (); - - type OperationalFeeMultiplier = ConstU8<1>; - + type WeightToFee = LinearWeightToFee; + type OperationalFeeMultiplier = OperationalFeeMultiplier; type LengthToFee = IdentityFee; - //type FeeMultiplierUpdate = ConstFeeMultiplier; + type FeeMultiplierUpdate = ConstFeeMultiplier; } // Configure collective pallet for council @@ -622,17 +627,7 @@ parameter_types! 
{ } #[derive( - Copy, - Clone, - Eq, - PartialEq, - Ord, - PartialOrd, - Encode, - Decode, - RuntimeDebug, - MaxEncodedLen, - TypeInfo, + Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Encode, Decode, Debug, MaxEncodedLen, TypeInfo, )] pub enum ProxyType { Any, @@ -956,7 +951,7 @@ parameter_types! { pub const SubtensorInitialNetworkLockReductionInterval: u64 = 14 * 7200; pub const SubtensorInitialNetworkRateLimit: u64 = 7200; pub const SubtensorInitialTargetStakesPerInterval: u16 = 1; - pub const SubtensorInitialKeySwapCost: u64 = 1_000_000_000; + pub const SubtensorInitialKeySwapCost: u64 = 100_000_000; // 0.1 TAO pub const InitialAlphaHigh: u16 = 58982; // Represents 0.9 as per the production default pub const InitialAlphaLow: u16 = 45875; // Represents 0.7 as per the production default pub const InitialLiquidAlphaOn: bool = false; // Default value for LiquidAlphaOn @@ -1050,6 +1045,218 @@ impl pallet_admin_utils::Config for Runtime { type WeightInfo = pallet_admin_utils::weights::SubstrateWeight; } +// Define the ChainId +parameter_types! { + pub const SubtensorChainId: u64 = 0x03B1; // Unicode for lowercase alpha + // pub const SubtensorChainId: u64 = 0x03C4; // Unicode for lowercase tau +} + +impl pallet_evm_chain_id::Config for Runtime {} + +pub struct FindAuthorTruncated(PhantomData); +impl> FindAuthor for FindAuthorTruncated { + fn find_author<'a, I>(digests: I) -> Option + where + I: 'a + IntoIterator, + { + if let Some(author_index) = F::find_author(digests) { + pallet_aura::Authorities::::get() + .get(author_index as usize) + .and_then(|authority_id| { + let raw_vec = authority_id.to_raw_vec(); + raw_vec.get(4..24).map(H160::from_slice) + }) + } else { + None + } + } +} + +const BLOCK_GAS_LIMIT: u64 = 75_000_000; + +/// `WeightPerGas` is an approximate ratio of the amount of Weight per Gas. +/// +fn weight_per_gas() -> Weight { + (NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT).saturating_div(BLOCK_GAS_LIMIT) +} + +parameter_types! 
{ + pub BlockGasLimit: U256 = U256::from(BLOCK_GAS_LIMIT); + pub const GasLimitPovSizeRatio: u64 = 0; + pub PrecompilesValue: FrontierPrecompiles = FrontierPrecompiles::<_>::new(); + pub WeightPerGas: Weight = weight_per_gas(); + pub SuicideQuickClearLimit: u32 = 0; +} + +/// The difference between EVM decimals and Substrate decimals. +/// Substrate balances has 9 decimals, while EVM has 18, so the +/// difference factor is 9 decimals, or 10^9 +const EVM_DECIMALS_FACTOR: u64 = 1_000_000_000_u64; + +pub struct SubtensorEvmBalanceConverter; +impl BalanceConverter for SubtensorEvmBalanceConverter { + fn into_evm_balance(value: U256) -> Option { + U256::from(UniqueSaturatedInto::::unique_saturated_into(value)) + .checked_mul(U256::from(EVM_DECIMALS_FACTOR)) + } + + fn into_substrate_balance(value: U256) -> Option { + if value <= U256::from(u64::MAX) { + value.checked_div(U256::from(EVM_DECIMALS_FACTOR)) + } else { + None + } + } +} + +impl pallet_evm::Config for Runtime { + type FeeCalculator = BaseFee; + type GasWeightMapping = pallet_evm::FixedGasWeightMapping; + type WeightPerGas = WeightPerGas; + type BlockHashMapping = pallet_ethereum::EthereumBlockHashMapping; + type CallOrigin = pallet_evm::EnsureAddressTruncated; + type WithdrawOrigin = pallet_evm::EnsureAddressTruncated; + type AddressMapping = pallet_evm::HashedAddressMapping; + type Currency = Balances; + type RuntimeEvent = RuntimeEvent; + type PrecompilesType = FrontierPrecompiles; + type PrecompilesValue = PrecompilesValue; + type ChainId = SubtensorChainId; + type BlockGasLimit = BlockGasLimit; + type Runner = pallet_evm::runner::stack::Runner; + type OnChargeTransaction = (); + type OnCreate = (); + type FindAuthor = FindAuthorTruncated; + type GasLimitPovSizeRatio = GasLimitPovSizeRatio; + type SuicideQuickClearLimit = SuicideQuickClearLimit; + type Timestamp = Timestamp; + type WeightInfo = pallet_evm::weights::SubstrateWeight; + type BalanceConverter = SubtensorEvmBalanceConverter; +} + 
+parameter_types! { + pub const PostBlockAndTxnHashes: PostLogContent = PostLogContent::BlockAndTxnHashes; +} + +impl pallet_ethereum::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type StateRoot = pallet_ethereum::IntermediateStateRoot; + type PostLogContent = PostBlockAndTxnHashes; + type ExtraDataLength = ConstU32<30>; +} + +parameter_types! { + pub BoundDivision: U256 = U256::from(1024); +} + +impl pallet_dynamic_fee::Config for Runtime { + type MinGasPriceBoundDivisor = BoundDivision; +} + +parameter_types! { + pub DefaultBaseFeePerGas: U256 = U256::from(20_000_000_000_u128); + pub DefaultElasticity: Permill = Permill::from_parts(125_000); +} +pub struct BaseFeeThreshold; +impl pallet_base_fee::BaseFeeThreshold for BaseFeeThreshold { + fn lower() -> Permill { + Permill::zero() + } + fn ideal() -> Permill { + Permill::from_parts(500_000) + } + fn upper() -> Permill { + Permill::from_parts(1_000_000) + } +} +impl pallet_base_fee::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type Threshold = BaseFeeThreshold; + type DefaultBaseFeePerGas = DefaultBaseFeePerGas; + type DefaultElasticity = DefaultElasticity; +} + +#[derive(Clone)] +pub struct TransactionConverter(PhantomData); + +impl Default for TransactionConverter { + fn default() -> Self { + Self(PhantomData) + } +} + +impl fp_rpc::ConvertTransaction<::Extrinsic> for TransactionConverter { + fn convert_transaction( + &self, + transaction: pallet_ethereum::Transaction, + ) -> ::Extrinsic { + let extrinsic = UncheckedExtrinsic::new_unsigned( + pallet_ethereum::Call::::transact { transaction }.into(), + ); + let encoded = extrinsic.encode(); + ::Extrinsic::decode(&mut &encoded[..]) + .expect("Encoded extrinsic is always valid") + } +} + +impl fp_self_contained::SelfContainedCall for RuntimeCall { + type SignedInfo = H160; + + fn is_self_contained(&self) -> bool { + match self { + RuntimeCall::Ethereum(call) => call.is_self_contained(), + _ => false, + } + } + + fn 
check_self_contained(&self) -> Option> { + match self { + RuntimeCall::Ethereum(call) => call.check_self_contained(), + _ => None, + } + } + + fn validate_self_contained( + &self, + info: &Self::SignedInfo, + dispatch_info: &DispatchInfoOf, + len: usize, + ) -> Option { + match self { + RuntimeCall::Ethereum(call) => call.validate_self_contained(info, dispatch_info, len), + _ => None, + } + } + + fn pre_dispatch_self_contained( + &self, + info: &Self::SignedInfo, + dispatch_info: &DispatchInfoOf, + len: usize, + ) -> Option> { + match self { + RuntimeCall::Ethereum(call) => { + call.pre_dispatch_self_contained(info, dispatch_info, len) + } + _ => None, + } + } + + fn apply_self_contained( + self, + info: Self::SignedInfo, + ) -> Option>> { + match self { + call @ RuntimeCall::Ethereum(pallet_ethereum::Call::transact { .. }) => { + Some(call.dispatch(RuntimeOrigin::from( + pallet_ethereum::RawOrigin::EthereumTransaction(info), + ))) + } + _ => None, + } + } +} + // Create the runtime by composing the FRAME pallets that were previously configured. construct_runtime!( pub struct Runtime @@ -1075,6 +1282,13 @@ construct_runtime!( Commitments: pallet_commitments = 18, AdminUtils: pallet_admin_utils = 19, SafeMode: pallet_safe_mode = 20, + + // Frontier + Ethereum: pallet_ethereum = 21, + EVM: pallet_evm = 22, + EVMChainId: pallet_evm_chain_id = 23, + DynamicFee: pallet_dynamic_fee = 24, + BaseFee: pallet_base_fee = 25, } ); @@ -1109,7 +1323,12 @@ type Migrations = ( // Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + fp_self_contained::UncheckedExtrinsic; + +/// Extrinsic type that has already been checked. +pub type CheckedExtrinsic = + fp_self_contained::CheckedExtrinsic; + // The payload being signed in transactions. pub type SignedPayload = generic::SignedPayload; // Executive: handles dispatch to the various modules. @@ -1323,6 +1542,262 @@ impl_runtime_apis! 
{ } } + impl fp_rpc::EthereumRuntimeRPCApi for Runtime { + fn chain_id() -> u64 { + ::ChainId::get() + } + + fn account_basic(address: H160) -> EVMAccount { + let (account, _) = pallet_evm::Pallet::::account_basic(&address); + account + } + + fn gas_price() -> U256 { + let (gas_price, _) = ::FeeCalculator::min_gas_price(); + gas_price + } + + fn account_code_at(address: H160) -> Vec { + pallet_evm::AccountCodes::::get(address) + } + + fn author() -> H160 { + >::find_author() + } + + fn storage_at(address: H160, index: U256) -> H256 { + let mut tmp = [0u8; 32]; + index.to_big_endian(&mut tmp); + pallet_evm::AccountStorages::::get(address, H256::from_slice(&tmp[..])) + } + + fn call( + from: H160, + to: H160, + data: Vec, + value: U256, + gas_limit: U256, + max_fee_per_gas: Option, + max_priority_fee_per_gas: Option, + nonce: Option, + estimate: bool, + access_list: Option)>>, + ) -> Result { + use pallet_evm::GasWeightMapping as _; + + let config = if estimate { + let mut config = ::config().clone(); + config.estimate = true; + Some(config) + } else { + None + }; + + // Estimated encoded transaction size must be based on the heaviest transaction + // type (EIP1559Transaction) to be compatible with all transaction types. 
+ let mut estimated_transaction_len = data.len() + + // pallet ethereum index: 1 + // transact call index: 1 + // Transaction enum variant: 1 + // chain_id 8 bytes + // nonce: 32 + // max_priority_fee_per_gas: 32 + // max_fee_per_gas: 32 + // gas_limit: 32 + // action: 21 (enum variant + call address) + // value: 32 + // access_list: 1 (empty vec size) + // 65 bytes signature + 258; + + if access_list.is_some() { + estimated_transaction_len += access_list.encoded_size(); + } + + + let gas_limit = if gas_limit > U256::from(u64::MAX) { + u64::MAX + } else { + gas_limit.low_u64() + }; + let without_base_extrinsic_weight = true; + + let (weight_limit, proof_size_base_cost) = + match ::GasWeightMapping::gas_to_weight( + gas_limit, + without_base_extrinsic_weight + ) { + weight_limit if weight_limit.proof_size() > 0 => { + (Some(weight_limit), Some(estimated_transaction_len as u64)) + } + _ => (None, None), + }; + + ::Runner::call( + from, + to, + data, + value, + gas_limit.unique_saturated_into(), + max_fee_per_gas, + max_priority_fee_per_gas, + nonce, + access_list.unwrap_or_default(), + false, + true, + weight_limit, + proof_size_base_cost, + config.as_ref().unwrap_or(::config()), + ).map_err(|err| err.error.into()) + } + + fn create( + from: H160, + data: Vec, + value: U256, + gas_limit: U256, + max_fee_per_gas: Option, + max_priority_fee_per_gas: Option, + nonce: Option, + estimate: bool, + access_list: Option)>>, + ) -> Result { + use pallet_evm::GasWeightMapping as _; + + let config = if estimate { + let mut config = ::config().clone(); + config.estimate = true; + Some(config) + } else { + None + }; + + + let mut estimated_transaction_len = data.len() + + // from: 20 + // value: 32 + // gas_limit: 32 + // nonce: 32 + // 1 byte transaction action variant + // chain id 8 bytes + // 65 bytes signature + 190; + + if max_fee_per_gas.is_some() { + estimated_transaction_len += 32; + } + if max_priority_fee_per_gas.is_some() { + estimated_transaction_len += 32; + } + if 
access_list.is_some() { + estimated_transaction_len += access_list.encoded_size(); + } + + + let gas_limit = if gas_limit > U256::from(u64::MAX) { + u64::MAX + } else { + gas_limit.low_u64() + }; + let without_base_extrinsic_weight = true; + + let (weight_limit, proof_size_base_cost) = + match ::GasWeightMapping::gas_to_weight( + gas_limit, + without_base_extrinsic_weight + ) { + weight_limit if weight_limit.proof_size() > 0 => { + (Some(weight_limit), Some(estimated_transaction_len as u64)) + } + _ => (None, None), + }; + + let whitelist = pallet_evm::WhitelistedCreators::::get(); + ::Runner::create( + from, + data, + value, + gas_limit.unique_saturated_into(), + max_fee_per_gas, + max_priority_fee_per_gas, + nonce, + access_list.unwrap_or_default(), + whitelist, + false, + true, + weight_limit, + proof_size_base_cost, + config.as_ref().unwrap_or(::config()), + ).map_err(|err| err.error.into()) + } + + fn current_transaction_statuses() -> Option> { + pallet_ethereum::CurrentTransactionStatuses::::get() + } + + fn current_block() -> Option { + pallet_ethereum::CurrentBlock::::get() + } + + fn current_receipts() -> Option> { + pallet_ethereum::CurrentReceipts::::get() + } + + fn current_all() -> ( + Option, + Option>, + Option> + ) { + ( + pallet_ethereum::CurrentBlock::::get(), + pallet_ethereum::CurrentReceipts::::get(), + pallet_ethereum::CurrentTransactionStatuses::::get() + ) + } + + fn extrinsic_filter( + xts: Vec<::Extrinsic>, + ) -> Vec { + xts.into_iter().filter_map(|xt| match xt.0.function { + RuntimeCall::Ethereum(transact { transaction }) => Some(transaction), + _ => None + }).collect::>() + } + + fn elasticity() -> Option { + Some(pallet_base_fee::Elasticity::::get()) + } + + fn gas_limit_multiplier_support() {} + + fn pending_block( + xts: Vec<::Extrinsic>, + ) -> (Option, Option>) { + for ext in xts.into_iter() { + let _ = Executive::apply_extrinsic(ext); + } + + Ethereum::on_finalize(System::block_number() + 1); + + ( + 
pallet_ethereum::CurrentBlock::::get(), + pallet_ethereum::CurrentTransactionStatuses::::get() + ) + } + + fn initialize_pending_block(header: &::Header) { + Executive::initialize_block(header); + } + } + + impl fp_rpc::ConvertTransactionRuntimeApi for Runtime { + fn convert_transaction(transaction: EthereumTransaction) -> ::Extrinsic { + UncheckedExtrinsic::new_unsigned( + pallet_ethereum::Call::::transact { transaction }.into(), + ) + } + } + #[cfg(feature = "runtime-benchmarks")] impl frame_benchmarking::Benchmark for Runtime { fn benchmark_metadata(extra: bool) -> ( diff --git a/runtime/src/precompiles/balance_transfer.rs b/runtime/src/precompiles/balance_transfer.rs new file mode 100644 index 000000000..6ae554fa4 --- /dev/null +++ b/runtime/src/precompiles/balance_transfer.rs @@ -0,0 +1,63 @@ +use frame_system::RawOrigin; +use pallet_evm::{ + BalanceConverter, ExitError, ExitSucceed, PrecompileFailure, PrecompileHandle, + PrecompileOutput, PrecompileResult, +}; +use sp_core::U256; +use sp_runtime::traits::{Dispatchable, UniqueSaturatedInto}; +use sp_std::vec; + +use crate::{Runtime, RuntimeCall}; + +use crate::precompiles::{bytes_to_account_id, get_method_id, get_slice}; + +pub const BALANCE_TRANSFER_INDEX: u64 = 2048; + +pub struct BalanceTransferPrecompile; + +impl BalanceTransferPrecompile { + pub fn execute(handle: &mut impl PrecompileHandle) -> PrecompileResult { + let txdata = handle.input(); + + // Match method ID: keccak256("transfer(bytes32)") + let method: &[u8] = get_slice(txdata, 0, 4)?; + if get_method_id("transfer(bytes32)") == method { + // Forward all received value to the destination address + let amount: U256 = handle.context().apparent_value; + + // This is hardcoded hashed address mapping of + // 0x0000000000000000000000000000000000000800 to ss58 public key + // i.e. 
the contract sends funds it received to the destination address + // from the method parameter + let address_bytes_src: [u8; 32] = [ + 0x07, 0xec, 0x71, 0x2a, 0x5d, 0x38, 0x43, 0x4d, 0xdd, 0x03, 0x3f, 0x8f, 0x02, 0x4e, + 0xcd, 0xfc, 0x4b, 0xb5, 0x95, 0x1c, 0x13, 0xc3, 0x08, 0x5c, 0x39, 0x9c, 0x8a, 0x5f, + 0x62, 0x93, 0x70, 0x5d, + ]; + let address_bytes_dst: &[u8] = get_slice(txdata, 4, 36)?; + let account_id_src = bytes_to_account_id(&address_bytes_src)?; + let account_id_dst = bytes_to_account_id(address_bytes_dst)?; + let amount_sub = + ::BalanceConverter::into_substrate_balance(amount) + .ok_or(ExitError::OutOfFund)?; + + let call = + RuntimeCall::Balances(pallet_balances::Call::::transfer_allow_death { + dest: account_id_dst.into(), + value: amount_sub.unique_saturated_into(), + }); + + let result = call.dispatch(RawOrigin::Signed(account_id_src).into()); + if result.is_err() { + return Err(PrecompileFailure::Error { + exit_status: ExitError::OutOfFund, + }); + } + } + + Ok(PrecompileOutput { + exit_status: ExitSucceed::Returned, + output: vec![], + }) + } +} diff --git a/runtime/src/precompiles/ed25519.rs b/runtime/src/precompiles/ed25519.rs new file mode 100644 index 000000000..83be4ca77 --- /dev/null +++ b/runtime/src/precompiles/ed25519.rs @@ -0,0 +1,44 @@ +extern crate alloc; + +use alloc::vec::Vec; + +use crate::precompiles::get_slice; +use ed25519_dalek::{Signature, Verifier, VerifyingKey}; +use fp_evm::{ExitError, ExitSucceed, LinearCostPrecompile, PrecompileFailure}; + +pub const EDVERIFY_PRECOMPILE_INDEX: u64 = 1026; + +pub struct Ed25519Verify; + +impl LinearCostPrecompile for Ed25519Verify { + const BASE: u64 = 15; + const WORD: u64 = 3; + + fn execute(input: &[u8], _: u64) -> Result<(ExitSucceed, Vec), PrecompileFailure> { + if input.len() < 132 { + return Err(PrecompileFailure::Error { + exit_status: ExitError::Other("input must contain 128 bytes".into()), + }); + }; + + let mut buf = [0u8; 32]; + + let msg = get_slice(input, 4, 36)?; + let pk = 
VerifyingKey::try_from(get_slice(input, 36, 68)?).map_err(|_| { + PrecompileFailure::Error { + exit_status: ExitError::Other("Public key recover failed".into()), + } + })?; + let sig = Signature::try_from(get_slice(input, 68, 132)?).map_err(|_| { + PrecompileFailure::Error { + exit_status: ExitError::Other("Signature recover failed".into()), + } + })?; + + if pk.verify(msg, &sig).is_ok() { + buf[31] = 1u8; + }; + + Ok((ExitSucceed::Returned, buf.to_vec())) + } +} diff --git a/runtime/src/precompiles/mod.rs b/runtime/src/precompiles/mod.rs new file mode 100644 index 000000000..22f2a4881 --- /dev/null +++ b/runtime/src/precompiles/mod.rs @@ -0,0 +1,125 @@ +use core::marker::PhantomData; +use sp_core::{hashing::keccak_256, H160}; +use sp_runtime::AccountId32; + +use pallet_evm::{ + ExitError, IsPrecompileResult, Precompile, PrecompileFailure, PrecompileHandle, + PrecompileResult, PrecompileSet, +}; +use pallet_evm_precompile_modexp::Modexp; +use pallet_evm_precompile_sha3fips::Sha3FIPS256; +use pallet_evm_precompile_simple::{ECRecover, ECRecoverPublicKey, Identity, Ripemd160, Sha256}; + +// Include custom precompiles +mod balance_transfer; +mod ed25519; +mod staking; + +use balance_transfer::*; +use ed25519::*; +use staking::*; + +pub struct FrontierPrecompiles(PhantomData); + +impl Default for FrontierPrecompiles +where + R: pallet_evm::Config, +{ + fn default() -> Self { + Self::new() + } +} + +impl FrontierPrecompiles +where + R: pallet_evm::Config, +{ + pub fn new() -> Self { + Self(Default::default()) + } + pub fn used_addresses() -> [H160; 10] { + [ + hash(1), + hash(2), + hash(3), + hash(4), + hash(5), + hash(1024), + hash(1025), + hash(EDVERIFY_PRECOMPILE_INDEX), + hash(BALANCE_TRANSFER_INDEX), + hash(STAKING_PRECOMPILE_INDEX), + ] + } +} +impl PrecompileSet for FrontierPrecompiles +where + R: pallet_evm::Config, +{ + fn execute(&self, handle: &mut impl PrecompileHandle) -> Option { + match handle.code_address() { + // Ethereum precompiles : + a if a == 
hash(1) => Some(ECRecover::execute(handle)), + a if a == hash(2) => Some(Sha256::execute(handle)), + a if a == hash(3) => Some(Ripemd160::execute(handle)), + a if a == hash(4) => Some(Identity::execute(handle)), + a if a == hash(5) => Some(Modexp::execute(handle)), + // Non-Frontier specific nor Ethereum precompiles : + a if a == hash(1024) => Some(Sha3FIPS256::execute(handle)), + a if a == hash(1025) => Some(ECRecoverPublicKey::execute(handle)), + a if a == hash(EDVERIFY_PRECOMPILE_INDEX) => Some(Ed25519Verify::execute(handle)), + // Subtensor specific precompiles : + a if a == hash(BALANCE_TRANSFER_INDEX) => { + Some(BalanceTransferPrecompile::execute(handle)) + } + a if a == hash(STAKING_PRECOMPILE_INDEX) => Some(StakingPrecompile::execute(handle)), + _ => None, + } + } + + fn is_precompile(&self, address: H160, _gas: u64) -> IsPrecompileResult { + IsPrecompileResult::Answer { + is_precompile: Self::used_addresses().contains(&address), + extra_cost: 0, + } + } +} + +fn hash(a: u64) -> H160 { + H160::from_low_u64_be(a) +} + +/// Returns Ethereum method ID from an str method signature +/// +pub fn get_method_id(method_signature: &str) -> [u8; 4] { + // Calculate the full Keccak-256 hash of the method signature + let hash = keccak_256(method_signature.as_bytes()); + + // Extract the first 4 bytes to get the method ID + [hash[0], hash[1], hash[2], hash[3]] +} + +/// Convert bytes to AccountId32 with PrecompileFailure as Error +/// which consumes all gas +/// +pub fn bytes_to_account_id(account_id_bytes: &[u8]) -> Result { + AccountId32::try_from(account_id_bytes).map_err(|_| { + log::info!("Error parsing account id bytes {:?}", account_id_bytes); + PrecompileFailure::Error { + exit_status: ExitError::InvalidRange, + } + }) +} + +/// Takes a slice from bytes with PrecompileFailure as Error +/// +pub fn get_slice(data: &[u8], from: usize, to: usize) -> Result<&[u8], PrecompileFailure> { + let maybe_slice = data.get(from..to); + if let Some(slice) = maybe_slice { + 
Ok(slice) + } else { + Err(PrecompileFailure::Error { + exit_status: ExitError::InvalidRange, + }) + } +} diff --git a/runtime/src/precompiles/solidity/balanceTransfer.abi b/runtime/src/precompiles/solidity/balanceTransfer.abi new file mode 100644 index 000000000..99913b900 --- /dev/null +++ b/runtime/src/precompiles/solidity/balanceTransfer.abi @@ -0,0 +1,15 @@ +[ + { + "inputs": [ + { + "internalType": "bytes32", + "name": "data", + "type": "bytes32" + } + ], + "name": "transfer", + "outputs": [], + "stateMutability": "payable", + "type": "function" + } +] \ No newline at end of file diff --git a/runtime/src/precompiles/solidity/balanceTransfer.sol b/runtime/src/precompiles/solidity/balanceTransfer.sol new file mode 100644 index 000000000..42790b900 --- /dev/null +++ b/runtime/src/precompiles/solidity/balanceTransfer.sol @@ -0,0 +1,7 @@ +pragma solidity ^0.8.0; + +address constant ISUBTENSOR_BALANCE_TRANSFER_ADDRESS = 0x0000000000000000000000000000000000000800; + +interface ISubtensorBalanceTransfer { + function transfer(bytes32 data) external payable; +} \ No newline at end of file diff --git a/runtime/src/precompiles/solidity/ed25519Verify.abi b/runtime/src/precompiles/solidity/ed25519Verify.abi new file mode 100644 index 000000000..05d75ae6c --- /dev/null +++ b/runtime/src/precompiles/solidity/ed25519Verify.abi @@ -0,0 +1,14 @@ +[ + { + "inputs": [ + { "internalType": "bytes32", "name": "message", "type": "bytes32" }, + { "internalType": "bytes32", "name": "publicKey", "type": "bytes32" }, + { "internalType": "bytes32", "name": "r", "type": "bytes32" }, + { "internalType": "bytes32", "name": "s", "type": "bytes32" } + ], + "name": "verify", + "outputs": [{ "internalType": "bool", "name": "", "type": "bool" }], + "stateMutability": "pure", + "type": "function" + } +] \ No newline at end of file diff --git a/runtime/src/precompiles/solidity/ed25519Verify.sol b/runtime/src/precompiles/solidity/ed25519Verify.sol new file mode 100644 index 000000000..035feb4cc --- 
/dev/null +++ b/runtime/src/precompiles/solidity/ed25519Verify.sol @@ -0,0 +1,17 @@ +// SPDX-License-Identifier: GPL-3.0 +pragma solidity ^0.8.0; + +address constant IED25519VERIFY_ADDRESS = 0x0000000000000000000000000000000000000402; + +interface IEd25519Verify { + /** + * @dev Verifies Ed25519 signature using provided message and public key. + * + * @param message The 32-byte signature payload message. + * @param publicKey 32-byte public key matching to private key used to sign the message. + * @param r The Ed25519 signature commitment (first 32 bytes). + * @param s The Ed25519 signature response (second 32 bytes). + * @return bool Returns true if the signature is valid for the given message and public key, false otherwise. + */ + function verify(bytes32 message, bytes32 publicKey, bytes32 r, bytes32 s) external pure returns (bool); +} diff --git a/runtime/src/precompiles/solidity/staking.abi b/runtime/src/precompiles/solidity/staking.abi new file mode 100644 index 000000000..44b1829c4 --- /dev/null +++ b/runtime/src/precompiles/solidity/staking.abi @@ -0,0 +1,43 @@ +[ + { + "inputs": [ + { + "internalType": "bytes32", + "name": "hotkey", + "type": "bytes32" + }, + { + "internalType": "uint16", + "name": "netuid", + "type": "uint16" + } + ], + "name": "addStake", + "outputs": [], + "stateMutability": "payable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "bytes32", + "name": "hotkey", + "type": "bytes32" + }, + { + "internalType": "uint256", + "name": "amount", + "type": "uint256" + }, + { + "internalType": "uint16", + "name": "netuid", + "type": "uint16" + } + ], + "name": "removeStake", + "outputs": [], + "stateMutability": "payable", + "type": "function" + } +] diff --git a/runtime/src/precompiles/solidity/staking.sol b/runtime/src/precompiles/solidity/staking.sol new file mode 100644 index 000000000..ec7fb7297 --- /dev/null +++ b/runtime/src/precompiles/solidity/staking.sol @@ -0,0 +1,45 @@ +pragma solidity ^0.8.0; + +address constant 
ISTAKING_ADDRESS = 0x0000000000000000000000000000000000000801; + +interface IStaking { + /** + * @dev Adds a subtensor stake corresponding to the value sent with the transaction, associated + * with the `hotkey`. + * + * This function allows external accounts and contracts to stake TAO into the subtensor pallet, + * which effectively calls `add_stake` on the subtensor pallet with specified hotkey as a parameter + * and coldkey being the hashed address mapping of H160 sender address to Substrate ss58 address as + * implemented in Frontier HashedAddressMapping: + * https://github.com/polkadot-evm/frontier/blob/2e219e17a526125da003e64ef22ec037917083fa/frame/evm/src/lib.rs#L739 + * + * @param hotkey The hotkey public key (32 bytes). + * @param netuid The subnet to stake to (uint16). Currently a noop, functionality will be enabled with RAO. + * + * Requirements: + * - `hotkey` must be a valid hotkey registered on the network, ensuring that the stake is + * correctly attributed. + */ + function addStake(bytes32 hotkey, uint16 netuid) external payable; + + /** + * @dev Removes a subtensor stake `amount` from the specified `hotkey`. + * + * This function allows external accounts and contracts to unstake TAO from the subtensor pallet, + * which effectively calls `remove_stake` on the subtensor pallet with specified hotkey as a parameter + * and coldkey being the hashed address mapping of H160 sender address to Substrate ss58 address as + * implemented in Frontier HashedAddressMapping: + * https://github.com/polkadot-evm/frontier/blob/2e219e17a526125da003e64ef22ec037917083fa/frame/evm/src/lib.rs#L739 + * + * @param hotkey The hotkey public key (32 bytes). + * @param amount The amount to unstake in rao. + * @param netuid The subnet to stake to (uint16). Currently a noop, functionality will be enabled with RAO. + + * + * Requirements: + * - `hotkey` must be a valid hotkey registered on the network, ensuring that the stake is + * correctly attributed. 
+ * - The existing stake amount must be not lower than specified amount + */ + function removeStake(bytes32 hotkey, uint256 amount, uint16 netuid) external; +} diff --git a/runtime/src/precompiles/staking.rs b/runtime/src/precompiles/staking.rs new file mode 100644 index 000000000..e6237dfcf --- /dev/null +++ b/runtime/src/precompiles/staking.rs @@ -0,0 +1,186 @@ +// The goal of staking precompile is to allow interaction between EVM users and smart contracts and +// subtensor staking functionality, namely add_stake, and remove_stake extrinsics, as well as the +// staking state. +// +// Additional requirement is to preserve compatibility with Ethereum indexers, which requires +// no balance transfers from EVM accounts without a corresponding transaction that can be +// parsed by an indexer. +// +// Implementation of add_stake: +// - User transfers balance that will be staked to the precompile address with a payable +// method addStake. This method also takes hotkey public key (bytes32) of the hotkey +// that the stake should be assigned to. +// - Precompile transfers the balance back to the signing address, and then invokes +// do_add_stake from subtensor pallet with signing origin that matches to HashedAddressMapping +// of the message sender, which will effectively withdraw and stake balance from the message +// sender. +// - Precompile checks the result of do_add_stake and, in case of a failure, reverts the transaction, +// and leaves the balance on the message sender account. +// +// Implementation of remove_stake: +// - User invokes removeStake method and specifies hotkey public key (bytes32) of the hotkey +// to remove stake from, and the amount to unstake. 
+// - Precompile calls do_remove_stake method of the subtensor pallet with the signing origin of message +// sender, which effectively unstakes the specified amount and credits it to the message sender +// - Precompile checks the result of do_remove_stake and, in case of a failure, reverts the transaction. +// + +use frame_system::RawOrigin; +use pallet_evm::{AddressMapping, BalanceConverter, HashedAddressMapping}; +use pallet_evm::{ + ExitError, ExitSucceed, PrecompileFailure, PrecompileHandle, PrecompileOutput, PrecompileResult, +}; +use sp_core::crypto::Ss58Codec; +use sp_core::U256; +use sp_runtime::traits::Dispatchable; +use sp_runtime::traits::{BlakeTwo256, UniqueSaturatedInto}; +use sp_runtime::AccountId32; + +use crate::precompiles::{get_method_id, get_slice}; +use sp_std::vec; + +use crate::{Runtime, RuntimeCall}; +pub const STAKING_PRECOMPILE_INDEX: u64 = 2049; + +pub struct StakingPrecompile; + +impl StakingPrecompile { + pub fn execute(handle: &mut impl PrecompileHandle) -> PrecompileResult { + let txdata = handle.input(); + let method_id = get_slice(txdata, 0, 4)?; + let method_input = txdata + .get(4..) 
+ .map_or_else(vec::Vec::new, |slice| slice.to_vec()); // Avoiding borrowing conflicts + + match method_id { + id if id == get_method_id("addStake(bytes32,uint16)") => { + Self::add_stake(handle, &method_input) + } + id if id == get_method_id("removeStake(bytes32,uint256,uint16)") => { + Self::remove_stake(handle, &method_input) + } + _ => Err(PrecompileFailure::Error { + exit_status: ExitError::InvalidRange, + }), + } + } + + fn add_stake(handle: &mut impl PrecompileHandle, data: &[u8]) -> PrecompileResult { + let hotkey = Self::parse_hotkey(data)?.into(); + let amount: U256 = handle.context().apparent_value; + let amount_sub = + ::BalanceConverter::into_substrate_balance(amount) + .ok_or(ExitError::OutOfFund)?; + + // Create the add_stake call + let call = RuntimeCall::SubtensorModule(pallet_subtensor::Call::::add_stake { + hotkey, + amount_staked: amount_sub.unique_saturated_into(), + }); + // Dispatch the add_stake call + Self::dispatch(handle, call) + } + fn remove_stake(handle: &mut impl PrecompileHandle, data: &[u8]) -> PrecompileResult { + let hotkey = Self::parse_hotkey(data)?.into(); + + // We have to treat this as uint256 (because of Solidity ABI encoding rules, it pads uint64), + // but this will never exceed 8 bytes, so we will ignore higher bytes and will only use lower + // 8 bytes. 
+ let amount = data + .get(56..64) + .map(U256::from_big_endian) + .ok_or(ExitError::OutOfFund)?; + let amount_sub = + ::BalanceConverter::into_substrate_balance(amount) + .ok_or(ExitError::OutOfFund)?; + + let call = RuntimeCall::SubtensorModule(pallet_subtensor::Call::::remove_stake { + hotkey, + amount_unstaked: amount_sub.unique_saturated_into(), + }); + Self::dispatch(handle, call) + } + + fn parse_hotkey(data: &[u8]) -> Result<[u8; 32], PrecompileFailure> { + if data.len() < 32 { + return Err(PrecompileFailure::Error { + exit_status: ExitError::InvalidRange, + }); + } + let mut hotkey = [0u8; 32]; + hotkey.copy_from_slice(get_slice(data, 0, 32)?); + Ok(hotkey) + } + + fn dispatch(handle: &mut impl PrecompileHandle, call: RuntimeCall) -> PrecompileResult { + let account_id = + as AddressMapping>::into_account_id( + handle.context().caller, + ); + + // Transfer the amount back to the caller before executing the staking operation + // let caller = handle.context().caller; + let amount = handle.context().apparent_value; + + if !amount.is_zero() { + Self::transfer_back_to_caller(&account_id, amount)?; + } + + let result = call.dispatch(RawOrigin::Signed(account_id.clone()).into()); + match &result { + Ok(post_info) => log::info!("Dispatch succeeded. Post info: {:?}", post_info), + Err(dispatch_error) => log::error!("Dispatch failed. 
Error: {:?}", dispatch_error), + } + match result { + Ok(_) => Ok(PrecompileOutput { + exit_status: ExitSucceed::Returned, + output: vec![], + }), + Err(_) => Err(PrecompileFailure::Error { + exit_status: ExitError::Other("Subtensor call failed".into()), + }), + } + } + + fn transfer_back_to_caller( + account_id: &AccountId32, + amount: U256, + ) -> Result<(), PrecompileFailure> { + // this is staking smart contract's(0x0000000000000000000000000000000000000801) sr25519 address + let smart_contract_account_id = + match AccountId32::from_ss58check("5CwnBK9Ack1mhznmCnwiibCNQc174pYQVktYW3ayRpLm4K2X") { + Ok(addr) => addr, + Err(_) => { + return Err(PrecompileFailure::Error { + exit_status: ExitError::Other("Invalid SS58 address".into()), + }); + } + }; + let amount_sub = + ::BalanceConverter::into_substrate_balance(amount) + .ok_or(ExitError::OutOfFund)?; + + // Create a transfer call from the smart contract to the caller + let transfer_call = + RuntimeCall::Balances(pallet_balances::Call::::transfer_allow_death { + dest: account_id.clone().into(), + value: amount_sub.unique_saturated_into(), + }); + + // Execute the transfer + let transfer_result = + transfer_call.dispatch(RawOrigin::Signed(smart_contract_account_id).into()); + + if let Err(dispatch_error) = transfer_result { + log::error!( + "Transfer back to caller failed. Error: {:?}", + dispatch_error + ); + return Err(PrecompileFailure::Error { + exit_status: ExitError::Other("Transfer back to caller failed".into()), + }); + } + + Ok(()) + } +} diff --git a/scripts/localnet.sh b/scripts/localnet.sh index 51e3d05a8..1e9618954 100755 --- a/scripts/localnet.sh +++ b/scripts/localnet.sh @@ -56,6 +56,10 @@ echo "*** Building chainspec..." 
"$BASE_DIR/target/release/node-subtensor" build-spec --disable-default-bootnode --raw --chain $CHAIN >$FULL_PATH echo "*** Chainspec built and output to file" +# generate node keys +$BASE_DIR/target/release/node-subtensor key generate-node-key --chain="$FULL_PATH" --base-path /tmp/alice +$BASE_DIR/target/release/node-subtensor key generate-node-key --chain="$FULL_PATH" --base-path /tmp/bob + if [ $NO_PURGE -eq 1 ]; then echo "*** Purging previous state skipped..." else @@ -72,7 +76,7 @@ alice_start=( --chain="$FULL_PATH" --alice --port 30334 - --rpc-port 9946 + --rpc-port 9944 --validator --rpc-cors=all --allow-private-ipv4 diff --git a/scripts/publish.sh b/scripts/publish.sh index 8b2671787..dd5b110f2 100644 --- a/scripts/publish.sh +++ b/scripts/publish.sh @@ -1,28 +1,28 @@ #!/bin/bash set -ex cd support/macros -cargo publish --token $1 +cargo publish $1 cd ../.. cd pallets/commitments -cargo publish --token $1 +cargo publish $1 cd .. cd collective -cargo publish --token $1 +cargo publish $1 cd .. cd registry -cargo publish --token $1 +cargo publish $1 cd .. cd subtensor -cargo publish --token $1 +cargo publish $1 cd runtime-api -cargo publish --token $1 +cargo publish $1 cd ../.. cd admin-utils -cargo publish --token $1 +cargo publish $1 cd ../.. cd runtime -cargo publish --token $1 +cargo publish $1 cd .. cd node -cargo publish --token $1 +cargo publish $1 echo "published successfully." 
diff --git a/support/linting/src/forbid_as_primitive.rs b/support/linting/src/forbid_as_primitive.rs new file mode 100644 index 000000000..b60cf0a49 --- /dev/null +++ b/support/linting/src/forbid_as_primitive.rs @@ -0,0 +1,78 @@ +use super::*; +use syn::{visit::Visit, ExprMethodCall, File, Ident}; + +pub struct ForbidAsPrimitiveConversion; + +impl Lint for ForbidAsPrimitiveConversion { + fn lint(source: &File) -> Result { + let mut visitor = AsPrimitiveVisitor::default(); + + visitor.visit_file(source); + + if !visitor.errors.is_empty() { + return Err(visitor.errors); + } + + Ok(()) + } +} + +#[derive(Default)] +struct AsPrimitiveVisitor { + errors: Vec, +} + +impl<'ast> Visit<'ast> for AsPrimitiveVisitor { + fn visit_expr_method_call(&mut self, node: &'ast ExprMethodCall) { + if is_as_primitive(&node.method) { + self.errors.push(syn::Error::new( + node.method.span(), + "Using 'as_*()' methods is banned to avoid accidental panics. Use `try_into()` instead.", + )); + } + + syn::visit::visit_expr_method_call(self, node); + } +} + +fn is_as_primitive(ident: &Ident) -> bool { + matches!( + ident.to_string().as_str(), + "as_u32" | "as_u64" | "as_u128" | "as_usize" + ) +} + +#[cfg(test)] +mod tests { + use super::*; + + fn lint(input: &str) -> Result { + let expr: ExprMethodCall = syn::parse_str(input).expect("should only use on a method call"); + let mut visitor = AsPrimitiveVisitor::default(); + visitor.visit_expr_method_call(&expr); + if !visitor.errors.is_empty() { + return Err(visitor.errors); + } + Ok(()) + } + + #[test] + fn test_as_primitives() { + let input = r#"x.as_u32()"#; + assert!(lint(input).is_err()); + let input = r#"x.as_u64()"#; + assert!(lint(input).is_err()); + let input = r#"x.as_u128()"#; + assert!(lint(input).is_err()); + let input = r#"x.as_usize()"#; + assert!(lint(input).is_err()); + } + + #[test] + fn test_non_as_primitives() { + let input = r#"x.as_ref()"#; + assert!(lint(input).is_ok()); + let input = r#"x.as_slice()"#; + 
assert!(lint(input).is_ok()); + } +} diff --git a/support/linting/src/lib.rs b/support/linting/src/lib.rs index e5416c1d5..7aaf471c7 100644 --- a/support/linting/src/lib.rs +++ b/support/linting/src/lib.rs @@ -1,8 +1,10 @@ pub mod lint; pub use lint::*; +mod forbid_as_primitive; mod pallet_index; mod require_freeze_struct; +pub use forbid_as_primitive::ForbidAsPrimitiveConversion; pub use pallet_index::RequireExplicitPalletIndex; pub use require_freeze_struct::RequireFreezeStruct; diff --git a/support/procedural-fork/src/benchmark.rs b/support/procedural-fork/src/benchmark.rs index 376200d6e..0eb3c330a 100644 --- a/support/procedural-fork/src/benchmark.rs +++ b/support/procedural-fork/src/benchmark.rs @@ -323,6 +323,24 @@ fn ensure_valid_return_type(item_fn: &ItemFn) -> Result<()> { Ok(()) } +/// Ensure that the passed statements do not contain any forbidden variable names +fn ensure_no_forbidden_variable_names(stmts: &[Stmt]) -> Result<()> { + const FORBIDDEN_VAR_NAMES: [&str; 2] = ["recording", "verify"]; + for stmt in stmts { + let Stmt::Local(l) = stmt else { continue }; + let Pat::Ident(ident) = &l.pat else { continue }; + if FORBIDDEN_VAR_NAMES.contains(&ident.ident.to_string().as_str()) { + return Err(Error::new( + ident.span(), + format!( + "Variables {FORBIDDEN_VAR_NAMES:?} are reserved for benchmarking internals.", + ), + )); + } + } + Ok(()) +} + /// Parses params such as `x: Linear<0, 1>` fn parse_params(item_fn: &ItemFn) -> Result> { let mut params: Vec = Vec::new(); @@ -481,9 +499,12 @@ impl BenchmarkDef { } }; + let setup_stmts = Vec::from(&item_fn.block.stmts[0..i]); + ensure_no_forbidden_variable_names(&setup_stmts)?; + Ok(BenchmarkDef { params, - setup_stmts: Vec::from(&item_fn.block.stmts[0..i]), + setup_stmts, call_def, verify_stmts, last_stmt, @@ -692,18 +713,16 @@ pub fn benchmarks( fn instance( &self, + recording: &mut impl #krate::Recording, components: &[(#krate::BenchmarkParameter, u32)], verify: bool, - ) -> Result< - 
#krate::__private::Box Result<(), #krate::BenchmarkError>>, - #krate::BenchmarkError, - > { + ) -> Result<(), #krate::BenchmarkError> { match self { #( Self::#benchmark_names => { <#benchmark_names as #krate::BenchmarkingSetup< #type_use_generics - >>::instance(&#benchmark_names, components, verify) + >>::instance(&#benchmark_names, recording, components, verify) } ) * @@ -794,17 +813,7 @@ pub fn benchmarks( #krate::benchmarking::set_whitelist(whitelist.clone()); let mut results: #krate::__private::Vec<#krate::BenchmarkResult> = #krate::__private::Vec::new(); - // Always do at least one internal repeat... - for _ in 0 .. internal_repeats.max(1) { - // Always reset the state after the benchmark. - #krate::__private::defer!(#krate::benchmarking::wipe_db()); - - // Set up the externalities environment for the setup we want to - // benchmark. - let closure_to_benchmark = < - SelectedBenchmark as #krate::BenchmarkingSetup<#type_use_generics> - >::instance(&selected_benchmark, c, verify)?; - + let on_before_start = || { // Set the block number to at least 1 so events are deposited. if #krate::__private::Zero::is_zero(&#frame_system::Pallet::::block_number()) { #frame_system::Pallet::::set_block_number(1u32.into()); @@ -822,6 +831,12 @@ pub fn benchmarks( // Reset the read/write counter so we don't count operations in the setup process. #krate::benchmarking::reset_read_write_count(); + }; + + // Always do at least one internal repeat... + for _ in 0 .. internal_repeats.max(1) { + // Always reset the state after the benchmark. + #krate::__private::defer!(#krate::benchmarking::wipe_db()); // Time the extrinsic logic. 
#krate::__private::log::trace!( @@ -831,20 +846,12 @@ pub fn benchmarks( c ); - let start_pov = #krate::benchmarking::proof_size(); - let start_extrinsic = #krate::benchmarking::current_time(); - - closure_to_benchmark()?; - - let finish_extrinsic = #krate::benchmarking::current_time(); - let end_pov = #krate::benchmarking::proof_size(); + let mut recording = #krate::BenchmarkRecording::new(&on_before_start); + >::instance(&selected_benchmark, &mut recording, c, verify)?; // Calculate the diff caused by the benchmark. - let elapsed_extrinsic = finish_extrinsic.saturating_sub(start_extrinsic); - let diff_pov = match (start_pov, end_pov) { - (Some(start), Some(end)) => end.saturating_sub(start), - _ => Default::default(), - }; + let elapsed_extrinsic = recording.elapsed_extrinsic().expect("elapsed time should be recorded"); + let diff_pov = recording.diff_pov().unwrap_or_default(); // Commit the changes to get proper write count #krate::benchmarking::commit_db(); @@ -1163,9 +1170,10 @@ fn expand_benchmark( fn instance( &self, + recording: &mut impl #krate::Recording, components: &[(#krate::BenchmarkParameter, u32)], verify: bool - ) -> Result<#krate::__private::Box Result<(), #krate::BenchmarkError>>, #krate::BenchmarkError> { + ) -> Result<(), #krate::BenchmarkError> { #( // prepare instance #param_names let #param_names = components.iter() @@ -1179,15 +1187,15 @@ fn expand_benchmark( #setup_stmts )* #pre_call - Ok(#krate::__private::Box::new(move || -> Result<(), #krate::BenchmarkError> { - #post_call - if verify { - #( - #verify_stmts - )* - } - #impl_last_stmt - })) + recording.start(); + #post_call + recording.stop(); + if verify { + #( + #verify_stmts + )* + } + #impl_last_stmt } } @@ -1205,18 +1213,15 @@ fn expand_benchmark( // Always reset the state after the benchmark. #krate::__private::defer!(#krate::benchmarking::wipe_db()); - // Set up the benchmark, return execution + verification function. 
- let closure_to_verify = < - SelectedBenchmark as #krate::BenchmarkingSetup - >::instance(&selected_benchmark, &c, true)?; - - // Set the block number to at least 1 so events are deposited. - if #krate::__private::Zero::is_zero(&#frame_system::Pallet::::block_number()) { - #frame_system::Pallet::::set_block_number(1u32.into()); - } + let on_before_start = || { + // Set the block number to at least 1 so events are deposited. + if #krate::__private::Zero::is_zero(&#frame_system::Pallet::::block_number()) { + #frame_system::Pallet::::set_block_number(1u32.into()); + } + }; // Run execution + verification - closure_to_verify() + >::test_instance(&selected_benchmark, &c, &on_before_start) }; if components.is_empty() { diff --git a/support/procedural-fork/src/construct_runtime/expand/call.rs b/support/procedural-fork/src/construct_runtime/expand/call.rs index 7e8c2e856..cc467c31d 100644 --- a/support/procedural-fork/src/construct_runtime/expand/call.rs +++ b/support/procedural-fork/src/construct_runtime/expand/call.rs @@ -69,6 +69,7 @@ pub fn expand_outer_dispatch( quote! { #( #query_call_part_macros )* + /// The aggregated runtime call type. 
#[derive( Clone, PartialEq, Eq, #scrate::__private::codec::Encode, diff --git a/support/procedural-fork/src/construct_runtime/expand/inherent.rs b/support/procedural-fork/src/construct_runtime/expand/inherent.rs index b58d540fe..9705f9703 100644 --- a/support/procedural-fork/src/construct_runtime/expand/inherent.rs +++ b/support/procedural-fork/src/construct_runtime/expand/inherent.rs @@ -61,17 +61,17 @@ pub fn expand_outer_inherent( trait InherentDataExt { fn create_extrinsics(&self) -> - #scrate::__private::sp_std::vec::Vec<<#block as #scrate::sp_runtime::traits::Block>::Extrinsic>; + #scrate::__private::Vec<<#block as #scrate::sp_runtime::traits::Block>::Extrinsic>; fn check_extrinsics(&self, block: &#block) -> #scrate::inherent::CheckInherentsResult; } impl InherentDataExt for #scrate::inherent::InherentData { fn create_extrinsics(&self) -> - #scrate::__private::sp_std::vec::Vec<<#block as #scrate::sp_runtime::traits::Block>::Extrinsic> + #scrate::__private::Vec<<#block as #scrate::sp_runtime::traits::Block>::Extrinsic> { use #scrate::inherent::ProvideInherent; - let mut inherents = #scrate::__private::sp_std::vec::Vec::new(); + let mut inherents = #scrate::__private::Vec::new(); #( #pallet_attrs diff --git a/support/procedural-fork/src/construct_runtime/expand/metadata.rs b/support/procedural-fork/src/construct_runtime/expand/metadata.rs index f98c719ca..9f3d9cd4a 100644 --- a/support/procedural-fork/src/construct_runtime/expand/metadata.rs +++ b/support/procedural-fork/src/construct_runtime/expand/metadata.rs @@ -114,7 +114,7 @@ pub fn expand_runtime_metadata( >(); #scrate::__private::metadata_ir::MetadataIR { - pallets: #scrate::__private::sp_std::vec![ #(#pallets),* ], + pallets: #scrate::__private::vec![ #(#pallets),* ], extrinsic: #scrate::__private::metadata_ir::ExtrinsicMetadataIR { ty, version: <#extrinsic as #scrate::sp_runtime::traits::ExtrinsicMetadata>::VERSION, @@ -159,7 +159,7 @@ pub fn expand_runtime_metadata( }) } - pub fn metadata_versions() 
-> #scrate::__private::sp_std::vec::Vec { + pub fn metadata_versions() -> #scrate::__private::Vec { #scrate::__private::metadata_ir::supported_versions() } } diff --git a/support/procedural-fork/src/construct_runtime/expand/origin.rs b/support/procedural-fork/src/construct_runtime/expand/origin.rs index 2d50777bf..58c8adec5 100644 --- a/support/procedural-fork/src/construct_runtime/expand/origin.rs +++ b/support/procedural-fork/src/construct_runtime/expand/origin.rs @@ -110,25 +110,25 @@ pub fn expand_outer_origin( #[derive(Clone)] pub struct RuntimeOrigin { pub caller: OriginCaller, - filter: #scrate::__private::sp_std::rc::Rc::RuntimeCall) -> bool>>, + filter: #scrate::__private::Rc<#scrate::__private::Box::RuntimeCall) -> bool>>, } #[cfg(not(feature = "std"))] - impl #scrate::__private::sp_std::fmt::Debug for RuntimeOrigin { + impl core::fmt::Debug for RuntimeOrigin { fn fmt( &self, - fmt: &mut #scrate::__private::sp_std::fmt::Formatter, - ) -> #scrate::__private::sp_std::result::Result<(), #scrate::__private::sp_std::fmt::Error> { + fmt: &mut core::fmt::Formatter, + ) -> core::result::Result<(), core::fmt::Error> { fmt.write_str("") } } #[cfg(feature = "std")] - impl #scrate::__private::sp_std::fmt::Debug for RuntimeOrigin { + impl core::fmt::Debug for RuntimeOrigin { fn fmt( &self, - fmt: &mut #scrate::__private::sp_std::fmt::Formatter, - ) -> #scrate::__private::sp_std::result::Result<(), #scrate::__private::sp_std::fmt::Error> { + fmt: &mut core::fmt::Formatter, + ) -> core::result::Result<(), core::fmt::Error> { fmt.debug_struct("Origin") .field("caller", &self.caller) .field("filter", &"[function ptr]") @@ -144,7 +144,7 @@ pub fn expand_outer_origin( fn add_filter(&mut self, filter: impl Fn(&Self::Call) -> bool + 'static) { let f = self.filter.clone(); - self.filter = #scrate::__private::sp_std::rc::Rc::new(Box::new(move |call| { + self.filter = #scrate::__private::Rc::new(#scrate::__private::Box::new(move |call| { f(call) && filter(call) })); } @@ -155,7 
+155,7 @@ pub fn expand_outer_origin( as #scrate::traits::Contains<<#runtime as #system_path::Config>::RuntimeCall> >::contains; - self.filter = #scrate::__private::sp_std::rc::Rc::new(Box::new(filter)); + self.filter = #scrate::__private::Rc::new(#scrate::__private::Box::new(filter)); } fn set_caller_from(&mut self, other: impl Into) { @@ -257,7 +257,7 @@ pub fn expand_outer_origin( impl TryFrom for #system_path::Origin<#runtime> { type Error = OriginCaller; fn try_from(x: OriginCaller) - -> #scrate::__private::sp_std::result::Result<#system_path::Origin<#runtime>, OriginCaller> + -> core::result::Result<#system_path::Origin<#runtime>, OriginCaller> { if let OriginCaller::system(l) = x { Ok(l) @@ -280,7 +280,7 @@ pub fn expand_outer_origin( fn from(x: OriginCaller) -> Self { let mut o = RuntimeOrigin { caller: x, - filter: #scrate::__private::sp_std::rc::Rc::new(Box::new(|_| true)), + filter: #scrate::__private::Rc::new(#scrate::__private::Box::new(|_| true)), }; #scrate::traits::OriginTrait::reset_filter(&mut o); @@ -289,7 +289,7 @@ pub fn expand_outer_origin( } } - impl From for #scrate::__private::sp_std::result::Result<#system_path::Origin<#runtime>, RuntimeOrigin> { + impl From for core::result::Result<#system_path::Origin<#runtime>, RuntimeOrigin> { /// NOTE: converting to pallet origin loses the origin filter information. fn from(val: RuntimeOrigin) -> Self { if let OriginCaller::system(l) = val.caller { @@ -357,7 +357,7 @@ fn expand_origin_caller_variant( } fn expand_origin_pallet_conversions( - scrate: &TokenStream, + _scrate: &TokenStream, runtime: &Ident, pallet: &Pallet, instance: Option<&Ident>, @@ -405,7 +405,7 @@ fn expand_origin_pallet_conversions( } #attr - impl From for #scrate::__private::sp_std::result::Result<#pallet_origin, RuntimeOrigin> { + impl From for core::result::Result<#pallet_origin, RuntimeOrigin> { /// NOTE: converting to pallet origin loses the origin filter information. 
fn from(val: RuntimeOrigin) -> Self { if let OriginCaller::#variant_name(l) = val.caller { @@ -421,7 +421,7 @@ fn expand_origin_pallet_conversions( type Error = OriginCaller; fn try_from( x: OriginCaller, - ) -> #scrate::__private::sp_std::result::Result<#pallet_origin, OriginCaller> { + ) -> core::result::Result<#pallet_origin, OriginCaller> { if let OriginCaller::#variant_name(l) = x { Ok(l) } else { @@ -435,7 +435,7 @@ fn expand_origin_pallet_conversions( type Error = (); fn try_from( x: &'a OriginCaller, - ) -> #scrate::__private::sp_std::result::Result<&'a #pallet_origin, ()> { + ) -> core::result::Result<&'a #pallet_origin, ()> { if let OriginCaller::#variant_name(l) = x { Ok(&l) } else { @@ -449,7 +449,7 @@ fn expand_origin_pallet_conversions( type Error = (); fn try_from( x: &'a RuntimeOrigin, - ) -> #scrate::__private::sp_std::result::Result<&'a #pallet_origin, ()> { + ) -> core::result::Result<&'a #pallet_origin, ()> { if let OriginCaller::#variant_name(l) = &x.caller { Ok(&l) } else { diff --git a/support/procedural-fork/src/construct_runtime/mod.rs b/support/procedural-fork/src/construct_runtime/mod.rs index de688b3d6..9bc271fdc 100644 --- a/support/procedural-fork/src/construct_runtime/mod.rs +++ b/support/procedural-fork/src/construct_runtime/mod.rs @@ -491,7 +491,7 @@ fn construct_runtime_final_expansion( #[doc(hidden)] trait InternalConstructRuntime { #[inline(always)] - fn runtime_metadata(&self) -> #scrate::__private::sp_std::vec::Vec<#scrate::__private::metadata_ir::RuntimeApiMetadataIR> { + fn runtime_metadata(&self) -> #scrate::__private::Vec<#scrate::__private::metadata_ir::RuntimeApiMetadataIR> { Default::default() } } @@ -554,6 +554,7 @@ pub(crate) fn decl_all_pallets<'a>( for pallet_declaration in pallet_declarations { let type_name = &pallet_declaration.name; let pallet = &pallet_declaration.path; + let docs = &pallet_declaration.docs; let mut generics = vec![quote!(#runtime)]; generics.extend( pallet_declaration @@ -567,6 +568,7 @@ 
pub(crate) fn decl_all_pallets<'a>( attrs.extend(TokenStream2::from_str(&feat).expect("was parsed successfully; qed")); } let type_decl = quote!( + #( #[doc = #docs] )* #(#attrs)* pub type #type_name = #pallet::Pallet <#(#generics),*>; ); @@ -703,10 +705,10 @@ pub(crate) fn decl_pallet_runtime_setup( impl #scrate::traits::PalletInfo for PalletInfo { fn index() -> Option { - let type_id = #scrate::__private::sp_std::any::TypeId::of::

(); + let type_id = core::any::TypeId::of::

(); #( #pallet_attrs - if type_id == #scrate::__private::sp_std::any::TypeId::of::<#names>() { + if type_id == core::any::TypeId::of::<#names>() { return Some(#indices) } )* @@ -715,10 +717,10 @@ pub(crate) fn decl_pallet_runtime_setup( } fn name() -> Option<&'static str> { - let type_id = #scrate::__private::sp_std::any::TypeId::of::

(); + let type_id = core::any::TypeId::of::

(); #( #pallet_attrs - if type_id == #scrate::__private::sp_std::any::TypeId::of::<#names>() { + if type_id == core::any::TypeId::of::<#names>() { return Some(#name_strings) } )* @@ -727,10 +729,10 @@ pub(crate) fn decl_pallet_runtime_setup( } fn name_hash() -> Option<[u8; 16]> { - let type_id = #scrate::__private::sp_std::any::TypeId::of::

(); + let type_id = core::any::TypeId::of::

(); #( #pallet_attrs - if type_id == #scrate::__private::sp_std::any::TypeId::of::<#names>() { + if type_id == core::any::TypeId::of::<#names>() { return Some(#name_hashes) } )* @@ -739,10 +741,10 @@ pub(crate) fn decl_pallet_runtime_setup( } fn module_name() -> Option<&'static str> { - let type_id = #scrate::__private::sp_std::any::TypeId::of::

(); + let type_id = core::any::TypeId::of::

(); #( #pallet_attrs - if type_id == #scrate::__private::sp_std::any::TypeId::of::<#names>() { + if type_id == core::any::TypeId::of::<#names>() { return Some(#module_names) } )* @@ -751,10 +753,10 @@ pub(crate) fn decl_pallet_runtime_setup( } fn crate_version() -> Option<#scrate::traits::CrateVersion> { - let type_id = #scrate::__private::sp_std::any::TypeId::of::

(); + let type_id = core::any::TypeId::of::

(); #( #pallet_attrs - if type_id == #scrate::__private::sp_std::any::TypeId::of::<#names>() { + if type_id == core::any::TypeId::of::<#names>() { return Some( <#pallet_structs as #scrate::traits::PalletInfoAccess>::crate_version() ) diff --git a/support/procedural-fork/src/construct_runtime/parse.rs b/support/procedural-fork/src/construct_runtime/parse.rs index 173a8dd12..e5e60b3ff 100644 --- a/support/procedural-fork/src/construct_runtime/parse.rs +++ b/support/procedural-fork/src/construct_runtime/parse.rs @@ -65,8 +65,6 @@ pub enum RuntimeDeclaration { /// Declaration of a runtime with some pallet with implicit declaration of parts. #[derive(Debug)] pub struct ImplicitRuntimeDeclaration { - pub name: Ident, - pub where_section: Option, pub pallets: Vec, } @@ -103,8 +101,6 @@ impl Parse for RuntimeDeclaration { match convert_pallets(pallets.content.inner.into_iter().collect())? { PalletsConversion::Implicit(pallets) => { Ok(RuntimeDeclaration::Implicit(ImplicitRuntimeDeclaration { - name, - where_section, pallets, })) } @@ -131,9 +127,6 @@ impl Parse for RuntimeDeclaration { #[derive(Debug)] pub struct WhereSection { pub span: Span, - pub block: syn::TypePath, - pub node_block: syn::TypePath, - pub unchecked_extrinsic: syn::TypePath, } impl Parse for WhereSection { @@ -152,10 +145,9 @@ impl Parse for WhereSection { } input.parse::()?; } - let block = remove_kind(input, WhereKind::Block, &mut definitions)?.value; - let node_block = remove_kind(input, WhereKind::NodeBlock, &mut definitions)?.value; - let unchecked_extrinsic = - remove_kind(input, WhereKind::UncheckedExtrinsic, &mut definitions)?.value; + remove_kind(input, WhereKind::Block, &mut definitions)?; + remove_kind(input, WhereKind::NodeBlock, &mut definitions)?; + remove_kind(input, WhereKind::UncheckedExtrinsic, &mut definitions)?; if let Some(WhereDefinition { ref kind_span, ref kind, @@ -168,12 +160,7 @@ impl Parse for WhereSection { ); return Err(Error::new(*kind_span, msg)); } - Ok(Self { - span: 
input.span(), - block, - node_block, - unchecked_extrinsic, - }) + Ok(Self { span: input.span() }) } } @@ -188,7 +175,6 @@ pub enum WhereKind { pub struct WhereDefinition { pub kind_span: Span, pub kind: WhereKind, - pub value: syn::TypePath, } impl Parse for WhereDefinition { @@ -210,14 +196,10 @@ impl Parse for WhereDefinition { return Err(lookahead.error()); }; - Ok(Self { - kind_span, - kind, - value: { - let _: Token![=] = input.parse()?; - input.parse()? - }, - }) + let _: Token![=] = input.parse()?; + let _: syn::TypePath = input.parse()?; + + Ok(Self { kind_span, kind }) } } @@ -646,6 +628,8 @@ pub struct Pallet { pub pallet_parts: Vec, /// Expressions specified inside of a #[cfg] attribute. pub cfg_pattern: Vec, + /// The doc literals + pub docs: Vec, } impl Pallet { @@ -827,6 +811,7 @@ fn convert_pallets(pallets: Vec) -> syn::Result>>()?; diff --git a/support/procedural-fork/src/dynamic_params.rs b/support/procedural-fork/src/dynamic_params.rs index 70a18bf34..e1f9f626c 100644 --- a/support/procedural-fork/src/dynamic_params.rs +++ b/support/procedural-fork/src/dynamic_params.rs @@ -94,7 +94,7 @@ impl ToTokens for DynamicParamModAttr { let mut quoted_enum = quote! 
{}; for m in self.inner_mods() { let aggregate_name = - syn::Ident::new(&m.ident.to_string().to_class_case(), m.ident.span()); + syn::Ident::new(&m.ident.to_string().to_pascal_case(), m.ident.span()); let mod_name = &m.ident; let mut attrs = m.attrs.clone(); @@ -245,7 +245,7 @@ impl ToTokens for DynamicPalletParamAttr { ); let aggregate_name = syn::Ident::new( - ¶ms_mod.ident.to_string().to_class_case(), + ¶ms_mod.ident.to_string().to_pascal_case(), params_mod.ident.span(), ); let (mod_name, vis) = (¶ms_mod.ident, ¶ms_mod.vis); diff --git a/support/procedural-fork/src/pallet/expand/constants.rs b/support/procedural-fork/src/pallet/expand/constants.rs index 5153ccf49..19862a8a6 100644 --- a/support/procedural-fork/src/pallet/expand/constants.rs +++ b/support/procedural-fork/src/pallet/expand/constants.rs @@ -30,8 +30,7 @@ struct ConstDef { pub metadata_name: Option, } -/// -/// * Impl fn module_constant_metadata for pallet. +/// Implement the `pallet_constants_metadata` function for the pallet. 
pub fn expand_constants(def: &mut Def) -> proc_macro2::TokenStream { let frame_support = &def.frame_support; let type_impl_gen = &def.type_impl_generics(proc_macro2::Span::call_site()); @@ -97,7 +96,7 @@ pub fn expand_constants(def: &mut Def) -> proc_macro2::TokenStream { name: #ident_str, ty: #frame_support::__private::scale_info::meta_type::<#const_type>(), value: { #default_byte_impl }, - docs: #frame_support::__private::sp_std::vec![ #( #doc ),* ], + docs: #frame_support::__private::vec![ #( #doc ),* ], } }) }); @@ -107,9 +106,9 @@ pub fn expand_constants(def: &mut Def) -> proc_macro2::TokenStream { #[doc(hidden)] pub fn pallet_constants_metadata() - -> #frame_support::__private::sp_std::vec::Vec<#frame_support::__private::metadata_ir::PalletConstantMetadataIR> + -> #frame_support::__private::Vec<#frame_support::__private::metadata_ir::PalletConstantMetadataIR> { - #frame_support::__private::sp_std::vec![ #( #consts ),* ] + #frame_support::__private::vec![ #( #consts ),* ] } } ) diff --git a/support/procedural-fork/src/pallet/expand/documentation.rs b/support/procedural-fork/src/pallet/expand/documentation.rs index adc4f7ce9..62b2e8b8b 100644 --- a/support/procedural-fork/src/pallet/expand/documentation.rs +++ b/support/procedural-fork/src/pallet/expand/documentation.rs @@ -166,9 +166,9 @@ pub fn expand_documentation(def: &mut Def) -> proc_macro2::TokenStream { #[doc(hidden)] pub fn pallet_documentation_metadata() - -> #frame_support::__private::sp_std::vec::Vec<&'static str> + -> #frame_support::__private::Vec<&'static str> { - #frame_support::__private::sp_std::vec![ #( #docs ),* ] + #frame_support::__private::vec![ #( #docs ),* ] } } ) diff --git a/support/procedural-fork/src/pallet/expand/error.rs b/support/procedural-fork/src/pallet/expand/error.rs index e2c3f680c..1b76034ef 100644 --- a/support/procedural-fork/src/pallet/expand/error.rs +++ b/support/procedural-fork/src/pallet/expand/error.rs @@ -66,28 +66,30 @@ pub fn expand_error(def: &mut Def) -> 
proc_macro2::TokenStream { #[doc(hidden)] #[codec(skip)] __Ignore( - #frame_support::__private::sp_std::marker::PhantomData<(#type_use_gen)>, + core::marker::PhantomData<(#type_use_gen)>, #frame_support::Never, ) ); - let as_str_matches = error.variants.iter().map( - |VariantDef { ident: variant, field: field_ty, docs: _, cfg_attrs }| { - let variant_str = variant.to_string(); - let cfg_attrs = cfg_attrs.iter().map(|attr| attr.to_token_stream()); - match field_ty { - Some(VariantField { is_named: true }) => { - quote::quote_spanned!(error.attr_span => #( #cfg_attrs )* Self::#variant { .. } => #variant_str,) - }, - Some(VariantField { is_named: false }) => { - quote::quote_spanned!(error.attr_span => #( #cfg_attrs )* Self::#variant(..) => #variant_str,) - }, - None => { - quote::quote_spanned!(error.attr_span => #( #cfg_attrs )* Self::#variant => #variant_str,) - }, - } - }, - ); + let as_str_matches = + error + .variants + .iter() + .map(|VariantDef { ident: variant, field: field_ty, cfg_attrs }| { + let variant_str = variant.to_string(); + let cfg_attrs = cfg_attrs.iter().map(|attr| attr.to_token_stream()); + match field_ty { + Some(VariantField { is_named: true }) => { + quote::quote_spanned!(error.attr_span => #( #cfg_attrs )* Self::#variant { .. } => #variant_str,) + }, + Some(VariantField { is_named: false }) => { + quote::quote_spanned!(error.attr_span => #( #cfg_attrs )* Self::#variant(..) 
=> #variant_str,) + }, + None => { + quote::quote_spanned!(error.attr_span => #( #cfg_attrs )* Self::#variant => #variant_str,) + }, + } + }); let error_item = { let item = &mut def.item.content.as_mut().expect("Checked by def parser").1[error.index]; @@ -126,11 +128,11 @@ pub fn expand_error(def: &mut Def) -> proc_macro2::TokenStream { } quote::quote_spanned!(error.attr_span => - impl<#type_impl_gen> #frame_support::__private::sp_std::fmt::Debug for #error_ident<#type_use_gen> + impl<#type_impl_gen> core::fmt::Debug for #error_ident<#type_use_gen> #config_where_clause { - fn fmt(&self, f: &mut #frame_support::__private::sp_std::fmt::Formatter<'_>) - -> #frame_support::__private::sp_std::fmt::Result + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) + -> core::fmt::Result { f.write_str(self.as_str()) } diff --git a/support/procedural-fork/src/pallet/expand/hooks.rs b/support/procedural-fork/src/pallet/expand/hooks.rs index 6967f4c08..8ff0e8f30 100644 --- a/support/procedural-fork/src/pallet/expand/hooks.rs +++ b/support/procedural-fork/src/pallet/expand/hooks.rs @@ -258,24 +258,24 @@ pub fn expand_hooks(def: &mut Def) -> proc_macro2::TokenStream { >::on_runtime_upgrade() } - #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result<#frame_support::__private::sp_std::vec::Vec, #frame_support::sp_runtime::TryRuntimeError> { - < - Self - as - #frame_support::traits::Hooks<#frame_system::pallet_prelude::BlockNumberFor::> - >::pre_upgrade() - } + #frame_support::try_runtime_enabled! 
{ + fn pre_upgrade() -> Result<#frame_support::__private::Vec, #frame_support::sp_runtime::TryRuntimeError> { + < + Self + as + #frame_support::traits::Hooks<#frame_system::pallet_prelude::BlockNumberFor::> + >::pre_upgrade() + } - #[cfg(feature = "try-runtime")] - fn post_upgrade(state: #frame_support::__private::sp_std::vec::Vec) -> Result<(), #frame_support::sp_runtime::TryRuntimeError> { - #post_storage_version_check + fn post_upgrade(state: #frame_support::__private::Vec) -> Result<(), #frame_support::sp_runtime::TryRuntimeError> { + #post_storage_version_check - < - Self - as - #frame_support::traits::Hooks<#frame_system::pallet_prelude::BlockNumberFor::> - >::post_upgrade(state) + < + Self + as + #frame_support::traits::Hooks<#frame_system::pallet_prelude::BlockNumberFor::> + >::post_upgrade(state) + } } } @@ -310,34 +310,35 @@ pub fn expand_hooks(def: &mut Def) -> proc_macro2::TokenStream { } } - #[cfg(feature = "try-runtime")] - impl<#type_impl_gen> - #frame_support::traits::TryState<#frame_system::pallet_prelude::BlockNumberFor::> - for #pallet_ident<#type_use_gen> #where_clause - { - fn try_state( - n: #frame_system::pallet_prelude::BlockNumberFor::, - _s: #frame_support::traits::TryStateSelect - ) -> Result<(), #frame_support::sp_runtime::TryRuntimeError> { - #frame_support::__private::log::info!( - target: #frame_support::LOG_TARGET, - "🩺 Running {:?} try-state checks", - #pallet_name, - ); - < - Self as #frame_support::traits::Hooks< - #frame_system::pallet_prelude::BlockNumberFor:: - > - >::try_state(n).map_err(|err| { - #frame_support::__private::log::error!( + #frame_support::try_runtime_enabled! 
{ + impl<#type_impl_gen> + #frame_support::traits::TryState<#frame_system::pallet_prelude::BlockNumberFor::> + for #pallet_ident<#type_use_gen> #where_clause + { + fn try_state( + n: #frame_system::pallet_prelude::BlockNumberFor::, + _s: #frame_support::traits::TryStateSelect + ) -> Result<(), #frame_support::sp_runtime::TryRuntimeError> { + #frame_support::__private::log::info!( target: #frame_support::LOG_TARGET, - "❌ {:?} try_state checks failed: {:?}", + "🩺 Running {:?} try-state checks", #pallet_name, - err ); + < + Self as #frame_support::traits::Hooks< + #frame_system::pallet_prelude::BlockNumberFor:: + > + >::try_state(n).map_err(|err| { + #frame_support::__private::log::error!( + target: #frame_support::LOG_TARGET, + "❌ {:?} try_state checks failed: {:?}", + #pallet_name, + err + ); - err - }) + err + }) + } } } ) diff --git a/support/procedural-fork/src/pallet/expand/pallet_struct.rs b/support/procedural-fork/src/pallet/expand/pallet_struct.rs index c5def65ed..64e5d533c 100644 --- a/support/procedural-fork/src/pallet/expand/pallet_struct.rs +++ b/support/procedural-fork/src/pallet/expand/pallet_struct.rs @@ -54,7 +54,7 @@ pub fn expand_pallet_struct(def: &mut Def) -> proc_macro2::TokenStream { if let Some(field) = pallet_item.fields.iter_mut().next() { if field.ty == syn::parse_quote!(_) { field.ty = syn::parse_quote!( - #frame_support::__private::sp_std::marker::PhantomData<(#type_use_gen)> + core::marker::PhantomData<(#type_use_gen)> ); } } @@ -148,10 +148,10 @@ pub fn expand_pallet_struct(def: &mut Def) -> proc_macro2::TokenStream { #storages_where_clauses { fn storage_info() - -> #frame_support::__private::sp_std::vec::Vec<#frame_support::traits::StorageInfo> + -> #frame_support::__private::Vec<#frame_support::traits::StorageInfo> { #[allow(unused_mut)] - let mut res = #frame_support::__private::sp_std::vec![]; + let mut res = #frame_support::__private::vec![]; #( #(#storage_cfg_attrs)* @@ -191,8 +191,8 @@ pub fn expand_pallet_struct(def: &mut Def) -> 
proc_macro2::TokenStream { let whitelisted_storage_keys_impl = quote::quote![ use #frame_support::traits::{StorageInfoTrait, TrackedStorageKey, WhitelistedStorageKeys}; impl<#type_impl_gen> WhitelistedStorageKeys for #pallet_ident<#type_use_gen> #storages_where_clauses { - fn whitelisted_storage_keys() -> #frame_support::__private::sp_std::vec::Vec { - use #frame_support::__private::sp_std::vec; + fn whitelisted_storage_keys() -> #frame_support::__private::Vec { + use #frame_support::__private::vec; vec![#( TrackedStorageKey::new(#whitelisted_storage_idents::<#type_use_gen>::hashed_key().to_vec()) ),*] @@ -284,7 +284,7 @@ pub fn expand_pallet_struct(def: &mut Def) -> proc_macro2::TokenStream { #config_where_clause { fn count() -> usize { 1 } - fn infos() -> #frame_support::__private::sp_std::vec::Vec<#frame_support::traits::PalletInfoData> { + fn infos() -> #frame_support::__private::Vec<#frame_support::traits::PalletInfoData> { use #frame_support::traits::PalletInfoAccess; let item = #frame_support::traits::PalletInfoData { index: Self::index(), @@ -292,7 +292,7 @@ pub fn expand_pallet_struct(def: &mut Def) -> proc_macro2::TokenStream { module_name: Self::module_name(), crate_version: Self::crate_version(), }; - #frame_support::__private::sp_std::vec![item] + #frame_support::__private::vec![item] } } diff --git a/support/procedural-fork/src/pallet/expand/storage.rs b/support/procedural-fork/src/pallet/expand/storage.rs index b77e9846b..95b046670 100644 --- a/support/procedural-fork/src/pallet/expand/storage.rs +++ b/support/procedural-fork/src/pallet/expand/storage.rs @@ -454,7 +454,7 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { #(#cfg_attrs)* { <#full_ident as #frame_support::storage::StorageEntryMetadataBuilder>::build_metadata( - #frame_support::__private::sp_std::vec![ + #frame_support::__private::vec![ #( #docs, )* ], &mut entries, @@ -886,39 +886,40 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { 
storage_names.sort_by_cached_key(|ident| ident.to_string()); quote::quote!( - #[cfg(feature = "try-runtime")] - impl<#type_impl_gen> #frame_support::traits::TryDecodeEntireStorage - for #pallet_ident<#type_use_gen> #completed_where_clause - { - fn try_decode_entire_state() -> Result> { - let pallet_name = <::PalletInfo as frame_support::traits::PalletInfo> - ::name::<#pallet_ident<#type_use_gen>>() - .expect("Every active pallet has a name in the runtime; qed"); - - #frame_support::__private::log::debug!(target: "runtime::try-decode-state", "trying to decode pallet: {pallet_name}"); - - // NOTE: for now, we have to exclude storage items that are feature gated. - let mut errors = #frame_support::__private::sp_std::vec::Vec::new(); - let mut decoded = 0usize; - - #( - #frame_support::__private::log::debug!(target: "runtime::try-decode-state", "trying to decode storage: \ - {pallet_name}::{}", stringify!(#storage_names)); + #frame_support::try_runtime_enabled! { + impl<#type_impl_gen> #frame_support::traits::TryDecodeEntireStorage + for #pallet_ident<#type_use_gen> #completed_where_clause + { + fn try_decode_entire_state() -> Result> { + let pallet_name = <::PalletInfo as #frame_support::traits::PalletInfo> + ::name::<#pallet_ident<#type_use_gen>>() + .expect("Every active pallet has a name in the runtime; qed"); + + #frame_support::__private::log::debug!(target: "runtime::try-decode-state", "trying to decode pallet: {pallet_name}"); + + // NOTE: for now, we have to exclude storage items that are feature gated. 
+ let mut errors = #frame_support::__private::Vec::new(); + let mut decoded = 0usize; + + #( + #frame_support::__private::log::debug!(target: "runtime::try-decode-state", "trying to decode storage: \ + {pallet_name}::{}", stringify!(#storage_names)); + + match <#storage_names as #frame_support::traits::TryDecodeEntireStorage>::try_decode_entire_state() { + Ok(count) => { + decoded += count; + }, + Err(err) => { + errors.extend(err); + }, + } + )* - match <#storage_names as #frame_support::traits::TryDecodeEntireStorage>::try_decode_entire_state() { - Ok(count) => { - decoded += count; - }, - Err(err) => { - errors.extend(err); - }, + if errors.is_empty() { + Ok(decoded) + } else { + Err(errors) } - )* - - if errors.is_empty() { - Ok(decoded) - } else { - Err(errors) } } } @@ -939,7 +940,7 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { .expect("No name found for the pallet in the runtime! This usually means that the pallet wasn't added to `construct_runtime!`."), entries: { #[allow(unused_mut)] - let mut entries = #frame_support::__private::sp_std::vec![]; + let mut entries = #frame_support::__private::vec![]; #( #entries_builder )* entries }, diff --git a/support/procedural-fork/src/pallet/expand/tasks.rs b/support/procedural-fork/src/pallet/expand/tasks.rs index 8c4dfb54f..8a0bd2252 100644 --- a/support/procedural-fork/src/pallet/expand/tasks.rs +++ b/support/procedural-fork/src/pallet/expand/tasks.rs @@ -163,7 +163,6 @@ impl ToTokens for TasksDef { .map(|task| &task.arg_names) .collect::>(); - let sp_std = quote!(#scrate::__private::sp_std); let impl_generics = &self.item_impl.generics; tokens.extend(quote! 
{ impl #impl_generics #enum_use @@ -173,13 +172,13 @@ impl ToTokens for TasksDef { impl #impl_generics #scrate::traits::Task for #enum_use { - type Enumeration = #sp_std::vec::IntoIter<#enum_use>; + type Enumeration = #scrate::__private::IntoIter<#enum_use>; fn iter() -> Self::Enumeration { - let mut all_tasks = #sp_std::vec![]; + let mut all_tasks = #scrate::__private::vec![]; #(all_tasks .extend(#task_iters.map(|(#(#task_arg_names),*)| #enum_ident::#task_fn_idents { #(#task_arg_names: #task_arg_names.clone()),* }) - .collect::<#sp_std::vec::Vec<_>>()); + .collect::<#scrate::__private::Vec<_>>()); )* all_tasks.into_iter() } diff --git a/support/procedural-fork/src/pallet/expand/tt_default_parts.rs b/support/procedural-fork/src/pallet/expand/tt_default_parts.rs index 57b78339a..8e7dc39d8 100644 --- a/support/procedural-fork/src/pallet/expand/tt_default_parts.rs +++ b/support/procedural-fork/src/pallet/expand/tt_default_parts.rs @@ -208,9 +208,9 @@ pub fn expand_tt_default_parts(def: &mut Def) -> proc_macro2::TokenStream { macro_rules! #default_parts_unique_id_v2 { { $caller:tt - frame_support = [{ $($frame_support:ident)::* }] + your_tt_return = [{ $my_tt_return:path }] } => { - $($frame_support)*::__private::tt_return! { + $my_tt_return! { $caller tokens = [{ + Pallet #call_part_v2 #storage_part_v2 #event_part_v2 #error_part_v2 #origin_part_v2 #config_part_v2 diff --git a/support/procedural-fork/src/pallet/parse/composite.rs b/support/procedural-fork/src/pallet/parse/composite.rs index 38da1f205..239b4fd4b 100644 --- a/support/procedural-fork/src/pallet/parse/composite.rs +++ b/support/procedural-fork/src/pallet/parse/composite.rs @@ -87,8 +87,6 @@ pub mod keyword { } pub struct CompositeDef { - /// The index of the CompositeDef item in the pallet module. - pub index: usize, /// The composite keyword used (contains span). pub composite_keyword: keyword::CompositeKeyword, /// Name of the associated type. 
@@ -104,7 +102,6 @@ pub struct CompositeDef { impl CompositeDef { pub fn try_from( attr_span: proc_macro2::Span, - index: usize, scrate: &syn::Path, item: &mut syn::Item, ) -> syn::Result { @@ -186,7 +183,6 @@ impl CompositeDef { syn::parse2::(item.ident.to_token_stream())?; Ok(CompositeDef { - index, composite_keyword, attr_span, generics: item.generics.clone(), diff --git a/support/procedural-fork/src/pallet/parse/config.rs b/support/procedural-fork/src/pallet/parse/config.rs index cde565245..95b4143b6 100644 --- a/support/procedural-fork/src/pallet/parse/config.rs +++ b/support/procedural-fork/src/pallet/parse/config.rs @@ -62,8 +62,6 @@ pub struct ConfigDef { pub has_event_type: bool, /// The where clause on trait definition but modified so `Self` is `T`. pub where_clause: Option, - /// The span of the pallet::config attribute. - pub attr_span: proc_macro2::Span, /// Whether a default sub-trait should be generated. /// /// Contains default sub-trait items (instantiated by `#[pallet::config(with_default)]`). 
@@ -97,30 +95,32 @@ impl TryFrom<&syn::TraitItemType> for ConstMetadataDef { let bound = trait_ty .bounds .iter() - .find_map(|b| { - if let syn::TypeParamBound::Trait(tb) = b { - tb.path - .segments - .last() - .and_then(|s| if s.ident == "Get" { Some(s) } else { None }) - } else { - None - } + .find_map(|param_bound| { + let syn::TypeParamBound::Trait(trait_bound) = param_bound else { + return None; + }; + + trait_bound + .path + .segments + .last() + .and_then(|s| (s.ident == "Get").then(|| s)) }) .ok_or_else(|| err(trait_ty.span(), "`Get` trait bound not found"))?; - let type_arg = if let syn::PathArguments::AngleBracketed(ref ab) = bound.arguments { - if ab.args.len() == 1 { - if let syn::GenericArgument::Type(ref ty) = ab.args[0] { - Ok(ty) - } else { - Err(err(ab.args[0].span(), "Expected a type argument")) - } - } else { - Err(err(bound.span(), "Expected a single type argument")) - } - } else { - Err(err(bound.span(), "Expected trait generic args")) - }?; + + let syn::PathArguments::AngleBracketed(ref ab) = bound.arguments else { + return Err(err(bound.span(), "Expected trait generic args")); + }; + + // Only one type argument is expected. 
+ if ab.args.len() != 1 { + return Err(err(bound.span(), "Expected a single type argument")); + } + + let syn::GenericArgument::Type(ref type_arg) = ab.args[0] else { + return Err(err(ab.args[0].span(), "Expected a type argument")); + }; + let type_ = syn::parse2::(replace_self_by_t(type_arg.to_token_stream())) .expect("Internal error: replacing `Self` by `T` should result in valid type"); @@ -229,59 +229,62 @@ fn check_event_type( trait_item: &syn::TraitItem, trait_has_instance: bool, ) -> syn::Result { - if let syn::TraitItem::Type(type_) = trait_item { - if type_.ident == "RuntimeEvent" { - // Check event has no generics - if !type_.generics.params.is_empty() || type_.generics.where_clause.is_some() { - let msg = "Invalid `type RuntimeEvent`, associated type `RuntimeEvent` is reserved and must have\ - no generics nor where_clause"; - return Err(syn::Error::new(trait_item.span(), msg)); - } + let syn::TraitItem::Type(type_) = trait_item else { + return Ok(false); + }; - // Check bound contains IsType and From - let has_is_type_bound = type_.bounds.iter().any(|s| { - syn::parse2::(s.to_token_stream()) - .map_or(false, |b| has_expected_system_config(b.0, frame_system)) - }); + if type_.ident != "RuntimeEvent" { + return Ok(false); + } - if !has_is_type_bound { - let msg = "Invalid `type RuntimeEvent`, associated type `RuntimeEvent` is reserved and must \ - bound: `IsType<::RuntimeEvent>`".to_string(); - return Err(syn::Error::new(type_.span(), msg)); - } + // Check event has no generics + if !type_.generics.params.is_empty() || type_.generics.where_clause.is_some() { + let msg = + "Invalid `type RuntimeEvent`, associated type `RuntimeEvent` is reserved and must have\ + no generics nor where_clause"; + return Err(syn::Error::new(trait_item.span(), msg)); + } - let from_event_bound = type_ - .bounds - .iter() - .find_map(|s| syn::parse2::(s.to_token_stream()).ok()); + // Check bound contains IsType and From + let has_is_type_bound = type_.bounds.iter().any(|s| { + 
syn::parse2::(s.to_token_stream()) + .map_or(false, |b| has_expected_system_config(b.0, frame_system)) + }); + + if !has_is_type_bound { + let msg = + "Invalid `type RuntimeEvent`, associated type `RuntimeEvent` is reserved and must \ + bound: `IsType<::RuntimeEvent>`" + .to_string(); + return Err(syn::Error::new(type_.span(), msg)); + } - let from_event_bound = if let Some(b) = from_event_bound { - b - } else { - let msg = "Invalid `type RuntimeEvent`, associated type `RuntimeEvent` is reserved and must \ - bound: `From` or `From>` or `From>`"; - return Err(syn::Error::new(type_.span(), msg)); - }; + let from_event_bound = type_ + .bounds + .iter() + .find_map(|s| syn::parse2::(s.to_token_stream()).ok()); - if from_event_bound.is_generic && (from_event_bound.has_instance != trait_has_instance) - { - let msg = "Invalid `type RuntimeEvent`, associated type `RuntimeEvent` bounds inconsistent \ + let Some(from_event_bound) = from_event_bound else { + let msg = + "Invalid `type RuntimeEvent`, associated type `RuntimeEvent` is reserved and must \ + bound: `From` or `From>` or `From>`"; + return Err(syn::Error::new(type_.span(), msg)); + }; + + if from_event_bound.is_generic && (from_event_bound.has_instance != trait_has_instance) { + let msg = + "Invalid `type RuntimeEvent`, associated type `RuntimeEvent` bounds inconsistent \ `From`. Config and generic Event must be both with instance or \ without instance"; - return Err(syn::Error::new(type_.span(), msg)); - } - - Ok(true) - } else { - Ok(false) - } - } else { - Ok(false) + return Err(syn::Error::new(type_.span(), msg)); } + + Ok(true) } /// Check that the path to `frame_system::Config` is valid, this is that the path is just -/// `frame_system::Config` or when using the `frame` crate it is `frame::xyz::frame_system::Config`. +/// `frame_system::Config` or when using the `frame` crate it is +/// `polkadot_sdk_frame::xyz::frame_system::Config`. 
fn has_expected_system_config(path: syn::Path, frame_system: &syn::Path) -> bool { // Check if `frame_system` is actually 'frame_system'. if path.segments.iter().all(|s| s.ident != "frame_system") { @@ -305,7 +308,7 @@ fn has_expected_system_config(path: syn::Path, frame_system: &syn::Path) -> bool syn::parse2::(quote::quote!(frame_system)).expect("is a valid path; qed") } (_, _) => - // They are either both `frame_system` or both `frame::xyz::frame_system`. + // They are either both `frame_system` or both `polkadot_sdk_frame::xyz::frame_system`. { frame_system.clone() } @@ -351,14 +354,11 @@ pub fn replace_self_by_t(input: proc_macro2::TokenStream) -> proc_macro2::TokenS impl ConfigDef { pub fn try_from( frame_system: &syn::Path, - attr_span: proc_macro2::Span, index: usize, item: &mut syn::Item, enable_default: bool, ) -> syn::Result { - let item = if let syn::Item::Trait(item) = item { - item - } else { + let syn::Item::Trait(item) = item else { let msg = "Invalid pallet::config, expected trait definition"; return Err(syn::Error::new(item.span(), msg)); }; @@ -512,7 +512,6 @@ impl ConfigDef { consts_metadata, has_event_type, where_clause, - attr_span, default_sub_trait, }) } @@ -539,14 +538,29 @@ mod tests { #[test] fn has_expected_system_config_works_with_frame() { + let path = syn::parse2::(quote::quote!(frame_system::Config)).unwrap(); + + let frame_system = + syn::parse2::(quote::quote!(polkadot_sdk_frame::deps::frame_system)) + .unwrap(); + assert!(has_expected_system_config(path.clone(), &frame_system)); + let frame_system = syn::parse2::(quote::quote!(frame::deps::frame_system)).unwrap(); - let path = syn::parse2::(quote::quote!(frame_system::Config)).unwrap(); assert!(has_expected_system_config(path, &frame_system)); } #[test] fn has_expected_system_config_works_with_frame_full_path() { + let frame_system = + syn::parse2::(quote::quote!(polkadot_sdk_frame::deps::frame_system)) + .unwrap(); + let path = syn::parse2::(quote::quote!( + 
polkadot_sdk_frame::deps::frame_system::Config + )) + .unwrap(); + assert!(has_expected_system_config(path, &frame_system)); + let frame_system = syn::parse2::(quote::quote!(frame::deps::frame_system)).unwrap(); let path = @@ -556,6 +570,13 @@ mod tests { #[test] fn has_expected_system_config_works_with_other_frame_full_path() { + let frame_system = + syn::parse2::(quote::quote!(polkadot_sdk_frame::xyz::frame_system)).unwrap(); + let path = + syn::parse2::(quote::quote!(polkadot_sdk_frame::xyz::frame_system::Config)) + .unwrap(); + assert!(has_expected_system_config(path, &frame_system)); + let frame_system = syn::parse2::(quote::quote!(frame::xyz::frame_system)).unwrap(); let path = @@ -566,26 +587,32 @@ mod tests { #[test] fn has_expected_system_config_does_not_works_with_mixed_frame_full_path() { let frame_system = - syn::parse2::(quote::quote!(frame::xyz::frame_system)).unwrap(); - let path = - syn::parse2::(quote::quote!(frame::deps::frame_system::Config)).unwrap(); + syn::parse2::(quote::quote!(polkadot_sdk_frame::xyz::frame_system)).unwrap(); + let path = syn::parse2::(quote::quote!( + polkadot_sdk_frame::deps::frame_system::Config + )) + .unwrap(); assert!(!has_expected_system_config(path, &frame_system)); } #[test] fn has_expected_system_config_does_not_works_with_other_mixed_frame_full_path() { let frame_system = - syn::parse2::(quote::quote!(frame::deps::frame_system)).unwrap(); + syn::parse2::(quote::quote!(polkadot_sdk_frame::deps::frame_system)) + .unwrap(); let path = - syn::parse2::(quote::quote!(frame::xyz::frame_system::Config)).unwrap(); + syn::parse2::(quote::quote!(polkadot_sdk_frame::xyz::frame_system::Config)) + .unwrap(); assert!(!has_expected_system_config(path, &frame_system)); } #[test] fn has_expected_system_config_does_not_work_with_frame_full_path_if_not_frame_crate() { let frame_system = syn::parse2::(quote::quote!(frame_system)).unwrap(); - let path = - syn::parse2::(quote::quote!(frame::deps::frame_system::Config)).unwrap(); + let 
path = syn::parse2::(quote::quote!( + polkadot_sdk_frame::deps::frame_system::Config + )) + .unwrap(); assert!(!has_expected_system_config(path, &frame_system)); } diff --git a/support/procedural-fork/src/pallet/parse/error.rs b/support/procedural-fork/src/pallet/parse/error.rs index e93e2113f..7aab5732b 100644 --- a/support/procedural-fork/src/pallet/parse/error.rs +++ b/support/procedural-fork/src/pallet/parse/error.rs @@ -16,7 +16,6 @@ // limitations under the License. use super::helper; -use frame_support_procedural_tools::get_doc_literals; use quote::ToTokens; use syn::{spanned::Spanned, Fields}; @@ -37,8 +36,6 @@ pub struct VariantDef { pub ident: syn::Ident, /// The variant field, if any. pub field: Option, - /// The variant doc literals. - pub docs: Vec, /// The `cfg` attributes. pub cfg_attrs: Vec, } @@ -112,7 +109,6 @@ impl ErrorDef { Ok(VariantDef { ident: variant.ident.clone(), field: field_ty, - docs: get_doc_literals(&variant.attrs), cfg_attrs, }) }) diff --git a/support/procedural-fork/src/pallet/parse/extra_constants.rs b/support/procedural-fork/src/pallet/parse/extra_constants.rs index 38acea21a..431fcf677 100644 --- a/support/procedural-fork/src/pallet/parse/extra_constants.rs +++ b/support/procedural-fork/src/pallet/parse/extra_constants.rs @@ -37,8 +37,6 @@ pub struct ExtraConstantsDef { pub where_clause: Option, /// A set of usage of instance, must be check for consistency with trait. pub instances: Vec, - /// The index of call item in pallet module. - pub index: usize, /// The extra constant defined. 
pub extra_constants: Vec, } @@ -79,7 +77,7 @@ impl syn::parse::Parse for ExtraConstAttr { } impl ExtraConstantsDef { - pub fn try_from(index: usize, item: &mut syn::Item) -> syn::Result { + pub fn try_from(item: &mut syn::Item) -> syn::Result { let item = if let syn::Item::Impl(item) = item { item } else { @@ -159,7 +157,6 @@ impl ExtraConstantsDef { } Ok(Self { - index, instances, where_clause: item.generics.where_clause.clone(), extra_constants, diff --git a/support/procedural-fork/src/pallet/parse/genesis_build.rs b/support/procedural-fork/src/pallet/parse/genesis_build.rs index 670d4d5ef..936c929af 100644 --- a/support/procedural-fork/src/pallet/parse/genesis_build.rs +++ b/support/procedural-fork/src/pallet/parse/genesis_build.rs @@ -20,8 +20,6 @@ use syn::spanned::Spanned; /// Definition for pallet genesis build implementation. pub struct GenesisBuildDef { - /// The index of item in pallet module. - pub index: usize, /// A set of usage of instance, must be check for consistency with trait. pub instances: Option>, /// The where_clause used. 
@@ -31,11 +29,7 @@ pub struct GenesisBuildDef { } impl GenesisBuildDef { - pub fn try_from( - attr_span: proc_macro2::Span, - index: usize, - item: &mut syn::Item, - ) -> syn::Result { + pub fn try_from(attr_span: proc_macro2::Span, item: &mut syn::Item) -> syn::Result { let item = if let syn::Item::Impl(item) = item { item } else { @@ -58,7 +52,6 @@ impl GenesisBuildDef { Ok(Self { attr_span, - index, instances, where_clause: item.generics.where_clause.clone(), }) diff --git a/support/procedural-fork/src/pallet/parse/helper.rs b/support/procedural-fork/src/pallet/parse/helper.rs index f58c8d81c..1105046c2 100644 --- a/support/procedural-fork/src/pallet/parse/helper.rs +++ b/support/procedural-fork/src/pallet/parse/helper.rs @@ -55,23 +55,21 @@ pub(crate) fn take_first_item_pallet_attr( where Attr: syn::parse::Parse, { - let attrs = if let Some(attrs) = item.mut_item_attrs() { - attrs - } else { + let Some(attrs) = item.mut_item_attrs() else { return Ok(None); }; - if let Some(index) = attrs.iter().position(|attr| { + let Some(index) = attrs.iter().position(|attr| { attr.path() .segments .first() .map_or(false, |segment| segment.ident == "pallet") - }) { - let pallet_attr = attrs.remove(index); - Ok(Some(syn::parse2(pallet_attr.into_token_stream())?)) - } else { - Ok(None) - } + }) else { + return Ok(None); + }; + + let pallet_attr = attrs.remove(index); + Ok(Some(syn::parse2(pallet_attr.into_token_stream())?)) } /// Take all the pallet attributes (e.g. attribute like `#[pallet..]`) and decode them to `Attr` diff --git a/support/procedural-fork/src/pallet/parse/hooks.rs b/support/procedural-fork/src/pallet/parse/hooks.rs index 1cf5c72cc..bca1a3383 100644 --- a/support/procedural-fork/src/pallet/parse/hooks.rs +++ b/support/procedural-fork/src/pallet/parse/hooks.rs @@ -20,8 +20,6 @@ use syn::spanned::Spanned; /// Implementation of the pallet hooks. pub struct HooksDef { - /// The index of item in pallet. 
- pub index: usize, /// A set of usage of instance, must be check for consistency with trait. pub instances: Vec, /// The where_clause used. @@ -33,11 +31,7 @@ pub struct HooksDef { } impl HooksDef { - pub fn try_from( - attr_span: proc_macro2::Span, - index: usize, - item: &mut syn::Item, - ) -> syn::Result { + pub fn try_from(attr_span: proc_macro2::Span, item: &mut syn::Item) -> syn::Result { let item = if let syn::Item::Impl(item) = item { item } else { @@ -77,7 +71,6 @@ impl HooksDef { Ok(Self { attr_span, - index, instances, has_runtime_upgrade, where_clause: item.generics.where_clause.clone(), diff --git a/support/procedural-fork/src/pallet/parse/inherent.rs b/support/procedural-fork/src/pallet/parse/inherent.rs index 4eb04e914..911de2ffe 100644 --- a/support/procedural-fork/src/pallet/parse/inherent.rs +++ b/support/procedural-fork/src/pallet/parse/inherent.rs @@ -20,14 +20,12 @@ use syn::spanned::Spanned; /// The definition of the pallet inherent implementation. pub struct InherentDef { - /// The index of inherent item in pallet module. - pub index: usize, /// A set of usage of instance, must be check for consistency with trait. 
pub instances: Vec, } impl InherentDef { - pub fn try_from(index: usize, item: &mut syn::Item) -> syn::Result { + pub fn try_from(item: &mut syn::Item) -> syn::Result { let item = if let syn::Item::Impl(item) = item { item } else { @@ -55,6 +53,6 @@ impl InherentDef { helper::check_impl_gen(&item.generics, item.impl_token.span())?, ]; - Ok(InherentDef { index, instances }) + Ok(InherentDef { instances }) } } diff --git a/support/procedural-fork/src/pallet/parse/mod.rs b/support/procedural-fork/src/pallet/parse/mod.rs index 57c252473..69f921733 100644 --- a/support/procedural-fork/src/pallet/parse/mod.rs +++ b/support/procedural-fork/src/pallet/parse/mod.rs @@ -109,10 +109,9 @@ impl Def { let pallet_attr: Option = helper::take_first_item_pallet_attr(item)?; match pallet_attr { - Some(PalletAttr::Config(span, with_default)) if config.is_none() => + Some(PalletAttr::Config(_, with_default)) if config.is_none() => config = Some(config::ConfigDef::try_from( &frame_system, - span, index, item, with_default, @@ -122,7 +121,7 @@ impl Def { pallet_struct = Some(p); }, Some(PalletAttr::Hooks(span)) if hooks.is_none() => { - let m = hooks::HooksDef::try_from(span, index, item)?; + let m = hooks::HooksDef::try_from(span, item)?; hooks = Some(m); }, Some(PalletAttr::RuntimeCall(cw, span)) if call.is_none() => @@ -162,27 +161,27 @@ impl Def { genesis_config = Some(g); }, Some(PalletAttr::GenesisBuild(span)) if genesis_build.is_none() => { - let g = genesis_build::GenesisBuildDef::try_from(span, index, item)?; + let g = genesis_build::GenesisBuildDef::try_from(span, item)?; genesis_build = Some(g); }, Some(PalletAttr::RuntimeOrigin(_)) if origin.is_none() => - origin = Some(origin::OriginDef::try_from(index, item)?), + origin = Some(origin::OriginDef::try_from(item)?), Some(PalletAttr::Inherent(_)) if inherent.is_none() => - inherent = Some(inherent::InherentDef::try_from(index, item)?), + inherent = Some(inherent::InherentDef::try_from(item)?), Some(PalletAttr::Storage(span)) => 
storages.push(storage::StorageDef::try_from(span, index, item, dev_mode)?), Some(PalletAttr::ValidateUnsigned(_)) if validate_unsigned.is_none() => { - let v = validate_unsigned::ValidateUnsignedDef::try_from(index, item)?; + let v = validate_unsigned::ValidateUnsignedDef::try_from(item)?; validate_unsigned = Some(v); }, Some(PalletAttr::TypeValue(span)) => type_values.push(type_value::TypeValueDef::try_from(span, index, item)?), Some(PalletAttr::ExtraConstants(_)) => extra_constants = - Some(extra_constants::ExtraConstantsDef::try_from(index, item)?), + Some(extra_constants::ExtraConstantsDef::try_from(item)?), Some(PalletAttr::Composite(span)) => { let composite = - composite::CompositeDef::try_from(span, index, &frame_support, item)?; + composite::CompositeDef::try_from(span, &frame_support, item)?; if composites.iter().any(|def| { match (&def.composite_keyword, &composite.composite_keyword) { ( @@ -777,7 +776,6 @@ impl syn::parse::Parse for PalletAttr { #[derive(Clone)] pub struct InheritedCallWeightAttr { pub typename: syn::Type, - pub span: proc_macro2::Span, } impl syn::parse::Parse for InheritedCallWeightAttr { @@ -801,7 +799,6 @@ impl syn::parse::Parse for InheritedCallWeightAttr { Ok(Self { typename: buffer.parse()?, - span: input.span(), }) } } diff --git a/support/procedural-fork/src/pallet/parse/origin.rs b/support/procedural-fork/src/pallet/parse/origin.rs index 2dd84c40d..8232719d6 100644 --- a/support/procedural-fork/src/pallet/parse/origin.rs +++ b/support/procedural-fork/src/pallet/parse/origin.rs @@ -25,16 +25,13 @@ use syn::spanned::Spanned; /// * `struct Origin` /// * `enum Origin` pub struct OriginDef { - /// The index of item in pallet module. - pub index: usize, - pub has_instance: bool, pub is_generic: bool, /// A set of usage of instance, must be check for consistency with trait. 
pub instances: Vec, } impl OriginDef { - pub fn try_from(index: usize, item: &mut syn::Item) -> syn::Result { + pub fn try_from(item: &mut syn::Item) -> syn::Result { let item_span = item.span(); let (vis, ident, generics) = match &item { syn::Item::Enum(item) => (&item.vis, &item.ident, &item.generics), @@ -46,7 +43,6 @@ impl OriginDef { } }; - let has_instance = generics.params.len() == 2; let is_generic = !generics.params.is_empty(); let mut instances = vec![]; @@ -71,8 +67,6 @@ impl OriginDef { } Ok(OriginDef { - index, - has_instance, is_generic, instances, }) diff --git a/support/procedural-fork/src/pallet/parse/storage.rs b/support/procedural-fork/src/pallet/parse/storage.rs index 64a5e685b..811832427 100644 --- a/support/procedural-fork/src/pallet/parse/storage.rs +++ b/support/procedural-fork/src/pallet/parse/storage.rs @@ -718,11 +718,11 @@ fn process_generics( "CountedStorageNMap" => StorageKind::CountedNMap, found => { let msg = format!( - "Invalid pallet::storage, expected ident: `StorageValue` or \ + "Invalid pallet::storage, expected ident: `StorageValue` or \ `StorageMap` or `CountedStorageMap` or `StorageDoubleMap` or `StorageNMap` or `CountedStorageNMap` \ in order to expand metadata, found `{}`.", - found, - ); + found, + ); return Err(syn::Error::new(segment.ident.span(), msg)); } }; diff --git a/support/procedural-fork/src/pallet/parse/tasks.rs b/support/procedural-fork/src/pallet/parse/tasks.rs index 50633fbd0..2a8d14826 100644 --- a/support/procedural-fork/src/pallet/parse/tasks.rs +++ b/support/procedural-fork/src/pallet/parse/tasks.rs @@ -34,8 +34,8 @@ use syn::{ parse2, spanned::Spanned, token::{Bracket, Paren, PathSep, Pound}, - Attribute, Error, Expr, Ident, ImplItem, ImplItemFn, ItemEnum, ItemImpl, LitInt, Path, - PathArguments, Result, TypePath, + Error, Expr, Ident, ImplItem, ImplItemFn, ItemEnum, ItemImpl, LitInt, Path, PathArguments, + Result, TypePath, }; pub mod keywords { @@ -192,7 +192,6 @@ pub struct TaskDef { pub 
condition_attr: TaskConditionAttr, pub list_attr: TaskListAttr, pub weight_attr: TaskWeightAttr, - pub normal_attrs: Vec, pub item: ImplItemFn, pub arg_names: Vec, } @@ -202,7 +201,7 @@ impl syn::parse::Parse for TaskDef { let item = input.parse::()?; // we only want to activate TaskAttrType parsing errors for tasks-related attributes, // so we filter them here - let (task_attrs, normal_attrs) = partition_task_attrs(&item); + let task_attrs = partition_task_attrs(&item).0; let task_attrs: Vec = task_attrs .into_iter() @@ -319,7 +318,6 @@ impl syn::parse::Parse for TaskDef { condition_attr, list_attr, weight_attr, - normal_attrs, item, arg_names, }) diff --git a/support/procedural-fork/src/pallet/parse/tests/tasks.rs b/support/procedural-fork/src/pallet/parse/tests/tasks.rs index 6cd4d13bb..0097ed047 100644 --- a/support/procedural-fork/src/pallet/parse/tests/tasks.rs +++ b/support/procedural-fork/src/pallet/parse/tests/tasks.rs @@ -124,10 +124,10 @@ fn test_parse_pallet_manual_tasks_impl_without_manual_tasks_enum() { where T: TypeInfo, { - type Enumeration = sp_std::vec::IntoIter>; + type Enumeration = alloc::vec::IntoIter>; fn iter() -> Self::Enumeration { - sp_std::vec![Task::increment, Task::decrement].into_iter() + alloc::vec![Task::increment, Task::decrement].into_iter() } } diff --git a/support/procedural-fork/src/pallet/parse/type_value.rs b/support/procedural-fork/src/pallet/parse/type_value.rs index d5c85248f..1054fd74c 100644 --- a/support/procedural-fork/src/pallet/parse/type_value.rs +++ b/support/procedural-fork/src/pallet/parse/type_value.rs @@ -28,12 +28,8 @@ pub struct TypeValueDef { pub ident: syn::Ident, /// The type return by Get. pub type_: Box, - /// The block returning the value to get - pub block: Box, /// If type value is generic over `T` (or `T` and `I` for instantiable pallet) pub is_generic: bool, - /// A set of usage of instance, must be check for consistency with config. - pub instances: Vec, /// The where clause of the function. 
pub where_clause: Option, /// The span of the pallet::type_value attribute. @@ -90,7 +86,6 @@ impl TypeValueDef { let vis = item.vis.clone(); let ident = item.sig.ident.clone(); - let block = item.block.clone(); let type_ = match item.sig.output.clone() { syn::ReturnType::Type(_, type_) => type_, syn::ReturnType::Default => { @@ -99,10 +94,7 @@ impl TypeValueDef { } }; - let mut instances = vec![]; - if let Some(usage) = helper::check_type_value_gen(&item.sig.generics, item.sig.span())? { - instances.push(usage); - } + helper::check_type_value_gen(&item.sig.generics, item.sig.span())?; let is_generic = item.sig.generics.type_params().count() > 0; let where_clause = item.sig.generics.where_clause.clone(); @@ -113,9 +105,7 @@ impl TypeValueDef { is_generic, vis, ident, - block, type_, - instances, where_clause, docs, }) diff --git a/support/procedural-fork/src/pallet/parse/validate_unsigned.rs b/support/procedural-fork/src/pallet/parse/validate_unsigned.rs index 6e5109a74..3fcbe09e8 100644 --- a/support/procedural-fork/src/pallet/parse/validate_unsigned.rs +++ b/support/procedural-fork/src/pallet/parse/validate_unsigned.rs @@ -19,15 +19,10 @@ use super::helper; use syn::spanned::Spanned; /// The definition of the pallet validate unsigned implementation. -pub struct ValidateUnsignedDef { - /// The index of validate unsigned item in pallet module. - pub index: usize, - /// A set of usage of instance, must be check for consistency with config. 
- pub instances: Vec, -} +pub struct ValidateUnsignedDef {} impl ValidateUnsignedDef { - pub fn try_from(index: usize, item: &mut syn::Item) -> syn::Result { + pub fn try_from(item: &mut syn::Item) -> syn::Result { let item = if let syn::Item::Impl(item) = item { item } else { @@ -52,11 +47,9 @@ impl ValidateUnsignedDef { return Err(syn::Error::new(item.span(), msg)); } - let instances = vec![ - helper::check_pallet_struct_usage(&item.self_ty)?, - helper::check_impl_gen(&item.generics, item.impl_token.span())?, - ]; + helper::check_pallet_struct_usage(&item.self_ty)?; + helper::check_impl_gen(&item.generics, item.impl_token.span())?; - Ok(ValidateUnsignedDef { index, instances }) + Ok(ValidateUnsignedDef {}) } } diff --git a/support/procedural-fork/src/runtime/expand/mod.rs b/support/procedural-fork/src/runtime/expand/mod.rs index c26cbccb7..a1a6d4d07 100644 --- a/support/procedural-fork/src/runtime/expand/mod.rs +++ b/support/procedural-fork/src/runtime/expand/mod.rs @@ -97,23 +97,26 @@ fn construct_runtime_implicit_to_explicit( quote!() }; let mut expansion = quote::quote!( - #[frame_support::runtime #attr] + #[#frame_support::runtime #attr] #input ); for pallet in definition.pallet_decls.iter() { let pallet_path = &pallet.path; let pallet_name = &pallet.name; - let pallet_instance = pallet - .instance - .as_ref() - .map(|instance| quote::quote!(<#instance>)); + let runtime_param = &pallet.runtime_param; + let pallet_segment_and_instance = match (&pallet.pallet_segment, &pallet.instance) { + (Some(segment), Some(instance)) => quote::quote!(::#segment<#runtime_param, #instance>), + (Some(segment), None) => quote::quote!(::#segment<#runtime_param>), + (None, Some(instance)) => quote::quote!(<#instance>), + (None, None) => quote::quote!(), + }; expansion = quote::quote!( #frame_support::__private::tt_call! 
{ macro = [{ #pallet_path::tt_default_parts_v2 }] - frame_support = [{ #frame_support }] + your_tt_return = [{ #frame_support::__private::tt_return }] ~~> #frame_support::match_and_insert! { target = [{ #expansion }] - pattern = [{ #pallet_name = #pallet_path #pallet_instance }] + pattern = [{ #pallet_name = #pallet_path #pallet_segment_and_instance }] } } ); @@ -264,7 +267,7 @@ fn construct_runtime_final_expansion( // Prevent UncheckedExtrinsic to print unused warning. const _: () = { #[allow(unused)] - type __hidden_use_of_unchecked_extrinsic = #unchecked_extrinsic; + type __HiddenUseOfUncheckedExtrinsic = #unchecked_extrinsic; }; #[derive( @@ -294,7 +297,7 @@ fn construct_runtime_final_expansion( #[doc(hidden)] trait InternalConstructRuntime { #[inline(always)] - fn runtime_metadata(&self) -> #scrate::__private::sp_std::vec::Vec<#scrate::__private::metadata_ir::RuntimeApiMetadataIR> { + fn runtime_metadata(&self) -> #scrate::__private::Vec<#scrate::__private::metadata_ir::RuntimeApiMetadataIR> { Default::default() } } diff --git a/support/procedural-fork/src/runtime/mod.rs b/support/procedural-fork/src/runtime/mod.rs index 589acff6c..a96b21cd1 100644 --- a/support/procedural-fork/src/runtime/mod.rs +++ b/support/procedural-fork/src/runtime/mod.rs @@ -200,8 +200,6 @@ //! +----------------------+ //! ``` -#![cfg(feature = "experimental")] - pub use parse::Def; use proc_macro::TokenStream; use syn::spanned::Spanned; diff --git a/support/procedural-fork/src/runtime/parse/mod.rs b/support/procedural-fork/src/runtime/parse/mod.rs index 79cf894e8..a6a49e814 100644 --- a/support/procedural-fork/src/runtime/parse/mod.rs +++ b/support/procedural-fork/src/runtime/parse/mod.rs @@ -118,7 +118,6 @@ pub enum AllPalletsDeclaration { /// Declaration of a runtime with some pallet with implicit declaration of parts. 
#[derive(Debug, Clone)] pub struct ImplicitAllPalletsDeclaration { - pub name: Ident, pub pallet_decls: Vec, pub pallet_count: usize, } @@ -132,7 +131,6 @@ pub struct ExplicitAllPalletsDeclaration { pub struct Def { pub input: TokenStream2, - pub item: syn::ItemMod, pub runtime_struct: runtime_struct::RuntimeStructDef, pub pallets: AllPalletsDeclaration, pub runtime_types: Vec, @@ -161,8 +159,7 @@ impl Def { let mut pallets = vec![]; for item in items.iter_mut() { - let mut pallet_item = None; - let mut pallet_index = 0; + let mut pallet_index_and_item = None; let mut disable_call = false; let mut disable_unsigned = false; @@ -171,17 +168,16 @@ impl Def { helper::take_first_item_runtime_attr::(item)? { match runtime_attr { - RuntimeAttr::Runtime(span) if runtime_struct.is_none() => { - let p = runtime_struct::RuntimeStructDef::try_from(span, item)?; + RuntimeAttr::Runtime(_) if runtime_struct.is_none() => { + let p = runtime_struct::RuntimeStructDef::try_from(item)?; runtime_struct = Some(p); } RuntimeAttr::Derive(_, types) if runtime_types.is_none() => { runtime_types = Some(types); } RuntimeAttr::PalletIndex(span, index) => { - pallet_index = index; - pallet_item = if let syn::Item::Type(item) = item { - Some(item.clone()) + pallet_index_and_item = if let syn::Item::Type(item) = item { + Some((index, item.clone())) } else { let msg = "Invalid runtime::pallet_index, expected type definition"; return Err(syn::Error::new(span, msg)); @@ -196,11 +192,11 @@ impl Def { } } - if let Some(pallet_item) = pallet_item { + if let Some((pallet_index, pallet_item)) = pallet_index_and_item { match *pallet_item.ty.clone() { syn::Type::Path(ref path) => { let pallet_decl = - PalletDeclaration::try_from(item.span(), &pallet_item, path)?; + PalletDeclaration::try_from(item.span(), &pallet_item, &path.path)?; if let Some(used_pallet) = names.insert(pallet_decl.name.clone(), pallet_decl.name.span()) @@ -239,6 +235,11 @@ impl Def { } _ => continue, } + } else { + if let 
syn::Item::Type(item) = item { + let msg = "Missing pallet index for pallet declaration. Please add `#[runtime::pallet_index(...)]`"; + return Err(syn::Error::new(item.span(), &msg)); + } } } @@ -246,7 +247,6 @@ impl Def { let decl_count = pallet_decls.len(); let pallets = if decl_count > 0 { AllPalletsDeclaration::Implicit(ImplicitAllPalletsDeclaration { - name, pallet_decls, pallet_count: decl_count.saturating_add(pallets.len()), }) @@ -255,21 +255,41 @@ impl Def { }; let def = Def { - input, - item, - runtime_struct: runtime_struct.ok_or_else(|| { - syn::Error::new(item_span, + input, + runtime_struct: runtime_struct.ok_or_else(|| { + syn::Error::new(item_span, "Missing Runtime. Please add a struct inside the module and annotate it with `#[runtime::runtime]`" ) - })?, - pallets, - runtime_types: runtime_types.ok_or_else(|| { - syn::Error::new(item_span, + })?, + pallets, + runtime_types: runtime_types.ok_or_else(|| { + syn::Error::new(item_span, "Missing Runtime Types. Please annotate the runtime struct with `#[runtime::derive]`" ) - })?, - }; + })?, + }; Ok(def) } } + +#[test] +fn runtime_parsing_works() { + let def = Def::try_from(syn::parse_quote! { + #[runtime::runtime] + mod runtime { + #[runtime::derive(RuntimeCall, RuntimeEvent)] + #[runtime::runtime] + pub struct Runtime; + + #[runtime::pallet_index(0)] + pub type System = frame_system::Pallet; + + #[runtime::pallet_index(1)] + pub type Pallet1 = pallet1; + } + }) + .expect("Failed to parse runtime definition"); + + assert_eq!(def.runtime_struct.ident, "Runtime"); +} diff --git a/support/procedural-fork/src/runtime/parse/pallet.rs b/support/procedural-fork/src/runtime/parse/pallet.rs index 039e2631b..591c05930 100644 --- a/support/procedural-fork/src/runtime/parse/pallet.rs +++ b/support/procedural-fork/src/runtime/parse/pallet.rs @@ -15,9 +15,13 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use crate::construct_runtime::parse::{Pallet, PalletPart, PalletPartKeyword, PalletPath}; +use crate::{ + construct_runtime::parse::{Pallet, PalletPart, PalletPartKeyword, PalletPath}, + runtime::parse::PalletDeclaration, +}; +use frame_support_procedural_tools::get_doc_literals; use quote::ToTokens; -use syn::{punctuated::Punctuated, spanned::Spanned, token, Error, Ident, PathArguments}; +use syn::{punctuated::Punctuated, token, Error}; impl Pallet { pub fn try_from( @@ -56,26 +60,13 @@ impl Pallet { "Invalid pallet declaration, expected a path or a trait object", ))?; - let mut instance = None; - if let Some(segment) = path - .inner - .segments - .iter_mut() - .find(|seg| !seg.arguments.is_empty()) - { - if let PathArguments::AngleBracketed(syn::AngleBracketedGenericArguments { - args, .. - }) = segment.arguments.clone() - { - if let Some(syn::GenericArgument::Type(syn::Type::Path(arg_path))) = args.first() { - instance = Some(Ident::new( - &arg_path.to_token_stream().to_string(), - arg_path.span(), - )); - segment.arguments = PathArguments::None; - } - } - } + let PalletDeclaration { + path: inner, + instance, + .. + } = PalletDeclaration::try_from(attr_span, item, &path.inner)?; + + path = PalletPath { inner }; pallet_parts = pallet_parts .into_iter() @@ -94,6 +85,8 @@ impl Pallet { let cfg_pattern = vec![]; + let docs = get_doc_literals(&item.attrs); + Ok(Pallet { is_expanded: true, name, @@ -102,6 +95,123 @@ impl Pallet { instance, cfg_pattern, pallet_parts, + docs, }) } } + +#[test] +fn pallet_parsing_works() { + use syn::{parse_quote, ItemType}; + + let item: ItemType = parse_quote! { + pub type System = frame_system + Call; + }; + let ItemType { ty, .. } = item.clone(); + let syn::Type::TraitObject(syn::TypeTraitObject { bounds, .. 
}) = *ty else { + panic!("Expected a trait object"); + }; + + let index = 0; + let pallet = Pallet::try_from( + proc_macro2::Span::call_site(), + &item, + index, + false, + false, + &bounds, + ) + .unwrap(); + + assert_eq!(pallet.name.to_string(), "System"); + assert_eq!(pallet.index, index); + assert_eq!(pallet.path.to_token_stream().to_string(), "frame_system"); + assert_eq!(pallet.instance, None); +} + +#[test] +fn pallet_parsing_works_with_instance() { + use syn::{parse_quote, ItemType}; + + let item: ItemType = parse_quote! { + pub type System = frame_system + Call; + }; + let ItemType { ty, .. } = item.clone(); + let syn::Type::TraitObject(syn::TypeTraitObject { bounds, .. }) = *ty else { + panic!("Expected a trait object"); + }; + + let index = 0; + let pallet = Pallet::try_from( + proc_macro2::Span::call_site(), + &item, + index, + false, + false, + &bounds, + ) + .unwrap(); + + assert_eq!(pallet.name.to_string(), "System"); + assert_eq!(pallet.index, index); + assert_eq!(pallet.path.to_token_stream().to_string(), "frame_system"); + assert_eq!(pallet.instance, Some(parse_quote! { Instance1 })); +} + +#[test] +fn pallet_parsing_works_with_pallet() { + use syn::{parse_quote, ItemType}; + + let item: ItemType = parse_quote! { + pub type System = frame_system::Pallet + Call; + }; + let ItemType { ty, .. } = item.clone(); + let syn::Type::TraitObject(syn::TypeTraitObject { bounds, .. }) = *ty else { + panic!("Expected a trait object"); + }; + + let index = 0; + let pallet = Pallet::try_from( + proc_macro2::Span::call_site(), + &item, + index, + false, + false, + &bounds, + ) + .unwrap(); + + assert_eq!(pallet.name.to_string(), "System"); + assert_eq!(pallet.index, index); + assert_eq!(pallet.path.to_token_stream().to_string(), "frame_system"); + assert_eq!(pallet.instance, None); +} + +#[test] +fn pallet_parsing_works_with_instance_and_pallet() { + use syn::{parse_quote, ItemType}; + + let item: ItemType = parse_quote! 
{ + pub type System = frame_system::Pallet + Call; + }; + let ItemType { ty, .. } = item.clone(); + let syn::Type::TraitObject(syn::TypeTraitObject { bounds, .. }) = *ty else { + panic!("Expected a trait object"); + }; + + let index = 0; + let pallet = Pallet::try_from( + proc_macro2::Span::call_site(), + &item, + index, + false, + false, + &bounds, + ) + .unwrap(); + + assert_eq!(pallet.name.to_string(), "System"); + assert_eq!(pallet.index, index); + assert_eq!(pallet.path.to_token_stream().to_string(), "frame_system"); + assert_eq!(pallet.instance, Some(parse_quote! { Instance1 })); +} diff --git a/support/procedural-fork/src/runtime/parse/pallet_decl.rs b/support/procedural-fork/src/runtime/parse/pallet_decl.rs index bb1246606..fab826eee 100644 --- a/support/procedural-fork/src/runtime/parse/pallet_decl.rs +++ b/support/procedural-fork/src/runtime/parse/pallet_decl.rs @@ -15,19 +15,22 @@ // See the License for the specific language governing permissions and // limitations under the License. -use quote::ToTokens; -use syn::{spanned::Spanned, Attribute, Ident, PathArguments}; +use syn::{Ident, PathArguments}; /// The declaration of a pallet. #[derive(Debug, Clone)] pub struct PalletDeclaration { - /// The name of the pallet, e.g.`System` in `System: frame_system`. + /// The name of the pallet, e.g.`System` in `pub type System = frame_system`. pub name: Ident, - /// Optional attributes tagged right above a pallet declaration. - pub attrs: Vec, - /// The path of the pallet, e.g. `frame_system` in `System: frame_system`. + /// The path of the pallet, e.g. `frame_system` in `pub type System = frame_system`. pub path: syn::Path, - /// The instance of the pallet, e.g. `Instance1` in `Council: pallet_collective::`. + /// The segment of the pallet, e.g. `Pallet` in `pub type System = frame_system::Pallet`. + pub pallet_segment: Option, + /// The runtime parameter of the pallet, e.g. `Runtime` in + /// `pub type System = frame_system::Pallet`. 
+ pub runtime_param: Option, + /// The instance of the pallet, e.g. `Instance1` in `pub type Council = + /// pallet_collective`. pub instance: Option, } @@ -35,12 +38,14 @@ impl PalletDeclaration { pub fn try_from( _attr_span: proc_macro2::Span, item: &syn::ItemType, - path: &syn::TypePath, + path: &syn::Path, ) -> syn::Result { let name = item.ident.clone(); - let mut path = path.path.clone(); + let mut path = path.clone(); + let mut pallet_segment = None; + let mut runtime_param = None; let mut instance = None; if let Some(segment) = path .segments @@ -51,21 +56,131 @@ impl PalletDeclaration { args, .. }) = segment.arguments.clone() { - if let Some(syn::GenericArgument::Type(syn::Type::Path(arg_path))) = args.first() { - instance = Some(Ident::new( - &arg_path.to_token_stream().to_string(), - arg_path.span(), - )); + if segment.ident == "Pallet" { + let mut segment = segment.clone(); segment.arguments = PathArguments::None; + pallet_segment = Some(segment.clone()); + } + let mut args_iter = args.iter(); + if let Some(syn::GenericArgument::Type(syn::Type::Path(arg_path))) = + args_iter.next() + { + let ident = arg_path.path.require_ident()?.clone(); + if segment.ident == "Pallet" { + runtime_param = Some(ident); + if let Some(syn::GenericArgument::Type(syn::Type::Path(arg_path))) = + args_iter.next() + { + instance = Some(arg_path.path.require_ident()?.clone()); + } + } else { + instance = Some(ident); + segment.arguments = PathArguments::None; + } } } } + if pallet_segment.is_some() { + path = syn::Path { + leading_colon: None, + segments: path + .segments + .iter() + .filter(|seg| seg.arguments.is_empty()) + .cloned() + .collect(), + }; + } + Ok(Self { name, path, + pallet_segment, + runtime_param, instance, - attrs: item.attrs.clone(), }) } } + +#[test] +fn declaration_works() { + use syn::parse_quote; + + let decl: PalletDeclaration = PalletDeclaration::try_from( + proc_macro2::Span::call_site(), + &parse_quote! 
{ pub type System = frame_system; }, + &parse_quote! { frame_system }, + ) + .expect("Failed to parse pallet declaration"); + + assert_eq!(decl.name, "System"); + assert_eq!(decl.path, parse_quote! { frame_system }); + assert_eq!(decl.pallet_segment, None); + assert_eq!(decl.runtime_param, None); + assert_eq!(decl.instance, None); +} + +#[test] +fn declaration_works_with_instance() { + use syn::parse_quote; + + let decl: PalletDeclaration = PalletDeclaration::try_from( + proc_macro2::Span::call_site(), + &parse_quote! { pub type System = frame_system; }, + &parse_quote! { frame_system }, + ) + .expect("Failed to parse pallet declaration"); + + assert_eq!(decl.name, "System"); + assert_eq!(decl.path, parse_quote! { frame_system }); + assert_eq!(decl.pallet_segment, None); + assert_eq!(decl.runtime_param, None); + assert_eq!(decl.instance, Some(parse_quote! { Instance1 })); +} + +#[test] +fn declaration_works_with_pallet() { + use syn::parse_quote; + + let decl: PalletDeclaration = PalletDeclaration::try_from( + proc_macro2::Span::call_site(), + &parse_quote! { pub type System = frame_system::Pallet; }, + &parse_quote! { frame_system::Pallet }, + ) + .expect("Failed to parse pallet declaration"); + + assert_eq!(decl.name, "System"); + assert_eq!(decl.path, parse_quote! { frame_system }); + + let segment: syn::PathSegment = syn::PathSegment { + ident: parse_quote! { Pallet }, + arguments: PathArguments::None, + }; + assert_eq!(decl.pallet_segment, Some(segment)); + assert_eq!(decl.runtime_param, Some(parse_quote! { Runtime })); + assert_eq!(decl.instance, None); +} + +#[test] +fn declaration_works_with_pallet_and_instance() { + use syn::parse_quote; + + let decl: PalletDeclaration = PalletDeclaration::try_from( + proc_macro2::Span::call_site(), + &parse_quote! { pub type System = frame_system::Pallet; }, + &parse_quote! 
{ frame_system::Pallet }, + ) + .expect("Failed to parse pallet declaration"); + + assert_eq!(decl.name, "System"); + assert_eq!(decl.path, parse_quote! { frame_system }); + + let segment: syn::PathSegment = syn::PathSegment { + ident: parse_quote! { Pallet }, + arguments: PathArguments::None, + }; + assert_eq!(decl.pallet_segment, Some(segment)); + assert_eq!(decl.runtime_param, Some(parse_quote! { Runtime })); + assert_eq!(decl.instance, Some(parse_quote! { Instance1 })); +} diff --git a/support/procedural-fork/src/runtime/parse/runtime_struct.rs b/support/procedural-fork/src/runtime/parse/runtime_struct.rs index 7ddbdcfeb..82c6470d7 100644 --- a/support/procedural-fork/src/runtime/parse/runtime_struct.rs +++ b/support/procedural-fork/src/runtime/parse/runtime_struct.rs @@ -18,11 +18,10 @@ use syn::spanned::Spanned; pub struct RuntimeStructDef { pub ident: syn::Ident, - pub attr_span: proc_macro2::Span, } impl RuntimeStructDef { - pub fn try_from(attr_span: proc_macro2::Span, item: &mut syn::Item) -> syn::Result { + pub fn try_from(item: &mut syn::Item) -> syn::Result { let item = if let syn::Item::Struct(item) = item { item } else { @@ -32,7 +31,6 @@ impl RuntimeStructDef { Ok(Self { ident: item.ident.clone(), - attr_span, }) } } diff --git a/support/procedural-fork/update.sh b/support/procedural-fork/update.sh index a8793b261..0cb933b33 100755 --- a/support/procedural-fork/update.sh +++ b/support/procedural-fork/update.sh @@ -5,7 +5,7 @@ set -e # Set the repository and tag REPO_URL="git@github.com:paritytech/polkadot-sdk.git" -POLKADOT_SDK_TAG="v1.10.0-rc3" +POLKADOT_SDK_TAG="v1.16.0-rc1" # Create a temporary directory for cloning TMP_DIR=$(mktemp -d) diff --git a/zepter.yaml b/zepter.yaml index 2841580e5..e448fa228 100644 --- a/zepter.yaml +++ b/zepter.yaml @@ -12,13 +12,13 @@ workflows: # Check that `A` activates the features of `B`. 
"propagate-feature", # These are the features to check: - "--features=try-runtime,runtime-benchmarks,std", + "--features=try-runtime,runtime-benchmarks,std,sql,rocksdb,txpool", # Do not try to add a new section into `[features]` of `A` only because `B` expose that feature. There are edge-cases where this is still needed, but we can add them manually. "--left-side-feature-missing=ignore", # Ignore the case that `A` it outside of the workspace. Otherwise it will report errors in external dependencies that we have no influence on. "--left-side-outside-workspace=ignore", # Some features imply that they activate a specific dependency as non-optional. Otherwise the default behaviour with a `?` is used. - "--feature-enables-dep=try-runtime:frame-try-runtime,runtime-benchmarks:frame-benchmarking", # Auxillary flags: + "--feature-enables-dep=try-runtime:frame-try-runtime,runtime-benchmarks:frame-benchmarking", # Auxillary flags: "--offline", "--locked", "--show-path",