From 1f7765b63530e362d6b1b4a225b47daddda03637 Mon Sep 17 00:00:00 2001
From: Iulian Barbu <14218860+iulianbarbu@users.noreply.github.com>
Date: Thu, 21 Nov 2024 15:39:08 +0200
Subject: [PATCH 01/41] github/workflows: add ARM macos build binaries job
 (#6427)

# Description

This PR adds the required changes to release `polkadot`, `polkadot-parachain` and `polkadot-omni-node` binaries built on Apple Silicon macOS.

## Integration

This addresses requests from the community for such binaries: #802, and they should be part of the GitHub release page.

## Review Notes

Testing on paritytech-stg focused solely on the macOS binaries: https://github.com/paritytech-stg/polkadot-sdk/actions/runs/11824692766/job/32946793308, except for the steps related to `pgpkms` (which need AWS credentials, missing from paritytech-stg). The binary names don't have a `darwin-arm` identifier and conflict with the existing x86_64-linux binaries. I haven't tested building everything on `paritytech-stg` because the x86_64-linux builds run on `ubuntu-latest-m`, which isn't enabled on `paritytech-stg` (and I haven't asked the CI team to enable one), so testing how to work around naming conflicts should be covered next.

### TODO

- [x] Test the workflow start to end (especially the last bits related to uploading the binaries on S3 and ensuring the previous binaries and the new ones coexist harmoniously on S3/action artifacts storage without naming conflicts) @EgorPopelyaev
- [x] Publish the ARM binaries on the GitHub release page - to clarify what's needed @iulianbarbu. Current practice is to manually publish the binaries built via the `release-build-rc.yml` workflow, taken from S3. It would be great to have the binaries there in the first place before working on automating this, but I would also do it in a follow-up PR.

### Follow-ups

- [ ] unify the binaries building under `release-30_publish_release_draft.yml` maybe?
- [ ] automate binary artifacts upload to S3 in `release-30_publish_release_draft.yml` --------- Signed-off-by: Iulian Barbu Co-authored-by: EgorPopelyaev --- .../scripts/release/build-linux-release.sh | 4 +- .../scripts/release/build-macos-release.sh | 37 +++ .github/scripts/release/release_lib.sh | 38 ++-- .../release-30_publish_release_draft.yml | 4 +- .github/workflows/release-build-rc.yml | 91 ++++++++ .../workflows/release-reusable-rc-buid.yml | 212 +++++++++++++++++- .../workflows/release-reusable-s3-upload.yml | 17 +- 7 files changed, 376 insertions(+), 27 deletions(-) create mode 100755 .github/scripts/release/build-macos-release.sh diff --git a/.github/scripts/release/build-linux-release.sh b/.github/scripts/release/build-linux-release.sh index a6bd658d292a..874c9b44788b 100755 --- a/.github/scripts/release/build-linux-release.sh +++ b/.github/scripts/release/build-linux-release.sh @@ -3,6 +3,8 @@ # This is used to build our binaries: # - polkadot # - polkadot-parachain +# - polkadot-omni-node +# # set -e BIN=$1 @@ -21,7 +23,7 @@ time cargo build --profile $PROFILE --locked --verbose --bin $BIN --package $PAC echo "Artifact target: $ARTIFACTS" cp ./target/$PROFILE/$BIN "$ARTIFACTS" -pushd "$ARTIFACTS" > /dev/nul +pushd "$ARTIFACTS" > /dev/null sha256sum "$BIN" | tee "$BIN.sha256" EXTRATAG="$($ARTIFACTS/$BIN --version | diff --git a/.github/scripts/release/build-macos-release.sh b/.github/scripts/release/build-macos-release.sh new file mode 100755 index 000000000000..ba6dcc65d650 --- /dev/null +++ b/.github/scripts/release/build-macos-release.sh @@ -0,0 +1,37 @@ +#!/usr/bin/env bash + +# This is used to build our binaries: +# - polkadot +# - polkadot-parachain +# - polkadot-omni-node +# set -e + +BIN=$1 +PACKAGE=${2:-$BIN} + +PROFILE=${PROFILE:-production} +# parity-macos runner needs a path where it can +# write, so make it relative to github workspace. +ARTIFACTS=$GITHUB_WORKSPACE/artifacts/$BIN +VERSION=$(git tag -l --contains HEAD | grep -E "^v.*") + +echo "Artifacts will be copied into $ARTIFACTS" +mkdir -p "$ARTIFACTS" + +git log --pretty=oneline -n 1 +time cargo build --profile $PROFILE --locked --verbose --bin $BIN --package $PACKAGE + +echo "Artifact target: $ARTIFACTS" + +cp ./target/$PROFILE/$BIN "$ARTIFACTS" +pushd "$ARTIFACTS" > /dev/null +sha256sum "$BIN" | tee "$BIN.sha256" + +EXTRATAG="$($ARTIFACTS/$BIN --version | + sed -n -r 's/^'$BIN' ([0-9.]+.*-[0-9a-f]{7,13})-.*$/\1/p')" + +EXTRATAG="${VERSION}-${EXTRATAG}-$(cut -c 1-8 $ARTIFACTS/$BIN.sha256)" + +echo "$BIN version = ${VERSION} (EXTRATAG = ${EXTRATAG})" +echo -n ${VERSION} > "$ARTIFACTS/VERSION" +echo -n ${EXTRATAG} > "$ARTIFACTS/EXTRATAG" diff --git a/.github/scripts/release/release_lib.sh b/.github/scripts/release/release_lib.sh index f5032073b617..8b9254ec3f29 100644 --- a/.github/scripts/release/release_lib.sh +++ b/.github/scripts/release/release_lib.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# Set the new version by replacing the value of the constant given as patetrn +# Set the new version by replacing the value of the constant given as pattern # in the file. 
# # input: pattern, version, file @@ -119,21 +119,23 @@ set_polkadot_parachain_binary_version() { upload_s3_release() { - alias aws='podman run --rm -it docker.io/paritytech/awscli -e AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY -e AWS_BUCKET aws' - - product=$1 - version=$2 - - echo "Working on product: $product " - echo "Working on version: $version " - - echo "Current content, should be empty on new uploads:" - aws s3 ls "s3://releases.parity.io/polkadot/${version}/" --recursive --human-readable --summarize || true - echo "Content to be uploaded:" - artifacts="artifacts/$product/" - ls "$artifacts" - aws s3 sync --acl public-read "$artifacts" "s3://releases.parity.io/polkadot/${version}/" - echo "Uploaded files:" - aws s3 ls "s3://releases.parity.io/polkadot/${version}/" --recursive --human-readable --summarize - echo "✅ The release should be at https://releases.parity.io/polkadot/${version}" + alias aws='podman run --rm -it docker.io/paritytech/awscli -e AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY -e AWS_BUCKET aws' + + product=$1 + version=$2 + target=$3 + + echo "Working on product: $product " + echo "Working on version: $version " + echo "Working on platform: $target " + + echo "Current content, should be empty on new uploads:" + aws s3 ls "s3://releases.parity.io/${product}/${version}/${target}" --recursive --human-readable --summarize || true + echo "Content to be uploaded:" + artifacts="artifacts/$product/" + ls "$artifacts" + aws s3 sync --acl public-read "$artifacts" "s3://releases.parity.io/${product}/${version}/${target}" + echo "Uploaded files:" + aws s3 ls "s3://releases.parity.io/${product}/${version}/${target}" --recursive --human-readable --summarize + echo "✅ The release should be at https://releases.parity.io/${product}/${version}/${target}" } diff --git a/.github/workflows/release-30_publish_release_draft.yml b/.github/workflows/release-30_publish_release_draft.yml index 376f5fbce909..4364b4f80457 100644 --- a/.github/workflows/release-30_publish_release_draft.yml +++ b/.github/workflows/release-30_publish_release_draft.yml @@ -34,7 +34,7 @@ jobs: strategy: matrix: # Tuples of [package, binary-name] - binary: [ [frame-omni-bencher, frame-omni-bencher], [staging-chain-spec-builder, chain-spec-builder], [polkadot-omni-node, polkadot-omni-node] ] + binary: [ [frame-omni-bencher, frame-omni-bencher], [staging-chain-spec-builder, chain-spec-builder] ] steps: - name: Checkout sources uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.0.0 @@ -161,7 +161,7 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - binary: [frame-omni-bencher, chain-spec-builder, polkadot-omni-node] + binary: [frame-omni-bencher, chain-spec-builder] steps: - name: Download artifacts diff --git a/.github/workflows/release-build-rc.yml b/.github/workflows/release-build-rc.yml index 94bacf320898..a43c2b282a8d 100644 --- a/.github/workflows/release-build-rc.yml +++ b/.github/workflows/release-build-rc.yml @@ -10,6 +10,7 @@ on: options: - polkadot - polkadot-parachain + - polkadot-omni-node - all release_tag: @@ -47,6 +48,7 @@ jobs: binary: '["polkadot", "polkadot-prepare-worker", "polkadot-execute-worker"]' package: polkadot release_tag: ${{ needs.validate-inputs.outputs.release_tag }} + target: x86_64-unknown-linux-gnu secrets: PGP_KMS_KEY: ${{ secrets.PGP_KMS_KEY }} PGP_KMS_HASH: ${{ secrets.PGP_KMS_HASH }} @@ -68,6 +70,95 @@ jobs: binary: '["polkadot-parachain"]' package: "polkadot-parachain-bin" release_tag: ${{ needs.validate-inputs.outputs.release_tag }} + target: 
x86_64-unknown-linux-gnu + secrets: + PGP_KMS_KEY: ${{ secrets.PGP_KMS_KEY }} + PGP_KMS_HASH: ${{ secrets.PGP_KMS_HASH }} + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + permissions: + id-token: write + attestations: write + contents: read + + build-polkadot-omni-node-binary: + needs: [validate-inputs] + if: ${{ inputs.binary == 'polkadot-omni-node' || inputs.binary == 'all' }} + uses: "./.github/workflows/release-reusable-rc-buid.yml" + with: + binary: '["polkadot-omni-node"]' + package: "polkadot-omni-node" + release_tag: ${{ needs.validate-inputs.outputs.release_tag }} + target: x86_64-unknown-linux-gnu + secrets: + PGP_KMS_KEY: ${{ secrets.PGP_KMS_KEY }} + PGP_KMS_HASH: ${{ secrets.PGP_KMS_HASH }} + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + permissions: + id-token: write + attestations: write + contents: read + + build-polkadot-macos-binary: + needs: [validate-inputs] + if: ${{ inputs.binary == 'polkadot' || inputs.binary == 'all' }} + uses: "./.github/workflows/release-reusable-rc-buid.yml" + with: + binary: '["polkadot", "polkadot-prepare-worker", "polkadot-execute-worker"]' + package: polkadot + release_tag: ${{ needs.validate-inputs.outputs.release_tag }} + target: aarch64-apple-darwin + secrets: + PGP_KMS_KEY: ${{ secrets.PGP_KMS_KEY }} + PGP_KMS_HASH: ${{ secrets.PGP_KMS_HASH }} + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + permissions: + id-token: write + attestations: write + contents: read + + build-polkadot-parachain-macos-binary: + needs: [validate-inputs] + if: ${{ inputs.binary == 'polkadot-parachain' || inputs.binary == 'all' }} + uses: "./.github/workflows/release-reusable-rc-buid.yml" + with: + binary: '["polkadot-parachain"]' + package: "polkadot-parachain-bin" + release_tag: ${{ needs.validate-inputs.outputs.release_tag }} + target: aarch64-apple-darwin + secrets: + PGP_KMS_KEY: ${{ secrets.PGP_KMS_KEY }} + PGP_KMS_HASH: ${{ secrets.PGP_KMS_HASH }} + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + permissions: + id-token: write + attestations: write + contents: read + + build-polkadot-omni-node-macos-binary: + needs: [validate-inputs] + if: ${{ inputs.binary == 'polkadot-omni-node' || inputs.binary == 'all' }} + uses: "./.github/workflows/release-reusable-rc-buid.yml" + with: + binary: '["polkadot-omni-node"]' + package: "polkadot-omni-node" + release_tag: ${{ needs.validate-inputs.outputs.release_tag }} + target: aarch64-apple-darwin secrets: PGP_KMS_KEY: ${{ secrets.PGP_KMS_KEY }} 
PGP_KMS_HASH: ${{ secrets.PGP_KMS_HASH }} diff --git a/.github/workflows/release-reusable-rc-buid.yml b/.github/workflows/release-reusable-rc-buid.yml index d925839fb84a..7e31a4744b59 100644 --- a/.github/workflows/release-reusable-rc-buid.yml +++ b/.github/workflows/release-reusable-rc-buid.yml @@ -10,7 +10,7 @@ on: type: string package: - description: Package to be built, for now is either polkadot or polkadot-parachain-bin + description: Package to be built, for now can be polkadot, polkadot-parachain-bin, or polkadot-omni-node required: true type: string @@ -19,6 +19,11 @@ on: required: true type: string + target: + description: Target triple for which the artifacts are being built (e.g. x86_64-unknown-linux-gnu) + required: true + type: string + secrets: PGP_KMS_KEY: required: true @@ -57,6 +62,7 @@ jobs: run: cat .github/env >> $GITHUB_OUTPUT build-rc: + if: ${{ inputs.target == 'x86_64-unknown-linux-gnu' }} needs: [set-image] runs-on: ubuntu-latest-m environment: release @@ -130,8 +136,124 @@ jobs: name: ${{ matrix.binaries }} path: /artifacts/${{ matrix.binaries }} + build-macos-rc: + if: ${{ inputs.target == 'aarch64-apple-darwin' }} + runs-on: parity-macos + environment: release + strategy: + matrix: + binaries: ${{ fromJSON(inputs.binary) }} + env: + PGP_KMS_KEY: ${{ secrets.PGP_KMS_KEY }} + PGP_KMS_HASH: ${{ secrets.PGP_KMS_HASH }} + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + SKIP_WASM_BUILD: 1 + steps: + - name: Checkout sources + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 + with: + ref: ${{ inputs.release_tag }} + fetch-depth: 0 + + - name: Set rust version from env file + run: | + RUST_VERSION=$(cat .github/env | sed -E 's/.*ci-unified:([^-]+)-([^-]+).*/\2/') + echo $RUST_VERSION + echo "RUST_VERSION=${RUST_VERSION}" >> $GITHUB_ENV + - name: Set workspace environment variable + # relevant for artifacts upload, which can not interpolate Github Action variable syntax when + # used within valid paths. We can not use root-based paths either, since it is set as read-only + # on the `parity-macos` runner. + run: echo "ARTIFACTS_PATH=${GITHUB_WORKSPACE}/artifacts/${{ matrix.binaries }}" >> $GITHUB_ENV + + - name: Set up Homebrew + uses: Homebrew/actions/setup-homebrew@1ccc07ccd54b6048295516a3eb89b192c35057dc # master from 12.09.2024 + - name: Set homebrew binaries location on path + run: echo "/opt/homebrew/bin" >> $GITHUB_PATH + + - name: Install rust ${{ env.RUST_VERSION }} + uses: actions-rust-lang/setup-rust-toolchain@11df97af8e8102fd60b60a77dfbf58d40cd843b8 # v1.10.1 + with: + cache: false + toolchain: ${{ env.RUST_VERSION }} + target: wasm32-unknown-unknown + components: cargo, clippy, rust-docs, rust-src, rustfmt, rustc, rust-std + + - name: cargo info + run: | + echo "######## rustup show ########" + rustup show + echo "######## cargo --version ########" + cargo --version + + - name: Install protobuf + run: brew install protobuf + - name: Install gpg + run: | + brew install gnupg + # Setup for being able to resolve: keyserver.ubuntu.com. 
+ # See: https://github.com/actions/runner-images/issues/9777 + mkdir -p ~/.gnupg/ + touch ~/.gnupg/dirmngr.conf + echo "standard-resolver" > ~/.gnupg/dirmngr.conf + - name: Install sha256sum + run: | + brew install coreutils + + - name: Install pgpkkms + run: | + # Install pgpkms that is used to sign built artifacts + python3 -m pip install "pgpkms @ git+https://github.com/paritytech-release/pgpkms.git@5a8f82fbb607ea102d8c178e761659de54c7af69" --break-system-packages + + - name: Import gpg keys + shell: bash + run: | + . ./.github/scripts/common/lib.sh + + import_gpg_keys + + - name: Build binary + run: | + git config --global --add safe.directory "${GITHUB_WORKSPACE}" #avoid "detected dubious ownership" error + ./.github/scripts/release/build-macos-release.sh ${{ matrix.binaries }} ${{ inputs.package }} + + - name: Generate artifact attestation + uses: actions/attest-build-provenance@1c608d11d69870c2092266b3f9a6f3abbf17002c # v1.4.3 + with: + subject-path: ${{ env.ARTIFACTS_PATH }}/${{ matrix.binaries }} + + - name: Sign artifacts + working-directory: ${{ env.ARTIFACTS_PATH }} + run: | + python3 -m pgpkms sign --input ${{matrix.binaries }} -o ${{ matrix.binaries }}.asc + + - name: Check sha256 ${{ matrix.binaries }} + working-directory: ${{ env.ARTIFACTS_PATH }} + shell: bash + run: | + . "${GITHUB_WORKSPACE}"/.github/scripts/common/lib.sh + + echo "Checking binary ${{ matrix.binaries }}" + check_sha256 ${{ matrix.binaries }} + + - name: Check GPG ${{ matrix.binaries }} + working-directory: ${{ env.ARTIFACTS_PATH }} + shell: bash + run: | + . "${GITHUB_WORKSPACE}"/.github/scripts/common/lib.sh + + check_gpg ${{ matrix.binaries }} + + - name: Upload ${{ matrix.binaries }} artifacts + uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 + with: + name: ${{ matrix.binaries }}_${{ inputs.target }} + path: ${{ env.ARTIFACTS_PATH }} + build-polkadot-deb-package: - if: ${{ inputs.package == 'polkadot' }} + if: ${{ inputs.package == 'polkadot' && inputs.target == 'x86_64-unknown-linux-gnu' }} needs: [build-rc] runs-on: ubuntu-latest @@ -168,12 +290,13 @@ jobs: overwrite: true upload-polkadot-artifacts-to-s3: - if: ${{ inputs.package == 'polkadot' }} + if: ${{ inputs.package == 'polkadot' && inputs.target == 'x86_64-unknown-linux-gnu' }} needs: [build-polkadot-deb-package] uses: ./.github/workflows/release-reusable-s3-upload.yml with: package: ${{ inputs.package }} release_tag: ${{ inputs.release_tag }} + target: ${{ inputs.target }} secrets: AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} @@ -181,12 +304,93 @@ jobs: upload-polkadot-parachain-artifacts-to-s3: - if: ${{ inputs.package == 'polkadot-parachain-bin' }} + if: ${{ inputs.package == 'polkadot-parachain-bin' && inputs.target == 'x86_64-unknown-linux-gnu' }} needs: [build-rc] uses: ./.github/workflows/release-reusable-s3-upload.yml with: package: polkadot-parachain release_tag: ${{ inputs.release_tag }} + target: ${{ inputs.target }} + secrets: + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + + upload-polkadot-omni-node-artifacts-to-s3: + if: ${{ inputs.package == 'polkadot-omni-node' && inputs.target == 'x86_64-unknown-linux-gnu' }} + needs: [build-rc] + uses: ./.github/workflows/release-reusable-s3-upload.yml + with: + package: ${{ inputs.package }} + release_tag: ${{ 
inputs.release_tag }} + target: ${{ inputs.target }} + secrets: + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + + upload-polkadot-macos-artifacts-to-s3: + if: ${{ inputs.package == 'polkadot' && inputs.target == 'aarch64-apple-darwin' }} + # TODO: add and use a `build-polkadot-homebrew-package` which packs all `polkadot` binaries: + # `polkadot`, `polkadot-prepare-worker` and `polkadot-execute-worker`. + needs: [build-macos-rc] + uses: ./.github/workflows/release-reusable-s3-upload.yml + with: + package: ${{ inputs.package }} + release_tag: ${{ inputs.release_tag }} + target: ${{ inputs.target }} + secrets: + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + + upload-polkadot-prepare-worker-macos-artifacts-to-s3: + if: ${{ inputs.package == 'polkadot' && inputs.target == 'aarch64-apple-darwin' }} + needs: [build-macos-rc] + uses: ./.github/workflows/release-reusable-s3-upload.yml + with: + package: polkadot-prepare-worker + release_tag: ${{ inputs.release_tag }} + target: ${{ inputs.target }} + secrets: + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + + upload-polkadot-execute-worker-macos-artifacts-to-s3: + if: ${{ inputs.package == 'polkadot' && inputs.target == 'aarch64-apple-darwin' }} + needs: [build-macos-rc] + uses: ./.github/workflows/release-reusable-s3-upload.yml + with: + package: polkadot-execute-worker + release_tag: ${{ inputs.release_tag }} + target: ${{ inputs.target }} + secrets: + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + + upload-polkadot-omni-node-macos-artifacts-to-s3: + if: ${{ inputs.package == 'polkadot-omni-node' && inputs.target == 'aarch64-apple-darwin' }} + needs: [build-macos-rc] + uses: ./.github/workflows/release-reusable-s3-upload.yml + with: + package: ${{ inputs.package }} + release_tag: ${{ inputs.release_tag }} + target: ${{ inputs.target }} + secrets: + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + + upload-polkadot-parachain-macos-artifacts-to-s3: + if: ${{ inputs.package == 'polkadot-parachain-bin' && inputs.target == 'aarch64-apple-darwin' }} + needs: [build-macos-rc] + uses: ./.github/workflows/release-reusable-s3-upload.yml + with: + package: polkadot-parachain + release_tag: ${{ inputs.release_tag }} + target: ${{ inputs.target }} secrets: AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} diff --git a/.github/workflows/release-reusable-s3-upload.yml b/.github/workflows/release-reusable-s3-upload.yml index 6776b78da8e6..f85466bc8c07 100644 --- a/.github/workflows/release-reusable-s3-upload.yml +++ b/.github/workflows/release-reusable-s3-upload.yml @@ -13,6 +13,11 @@ on: required: true type: string + target: + description: Target triple for which the artifacts are being uploaded (e.g aarch64-apple-darwin) 
+        required: true
+        type: string
+
     secrets:
       AWS_DEFAULT_REGION:
         required: true
@@ -34,12 +39,20 @@ jobs:
       - name: Checkout
         uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0

-      - name: Download artifacts
+      - name: Download amd64 artifacts
+        if: ${{ inputs.target == 'x86_64-unknown-linux-gnu' }}
         uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
         with:
           name: ${{ inputs.package }}
           path: artifacts/${{ inputs.package }}

+      - name: Download arm artifacts
+        if: ${{ inputs.target == 'aarch64-apple-darwin' }}
+        uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
+        with:
+          name: ${{ inputs.package }}_aarch64-apple-darwin
+          path: artifacts/${{ inputs.package }}
+
       - name: Configure AWS Credentials
         uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2
         with:
@@ -50,4 +63,4 @@
       - name: Upload ${{ inputs.package }} artifacts to s3
         run: |
           . ./.github/scripts/release/release_lib.sh
-          upload_s3_release ${{ inputs.package }} ${{ inputs.release_tag }}
+          upload_s3_release ${{ inputs.package }} ${{ inputs.release_tag }} ${{ inputs.target }}

From bf20a9ee18f7215210bbbabf79e955c8c35b3360 Mon Sep 17 00:00:00 2001
From: Ankan <10196091+Ank4n@users.noreply.github.com>
Date: Thu, 21 Nov 2024 21:04:47 +0700
Subject: [PATCH 02/41] [Fix|NominationPools] Only allow apply slash to be
 executed if the slash amount is at least ED (#6540)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This change prevents `pools::apply_slash` from being executed when the pending slash amount of the member is lower than the ED. The issue came to light with the failing [benchmark test](https://github.com/polkadot-fellows/runtimes/actions/runs/11879471717/job/33101445269?pr=490#step:11:765) in Kusama. The problem arises from the inexact conversion between points and balance. Specifically, when points are converted to balance and then back to points, rounding can introduce a small discrepancy between the input and the resulting value. This issue surfaced in Kusama due to its ED being different from Westend and Polkadot (1 UNIT/300), making the rounding issue noticeable.

This fix is also significant because applying a slash is feeless and permissionless. Allowing very small slash amounts to be applied without a fee is undesirable. With this change, such small slashes will still be applied, but only when member funds are withdrawn.

---------

Co-authored-by: Dónal Murray
---
 prdoc/pr_6540.prdoc                           | 16 ++++++++
 .../nomination-pools/runtime-api/src/lib.rs   |  3 ++
 substrate/frame/nomination-pools/src/lib.rs   | 23 +++++++++---
 .../test-delegate-stake/Cargo.toml            |  2 +-
 .../test-delegate-stake/src/lib.rs            | 37 +++++++++++++++++--
 5 files changed, 71 insertions(+), 10 deletions(-)
 create mode 100644 prdoc/pr_6540.prdoc

diff --git a/prdoc/pr_6540.prdoc b/prdoc/pr_6540.prdoc
new file mode 100644
index 000000000000..5e0305205521
--- /dev/null
+++ b/prdoc/pr_6540.prdoc
@@ -0,0 +1,16 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Only allow apply slash to be executed if the slash amount is at least ED
+
+doc:
+  - audience: Runtime User
+    description: |
+      This change prevents `pools::apply_slash` from being executed when the pending slash amount of the member is lower
+      than the ED. With this change, such small slashes will still be applied but only when member funds are withdrawn.
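For reviewers, the essence of the fix below is a minimum-amount guard in front of the permissionless `apply_slash` path. A minimal standalone sketch of that rule, using plain `u128` balances and hypothetical names rather than the pallet's actual types:

```rust
#[derive(Debug, PartialEq)]
enum SlashError {
    NothingToSlash,
    SlashTooLow,
}

/// Permissionless (and feeless) slash application: reject dust-sized pending
/// slashes, which can arise from points <-> balance rounding, so that a
/// fee-refunded call cannot be spammed for negligible amounts.
fn apply_slash(pending_slash: u128, existential_deposit: u128) -> Result<u128, SlashError> {
    if pending_slash == 0 {
        return Err(SlashError::NothingToSlash);
    }
    // The guard this patch adds: the pending slash must be at least the ED.
    // Smaller amounts are still settled later, when the member withdraws.
    if pending_slash < existential_deposit {
        return Err(SlashError::SlashTooLow);
    }
    Ok(pending_slash)
}

fn main() {
    // ED of 2 and a pending slash of 1, as in the test further down: rejected.
    assert_eq!(apply_slash(1, 2), Err(SlashError::SlashTooLow));
    assert_eq!(apply_slash(2, 2), Ok(2));
    assert_eq!(apply_slash(0, 2), Err(SlashError::NothingToSlash));
}
```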
+
+crates:
+- name: pallet-nomination-pools-runtime-api
+  bump: patch
+- name: pallet-nomination-pools
+  bump: major

diff --git a/substrate/frame/nomination-pools/runtime-api/src/lib.rs b/substrate/frame/nomination-pools/runtime-api/src/lib.rs
index 4138dd22d898..644ee07fd634 100644
--- a/substrate/frame/nomination-pools/runtime-api/src/lib.rs
+++ b/substrate/frame/nomination-pools/runtime-api/src/lib.rs
@@ -43,6 +43,9 @@ sp_api::decl_runtime_apis! {
 		fn pool_pending_slash(pool_id: PoolId) -> Balance;

 		/// Returns the pending slash for a given pool member.
+		///
+		/// If pending slash of the member exceeds `ExistentialDeposit`, it can be reported on
+		/// chain.
 		fn member_pending_slash(member: AccountId) -> Balance;

 		/// Returns true if the pool with `pool_id` needs migration.
diff --git a/substrate/frame/nomination-pools/src/lib.rs b/substrate/frame/nomination-pools/src/lib.rs
index 201b0af1d608..dc82bf3a37c6 100644
--- a/substrate/frame/nomination-pools/src/lib.rs
+++ b/substrate/frame/nomination-pools/src/lib.rs
@@ -1944,6 +1944,8 @@ pub mod pallet {
 		NothingToAdjust,
 		/// No slash pending that can be applied to the member.
 		NothingToSlash,
+		/// The slash amount is too low to be applied.
+		SlashTooLow,
 		/// The pool or member delegation has already migrated to delegate stake.
 		AlreadyMigrated,
 		/// The pool or member delegation has not migrated yet to delegate stake.
@@ -2300,7 +2302,7 @@ pub mod pallet {
 			let slash_weight =
 				// apply slash if any before withdraw.
-				match Self::do_apply_slash(&member_account, None) {
+				match Self::do_apply_slash(&member_account, None, false) {
 					Ok(_) => T::WeightInfo::apply_slash(),
 					Err(e) => {
 						let no_pending_slash: DispatchResult = Err(Error::<T>::NothingToSlash.into());
@@ -2974,8 +2976,10 @@ pub mod pallet {
 		/// Fails unless [`crate::pallet::Config::StakeAdapter`] is of strategy type:
 		/// [`adapter::StakeStrategyType::Delegate`].
 		///
-		/// This call can be dispatched permissionlessly (i.e. by any account). If the member has
-		/// slash to be applied, caller may be rewarded with the part of the slash.
+		/// The pending slash amount of the member must be equal or more than `ExistentialDeposit`.
+		/// This call can be dispatched permissionlessly (i.e. by any account). If the execution
+		/// is successful, fee is refunded and caller may be rewarded with a part of the slash
+		/// based on the [`crate::pallet::Config::StakeAdapter`] configuration.
 		#[pallet::call_index(23)]
 		#[pallet::weight(T::WeightInfo::apply_slash())]
 		pub fn apply_slash(
@@ -2989,7 +2993,7 @@ pub mod pallet {
 			let who = ensure_signed(origin)?;
 			let member_account = T::Lookup::lookup(member_account)?;
-			Self::do_apply_slash(&member_account, Some(who))?;
+			Self::do_apply_slash(&member_account, Some(who), true)?;

 			// If successful, refund the fees.
 			Ok(Pays::No.into())
@@ -3574,15 +3578,21 @@ impl<T: Config> Pallet<T> {
 	fn do_apply_slash(
 		member_account: &T::AccountId,
 		reporter: Option<T::AccountId>,
+		enforce_min_slash: bool,
 	) -> DispatchResult {
 		let member = PoolMembers::<T>::get(member_account).ok_or(Error::<T>::PoolMemberNotFound)?;

 		let pending_slash =
 			Self::member_pending_slash(Member::from(member_account.clone()), member.clone())?;

-		// if nothing to slash, return error.
+		// ensure there is something to slash.
 		ensure!(!pending_slash.is_zero(), Error::<T>::NothingToSlash);

+		if enforce_min_slash {
+			// ensure slashed amount is at least the minimum balance.
+			ensure!(pending_slash >= T::Currency::minimum_balance(), Error::<T>::SlashTooLow);
+		}
+
 		T::StakeAdapter::member_slash(
 			Member::from(member_account.clone()),
 			Pool::from(Pallet::<T>::generate_bonded_account(member.pool_id)),
@@ -3946,6 +3956,9 @@ impl<T: Config> Pallet<T> {
 	/// Returns the unapplied slash of a member.
 	///
 	/// Pending slash is only applicable with [`adapter::DelegateStake`] strategy.
+	///
+	/// If pending slash of the member exceeds `ExistentialDeposit`, it can be reported on
+	/// chain via [`Call::apply_slash`].
 	pub fn api_member_pending_slash(who: T::AccountId) -> BalanceOf<T> {
 		PoolMembers::<T>::get(who.clone())
 			.map(|pool_member| {
diff --git a/substrate/frame/nomination-pools/test-delegate-stake/Cargo.toml b/substrate/frame/nomination-pools/test-delegate-stake/Cargo.toml
index 7940caaff775..70e1591409b8 100644
--- a/substrate/frame/nomination-pools/test-delegate-stake/Cargo.toml
+++ b/substrate/frame/nomination-pools/test-delegate-stake/Cargo.toml
@@ -26,7 +26,7 @@ sp-staking = { workspace = true, default-features = true }
 sp-core = { workspace = true, default-features = true }

 frame-system = { workspace = true, default-features = true }
-frame-support = { workspace = true, default-features = true }
+frame-support = { features = ["experimental"], workspace = true, default-features = true }
 frame-election-provider-support = { workspace = true, default-features = true }

 pallet-timestamp = { workspace = true, default-features = true }
diff --git a/substrate/frame/nomination-pools/test-delegate-stake/src/lib.rs b/substrate/frame/nomination-pools/test-delegate-stake/src/lib.rs
index 40025cdbb3cd..cc6335959ab7 100644
--- a/substrate/frame/nomination-pools/test-delegate-stake/src/lib.rs
+++ b/substrate/frame/nomination-pools/test-delegate-stake/src/lib.rs
@@ -20,7 +20,7 @@
 mod mock;

 use frame_support::{
-	assert_noop, assert_ok,
+	assert_noop, assert_ok, hypothetically,
 	traits::{fungible::InspectHold, Currency},
 };
 use mock::*;
@@ -537,10 +537,10 @@ fn pool_slash_proportional() {
 	// a typical example where 3 pool members unbond in era 99, 100, and 101, and a slash that
 	// happened in era 100 should only affect the latter two.
 	new_test_ext().execute_with(|| {
-		ExistentialDeposit::set(1);
+		ExistentialDeposit::set(2);
 		BondingDuration::set(28);
-		assert_eq!(Balances::minimum_balance(), 1);
-		assert_eq!(CurrentEra::<Runtime>::get(), None);
+		assert_eq!(Balances::minimum_balance(), 2);
+		assert_eq!(Staking::current_era(), None);

 		// create the pool, we know this has id 1.
 		assert_ok!(Pools::create(RuntimeOrigin::signed(10), 40, 10, 10, 10));
@@ -670,6 +670,34 @@ fn pool_slash_proportional() {

 		// no pending slash yet.
 		assert_eq!(Pools::api_pool_pending_slash(1), 0);

+		// and therefore applying slash fails
+		assert_noop!(
+			Pools::apply_slash(RuntimeOrigin::signed(10), 21),
+			PoolsError::<Runtime>::NothingToSlash
+		);
+
+		hypothetically!({
+			// a very small amount is slashed
+			pallet_staking::slashing::do_slash::<Runtime>(
+				&POOL1_BONDED,
+				3,
+				&mut Default::default(),
+				&mut Default::default(),
+				100,
+			);
+
+			// ensure correct amount is pending to be slashed
+			assert_eq!(Pools::api_pool_pending_slash(1), 3);
+
+			// 21 has pending slash lower than ED (2)
+			assert_eq!(Pools::api_member_pending_slash(21), 1);
+
+			// slash fails as minimum pending slash amount not met.
+			assert_noop!(
+				Pools::apply_slash(RuntimeOrigin::signed(10), 21),
+				PoolsError::<Runtime>::SlashTooLow
+			);
+		});

 		pallet_staking::slashing::do_slash::<Runtime>(
 			&POOL1_BONDED,
@@ -909,6 +937,7 @@ fn pool_slash_non_proportional_bonded_pool_and_chunks() {
 		);
 	});
 }
+
 #[test]
 fn pool_migration_e2e() {
 	new_test_ext().execute_with(|| {

From 6d59c3b1417cd07ae8d502236e8a98c3498f76b1 Mon Sep 17 00:00:00 2001
From: Iulian Barbu <14218860+iulianbarbu@users.noreply.github.com>
Date: Thu, 21 Nov 2024 20:19:14 +0200
Subject: [PATCH 03/41] parachain-template-node: add properties for dev
 chain-spec (#6560)

# Description

Reused the `properties` variable, as before, when defining a development chain spec for parachain-template-node.

## Integration

N/A

## Review Notes

One-line change, pretty self-explanatory (it got lost within the history of changes over the parachain-template-node/chain_spec.rs file). To be honest, not really sure how useful it is, but I had the choice of removing the `properties` var or reusing it as before, and I went with the latter.

Signed-off-by: Iulian Barbu
---
 templates/parachain/node/src/chain_spec.rs | 1 +
 1 file changed, 1 insertion(+)

diff --git a/templates/parachain/node/src/chain_spec.rs b/templates/parachain/node/src/chain_spec.rs
index 55a099dd022b..7ae3c4900e42 100644
--- a/templates/parachain/node/src/chain_spec.rs
+++ b/templates/parachain/node/src/chain_spec.rs
@@ -45,6 +45,7 @@ pub fn development_chain_spec() -> ChainSpec {
 	.with_id("dev")
 	.with_chain_type(ChainType::Development)
 	.with_genesis_config_preset_name(sp_genesis_builder::DEV_RUNTIME_PRESET)
+	.with_properties(properties)
 	.build()
 }

From d8ce550256f08c42d1ba6630990ed7e2e9ce0352 Mon Sep 17 00:00:00 2001
From: PG Herveou
Date: Thu, 21 Nov 2024 21:44:42 +0100
Subject: [PATCH 04/41] [pallet-revive] Support all eth tx types (#6461)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Add support for all eth tx types. Note that JS libs will continue to use the Legacy type, since we don't include base_fee_per_gas in the block yet.
We can think about setting these values after we revisit how we encode the gas into weight & deposit_limit in a follow up PR --------- Co-authored-by: Alexander Theißen Co-authored-by: GitHub Action --- prdoc/pr_6461.prdoc | 12 + substrate/frame/revive/rpc/src/client.rs | 24 +- substrate/frame/revive/rpc/src/example.rs | 9 +- substrate/frame/revive/rpc/src/lib.rs | 57 +-- substrate/frame/revive/src/evm/api.rs | 2 - substrate/frame/revive/src/evm/api/account.rs | 28 +- .../frame/revive/src/evm/api/rlp_codec.rs | 471 ++++++++++++++++-- .../frame/revive/src/evm/api/rpc_types.rs | 140 +++++- .../frame/revive/src/evm/api/rpc_types_gen.rs | 18 +- .../frame/revive/src/evm/api/signature.rs | 190 +++++-- substrate/frame/revive/src/evm/api/type_id.rs | 22 +- substrate/frame/revive/src/evm/runtime.rs | 39 +- substrate/frame/revive/src/lib.rs | 2 +- 13 files changed, 824 insertions(+), 190 deletions(-) create mode 100644 prdoc/pr_6461.prdoc diff --git a/prdoc/pr_6461.prdoc b/prdoc/pr_6461.prdoc new file mode 100644 index 000000000000..1b3d1e8b0364 --- /dev/null +++ b/prdoc/pr_6461.prdoc @@ -0,0 +1,12 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json +title: '[pallet-revive] add support for all eth tx types' +doc: +- audience: Runtime Dev + description: Add support for 1559, 4844, and 2930 transaction types +crates: +- name: pallet-revive-eth-rpc + bump: minor +- name: pallet-revive + bump: minor + diff --git a/substrate/frame/revive/rpc/src/client.rs b/substrate/frame/revive/rpc/src/client.rs index 1ca1a6d37c53..d37f1d760065 100644 --- a/substrate/frame/revive/rpc/src/client.rs +++ b/substrate/frame/revive/rpc/src/client.rs @@ -17,13 +17,12 @@ //! The client connects to the source substrate chain //! and is used by the rpc server to query and send transactions to the substrate chain. 
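Before the RPC client diff continues, it may help to recall how raw Ethereum payloads distinguish the four transaction types this PR adds: per EIP-2718, typed transactions are prefixed with a single type byte, while legacy transactions are a bare RLP list. A standalone sketch of that dispatch (the `TYPE_*` constants mirror the patch; `TxKind` and `classify` are illustrative names, not the patch's API):

```rust
const TYPE_EIP2930: u8 = 0x01;
const TYPE_EIP1559: u8 = 0x02;
const TYPE_EIP4844: u8 = 0x03;

#[derive(Debug, PartialEq)]
enum TxKind {
    Legacy,
    Eip2930,
    Eip1559,
    Eip4844,
}

/// Classify a raw `eth_sendRawTransaction` payload by its first byte,
/// the same check `TransactionSigned::decode` performs before picking
/// an RLP decoder.
fn classify(data: &[u8]) -> Result<TxKind, &'static str> {
    match *data.first().ok_or("empty payload")? {
        TYPE_EIP2930 => Ok(TxKind::Eip2930),
        TYPE_EIP1559 => Ok(TxKind::Eip1559),
        TYPE_EIP4844 => Ok(TxKind::Eip4844),
        // An RLP list prefix is always >= 0xc0, so a legacy transaction can
        // never collide with the small EIP-2718 type bytes above.
        b if b >= 0xc0 => Ok(TxKind::Legacy),
        _ => Err("unknown transaction type"),
    }
}

fn main() {
    assert_eq!(classify(&[0x02, 0xf8]), Ok(TxKind::Eip1559));
    assert_eq!(classify(&[0xf8, 0x6c]), Ok(TxKind::Legacy));
}
```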
use crate::{ - rlp, runtime::GAS_PRICE, subxt_client::{ revive::{calls::types::EthTransact, events::ContractEmitted}, runtime_types::pallet_revive::storage::ContractInfo, }, - TransactionLegacySigned, LOG_TARGET, + LOG_TARGET, }; use futures::{stream, StreamExt}; use jsonrpsee::types::{error::CALL_EXECUTION_FAILED_CODE, ErrorObjectOwned}; @@ -269,25 +268,26 @@ impl ClientInner { let extrinsics = extrinsics.iter().flat_map(|ext| { let call = ext.as_extrinsic::().ok()??; let transaction_hash = H256(keccak_256(&call.payload)); - let tx = rlp::decode::(&call.payload).ok()?; - let from = tx.recover_eth_address().ok()?; - let contract_address = if tx.transaction_legacy_unsigned.to.is_none() { - Some(create1(&from, tx.transaction_legacy_unsigned.nonce.try_into().ok()?)) + let signed_tx = TransactionSigned::decode(&call.payload).ok()?; + let from = signed_tx.recover_eth_address().ok()?; + let tx_info = GenericTransaction::from_signed(signed_tx.clone(), Some(from)); + let contract_address = if tx_info.to.is_none() { + Some(create1(&from, tx_info.nonce.unwrap_or_default().try_into().ok()?)) } else { None }; - Some((from, tx, transaction_hash, contract_address, ext)) + Some((from, signed_tx, tx_info, transaction_hash, contract_address, ext)) }); // Map each extrinsic to a receipt stream::iter(extrinsics) - .map(|(from, tx, transaction_hash, contract_address, ext)| async move { + .map(|(from, signed_tx, tx_info, transaction_hash, contract_address, ext)| async move { let events = ext.events().await?; let tx_fees = events.find_first::()?.ok_or(ClientError::TxFeeNotFound)?; - let gas_price = tx.transaction_legacy_unsigned.gas_price; + let gas_price = tx_info.gas_price.unwrap_or_default(); let gas_used = (tx_fees.tip.saturating_add(tx_fees.actual_fee)) .checked_div(gas_price.as_u128()) .unwrap_or_default(); @@ -324,16 +324,16 @@ impl ClientInner { contract_address, from, logs, - tx.transaction_legacy_unsigned.to, + tx_info.to, gas_price, gas_used.into(), success, transaction_hash, transaction_index.into(), - tx.transaction_legacy_unsigned.r#type.as_byte() + tx_info.r#type.unwrap_or_default() ); - Ok::<_, ClientError>((receipt.transaction_hash, (tx.into(), receipt))) + Ok::<_, ClientError>((receipt.transaction_hash, (signed_tx, receipt))) }) .buffer_unordered(10) .collect::>>() diff --git a/substrate/frame/revive/rpc/src/example.rs b/substrate/frame/revive/rpc/src/example.rs index 20f00465b146..3b9a33296ef4 100644 --- a/substrate/frame/revive/rpc/src/example.rs +++ b/substrate/frame/revive/rpc/src/example.rs @@ -20,8 +20,7 @@ use crate::{EthRpcClient, ReceiptInfo}; use anyhow::Context; use pallet_revive::evm::{ - rlp::*, Account, BlockTag, Bytes, GenericTransaction, TransactionLegacyUnsigned, H160, H256, - U256, + Account, BlockTag, Bytes, GenericTransaction, TransactionLegacyUnsigned, H160, H256, U256, }; /// Wait for a transaction receipt. 
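The `TransactionBuilder` hunk that follows swaps the legacy-only `rlp_bytes()` flow for a type-agnostic one: build an unsigned transaction, derive its (possibly type-prefixed) payload, sign that, and submit the signed payload. A compressed sketch of the data flow with stub types (the real `Account::sign_transaction` and `signed_payload` are defined in pallet-revive's evm module, shown later in this patch):

```rust
/// Stub unsigned transaction: an optional EIP-2718 type byte plus the RLP
/// body of the per-type field list.
struct UnsignedTx {
    type_byte: Option<u8>, // None for legacy, Some(0x01|0x02|0x03) otherwise
    rlp_fields: Vec<u8>,
}

struct SignedTx {
    payload: Vec<u8>,
    signature: [u8; 65], // r || s || recovery byte
}

impl UnsignedTx {
    /// The bytes a wallet keccak-hashes and signs: type byte first, then RLP.
    fn unsigned_payload(&self) -> Vec<u8> {
        let mut out = Vec::new();
        if let Some(t) = self.type_byte {
            out.push(t);
        }
        out.extend_from_slice(&self.rlp_fields);
        out
    }

    /// Attach a signature. Real encodings splice r/s/parity into the RLP
    /// list itself; appending here only illustrates that the signature
    /// travels inside the submitted payload.
    fn with_signature(self, signature: [u8; 65]) -> SignedTx {
        let mut payload = self.unsigned_payload();
        payload.extend_from_slice(&signature);
        SignedTx { payload, signature }
    }
}

fn main() {
    let tx = UnsignedTx { type_byte: Some(0x02), rlp_fields: vec![0xc0] };
    let signed = tx.with_signature([0u8; 65]);
    // `signed.payload` is what would go to `eth_sendRawTransaction`.
    assert_eq!(signed.payload[0], 0x02);
    assert_eq!(signed.signature.len(), 65);
}
```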
@@ -169,11 +168,11 @@ impl TransactionBuilder { mutate(&mut unsigned_tx); - let tx = signer.sign_transaction(unsigned_tx.clone()); - let bytes = tx.rlp_bytes().to_vec(); + let tx = signer.sign_transaction(unsigned_tx.into()); + let bytes = tx.signed_payload(); let hash = client - .send_raw_transaction(bytes.clone().into()) + .send_raw_transaction(bytes.into()) .await .with_context(|| "transaction failed")?; diff --git a/substrate/frame/revive/rpc/src/lib.rs b/substrate/frame/revive/rpc/src/lib.rs index 8d9d6fab829e..6a324e63a857 100644 --- a/substrate/frame/revive/rpc/src/lib.rs +++ b/substrate/frame/revive/rpc/src/lib.rs @@ -137,7 +137,7 @@ impl EthRpcServer for EthRpcServerImpl { async fn send_raw_transaction(&self, transaction: Bytes) -> RpcResult { let hash = H256(keccak_256(&transaction.0)); - let tx = rlp::decode::(&transaction.0).map_err(|err| { + let tx = TransactionSigned::decode(&transaction.0).map_err(|err| { log::debug!(target: LOG_TARGET, "Failed to decode transaction: {err:?}"); EthRpcError::from(err) })?; @@ -147,21 +147,10 @@ impl EthRpcServer for EthRpcServerImpl { EthRpcError::InvalidSignature })?; + let tx = GenericTransaction::from_signed(tx, Some(eth_addr)); + // Dry run the transaction to get the weight limit and storage deposit limit - let TransactionLegacyUnsigned { to, input, value, .. } = tx.transaction_legacy_unsigned; - let dry_run = self - .client - .dry_run( - &GenericTransaction { - from: Some(eth_addr), - input: Some(input.clone()), - to, - value: Some(value), - ..Default::default() - }, - BlockTag::Latest.into(), - ) - .await?; + let dry_run = self.client.dry_run(&tx, BlockTag::Latest.into()).await?; let EthContractResult { gas_required, storage_deposit, .. } = dry_run; let call = subxt_client::tx().revive().eth_transact( @@ -174,11 +163,10 @@ impl EthRpcServer for EthRpcServerImpl { Ok(hash) } - async fn send_transaction(&self, transaction: GenericTransaction) -> RpcResult { + async fn send_transaction(&self, mut transaction: GenericTransaction) -> RpcResult { log::debug!(target: LOG_TARGET, "{transaction:#?}"); - let GenericTransaction { from, gas, gas_price, input, to, value, r#type, .. 
} = transaction; - let Some(from) = from else { + let Some(from) = transaction.from else { log::debug!(target: LOG_TARGET, "Transaction must have a sender"); return Err(EthRpcError::InvalidTransaction.into()); }; @@ -189,27 +177,26 @@ impl EthRpcServer for EthRpcServerImpl { .find(|account| account.address() == from) .ok_or(EthRpcError::AccountNotFound(from))?; - let gas_price = gas_price.unwrap_or_else(|| U256::from(GAS_PRICE)); - let chain_id = Some(self.client.chain_id().into()); - let input = input.unwrap_or_default(); - let value = value.unwrap_or_default(); - let r#type = r#type.unwrap_or_default(); + if transaction.gas.is_none() { + transaction.gas = Some(self.estimate_gas(transaction.clone(), None).await?); + } - let Some(gas) = gas else { - log::debug!(target: LOG_TARGET, "Transaction must have a gas limit"); - return Err(EthRpcError::InvalidTransaction.into()); - }; + if transaction.gas_price.is_none() { + transaction.gas_price = Some(self.gas_price().await?); + } - let r#type = Type0::try_from_byte(r#type.clone()) - .map_err(|_| EthRpcError::TransactionTypeNotSupported(r#type))?; + if transaction.nonce.is_none() { + transaction.nonce = + Some(self.get_transaction_count(from, BlockTag::Latest.into()).await?); + } - let nonce = self.get_transaction_count(from, BlockTag::Latest.into()).await?; + if transaction.chain_id.is_none() { + transaction.chain_id = Some(self.chain_id().await?); + } - let tx = - TransactionLegacyUnsigned { chain_id, gas, gas_price, input, nonce, to, value, r#type }; - let tx = account.sign_transaction(tx); - let rlp_bytes = rlp::encode(&tx).to_vec(); - self.send_raw_transaction(Bytes(rlp_bytes)).await + let tx = transaction.try_into_unsigned().map_err(|_| EthRpcError::InvalidTransaction)?; + let payload = account.sign_transaction(tx).signed_payload(); + self.send_raw_transaction(Bytes(payload)).await } async fn get_block_by_hash( diff --git a/substrate/frame/revive/src/evm/api.rs b/substrate/frame/revive/src/evm/api.rs index 8185a2c8f6f8..fe18c8735bed 100644 --- a/substrate/frame/revive/src/evm/api.rs +++ b/substrate/frame/revive/src/evm/api.rs @@ -25,9 +25,7 @@ pub use rlp; mod type_id; pub use type_id::*; -#[cfg(feature = "std")] mod rpc_types; - mod rpc_types_gen; pub use rpc_types_gen::*; diff --git a/substrate/frame/revive/src/evm/api/account.rs b/substrate/frame/revive/src/evm/api/account.rs index 8365ebf83cae..ba1c68ea0cf7 100644 --- a/substrate/frame/revive/src/evm/api/account.rs +++ b/substrate/frame/revive/src/evm/api/account.rs @@ -16,10 +16,9 @@ // limitations under the License. //! Utilities for working with Ethereum accounts. use crate::{ - evm::{TransactionLegacySigned, TransactionLegacyUnsigned}, + evm::{TransactionSigned, TransactionUnsigned}, H160, }; -use rlp::Encodable; use sp_runtime::AccountId32; /// A simple account that can sign transactions @@ -38,6 +37,11 @@ impl From for Account { } impl Account { + /// Create a new account from a secret + pub fn from_secret_key(secret_key: [u8; 32]) -> Self { + subxt_signer::eth::Keypair::from_secret_key(secret_key).unwrap().into() + } + /// Get the [`H160`] address of the account. pub fn address(&self) -> H160 { H160::from_slice(&self.0.public_key().to_account_id().as_ref()) @@ -52,9 +56,21 @@ impl Account { } /// Sign a transaction. 
-	pub fn sign_transaction(&self, tx: TransactionLegacyUnsigned) -> TransactionLegacySigned {
-		let rlp_encoded = tx.rlp_bytes();
-		let signature = self.0.sign(&rlp_encoded);
-		TransactionLegacySigned::from(tx, signature.as_ref())
+	pub fn sign_transaction(&self, tx: TransactionUnsigned) -> TransactionSigned {
+		let payload = tx.unsigned_payload();
+		let signature = self.0.sign(&payload).0;
+		tx.with_signature(signature)
 	}
 }
+
+#[test]
+fn from_secret_key_works() {
+	let account = Account::from_secret_key(hex_literal::hex!(
+		"a872f6cbd25a0e04a08b1e21098017a9e6194d101d75e13111f71410c59cd57f"
+	));
+
+	assert_eq!(
+		account.address(),
+		H160::from(hex_literal::hex!("75e480db528101a381ce68544611c169ad7eb342"))
+	)
+}
diff --git a/substrate/frame/revive/src/evm/api/rlp_codec.rs b/substrate/frame/revive/src/evm/api/rlp_codec.rs
index e5f24c28a482..3442ed73acca 100644
--- a/substrate/frame/revive/src/evm/api/rlp_codec.rs
+++ b/substrate/frame/revive/src/evm/api/rlp_codec.rs
@@ -21,6 +21,73 @@
 use super::*;
 use alloc::vec::Vec;
 use rlp::{Decodable, Encodable};

+impl TransactionUnsigned {
+	/// Return the bytes to be signed by the private key.
+	pub fn unsigned_payload(&self) -> Vec<u8> {
+		use TransactionUnsigned::*;
+		let mut s = rlp::RlpStream::new();
+		match self {
+			Transaction2930Unsigned(ref tx) => {
+				s.append(&tx.r#type.value());
+				s.append(tx);
+			},
+			Transaction1559Unsigned(ref tx) => {
+				s.append(&tx.r#type.value());
+				s.append(tx);
+			},
+			Transaction4844Unsigned(ref tx) => {
+				s.append(&tx.r#type.value());
+				s.append(tx);
+			},
+			TransactionLegacyUnsigned(ref tx) => {
+				s.append(tx);
+			},
+		}
+
+		s.out().to_vec()
+	}
+}
+
+impl TransactionSigned {
+	/// Encode the Ethereum transaction into bytes.
+	pub fn signed_payload(&self) -> Vec<u8> {
+		use TransactionSigned::*;
+		let mut s = rlp::RlpStream::new();
+		match self {
+			Transaction2930Signed(ref tx) => {
+				s.append(&tx.transaction_2930_unsigned.r#type.value());
+				s.append(tx);
+			},
+			Transaction1559Signed(ref tx) => {
+				s.append(&tx.transaction_1559_unsigned.r#type.value());
+				s.append(tx);
+			},
+			Transaction4844Signed(ref tx) => {
+				s.append(&tx.transaction_4844_unsigned.r#type.value());
+				s.append(tx);
+			},
+			TransactionLegacySigned(ref tx) => {
+				s.append(tx);
+			},
+		}
+
+		s.out().to_vec()
+	}
+
+	/// Decode the Ethereum transaction from bytes.
+	pub fn decode(data: &[u8]) -> Result<Self, rlp::DecoderError> {
+		if data.len() < 1 {
+			return Err(rlp::DecoderError::RlpIsTooShort);
+		}
+		match data[0] {
+			TYPE_EIP2930 => rlp::decode::<Transaction2930Signed>(&data[1..]).map(Into::into),
+			TYPE_EIP1559 => rlp::decode::<Transaction1559Signed>(&data[1..]).map(Into::into),
+			TYPE_EIP4844 => rlp::decode::<Transaction4844Signed>(&data[1..]).map(Into::into),
+			_ => rlp::decode::<TransactionLegacySigned>(data).map(Into::into),
+		}
+	}
+}
+
 impl TransactionLegacyUnsigned {
 	/// Get the rlp encoded bytes of a signed transaction with a dummy 65 bytes signature.
pub fn dummy_signed_payload(&self) -> Vec { @@ -47,8 +114,8 @@ impl Encodable for TransactionLegacyUnsigned { s.append(&self.value); s.append(&self.input.0); s.append(&chain_id); - s.append(&0_u8); - s.append(&0_u8); + s.append(&0u8); + s.append(&0u8); } else { s.begin_list(6); s.append(&self.nonce); @@ -64,7 +131,6 @@ impl Encodable for TransactionLegacyUnsigned { } } -/// See impl Decodable for TransactionLegacyUnsigned { fn decode(rlp: &rlp::Rlp) -> Result { Ok(TransactionLegacyUnsigned { @@ -95,16 +161,18 @@ impl Decodable for TransactionLegacyUnsigned { impl Encodable for TransactionLegacySigned { fn rlp_append(&self, s: &mut rlp::RlpStream) { + let tx = &self.transaction_legacy_unsigned; + s.begin_list(9); - s.append(&self.transaction_legacy_unsigned.nonce); - s.append(&self.transaction_legacy_unsigned.gas_price); - s.append(&self.transaction_legacy_unsigned.gas); - match self.transaction_legacy_unsigned.to { + s.append(&tx.nonce); + s.append(&tx.gas_price); + s.append(&tx.gas); + match tx.to { Some(ref to) => s.append(to), None => s.append_empty_data(), }; - s.append(&self.transaction_legacy_unsigned.value); - s.append(&self.transaction_legacy_unsigned.input.0); + s.append(&tx.value); + s.append(&tx.input.0); s.append(&self.v); s.append(&self.r); @@ -112,6 +180,232 @@ impl Encodable for TransactionLegacySigned { } } +impl Encodable for AccessListEntry { + fn rlp_append(&self, s: &mut rlp::RlpStream) { + s.begin_list(2); + s.append(&self.address); + s.append_list(&self.storage_keys); + } +} + +impl Decodable for AccessListEntry { + fn decode(rlp: &rlp::Rlp) -> Result { + Ok(AccessListEntry { address: rlp.val_at(0)?, storage_keys: rlp.list_at(1)? }) + } +} + +/// See +impl Encodable for Transaction1559Unsigned { + fn rlp_append(&self, s: &mut rlp::RlpStream) { + s.begin_list(9); + s.append(&self.chain_id); + s.append(&self.nonce); + s.append(&self.max_priority_fee_per_gas); + s.append(&self.max_fee_per_gas); + s.append(&self.gas); + match self.to { + Some(ref to) => s.append(to), + None => s.append_empty_data(), + }; + s.append(&self.value); + s.append(&self.input.0); + s.append_list(&self.access_list); + } +} + +/// See +impl Encodable for Transaction1559Signed { + fn rlp_append(&self, s: &mut rlp::RlpStream) { + let tx = &self.transaction_1559_unsigned; + s.begin_list(12); + s.append(&tx.chain_id); + s.append(&tx.nonce); + s.append(&tx.max_priority_fee_per_gas); + s.append(&tx.max_fee_per_gas); + s.append(&tx.gas); + match tx.to { + Some(ref to) => s.append(to), + None => s.append_empty_data(), + }; + s.append(&tx.value); + s.append(&tx.input.0); + s.append_list(&tx.access_list); + + s.append(&self.y_parity); + s.append(&self.r); + s.append(&self.s); + } +} + +impl Decodable for Transaction1559Signed { + fn decode(rlp: &rlp::Rlp) -> Result { + Ok(Transaction1559Signed { + transaction_1559_unsigned: { + Transaction1559Unsigned { + chain_id: rlp.val_at(0)?, + nonce: rlp.val_at(1)?, + max_priority_fee_per_gas: rlp.val_at(2)?, + max_fee_per_gas: rlp.val_at(3)?, + gas: rlp.val_at(4)?, + to: { + let to = rlp.at(5)?; + if to.is_empty() { + None + } else { + Some(to.as_val()?) 
+ } + }, + value: rlp.val_at(6)?, + input: Bytes(rlp.val_at(7)?), + access_list: rlp.list_at(8)?, + ..Default::default() + } + }, + y_parity: rlp.val_at(9)?, + r: rlp.val_at(10)?, + s: rlp.val_at(11)?, + ..Default::default() + }) + } +} + +//See https://eips.ethereum.org/EIPS/eip-2930 +impl Encodable for Transaction2930Unsigned { + fn rlp_append(&self, s: &mut rlp::RlpStream) { + s.begin_list(8); + s.append(&self.chain_id); + s.append(&self.nonce); + s.append(&self.gas_price); + s.append(&self.gas); + match self.to { + Some(ref to) => s.append(to), + None => s.append_empty_data(), + }; + s.append(&self.value); + s.append(&self.input.0); + s.append_list(&self.access_list); + } +} + +//See https://eips.ethereum.org/EIPS/eip-2930 +impl Encodable for Transaction2930Signed { + fn rlp_append(&self, s: &mut rlp::RlpStream) { + let tx = &self.transaction_2930_unsigned; + s.begin_list(11); + s.append(&tx.chain_id); + s.append(&tx.nonce); + s.append(&tx.gas_price); + s.append(&tx.gas); + match tx.to { + Some(ref to) => s.append(to), + None => s.append_empty_data(), + }; + s.append(&tx.value); + s.append(&tx.input.0); + s.append_list(&tx.access_list); + s.append(&self.y_parity); + s.append(&self.r); + s.append(&self.s); + } +} + +impl Decodable for Transaction2930Signed { + fn decode(rlp: &rlp::Rlp) -> Result { + Ok(Transaction2930Signed { + transaction_2930_unsigned: { + Transaction2930Unsigned { + chain_id: rlp.val_at(0)?, + nonce: rlp.val_at(1)?, + gas_price: rlp.val_at(2)?, + gas: rlp.val_at(3)?, + to: { + let to = rlp.at(4)?; + if to.is_empty() { + None + } else { + Some(to.as_val()?) + } + }, + value: rlp.val_at(5)?, + input: Bytes(rlp.val_at(6)?), + access_list: rlp.list_at(7)?, + ..Default::default() + } + }, + y_parity: rlp.val_at(8)?, + r: rlp.val_at(9)?, + s: rlp.val_at(10)?, + ..Default::default() + }) + } +} + +//See https://eips.ethereum.org/EIPS/eip-4844 +impl Encodable for Transaction4844Unsigned { + fn rlp_append(&self, s: &mut rlp::RlpStream) { + s.begin_list(11); + s.append(&self.chain_id); + s.append(&self.nonce); + s.append(&self.max_priority_fee_per_gas); + s.append(&self.max_fee_per_gas); + s.append(&self.gas); + s.append(&self.to); + s.append(&self.value); + s.append(&self.input.0); + s.append_list(&self.access_list); + s.append(&self.max_fee_per_blob_gas); + s.append_list(&self.blob_versioned_hashes); + } +} + +//See https://eips.ethereum.org/EIPS/eip-4844 +impl Encodable for Transaction4844Signed { + fn rlp_append(&self, s: &mut rlp::RlpStream) { + let tx = &self.transaction_4844_unsigned; + s.begin_list(14); + s.append(&tx.chain_id); + s.append(&tx.nonce); + s.append(&tx.max_priority_fee_per_gas); + s.append(&tx.max_fee_per_gas); + s.append(&tx.gas); + s.append(&tx.to); + s.append(&tx.value); + s.append(&tx.input.0); + s.append_list(&tx.access_list); + s.append(&tx.max_fee_per_blob_gas); + s.append_list(&tx.blob_versioned_hashes); + s.append(&self.y_parity); + s.append(&self.r); + s.append(&self.s); + } +} + +impl Decodable for Transaction4844Signed { + fn decode(rlp: &rlp::Rlp) -> Result { + Ok(Transaction4844Signed { + transaction_4844_unsigned: { + Transaction4844Unsigned { + chain_id: rlp.val_at(0)?, + nonce: rlp.val_at(1)?, + max_priority_fee_per_gas: rlp.val_at(2)?, + max_fee_per_gas: rlp.val_at(3)?, + gas: rlp.val_at(4)?, + to: rlp.val_at(5)?, + value: rlp.val_at(6)?, + input: Bytes(rlp.val_at(7)?), + access_list: rlp.list_at(8)?, + max_fee_per_blob_gas: rlp.val_at(9)?, + blob_versioned_hashes: rlp.list_at(10)?, + ..Default::default() + } + }, + y_parity: 
rlp.val_at(11)?, + r: rlp.val_at(12)?, + s: rlp.val_at(13)?, + }) + } +} + /// See impl Decodable for TransactionLegacySigned { fn decode(rlp: &rlp::Rlp) -> Result { @@ -142,7 +436,7 @@ impl Decodable for TransactionLegacySigned { value: rlp.val_at(4)?, input: Bytes(rlp.val_at(5)?), chain_id: extract_chain_id(v).map(|v| v.into()), - r#type: Type0 {}, + r#type: TypeLegacy {}, } }, v, @@ -157,26 +451,118 @@ mod test { use super::*; #[test] - fn encode_decode_legacy_transaction_works() { - let tx = TransactionLegacyUnsigned { - chain_id: Some(596.into()), - gas: U256::from(21000), - nonce: U256::from(1), - gas_price: U256::from("0x640000006a"), - to: Some(Account::from(subxt_signer::eth::dev::baltathar()).address()), - value: U256::from(123123), - input: Bytes(vec![]), - r#type: Type0, - }; + fn encode_decode_tx_works() { + let txs = [ + // Legacy + ( + "f86080808301e24194095e7baea6a6c7c4c2dfeb977efac326af552d87808025a0fe38ca4e44a30002ac54af7cf922a6ac2ba11b7d22f548e8ecb3f51f41cb31b0a06de6a5cbae13c0c856e33acf021b51819636cfc009d39eafb9f606d546e305a8", + r#" + { + "chainId": "0x1", + "gas": "0x1e241", + "gasPrice": "0x0", + "input": "0x", + "nonce": "0x0", + "to": "0x095e7baea6a6c7c4c2dfeb977efac326af552d87", + "type": "0x0", + "value": "0x0", + "r": "0xfe38ca4e44a30002ac54af7cf922a6ac2ba11b7d22f548e8ecb3f51f41cb31b0", + "s": "0x6de6a5cbae13c0c856e33acf021b51819636cfc009d39eafb9f606d546e305a8", + "v": "0x25" + } + "# + ), + // type 1: EIP2930 + ( + "01f89b0180808301e24194095e7baea6a6c7c4c2dfeb977efac326af552d878080f838f7940000000000000000000000000000000000000001e1a0000000000000000000000000000000000000000000000000000000000000000080a0fe38ca4e44a30002ac54af7cf922a6ac2ba11b7d22f548e8ecb3f51f41cb31b0a06de6a5cbae13c0c856e33acf021b51819636cfc009d39eafb9f606d546e305a8", + r#" + { + "accessList": [ + { + "address": "0x0000000000000000000000000000000000000001", + "storageKeys": ["0x0000000000000000000000000000000000000000000000000000000000000000"] + } + ], + "chainId": "0x1", + "gas": "0x1e241", + "gasPrice": "0x0", + "input": "0x", + "nonce": "0x0", + "to": "0x095e7baea6a6c7c4c2dfeb977efac326af552d87", + "type": "0x1", + "value": "0x0", + "r": "0xfe38ca4e44a30002ac54af7cf922a6ac2ba11b7d22f548e8ecb3f51f41cb31b0", + "s": "0x6de6a5cbae13c0c856e33acf021b51819636cfc009d39eafb9f606d546e305a8", + "yParity": "0x0" + } + "# + ), + // type 2: EIP1559 + ( + "02f89c018080018301e24194095e7baea6a6c7c4c2dfeb977efac326af552d878080f838f7940000000000000000000000000000000000000001e1a0000000000000000000000000000000000000000000000000000000000000000080a0fe38ca4e44a30002ac54af7cf922a6ac2ba11b7d22f548e8ecb3f51f41cb31b0a06de6a5cbae13c0c856e33acf021b51819636cfc009d39eafb9f606d546e305a8", + r#" + { + "accessList": [ + { + "address": "0x0000000000000000000000000000000000000001", + "storageKeys": ["0x0000000000000000000000000000000000000000000000000000000000000000"] + } + ], + "chainId": "0x1", + "gas": "0x1e241", + "gasPrice": "0x0", + "input": "0x", + "maxFeePerGas": "0x1", + "maxPriorityFeePerGas": "0x0", + "nonce": "0x0", + "to": "0x095e7baea6a6c7c4c2dfeb977efac326af552d87", + "type": "0x2", + "value": "0x0", + "r": "0xfe38ca4e44a30002ac54af7cf922a6ac2ba11b7d22f548e8ecb3f51f41cb31b0", + "s": "0x6de6a5cbae13c0c856e33acf021b51819636cfc009d39eafb9f606d546e305a8", + "yParity": "0x0" - let rlp_bytes = rlp::encode(&tx); - let decoded = rlp::decode::(&rlp_bytes).unwrap(); - assert_eq!(&tx, &decoded); + } + "# + ), + // type 3: EIP4844 + ( - let tx = Account::default().sign_transaction(tx); - let rlp_bytes = rlp::encode(&tx); - let 
decoded = rlp::decode::(&rlp_bytes).unwrap(); - assert_eq!(&tx, &decoded); + "03f8bf018002018301e24194095e7baea6a6c7c4c2dfeb977efac326af552d878080f838f7940000000000000000000000000000000000000001e1a0000000000000000000000000000000000000000000000000000000000000000080e1a0000000000000000000000000000000000000000000000000000000000000000080a0fe38ca4e44a30002ac54af7cf922a6ac2ba11b7d22f548e8ecb3f51f41cb31b0a06de6a5cbae13c0c856e33acf021b51819636cfc009d39eafb9f606d546e305a8", + r#" + { + "accessList": [ + { + "address": "0x0000000000000000000000000000000000000001", + "storageKeys": ["0x0000000000000000000000000000000000000000000000000000000000000000"] + } + ], + "blobVersionedHashes": ["0x0000000000000000000000000000000000000000000000000000000000000000"], + "chainId": "0x1", + "gas": "0x1e241", + "input": "0x", + "maxFeePerBlobGas": "0x0", + "maxFeePerGas": "0x1", + "maxPriorityFeePerGas": "0x2", + "nonce": "0x0", + "to": "0x095e7baea6a6c7c4c2dfeb977efac326af552d87", + "type": "0x3", + "value": "0x0", + "r": "0xfe38ca4e44a30002ac54af7cf922a6ac2ba11b7d22f548e8ecb3f51f41cb31b0", + "s": "0x6de6a5cbae13c0c856e33acf021b51819636cfc009d39eafb9f606d546e305a8", + "yParity": "0x0" + } + "# + ) + ]; + + for (tx, json) in txs { + let raw_tx = hex::decode(tx).unwrap(); + let tx = TransactionSigned::decode(&raw_tx).unwrap(); + assert_eq!(tx.signed_payload(), raw_tx); + let expected_tx = serde_json::from_str(json).unwrap(); + assert_eq!(tx, expected_tx); + } } #[test] @@ -189,31 +575,12 @@ mod test { to: Some(Account::from(subxt_signer::eth::dev::baltathar()).address()), value: U256::from(123123), input: Bytes(vec![]), - r#type: Type0, + r#type: TypeLegacy, }; - let signed_tx = Account::default().sign_transaction(tx.clone()); - let rlp_bytes = rlp::encode(&signed_tx); - assert_eq!(tx.dummy_signed_payload().len(), rlp_bytes.len()); - } - - #[test] - fn recover_address_works() { - let account = Account::default(); - - let unsigned_tx = TransactionLegacyUnsigned { - value: 200_000_000_000_000_000_000u128.into(), - gas_price: 100_000_000_200u64.into(), - gas: 100_107u32.into(), - nonce: 3.into(), - to: Some(Account::from(subxt_signer::eth::dev::baltathar()).address()), - chain_id: Some(596.into()), - ..Default::default() - }; - - let tx = account.sign_transaction(unsigned_tx.clone()); - let recovered_address = tx.recover_eth_address().unwrap(); - - assert_eq!(account.address(), recovered_address); + let dummy_signed_payload = tx.dummy_signed_payload(); + let tx: TransactionUnsigned = tx.into(); + let payload = Account::default().sign_transaction(tx).signed_payload(); + assert_eq!(dummy_signed_payload.len(), payload.len()); } } diff --git a/substrate/frame/revive/src/evm/api/rpc_types.rs b/substrate/frame/revive/src/evm/api/rpc_types.rs index dd1a2642724d..1cf8d984b68b 100644 --- a/substrate/frame/revive/src/evm/api/rpc_types.rs +++ b/substrate/frame/revive/src/evm/api/rpc_types.rs @@ -16,7 +16,8 @@ // limitations under the License. //! Utility impl for the RPC types. use super::*; -use sp_core::U256; +use alloc::vec::Vec; +use sp_core::{H160, U256}; impl TransactionInfo { /// Create a new [`TransactionInfo`] from a receipt and a signed transaction. @@ -138,3 +139,140 @@ fn logs_bloom_works() { .unwrap(); assert_eq!(receipt.logs_bloom, ReceiptInfo::logs_bloom(&receipt.logs)); } + +impl GenericTransaction { + /// Create a new [`GenericTransaction`] from a signed transaction. 
+ pub fn from_signed(tx: TransactionSigned, from: Option) -> Self { + use TransactionSigned::*; + match tx { + TransactionLegacySigned(tx) => { + let tx = tx.transaction_legacy_unsigned; + GenericTransaction { + from, + r#type: Some(tx.r#type.as_byte()), + chain_id: tx.chain_id, + input: Some(tx.input), + nonce: Some(tx.nonce), + value: Some(tx.value), + to: tx.to, + gas: Some(tx.gas), + gas_price: Some(tx.gas_price), + ..Default::default() + } + }, + Transaction4844Signed(tx) => { + let tx = tx.transaction_4844_unsigned; + GenericTransaction { + from, + r#type: Some(tx.r#type.as_byte()), + chain_id: Some(tx.chain_id), + input: Some(tx.input), + nonce: Some(tx.nonce), + value: Some(tx.value), + to: Some(tx.to), + gas: Some(tx.gas), + gas_price: Some(tx.max_fee_per_blob_gas), + access_list: Some(tx.access_list), + blob_versioned_hashes: Some(tx.blob_versioned_hashes), + max_fee_per_blob_gas: Some(tx.max_fee_per_blob_gas), + max_fee_per_gas: Some(tx.max_fee_per_gas), + max_priority_fee_per_gas: Some(tx.max_priority_fee_per_gas), + ..Default::default() + } + }, + Transaction1559Signed(tx) => { + let tx = tx.transaction_1559_unsigned; + GenericTransaction { + from, + r#type: Some(tx.r#type.as_byte()), + chain_id: Some(tx.chain_id), + input: Some(tx.input), + nonce: Some(tx.nonce), + value: Some(tx.value), + to: tx.to, + gas: Some(tx.gas), + gas_price: Some(tx.gas_price), + access_list: Some(tx.access_list), + max_fee_per_gas: Some(tx.max_fee_per_gas), + max_priority_fee_per_gas: Some(tx.max_priority_fee_per_gas), + ..Default::default() + } + }, + Transaction2930Signed(tx) => { + let tx = tx.transaction_2930_unsigned; + GenericTransaction { + from, + r#type: Some(tx.r#type.as_byte()), + chain_id: Some(tx.chain_id), + input: Some(tx.input), + nonce: Some(tx.nonce), + value: Some(tx.value), + to: tx.to, + gas: Some(tx.gas), + gas_price: Some(tx.gas_price), + access_list: Some(tx.access_list), + ..Default::default() + } + }, + } + } + + /// Convert to a [`TransactionUnsigned`]. 
+ pub fn try_into_unsigned(self) -> Result<TransactionUnsigned, ()> { + match self.r#type.unwrap_or_default().0 { + TYPE_LEGACY => Ok(TransactionLegacyUnsigned { + r#type: TypeLegacy {}, + chain_id: self.chain_id, + input: self.input.unwrap_or_default(), + nonce: self.nonce.unwrap_or_default(), + value: self.value.unwrap_or_default(), + to: self.to, + gas: self.gas.unwrap_or_default(), + gas_price: self.gas_price.unwrap_or_default(), + } + .into()), + TYPE_EIP1559 => Ok(Transaction1559Unsigned { + r#type: TypeEip1559 {}, + chain_id: self.chain_id.unwrap_or_default(), + input: self.input.unwrap_or_default(), + nonce: self.nonce.unwrap_or_default(), + value: self.value.unwrap_or_default(), + to: self.to, + gas: self.gas.unwrap_or_default(), + gas_price: self.gas_price.unwrap_or_default(), + access_list: self.access_list.unwrap_or_default(), + max_fee_per_gas: self.max_fee_per_gas.unwrap_or_default(), + max_priority_fee_per_gas: self.max_priority_fee_per_gas.unwrap_or_default(), + } + .into()), + TYPE_EIP2930 => Ok(Transaction2930Unsigned { + r#type: TypeEip2930 {}, + chain_id: self.chain_id.unwrap_or_default(), + input: self.input.unwrap_or_default(), + nonce: self.nonce.unwrap_or_default(), + value: self.value.unwrap_or_default(), + to: self.to, + gas: self.gas.unwrap_or_default(), + gas_price: self.gas_price.unwrap_or_default(), + access_list: self.access_list.unwrap_or_default(), + } + .into()), + TYPE_EIP4844 => Ok(Transaction4844Unsigned { + r#type: TypeEip4844 {}, + chain_id: self.chain_id.unwrap_or_default(), + input: self.input.unwrap_or_default(), + nonce: self.nonce.unwrap_or_default(), + value: self.value.unwrap_or_default(), + to: self.to.unwrap_or_default(), + gas: self.gas.unwrap_or_default(), + max_fee_per_gas: self.max_fee_per_gas.unwrap_or_default(), + max_fee_per_blob_gas: self.max_fee_per_blob_gas.unwrap_or_default(), + max_priority_fee_per_gas: self.max_priority_fee_per_gas.unwrap_or_default(), + access_list: self.access_list.unwrap_or_default(), + blob_versioned_hashes: self.blob_versioned_hashes.unwrap_or_default(), + } + .into()), + _ => Err(()), + } + } +} diff --git a/substrate/frame/revive/src/evm/api/rpc_types_gen.rs b/substrate/frame/revive/src/evm/api/rpc_types_gen.rs index 48045a6acc86..5037ec05d881 100644 --- a/substrate/frame/revive/src/evm/api/rpc_types_gen.rs +++ b/substrate/frame/revive/src/evm/api/rpc_types_gen.rs @@ -17,7 +17,7 @@ //! Generated JSON-RPC types. #![allow(missing_docs)] -use super::{byte::*, Type0, Type1, Type2, Type3}; +use super::{byte::*, TypeEip1559, TypeEip2930, TypeEip4844, TypeLegacy}; use alloc::vec::Vec; use codec::{Decode, Encode}; use derive_more::{From, TryInto}; @@ -455,7 +455,7 @@ pub struct Transaction1559Unsigned { /// to address pub to: Option<Address>
, /// type - pub r#type: Type2, + pub r#type: TypeEip1559, /// value pub value: U256, } @@ -486,7 +486,7 @@ pub struct Transaction2930Unsigned { /// to address pub to: Option<Address>
, /// type - pub r#type: Type1, + pub r#type: TypeEip2930, /// value pub value: U256, } @@ -530,7 +530,7 @@ pub struct Transaction4844Unsigned { /// to address pub to: Address, /// type - pub r#type: Type3, + pub r#type: TypeEip4844, /// value pub value: U256, } @@ -557,7 +557,7 @@ pub struct TransactionLegacyUnsigned { /// to address pub to: Option<Address>
, /// type - pub r#type: Type0, + pub r#type: TypeLegacy, /// value pub value: U256, } @@ -622,8 +622,8 @@ pub struct Transaction1559Signed { pub v: Option<U256>, /// yParity /// The parity (0 for even, 1 for odd) of the y-value of the secp256k1 signature. - #[serde(rename = "yParity", skip_serializing_if = "Option::is_none")] - pub y_parity: Option<U256>, + #[serde(rename = "yParity")] + pub y_parity: U256, } /// Signed 2930 Transaction @@ -661,8 +661,8 @@ pub struct Transaction4844Signed { pub s: U256, /// yParity /// The parity (0 for even, 1 for odd) of the y-value of the secp256k1 signature. - #[serde(rename = "yParity", skip_serializing_if = "Option::is_none")] - pub y_parity: Option<U256>, + #[serde(rename = "yParity")] + pub y_parity: U256, } /// Signed Legacy Transaction diff --git a/substrate/frame/revive/src/evm/api/signature.rs b/substrate/frame/revive/src/evm/api/signature.rs index 957d50c8e324..9f39b92b461e 100644 --- a/substrate/frame/revive/src/evm/api/signature.rs +++ b/substrate/frame/revive/src/evm/api/signature.rs @@ -15,49 +15,11 @@ // See the License for the specific language governing permissions and // limitations under the License. //! Ethereum signature utilities -use super::{TransactionLegacySigned, TransactionLegacyUnsigned}; -use rlp::Encodable; +use super::* use sp_core::{H160, U256}; use sp_io::{crypto::secp256k1_ecdsa_recover, hashing::keccak_256}; -impl TransactionLegacyUnsigned { - /// Recover the Ethereum address, from an RLP encoded transaction and a 65 bytes signature. - pub fn recover_eth_address(rlp_encoded: &[u8], signature: &[u8; 65]) -> Result<H160, ()> { - let hash = keccak_256(rlp_encoded); - let mut addr = H160::default(); - let pk = secp256k1_ecdsa_recover(&signature, &hash).map_err(|_| ())?; - addr.assign_from_slice(&keccak_256(&pk[..])[12..]); - - Ok(addr) - } -} - impl TransactionLegacySigned { - /// Create a signed transaction from an [`TransactionLegacyUnsigned`] and a signature. - pub fn from( - transaction_legacy_unsigned: TransactionLegacyUnsigned, - signature: &[u8; 65], - ) -> TransactionLegacySigned { - let r = U256::from_big_endian(&signature[..32]); - let s = U256::from_big_endian(&signature[32..64]); - let recovery_id = signature[64] as u32; - let v = transaction_legacy_unsigned - .chain_id - .map(|chain_id| chain_id * 2 + 35 + recovery_id) - .unwrap_or_else(|| U256::from(27) + recovery_id); - - TransactionLegacySigned { transaction_legacy_unsigned, r, s, v } - } - - /// Get the raw 65 bytes signature from the signed transaction. - pub fn raw_signature(&self) -> Result<[u8; 65], ()> { - let mut s = [0u8; 65]; - self.r.write_as_big_endian(s[0..32].as_mut()); - self.s.write_as_big_endian(s[32..64].as_mut()); - s[64] = self.extract_recovery_id().ok_or(())?; - Ok(s) - } - /// Get the recovery ID from the signed transaction. /// See https://eips.ethereum.org/EIPS/eip-155 fn extract_recovery_id(&self) -> Option<u8> { @@ -71,10 +33,154 @@ impl TransactionLegacySigned { self.v.try_into().ok() } } +} + +impl TransactionUnsigned { + /// Extract the unsigned transaction from a signed transaction. 
+ pub fn from_signed(tx: TransactionSigned) -> Self { + match tx { + TransactionSigned::TransactionLegacySigned(signed) => + Self::TransactionLegacyUnsigned(signed.transaction_legacy_unsigned), + TransactionSigned::Transaction4844Signed(signed) => + Self::Transaction4844Unsigned(signed.transaction_4844_unsigned), + TransactionSigned::Transaction1559Signed(signed) => + Self::Transaction1559Unsigned(signed.transaction_1559_unsigned), + TransactionSigned::Transaction2930Signed(signed) => + Self::Transaction2930Unsigned(signed.transaction_2930_unsigned), + } + } + + /// Create a signed transaction from an [`TransactionUnsigned`] and a signature. + pub fn with_signature(self, signature: [u8; 65]) -> TransactionSigned { + let r = U256::from_big_endian(&signature[..32]); + let s = U256::from_big_endian(&signature[32..64]); + let recovery_id = signature[64]; + + match self { + TransactionUnsigned::Transaction2930Unsigned(transaction_2930_unsigned) => + Transaction2930Signed { + transaction_2930_unsigned, + r, + s, + v: None, + y_parity: U256::from(recovery_id), + } + .into(), + TransactionUnsigned::Transaction1559Unsigned(transaction_1559_unsigned) => + Transaction1559Signed { + transaction_1559_unsigned, + r, + s, + v: None, + y_parity: U256::from(recovery_id), + } + .into(), + + TransactionUnsigned::Transaction4844Unsigned(transaction_4844_unsigned) => + Transaction4844Signed { + transaction_4844_unsigned, + r, + s, + y_parity: U256::from(recovery_id), + } + .into(), + + TransactionUnsigned::TransactionLegacyUnsigned(transaction_legacy_unsigned) => { + let v = transaction_legacy_unsigned + .chain_id + .map(|chain_id| { + chain_id + .saturating_mul(U256::from(2)) + .saturating_add(U256::from(35u32 + recovery_id as u32)) + }) + .unwrap_or_else(|| U256::from(27u32 + recovery_id as u32)); + + TransactionLegacySigned { transaction_legacy_unsigned, r, s, v }.into() + }, + } + } +} + +impl TransactionSigned { + /// Get the raw 65 bytes signature from the signed transaction. + pub fn raw_signature(&self) -> Result<[u8; 65], ()> { + use TransactionSigned::*; + let (r, s, v) = match self { + TransactionLegacySigned(tx) => (tx.r, tx.s, tx.extract_recovery_id().ok_or(())?), + Transaction4844Signed(tx) => (tx.r, tx.s, tx.y_parity.try_into().map_err(|_| ())?), + Transaction1559Signed(tx) => (tx.r, tx.s, tx.y_parity.try_into().map_err(|_| ())?), + Transaction2930Signed(tx) => (tx.r, tx.s, tx.y_parity.try_into().map_err(|_| ())?), + }; + let mut sig = [0u8; 65]; + r.write_as_big_endian(sig[0..32].as_mut()); + s.write_as_big_endian(sig[32..64].as_mut()); + sig[64] = v; + Ok(sig) + } - /// Recover the Ethereum address from the signed transaction. + /// Recover the Ethereum address, from a signed transaction. pub fn recover_eth_address(&self) -> Result { - let rlp_encoded = self.transaction_legacy_unsigned.rlp_bytes(); - TransactionLegacyUnsigned::recover_eth_address(&rlp_encoded, &self.raw_signature()?) 
+ use TransactionSigned::*; + + let mut s = rlp::RlpStream::new(); + match self { + TransactionLegacySigned(tx) => { + let tx = &tx.transaction_legacy_unsigned; + s.append(tx); + }, + Transaction4844Signed(tx) => { + let tx = &tx.transaction_4844_unsigned; + s.append(&tx.r#type.value()); + s.append(tx); + }, + Transaction1559Signed(tx) => { + let tx = &tx.transaction_1559_unsigned; + s.append(&tx.r#type.value()); + s.append(tx); + }, + Transaction2930Signed(tx) => { + let tx = &tx.transaction_2930_unsigned; + s.append(&tx.r#type.value()); + s.append(tx); + }, + } + let bytes = s.out().to_vec(); + let signature = self.raw_signature()?; + + let hash = keccak_256(&bytes); + let mut addr = H160::default(); + let pk = secp256k1_ecdsa_recover(&signature, &hash).map_err(|_| ())?; + addr.assign_from_slice(&keccak_256(&pk[..])[12..]); + Ok(addr) + } +} + +#[test] +fn sign_and_recover_work() { + use crate::evm::TransactionUnsigned; + let txs = [ + // Legacy + "f86080808301e24194095e7baea6a6c7c4c2dfeb977efac326af552d87808026a07b2e762a17a71a46b422e60890a04512cf0d907ccf6b78b5bd6e6977efdc2bf5a01ea673d50bbe7c2236acb498ceb8346a8607c941f0b8cbcde7cf439aa9369f1f", + //// type 1: EIP2930 + "01f89b0180808301e24194095e7baea6a6c7c4c2dfeb977efac326af552d878080f838f7940000000000000000000000000000000000000001e1a0000000000000000000000000000000000000000000000000000000000000000080a0c45a61b3d1d00169c649e7326e02857b850efb96e587db4b9aad29afc80d0752a070ae1eb47ab4097dbed2f19172ae286492621b46ac737ee6c32fb18a00c94c9c", + // type 2: EIP1559 + "02f89c018080018301e24194095e7baea6a6c7c4c2dfeb977efac326af552d878080f838f7940000000000000000000000000000000000000001e1a0000000000000000000000000000000000000000000000000000000000000000080a055d72bbc3047d4b9d3e4b8099f187143202407746118204cc2e0cb0c85a68baea04f6ef08a1418c70450f53398d9f0f2d78d9e9d6b8a80cba886b67132c4a744f2", + // type 3: EIP4844 + "03f8bf018002018301e24194095e7baea6a6c7c4c2dfeb977efac326af552d878080f838f7940000000000000000000000000000000000000001e1a0000000000000000000000000000000000000000000000000000000000000000080e1a0000000000000000000000000000000000000000000000000000000000000000001a0672b8bac466e2cf1be3148c030988d40d582763ecebbc07700dfc93bb070d8a4a07c635887005b11cb58964c04669ac2857fa633aa66f662685dadfd8bcacb0f21", + ]; + let account = Account::from_secret_key(hex_literal::hex!( + "a872f6cbd25a0e04a08b1e21098017a9e6194d101d75e13111f71410c59cd57f" + )); + + for tx in txs { + let raw_tx = hex::decode(tx).unwrap(); + let tx = TransactionSigned::decode(&raw_tx).unwrap(); + + let address = tx.recover_eth_address(); + assert_eq!(address.unwrap(), account.address()); + + let unsigned = TransactionUnsigned::from_signed(tx.clone()); + let signed = account.sign_transaction(unsigned); + assert_eq!(tx, signed); } } diff --git a/substrate/frame/revive/src/evm/api/type_id.rs b/substrate/frame/revive/src/evm/api/type_id.rs index 7434ca6e9b7f..c6e018a379b3 100644 --- a/substrate/frame/revive/src/evm/api/type_id.rs +++ b/substrate/frame/revive/src/evm/api/type_id.rs @@ -17,6 +17,7 @@ //! Ethereum Typed Transaction types use super::Byte; use codec::{Decode, Encode}; +use paste::paste; use rlp::Decodable; use scale_info::TypeInfo; use serde::{Deserialize, Deserializer, Serialize, Serializer}; @@ -29,8 +30,14 @@ macro_rules! transaction_type { #[derive(Clone, Default, Debug, Eq, PartialEq)] pub struct $name; + // upper case const name + paste! 
{ + #[doc = concat!("Transaction value for type identifier: ", $value)] + pub const [<$name:snake:upper>]: u8 = $value; + } + impl $name { - /// Get the value of the type + /// Convert to u8 pub fn value(&self) -> u8 { $value } @@ -107,7 +114,12 @@ macro_rules! transaction_type { }; } -transaction_type!(Type0, 0); -transaction_type!(Type1, 1); -transaction_type!(Type2, 2); -transaction_type!(Type3, 3); +transaction_type!(TypeLegacy, 0); +transaction_type!(TypeEip2930, 1); +transaction_type!(TypeEip1559, 2); +transaction_type!(TypeEip4844, 3); + +#[test] +fn transaction_type() { + assert_eq!(TYPE_EIP2930, 1u8); +} diff --git a/substrate/frame/revive/src/evm/runtime.rs b/substrate/frame/revive/src/evm/runtime.rs index 21294fdf6baa..40c210304ca2 100644 --- a/substrate/frame/revive/src/evm/runtime.rs +++ b/substrate/frame/revive/src/evm/runtime.rs @@ -16,7 +16,7 @@ // limitations under the License. //! Runtime types for integrating `pallet-revive` with the EVM. use crate::{ - evm::api::{TransactionLegacySigned, TransactionLegacyUnsigned}, + evm::api::{GenericTransaction, TransactionSigned}, AccountIdOf, AddressMapper, BalanceOf, MomentOf, Weight, LOG_TARGET, }; use codec::{Decode, Encode}; @@ -293,7 +293,7 @@ pub trait EthExtra { CallOf: From>, ::Hash: frame_support::traits::IsType, { - let tx = rlp::decode::(&payload).map_err(|err| { + let tx = TransactionSigned::decode(&payload).map_err(|err| { log::debug!(target: LOG_TARGET, "Failed to decode transaction: {err:?}"); InvalidTransaction::Call })?; @@ -305,33 +305,33 @@ pub trait EthExtra { let signer = ::AddressMapper::to_fallback_account_id(&signer); - let TransactionLegacyUnsigned { nonce, chain_id, to, value, input, gas, gas_price, .. } = - tx.transaction_legacy_unsigned; + let GenericTransaction { nonce, chain_id, to, value, input, gas, gas_price, .. } = + GenericTransaction::from_signed(tx, None); if chain_id.unwrap_or_default() != ::ChainId::get().into() { log::debug!(target: LOG_TARGET, "Invalid chain_id {chain_id:?}"); return Err(InvalidTransaction::Call); } - let value = crate::Pallet::::convert_evm_to_native(value).map_err(|err| { - log::debug!(target: LOG_TARGET, "Failed to convert value to native: {err:?}"); - InvalidTransaction::Call - })?; + let value = crate::Pallet::::convert_evm_to_native(value.unwrap_or_default()) + .map_err(|err| { + log::debug!(target: LOG_TARGET, "Failed to convert value to native: {err:?}"); + InvalidTransaction::Call + })?; + let data = input.unwrap_or_default().0; let call = if let Some(dest) = to { crate::Call::call:: { dest, value, gas_limit, storage_deposit_limit, - data: input.0, + data, } } else { - let blob = match polkavm::ProgramBlob::blob_length(&input.0) { - Some(blob_len) => blob_len - .try_into() - .ok() - .and_then(|blob_len| (input.0.split_at_checked(blob_len))), + let blob = match polkavm::ProgramBlob::blob_length(&data) { + Some(blob_len) => + blob_len.try_into().ok().and_then(|blob_len| (data.split_at_checked(blob_len))), _ => None, }; @@ -350,18 +350,18 @@ pub trait EthExtra { } }; - let nonce = nonce.try_into().map_err(|_| InvalidTransaction::Call)?; + let nonce = nonce.unwrap_or_default().try_into().map_err(|_| InvalidTransaction::Call)?; // Fees calculated with the fixed `GAS_PRICE` // When we dry-run the transaction, we set the gas to `Fee / GAS_PRICE` let eth_fee_no_tip = U256::from(GAS_PRICE) - .saturating_mul(gas) + .saturating_mul(gas.unwrap_or_default()) .try_into() .map_err(|_| InvalidTransaction::Call)?; // Fees with the actual gas_price from the transaction. 
- let eth_fee: BalanceOf = U256::from(gas_price) - .saturating_mul(gas) + let eth_fee: BalanceOf = U256::from(gas_price.unwrap_or_default()) + .saturating_mul(gas.unwrap_or_default()) .try_into() .map_err(|_| InvalidTransaction::Call)?; @@ -414,7 +414,6 @@ mod test { }; use frame_support::{error::LookupError, traits::fungible::Mutate}; use pallet_revive_fixtures::compile_module; - use rlp::Encodable; use sp_runtime::{ traits::{Checkable, DispatchTransaction}, MultiAddress, MultiSignature, @@ -523,7 +522,7 @@ mod test { 100_000_000_000_000, ); - let payload = account.sign_transaction(tx).rlp_bytes().to_vec(); + let payload = account.sign_transaction(tx.into()).signed_payload(); let call = RuntimeCall::Contracts(crate::Call::eth_transact { payload, gas_limit, diff --git a/substrate/frame/revive/src/lib.rs b/substrate/frame/revive/src/lib.rs index caecf07c4071..b55854e2eec5 100644 --- a/substrate/frame/revive/src/lib.rs +++ b/substrate/frame/revive/src/lib.rs @@ -768,7 +768,7 @@ pub mod pallet { /// /// # Parameters /// - /// * `payload`: The RLP-encoded [`crate::evm::TransactionLegacySigned`]. + /// * `payload`: The encoded [`crate::evm::TransactionSigned`]. /// * `gas_limit`: The gas limit enforced during contract execution. /// * `storage_deposit_limit`: The maximum balance that can be charged to the caller for /// storage usage. From 7c5224cb01710d0c14c87bf3463cc79e49b3e7b5 Mon Sep 17 00:00:00 2001 From: gupnik Date: Fri, 22 Nov 2024 10:16:45 +0530 Subject: [PATCH 05/41] Adds `BlockNumberProvider` in multisig, proxy and nft pallets (#5723) Step in https://github.com/paritytech/polkadot-sdk/issues/3268 This PR adds the ability for these pallets to specify their source of the block number. This is useful when these pallets are migrated from the relay chain to a parachain and vice versa. This change is backwards compatible: 1. If the `BlockNumberProvider` continues to use the system pallet's block number 2. When a pallet deployed on the relay chain is moved to a parachain, but still uses the relay chain's block number However, we would need migrations if the deployed pallets are upgraded on an existing parachain, and the `BlockNumberProvider` uses the relay chain block number. 
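As a minimal sketch of that parachain case (assumed, not part of this diff: Cumulus' `cumulus_pallet_parachain_system::RelaychainDataProvider`, which implements `BlockNumberProvider` by reporting the relay parent's block number), a runtime could wire the new associated type like so:

    impl pallet_multisig::Config for Runtime {
        // ... the other associated types stay exactly as in the diffs below ...
        // Hypothetical wiring: multisig timepoints are then measured in relay
        // chain blocks, which remain stable if the pallet moves between chains.
        type BlockNumberProvider =
            cumulus_pallet_parachain_system::RelaychainDataProvider<Runtime>;
    }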
--------- Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> --- .../assets/asset-hub-rococo/src/lib.rs | 3 ++ .../assets/asset-hub-westend/src/lib.rs | 3 ++ .../bridge-hubs/bridge-hub-rococo/src/lib.rs | 1 + .../bridge-hubs/bridge-hub-westend/src/lib.rs | 1 + .../collectives-westend/src/lib.rs | 2 ++ .../contracts/contracts-rococo/src/lib.rs | 1 + .../coretime/coretime-rococo/src/lib.rs | 2 ++ .../coretime/coretime-westend/src/lib.rs | 2 ++ .../runtimes/people/people-rococo/src/lib.rs | 2 ++ .../runtimes/people/people-westend/src/lib.rs | 2 ++ polkadot/runtime/rococo/src/lib.rs | 2 ++ polkadot/runtime/westend/src/lib.rs | 2 ++ prdoc/pr_5723.prdoc | 24 +++++++++++++++ substrate/bin/node/runtime/src/lib.rs | 3 ++ substrate/frame/contracts/src/tests.rs | 1 + substrate/frame/multisig/src/lib.rs | 10 +++++-- substrate/frame/multisig/src/tests.rs | 1 + .../frame/nft-fractionalization/src/mock.rs | 1 + substrate/frame/nfts/src/benchmarking.rs | 22 +++++++------- .../frame/nfts/src/features/approvals.rs | 6 ++-- .../frame/nfts/src/features/atomic_swap.rs | 8 ++--- .../frame/nfts/src/features/attributes.rs | 2 +- .../nfts/src/features/create_delete_item.rs | 2 +- substrate/frame/nfts/src/features/settings.rs | 6 +--- substrate/frame/nfts/src/lib.rs | 29 ++++++++++--------- substrate/frame/nfts/src/mock.rs | 1 + substrate/frame/nfts/src/types.rs | 12 ++++---- substrate/frame/proxy/src/benchmarking.rs | 6 ++-- substrate/frame/proxy/src/lib.rs | 12 ++++++-- substrate/frame/proxy/src/tests.rs | 1 + substrate/frame/revive/src/tests.rs | 1 + substrate/frame/safe-mode/src/mock.rs | 1 + substrate/frame/src/lib.rs | 4 +-- substrate/frame/tx-pause/src/mock.rs | 1 + 34 files changed, 125 insertions(+), 52 deletions(-) create mode 100644 prdoc/pr_5723.prdoc diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index 2f9d83bd9d0b..bc48c2d805fd 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -467,6 +467,7 @@ impl pallet_multisig::Config for Runtime { type DepositFactor = DepositFactor; type MaxSignatories = MaxSignatories; type WeightInfo = weights::pallet_multisig::WeightInfo; + type BlockNumberProvider = frame_system::Pallet; } impl pallet_utility::Config for Runtime { @@ -652,6 +653,7 @@ impl pallet_proxy::Config for Runtime { type CallHasher = BlakeTwo256; type AnnouncementDepositBase = AnnouncementDepositBase; type AnnouncementDepositFactor = AnnouncementDepositFactor; + type BlockNumberProvider = frame_system::Pallet; } parameter_types! 
{ @@ -918,6 +920,7 @@ impl pallet_nfts::Config for Runtime { type WeightInfo = weights::pallet_nfts::WeightInfo; #[cfg(feature = "runtime-benchmarks")] type Helper = (); + type BlockNumberProvider = frame_system::Pallet; } /// XCM router instance to BridgeHub with bridging capabilities for `Westend` global diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index 2206aea78ec2..cafea3b6ff8b 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -466,6 +466,7 @@ impl pallet_multisig::Config for Runtime { type DepositFactor = DepositFactor; type MaxSignatories = MaxSignatories; type WeightInfo = weights::pallet_multisig::WeightInfo; + type BlockNumberProvider = frame_system::Pallet; } impl pallet_utility::Config for Runtime { @@ -651,6 +652,7 @@ impl pallet_proxy::Config for Runtime { type CallHasher = BlakeTwo256; type AnnouncementDepositBase = AnnouncementDepositBase; type AnnouncementDepositFactor = AnnouncementDepositFactor; + type BlockNumberProvider = frame_system::Pallet; } parameter_types! { @@ -912,6 +914,7 @@ impl pallet_nfts::Config for Runtime { type WeightInfo = weights::pallet_nfts::WeightInfo; #[cfg(feature = "runtime-benchmarks")] type Helper = (); + type BlockNumberProvider = frame_system::Pallet; } /// XCM router instance to BridgeHub with bridging capabilities for `Rococo` global diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index ff7af475f5e2..3f3316d0be49 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -537,6 +537,7 @@ impl pallet_multisig::Config for Runtime { type DepositFactor = DepositFactor; type MaxSignatories = ConstU32<100>; type WeightInfo = weights::pallet_multisig::WeightInfo; + type BlockNumberProvider = frame_system::Pallet; } impl pallet_utility::Config for Runtime { diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs index 065400016791..65e7d291dc37 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -513,6 +513,7 @@ impl pallet_multisig::Config for Runtime { type DepositFactor = DepositFactor; type MaxSignatories = ConstU32<100>; type WeightInfo = weights::pallet_multisig::WeightInfo; + type BlockNumberProvider = frame_system::Pallet; } impl pallet_utility::Config for Runtime { diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs index c3e105a84fb6..0ee3a4068718 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs @@ -258,6 +258,7 @@ impl pallet_multisig::Config for Runtime { type DepositFactor = DepositFactor; type MaxSignatories = ConstU32<100>; type WeightInfo = weights::pallet_multisig::WeightInfo; + type BlockNumberProvider = frame_system::Pallet; } impl pallet_utility::Config for Runtime { @@ -382,6 +383,7 @@ impl pallet_proxy::Config for Runtime { type CallHasher = BlakeTwo256; type AnnouncementDepositBase = AnnouncementDepositBase; 
type AnnouncementDepositFactor = AnnouncementDepositFactor; + type BlockNumberProvider = frame_system::Pallet; } parameter_types! { diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs index f661a8bdccfe..2951662a979b 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs @@ -268,6 +268,7 @@ impl pallet_multisig::Config for Runtime { type DepositFactor = DepositFactor; type MaxSignatories = ConstU32<100>; type WeightInfo = pallet_multisig::weights::SubstrateWeight; + type BlockNumberProvider = frame_system::Pallet; } impl pallet_utility::Config for Runtime { diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs index 31700c2e25ff..3f3126b749d8 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs @@ -445,6 +445,7 @@ impl pallet_multisig::Config for Runtime { type DepositFactor = DepositFactor; type MaxSignatories = ConstU32<100>; type WeightInfo = weights::pallet_multisig::WeightInfo; + type BlockNumberProvider = frame_system::Pallet; } /// The type used to represent the kinds of proxying allowed. @@ -577,6 +578,7 @@ impl pallet_proxy::Config for Runtime { type CallHasher = BlakeTwo256; type AnnouncementDepositBase = AnnouncementDepositBase; type AnnouncementDepositFactor = AnnouncementDepositFactor; + type BlockNumberProvider = frame_system::Pallet; } impl pallet_utility::Config for Runtime { diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs index 1f0f54884fa8..098a17cc9984 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs @@ -446,6 +446,7 @@ impl pallet_multisig::Config for Runtime { type DepositFactor = DepositFactor; type MaxSignatories = ConstU32<100>; type WeightInfo = weights::pallet_multisig::WeightInfo; + type BlockNumberProvider = frame_system::Pallet; } /// The type used to represent the kinds of proxying allowed. @@ -578,6 +579,7 @@ impl pallet_proxy::Config for Runtime { type CallHasher = BlakeTwo256; type AnnouncementDepositBase = AnnouncementDepositBase; type AnnouncementDepositFactor = AnnouncementDepositFactor; + type BlockNumberProvider = frame_system::Pallet; } impl pallet_utility::Config for Runtime { diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs index 25356a84806d..7921030f2bb8 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs @@ -407,6 +407,7 @@ impl pallet_multisig::Config for Runtime { type DepositFactor = DepositFactor; type MaxSignatories = ConstU32<100>; type WeightInfo = weights::pallet_multisig::WeightInfo; + type BlockNumberProvider = frame_system::Pallet; } /// The type used to represent the kinds of proxying allowed. 
@@ -520,6 +521,7 @@ impl pallet_proxy::Config for Runtime { type CallHasher = BlakeTwo256; type AnnouncementDepositBase = AnnouncementDepositBase; type AnnouncementDepositFactor = AnnouncementDepositFactor; + type BlockNumberProvider = frame_system::Pallet; } impl pallet_utility::Config for Runtime { diff --git a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs index 1c5183636c49..19a64ab8d6e8 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs @@ -406,6 +406,7 @@ impl pallet_multisig::Config for Runtime { type DepositFactor = DepositFactor; type MaxSignatories = ConstU32<100>; type WeightInfo = weights::pallet_multisig::WeightInfo; + type BlockNumberProvider = frame_system::Pallet; } /// The type used to represent the kinds of proxying allowed. @@ -519,6 +520,7 @@ impl pallet_proxy::Config for Runtime { type CallHasher = BlakeTwo256; type AnnouncementDepositBase = AnnouncementDepositBase; type AnnouncementDepositFactor = AnnouncementDepositFactor; + type BlockNumberProvider = frame_system::Pallet; } impl pallet_utility::Config for Runtime { diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index 96a97faa4750..5da9da86f02e 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -767,6 +767,7 @@ impl pallet_multisig::Config for Runtime { type DepositFactor = DepositFactor; type MaxSignatories = MaxSignatories; type WeightInfo = weights::pallet_multisig::WeightInfo; + type BlockNumberProvider = frame_system::Pallet; } parameter_types! { @@ -971,6 +972,7 @@ impl pallet_proxy::Config for Runtime { type CallHasher = BlakeTwo256; type AnnouncementDepositBase = AnnouncementDepositBase; type AnnouncementDepositFactor = AnnouncementDepositFactor; + type BlockNumberProvider = frame_system::Pallet; } impl parachains_origin::Config for Runtime {} diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index 7a5562cc98c1..9f0b701f20be 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -1004,6 +1004,7 @@ impl pallet_multisig::Config for Runtime { type DepositFactor = DepositFactor; type MaxSignatories = MaxSignatories; type WeightInfo = weights::pallet_multisig::WeightInfo; + type BlockNumberProvider = frame_system::Pallet; } parameter_types! { @@ -1204,6 +1205,7 @@ impl pallet_proxy::Config for Runtime { type CallHasher = BlakeTwo256; type AnnouncementDepositBase = AnnouncementDepositBase; type AnnouncementDepositFactor = AnnouncementDepositFactor; + type BlockNumberProvider = frame_system::Pallet; } impl parachains_origin::Config for Runtime {} diff --git a/prdoc/pr_5723.prdoc b/prdoc/pr_5723.prdoc new file mode 100644 index 000000000000..ded5f9cebd1d --- /dev/null +++ b/prdoc/pr_5723.prdoc @@ -0,0 +1,24 @@ +title: Adds `BlockNumberProvider` in multisig, proxy and nft pallets + +doc: + - audience: Runtime Dev + description: | + This PR adds the ability for these pallets to specify their source of the block number. + This is useful when these pallets are migrated from the relay chain to a parachain and + vice versa. + + This change is backwards compatible: + 1. If the `BlockNumberProvider` continues to use the system pallet's block number + 2. 
When a pallet deployed on the relay chain is moved to a parachain, but still uses the + relay chain's block number + + However, we would need migrations if the deployed pallets are upgraded on an existing parachain, + and the `BlockNumberProvider` uses the relay chain block number. + +crates: + - name: pallet-multisig + bump: major + - name: pallet-proxy + bump: major + - name: pallet-nfts + bump: major diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs index e68e04840776..bff263548087 100644 --- a/substrate/bin/node/runtime/src/lib.rs +++ b/substrate/bin/node/runtime/src/lib.rs @@ -392,6 +392,7 @@ impl pallet_multisig::Config for Runtime { type DepositFactor = DepositFactor; type MaxSignatories = ConstU32<100>; type WeightInfo = pallet_multisig::weights::SubstrateWeight; + type BlockNumberProvider = frame_system::Pallet; } parameter_types! { @@ -479,6 +480,7 @@ impl pallet_proxy::Config for Runtime { type CallHasher = BlakeTwo256; type AnnouncementDepositBase = AnnouncementDepositBase; type AnnouncementDepositFactor = AnnouncementDepositFactor; + type BlockNumberProvider = frame_system::Pallet; } parameter_types! { @@ -2048,6 +2050,7 @@ impl pallet_nfts::Config for Runtime { type Helper = (); type CreateOrigin = AsEnsureOriginWithArg>; type Locker = (); + type BlockNumberProvider = frame_system::Pallet; } impl pallet_transaction_storage::Config for Runtime { diff --git a/substrate/frame/contracts/src/tests.rs b/substrate/frame/contracts/src/tests.rs index c3b6e3273f34..b01d0aa4fa48 100644 --- a/substrate/frame/contracts/src/tests.rs +++ b/substrate/frame/contracts/src/tests.rs @@ -399,6 +399,7 @@ impl pallet_proxy::Config for Test { type CallHasher = BlakeTwo256; type AnnouncementDepositBase = ConstU64<1>; type AnnouncementDepositFactor = ConstU64<1>; + type BlockNumberProvider = frame_system::Pallet; } impl pallet_dummy::Config for Test {} diff --git a/substrate/frame/multisig/src/lib.rs b/substrate/frame/multisig/src/lib.rs index 4a30b5c119b9..869b4adc2adc 100644 --- a/substrate/frame/multisig/src/lib.rs +++ b/substrate/frame/multisig/src/lib.rs @@ -77,6 +77,9 @@ macro_rules! log { type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; +pub type BlockNumberFor = + <::BlockNumberProvider as BlockNumberProvider>::BlockNumber; + /// A global extrinsic index, formed as the extrinsic index within a block, together with that /// block's height. This allows a transaction in which a multisig operation of a particular /// composite was created to be uniquely identified. @@ -153,6 +156,9 @@ pub mod pallet { /// Weight information for extrinsics in this pallet. type WeightInfo: weights::WeightInfo; + + /// Provider for the block number. Normally this is the `frame_system` pallet. + type BlockNumberProvider: BlockNumberProvider; } /// The in-code storage version. @@ -235,7 +241,7 @@ pub mod pallet { } #[pallet::hooks] - impl Hooks> for Pallet {} + impl Hooks> for Pallet {} #[pallet::call] impl Pallet { @@ -626,7 +632,7 @@ impl Pallet { /// The current `Timepoint`. 
pub fn timepoint() -> Timepoint> { Timepoint { - height: >::block_number(), + height: T::BlockNumberProvider::current_block_number(), index: >::extrinsic_index().unwrap_or_default(), } } diff --git a/substrate/frame/multisig/src/tests.rs b/substrate/frame/multisig/src/tests.rs index c5a98845270c..4065ce73f905 100644 --- a/substrate/frame/multisig/src/tests.rs +++ b/substrate/frame/multisig/src/tests.rs @@ -66,6 +66,7 @@ impl Config for Test { type DepositFactor = ConstU64<1>; type MaxSignatories = ConstU32<3>; type WeightInfo = (); + type BlockNumberProvider = frame_system::Pallet; } use pallet_balances::Call as BalancesCall; diff --git a/substrate/frame/nft-fractionalization/src/mock.rs b/substrate/frame/nft-fractionalization/src/mock.rs index 50b41b5fc64e..762c1776e30f 100644 --- a/substrate/frame/nft-fractionalization/src/mock.rs +++ b/substrate/frame/nft-fractionalization/src/mock.rs @@ -115,6 +115,7 @@ impl pallet_nfts::Config for Test { type OffchainSignature = Signature; type OffchainPublic = AccountPublic; type WeightInfo = (); + type BlockNumberProvider = frame_system::Pallet; pallet_nfts::runtime_benchmarks_enabled! { type Helper = (); } diff --git a/substrate/frame/nfts/src/benchmarking.rs b/substrate/frame/nfts/src/benchmarking.rs index bc81096b459d..81828be5fa09 100644 --- a/substrate/frame/nfts/src/benchmarking.rs +++ b/substrate/frame/nfts/src/benchmarking.rs @@ -29,7 +29,7 @@ use frame_support::{ traits::{EnsureOrigin, Get, UnfilteredDispatchable}, BoundedVec, }; -use frame_system::{pallet_prelude::BlockNumberFor, RawOrigin as SystemOrigin}; +use frame_system::RawOrigin as SystemOrigin; use sp_runtime::traits::{Bounded, One}; use crate::Pallet as Nfts; @@ -577,7 +577,7 @@ benchmarks_instance_pallet! { let (item, ..) = mint_item::(0); let delegate: T::AccountId = account("delegate", 0, SEED); let delegate_lookup = T::Lookup::unlookup(delegate.clone()); - let deadline = BlockNumberFor::::max_value(); + let deadline = BlockNumberFor::::max_value(); }: _(SystemOrigin::Signed(caller.clone()), collection, item, delegate_lookup, Some(deadline)) verify { assert_last_event::(Event::TransferApproved { collection, item, owner: caller, delegate, deadline: Some(deadline) }.into()); @@ -589,7 +589,7 @@ benchmarks_instance_pallet! { let delegate: T::AccountId = account("delegate", 0, SEED); let delegate_lookup = T::Lookup::unlookup(delegate.clone()); let origin = SystemOrigin::Signed(caller.clone()).into(); - let deadline = BlockNumberFor::::max_value(); + let deadline = BlockNumberFor::::max_value(); Nfts::::approve_transfer(origin, collection, item, delegate_lookup.clone(), Some(deadline))?; }: _(SystemOrigin::Signed(caller.clone()), collection, item, delegate_lookup) verify { @@ -602,7 +602,7 @@ benchmarks_instance_pallet! { let delegate: T::AccountId = account("delegate", 0, SEED); let delegate_lookup = T::Lookup::unlookup(delegate.clone()); let origin = SystemOrigin::Signed(caller.clone()).into(); - let deadline = BlockNumberFor::::max_value(); + let deadline = BlockNumberFor::::max_value(); Nfts::::approve_transfer(origin, collection, item, delegate_lookup.clone(), Some(deadline))?; }: _(SystemOrigin::Signed(caller.clone()), collection, item) verify { @@ -712,10 +712,10 @@ benchmarks_instance_pallet! 
{ let price_direction = PriceDirection::Receive; let price_with_direction = PriceWithDirection { amount: price, direction: price_direction }; let duration = T::MaxDeadlineDuration::get(); - frame_system::Pallet::::set_block_number(One::one()); + T::BlockNumberProvider::set_block_number(One::one()); }: _(SystemOrigin::Signed(caller.clone()), collection, item1, collection, Some(item2), Some(price_with_direction.clone()), duration) verify { - let current_block = frame_system::Pallet::::block_number(); + let current_block = T::BlockNumberProvider::current_block_number(); assert_last_event::(Event::SwapCreated { offered_collection: collection, offered_item: item1, @@ -735,7 +735,7 @@ benchmarks_instance_pallet! { let duration = T::MaxDeadlineDuration::get(); let price_direction = PriceDirection::Receive; let price_with_direction = PriceWithDirection { amount: price, direction: price_direction }; - frame_system::Pallet::::set_block_number(One::one()); + T::BlockNumberProvider::set_block_number(One::one()); Nfts::::create_swap(origin, collection, item1, collection, Some(item2), Some(price_with_direction.clone()), duration)?; }: _(SystemOrigin::Signed(caller.clone()), collection, item1) verify { @@ -761,7 +761,7 @@ benchmarks_instance_pallet! { let target_lookup = T::Lookup::unlookup(target.clone()); T::Currency::make_free_balance_be(&target, T::Currency::minimum_balance()); let origin = SystemOrigin::Signed(caller.clone()); - frame_system::Pallet::::set_block_number(One::one()); + T::BlockNumberProvider::set_block_number(One::one()); Nfts::::transfer(origin.clone().into(), collection, item2, target_lookup)?; Nfts::::create_swap( origin.clone().into(), @@ -774,7 +774,7 @@ benchmarks_instance_pallet! { )?; }: _(SystemOrigin::Signed(target.clone()), collection, item2, collection, item1, Some(price_with_direction.clone())) verify { - let current_block = frame_system::Pallet::::block_number(); + let current_block = T::BlockNumberProvider::current_block_number(); assert_last_event::(Event::SwapClaimed { sent_collection: collection, sent_item: item2, @@ -822,7 +822,7 @@ benchmarks_instance_pallet! { let target: T::AccountId = account("target", 0, SEED); T::Currency::make_free_balance_be(&target, DepositBalanceOf::::max_value()); - frame_system::Pallet::::set_block_number(One::one()); + T::BlockNumberProvider::set_block_number(One::one()); }: _(SystemOrigin::Signed(target.clone()), Box::new(mint_data), signature.into(), caller) verify { let metadata: BoundedVec<_, _> = metadata.try_into().unwrap(); @@ -865,7 +865,7 @@ benchmarks_instance_pallet! 
{ let message = Encode::encode(&pre_signed_data); let signature = T::Helper::sign(&signer_public, &message); - frame_system::Pallet::::set_block_number(One::one()); + T::BlockNumberProvider::set_block_number(One::one()); }: _(SystemOrigin::Signed(item_owner.clone()), pre_signed_data, signature.into(), signer.clone()) verify { assert_last_event::( diff --git a/substrate/frame/nfts/src/features/approvals.rs b/substrate/frame/nfts/src/features/approvals.rs index 053fa67163b9..4738f69f83c4 100644 --- a/substrate/frame/nfts/src/features/approvals.rs +++ b/substrate/frame/nfts/src/features/approvals.rs @@ -46,7 +46,7 @@ impl, I: 'static> Pallet { collection: T::CollectionId, item: T::ItemId, delegate: T::AccountId, - maybe_deadline: Option>, + maybe_deadline: Option>, ) -> DispatchResult { ensure!( Self::is_pallet_feature_enabled(PalletFeature::Approvals), @@ -65,7 +65,7 @@ impl, I: 'static> Pallet { ensure!(check_origin == details.owner, Error::::NoPermission); } - let now = frame_system::Pallet::::block_number(); + let now = T::BlockNumberProvider::current_block_number(); let deadline = maybe_deadline.map(|d| d.saturating_add(now)); details @@ -111,7 +111,7 @@ impl, I: 'static> Pallet { let maybe_deadline = details.approvals.get(&delegate).ok_or(Error::::NotDelegate)?; let is_past_deadline = if let Some(deadline) = maybe_deadline { - let now = frame_system::Pallet::::block_number(); + let now = T::BlockNumberProvider::current_block_number(); now > *deadline } else { false diff --git a/substrate/frame/nfts/src/features/atomic_swap.rs b/substrate/frame/nfts/src/features/atomic_swap.rs index 830283b73c2a..03ebd35b81b2 100644 --- a/substrate/frame/nfts/src/features/atomic_swap.rs +++ b/substrate/frame/nfts/src/features/atomic_swap.rs @@ -53,7 +53,7 @@ impl, I: 'static> Pallet { desired_collection_id: T::CollectionId, maybe_desired_item_id: Option, maybe_price: Option>>, - duration: frame_system::pallet_prelude::BlockNumberFor, + duration: BlockNumberFor, ) -> DispatchResult { ensure!( Self::is_pallet_feature_enabled(PalletFeature::Swaps), @@ -76,7 +76,7 @@ impl, I: 'static> Pallet { ), }; - let now = frame_system::Pallet::::block_number(); + let now = T::BlockNumberProvider::current_block_number(); let deadline = duration.saturating_add(now); PendingSwapOf::::insert( @@ -119,7 +119,7 @@ impl, I: 'static> Pallet { let swap = PendingSwapOf::::get(&offered_collection_id, &offered_item_id) .ok_or(Error::::UnknownSwap)?; - let now = frame_system::Pallet::::block_number(); + let now = T::BlockNumberProvider::current_block_number(); if swap.deadline > now { let item = Item::::get(&offered_collection_id, &offered_item_id) .ok_or(Error::::UnknownItem)?; @@ -187,7 +187,7 @@ impl, I: 'static> Pallet { ensure!(desired_item == send_item_id, Error::::UnknownSwap); } - let now = frame_system::Pallet::::block_number(); + let now = T::BlockNumberProvider::current_block_number(); ensure!(now <= swap.deadline, Error::::DeadlineExpired); if let Some(ref price) = swap.price { diff --git a/substrate/frame/nfts/src/features/attributes.rs b/substrate/frame/nfts/src/features/attributes.rs index 28f7bd2c58ce..2cd09f7d2193 100644 --- a/substrate/frame/nfts/src/features/attributes.rs +++ b/substrate/frame/nfts/src/features/attributes.rs @@ -225,7 +225,7 @@ impl, I: 'static> Pallet { Error::::MaxAttributesLimitReached ); - let now = frame_system::Pallet::::block_number(); + let now = T::BlockNumberProvider::current_block_number(); ensure!(deadline >= now, Error::::DeadlineExpired); let item_details = diff --git 
a/substrate/frame/nfts/src/features/create_delete_item.rs b/substrate/frame/nfts/src/features/create_delete_item.rs index 37f64ae1b1b9..57366127f142 100644 --- a/substrate/frame/nfts/src/features/create_delete_item.rs +++ b/substrate/frame/nfts/src/features/create_delete_item.rs @@ -145,7 +145,7 @@ impl, I: 'static> Pallet { ensure!(account == mint_to, Error::::WrongOrigin); } - let now = frame_system::Pallet::::block_number(); + let now = T::BlockNumberProvider::current_block_number(); ensure!(deadline >= now, Error::::DeadlineExpired); ensure!( diff --git a/substrate/frame/nfts/src/features/settings.rs b/substrate/frame/nfts/src/features/settings.rs index d4f7533ffa4e..48719ae2c20e 100644 --- a/substrate/frame/nfts/src/features/settings.rs +++ b/substrate/frame/nfts/src/features/settings.rs @@ -96,11 +96,7 @@ impl, I: 'static> Pallet { pub(crate) fn do_update_mint_settings( maybe_check_origin: Option, collection: T::CollectionId, - mint_settings: MintSettings< - BalanceOf, - frame_system::pallet_prelude::BlockNumberFor, - T::CollectionId, - >, + mint_settings: MintSettings, BlockNumberFor, T::CollectionId>, ) -> DispatchResult { if let Some(check_origin) = &maybe_check_origin { ensure!( diff --git a/substrate/frame/nfts/src/lib.rs b/substrate/frame/nfts/src/lib.rs index 4e5493a3c755..346ad162c503 100644 --- a/substrate/frame/nfts/src/lib.rs +++ b/substrate/frame/nfts/src/lib.rs @@ -58,7 +58,7 @@ use frame_support::traits::{ }; use frame_system::Config as SystemConfig; use sp_runtime::{ - traits::{IdentifyAccount, Saturating, StaticLookup, Verify, Zero}, + traits::{BlockNumberProvider, IdentifyAccount, Saturating, StaticLookup, Verify, Zero}, RuntimeDebug, }; @@ -76,7 +76,7 @@ type AccountIdLookupOf = <::Lookup as StaticLookup>::Sourc pub mod pallet { use super::*; use frame_support::{pallet_prelude::*, traits::ExistenceRequirement}; - use frame_system::pallet_prelude::*; + use frame_system::{ensure_signed, pallet_prelude::OriginFor}; /// The in-code storage version. const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); @@ -210,7 +210,7 @@ pub mod pallet { /// The max duration in blocks for deadlines. #[pallet::constant] - type MaxDeadlineDuration: Get>; + type MaxDeadlineDuration: Get>; /// The max number of attributes a user could set per call. #[pallet::constant] @@ -242,6 +242,9 @@ pub mod pallet { /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; + + /// Provider for the block number. Normally this is the `frame_system` pallet. + type BlockNumberProvider: BlockNumberProvider; } /// Details of a collection. @@ -388,7 +391,7 @@ pub mod pallet { T::CollectionId, T::ItemId, PriceWithDirection>, - BlockNumberFor, + BlockNumberFor, >, OptionQuery, >; @@ -459,7 +462,7 @@ pub mod pallet { item: T::ItemId, owner: T::AccountId, delegate: T::AccountId, - deadline: Option>, + deadline: Option>, }, /// An approval for a `delegate` account to transfer the `item` of an item /// `collection` was cancelled by its `owner`. @@ -554,7 +557,7 @@ pub mod pallet { desired_collection: T::CollectionId, desired_item: Option, price: Option>>, - deadline: BlockNumberFor, + deadline: BlockNumberFor, }, /// The swap was cancelled. SwapCancelled { @@ -563,7 +566,7 @@ pub mod pallet { desired_collection: T::CollectionId, desired_item: Option, price: Option>>, - deadline: BlockNumberFor, + deadline: BlockNumberFor, }, /// The swap has been claimed. 
SwapClaimed { @@ -574,7 +577,7 @@ pub mod pallet { received_item: T::ItemId, received_item_owner: T::AccountId, price: Option>>, - deadline: BlockNumberFor, + deadline: BlockNumberFor, }, /// New attributes have been set for an `item` of the `collection`. PreSignedAttributesSet { @@ -857,7 +860,7 @@ pub mod pallet { item_config, |collection_details, collection_config| { let mint_settings = collection_config.mint_settings; - let now = frame_system::Pallet::::block_number(); + let now = T::BlockNumberProvider::current_block_number(); if let Some(start_block) = mint_settings.start_block { ensure!(start_block <= now, Error::::MintNotStarted); @@ -1029,7 +1032,7 @@ pub mod pallet { let deadline = details.approvals.get(&origin).ok_or(Error::::NoPermission)?; if let Some(d) = deadline { - let block_number = frame_system::Pallet::::block_number(); + let block_number = T::BlockNumberProvider::current_block_number(); ensure!(block_number <= *d, Error::::ApprovalExpired); } } @@ -1290,7 +1293,7 @@ pub mod pallet { collection: T::CollectionId, item: T::ItemId, delegate: AccountIdLookupOf, - maybe_deadline: Option>, + maybe_deadline: Option>, ) -> DispatchResult { let maybe_check_origin = T::ForceOrigin::try_origin(origin) .map(|_| None) @@ -1713,7 +1716,7 @@ pub mod pallet { pub fn update_mint_settings( origin: OriginFor, collection: T::CollectionId, - mint_settings: MintSettings, BlockNumberFor, T::CollectionId>, + mint_settings: MintSettings, BlockNumberFor, T::CollectionId>, ) -> DispatchResult { let maybe_check_origin = T::ForceOrigin::try_origin(origin) .map(|_| None) @@ -1809,7 +1812,7 @@ pub mod pallet { desired_collection: T::CollectionId, maybe_desired_item: Option, maybe_price: Option>>, - duration: BlockNumberFor, + duration: BlockNumberFor, ) -> DispatchResult { let origin = ensure_signed(origin)?; Self::do_create_swap( diff --git a/substrate/frame/nfts/src/mock.rs b/substrate/frame/nfts/src/mock.rs index 5b589f591ca3..291c3c081334 100644 --- a/substrate/frame/nfts/src/mock.rs +++ b/substrate/frame/nfts/src/mock.rs @@ -92,6 +92,7 @@ impl Config for Test { type WeightInfo = (); #[cfg(feature = "runtime-benchmarks")] type Helper = (); + type BlockNumberProvider = frame_system::Pallet; } pub(crate) fn new_test_ext() -> sp_io::TestExternalities { diff --git a/substrate/frame/nfts/src/types.rs b/substrate/frame/nfts/src/types.rs index d67fb404ea79..3ab85993473a 100644 --- a/substrate/frame/nfts/src/types.rs +++ b/substrate/frame/nfts/src/types.rs @@ -27,9 +27,11 @@ use frame_support::{ traits::Get, BoundedBTreeMap, BoundedBTreeSet, }; -use frame_system::pallet_prelude::BlockNumberFor; use scale_info::{build::Fields, meta_type, Path, Type, TypeInfo, TypeParameter}; +pub type BlockNumberFor = + <>::BlockNumberProvider as BlockNumberProvider>::BlockNumber; + /// A type alias for handling balance deposits. pub type DepositBalanceOf = <>::Currency as Currency<::AccountId>>::Balance; @@ -39,7 +41,7 @@ pub type CollectionDetailsFor = /// A type alias for keeping track of approvals used by a single item. pub type ApprovalsOf = BoundedBTreeMap< ::AccountId, - Option>, + Option>, >::ApprovalsLimit, >; /// A type alias for keeping track of approvals for an item's attributes. @@ -70,13 +72,13 @@ pub type ItemTipOf = ItemTip< >; /// A type alias for the settings configuration of a collection. 
pub type CollectionConfigFor = - CollectionConfig, BlockNumberFor, >::CollectionId>; + CollectionConfig, BlockNumberFor, >::CollectionId>; /// A type alias for the pre-signed minting configuration for a specified collection. pub type PreSignedMintOf = PreSignedMint< >::CollectionId, >::ItemId, ::AccountId, - BlockNumberFor, + BlockNumberFor, BalanceOf, >; /// A type alias for the pre-signed minting configuration on the attribute level of an item. @@ -84,7 +86,7 @@ pub type PreSignedAttributesOf = PreSignedAttributes< >::CollectionId, >::ItemId, ::AccountId, - BlockNumberFor, + BlockNumberFor, >; /// Information about a collection. diff --git a/substrate/frame/proxy/src/benchmarking.rs b/substrate/frame/proxy/src/benchmarking.rs index eebb506bf374..b72f53af8e72 100644 --- a/substrate/frame/proxy/src/benchmarking.rs +++ b/substrate/frame/proxy/src/benchmarking.rs @@ -22,7 +22,9 @@ use super::*; use crate::Pallet as Proxy; use alloc::{boxed::Box, vec}; -use frame::benchmarking::prelude::*; +use frame::benchmarking::prelude::{ + account, benchmarks, impl_test_function, whitelisted_caller, BenchmarkError, RawOrigin, +}; const SEED: u32 = 0; @@ -317,7 +319,7 @@ mod benchmarks { BlockNumberFor::::zero(), 0, )?; - let height = frame_system::Pallet::::block_number(); + let height = T::BlockNumberProvider::current_block_number(); let ext_index = frame_system::Pallet::::extrinsic_index().unwrap_or(0); let pure_account = Pallet::::pure_account(&caller, &T::ProxyType::default(), 0, None); diff --git a/substrate/frame/proxy/src/lib.rs b/substrate/frame/proxy/src/lib.rs index cc8aeedcc5f9..cc21db7469b2 100644 --- a/substrate/frame/proxy/src/lib.rs +++ b/substrate/frame/proxy/src/lib.rs @@ -47,6 +47,9 @@ type CallHashOf = <::CallHasher as Hash>::Output; type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; +pub type BlockNumberFor = + <::BlockNumberProvider as BlockNumberProvider>::BlockNumber; + type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; /// The parameters under which a particular account has a proxy relationship with some other @@ -163,6 +166,9 @@ pub mod pallet { /// into a pre-existing storage value. #[pallet::constant] type AnnouncementDepositFactor: Get>; + + /// Provider for the block number. Normally this is the `frame_system` pallet. 
+ type BlockNumberProvider: BlockNumberProvider; } #[pallet::call] @@ -379,7 +385,7 @@ pub mod pallet { let announcement = Announcement { real: real.clone(), call_hash, - height: frame_system::Pallet::::block_number(), + height: T::BlockNumberProvider::current_block_number(), }; Announcements::::try_mutate(&who, |(ref mut pending, ref mut deposit)| { @@ -490,7 +496,7 @@ pub mod pallet { let def = Self::find_proxy(&real, &delegate, force_proxy_type)?; let call_hash = T::CallHasher::hash_of(&call); - let now = frame_system::Pallet::::block_number(); + let now = T::BlockNumberProvider::current_block_number(); Self::edit_announcements(&delegate, |ann| { ann.real != real || ann.call_hash != call_hash || @@ -626,7 +632,7 @@ impl Pallet { ) -> T::AccountId { let (height, ext_index) = maybe_when.unwrap_or_else(|| { ( - frame_system::Pallet::::block_number(), + T::BlockNumberProvider::current_block_number(), frame_system::Pallet::::extrinsic_index().unwrap_or_default(), ) }); diff --git a/substrate/frame/proxy/src/tests.rs b/substrate/frame/proxy/src/tests.rs index 5baf9bb9e838..afc668188e6c 100644 --- a/substrate/frame/proxy/src/tests.rs +++ b/substrate/frame/proxy/src/tests.rs @@ -119,6 +119,7 @@ impl Config for Test { type MaxPending = ConstU32<2>; type AnnouncementDepositBase = ConstU64<1>; type AnnouncementDepositFactor = ConstU64<1>; + type BlockNumberProvider = frame_system::Pallet; } use super::{Call as ProxyCall, Event as ProxyEvent}; diff --git a/substrate/frame/revive/src/tests.rs b/substrate/frame/revive/src/tests.rs index 177b8dff706b..34afe8aabfe6 100644 --- a/substrate/frame/revive/src/tests.rs +++ b/substrate/frame/revive/src/tests.rs @@ -416,6 +416,7 @@ impl pallet_proxy::Config for Test { type CallHasher = BlakeTwo256; type AnnouncementDepositBase = ConstU64<1>; type AnnouncementDepositFactor = ConstU64<1>; + type BlockNumberProvider = frame_system::Pallet; } parameter_types! { diff --git a/substrate/frame/safe-mode/src/mock.rs b/substrate/frame/safe-mode/src/mock.rs index ec1ad8249514..aaf3456272fa 100644 --- a/substrate/frame/safe-mode/src/mock.rs +++ b/substrate/frame/safe-mode/src/mock.rs @@ -138,6 +138,7 @@ impl pallet_proxy::Config for Test { type MaxPending = ConstU32<2>; type AnnouncementDepositBase = ConstU64<1>; type AnnouncementDepositFactor = ConstU64<1>; + type BlockNumberProvider = frame_system::Pallet; } /// The calls that can always bypass safe-mode. diff --git a/substrate/frame/src/lib.rs b/substrate/frame/src/lib.rs index 0ca36ca8545a..03d815e349df 100644 --- a/substrate/frame/src/lib.rs +++ b/substrate/frame/src/lib.rs @@ -219,8 +219,8 @@ pub mod prelude { /// Runtime traits #[doc(no_inline)] pub use sp_runtime::traits::{ - Bounded, DispatchInfoOf, Dispatchable, SaturatedConversion, Saturating, StaticLookup, - TrailingZeroInput, + BlockNumberProvider, Bounded, DispatchInfoOf, Dispatchable, SaturatedConversion, + Saturating, StaticLookup, TrailingZeroInput, }; /// Other error/result types for runtime diff --git a/substrate/frame/tx-pause/src/mock.rs b/substrate/frame/tx-pause/src/mock.rs index 84ce45e83528..fd9b3b552ccd 100644 --- a/substrate/frame/tx-pause/src/mock.rs +++ b/substrate/frame/tx-pause/src/mock.rs @@ -105,6 +105,7 @@ impl pallet_proxy::Config for Test { type MaxPending = ConstU32<2>; type AnnouncementDepositBase = ConstU64<1>; type AnnouncementDepositFactor = ConstU64<1>; + type BlockNumberProvider = frame_system::Pallet; } parameter_types! 
{ From 08ec8cdbfdbdd29c7921a8a141187e04354a449e Mon Sep 17 00:00:00 2001 From: eskimor Date: Fri, 22 Nov 2024 14:19:40 +0100 Subject: [PATCH 06/41] Only mess with coretime if we are registering an actual parachain. (#6554) Co-authored-by: Robert Co-authored-by: ordian --- polkadot/runtime/common/src/paras_sudo_wrapper.rs | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/polkadot/runtime/common/src/paras_sudo_wrapper.rs b/polkadot/runtime/common/src/paras_sudo_wrapper.rs index af93c70b4783..a93c209e9279 100644 --- a/polkadot/runtime/common/src/paras_sudo_wrapper.rs +++ b/polkadot/runtime/common/src/paras_sudo_wrapper.rs @@ -24,7 +24,7 @@ pub use pallet::*; use polkadot_primitives::Id as ParaId; use polkadot_runtime_parachains::{ configuration, dmp, hrmp, - paras::{self, AssignCoretime, ParaGenesisArgs}, + paras::{self, AssignCoretime, ParaGenesisArgs, ParaKind}, ParaLifecycle, }; @@ -80,10 +80,15 @@ pub mod pallet { genesis: ParaGenesisArgs, ) -> DispatchResult { ensure_root(origin)?; + + let assign_coretime = genesis.para_kind == ParaKind::Parachain; + polkadot_runtime_parachains::schedule_para_initialize::<T>(id, genesis) .map_err(|_| Error::<T>::ParaAlreadyExists)?; - T::AssignCoretime::assign_coretime(id)?; + if assign_coretime { + T::AssignCoretime::assign_coretime(id)?; + } Ok(()) } From 1e3b8e1639c1cf784eabf0a9afcab1f3987e0ca4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Sun, 24 Nov 2024 08:36:16 +0000 Subject: [PATCH 07/41] Notify telemetry only every second about the tx pool status (#6605) Before this was done for every imported transaction. When a lot of transactions got imported, the import notification channel was filled. The underlying problem was that the `status` call is read locking the `validated_pool` which will be write locked by the internal submitting logic. Thus, the submitting and the status reading were interfering with each other. --------- Co-authored-by: GitHub Action --- Cargo.lock | 1 + cumulus/client/service/Cargo.toml | 1 + prdoc/pr_6605.prdoc | 10 +++++ substrate/client/service/src/builder.rs | 58 +++++++++++++++-------- 4 files changed, 53 insertions(+), 17 deletions(-) create mode 100644 prdoc/pr_6605.prdoc diff --git a/Cargo.lock b/Cargo.lock index 330c2563d976..c79adee6f38e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4680,6 +4680,7 @@ dependencies = [ "cumulus-relay-chain-interface", "cumulus-relay-chain-minimal-node", "futures", + "futures-timer", "polkadot-primitives 7.0.0", "sc-client-api", "sc-consensus", diff --git a/cumulus/client/service/Cargo.toml b/cumulus/client/service/Cargo.toml index 8e9e41ca89dc..0a77b465d96a 100644 --- a/cumulus/client/service/Cargo.toml +++ b/cumulus/client/service/Cargo.toml @@ -11,6 +11,7 @@ workspace = true [dependencies] futures = { workspace = true } +futures-timer = { workspace = true } # Substrate sc-client-api = { workspace = true, default-features = true } diff --git a/prdoc/pr_6605.prdoc b/prdoc/pr_6605.prdoc new file mode 100644 index 000000000000..2adb1d8aee35 --- /dev/null +++ b/prdoc/pr_6605.prdoc @@ -0,0 +1,10 @@ +title: Notify telemetry only every second about the tx pool status +doc: +- audience: Node Operator + description: |- + Before this was done for every imported transaction. When a lot of transactions got imported, the import notification channel was filled. The underlying problem was that the `status` call is read locking the `validated_pool` which will be write locked by the internal submitting logic.
Thus, the submitting and the status reading were interfering with each other. +crates: +- name: cumulus-client-service + bump: patch +- name: sc-service + bump: patch diff --git a/substrate/client/service/src/builder.rs b/substrate/client/service/src/builder.rs index 68ac94539df8..ac9371a8941b 100644 --- a/substrate/client/service/src/builder.rs +++ b/substrate/client/service/src/builder.rs @@ -25,7 +25,7 @@ use crate::{ start_rpc_servers, BuildGenesisBlock, GenesisBlockBuilder, RpcHandlers, SpawnTaskHandle, TaskManager, TransactionPoolAdapter, }; -use futures::{future::ready, FutureExt, StreamExt}; +use futures::{select, FutureExt, StreamExt}; use jsonrpsee::RpcModule; use log::info; use prometheus_endpoint::Registry; @@ -90,7 +90,11 @@ use sp_consensus::block_validation::{ use sp_core::traits::{CodeExecutor, SpawnNamed}; use sp_keystore::KeystorePtr; use sp_runtime::traits::{Block as BlockT, BlockIdTo, NumberFor, Zero}; -use std::{str::FromStr, sync::Arc, time::SystemTime}; +use std::{ + str::FromStr, + sync::Arc, + time::{Duration, SystemTime}, +}; /// Full client type. pub type TFullClient<TBl, TRtApi, TExec> = @@ -577,22 +581,42 @@ pub async fn propagate_transaction_notifications( Block: BlockT, ExPool: MaintainedTransactionPool<Block = Block, Hash = <Block as BlockT>::Hash>, { + const TELEMETRY_INTERVAL: Duration = Duration::from_secs(1); + // transaction notifications - transaction_pool - .import_notification_stream() - .for_each(move |hash| { - tx_handler_controller.propagate_transaction(hash); - let status = transaction_pool.status(); - telemetry!( - telemetry; - SUBSTRATE_INFO; - "txpool.import"; - "ready" => status.ready, - "future" => status.future, - ); - ready(()) - }) - .await; + let mut notifications = transaction_pool.import_notification_stream().fuse(); + let mut timer = futures_timer::Delay::new(TELEMETRY_INTERVAL).fuse(); + let mut tx_imported = false; + + loop { + select! { + notification = notifications.next() => { + let Some(hash) = notification else { return }; + + tx_handler_controller.propagate_transaction(hash); + + tx_imported = true; + }, + _ = timer => { + timer = futures_timer::Delay::new(TELEMETRY_INTERVAL).fuse(); + + if !tx_imported { + continue; + } + + tx_imported = false; + let status = transaction_pool.status(); + + telemetry!( + telemetry; + SUBSTRATE_INFO; + "txpool.import"; + "ready" => status.ready, + "future" => status.future, + ); + } + } + } } /// Initialize telemetry with provided configuration and return telemetry handle From 6da7d36e060c6e5fd5a20395470db6910037a640 Mon Sep 17 00:00:00 2001 From: gupnik Date: Mon, 25 Nov 2024 10:56:21 +0530 Subject: [PATCH 08/41] Fixes cfg attributes in runtime macro (#6410) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes https://github.com/paritytech/polkadot-sdk/issues/6209 This PR adds the support for cfg attributes in the runtime macro.
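For illustration, the shape this enables — a minimal sketch distilled from the test updated in this PR (the pallet names, indices and feature flag mirror that test):

```rust
// Sketch: with this fix, the `cfg` predicate on `Example3` is parsed by the
// `runtime` macro and honored, instead of being silently dropped.
#[frame_support::runtime]
mod runtime {
    #[runtime::runtime]
    #[runtime::derive(RuntimeCall, RuntimeEvent, RuntimeError, RuntimeOrigin)]
    pub struct Runtime;

    #[runtime::pallet_index(0)]
    pub type System = frame_system;

    // Compiled into the runtime only when the feature is enabled.
    #[cfg(feature = "frame-feature-testing")]
    #[runtime::pallet_index(3)]
    pub type Example3 = pallet3;
}
```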
--------- Co-authored-by: Bastian Köcher --- .../procedural/src/runtime/parse/pallet.rs | 15 ++++- substrate/frame/support/test/tests/pallet.rs | 59 +++++++++++++++---- 2 files changed, 59 insertions(+), 15 deletions(-) diff --git a/substrate/frame/support/procedural/src/runtime/parse/pallet.rs b/substrate/frame/support/procedural/src/runtime/parse/pallet.rs index 52f57cd2cd8b..1397b7266a18 100644 --- a/substrate/frame/support/procedural/src/runtime/parse/pallet.rs +++ b/substrate/frame/support/procedural/src/runtime/parse/pallet.rs @@ -21,7 +21,7 @@ use crate::{ }; use frame_support_procedural_tools::get_doc_literals; use quote::ToTokens; -use syn::{punctuated::Punctuated, token, Error}; +use syn::{punctuated::Punctuated, spanned::Spanned, token, Error}; impl Pallet { pub fn try_from( @@ -78,7 +78,18 @@ impl Pallet { }) .collect(); - let cfg_pattern = vec![]; + let cfg_pattern = item + .attrs + .iter() + .filter(|attr| attr.path().segments.first().map_or(false, |s| s.ident == "cfg")) + .map(|attr| { + attr.parse_args_with(|input: syn::parse::ParseStream| { + let input = input.parse::()?; + cfg_expr::Expression::parse(&input.to_string()) + .map_err(|e| syn::Error::new(attr.span(), e.to_string())) + }) + }) + .collect::>>()?; let docs = get_doc_literals(&item.attrs); diff --git a/substrate/frame/support/test/tests/pallet.rs b/substrate/frame/support/test/tests/pallet.rs index b0b83f772499..de7f7eb4bc97 100644 --- a/substrate/frame/support/test/tests/pallet.rs +++ b/substrate/frame/support/test/tests/pallet.rs @@ -799,20 +799,43 @@ where } } -frame_support::construct_runtime!( - pub struct Runtime { - // Exclude part `Storage` in order not to check its metadata in tests. - System: frame_system exclude_parts { Pallet, Storage }, - Example: pallet, - Example2: pallet2 exclude_parts { Call }, - #[cfg(feature = "frame-feature-testing")] - Example3: pallet3, - Example4: pallet4 use_parts { Call }, +#[frame_support::runtime] +mod runtime { + #[runtime::runtime] + #[runtime::derive( + RuntimeCall, + RuntimeEvent, + RuntimeError, + RuntimeOrigin, + RuntimeFreezeReason, + RuntimeHoldReason, + RuntimeSlashReason, + RuntimeLockId, + RuntimeTask + )] + pub struct Runtime; - #[cfg(feature = "frame-feature-testing-2")] - Example5: pallet5, - } -); + #[runtime::pallet_index(0)] + pub type System = frame_system + Call + Event; + + #[runtime::pallet_index(1)] + pub type Example = pallet; + + #[runtime::pallet_index(2)] + #[runtime::disable_call] + pub type Example2 = pallet2; + + #[cfg(feature = "frame-feature-testing")] + #[runtime::pallet_index(3)] + pub type Example3 = pallet3; + + #[runtime::pallet_index(4)] + pub type Example4 = pallet4; + + #[cfg(feature = "frame-feature-testing-2")] + #[runtime::pallet_index(5)] + pub type Example5 = pallet5; +} // Test that the part `RuntimeCall` is excluded from Example2 and included in Example4. 
fn _ensure_call_is_correctly_excluded_and_included(call: RuntimeCall) { @@ -1847,6 +1870,16 @@ fn metadata() { error: None, docs: vec![" Test that the supertrait check works when we pass some parameter to the `frame_system::Config`."], }, + PalletMetadata { + index: 4, + name: "Example4", + storage: None, + calls: Some(meta_type::>().into()), + event: None, + constants: vec![], + error: None, + docs: vec![], + }, #[cfg(feature = "frame-feature-testing-2")] PalletMetadata { index: 5, From 75e79fad682bd86451464bc95117f6e22b86dfd9 Mon Sep 17 00:00:00 2001 From: Alexander Samusev <41779041+alvicsam@users.noreply.github.com> Date: Mon, 25 Nov 2024 11:02:37 +0100 Subject: [PATCH 09/41] ci: fix node-bench-regression-guard for master (#6589) Closes https://github.com/paritytech/ci_cd/issues/1067 --- .github/workflows/tests-misc.yml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/.github/workflows/tests-misc.yml b/.github/workflows/tests-misc.yml index cca32650b106..decd88f2e84c 100644 --- a/.github/workflows/tests-misc.yml +++ b/.github/workflows/tests-misc.yml @@ -165,12 +165,14 @@ jobs: - name: Download artifact (master run) uses: actions/download-artifact@v4.1.8 + continue-on-error: true with: name: cargo-check-benches-master-${{ github.sha }} path: ./artifacts/master - name: Download artifact (current run) uses: actions/download-artifact@v4.1.8 + continue-on-error: true with: name: cargo-check-benches-current-${{ github.sha }} path: ./artifacts/current @@ -183,6 +185,12 @@ jobs: exit 0 fi + # fail if no artifacts + if [ ! -d ./artifacts/master ] || [ ! -d ./artifacts/current ]; then + echo "No artifacts found" + exit 1 + fi + docker run --rm \ -v $PWD/artifacts/master:/artifacts/master \ -v $PWD/artifacts/current:/artifacts/current \ From e709c9f3db017309d88d240cb32d62c2df6f0a52 Mon Sep 17 00:00:00 2001 From: Branislav Kontur Date: Mon, 25 Nov 2024 11:59:48 +0100 Subject: [PATCH 10/41] Error logging for send xcm to pallet-xcm (#6579) --- polkadot/xcm/pallet-xcm/src/lib.rs | 16 +++++++++++++--- .../xcm/xcm-builder/src/process_xcm_message.rs | 2 +- 2 files changed, 14 insertions(+), 4 deletions(-) diff --git a/polkadot/xcm/pallet-xcm/src/lib.rs b/polkadot/xcm/pallet-xcm/src/lib.rs index 5e0512c6a9fd..6360298b21c3 100644 --- a/polkadot/xcm/pallet-xcm/src/lib.rs +++ b/polkadot/xcm/pallet-xcm/src/lib.rs @@ -363,7 +363,10 @@ pub mod pallet { let message: Xcm<()> = (*message).try_into().map_err(|()| Error::::BadVersion)?; let message_id = Self::send_xcm(interior, dest.clone(), message.clone()) - .map_err(Error::::from)?; + .map_err(|error| { + tracing::error!(target: "xcm::pallet_xcm::send", ?error, ?dest, ?message, "XCM send failed with error"); + Error::::from(error) + })?; let e = Event::Sent { origin: origin_location, destination: dest, message, message_id }; Self::deposit_event(e); Ok(message_id) @@ -1800,7 +1803,10 @@ impl Pallet { if let Some(remote_xcm) = remote_xcm { let (ticket, price) = validate_send::(dest.clone(), remote_xcm.clone()) - .map_err(Error::::from)?; + .map_err(|error| { + tracing::error!(target: "xcm::pallet_xcm::execute_xcm_transfer", ?error, ?dest, ?remote_xcm, "XCM validate_send failed with error"); + Error::::from(error) + })?; if origin != Here.into_location() { Self::charge_fees(origin.clone(), price.clone()).map_err(|error| { tracing::error!( @@ -1810,7 +1816,11 @@ impl Pallet { Error::::FeesNotMet })?; } - let message_id = T::XcmRouter::deliver(ticket).map_err(Error::::from)?; + let message_id = T::XcmRouter::deliver(ticket) + .map_err(|error| { + 
tracing::error!(target: "xcm::pallet_xcm::execute_xcm_transfer", ?error, ?dest, ?remote_xcm, "XCM deliver failed with error"); + Error::::from(error) + })?; let e = Event::Sent { origin, destination: dest, message: remote_xcm, message_id }; Self::deposit_event(e); diff --git a/polkadot/xcm/xcm-builder/src/process_xcm_message.rs b/polkadot/xcm/xcm-builder/src/process_xcm_message.rs index 8dafbf66adf0..67c05c116e9d 100644 --- a/polkadot/xcm/xcm-builder/src/process_xcm_message.rs +++ b/polkadot/xcm/xcm-builder/src/process_xcm_message.rs @@ -58,7 +58,7 @@ impl< let message = Xcm::::try_from(versioned_message).map_err(|_| { log::trace!( target: LOG_TARGET, - "Failed to convert `VersionedXcm` into `XcmV3`.", + "Failed to convert `VersionedXcm` into `xcm::prelude::Xcm`!", ); ProcessMessageError::Unsupported From c422d8bbae8ba327597582203f05d30c26ef1392 Mon Sep 17 00:00:00 2001 From: Alexander Samusev <41779041+alvicsam@users.noreply.github.com> Date: Mon, 25 Nov 2024 13:12:22 +0100 Subject: [PATCH 11/41] ci: improve workflow-stopper ux (#6632) PR addresses https://github.com/paritytech/polkadot-sdk/pull/6265#issuecomment-2497506857 cc https://github.com/paritytech/ci_cd/issues/1084 --- .github/workflows/build-misc.yml | 4 ++-- .github/workflows/check-frame-omni-bencher.yml | 4 ++-- .github/workflows/checks-quick.yml | 2 +- .github/workflows/checks.yml | 6 +++--- .github/workflows/docs.yml | 4 ++-- .github/workflows/tests-linux-stable.yml | 8 ++++---- 6 files changed, 14 insertions(+), 14 deletions(-) diff --git a/.github/workflows/build-misc.yml b/.github/workflows/build-misc.yml index a9b433a94b64..c4a7281b9ebc 100644 --- a/.github/workflows/build-misc.yml +++ b/.github/workflows/build-misc.yml @@ -44,7 +44,7 @@ jobs: forklift cargo check -p rococo-runtime forklift cargo check -p polkadot-test-runtime - name: Stop all workflows if failed - if: ${{ failure() && steps.required.conclusion == 'failure' }} + if: ${{ failure() && steps.required.conclusion == 'failure' && !github.event.pull_request.head.repo.fork }} uses: ./.github/actions/workflow-stopper with: app-id: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_ID }} @@ -73,7 +73,7 @@ jobs: cd ./substrate/bin/utils/subkey forklift cargo build --locked --release - name: Stop all workflows if failed - if: ${{ failure() && steps.required.conclusion == 'failure' }} + if: ${{ failure() && steps.required.conclusion == 'failure' && !github.event.pull_request.head.repo.fork }} uses: ./.github/actions/workflow-stopper with: app-id: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_ID }} diff --git a/.github/workflows/check-frame-omni-bencher.yml b/.github/workflows/check-frame-omni-bencher.yml index b47c9d49feaf..bc0ff82b6774 100644 --- a/.github/workflows/check-frame-omni-bencher.yml +++ b/.github/workflows/check-frame-omni-bencher.yml @@ -41,7 +41,7 @@ jobs: forklift cargo build --locked --quiet --release -p asset-hub-westend-runtime --features runtime-benchmarks forklift cargo run --locked --release -p frame-omni-bencher --quiet -- v1 benchmark pallet --runtime target/release/wbuild/asset-hub-westend-runtime/asset_hub_westend_runtime.compact.compressed.wasm --all --steps 2 --repeat 1 --quiet - name: Stop all workflows if failed - if: ${{ failure() && steps.required.conclusion == 'failure' }} + if: ${{ failure() && steps.required.conclusion == 'failure' && !github.event.pull_request.head.repo.fork }} uses: ./.github/actions/workflow-stopper with: app-id: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_ID }} @@ -99,7 +99,7 @@ jobs: echo "Running command: $cmd" eval "$cmd" - name: 
Stop all workflows if failed - if: ${{ failure() && steps.required.conclusion == 'failure' }} + if: ${{ failure() && steps.required.conclusion == 'failure' && !github.event.pull_request.head.repo.fork }} uses: ./.github/actions/workflow-stopper with: app-id: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_ID }} diff --git a/.github/workflows/checks-quick.yml b/.github/workflows/checks-quick.yml index 4fcaf80c83fc..c733a2517cb8 100644 --- a/.github/workflows/checks-quick.yml +++ b/.github/workflows/checks-quick.yml @@ -30,7 +30,7 @@ jobs: id: required run: cargo +nightly fmt --all -- --check - name: Stop all workflows if failed - if: ${{ failure() && steps.required.conclusion == 'failure' }} + if: ${{ failure() && steps.required.conclusion == 'failure' && !github.event.pull_request.head.repo.fork }} uses: ./.github/actions/workflow-stopper with: app-id: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_ID }} diff --git a/.github/workflows/checks.yml b/.github/workflows/checks.yml index c240504fa1e7..02428711811f 100644 --- a/.github/workflows/checks.yml +++ b/.github/workflows/checks.yml @@ -36,7 +36,7 @@ jobs: cargo clippy --all-targets --locked --workspace --quiet cargo clippy --all-targets --all-features --locked --workspace --quiet - name: Stop all workflows if failed - if: ${{ failure() && steps.required.conclusion == 'failure' }} + if: ${{ failure() && steps.required.conclusion == 'failure' && !github.event.pull_request.head.repo.fork }} uses: ./.github/actions/workflow-stopper with: app-id: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_ID }} @@ -62,7 +62,7 @@ jobs: # experimental code may rely on try-runtime and vice-versa forklift cargo check --locked --all --features try-runtime,experimental --quiet - name: Stop all workflows if failed - if: ${{ failure() && steps.required.conclusion == 'failure' }} + if: ${{ failure() && steps.required.conclusion == 'failure' && !github.event.pull_request.head.repo.fork }} uses: ./.github/actions/workflow-stopper with: app-id: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_ID }} @@ -91,7 +91,7 @@ jobs: ./check-features-variants.sh cd - - name: Stop all workflows if failed - if: ${{ failure() && steps.required.conclusion == 'failure' }} + if: ${{ failure() && steps.required.conclusion == 'failure' && !github.event.pull_request.head.repo.fork }} uses: ./.github/actions/workflow-stopper with: app-id: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_ID }} diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index cc84e7f9ad3b..b7c70c9e6d66 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -29,7 +29,7 @@ jobs: env: RUSTFLAGS: "-Cdebug-assertions=y -Dwarnings" - name: Stop all workflows if failed - if: ${{ failure() && steps.required.conclusion == 'failure' }} + if: ${{ failure() && steps.required.conclusion == 'failure' && !github.event.pull_request.head.repo.fork }} uses: ./.github/actions/workflow-stopper with: app-id: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_ID }} @@ -69,7 +69,7 @@ jobs: retention-days: 1 if-no-files-found: error - name: Stop all workflows if failed - if: ${{ failure() && steps.required.conclusion == 'failure' }} + if: ${{ failure() && steps.required.conclusion == 'failure' && !github.event.pull_request.head.repo.fork }} uses: ./.github/actions/workflow-stopper with: app-id: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_ID }} diff --git a/.github/workflows/tests-linux-stable.yml b/.github/workflows/tests-linux-stable.yml index b9d0605b2495..3f8dc4fe1240 100644 --- a/.github/workflows/tests-linux-stable.yml +++ 
b/.github/workflows/tests-linux-stable.yml @@ -37,7 +37,7 @@ jobs: id: required run: WASM_BUILD_NO_COLOR=1 forklift cargo test -p staging-node-cli --release --locked -- --ignored - name: Stop all workflows if failed - if: ${{ failure() && steps.required.conclusion == 'failure' }} + if: ${{ failure() && steps.required.conclusion == 'failure' && !github.event.pull_request.head.repo.fork }} uses: ./.github/actions/workflow-stopper with: app-id: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_ID }} @@ -63,7 +63,7 @@ jobs: id: required run: forklift cargo nextest run --workspace --features runtime-benchmarks benchmark --locked --cargo-profile testnet --cargo-quiet - name: Stop all workflows if failed - if: ${{ failure() && steps.required.conclusion == 'failure' }} + if: ${{ failure() && steps.required.conclusion == 'failure' && !github.event.pull_request.head.repo.fork }} uses: ./.github/actions/workflow-stopper with: app-id: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_ID }} @@ -113,7 +113,7 @@ jobs: if: ${{ matrix.partition == '1/3' }} run: forklift cargo nextest run -p sp-api-test --features enable-staging-api --cargo-quiet - name: Stop all workflows if failed - if: ${{ failure() && steps.required.conclusion == 'failure' }} + if: ${{ failure() && steps.required.conclusion == 'failure' && !github.event.pull_request.head.repo.fork }} uses: ./.github/actions/workflow-stopper with: app-id: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_ID }} @@ -155,7 +155,7 @@ jobs: --filter-expr " !test(/all_security_features_work/) - test(/nonexistent_cache_dir/)" \ --partition count:${{ matrix.partition }} \ - name: Stop all workflows if failed - if: ${{ failure() && steps.required.conclusion == 'failure' }} + if: ${{ failure() && steps.required.conclusion == 'failure' && !github.event.pull_request.head.repo.fork }} uses: ./.github/actions/workflow-stopper with: app-id: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_ID }} From 41b6915ecb4b5691cdeeb585e26d46c4897ae151 Mon Sep 17 00:00:00 2001 From: jpserrat <35823283+jpserrat@users.noreply.github.com> Date: Mon, 25 Nov 2024 12:54:04 -0300 Subject: [PATCH 12/41] remove ReportCollator message (#6628) Closes #6415 # Description Remove unused message `ReportCollator` and test related to this message on the collator protocol validator side. cc: @tdimitrov --------- Co-authored-by: Tsvetomir Dimitrov Co-authored-by: command-bot <> --- .../src/collator_side/mod.rs | 2 +- .../src/validator_side/mod.rs | 3 - .../src/validator_side/tests/mod.rs | 60 ------------------- polkadot/node/subsystem-types/src/messages.rs | 15 ++--- .../src/node/collators/collator-protocol.md | 6 -- .../src/node/subsystems-and-jobs.md | 7 +-- .../src/types/overseer-protocol.md | 3 - prdoc/pr_6628.prdoc | 12 ++++ 8 files changed, 20 insertions(+), 88 deletions(-) create mode 100644 prdoc/pr_6628.prdoc diff --git a/polkadot/node/network/collator-protocol/src/collator_side/mod.rs b/polkadot/node/network/collator-protocol/src/collator_side/mod.rs index 504b0d716043..d77480272cb4 100644 --- a/polkadot/node/network/collator-protocol/src/collator_side/mod.rs +++ b/polkadot/node/network/collator-protocol/src/collator_side/mod.rs @@ -899,7 +899,7 @@ async fn process_msg( ); } }, - msg @ (ReportCollator(..) | Invalid(..) | Seconded(..)) => { + msg @ (Invalid(..) 
| Seconded(..)) => { gum::warn!( target: LOG_TARGET, "{:?} message is not expected on the collator side of the protocol", diff --git a/polkadot/node/network/collator-protocol/src/validator_side/mod.rs b/polkadot/node/network/collator-protocol/src/validator_side/mod.rs index 86358f503d04..36ec959c3406 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side/mod.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side/mod.rs @@ -1462,9 +1462,6 @@ async fn process_msg( "DistributeCollation message is not expected on the validator side of the protocol", ); }, - ReportCollator(id) => { - report_collator(&mut state.reputation, ctx.sender(), &state.peer_data, id).await; - }, NetworkBridgeUpdate(event) => { if let Err(e) = handle_network_msg(ctx, state, keystore, event).await { gum::warn!( diff --git a/polkadot/node/network/collator-protocol/src/validator_side/tests/mod.rs b/polkadot/node/network/collator-protocol/src/validator_side/tests/mod.rs index 7bc61dd4ebec..f2f23c188a66 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side/tests/mod.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side/tests/mod.rs @@ -638,66 +638,6 @@ fn act_on_advertisement_v2() { }); } -// Test that other subsystems may modify collators' reputations. -#[test] -fn collator_reporting_works() { - let test_state = TestState::default(); - - test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { - let TestHarness { mut virtual_overseer, .. } = test_harness; - - overseer_send( - &mut virtual_overseer, - CollatorProtocolMessage::NetworkBridgeUpdate(NetworkBridgeEvent::OurViewChange( - our_view![test_state.relay_parent], - )), - ) - .await; - - respond_to_runtime_api_queries(&mut virtual_overseer, &test_state, test_state.relay_parent) - .await; - - let peer_b = PeerId::random(); - let peer_c = PeerId::random(); - - connect_and_declare_collator( - &mut virtual_overseer, - peer_b, - test_state.collators[0].clone(), - test_state.chain_ids[0], - CollationVersion::V1, - ) - .await; - - connect_and_declare_collator( - &mut virtual_overseer, - peer_c, - test_state.collators[1].clone(), - test_state.chain_ids[0], - CollationVersion::V1, - ) - .await; - - overseer_send( - &mut virtual_overseer, - CollatorProtocolMessage::ReportCollator(test_state.collators[0].public()), - ) - .await; - - assert_matches!( - overseer_recv(&mut virtual_overseer).await, - AllMessages::NetworkBridgeTx( - NetworkBridgeTxMessage::ReportPeer(ReportPeerMessage::Single(peer, rep)), - ) => { - assert_eq!(peer, peer_b); - assert_eq!(rep.value, COST_REPORT_BAD.cost_or_benefit()); - } - ); - - virtual_overseer - }); -} - // Test that we verify the signatures on `Declare` and `AdvertiseCollation` messages. 
#[test] fn collator_authentication_verification_works() { diff --git a/polkadot/node/subsystem-types/src/messages.rs b/polkadot/node/subsystem-types/src/messages.rs index 28a3a1ab82ab..b541f9519219 100644 --- a/polkadot/node/subsystem-types/src/messages.rs +++ b/polkadot/node/subsystem-types/src/messages.rs @@ -48,12 +48,12 @@ use polkadot_primitives::{ CommittedCandidateReceiptV2 as CommittedCandidateReceipt, CoreState, }, ApprovalVotingParams, AuthorityDiscoveryId, BlockNumber, CandidateCommitments, CandidateHash, - CandidateIndex, CollatorId, CoreIndex, DisputeState, ExecutorParams, GroupIndex, - GroupRotationInfo, Hash, HeadData, Header as BlockHeader, Id as ParaId, InboundDownwardMessage, - InboundHrmpMessage, MultiDisputeStatementSet, NodeFeatures, OccupiedCoreAssumption, - PersistedValidationData, PvfCheckStatement, PvfExecKind as RuntimePvfExecKind, SessionIndex, - SessionInfo, SignedAvailabilityBitfield, SignedAvailabilityBitfields, ValidationCode, - ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, + CandidateIndex, CoreIndex, DisputeState, ExecutorParams, GroupIndex, GroupRotationInfo, Hash, + HeadData, Header as BlockHeader, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, + MultiDisputeStatementSet, NodeFeatures, OccupiedCoreAssumption, PersistedValidationData, + PvfCheckStatement, PvfExecKind as RuntimePvfExecKind, SessionIndex, SessionInfo, + SignedAvailabilityBitfield, SignedAvailabilityBitfields, ValidationCode, ValidationCodeHash, + ValidatorId, ValidatorIndex, ValidatorSignature, }; use polkadot_statement_table::v2::Misbehavior; use std::{ @@ -250,9 +250,6 @@ pub enum CollatorProtocolMessage { /// The core index where the candidate should be backed. core_index: CoreIndex, }, - /// Report a collator as having provided an invalid collation. This should lead to disconnect - /// and blacklist of the collator. - ReportCollator(CollatorId), /// Get a network bridge update. #[from] NetworkBridgeUpdate(NetworkBridgeEvent), diff --git a/polkadot/roadmap/implementers-guide/src/node/collators/collator-protocol.md b/polkadot/roadmap/implementers-guide/src/node/collators/collator-protocol.md index 432d9ab69bab..586a4169b5bc 100644 --- a/polkadot/roadmap/implementers-guide/src/node/collators/collator-protocol.md +++ b/polkadot/roadmap/implementers-guide/src/node/collators/collator-protocol.md @@ -151,12 +151,6 @@ time per relay parent. This reduces the bandwidth requirements and as we can sec the others are probably not required anyway. If the request times out, we need to note the collator as being unreliable and reduce its priority relative to other collators. -As a validator, once the collation has been fetched some other subsystem will inspect and do deeper validation of the -collation. The subsystem will report to this subsystem with a [`CollatorProtocolMessage`][CPM]`::ReportCollator`. In -that case, if we are connected directly to the collator, we apply a cost to the `PeerId` associated with the collator -and potentially disconnect or blacklist it. If the collation is seconded, we notify the collator and apply a benefit to -the `PeerId` associated with the collator. 
- ### Interaction with [Candidate Backing][CB] As collators advertise the availability, a validator will simply second the first valid parablock candidate per relay diff --git a/polkadot/roadmap/implementers-guide/src/node/subsystems-and-jobs.md b/polkadot/roadmap/implementers-guide/src/node/subsystems-and-jobs.md index a3ca7347eb63..a96f3fa3d4a0 100644 --- a/polkadot/roadmap/implementers-guide/src/node/subsystems-and-jobs.md +++ b/polkadot/roadmap/implementers-guide/src/node/subsystems-and-jobs.md @@ -129,7 +129,6 @@ digraph { cand_sel -> coll_prot [arrowhead = "diamond", label = "FetchCollation"] cand_sel -> cand_back [arrowhead = "onormal", label = "Second"] - cand_sel -> coll_prot [arrowhead = "onormal", label = "ReportCollator"] cand_val -> runt_api [arrowhead = "diamond", label = "Request::PersistedValidationData"] cand_val -> runt_api [arrowhead = "diamond", label = "Request::ValidationCode"] @@ -231,7 +230,7 @@ sequenceDiagram VS ->> CandidateSelection: Collation - Note over CandidateSelection: Lots of other machinery in play here,
but there are only three outcomes from the
perspective of the `CollatorProtocol`: + Note over CandidateSelection: Lots of other machinery in play here,
but there are only two outcomes from the
perspective of the `CollatorProtocol`: alt happy path CandidateSelection -->> VS: FetchCollation @@ -242,10 +241,6 @@ sequenceDiagram NB ->> VS: Collation Deactivate VS - else collation invalid or unexpected - CandidateSelection ->> VS: ReportCollator - VS ->> NB: ReportPeer - else CandidateSelection already selected a different candidate Note over CandidateSelection: silently drop end diff --git a/polkadot/roadmap/implementers-guide/src/types/overseer-protocol.md b/polkadot/roadmap/implementers-guide/src/types/overseer-protocol.md index 85415e42a11c..cb862440727b 100644 --- a/polkadot/roadmap/implementers-guide/src/types/overseer-protocol.md +++ b/polkadot/roadmap/implementers-guide/src/types/overseer-protocol.md @@ -436,9 +436,6 @@ enum CollatorProtocolMessage { DistributeCollation(CandidateReceipt, PoV, Option>), /// Fetch a collation under the given relay-parent for the given ParaId. FetchCollation(Hash, ParaId, ResponseChannel<(CandidateReceipt, PoV)>), - /// Report a collator as having provided an invalid collation. This should lead to disconnect - /// and blacklist of the collator. - ReportCollator(CollatorId), /// Note a collator as having provided a good collation. NoteGoodCollation(CollatorId, SignedFullStatement), /// Notify a collator that its collation was seconded. diff --git a/prdoc/pr_6628.prdoc b/prdoc/pr_6628.prdoc new file mode 100644 index 000000000000..7ea0c4968385 --- /dev/null +++ b/prdoc/pr_6628.prdoc @@ -0,0 +1,12 @@ +title: "Remove ReportCollator message" + +doc: + - audience: Node Dev + description: | + Remove unused message ReportCollator and test related to this message on the collator protocol validator side. + +crates: + - name: polkadot-node-subsystem-types + bump: patch + - name: polkadot-collator-protocol + bump: major \ No newline at end of file From 6d5f8141ad42d7a528a09715c492974550709b92 Mon Sep 17 00:00:00 2001 From: Tarek Mohamed Abdalla Date: Mon, 25 Nov 2024 19:33:32 +0200 Subject: [PATCH 13/41] rpc server: fix subscription id_provider being reset to default one. (#6588) # Description The PR ensures that the id_provider variable is cloned instead of taken, which can help prevent issues related id provider being reset to the default. In [a test in moonbeam](https://github.com/moonbeam-foundation/moonbeam/blob/c6d07d703dfcdd94cc311fa83b553071b7d433ff/test/suites/dev/moonbase/test-subscription/test-subscription.ts#L20-L31) we found that the id_provider is being reset somehow and changed to the default one. Changing .take() to .clone() would fix the issue. # Checklist * [x] My PR includes a detailed description as outlined in the "Description" and its two subsections above. * [ ] My PR follows the [labeling requirements]( https://github.com/paritytech/polkadot-sdk/blob/master/docs/contributor/CONTRIBUTING.md#Process ) of this project (at minimum one label for `T` required) * External contributors: ask maintainers to put the right label on your PR. 
* [ ] I have made corresponding changes to the documentation (if applicable) * [ ] I have added tests that prove my fix is effective or that my feature works (if applicable) --------- Co-authored-by: Niklas Adolfsson --- prdoc/pr_6588.prdoc | 14 ++++++++++++++ substrate/client/rpc-servers/src/lib.rs | 7 +++---- 2 files changed, 17 insertions(+), 4 deletions(-) create mode 100644 prdoc/pr_6588.prdoc diff --git a/prdoc/pr_6588.prdoc b/prdoc/pr_6588.prdoc new file mode 100644 index 000000000000..bf44b2ed3784 --- /dev/null +++ b/prdoc/pr_6588.prdoc @@ -0,0 +1,14 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "rpc server: fix subscription id_provider being reset to default one" + +doc: + - audience: Node Dev + description: | + The modification ensures that the id_provider variable is cloned instead of taken, which helps prevent issues related to the id_provider being reset to the default one. + + +crates: + - name: sc-rpc-server + bump: patch \ No newline at end of file diff --git a/substrate/client/rpc-servers/src/lib.rs b/substrate/client/rpc-servers/src/lib.rs index 31e4042d81f2..ff21e2da768e 100644 --- a/substrate/client/rpc-servers/src/lib.rs +++ b/substrate/client/rpc-servers/src/lib.rs @@ -144,7 +144,7 @@ where local_addrs.push(local_addr); let cfg = cfg.clone(); - let mut id_provider2 = id_provider.clone(); + let id_provider2 = id_provider.clone(); tokio_handle.spawn(async move { loop { @@ -197,10 +197,9 @@ where .set_http_middleware(http_middleware) .set_message_buffer_capacity(max_buffer_capacity_per_connection) .set_batch_request_config(batch_config) - .custom_tokio_runtime(cfg.tokio_handle.clone()) - .set_id_provider(RandomStringIdProvider::new(16)); + .custom_tokio_runtime(cfg.tokio_handle.clone()); - if let Some(provider) = id_provider2.take() { + if let Some(provider) = id_provider2.clone() { builder = builder.set_id_provider(provider); } else { builder = builder.set_id_provider(RandomStringIdProvider::new(16)); From 7f64e66b3b01ff5e391e42a8159f601cbbeb46d8 Mon Sep 17 00:00:00 2001 From: Javier Viola <363911+pepoviola@users.noreply.github.com> Date: Tue, 26 Nov 2024 09:25:18 +0100 Subject: [PATCH 14/41] bump zombienet-sdk version `v0.2.16` (#6633) Fix #6575.
cc: @iulianbarbu Co-authored-by: Iulian Barbu <14218860+iulianbarbu@users.noreply.github.com> --- Cargo.lock | 24 ++++++++++++------------ Cargo.toml | 2 +- 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c79adee6f38e..338d5025b662 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -31910,9 +31910,9 @@ dependencies = [ [[package]] name = "zombienet-configuration" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d7a8cc4f8e8bb3f40757b62d3b054da5c95f43321c775eb321edc89d431583e" +checksum = "8ad4fc5b0f1aa54de6bf2d6771c449b41cad47e1cf30559af0a71452686b47ab" dependencies = [ "anyhow", "lazy_static", @@ -31930,9 +31930,9 @@ dependencies = [ [[package]] name = "zombienet-orchestrator" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d32fa87851f41443a78971bd7110274f9a66d139ac834de159adc08f90cf8e3" +checksum = "e4a7dd25842ded75c7f4dc4f38f05fef567bd0b37fd3057c223d4ee34d8fa817" dependencies = [ "anyhow", "async-trait", @@ -31963,9 +31963,9 @@ dependencies = [ [[package]] name = "zombienet-prom-metrics-parser" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9acb9c94bc7c2c83f8eb8e26ed403f757af1632f22b89394d8876412ede990ca" +checksum = "a63e0c6024dd19b0f8b28afa94f78c211e5c163350ecda4a48084532d74d7cfe" dependencies = [ "pest", "pest_derive", @@ -31974,9 +31974,9 @@ dependencies = [ [[package]] name = "zombienet-provider" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc8f3f71d4d974fc4a2262fa9293c2eedc423540378bd7c1dc1b66cc95d1d1af" +checksum = "8d87c29390a342d0f4f62b6796861fb82e0e56c49929a272b689e8dbf24eaab9" dependencies = [ "anyhow", "async-trait", @@ -32005,9 +32005,9 @@ dependencies = [ [[package]] name = "zombienet-sdk" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dbfddce7a6100cdc930b93301f1b6381e6577ecc013d6802258ea6902a2bebd" +checksum = "829e5111182caf00ba57cd63656cf0bde6ce6add7f6a9747d15821c202a3f27e" dependencies = [ "async-trait", "futures", @@ -32022,9 +32022,9 @@ dependencies = [ [[package]] name = "zombienet-support" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d20567c52b4fd46b600cda254dedb6a6dc30cabf512de91e4f6f78f0f7f4644b" +checksum = "99568384a1d9645458ab9de377b3517cb543a1ece5aba905aeb58d269139df4e" dependencies = [ "anyhow", "async-trait", diff --git a/Cargo.toml b/Cargo.toml index 533ea4c9e878..57e049885da5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1387,7 +1387,7 @@ xcm-procedural = { path = "polkadot/xcm/procedural", default-features = false } xcm-runtime-apis = { path = "polkadot/xcm/xcm-runtime-apis", default-features = false } xcm-simulator = { path = "polkadot/xcm/xcm-simulator", default-features = false } zeroize = { version = "1.7.0", default-features = false } -zombienet-sdk = { version = "0.2.15" } +zombienet-sdk = { version = "0.2.16" } zstd = { version = "0.12.4", default-features = false } [profile.release] From 86a917f59d04ea3b399b34edcb8effe76b6415e6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 26 Nov 2024 08:49:46 +0000 Subject: [PATCH 15/41] Bump rustls from 0.23.14 to 0.23.18 (#6641) Bumps [rustls](https://github.com/rustls/rustls) from 
0.23.14 to 0.23.18.

Commits:
- 33af2c3 Prepare 0.23.18
- ffe646d Add reproducer for bug 2227
- 69b6f74 Record and restore the processed cursor in first_handshake_message
- 4ef3532 Upgrade to mio 1
- 092a164 Manage dependencies via the workspace
- a01bd6b rustls-bench: fix warnings with no features
- 7d74de2 tests: linearize new test code helper
- 499d797 fix: do not send session_ticket(35) extension for TLS 1.3
- faca289 chore(deps): lock file maintenance
- d12f423 fix(deps): update rust crate asn1 to 0.20
- Additional commits viewable in the compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=rustls&package-manager=cargo&previous-version=0.23.14&new-version=0.23.18)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)

Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 28 ++++++++++++++-------------- Cargo.toml | 2 +- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 338d5025b662..c2d2eb3e9644 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8530,7 +8530,7 @@ dependencies = [ "hyper 1.3.1", "hyper-util", "log", - "rustls 0.23.14", + "rustls 0.23.18", "rustls-native-certs 0.8.0", "rustls-pki-types", "tokio", @@ -9145,7 +9145,7 @@ dependencies = [ "http 1.1.0", "jsonrpsee-core", "pin-project", - "rustls 0.23.14", + "rustls 0.23.18", "rustls-pki-types", "rustls-platform-verifier", "soketto 0.8.0", @@ -9198,7 +9198,7 @@ dependencies = [ "hyper-util", "jsonrpsee-core", "jsonrpsee-types", - "rustls 0.23.14", + "rustls 0.23.18", "rustls-platform-verifier", "serde", "serde_json", @@ -20609,7 +20609,7 @@ dependencies = [ "quinn-proto 0.11.8", "quinn-udp 0.5.4", "rustc-hash 2.0.0", - "rustls 0.23.14", + "rustls 0.23.18", "socket2 0.5.7", "thiserror", "tokio", @@ -20643,7 +20643,7 @@ dependencies = [ "rand", "ring 0.17.7", "rustc-hash 2.0.0", - "rustls 0.23.14", + "rustls 0.23.18", "slab", "thiserror", "tinyvec", @@ -21130,7 +21130,7 @@ dependencies = [ "percent-encoding", "pin-project-lite", "quinn 0.11.5", - "rustls 0.23.14", + "rustls 0.23.18", "rustls-pemfile 2.0.0", "rustls-pki-types", "serde", @@ -21734,9 +21734,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.14" +version = "0.23.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "415d9944693cb90382053259f89fbb077ea730ad7273047ec63b19bc9b160ba8" +checksum = "9c9cc1d47e243d655ace55ed38201c19ae02c148ae56412ab8750e8f0166ab7f" dependencies = [ "log", "once_cell", @@ -21806,9 +21806,9 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.9.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e696e35370c65c9c541198af4543ccd580cf17fc25d8e05c5a242b202488c55" +checksum = "16f1201b3c9a7ee8039bcadc17b7e605e2945b27eee7631788c1bd2b0643674b" [[package]] name = "rustls-platform-verifier" @@ -21821,7 +21821,7 @@ dependencies = [ "jni", "log", "once_cell", - "rustls 0.23.14", + "rustls 0.23.18", "rustls-native-certs 0.7.0", "rustls-platform-verifier-android", "rustls-webpki 0.102.8", @@ -23128,7 +23128,7 @@ dependencies = [ "parity-scale-codec", "parking_lot 0.12.3", "rand", - "rustls 0.23.14", + "rustls 0.23.18", "sc-block-builder", "sc-client-api", "sc-client-db", @@ -29529,7 +29529,7 @@ version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" dependencies = [ - "rustls 0.23.14", + "rustls 0.23.18", "rustls-pki-types", "tokio", ] @@ -30240,7 +30240,7 @@ dependencies = [ "flate2", "log", "once_cell", - "rustls 0.23.14", + "rustls 0.23.18", "rustls-pki-types", "serde", "serde_json", diff --git a/Cargo.toml b/Cargo.toml index 57e049885da5..53f95406e79a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1136,7 +1136,7 @@ rstest = { version = "0.18.2" } rustc-hash = { version = "1.1.0" } rustc-hex = { version = "2.1.0", default-features = false } rustix = { version = "0.36.7", default-features = false } -rustls = { version = "0.23.14", default-features = false, features = ["logging", "ring", "std", "tls12"] } +rustls = { version = "0.23.18", default-features = false, features = ["logging", "ring", "std", "tls12"] } rustversion = { version = "1.0.17" 
} rusty-fork = { version = "0.3.0", default-features = false } safe-mix = { version = "1.0", default-features = false } From 8216235b480630c5a69636b172f5146711c88c9a Mon Sep 17 00:00:00 2001 From: Alexandru Vasile <60601340+lexnv@users.noreply.github.com> Date: Tue, 26 Nov 2024 10:58:48 +0200 Subject: [PATCH 16/41] ci/check-semver: Fix semver failed step (#6535) The semver-check is failing with the following [error](https://github.com/paritytech/polkadot-sdk/actions/runs/11908981132/job/33185572284): ```bash error[E0658]: use of unstable library feature 'error_in_core' --> /usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/frame-decode-0.5.0/src/decoding/extrinsic_decoder.rs:56:6 | 56 | impl core::error::Error for ExtrinsicDecodeError {} | ^^^^^^^^^^^^^^^^^^ | = note: see issue #103765 for more information = help: add `#![feature(error_in_core)]` to the crate attributes to enable = note: this compiler was built on 2024-05-31; consider upgrading it if it is out of date ``` This is related to the toolchain nightly version 1.80. In rust, 1.81 the `core::error::Error` is stable. After updating the rust-toolchain, parity-publish crate must be updated as well. The `cargo-semver-checks` dependency of `parity-publish` crate is updated from 0.34 to 0.38. This update enables rustdoc v36 that fixes the following [issue](https://github.com/paritytech/polkadot-sdk/actions/runs/11912689841/job/33196936011): ```bash validating prdocs... checking file changes... checking semver changes... (1/18) building frame-support-HEAD... (2/18) building frame-support-28.0.0... Error: rustdoc format v36 for file /__w/polkadot-sdk/polkadot-sdk/target/doc/frame_support.new is not supported ``` This PR is pending on a release of parity-publish to version 0.9.0 (fixes already on origin/master) --------- Signed-off-by: Alexandru Vasile --- .github/workflows/check-semver.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/check-semver.yml b/.github/workflows/check-semver.yml index 78602410cdf6..8d77b6a31b75 100644 --- a/.github/workflows/check-semver.yml +++ b/.github/workflows/check-semver.yml @@ -11,7 +11,7 @@ concurrency: cancel-in-progress: true env: - TOOLCHAIN: nightly-2024-06-01 + TOOLCHAIN: nightly-2024-10-19 jobs: preflight: @@ -74,7 +74,7 @@ jobs: - name: install parity-publish # Set the target dir to cache the build. - run: CARGO_TARGET_DIR=./target/ cargo install parity-publish@0.8.0 --locked -q + run: CARGO_TARGET_DIR=./target/ cargo install parity-publish@0.10.1 --locked -q - name: check semver run: | From 3c003872178b6ce535e9f26ce52e324f36075ffd Mon Sep 17 00:00:00 2001 From: Egor_P Date: Tue, 26 Nov 2024 14:46:51 +0100 Subject: [PATCH 17/41] [Release|CI/CD] Github pipeline to publish polkadot deb package (#6640) This pipeline should replace a manual action done on the `cleamroom` server to publish the `polkadot` deb package to our apt repo with the pipeline triggered from the new paritytech-release org. Right now, this is done manually by running the [add-packages.sh](https://github.com/paritytech/cleanroom/blob/master/ansible/roles/parity-repos/files/add-packages.sh) script on the `cleanroom` machine. 
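In shell terms, the manual flow being replaced boils down to roughly the following — a sketch only; the distribution name and package version are illustrative, while the commands mirror the workflow added in this PR:

```bash
# Rough sketch of the manual deb-publishing flow (illustrative values).
set -euo pipefail

LOCAL_DEB_REPO_PATH=./deb
AWS_DEB_PATH=s3://releases-package-repos/deb
VERSION=1.17.0   # illustrative package version

# 1. Pull the current repo state down from S3.
aws s3 sync "$AWS_DEB_PATH/db"    "$LOCAL_DEB_REPO_PATH/db"
aws s3 sync "$AWS_DEB_PATH/pool"  "$LOCAL_DEB_REPO_PATH/pool"
aws s3 sync "$AWS_DEB_PATH/dists" "$LOCAL_DEB_REPO_PATH/dists"

# 2. Add and sign the new package for the target distribution.
reprepro -b "$LOCAL_DEB_REPO_PATH" includedeb staging "polkadot_${VERSION}_amd64.deb"

# 3. Push the updated repo back; dists/ and pool/ must stay publicly readable.
aws s3 sync "$LOCAL_DEB_REPO_PATH/pool"  "$AWS_DEB_PATH/pool"  --acl public-read
aws s3 sync "$LOCAL_DEB_REPO_PATH/dists" "$AWS_DEB_PATH/dists" --acl public-read
aws s3 sync "$LOCAL_DEB_REPO_PATH/db"    "$AWS_DEB_PATH/db"
```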
What is done under the hood: - Pipeline downloads `polkadot` deb package from S3 that was prebuilt in the [Build release rc pipeline](https://github.com/paritytech/polkadot-sdk/blob/master/.github/workflows/release-build-rc.yml) - Prepares and syncs local apt repository - Adds and signs deb package to it using `reprepro` - Uploads new deb package to the distributed repo Closes: https://github.com/paritytech/release-engineering/issues/239 --- .github/scripts/common/lib.sh | 33 +++- .github/scripts/release/distributions | 39 +++++ .../release-40_publish-deb-package.yml | 152 ++++++++++++++++++ 3 files changed, 222 insertions(+), 2 deletions(-) create mode 100644 .github/scripts/release/distributions create mode 100644 .github/workflows/release-40_publish-deb-package.yml diff --git a/.github/scripts/common/lib.sh b/.github/scripts/common/lib.sh index e3dd6224f29b..6b8f70a26d7e 100755 --- a/.github/scripts/common/lib.sh +++ b/.github/scripts/common/lib.sh @@ -237,15 +237,44 @@ fetch_release_artifacts() { popd > /dev/null } -# Fetch the release artifacts like binary and signatures from S3. Assumes the ENV are set: +# Fetch deb package from S3. Assumes the ENV are set: # - RELEASE_ID # - GITHUB_TOKEN # - REPO in the form paritytech/polkadot -fetch_release_artifacts_from_s3() { +fetch_debian_package_from_s3() { BINARY=$1 echo "Version : $VERSION" echo "Repo : $REPO" echo "Binary : $BINARY" + echo "Tag : $RELEASE_TAG" OUTPUT_DIR=${OUTPUT_DIR:-"./release-artifacts/${BINARY}"} echo "OUTPUT_DIR : $OUTPUT_DIR" + + URL_BASE=$(get_s3_url_base $BINARY) + echo "URL_BASE=$URL_BASE" + + URL=$URL_BASE/$RELEASE_TAG/x86_64-unknown-linux-gnu/${BINARY}_${VERSION}_amd64.deb + + mkdir -p "$OUTPUT_DIR" + pushd "$OUTPUT_DIR" > /dev/null + + echo "Fetching deb package..." + + echo "Fetching $URL" + curl --progress-bar -LO "$URL" || echo "Missing $URL" + + pwd + ls -al --color + popd > /dev/null + +} + +# Fetch the release artifacts like binary and signatures from S3. Assumes the ENV are set: +# - RELEASE_ID +# - GITHUB_TOKEN +# - REPO in the form paritytech/polkadot +fetch_release_artifacts_from_s3() { BINARY=$1 OUTPUT_DIR=${OUTPUT_DIR:-"./release-artifacts/${BINARY}"} echo "OUTPUT_DIR : $OUTPUT_DIR" diff --git a/.github/scripts/release/distributions b/.github/scripts/release/distributions new file mode 100644 index 000000000000..a430ec76c6ba --- /dev/null +++ b/.github/scripts/release/distributions @@ -0,0 +1,39 @@ +Origin: Parity +Label: Parity +Codename: release +Architectures: amd64 +Components: main +Description: Apt repository for software made by Parity Technologies Ltd. +SignWith: 90BD75EBBB8E95CB3DA6078F94A4029AB4B35DAE + +Origin: Parity +Label: Parity Staging +Codename: staging +Architectures: amd64 +Components: main +Description: Staging distribution for Parity Technologies Ltd. packages +SignWith: 90BD75EBBB8E95CB3DA6078F94A4029AB4B35DAE + +Origin: Parity +Label: Parity stable2407 +Codename: stable2407 +Architectures: amd64 +Components: main +Description: Apt repository for software made by Parity Technologies Ltd. +SignWith: 90BD75EBBB8E95CB3DA6078F94A4029AB4B35DAE + +Origin: Parity +Label: Parity stable2409 +Codename: stable2409 +Architectures: amd64 +Components: main +Description: Apt repository for software made by Parity Technologies Ltd. +SignWith: 90BD75EBBB8E95CB3DA6078F94A4029AB4B35DAE + +Origin: Parity +Label: Parity stable2412 +Codename: stable2412 +Architectures: amd64 +Components: main +Description: Apt repository for software made by Parity Technologies Ltd.
+SignWith: 90BD75EBBB8E95CB3DA6078F94A4029AB4B35DAE diff --git a/.github/workflows/release-40_publish-deb-package.yml b/.github/workflows/release-40_publish-deb-package.yml new file mode 100644 index 000000000000..3c5411ab16f0 --- /dev/null +++ b/.github/workflows/release-40_publish-deb-package.yml @@ -0,0 +1,152 @@ +name: Release - Publish polakdot deb package + +on: + workflow_dispatch: + inputs: + tag: + description: Current final release tag in the format polakdot-stableYYMM or polkadot-stable-YYMM-X + default: polkadot-stable2412 + required: true + type: string + + distribution: + description: Distribution where to publish deb package (release, staging, stable2407, etc) + default: staging + required: true + type: string + +jobs: + check-synchronization: + uses: paritytech-release/sync-workflows/.github/workflows/check-syncronization.yml@main + + validate-inputs: + needs: [check-synchronization] + if: ${{ needs.check-synchronization.outputs.checks_passed }} == 'true' + runs-on: ubuntu-latest + outputs: + release_tag: ${{ steps.validate_inputs.outputs.release_tag }} + + steps: + - name: Checkout sources + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 + + - name: Validate inputs + id: validate_inputs + run: | + . ./.github/scripts/common/lib.sh + + RELEASE_TAG=$(validate_stable_tag ${{ inputs.tag }}) + echo "release_tag=${RELEASE_TAG}" >> $GITHUB_OUTPUT + + + fetch-artifacts-from-s3: + runs-on: ubuntu-latest + needs: [validate-inputs] + env: + REPO: ${{ github.repository }} + RELEASE_TAG: ${{ needs.validate-inputs.outputs.release_tag }} + outputs: + VERSION: ${{ steps.fetch_artifacts_from_s3.outputs.VERSION }} + + steps: + - name: Checkout sources + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 + + - name: Fetch rc artifacts or release artifacts from s3 based on version + id: fetch_artifacts_from_s3 + run: | + . ./.github/scripts/common/lib.sh + + VERSION="$(get_polkadot_node_version_from_code)" + echo "VERSION=${VERSION}" >> $GITHUB_OUTPUT + + fetch_debian_package_from_s3 polkadot + + - name: Upload artifacts + uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 + with: + name: release-artifacts + path: release-artifacts/polkadot/*.deb + + publish-deb-package: + runs-on: ubuntu-latest + needs: [fetch-artifacts-from-s3] + environment: release + env: + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_DEB_PATH: "s3://releases-package-repos/deb" + LOCAL_DEB_REPO_PATH: ${{ github.workspace }}/deb + VERSION: ${{ needs.fetch-artifacts-from-s3.outputs.VERSION }} + + steps: + - name: Install pgpkkms + run: | + # Install pgpkms that is used to sign built artifacts + python3 -m pip install "pgpkms @ git+https://github.com/paritytech-release/pgpkms.git@1f8555426662ac93a3849480a35449f683b1c89f" + echo "PGPKMS_REPREPRO_PATH=$(which pgpkms-reprepro)" >> $GITHUB_ENV + + - name: Install awscli + run: | + python3 -m pip install awscli + which aws + + - name: Checkout sources + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 + + - name: Import gpg keys + shell: bash + run: | + . ./.github/scripts/common/lib.sh + + import_gpg_keys + + - name: Download artifacts + uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 + with: + name: release-artifacts + path: release-artifacts + + - name: Setup local deb repo + run: | + sudo apt-get install -y reprepro + which reprepro + + sed -i "s|^SignWith:.*|SignWith: ! 
${PGPKMS_REPREPRO_PATH}|" ${{ github.workspace }}/.github/scripts/release/distributions + + mkdir -p ${{ github.workspace }}/deb/conf + cp ${{ github.workspace }}/.github/scripts/release/distributions ${{ github.workspace }}/deb/conf/distributions + cat ${{ github.workspace }}/deb/conf/distributions + + - name: Sync local deb repo + env: + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + run: | + # Download the current state of the deb repo + aws s3 sync "$AWS_DEB_PATH/db" "$LOCAL_DEB_REPO_PATH/db" + aws s3 sync "$AWS_DEB_PATH/pool" "$LOCAL_DEB_REPO_PATH/pool" + aws s3 sync "$AWS_DEB_PATH/dists" "$LOCAL_DEB_REPO_PATH/dists" + + - name: Add deb package to local repo + env: + PGP_KMS_KEY: ${{ secrets.PGP_KMS_KEY }} + PGP_KMS_HASH: ${{ secrets.PGP_KMS_HASH }} + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + run: | + # Add the new deb to the repo + reprepro -b "$LOCAL_DEB_REPO_PATH" includedeb "${{ inputs.distribution }}" "release-artifacts/polkadot_${VERSION}_amd64.deb" + + - name: Upload updated deb repo + env: + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + run: | + # Upload the updated repo - dists and pool should be publicly readable + aws s3 sync "$LOCAL_DEB_REPO_PATH/pool" "$AWS_DEB_PATH/pool" --acl public-read + aws s3 sync "$LOCAL_DEB_REPO_PATH/dists" "$AWS_DEB_PATH/dists" --acl public-read + aws s3 sync "$LOCAL_DEB_REPO_PATH/db" "$AWS_DEB_PATH/db" + aws s3 sync "$LOCAL_DEB_REPO_PATH/conf" "$AWS_DEB_PATH/conf" + + # Invalidate caches to make sure latest files are served + aws cloudfront create-invalidation --distribution-id E36FKEYWDXAZYJ --paths '/deb/*' From 1c0b610078611dfebc082be52e77109ea4517e48 Mon Sep 17 00:00:00 2001 From: Branislav Kontur Date: Tue, 26 Nov 2024 15:52:23 +0100 Subject: [PATCH 18/41] xcm: fix local/remote exports when inner routers return `NotApplicable` (#6645) This PR addresses two small fixes: 1. Fixed a typo ("as as") found on the way. 2. Resolved a bug in the `local/remote exporters` used for bridging. Previously, they consumed `dest` and `msg` without returning them when inner routers/exporters failed with `NotApplicable`. This PR ensures compliance with the [`SendXcm`](https://github.com/paritytech/polkadot-sdk/blob/master/polkadot/xcm/src/v5/traits.rs#L449-L450) and [`ExportXcm`](https://github.com/paritytech/polkadot-sdk/blob/master/polkadot/xcm/xcm-executor/src/traits/export.rs#L44-L45) traits. --------- Co-authored-by: GitHub Action --- polkadot/grafana/README.md | 2 +- polkadot/grafana/parachains/status.json | 2 +- polkadot/xcm/src/v3/traits.rs | 4 +- polkadot/xcm/src/v4/traits.rs | 4 +- polkadot/xcm/src/v5/traits.rs | 4 +- .../xcm/xcm-builder/src/universal_exports.rs | 265 +++++++++++++++--- .../xcm/xcm-executor/src/traits/export.rs | 4 +- prdoc/pr_6645.prdoc | 14 + .../frame/examples/default-config/src/lib.rs | 4 +- 9 files changed, 248 insertions(+), 55 deletions(-) create mode 100644 prdoc/pr_6645.prdoc diff --git a/polkadot/grafana/README.md b/polkadot/grafana/README.md index e909fdd29a75..0ecb0b70515b 100644 --- a/polkadot/grafana/README.md +++ b/polkadot/grafana/README.md @@ -90,4 +90,4 @@ and issue statement or initiate dispute. - **Assignment delay tranches**. Approval voting is designed such that validators assigned to check a specific candidate are split up into equal delay tranches (0.5 seconds each). 
All validators checks are ordered by the delay tranche index. Early tranches of validators have the opportunity to check the candidate first before later tranches -that act as as backups in case of no shows. +that act as backups in case of no shows. diff --git a/polkadot/grafana/parachains/status.json b/polkadot/grafana/parachains/status.json index 5942cbdf4479..22250967848d 100644 --- a/polkadot/grafana/parachains/status.json +++ b/polkadot/grafana/parachains/status.json @@ -1405,7 +1405,7 @@ "type": "prometheus", "uid": "$data_source" }, - "description": "Approval voting requires that validators which are assigned to check a specific \ncandidate are split up into delay tranches (0.5s each). Then, all validators checks are ordered by the delay \ntranche index. Early tranches of validators will check the candidate first and later tranches act as as backups in case of no shows.", + "description": "Approval voting requires that validators which are assigned to check a specific \ncandidate are split up into delay tranches (0.5s each). Then, all validators checks are ordered by the delay \ntranche index. Early tranches of validators will check the candidate first and later tranches act as backups in case of no shows.", "gridPos": { "h": 9, "w": 18, diff --git a/polkadot/xcm/src/v3/traits.rs b/polkadot/xcm/src/v3/traits.rs index 1c8620708922..cbf85b454cc6 100644 --- a/polkadot/xcm/src/v3/traits.rs +++ b/polkadot/xcm/src/v3/traits.rs @@ -547,13 +547,13 @@ impl SendXcm for Tuple { } /// Convenience function for using a `SendXcm` implementation. Just interprets the `dest` and wraps -/// both in `Some` before passing them as as mutable references into `T::send_xcm`. +/// both in `Some` before passing them as mutable references into `T::send_xcm`. pub fn validate_send(dest: MultiLocation, msg: Xcm<()>) -> SendResult { T::validate(&mut Some(dest), &mut Some(msg)) } /// Convenience function for using a `SendXcm` implementation. Just interprets the `dest` and wraps -/// both in `Some` before passing them as as mutable references into `T::send_xcm`. +/// both in `Some` before passing them as mutable references into `T::send_xcm`. /// /// Returns either `Ok` with the price of the delivery, or `Err` with the reason why the message /// could not be sent. diff --git a/polkadot/xcm/src/v4/traits.rs b/polkadot/xcm/src/v4/traits.rs index f32b26fb163d..178093d27177 100644 --- a/polkadot/xcm/src/v4/traits.rs +++ b/polkadot/xcm/src/v4/traits.rs @@ -289,13 +289,13 @@ impl SendXcm for Tuple { } /// Convenience function for using a `SendXcm` implementation. Just interprets the `dest` and wraps -/// both in `Some` before passing them as as mutable references into `T::send_xcm`. +/// both in `Some` before passing them as mutable references into `T::send_xcm`. pub fn validate_send(dest: Location, msg: Xcm<()>) -> SendResult { T::validate(&mut Some(dest), &mut Some(msg)) } /// Convenience function for using a `SendXcm` implementation. Just interprets the `dest` and wraps -/// both in `Some` before passing them as as mutable references into `T::send_xcm`. +/// both in `Some` before passing them as mutable references into `T::send_xcm`. /// /// Returns either `Ok` with the price of the delivery, or `Err` with the reason why the message /// could not be sent. 
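Editorial illustration: the doc-comment fixes above are cosmetic, but the surrounding traits carry a real contract that the rest of this patch enforces: `validate` receives `dest` and `msg` as `&mut Option<_>` and must leave them in place when it answers `NotApplicable`, so that the next router in a tuple can still try them. A minimal, self-contained sketch of that contract, using stand-in types (`Router`, `RouterA`, `RouterB` are hypothetical, not the real `xcm` API):

```rust
// Stand-in sketch of the `SendXcm` contract; destinations and messages are simplified.
#[derive(Debug, PartialEq)]
enum SendError {
    NotApplicable,
    MissingArgument,
}

trait Router {
    // Mirrors `SendXcm::validate`: arguments arrive as `&mut Option<_>`.
    fn validate(dest: &mut Option<u32>, msg: &mut Option<String>) -> Result<(), SendError>;
}

struct RouterA; // pretend it only serves destination `1`
impl Router for RouterA {
    fn validate(dest: &mut Option<u32>, msg: &mut Option<String>) -> Result<(), SendError> {
        let d = dest.take().ok_or(SendError::MissingArgument)?;
        if d != 1 {
            // Not our destination: restore `dest` before bailing out, so the next
            // router in the tuple still sees it. This is the invariant the patch
            // repairs in the local/remote exporters.
            *dest = Some(d);
            return Err(SendError::NotApplicable);
        }
        msg.take().ok_or(SendError::MissingArgument)?;
        Ok(())
    }
}

struct RouterB; // pretend it serves everything else
impl Router for RouterB {
    fn validate(dest: &mut Option<u32>, msg: &mut Option<String>) -> Result<(), SendError> {
        dest.take().ok_or(SendError::MissingArgument)?;
        msg.take().ok_or(SendError::MissingArgument)?;
        Ok(())
    }
}

fn main() {
    let (mut dest, mut msg) = (Some(2), Some("xcm".to_string()));
    // Tuple-style routing: RouterA declines, RouterB must still get both values.
    assert_eq!(RouterA::validate(&mut dest, &mut msg), Err(SendError::NotApplicable));
    assert!(dest.is_some() && msg.is_some()); // nothing was consumed
    assert_eq!(RouterB::validate(&mut dest, &mut msg), Ok(()));
}
```

The bug fixed in this patch was exactly a violation of this invariant: the exporters `take()`-ed the values and returned `NotApplicable` without restoring them.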
diff --git a/polkadot/xcm/src/v5/traits.rs b/polkadot/xcm/src/v5/traits.rs index 1f5041ca8d84..dd067b774fcd 100644 --- a/polkadot/xcm/src/v5/traits.rs +++ b/polkadot/xcm/src/v5/traits.rs @@ -502,13 +502,13 @@ impl SendXcm for Tuple { } /// Convenience function for using a `SendXcm` implementation. Just interprets the `dest` and wraps -/// both in `Some` before passing them as as mutable references into `T::send_xcm`. +/// both in `Some` before passing them as mutable references into `T::send_xcm`. pub fn validate_send(dest: Location, msg: Xcm<()>) -> SendResult { T::validate(&mut Some(dest), &mut Some(msg)) } /// Convenience function for using a `SendXcm` implementation. Just interprets the `dest` and wraps -/// both in `Some` before passing them as as mutable references into `T::send_xcm`. +/// both in `Some` before passing them as mutable references into `T::send_xcm`. /// /// Returns either `Ok` with the price of the delivery, or `Err` with the reason why the message /// could not be sent. diff --git a/polkadot/xcm/xcm-builder/src/universal_exports.rs b/polkadot/xcm/xcm-builder/src/universal_exports.rs index 5c754f01ec0a..aae8438c78d2 100644 --- a/polkadot/xcm/xcm-builder/src/universal_exports.rs +++ b/polkadot/xcm/xcm-builder/src/universal_exports.rs @@ -68,20 +68,28 @@ impl> SendXcm fn validate( dest: &mut Option, - xcm: &mut Option>, + msg: &mut Option>, ) -> SendResult { - let d = dest.take().ok_or(MissingArgument)?; + // This `clone` ensures that `dest` is not consumed in any case. + let d = dest.clone().take().ok_or(MissingArgument)?; let universal_source = UniversalLocation::get(); - let devolved = match ensure_is_remote(universal_source.clone(), d) { - Ok(x) => x, - Err(d) => { - *dest = Some(d); - return Err(NotApplicable) - }, - }; - let (network, destination) = devolved; - let xcm = xcm.take().ok_or(SendError::MissingArgument)?; - validate_export::(network, 0, universal_source, destination, xcm) + let devolved = ensure_is_remote(universal_source.clone(), d).map_err(|_| NotApplicable)?; + let (remote_network, remote_location) = devolved; + let xcm = msg.take().ok_or(MissingArgument)?; + + validate_export::( + remote_network, + 0, + universal_source, + remote_location, + xcm.clone(), + ) + .inspect_err(|err| { + if let NotApplicable = err { + // We need to make sure that msg is not consumed in case of `NotApplicable`. + *msg = Some(xcm); + } + }) } fn deliver(ticket: Exporter::Ticket) -> Result { @@ -95,7 +103,7 @@ pub trait ExporterFor { /// /// The payment is specified from the local context, not the bridge chain. This is the /// total amount to withdraw in to Holding and should cover both payment for the execution on - /// the bridge chain as well as payment for the use of the `ExportMessage` instruction. + /// the bridge chain and payment for the use of the `ExportMessage` instruction. fn exporter_for( network: &NetworkId, remote_location: &InteriorLocation, @@ -205,7 +213,8 @@ impl, msg: &mut Option>, ) -> SendResult { - let d = dest.clone().ok_or(MissingArgument)?; + // This `clone` ensures that `dest` is not consumed in any case. + let d = dest.clone().take().ok_or(MissingArgument)?; let devolved = ensure_is_remote(UniversalLocation::get(), d).map_err(|_| NotApplicable)?; let (remote_network, remote_location) = devolved; let xcm = msg.take().ok_or(MissingArgument)?; @@ -216,7 +225,7 @@ impl(bridge, message) + validate_send::(bridge, message).inspect_err(|err| { + if let NotApplicable = err { + // We need to make sure that msg is not consumed in case of `NotApplicable`. 
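+ // (The `SendXcm` contract: a router that answers `NotApplicable` must leave `dest` and `msg` in place so that other routers in a tuple can still try them.)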
+ *msg = Some(xcm); + } + }) } fn deliver(validation: Self::Ticket) -> Result { @@ -272,9 +290,9 @@ impl, msg: &mut Option>, ) -> SendResult { - let d = dest.as_ref().ok_or(MissingArgument)?; - let devolved = - ensure_is_remote(UniversalLocation::get(), d.clone()).map_err(|_| NotApplicable)?; + // This `clone` ensures that `dest` is not consumed in any case. + let d = dest.clone().take().ok_or(MissingArgument)?; + let devolved = ensure_is_remote(UniversalLocation::get(), d).map_err(|_| NotApplicable)?; let (remote_network, remote_location) = devolved; let xcm = msg.take().ok_or(MissingArgument)?; @@ -284,7 +302,7 @@ impl(bridge, message)?; + let (v, mut cost) = validate_send::(bridge, message).inspect_err(|err| { + if let NotApplicable = err { + // We need to make sure that msg is not consumed in case of `NotApplicable`. + *msg = Some(xcm); + } + })?; if let Some(bridge_payment) = maybe_payment { cost.push(bridge_payment); } @@ -476,10 +502,10 @@ impl< let Location { parents, interior: mut junctions } = BridgedNetwork::get(); match junctions.take_first() { Some(GlobalConsensus(network)) => (network, parents), - _ => return Err(SendError::NotApplicable), + _ => return Err(NotApplicable), } }; - ensure!(&network == &bridged_network, SendError::NotApplicable); + ensure!(&network == &bridged_network, NotApplicable); // We don't/can't use the `channel` for this adapter. let dest = destination.take().ok_or(SendError::MissingArgument)?; @@ -496,7 +522,7 @@ impl< }, Err((dest, _)) => { *destination = Some(dest); - return Err(SendError::NotApplicable) + return Err(NotApplicable) }, }; @@ -540,6 +566,10 @@ impl< #[cfg(test)] mod tests { use super::*; + use frame_support::{ + assert_err, assert_ok, + traits::{Contains, Equals}, + }; #[test] fn ensure_is_remote_works() { @@ -564,21 +594,48 @@ mod tests { assert_eq!(x, Err((Parent, Polkadot, Parachain(1000)).into())); } - pub struct OkSender; - impl SendXcm for OkSender { + pub struct OkFor(PhantomData); + impl> SendXcm for OkFor { type Ticket = (); fn validate( - _destination: &mut Option, + destination: &mut Option, _message: &mut Option>, ) -> SendResult { - Ok(((), Assets::new())) + if let Some(d) = destination.as_ref() { + if Filter::contains(&d) { + return Ok(((), Assets::new())) + } + } + Err(NotApplicable) } fn deliver(_ticket: Self::Ticket) -> Result { Ok([0; 32]) } } + impl> ExportXcm for OkFor { + type Ticket = (); + + fn validate( + network: NetworkId, + _: u32, + _: &mut Option, + destination: &mut Option, + _: &mut Option>, + ) -> SendResult { + if let Some(d) = destination.as_ref() { + if Filter::contains(&(network, d.clone())) { + return Ok(((), Assets::new())) + } + } + Err(NotApplicable) + } + + fn deliver(_ticket: Self::Ticket) -> Result { + Ok([1; 32]) + } + } /// Generic test case asserting that dest and msg is not consumed by `validate` implementation /// of `SendXcm` in case of expected result. @@ -598,46 +655,168 @@ mod tests { } #[test] - fn remote_exporters_does_not_consume_dest_or_msg_on_not_applicable() { + fn local_exporters_works() { frame_support::parameter_types! 
{ pub Local: NetworkId = ByGenesis([0; 32]); pub UniversalLocation: InteriorLocation = [GlobalConsensus(Local::get()), Parachain(1234)].into(); pub DifferentRemote: NetworkId = ByGenesis([22; 32]); - // no routers - pub BridgeTable: Vec = vec![]; + pub RemoteDestination: Junction = Parachain(9657); + pub RoutableBridgeFilter: (NetworkId, InteriorLocation) = (DifferentRemote::get(), RemoteDestination::get().into()); } + type RoutableBridgeExporter = OkFor>; + type NotApplicableBridgeExporter = OkFor<()>; + assert_ok!(validate_export::( + DifferentRemote::get(), + 0, + UniversalLocation::get(), + RemoteDestination::get().into(), + Xcm::default() + )); + assert_err!( + validate_export::( + DifferentRemote::get(), + 0, + UniversalLocation::get(), + RemoteDestination::get().into(), + Xcm::default() + ), + NotApplicable + ); - // check with local destination (should be remote) + // 1. check with local destination (should be remote) let local_dest: Location = (Parent, Parachain(5678)).into(); assert!(ensure_is_remote(UniversalLocation::get(), local_dest.clone()).is_err()); + // UnpaidLocalExporter ensure_validate_does_not_consume_dest_or_msg::< - UnpaidRemoteExporter, OkSender, UniversalLocation>, + UnpaidLocalExporter, >(local_dest.clone(), |result| assert_eq!(Err(NotApplicable), result)); + // 2. check with not applicable from the inner router (using `NotApplicableBridgeSender`) + let remote_dest: Location = + (Parent, Parent, DifferentRemote::get(), RemoteDestination::get()).into(); + assert!(ensure_is_remote(UniversalLocation::get(), remote_dest.clone()).is_ok()); + + // UnpaidLocalExporter + ensure_validate_does_not_consume_dest_or_msg::< + UnpaidLocalExporter, + >(remote_dest.clone(), |result| assert_eq!(Err(NotApplicable), result)); + + // 3. Ok - deliver + // UnpaidRemoteExporter + assert_ok!(send_xcm::>( + remote_dest, + Xcm::default() + )); + } + + #[test] + fn remote_exporters_works() { + frame_support::parameter_types! { + pub Local: NetworkId = ByGenesis([0; 32]); + pub UniversalLocation: InteriorLocation = [GlobalConsensus(Local::get()), Parachain(1234)].into(); + pub DifferentRemote: NetworkId = ByGenesis([22; 32]); + pub RoutableBridge: Location = Location::new(1, Parachain(9657)); + // not routable + pub NotApplicableBridgeTable: Vec = vec![]; + // routable + pub RoutableBridgeTable: Vec = vec![ + NetworkExportTableItem::new( + DifferentRemote::get(), + None, + RoutableBridge::get(), + None + ) + ]; + } + type RoutableBridgeSender = OkFor>; + type NotApplicableBridgeSender = OkFor<()>; + assert_ok!(validate_send::(RoutableBridge::get(), Xcm::default())); + assert_err!( + validate_send::(RoutableBridge::get(), Xcm::default()), + NotApplicable + ); + + // 1. check with local destination (should be remote) + let local_dest: Location = (Parent, Parachain(5678)).into(); + assert!(ensure_is_remote(UniversalLocation::get(), local_dest.clone()).is_err()); + + // UnpaidRemoteExporter + ensure_validate_does_not_consume_dest_or_msg::< + UnpaidRemoteExporter< + NetworkExportTable, + RoutableBridgeSender, + UniversalLocation, + >, + >(local_dest.clone(), |result| assert_eq!(Err(NotApplicable), result)); + // SovereignPaidRemoteExporter ensure_validate_does_not_consume_dest_or_msg::< SovereignPaidRemoteExporter< - NetworkExportTable, - OkSender, + NetworkExportTable, + RoutableBridgeSender, UniversalLocation, >, >(local_dest, |result| assert_eq!(Err(NotApplicable), result)); - // check with not applicable destination + // 2. 
check with not applicable destination (`NotApplicableBridgeTable`) let remote_dest: Location = (Parent, Parent, DifferentRemote::get()).into(); assert!(ensure_is_remote(UniversalLocation::get(), remote_dest.clone()).is_ok()); + // UnpaidRemoteExporter ensure_validate_does_not_consume_dest_or_msg::< - UnpaidRemoteExporter, OkSender, UniversalLocation>, + UnpaidRemoteExporter< + NetworkExportTable, + RoutableBridgeSender, + UniversalLocation, + >, >(remote_dest.clone(), |result| assert_eq!(Err(NotApplicable), result)); - + // SovereignPaidRemoteExporter ensure_validate_does_not_consume_dest_or_msg::< SovereignPaidRemoteExporter< - NetworkExportTable, - OkSender, + NetworkExportTable, + RoutableBridgeSender, UniversalLocation, >, >(remote_dest, |result| assert_eq!(Err(NotApplicable), result)); + + // 3. check with not applicable from the inner router (using `NotApplicableBridgeSender`) + let remote_dest: Location = (Parent, Parent, DifferentRemote::get()).into(); + assert!(ensure_is_remote(UniversalLocation::get(), remote_dest.clone()).is_ok()); + + // UnpaidRemoteExporter + ensure_validate_does_not_consume_dest_or_msg::< + UnpaidRemoteExporter< + NetworkExportTable, + NotApplicableBridgeSender, + UniversalLocation, + >, + >(remote_dest.clone(), |result| assert_eq!(Err(NotApplicable), result)); + // SovereignPaidRemoteExporter + ensure_validate_does_not_consume_dest_or_msg::< + SovereignPaidRemoteExporter< + NetworkExportTable, + NotApplicableBridgeSender, + UniversalLocation, + >, + >(remote_dest.clone(), |result| assert_eq!(Err(NotApplicable), result)); + + // 4. Ok - deliver + // UnpaidRemoteExporter + assert_ok!(send_xcm::< + UnpaidRemoteExporter< + NetworkExportTable, + RoutableBridgeSender, + UniversalLocation, + >, + >(remote_dest.clone(), Xcm::default())); + // SovereignPaidRemoteExporter + assert_ok!(send_xcm::< + SovereignPaidRemoteExporter< + NetworkExportTable, + RoutableBridgeSender, + UniversalLocation, + >, + >(remote_dest, Xcm::default())); } #[test] diff --git a/polkadot/xcm/xcm-executor/src/traits/export.rs b/polkadot/xcm/xcm-executor/src/traits/export.rs index b356e0da7df7..3e9275edab37 100644 --- a/polkadot/xcm/xcm-executor/src/traits/export.rs +++ b/polkadot/xcm/xcm-executor/src/traits/export.rs @@ -108,7 +108,7 @@ impl ExportXcm for Tuple { } /// Convenience function for using a `SendXcm` implementation. Just interprets the `dest` and wraps -/// both in `Some` before passing them as as mutable references into `T::send_xcm`. +/// both in `Some` before passing them as mutable references into `T::send_xcm`. pub fn validate_export( network: NetworkId, channel: u32, @@ -120,7 +120,7 @@ pub fn validate_export( } /// Convenience function for using a `SendXcm` implementation. Just interprets the `dest` and wraps -/// both in `Some` before passing them as as mutable references into `T::send_xcm`. +/// both in `Some` before passing them as mutable references into `T::send_xcm`. /// /// Returns either `Ok` with the price of the delivery, or `Err` with the reason why the message /// could not be sent. diff --git a/prdoc/pr_6645.prdoc b/prdoc/pr_6645.prdoc new file mode 100644 index 000000000000..f033cadc0b6e --- /dev/null +++ b/prdoc/pr_6645.prdoc @@ -0,0 +1,14 @@ +title: 'xcm: fix local/remote exports when inner routers return `NotApplicable`' +doc: +- audience: Runtime Dev + description: |- + Resolved a bug in the `local/remote exporters` used for bridging. Previously, they consumed `dest` and `msg` without returning them when inner routers/exporters failed with `NotApplicable`. 
This PR ensures compliance with the [`SendXcm`](https://github.com/paritytech/polkadot-sdk/blob/master/polkadot/xcm/src/v5/traits.rs#L449-L450) and [`ExportXcm`](https://github.com/paritytech/polkadot-sdk/blob/master/polkadot/xcm/xcm-executor/src/traits/export.rs#L44-L45) traits. +crates: +- name: staging-xcm-builder + bump: patch +- name: polkadot + bump: none +- name: staging-xcm + bump: none +- name: staging-xcm-executor + bump: none diff --git a/substrate/frame/examples/default-config/src/lib.rs b/substrate/frame/examples/default-config/src/lib.rs index ccdcd4968598..f690bffe0998 100644 --- a/substrate/frame/examples/default-config/src/lib.rs +++ b/substrate/frame/examples/default-config/src/lib.rs @@ -62,10 +62,10 @@ pub mod pallet { type OverwrittenDefaultValue: Get; /// An input parameter that relies on `::AccountId`. This can - /// too have a default, as long as as it is present in `frame_system::DefaultConfig`. + /// too have a default, as long as it is present in `frame_system::DefaultConfig`. type CanDeriveDefaultFromSystem: Get; - /// We might chose to declare as one that doesn't have a default, for whatever semantical + /// We might choose to declare as one that doesn't have a default, for whatever semantical /// reason. #[pallet::no_default] type HasNoDefault: Get; From f520adb0aacd51247ed548c7db9bef874c2cca9e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dino=20Pa=C4=8Dandi?= <3002868+Dinonard@users.noreply.github.com> Date: Tue, 26 Nov 2024 15:56:02 +0100 Subject: [PATCH 19/41] Zero refund check for FungibleAdapter (#6506) `FungibleAdapter` will now check if the _refund amount_ is zero before calling deposit & emitting an event. Fixes https://github.com/paritytech/polkadot-sdk/issues/6469. --------- Co-authored-by: GitHub Action --- prdoc/pr_6506.prdoc | 10 +++++ .../frame/transaction-payment/src/payment.rs | 17 +++++---- .../frame/transaction-payment/src/tests.rs | 37 +++++++++++++++++++ 3 files changed, 56 insertions(+), 8 deletions(-) create mode 100644 prdoc/pr_6506.prdoc diff --git a/prdoc/pr_6506.prdoc b/prdoc/pr_6506.prdoc new file mode 100644 index 000000000000..7c6164a9959a --- /dev/null +++ b/prdoc/pr_6506.prdoc @@ -0,0 +1,10 @@ +title: Zero refund check for FungibleAdapter +doc: +- audience: Runtime User + description: |- + `FungibleAdapter` will now check if the _refund amount_ is zero before calling deposit & emitting an event. + + Fixes https://github.com/paritytech/polkadot-sdk/issues/6469. +crates: +- name: pallet-transaction-payment + bump: patch diff --git a/substrate/frame/transaction-payment/src/payment.rs b/substrate/frame/transaction-payment/src/payment.rs index 4b39cd3fe53b..b8a047fee3e6 100644 --- a/substrate/frame/transaction-payment/src/payment.rs +++ b/substrate/frame/transaction-payment/src/payment.rs @@ -155,14 +155,15 @@ where if let Some(paid) = already_withdrawn { // Calculate how much refund we should return let refund_amount = paid.peek().saturating_sub(corrected_fee); - // refund to the the account that paid the fees if it exists. otherwise, don't refind - // anything. - let refund_imbalance = if F::total_balance(who) > F::Balance::zero() { - F::deposit(who, refund_amount, Precision::BestEffort) - .unwrap_or_else(|_| Debt::::zero()) - } else { - Debt::::zero() - }; + // Refund to the account that paid the fees if it exists and the refund is non-zero. + // Otherwise, don't refund anything.
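+ // Skipping the zero-value deposit also avoids emitting a spurious zero-amount `Deposit` event (see https://github.com/paritytech/polkadot-sdk/issues/6469).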
+ let refund_imbalance = + if refund_amount > Zero::zero() && F::total_balance(who) > F::Balance::zero() { + F::deposit(who, refund_amount, Precision::BestEffort) + .unwrap_or_else(|_| Debt::::zero()) + } else { + Debt::::zero() + }; // merge the imbalance caused by paying the fees and refunding parts of it again. let adjusted_paid: Credit = paid .offset(refund_imbalance) diff --git a/substrate/frame/transaction-payment/src/tests.rs b/substrate/frame/transaction-payment/src/tests.rs index 572c1d4961dd..bde1bf64728e 100644 --- a/substrate/frame/transaction-payment/src/tests.rs +++ b/substrate/frame/transaction-payment/src/tests.rs @@ -877,3 +877,40 @@ fn no_fee_and_no_weight_for_other_origins() { assert_eq!(post_info.actual_weight, Some(info.call_weight)); }) } + +#[test] +fn fungible_adapter_no_zero_refund_action() { + type FungibleAdapterT = payment::FungibleAdapter; + + ExtBuilder::default().balance_factor(10).build().execute_with(|| { + System::set_block_number(10); + + let dummy_acc = 1; + let (actual_fee, no_tip) = (10, 0); + let already_paid = >::withdraw_fee( + &dummy_acc, + CALL, + &CALL.get_dispatch_info(), + actual_fee, + no_tip, + ).expect("Account must have enough funds."); + + // Correction action with no expected side effect. + assert!(>::correct_and_deposit_fee( + &dummy_acc, + &CALL.get_dispatch_info(), + &default_post_info(), + actual_fee, + no_tip, + already_paid, + ).is_ok()); + + // Ensure no zero amount deposit event is emitted. + let events = System::events(); + assert!(!events + .iter() + .any(|record| matches!(record.event, RuntimeEvent::Balances(pallet_balances::Event::Deposit { amount, .. }) if amount.is_zero())), + "No zero amount deposit amount event should be emitted.", + ); + }); +} From fc315ac5979e9bf1cc56b58ab6f364b6b2689635 Mon Sep 17 00:00:00 2001 From: Giuseppe Re Date: Tue, 26 Nov 2024 17:26:19 +0100 Subject: [PATCH 20/41] Hide nonce implementation details in metadata (#6562) See https://github.com/polkadot-fellows/runtimes/issues/248 : using `TypeWithDefault` having derived `TypeInfo` for `Nonce` causes a breaking change in metadata for nonce type because it's no longer `u64`. Adding a default implementation of `TypeInfo` for `TypeWithDefault` to restore the original type info in metadata. --------- Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> --- prdoc/pr_6562.prdoc | 14 +++++++++++ .../runtime/src/type_with_default.rs | 23 +++++++++++++++++-- 2 files changed, 35 insertions(+), 2 deletions(-) create mode 100644 prdoc/pr_6562.prdoc diff --git a/prdoc/pr_6562.prdoc b/prdoc/pr_6562.prdoc new file mode 100644 index 000000000000..250b656aefb5 --- /dev/null +++ b/prdoc/pr_6562.prdoc @@ -0,0 +1,14 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Hide nonce implementation details in metadata + +doc: + - audience: Runtime Dev + description: | + Use custom implementation of TypeInfo for TypeWithDefault to show inner value's type info. + This should bring back nonce to u64 in metadata. 
+ +crates: - name: sp-runtime bump: minor \ No newline at end of file diff --git a/substrate/primitives/runtime/src/type_with_default.rs b/substrate/primitives/runtime/src/type_with_default.rs index 5790e3ab6bf6..b0eca22e5c1a 100644 --- a/substrate/primitives/runtime/src/type_with_default.rs +++ b/substrate/primitives/runtime/src/type_with_default.rs @@ -31,7 +31,7 @@ use num_traits::{ CheckedAdd, CheckedDiv, CheckedMul, CheckedNeg, CheckedRem, CheckedShl, CheckedShr, CheckedSub, Num, NumCast, PrimInt, Saturating, ToPrimitive, }; -use scale_info::TypeInfo; +use scale_info::{StaticTypeInfo, TypeInfo}; use sp_core::Get; #[cfg(feature = "serde")] @@ -40,7 +40,8 @@ use serde::{Deserialize, Serialize}; /// A type that wraps another type and provides a default value. /// /// Passes through arithmetical and many other operations to the inner value. -#[derive(Encode, Decode, TypeInfo, Debug, MaxEncodedLen)] +/// Type information for metadata is the same as the inner value's type. +#[derive(Encode, Decode, Debug, MaxEncodedLen)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub struct TypeWithDefault>(T, PhantomData); @@ -50,6 +51,17 @@ impl> TypeWithDefault { } } +// Hides implementation details from the outside (for metadata type information). +// +// The type info shown in metadata is that of the inner value's type. +impl + 'static> TypeInfo for TypeWithDefault { + type Identity = Self; + + fn type_info() -> scale_info::Type { + T::type_info() + } +} + impl> Clone for TypeWithDefault { fn clone(&self) -> Self { Self(self.0.clone(), PhantomData) @@ -511,6 +523,7 @@ impl> CompactAs for TypeWithDefault { #[cfg(test)] mod tests { use super::TypeWithDefault; + use scale_info::TypeInfo; use sp_arithmetic::traits::{AtLeast16Bit, AtLeast32Bit, AtLeast8Bit}; use sp_core::Get; @@ -565,5 +578,11 @@ mod tests { } type U128WithDefault = TypeWithDefault; impl WrapAtLeast32Bit for U128WithDefault {} + + assert_eq!(U8WithDefault::type_info(), ::type_info()); + assert_eq!(U16WithDefault::type_info(), ::type_info()); + assert_eq!(U32WithDefault::type_info(), ::type_info()); + assert_eq!(U64WithDefault::type_info(), ::type_info()); + assert_eq!(U128WithDefault::type_info(), ::type_info()); } } From 139691b17c66aa074d2b4ae935158d4296068f72 Mon Sep 17 00:00:00 2001 From: Francisco Aguirre Date: Tue, 26 Nov 2024 16:24:43 -0300 Subject: [PATCH 21/41] Fix `XcmPaymentApi::query_weight_to_asset_fee` version conversion (#6459) The `query_weight_to_asset_fee` function was trying to convert versions by using `try_as`, but this function [doesn't convert from a versioned to a concrete type](https://github.com/paritytech/polkadot-sdk/blob/0156ca8f959d5cf3787c18113ce48acaaf1a8345/polkadot/xcm/src/lib.rs#L131). This would cause all calls with a lower version to fail. The correct function to use is the good old [try_into](https://github.com/paritytech/polkadot-sdk/blob/0156ca8f959d5cf3787c18113ce48acaaf1a8345/polkadot/xcm/src/lib.rs#L184).
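As an editorial illustration, here is a minimal, self-contained sketch of the difference (the types `VersionedAssetId`, `AssetIdV4`, `AssetIdV5` below are simplified stand-ins, not the real `xcm` enums): a `try_as`-style accessor only matches when the stored version is already the requested one, while a `TryFrom`/`try_into` conversion upgrades older versions.

```rust
// Stand-in versioned type, mimicking the `VersionedAssetId` pattern.
#[derive(Debug, PartialEq)]
struct AssetIdV4(u32);
#[derive(Debug, PartialEq)]
struct AssetIdV5(u32);

enum VersionedAssetId {
    V4(AssetIdV4),
    V5(AssetIdV5),
}

impl VersionedAssetId {
    // `try_as`-style: only succeeds if the value is *already* stored as V5.
    fn try_as_v5(&self) -> Result<&AssetIdV5, ()> {
        match self {
            VersionedAssetId::V5(id) => Ok(id),
            _ => Err(()), // a V4 value fails here, even though it is convertible
        }
    }
}

// `try_into`-style: genuinely converts older versions to the latest one.
impl TryFrom<VersionedAssetId> for AssetIdV5 {
    type Error = ();
    fn try_from(value: VersionedAssetId) -> Result<Self, ()> {
        match value {
            VersionedAssetId::V4(AssetIdV4(inner)) => Ok(AssetIdV5(inner)),
            VersionedAssetId::V5(id) => Ok(id),
        }
    }
}

fn main() {
    let v4 = VersionedAssetId::V4(AssetIdV4(7));
    assert!(v4.try_as_v5().is_err()); // old behaviour: lower versions were rejected
    let latest: AssetIdV5 = v4.try_into().unwrap(); // fixed behaviour: converted
    assert_eq!(latest, AssetIdV5(7));
}
```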
Now those calls work :) --------- Co-authored-by: command-bot <> Co-authored-by: Branislav Kontur Co-authored-by: GitHub Action --- Cargo.lock | 13 +++ .../assets/asset-hub-rococo/Cargo.toml | 1 + .../assets/asset-hub-rococo/src/lib.rs | 28 ++--- .../assets/asset-hub-rococo/tests/tests.rs | 24 +++- .../assets/asset-hub-westend/Cargo.toml | 1 + .../assets/asset-hub-westend/src/lib.rs | 29 ++--- .../assets/asset-hub-westend/tests/tests.rs | 18 ++- .../runtimes/assets/common/src/lib.rs | 79 +++++++++---- .../runtimes/assets/test-utils/Cargo.toml | 4 + .../assets/test-utils/src/test_cases.rs | 110 +++++++++++++++++- .../bridge-hubs/bridge-hub-rococo/Cargo.toml | 1 + .../bridge-hubs/bridge-hub-rococo/src/lib.rs | 3 +- .../bridge-hub-rococo/tests/tests.rs | 16 ++- .../bridge-hubs/bridge-hub-westend/Cargo.toml | 1 + .../bridge-hubs/bridge-hub-westend/src/lib.rs | 3 +- .../bridge-hub-westend/tests/tests.rs | 16 ++- .../collectives-westend/Cargo.toml | 1 + .../collectives-westend/src/lib.rs | 3 +- .../collectives-westend/tests/tests.rs | 14 ++- .../coretime/coretime-rococo/Cargo.toml | 4 + .../coretime/coretime-rococo/src/lib.rs | 3 +- .../coretime/coretime-rococo/tests/tests.rs | 14 ++- .../coretime/coretime-westend/Cargo.toml | 3 + .../coretime/coretime-westend/src/lib.rs | 3 +- .../coretime/coretime-westend/tests/tests.rs | 14 ++- .../runtimes/people/people-rococo/Cargo.toml | 3 + .../runtimes/people/people-rococo/src/lib.rs | 3 +- .../people/people-rococo/tests/tests.rs | 14 ++- .../runtimes/people/people-westend/Cargo.toml | 3 + .../runtimes/people/people-westend/src/lib.rs | 3 +- .../people/people-westend/tests/tests.rs | 14 ++- .../parachains/runtimes/test-utils/Cargo.toml | 4 + .../runtimes/test-utils/src/test_cases.rs | 67 ++++++++++- .../xcm-runtime-apis/tests/fee_estimation.rs | 23 ++++ polkadot/xcm/xcm-runtime-apis/tests/mock.rs | 3 +- prdoc/pr_6459.prdoc | 22 ++++ 36 files changed, 483 insertions(+), 82 deletions(-) create mode 100644 prdoc/pr_6459.prdoc diff --git a/Cargo.lock b/Cargo.lock index c2d2eb3e9644..58b8b222cce4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -899,6 +899,7 @@ dependencies = [ "pallet-xcm-benchmarks 7.0.0", "pallet-xcm-bridge-hub-router 0.5.0", "parachains-common 7.0.0", + "parachains-runtimes-test-utils 7.0.0", "parity-scale-codec", "polkadot-parachain-primitives 6.0.0", "polkadot-runtime-common 7.0.0", @@ -1036,6 +1037,7 @@ dependencies = [ "pallet-xcm-benchmarks 7.0.0", "pallet-xcm-bridge-hub-router 0.5.0", "parachains-common 7.0.0", + "parachains-runtimes-test-utils 7.0.0", "parity-scale-codec", "polkadot-parachain-primitives 6.0.0", "polkadot-runtime-common 7.0.0", @@ -1077,6 +1079,7 @@ dependencies = [ "frame-support 28.0.0", "frame-system 28.0.0", "hex-literal", + "pallet-asset-conversion 10.0.0", "pallet-assets 29.1.0", "pallet-balances 28.0.0", "pallet-collator-selection 9.0.0", @@ -1094,6 +1097,7 @@ dependencies = [ "staging-xcm-builder 7.0.0", "staging-xcm-executor 7.0.0", "substrate-wasm-builder 17.0.0", + "xcm-runtime-apis 0.1.0", ] [[package]] @@ -2578,6 +2582,7 @@ dependencies = [ "pallet-xcm-benchmarks 7.0.0", "pallet-xcm-bridge-hub 0.2.0", "parachains-common 7.0.0", + "parachains-runtimes-test-utils 7.0.0", "parity-scale-codec", "polkadot-parachain-primitives 6.0.0", "polkadot-runtime-common 7.0.0", @@ -2815,6 +2820,7 @@ dependencies = [ "pallet-xcm-benchmarks 7.0.0", "pallet-xcm-bridge-hub 0.2.0", "parachains-common 7.0.0", + "parachains-runtimes-test-utils 7.0.0", "parity-scale-codec", "polkadot-parachain-primitives 6.0.0", 
"polkadot-runtime-common 7.0.0", @@ -3550,6 +3556,7 @@ dependencies = [ "pallet-utility 28.0.0", "pallet-xcm 7.0.0", "parachains-common 7.0.0", + "parachains-runtimes-test-utils 7.0.0", "parity-scale-codec", "polkadot-parachain-primitives 6.0.0", "polkadot-runtime-common 7.0.0", @@ -3991,6 +3998,7 @@ dependencies = [ "pallet-xcm 7.0.0", "pallet-xcm-benchmarks 7.0.0", "parachains-common 7.0.0", + "parachains-runtimes-test-utils 7.0.0", "parity-scale-codec", "polkadot-parachain-primitives 6.0.0", "polkadot-runtime-common 7.0.0", @@ -4090,6 +4098,7 @@ dependencies = [ "pallet-xcm 7.0.0", "pallet-xcm-benchmarks 7.0.0", "parachains-common 7.0.0", + "parachains-runtimes-test-utils 7.0.0", "parity-scale-codec", "polkadot-parachain-primitives 6.0.0", "polkadot-runtime-common 7.0.0", @@ -16165,6 +16174,7 @@ dependencies = [ "pallet-session 28.0.0", "pallet-timestamp 27.0.0", "pallet-xcm 7.0.0", + "parachains-common 7.0.0", "parity-scale-codec", "polkadot-parachain-primitives 6.0.0", "sp-consensus-aura 0.32.0", @@ -16176,6 +16186,7 @@ dependencies = [ "staging-xcm 7.0.0", "staging-xcm-executor 7.0.0", "substrate-wasm-builder 17.0.0", + "xcm-runtime-apis 0.1.0", ] [[package]] @@ -16574,6 +16585,7 @@ dependencies = [ "pallet-xcm 7.0.0", "pallet-xcm-benchmarks 7.0.0", "parachains-common 7.0.0", + "parachains-runtimes-test-utils 7.0.0", "parity-scale-codec", "polkadot-parachain-primitives 6.0.0", "polkadot-runtime-common 7.0.0", @@ -16675,6 +16687,7 @@ dependencies = [ "pallet-xcm 7.0.0", "pallet-xcm-benchmarks 7.0.0", "parachains-common 7.0.0", + "parachains-runtimes-test-utils 7.0.0", "parity-scale-codec", "polkadot-parachain-primitives 6.0.0", "polkadot-runtime-common 7.0.0", diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml index 42adaba7a27c..bfe8ed869758 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml @@ -99,6 +99,7 @@ snowbridge-router-primitives = { workspace = true } [dev-dependencies] asset-test-utils = { workspace = true, default-features = true } +parachains-runtimes-test-utils = { workspace = true, default-features = true } [build-dependencies] substrate-wasm-builder = { optional = true, workspace = true, default-features = true } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index bc48c2d805fd..b6f3ccd3901b 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -1415,37 +1415,31 @@ impl_runtime_apis! { // We accept the native token to pay fees. let mut acceptable_assets = vec![AssetId(native_token.clone())]; // We also accept all assets in a pool with the native token. - let assets_in_pool_with_native = assets_common::get_assets_in_pool_with::< - Runtime, - xcm::v5::Location - >(&native_token).map_err(|()| XcmPaymentApiError::VersionedConversionFailed)?.into_iter(); - acceptable_assets.extend(assets_in_pool_with_native); + acceptable_assets.extend( + assets_common::PoolAdapter::::get_assets_in_pool_with(native_token) + .map_err(|()| XcmPaymentApiError::VersionedConversionFailed)? 
+ ); PolkadotXcm::query_acceptable_payment_assets(xcm_version, acceptable_assets) } fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result { let native_asset = xcm_config::TokenLocation::get(); let fee_in_native = WeightToFee::weight_to_fee(&weight); - match asset.try_as::() { + let latest_asset_id: Result = asset.clone().try_into(); + match latest_asset_id { Ok(asset_id) if asset_id.0 == native_asset => { // for native token Ok(fee_in_native) }, Ok(asset_id) => { - let assets_in_pool_with_this_asset: Vec<_> = assets_common::get_assets_in_pool_with::< - Runtime, - xcm::v5::Location - >(&asset_id.0).map_err(|()| XcmPaymentApiError::VersionedConversionFailed)?; - if assets_in_pool_with_this_asset - .into_iter() - .map(|asset_id| asset_id.0) - .any(|location| location == native_asset) { - pallet_asset_conversion::Pallet::::quote_price_tokens_for_exact_tokens( - asset_id.clone().0, + // Try to get current price of `asset_id` in `native_asset`. + if let Ok(Some(swapped_in_native)) = assets_common::PoolAdapter::::quote_price_tokens_for_exact_tokens( + asset_id.0.clone(), native_asset, fee_in_native, true, // We include the fee. - ).ok_or(XcmPaymentApiError::AssetNotFound) + ) { + Ok(swapped_in_native) } else { log::trace!(target: "xcm::xcm_runtime_apis", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); Err(XcmPaymentApiError::AssetNotFound) diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs index 5da8b45417a3..d056405adff8 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs @@ -24,10 +24,10 @@ use asset_hub_rococo_runtime::{ ForeignAssetFeeAsExistentialDepositMultiplierFeeCharger, LocationToAccountId, StakingPot, TokenLocation, TrustBackedAssetsPalletLocation, XcmConfig, }, - AllPalletsWithoutSystem, AssetConversion, AssetDeposit, Assets, Balances, CollatorSelection, - ExistentialDeposit, ForeignAssets, ForeignAssetsInstance, MetadataDepositBase, - MetadataDepositPerByte, ParachainSystem, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, - SessionKeys, TrustBackedAssetsInstance, XcmpQueue, + AllPalletsWithoutSystem, AssetConversion, AssetDeposit, Assets, Balances, Block, + CollatorSelection, ExistentialDeposit, ForeignAssets, ForeignAssetsInstance, + MetadataDepositBase, MetadataDepositPerByte, ParachainSystem, Runtime, RuntimeCall, + RuntimeEvent, RuntimeOrigin, SessionKeys, TrustBackedAssetsInstance, XcmpQueue, }; use asset_test_utils::{ test_cases_over_bridge::TestBridgingConfig, CollatorSessionKey, CollatorSessionKeys, @@ -1471,3 +1471,19 @@ fn location_conversion_works() { assert_eq!(got, expected, "{}", tc.description); } } + +#[test] +fn xcm_payment_api_works() { + parachains_runtimes_test_utils::test_cases::xcm_payment_api_with_native_token_works::< + Runtime, + RuntimeCall, + RuntimeOrigin, + Block, + >(); + asset_test_utils::test_cases::xcm_payment_api_with_pools_works::< + Runtime, + RuntimeCall, + RuntimeOrigin, + Block, + >(); +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml index d5eaa43ab834..a3eaebb59153 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml @@ -101,6 +101,7 @@ snowbridge-router-primitives = { workspace = true } [dev-dependencies] 
asset-test-utils = { workspace = true, default-features = true } +parachains-runtimes-test-utils = { workspace = true, default-features = true } [build-dependencies] substrate-wasm-builder = { optional = true, workspace = true, default-features = true } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index cafea3b6ff8b..f20b6b1fece0 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -1528,38 +1528,31 @@ impl_runtime_apis! { // We accept the native token to pay fees. let mut acceptable_assets = vec![AssetId(native_token.clone())]; // We also accept all assets in a pool with the native token. - let assets_in_pool_with_native = assets_common::get_assets_in_pool_with::< - Runtime, - xcm::v5::Location - >(&native_token).map_err(|()| XcmPaymentApiError::VersionedConversionFailed)?.into_iter(); - acceptable_assets.extend(assets_in_pool_with_native); + acceptable_assets.extend( + assets_common::PoolAdapter::::get_assets_in_pool_with(native_token) + .map_err(|()| XcmPaymentApiError::VersionedConversionFailed)? + ); PolkadotXcm::query_acceptable_payment_assets(xcm_version, acceptable_assets) } fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result { let native_asset = xcm_config::WestendLocation::get(); let fee_in_native = WeightToFee::weight_to_fee(&weight); - match asset.try_as::() { + let latest_asset_id: Result = asset.clone().try_into(); + match latest_asset_id { Ok(asset_id) if asset_id.0 == native_asset => { // for native asset Ok(fee_in_native) }, Ok(asset_id) => { - // We recognize assets in a pool with the native one. - let assets_in_pool_with_this_asset: Vec<_> = assets_common::get_assets_in_pool_with::< - Runtime, - xcm::v5::Location - >(&asset_id.0).map_err(|()| XcmPaymentApiError::VersionedConversionFailed)?; - if assets_in_pool_with_this_asset - .into_iter() - .map(|asset_id| asset_id.0) - .any(|location| location == native_asset) { - pallet_asset_conversion::Pallet::::quote_price_tokens_for_exact_tokens( - asset_id.clone().0, + // Try to get current price of `asset_id` in `native_asset`. + if let Ok(Some(swapped_in_native)) = assets_common::PoolAdapter::::quote_price_tokens_for_exact_tokens( + asset_id.0.clone(), native_asset, fee_in_native, true, // We include the fee. 
- ).ok_or(XcmPaymentApiError::AssetNotFound) + ) { + Ok(swapped_in_native) } else { log::trace!(target: "xcm::xcm_runtime_apis", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); Err(XcmPaymentApiError::AssetNotFound) diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs index 5d0f843554a1..109a5dd2c029 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs @@ -24,7 +24,7 @@ use asset_hub_westend_runtime::{ ForeignAssetFeeAsExistentialDepositMultiplierFeeCharger, LocationToAccountId, StakingPot, TrustBackedAssetsPalletLocation, WestendLocation, XcmConfig, }, - AllPalletsWithoutSystem, Assets, Balances, ExistentialDeposit, ForeignAssets, + AllPalletsWithoutSystem, Assets, Balances, Block, ExistentialDeposit, ForeignAssets, ForeignAssetsInstance, MetadataDepositBase, MetadataDepositPerByte, ParachainSystem, PolkadotXcm, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, SessionKeys, TrustBackedAssetsInstance, XcmpQueue, @@ -1446,3 +1446,19 @@ fn location_conversion_works() { assert_eq!(got, expected, "{}", tc.description); } } + +#[test] +fn xcm_payment_api_works() { + parachains_runtimes_test_utils::test_cases::xcm_payment_api_with_native_token_works::< + Runtime, + RuntimeCall, + RuntimeOrigin, + Block, + >(); + asset_test_utils::test_cases::xcm_payment_api_with_pools_works::< + Runtime, + RuntimeCall, + RuntimeOrigin, + Block, + >(); +} diff --git a/cumulus/parachains/runtimes/assets/common/src/lib.rs b/cumulus/parachains/runtimes/assets/common/src/lib.rs index 1d2d45b42c5d..25c2df6b68d1 100644 --- a/cumulus/parachains/runtimes/assets/common/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/common/src/lib.rs @@ -28,7 +28,7 @@ extern crate alloc; use crate::matching::{LocalLocationPattern, ParentLocation}; use alloc::vec::Vec; use codec::{Decode, EncodeLike}; -use core::cmp::PartialEq; +use core::{cmp::PartialEq, marker::PhantomData}; use frame_support::traits::{Equals, EverythingBut}; use parachains_common::{AssetIdForTrustBackedAssets, CollectionId, ItemId}; use sp_runtime::traits::TryConvertInto; @@ -137,24 +137,62 @@ pub type PoolAssetsConvertedConcreteId = TryConvertInto, >; -/// Returns an iterator of all assets in a pool with `asset`. -/// -/// Should only be used in runtime APIs since it iterates over the whole -/// `pallet_asset_conversion::Pools` map. -/// -/// It takes in any version of an XCM Location but always returns the latest one. -/// This is to allow some margin of migrating the pools when updating the XCM version. -/// -/// An error of type `()` is returned if the version conversion fails for XCM locations. -/// This error should be mapped by the caller to a more descriptive one. -pub fn get_assets_in_pool_with< - Runtime: pallet_asset_conversion::Config, - L: TryInto + Clone + Decode + EncodeLike + PartialEq, ->( - asset: &L, -) -> Result, ()> { - pallet_asset_conversion::Pools::::iter_keys() - .filter_map(|(asset_1, asset_2)| { +/// Adapter implementation for accessing pools (`pallet_asset_conversion`) that uses `AssetKind` as +/// a `xcm::v*` which could be different from the `xcm::latest`. +pub struct PoolAdapter(PhantomData); +impl< + Runtime: pallet_asset_conversion::Config, + L: TryFrom + TryInto + Clone + Decode + EncodeLike + PartialEq, + > PoolAdapter +{ + /// Returns a vector of all assets in a pool with `asset`. 
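+ /// (Used by the runtimes' `XcmPaymentApi` implementations above to advertise assets pooled with the native token as acceptable for fee payment.)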
+ /// + /// Should only be used in runtime APIs since it iterates over the whole + /// `pallet_asset_conversion::Pools` map. + /// + /// It takes in any version of an XCM Location but always returns the latest one. + /// This is to allow some margin of migrating the pools when updating the XCM version. + /// + /// An error of type `()` is returned if the version conversion fails for XCM locations. + /// This error should be mapped by the caller to a more descriptive one. + pub fn get_assets_in_pool_with(asset: Location) -> Result, ()> { + // convert latest to the `L` version. + let asset: L = asset.try_into().map_err(|_| ())?; + Self::iter_assets_in_pool_with(&asset) + .map(|location| { + // convert `L` to the latest `AssetId` + location.try_into().map_err(|_| ()).map(AssetId) + }) + .collect::, _>>() + } + + /// Provides the current price. Wrapper over + /// `pallet_asset_conversion::Pallet::::quote_price_tokens_for_exact_tokens`. + /// + /// An error of type `()` is returned if the version conversion fails for XCM locations. + /// This error should be mapped by the caller to a more descriptive one. + pub fn quote_price_tokens_for_exact_tokens( + asset_1: Location, + asset_2: Location, + amount: Runtime::Balance, + include_fees: bool, + ) -> Result, ()> { + // Convert latest to the `L` version. + let asset_1: L = asset_1.try_into().map_err(|_| ())?; + let asset_2: L = asset_2.try_into().map_err(|_| ())?; + + // Quote swap price. + Ok(pallet_asset_conversion::Pallet::::quote_price_tokens_for_exact_tokens( + asset_1, + asset_2, + amount, + include_fees, + )) + } + + /// Helper function for filtering pools. + pub fn iter_assets_in_pool_with(asset: &L) -> impl Iterator + '_ { + pallet_asset_conversion::Pools::::iter_keys().filter_map(|(asset_1, asset_2)| { if asset_1 == *asset { Some(asset_2) } else if asset_2 == *asset { @@ -163,8 +201,7 @@ pub fn get_assets_in_pool_with< None } }) - .map(|location| location.try_into().map_err(|_| ()).map(AssetId)) - .collect::, _>>() + } } #[cfg(test)] diff --git a/cumulus/parachains/runtimes/assets/test-utils/Cargo.toml b/cumulus/parachains/runtimes/assets/test-utils/Cargo.toml index 529d6460fc4e..f6b3c13e8102 100644 --- a/cumulus/parachains/runtimes/assets/test-utils/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/test-utils/Cargo.toml @@ -16,6 +16,7 @@ codec = { features = ["derive", "max-encoded-len"], workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } pallet-assets = { workspace = true } +pallet-asset-conversion = { workspace = true } pallet-balances = { workspace = true } pallet-timestamp = { workspace = true } pallet-session = { workspace = true } @@ -36,6 +37,7 @@ xcm = { workspace = true } xcm-builder = { workspace = true } xcm-executor = { workspace = true } pallet-xcm = { workspace = true } +xcm-runtime-apis = { workspace = true } # Bridges pallet-xcm-bridge-hub-router = { workspace = true } @@ -55,6 +57,7 @@ std = [ "cumulus-primitives-core/std", "frame-support/std", "frame-system/std", + "pallet-asset-conversion/std", "pallet-assets/std", "pallet-balances/std", "pallet-collator-selection/std", @@ -69,5 +72,6 @@ std = [ "sp-runtime/std", "xcm-builder/std", "xcm-executor/std", + "xcm-runtime-apis/std", "xcm/std", ] diff --git a/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs b/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs index 8dc720e27753..aeacc1a5471e 100644 --- a/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs +++
b/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs @@ -34,11 +34,14 @@ use parachains_runtimes_test_utils::{ CollatorSessionKeys, ExtBuilder, SlotDurations, ValidatorIdOf, XcmReceivedFrom, }; use sp_runtime::{ - traits::{MaybeEquivalence, StaticLookup, Zero}, + traits::{Block as BlockT, MaybeEquivalence, StaticLookup, Zero}, DispatchError, Saturating, }; use xcm::{latest::prelude::*, VersionedAssets}; use xcm_executor::{traits::ConvertLocation, XcmExecutor}; +use xcm_runtime_apis::fees::{ + runtime_decl_for_xcm_payment_api::XcmPaymentApiV1, Error as XcmPaymentApiError, +}; type RuntimeHelper = parachains_runtimes_test_utils::RuntimeHelper; @@ -1584,3 +1587,108 @@ pub fn reserve_transfer_native_asset_to_non_teleport_para_works< ); }) } + +pub fn xcm_payment_api_with_pools_works() +where + Runtime: XcmPaymentApiV1 + + frame_system::Config + + pallet_balances::Config + + pallet_session::Config + + pallet_xcm::Config + + parachain_info::Config + + pallet_collator_selection::Config + + cumulus_pallet_parachain_system::Config + + cumulus_pallet_xcmp_queue::Config + + pallet_timestamp::Config + + pallet_assets::Config< + pallet_assets::Instance1, + AssetId = u32, + Balance = ::Balance, + > + pallet_asset_conversion::Config< + AssetKind = xcm::v5::Location, + Balance = ::Balance, + >, + ValidatorIdOf: From>, + RuntimeOrigin: OriginTrait::AccountId>, + <::Lookup as StaticLookup>::Source: + From<::AccountId>, + Block: BlockT, +{ + use xcm::prelude::*; + + ExtBuilder::::default().build().execute_with(|| { + let test_account = AccountId::from([0u8; 32]); + let transfer_amount = 100u128; + let xcm_to_weigh = Xcm::::builder_unsafe() + .withdraw_asset((Here, transfer_amount)) + .buy_execution((Here, transfer_amount), Unlimited) + .deposit_asset(AllCounted(1), [1u8; 32]) + .build(); + let versioned_xcm_to_weigh = VersionedXcm::from(xcm_to_weigh.clone().into()); + + let xcm_weight = Runtime::query_xcm_weight(versioned_xcm_to_weigh); + assert!(xcm_weight.is_ok()); + let native_token: Location = Parent.into(); + let native_token_versioned = VersionedAssetId::from(AssetId(native_token.clone())); + let execution_fees = + Runtime::query_weight_to_asset_fee(xcm_weight.unwrap(), native_token_versioned); + assert!(execution_fees.is_ok()); + + // We need some balance to create an asset. + assert_ok!( + pallet_balances::Pallet::::mint_into(&test_account, 3_000_000_000_000,) + ); + + // Now we try to use an asset that's not in a pool. + let asset_id = 1984u32; // USDT. + let asset_not_in_pool: Location = + (PalletInstance(50), GeneralIndex(asset_id.into())).into(); + assert_ok!(pallet_assets::Pallet::::create( + RuntimeOrigin::signed(test_account.clone()), + asset_id.into(), + test_account.clone().into(), + 1000 + )); + let execution_fees = Runtime::query_weight_to_asset_fee( + xcm_weight.unwrap(), + asset_not_in_pool.clone().into(), + ); + assert_eq!(execution_fees, Err(XcmPaymentApiError::AssetNotFound)); + + // We add it to a pool with native. + assert_ok!(pallet_asset_conversion::Pallet::::create_pool( + RuntimeOrigin::signed(test_account.clone()), + native_token.clone().try_into().unwrap(), + asset_not_in_pool.clone().try_into().unwrap() + )); + let execution_fees = Runtime::query_weight_to_asset_fee( + xcm_weight.unwrap(), + asset_not_in_pool.clone().into(), + ); + // Still not enough because it doesn't have any liquidity. + assert_eq!(execution_fees, Err(XcmPaymentApiError::AssetNotFound)); + + // We mint some of the asset... 
+ assert_ok!(pallet_assets::Pallet::::mint( + RuntimeOrigin::signed(test_account.clone()), + asset_id.into(), + test_account.clone().into(), + 3_000_000_000_000, + )); + // ...so we can add liquidity to the pool. + assert_ok!(pallet_asset_conversion::Pallet::::add_liquidity( + RuntimeOrigin::signed(test_account.clone()), + native_token.try_into().unwrap(), + asset_not_in_pool.clone().try_into().unwrap(), + 1_000_000_000_000, + 2_000_000_000_000, + 0, + 0, + test_account + )); + let execution_fees = + Runtime::query_weight_to_asset_fee(xcm_weight.unwrap(), asset_not_in_pool.into()); + // Now it works! + assert_ok!(execution_fees); + }); +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml index 4af8a9f43850..3eb06e3a18c1 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml @@ -122,6 +122,7 @@ bridge-hub-test-utils = { workspace = true, default-features = true } bridge-runtime-common = { features = ["integrity-test"], workspace = true, default-features = true } pallet-bridge-relayers = { features = ["integrity-test"], workspace = true } snowbridge-runtime-test-common = { workspace = true, default-features = true } +parachains-runtimes-test-utils = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index 3f3316d0be49..598afeddb984 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -847,7 +847,8 @@ impl_runtime_apis! 
{ } fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result { - match asset.try_as::() { + let latest_asset_id: Result = asset.clone().try_into(); + match latest_asset_id { Ok(asset_id) if asset_id.0 == xcm_config::TokenLocation::get() => { // for native token Ok(WeightToFee::weight_to_fee(&weight)) diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs index 6ca858e961d3..29f9615bff6a 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs @@ -20,9 +20,9 @@ use bp_polkadot_core::Signature; use bridge_hub_rococo_runtime::{ bridge_common_config, bridge_to_bulletin_config, bridge_to_westend_config, xcm_config::{RelayNetwork, TokenLocation, XcmConfig}, - AllPalletsWithoutSystem, BridgeRejectObsoleteHeadersAndMessages, Executive, ExistentialDeposit, - ParachainSystem, PolkadotXcm, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, SessionKeys, - TransactionPayment, TxExtension, UncheckedExtrinsic, + AllPalletsWithoutSystem, Block, BridgeRejectObsoleteHeadersAndMessages, Executive, + ExistentialDeposit, ParachainSystem, PolkadotXcm, Runtime, RuntimeCall, RuntimeEvent, + RuntimeOrigin, SessionKeys, TransactionPayment, TxExtension, UncheckedExtrinsic, }; use bridge_hub_test_utils::SlotDurations; use codec::{Decode, Encode}; @@ -838,3 +838,13 @@ fn location_conversion_works() { assert_eq!(got, expected, "{}", tc.description); } } + +#[test] +fn xcm_payment_api_works() { + parachains_runtimes_test_utils::test_cases::xcm_payment_api_with_native_token_works::< + Runtime, + RuntimeCall, + RuntimeOrigin, + Block, + >(); +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml index 637e7c710640..871bf44ec5b2 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml @@ -121,6 +121,7 @@ bridge-hub-test-utils = { workspace = true, default-features = true } bridge-runtime-common = { features = ["integrity-test"], workspace = true, default-features = true } pallet-bridge-relayers = { features = ["integrity-test"], workspace = true } snowbridge-runtime-test-common = { workspace = true, default-features = true } +parachains-runtimes-test-utils = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs index 65e7d291dc37..ae3dbfa06cba 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -779,7 +779,8 @@ impl_runtime_apis! 
{ } fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result { - match asset.try_as::() { + let latest_asset_id: Result = asset.clone().try_into(); + match latest_asset_id { Ok(asset_id) if asset_id.0 == xcm_config::WestendLocation::get() => { // for native token Ok(WeightToFee::weight_to_fee(&weight)) diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs index 84025c4cefeb..d7e70ed769b1 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs @@ -27,9 +27,9 @@ use bridge_hub_westend_runtime::{ bridge_common_config, bridge_to_rococo_config, bridge_to_rococo_config::RococoGlobalConsensusNetwork, xcm_config::{LocationToAccountId, RelayNetwork, WestendLocation, XcmConfig}, - AllPalletsWithoutSystem, BridgeRejectObsoleteHeadersAndMessages, Executive, ExistentialDeposit, - ParachainSystem, PolkadotXcm, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, SessionKeys, - TransactionPayment, TxExtension, UncheckedExtrinsic, + AllPalletsWithoutSystem, Block, BridgeRejectObsoleteHeadersAndMessages, Executive, + ExistentialDeposit, ParachainSystem, PolkadotXcm, Runtime, RuntimeCall, RuntimeEvent, + RuntimeOrigin, SessionKeys, TransactionPayment, TxExtension, UncheckedExtrinsic, }; use bridge_to_rococo_config::{ BridgeGrandpaRococoInstance, BridgeHubRococoLocation, BridgeParachainRococoInstance, @@ -525,3 +525,13 @@ fn location_conversion_works() { assert_eq!(got, expected, "{}", tc.description); } } + +#[test] +fn xcm_payment_api_works() { + parachains_runtimes_test_utils::test_cases::xcm_payment_api_with_native_token_works::< + Runtime, + RuntimeCall, + RuntimeOrigin, + Block, + >(); +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml b/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml index e03fc934ceaf..810abcf572d4 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml @@ -94,6 +94,7 @@ substrate-wasm-builder = { optional = true, workspace = true, default-features = [dev-dependencies] sp-io = { features = ["std"], workspace = true, default-features = true } +parachains-runtimes-test-utils = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs index 0ee3a4068718..f4c62f212e8c 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs @@ -963,7 +963,8 @@ impl_runtime_apis! 
{ } fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result { - match asset.try_as::() { + let latest_asset_id: Result = asset.clone().try_into(); + match latest_asset_id { Ok(asset_id) if asset_id.0 == xcm_config::WndLocation::get() => { // for native token Ok(WeightToFee::weight_to_fee(&weight)) diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/tests/tests.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/tests/tests.rs index 7add10559d84..c9191eba49f6 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/tests/tests.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/tests/tests.rs @@ -16,7 +16,9 @@ #![cfg(test)] -use collectives_westend_runtime::xcm_config::LocationToAccountId; +use collectives_westend_runtime::{ + xcm_config::LocationToAccountId, Block, Runtime, RuntimeCall, RuntimeOrigin, +}; use parachains_common::AccountId; use sp_core::crypto::Ss58Codec; use xcm::latest::prelude::*; @@ -132,3 +134,13 @@ fn location_conversion_works() { assert_eq!(got, expected, "{}", tc.description); } } + +#[test] +fn xcm_payment_api_works() { + parachains_runtimes_test_utils::test_cases::xcm_payment_api_with_native_token_works::< + Runtime, + RuntimeCall, + RuntimeOrigin, + Block, + >(); +} diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml b/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml index a38b7400cfa3..02807827cf92 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml @@ -80,6 +80,9 @@ parachain-info = { workspace = true } parachains-common = { workspace = true } testnet-parachains-constants = { features = ["rococo"], workspace = true } +[dev-dependencies] +parachains-runtimes-test-utils = { workspace = true } + [features] default = ["std"] std = [ @@ -120,6 +123,7 @@ std = [ "pallet-xcm/std", "parachain-info/std", "parachains-common/std", + "parachains-runtimes-test-utils/std", "polkadot-parachain-primitives/std", "polkadot-runtime-common/std", "rococo-runtime-constants/std", diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs index 3f3126b749d8..ae3ad93a9e85 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs @@ -835,7 +835,8 @@ impl_runtime_apis! 
{ } fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result { - match asset.try_as::() { + let latest_asset_id: Result = asset.clone().try_into(); + match latest_asset_id { Ok(asset_id) if asset_id.0 == xcm_config::RocRelayLocation::get() => { // for native token Ok(WeightToFee::weight_to_fee(&weight)) diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/tests/tests.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/tests/tests.rs index 2cabce567b6e..89a593ab0f57 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/tests/tests.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/tests/tests.rs @@ -16,7 +16,9 @@ #![cfg(test)] -use coretime_rococo_runtime::xcm_config::LocationToAccountId; +use coretime_rococo_runtime::{ + xcm_config::LocationToAccountId, Block, Runtime, RuntimeCall, RuntimeOrigin, +}; use parachains_common::AccountId; use sp_core::crypto::Ss58Codec; use xcm::latest::prelude::*; @@ -132,3 +134,13 @@ fn location_conversion_works() { assert_eq!(got, expected, "{}", tc.description); } } + +#[test] +fn xcm_payment_api_works() { + parachains_runtimes_test_utils::test_cases::xcm_payment_api_with_native_token_works::< + Runtime, + RuntimeCall, + RuntimeOrigin, + Block, + >(); +} diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml b/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml index 149fa5d0b045..34353d312b1f 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml @@ -80,6 +80,9 @@ parachain-info = { workspace = true } parachains-common = { workspace = true } testnet-parachains-constants = { features = ["westend"], workspace = true } +[dev-dependencies] +parachains-runtimes-test-utils = { workspace = true, default-features = true } + [features] default = ["std"] std = [ diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs index 098a17cc9984..39ea39f25a8b 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs @@ -827,7 +827,8 @@ impl_runtime_apis! 
{ } fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result { - match asset.try_as::() { + let latest_asset_id: Result = asset.clone().try_into(); + match latest_asset_id { Ok(asset_id) if asset_id.0 == xcm_config::TokenRelayLocation::get() => { // for native token Ok(WeightToFee::weight_to_fee(&weight)) diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/tests/tests.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/tests/tests.rs index e391d71a9ab7..976ce23d6e87 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/tests/tests.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/tests/tests.rs @@ -16,7 +16,9 @@ #![cfg(test)] -use coretime_westend_runtime::xcm_config::LocationToAccountId; +use coretime_westend_runtime::{ + xcm_config::LocationToAccountId, Block, Runtime, RuntimeCall, RuntimeOrigin, +}; use parachains_common::AccountId; use sp_core::crypto::Ss58Codec; use xcm::latest::prelude::*; @@ -132,3 +134,13 @@ fn location_conversion_works() { assert_eq!(got, expected, "{}", tc.description); } } + +#[test] +fn xcm_payment_api_works() { + parachains_runtimes_test_utils::test_cases::xcm_payment_api_with_native_token_works::< + Runtime, + RuntimeCall, + RuntimeOrigin, + Block, + >(); +} diff --git a/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml b/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml index 34458c2352fb..a55143b62071 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml @@ -77,6 +77,9 @@ parachain-info = { workspace = true } parachains-common = { workspace = true } testnet-parachains-constants = { features = ["rococo"], workspace = true } +[dev-dependencies] +parachains-runtimes-test-utils = { workspace = true, default-features = true } + [features] default = ["std"] std = [ diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs index 7921030f2bb8..dc5f2ac0997c 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs @@ -783,7 +783,8 @@ impl_runtime_apis! 
{ } fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result { - match asset.try_as::() { + let latest_asset_id: Result = asset.clone().try_into(); + match latest_asset_id { Ok(asset_id) if asset_id.0 == xcm_config::RelayLocation::get() => { // for native token Ok(WeightToFee::weight_to_fee(&weight)) diff --git a/cumulus/parachains/runtimes/people/people-rococo/tests/tests.rs b/cumulus/parachains/runtimes/people/people-rococo/tests/tests.rs index 3627d9c40ec2..00fe7781822a 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/tests/tests.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/tests/tests.rs @@ -17,7 +17,9 @@ #![cfg(test)] use parachains_common::AccountId; -use people_rococo_runtime::xcm_config::LocationToAccountId; +use people_rococo_runtime::{ + xcm_config::LocationToAccountId, Block, Runtime, RuntimeCall, RuntimeOrigin, +}; use sp_core::crypto::Ss58Codec; use xcm::latest::prelude::*; use xcm_runtime_apis::conversions::LocationToAccountHelper; @@ -132,3 +134,13 @@ fn location_conversion_works() { assert_eq!(got, expected, "{}", tc.description); } } + +#[test] +fn xcm_payment_api_works() { + parachains_runtimes_test_utils::test_cases::xcm_payment_api_with_native_token_works::< + Runtime, + RuntimeCall, + RuntimeOrigin, + Block, + >(); +} diff --git a/cumulus/parachains/runtimes/people/people-westend/Cargo.toml b/cumulus/parachains/runtimes/people/people-westend/Cargo.toml index 6840b97d8c3f..4d66332e96dd 100644 --- a/cumulus/parachains/runtimes/people/people-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/people/people-westend/Cargo.toml @@ -77,6 +77,9 @@ parachain-info = { workspace = true } parachains-common = { workspace = true } testnet-parachains-constants = { features = ["westend"], workspace = true } +[dev-dependencies] +parachains-runtimes-test-utils = { workspace = true, default-features = true } + [features] default = ["std"] std = [ diff --git a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs index 19a64ab8d6e8..1b9a3b60a2c4 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs @@ -781,7 +781,8 @@ impl_runtime_apis! 
{ } fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result { - match asset.try_as::() { + let latest_asset_id: Result = asset.clone().try_into(); + match latest_asset_id { Ok(asset_id) if asset_id.0 == xcm_config::RelayLocation::get() => { // for native token Ok(WeightToFee::weight_to_fee(&weight)) diff --git a/cumulus/parachains/runtimes/people/people-westend/tests/tests.rs b/cumulus/parachains/runtimes/people/people-westend/tests/tests.rs index fa9331952b4b..5cefec44b1cd 100644 --- a/cumulus/parachains/runtimes/people/people-westend/tests/tests.rs +++ b/cumulus/parachains/runtimes/people/people-westend/tests/tests.rs @@ -17,7 +17,9 @@ #![cfg(test)] use parachains_common::AccountId; -use people_westend_runtime::xcm_config::LocationToAccountId; +use people_westend_runtime::{ + xcm_config::LocationToAccountId, Block, Runtime, RuntimeCall, RuntimeOrigin, +}; use sp_core::crypto::Ss58Codec; use xcm::latest::prelude::*; use xcm_runtime_apis::conversions::LocationToAccountHelper; @@ -132,3 +134,13 @@ fn location_conversion_works() { assert_eq!(got, expected, "{}", tc.description); } } + +#[test] +fn xcm_payment_api_works() { + parachains_runtimes_test_utils::test_cases::xcm_payment_api_with_native_token_works::< + Runtime, + RuntimeCall, + RuntimeOrigin, + Block, + >(); +} diff --git a/cumulus/parachains/runtimes/test-utils/Cargo.toml b/cumulus/parachains/runtimes/test-utils/Cargo.toml index 01d7fcc2b5c8..e9d666617ee2 100644 --- a/cumulus/parachains/runtimes/test-utils/Cargo.toml +++ b/cumulus/parachains/runtimes/test-utils/Cargo.toml @@ -29,6 +29,7 @@ cumulus-pallet-parachain-system = { workspace = true } cumulus-pallet-xcmp-queue = { workspace = true } pallet-collator-selection = { workspace = true } parachain-info = { workspace = true } +parachains-common = { workspace = true } cumulus-primitives-core = { workspace = true } cumulus-primitives-parachain-inherent = { workspace = true } cumulus-test-relay-sproof-builder = { workspace = true } @@ -37,6 +38,7 @@ cumulus-test-relay-sproof-builder = { workspace = true } xcm = { workspace = true } xcm-executor = { workspace = true } pallet-xcm = { workspace = true } +xcm-runtime-apis = { workspace = true } polkadot-parachain-primitives = { workspace = true } [dev-dependencies] @@ -62,11 +64,13 @@ std = [ "pallet-timestamp/std", "pallet-xcm/std", "parachain-info/std", + "parachains-common/std", "polkadot-parachain-primitives/std", "sp-consensus-aura/std", "sp-core/std", "sp-io/std", "sp-runtime/std", "xcm-executor/std", + "xcm-runtime-apis/std", "xcm/std", ] diff --git a/cumulus/parachains/runtimes/test-utils/src/test_cases.rs b/cumulus/parachains/runtimes/test-utils/src/test_cases.rs index a66163154cf6..6bdf3ef09d1b 100644 --- a/cumulus/parachains/runtimes/test-utils/src/test_cases.rs +++ b/cumulus/parachains/runtimes/test-utils/src/test_cases.rs @@ -18,7 +18,15 @@ use crate::{AccountIdOf, CollatorSessionKeys, ExtBuilder, ValidatorIdOf}; use codec::Encode; -use frame_support::{assert_ok, traits::Get}; +use frame_support::{ + assert_ok, + traits::{Get, OriginTrait}, +}; +use parachains_common::AccountId; +use sp_runtime::traits::{Block as BlockT, StaticLookup}; +use xcm_runtime_apis::fees::{ + runtime_decl_for_xcm_payment_api::XcmPaymentApiV1, Error as XcmPaymentApiError, +}; type RuntimeHelper = crate::RuntimeHelper; @@ -128,3 +136,60 @@ pub fn set_storage_keys_by_governance_works( assert_storage(); }); } + +pub fn xcm_payment_api_with_native_token_works() +where + Runtime: XcmPaymentApiV1 + + frame_system::Config + + 
pallet_balances::Config + + pallet_session::Config + + pallet_xcm::Config + + parachain_info::Config + + pallet_collator_selection::Config + + cumulus_pallet_parachain_system::Config + + cumulus_pallet_xcmp_queue::Config + + pallet_timestamp::Config, + ValidatorIdOf: From>, + RuntimeOrigin: OriginTrait::AccountId>, + <::Lookup as StaticLookup>::Source: + From<::AccountId>, + Block: BlockT, +{ + use xcm::prelude::*; + ExtBuilder::::default().build().execute_with(|| { + let transfer_amount = 100u128; + let xcm_to_weigh = Xcm::::builder_unsafe() + .withdraw_asset((Here, transfer_amount)) + .buy_execution((Here, transfer_amount), Unlimited) + .deposit_asset(AllCounted(1), [1u8; 32]) + .build(); + let versioned_xcm_to_weigh = VersionedXcm::from(xcm_to_weigh.clone().into()); + + // We first try calling it with a lower XCM version. + let lower_version_xcm_to_weigh = + versioned_xcm_to_weigh.clone().into_version(XCM_VERSION - 1).unwrap(); + let xcm_weight = Runtime::query_xcm_weight(lower_version_xcm_to_weigh); + assert!(xcm_weight.is_ok()); + let native_token: Location = Parent.into(); + let native_token_versioned = VersionedAssetId::from(AssetId(native_token)); + let lower_version_native_token = + native_token_versioned.clone().into_version(XCM_VERSION - 1).unwrap(); + let execution_fees = + Runtime::query_weight_to_asset_fee(xcm_weight.unwrap(), lower_version_native_token); + assert!(execution_fees.is_ok()); + + // Now we call it with the latest version. + let xcm_weight = Runtime::query_xcm_weight(versioned_xcm_to_weigh); + assert!(xcm_weight.is_ok()); + let execution_fees = + Runtime::query_weight_to_asset_fee(xcm_weight.unwrap(), native_token_versioned); + assert!(execution_fees.is_ok()); + + // If we call it with anything other than the native token it will error. 
+		let non_existent_token: Location = Here.into();
+		let non_existent_token_versioned = VersionedAssetId::from(AssetId(non_existent_token));
+		let execution_fees =
+			Runtime::query_weight_to_asset_fee(xcm_weight.unwrap(), non_existent_token_versioned);
+		assert_eq!(execution_fees, Err(XcmPaymentApiError::AssetNotFound));
+	});
+}
diff --git a/polkadot/xcm/xcm-runtime-apis/tests/fee_estimation.rs b/polkadot/xcm/xcm-runtime-apis/tests/fee_estimation.rs
index 2d14b4e571c6..c3046b134d1f 100644
--- a/polkadot/xcm/xcm-runtime-apis/tests/fee_estimation.rs
+++ b/polkadot/xcm/xcm-runtime-apis/tests/fee_estimation.rs
@@ -353,3 +353,26 @@ fn dry_run_xcm() {
 		);
 	});
 }
+
+#[test]
+fn calling_payment_api_with_a_lower_version_works() {
+	let transfer_amount = 100u128;
+	let xcm_to_weigh = Xcm::::builder_unsafe()
+		.withdraw_asset((Here, transfer_amount))
+		.buy_execution((Here, transfer_amount), Unlimited)
+		.deposit_asset(AllCounted(1), [1u8; 32])
+		.build();
+	let versioned_xcm_to_weigh = VersionedXcm::from(xcm_to_weigh.clone().into());
+	let lower_version_xcm_to_weigh = versioned_xcm_to_weigh.into_version(XCM_VERSION - 1).unwrap();
+	let client = TestClient;
+	let runtime_api = client.runtime_api();
+	let xcm_weight =
+		runtime_api.query_xcm_weight(H256::zero(), lower_version_xcm_to_weigh).unwrap();
+	assert!(xcm_weight.is_ok());
+	let native_token = VersionedAssetId::from(AssetId(Here.into()));
+	let lower_version_native_token = native_token.into_version(XCM_VERSION - 1).unwrap();
+	let execution_fees = runtime_api
+		.query_weight_to_asset_fee(H256::zero(), xcm_weight.unwrap(), lower_version_native_token)
+		.unwrap();
+	assert!(execution_fees.is_ok());
+}
diff --git a/polkadot/xcm/xcm-runtime-apis/tests/mock.rs b/polkadot/xcm/xcm-runtime-apis/tests/mock.rs
index f0a5be908f69..fb5d1ae7c0e5 100644
--- a/polkadot/xcm/xcm-runtime-apis/tests/mock.rs
+++ b/polkadot/xcm/xcm-runtime-apis/tests/mock.rs
@@ -453,7 +453,8 @@ sp_api::mock_impl_runtime_apis! {
 	}

 	fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result {
-		match asset.try_as::() {
+		let latest_asset_id: Result = asset.clone().try_into();
+		match latest_asset_id {
 			Ok(asset_id) if asset_id.0 == HereLocation::get() => {
 				Ok(WeightToFee::weight_to_fee(&weight))
 			},
diff --git a/prdoc/pr_6459.prdoc b/prdoc/pr_6459.prdoc
new file mode 100644
index 000000000000..592ba4c6b29d
--- /dev/null
+++ b/prdoc/pr_6459.prdoc
@@ -0,0 +1,22 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Fix version conversion in XcmPaymentApi::query_weight_to_asset_fee.
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      The `query_weight_to_asset_fee` function of the `XcmPaymentApi` was trying
+      to convert versions in the wrong way.
+      This resulted in all calls made with lower versions failing.
+      The version conversion is now done correctly and these same calls will now succeed.
+
+crates:
+  - name: asset-hub-westend-runtime
+    bump: patch
+  - name: asset-hub-rococo-runtime
+    bump: patch
+  - name: xcm-runtime-apis
+    bump: patch
+  - name: assets-common
+    bump: patch

From 445c1c80d438d5b937c347b04e4daa66ad25d878 Mon Sep 17 00:00:00 2001
From: Branislav Kontur
Date: Wed, 27 Nov 2024 10:28:26 +0100
Subject: [PATCH 22/41] Add stable2412 to target_branches for command-backport.yml (#6666)

The backport bot opens PRs for `A4-needs-backport` only against stable2407 and stable2409, but we already have stable2412.
The question is: when should we append a new `stable*` branch here? Should it be done when a new `stable*` branch is created? Can we automate this process somehow?
---
 .github/workflows/command-backport.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/command-backport.yml b/.github/workflows/command-backport.yml
index 8f23bcd75f01..eecf0ac72d2c 100644
--- a/.github/workflows/command-backport.yml
+++ b/.github/workflows/command-backport.yml
@@ -40,7 +40,7 @@ jobs:
         uses: korthout/backport-action@v3
         id: backport
         with:
-          target_branches: stable2407 stable2409
+          target_branches: stable2407 stable2409 stable2412
           merge_commits: skip
           github_token: ${{ steps.generate_token.outputs.token }}
           pull_description: |

From 2a0b2680b8df9c18d2543f82629917f759827e1c Mon Sep 17 00:00:00 2001
From: Alexandru Vasile <60601340+lexnv@users.noreply.github.com>
Date: Wed, 27 Nov 2024 13:11:01 +0200
Subject: [PATCH 23/41] litep2p/req-resp: Always provide main protocol name in responses (#6603)

Request responses are initialized with a main protocol name, and optional protocol names as a fallback. Running litep2p on Kusama as a validator has surfaced a `debug_assert` coming from the sync component: https://github.com/paritytech/polkadot-sdk/blob/3906c578c96d97a8a099a4bdac4685acbe375a7c/substrate/client/network/sync/src/strategy/chain_sync.rs#L640-L646

The issue is that we initiate a request-response over the main protocol name `/genesis/sync/2` but receive a response over the legacy protocol `ksm/sync/2`. This behavior is correct because litep2p propagates the protocol that responded to the higher levels. In contrast, libp2p provides the main protocol name regardless of whether a legacy protocol was negotiated. Because of this, higher-level components assume that only the main protocol name will respond. To keep this assumption intact, this PR aligns the litep2p shim layer with the libp2p behavior.

Closes: https://github.com/paritytech/polkadot-sdk/issues/6581

---------

Signed-off-by: Alexandru Vasile
Co-authored-by: Dmitry Markin
---
 prdoc/pr_6603.prdoc | 16 ++++++++++++++++
 .../src/litep2p/shim/request_response/mod.rs | 7 ++-----
 2 files changed, 18 insertions(+), 5 deletions(-)
 create mode 100644 prdoc/pr_6603.prdoc

diff --git a/prdoc/pr_6603.prdoc b/prdoc/pr_6603.prdoc
new file mode 100644
index 000000000000..20c5e7294dfa
--- /dev/null
+++ b/prdoc/pr_6603.prdoc
@@ -0,0 +1,16 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Always provide main protocol name in litep2p responses
+
+doc:
+  - audience: [ Node Dev, Node Operator ]
+    description: |
+      This PR aligns litep2p behavior with libp2p. Previously, the litep2p network backend
+      would provide the actual negotiated request-response protocol that produced a
+      response message. After this PR, only the main protocol name is reported to other
+      subsystems.
+
+crates:
+  - name: sc-network
+    bump: patch
diff --git a/substrate/client/network/src/litep2p/shim/request_response/mod.rs b/substrate/client/network/src/litep2p/shim/request_response/mod.rs
index bfd7a60ef9fe..146f2e4add97 100644
--- a/substrate/client/network/src/litep2p/shim/request_response/mod.rs
+++ b/substrate/client/network/src/litep2p/shim/request_response/mod.rs
@@ -320,7 +320,7 @@ impl RequestResponseProtocol {
 		&mut self,
 		peer: litep2p::PeerId,
 		request_id: RequestId,
-		fallback: Option,
+		_fallback: Option,
 		response: Vec,
 	) {
 		match self.pending_inbound_responses.remove(&request_id) {
@@ -337,10 +337,7 @@
 					response.len(),
 				);

-				let _ = tx.send(Ok((
-					response,
-					fallback.map_or_else(|| self.protocol.clone(), Into::into),
-				)));
+				let _ = tx.send(Ok((response, self.protocol.clone())));
 				self.metrics.register_outbound_request_success(started.elapsed());
 			},
 		}

From 5b1b34db0c03057c7962eb9682b4d581c324ae26 Mon Sep 17 00:00:00 2001
From: Alexandru Vasile <60601340+lexnv@users.noreply.github.com>
Date: Wed, 27 Nov 2024 14:10:36 +0200
Subject: [PATCH 24/41] v16: Expose the unstable metadata v16 (#5732)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This PR exposes the *unstable* metadata V16. The metadata is exposed under the unstable version number `u32::MAX`. Developers can start experimenting with the new features of metadata v16. *Please note that this metadata is under development; expect breaking changes until stabilization.*

The `ExtrinsicMetadata` trait receives a breaking change. Its associated constant `VERSION` is renamed to `VERSIONS` and now holds a constant static list of metadata versions. The versions implemented for `UncheckedExtrinsic` are v4 (the legacy version) and v5 (the new version). For metadata collection, it is assumed that all `TransactionExtensions` are under version 0.
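In trait form, the breaking change boils down to the sketch below. This is a simplified rendering of the `sp-runtime` trait (the full change appears in the diff further down); the example versions in the comment mirror what `UncheckedExtrinsic` now reports.

```rust
mod before {
	/// Pre-PR: implementors reported a single extrinsic format version.
	pub trait ExtrinsicMetadata {
		/// The format version of the `Extrinsic`.
		const VERSION: u8;
		/// Transaction extensions attached to this `Extrinsic`.
		type TransactionExtensions;
	}
}

mod after {
	/// Post-PR: implementors report every supported format version, which is
	/// what lets metadata v16 expose both extrinsic formats at once.
	pub trait ExtrinsicMetadata {
		/// The format versions of the `Extrinsic`, e.g. `&[4, 5]` for
		/// `UncheckedExtrinsic` (legacy v4 plus the new v5).
		const VERSIONS: &'static [u8];
		/// Transaction extensions attached to this `Extrinsic`.
		type TransactionExtensions;
	}
}
```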
Builds on top of: https://github.com/paritytech/polkadot-sdk/pull/5274 Closes: https://github.com/paritytech/polkadot-sdk/issues/5980 Closes: https://github.com/paritytech/polkadot-sdk/issues/5347 Closes: https://github.com/paritytech/polkadot-sdk/issues/5285 cc @paritytech/subxt-team --------- Signed-off-by: Alexandru Vasile Co-authored-by: Niklas Adolfsson Co-authored-by: Bastian Köcher Co-authored-by: James Wilson Co-authored-by: GitHub Action --- Cargo.lock | 28 ++- Cargo.toml | 4 +- prdoc/pr_5732.prdoc | 29 +++ .../frame/metadata-hash-extension/Cargo.toml | 2 +- substrate/frame/revive/src/evm/runtime.rs | 8 +- substrate/frame/support/Cargo.toml | 1 + .../src/construct_runtime/expand/metadata.rs | 2 +- substrate/frame/support/test/Cargo.toml | 2 +- substrate/frame/support/test/tests/pallet.rs | 8 +- substrate/primitives/metadata-ir/Cargo.toml | 2 +- substrate/primitives/metadata-ir/src/lib.rs | 25 ++- substrate/primitives/metadata-ir/src/types.rs | 6 +- .../primitives/metadata-ir/src/unstable.rs | 211 ++++++++++++++++++ substrate/primitives/metadata-ir/src/v14.rs | 5 +- substrate/primitives/metadata-ir/src/v15.rs | 2 +- .../src/generic/unchecked_extrinsic.rs | 3 +- .../primitives/runtime/src/traits/mod.rs | 6 +- .../src/overhead/remark_builder.rs | 8 +- .../src/overhead/runtime_utilities.rs | 15 +- substrate/utils/wasm-builder/Cargo.toml | 2 +- 20 files changed, 329 insertions(+), 40 deletions(-) create mode 100644 prdoc/pr_5732.prdoc create mode 100644 substrate/primitives/metadata-ir/src/unstable.rs diff --git a/Cargo.lock b/Cargo.lock index 58b8b222cce4..2c938ec17bd0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7224,6 +7224,18 @@ dependencies = [ "serde", ] +[[package]] +name = "frame-metadata" +version = "18.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "daaf440c68eb2c3d88e5760fe8c7af3f9fee9181fab6c2f2c4e7cc48dcc40bb8" +dependencies = [ + "cfg-if", + "parity-scale-codec", + "scale-info", + "serde", +] + [[package]] name = "frame-metadata-hash-extension" version = "0.1.0" @@ -7231,7 +7243,7 @@ dependencies = [ "array-bytes", "const-hex", "docify", - "frame-metadata 16.0.0", + "frame-metadata 18.0.0", "frame-support 28.0.0", "frame-system 28.0.0", "log", @@ -7316,7 +7328,7 @@ dependencies = [ "bitflags 1.3.2", "docify", "environmental", - "frame-metadata 16.0.0", + "frame-metadata 18.0.0", "frame-support-procedural 23.0.0", "frame-system 28.0.0", "impl-trait-for-tuples", @@ -7494,7 +7506,7 @@ version = "3.0.0" dependencies = [ "frame-benchmarking 28.0.0", "frame-executive 28.0.0", - "frame-metadata 16.0.0", + "frame-metadata 18.0.0", "frame-support 28.0.0", "frame-support-test-pallet", "frame-system 28.0.0", @@ -10520,13 +10532,13 @@ dependencies = [ [[package]] name = "merkleized-metadata" -version = "0.1.0" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f313fcff1d2a4bcaa2deeaa00bf7530d77d5f7bd0467a117dde2e29a75a7a17a" +checksum = "943f6d92804ed0100803d51fa9b21fd9432b5d122ba4c713dc26fe6d2f619cf6" dependencies = [ "array-bytes", "blake3", - "frame-metadata 16.0.0", + "frame-metadata 18.0.0", "parity-scale-codec", "scale-decode 0.13.1", "scale-info", @@ -26664,7 +26676,7 @@ dependencies = [ name = "sp-metadata-ir" version = "0.6.0" dependencies = [ - "frame-metadata 16.0.0", + "frame-metadata 18.0.0", "parity-scale-codec", "scale-info", ] @@ -28601,7 +28613,7 @@ dependencies = [ "cargo_metadata", "console", "filetime", - "frame-metadata 16.0.0", + "frame-metadata 18.0.0", "jobserver", 
"merkleized-metadata", "parity-scale-codec", diff --git a/Cargo.toml b/Cargo.toml index 53f95406e79a..b1a52712e736 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -779,7 +779,7 @@ frame-benchmarking-pallet-pov = { default-features = false, path = "substrate/fr frame-election-provider-solution-type = { path = "substrate/frame/election-provider-support/solution-type", default-features = false } frame-election-provider-support = { path = "substrate/frame/election-provider-support", default-features = false } frame-executive = { path = "substrate/frame/executive", default-features = false } -frame-metadata = { version = "16.0.0", default-features = false } +frame-metadata = { version = "18.0.0", default-features = false } frame-metadata-hash-extension = { path = "substrate/frame/metadata-hash-extension", default-features = false } frame-support = { path = "substrate/frame/support", default-features = false } frame-support-procedural = { path = "substrate/frame/support/procedural", default-features = false } @@ -854,7 +854,7 @@ macro_magic = { version = "0.5.1" } maplit = { version = "1.0.2" } memmap2 = { version = "0.9.3" } memory-db = { version = "0.32.0", default-features = false } -merkleized-metadata = { version = "0.1.0" } +merkleized-metadata = { version = "0.1.2" } merlin = { version = "3.0", default-features = false } messages-relay = { path = "bridges/relays/messages" } metered = { version = "0.6.1", default-features = false, package = "prioritized-metered-channel" } diff --git a/prdoc/pr_5732.prdoc b/prdoc/pr_5732.prdoc new file mode 100644 index 000000000000..6f3f9b8a1668 --- /dev/null +++ b/prdoc/pr_5732.prdoc @@ -0,0 +1,29 @@ +title: Expose the unstable metadata v16 +doc: +- audience: Node Dev + description: | + This PR exposes the *unstable* metadata V16. The metadata is exposed under the unstable u32::MAX number. + Developers can start experimenting with the new features of the metadata v16. *Please note that this metadata is under development and expect breaking changes until stabilization.* + The `ExtrinsicMetadata` trait receives a breaking change. Its associated type `VERSION` is rename to `VERSIONS` and now supports a constant static list of metadata versions. + The versions implemented for `UncheckedExtrinsic` are v4 (legacy version) and v5 (new version). + For metadata collection, it is assumed that all `TransactionExtensions` are under version 0. 
+ +crates: + - name: sp-metadata-ir + bump: major + - name: frame-support-procedural + bump: patch + - name: frame-support + bump: minor + - name: frame-support-test + bump: major + - name: frame-metadata-hash-extension + bump: patch + - name: substrate-wasm-builder + bump: minor + - name: pallet-revive + bump: minor + - name: sp-runtime + bump: major + - name: frame-benchmarking-cli + bump: patch diff --git a/substrate/frame/metadata-hash-extension/Cargo.toml b/substrate/frame/metadata-hash-extension/Cargo.toml index bca2c3ffb198..8f4ba922984c 100644 --- a/substrate/frame/metadata-hash-extension/Cargo.toml +++ b/substrate/frame/metadata-hash-extension/Cargo.toml @@ -25,7 +25,7 @@ substrate-test-runtime-client = { workspace = true } sp-api = { workspace = true, default-features = true } sp-transaction-pool = { workspace = true, default-features = true } merkleized-metadata = { workspace = true } -frame-metadata = { features = ["current"], workspace = true, default-features = true } +frame-metadata = { features = ["current", "unstable"], workspace = true, default-features = true } sp-tracing = { workspace = true, default-features = true } [features] diff --git a/substrate/frame/revive/src/evm/runtime.rs b/substrate/frame/revive/src/evm/runtime.rs index 40c210304ca2..b5dc9a36065b 100644 --- a/substrate/frame/revive/src/evm/runtime.rs +++ b/substrate/frame/revive/src/evm/runtime.rs @@ -92,8 +92,12 @@ impl ExtrinsicLike impl ExtrinsicMetadata for UncheckedExtrinsic { - const VERSION: u8 = - generic::UncheckedExtrinsic::, Signature, E::Extension>::VERSION; + const VERSIONS: &'static [u8] = generic::UncheckedExtrinsic::< + Address, + CallOf, + Signature, + E::Extension, + >::VERSIONS; type TransactionExtensions = E::Extension; } diff --git a/substrate/frame/support/Cargo.toml b/substrate/frame/support/Cargo.toml index d7da034b3492..d48c80510581 100644 --- a/substrate/frame/support/Cargo.toml +++ b/substrate/frame/support/Cargo.toml @@ -28,6 +28,7 @@ scale-info = { features = [ ], workspace = true } frame-metadata = { features = [ "current", + "unstable", ], workspace = true } sp-api = { features = [ "frame-metadata", diff --git a/substrate/frame/support/procedural/src/construct_runtime/expand/metadata.rs b/substrate/frame/support/procedural/src/construct_runtime/expand/metadata.rs index c12fc20bc8b8..4590a3a7f490 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/expand/metadata.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/expand/metadata.rs @@ -117,7 +117,7 @@ pub fn expand_runtime_metadata( pallets: #scrate::__private::vec![ #(#pallets),* ], extrinsic: #scrate::__private::metadata_ir::ExtrinsicMetadataIR { ty, - version: <#extrinsic as #scrate::sp_runtime::traits::ExtrinsicMetadata>::VERSION, + versions: <#extrinsic as #scrate::sp_runtime::traits::ExtrinsicMetadata>::VERSIONS.into_iter().map(|ref_version| *ref_version).collect(), address_ty, call_ty, signature_ty, diff --git a/substrate/frame/support/test/Cargo.toml b/substrate/frame/support/test/Cargo.toml index 2187ee22b395..17ee3130b741 100644 --- a/substrate/frame/support/test/Cargo.toml +++ b/substrate/frame/support/test/Cargo.toml @@ -19,7 +19,7 @@ static_assertions = { workspace = true, default-features = true } serde = { features = ["derive"], workspace = true } codec = { features = ["derive"], workspace = true } scale-info = { features = ["derive"], workspace = true } -frame-metadata = { features = ["current"], workspace = true } +frame-metadata = { features = ["current", "unstable"], workspace 
= true } sp-api = { workspace = true } sp-arithmetic = { workspace = true } sp-io = { workspace = true } diff --git a/substrate/frame/support/test/tests/pallet.rs b/substrate/frame/support/test/tests/pallet.rs index de7f7eb4bc97..9df1f461bba2 100644 --- a/substrate/frame/support/test/tests/pallet.rs +++ b/substrate/frame/support/test/tests/pallet.rs @@ -53,6 +53,9 @@ parameter_types! { /// Latest stable metadata version used for testing. const LATEST_METADATA_VERSION: u32 = 15; +/// Unstable metadata version. +const UNSTABLE_METADATA_VERSION: u32 = u32::MAX; + pub struct SomeType1; impl From for u64 { fn from(_t: SomeType1) -> Self { @@ -1977,7 +1980,10 @@ fn metadata_at_version() { #[test] fn metadata_versions() { - assert_eq!(vec![14, LATEST_METADATA_VERSION], Runtime::metadata_versions()); + assert_eq!( + vec![14, LATEST_METADATA_VERSION, UNSTABLE_METADATA_VERSION], + Runtime::metadata_versions() + ); } #[test] diff --git a/substrate/primitives/metadata-ir/Cargo.toml b/substrate/primitives/metadata-ir/Cargo.toml index d7786347dd02..046441104b88 100644 --- a/substrate/primitives/metadata-ir/Cargo.toml +++ b/substrate/primitives/metadata-ir/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { workspace = true } -frame-metadata = { features = ["current"], workspace = true } +frame-metadata = { features = ["current", "unstable"], workspace = true } scale-info = { features = ["derive"], workspace = true } [features] diff --git a/substrate/primitives/metadata-ir/src/lib.rs b/substrate/primitives/metadata-ir/src/lib.rs index 4bd13b935afd..bf234432a1a6 100644 --- a/substrate/primitives/metadata-ir/src/lib.rs +++ b/substrate/primitives/metadata-ir/src/lib.rs @@ -30,6 +30,7 @@ mod types; use frame_metadata::RuntimeMetadataPrefixed; pub use types::*; +mod unstable; mod v14; mod v15; @@ -39,23 +40,33 @@ const V14: u32 = 14; /// Metadata V15. const V15: u32 = 15; +/// Unstable metadata V16. +const UNSTABLE_V16: u32 = u32::MAX; + /// Transform the IR to the specified version. /// /// Use [`supported_versions`] to find supported versions. pub fn into_version(metadata: MetadataIR, version: u32) -> Option { // Note: Unstable metadata version is `u32::MAX` until stabilized. match version { - // Latest stable version. + // Version V14. This needs to be around until the + // deprecation of the `Metadata_metadata` runtime call in favor of + // `Metadata_metadata_at_version. V14 => Some(into_v14(metadata)), - // Unstable metadata. + + // Version V15 - latest stable. V15 => Some(into_latest(metadata)), + + // Unstable metadata under `u32::MAX`. + UNSTABLE_V16 => Some(into_unstable(metadata)), + _ => None, } } /// Returns the supported metadata versions. pub fn supported_versions() -> alloc::vec::Vec { - alloc::vec![V14, V15] + alloc::vec![V14, V15, UNSTABLE_V16] } /// Transform the IR to the latest stable metadata version. @@ -70,6 +81,12 @@ pub fn into_v14(metadata: MetadataIR) -> RuntimeMetadataPrefixed { latest.into() } +/// Transform the IR to unstable metadata version 16. 
+pub fn into_unstable(metadata: MetadataIR) -> RuntimeMetadataPrefixed { + let latest: frame_metadata::v16::RuntimeMetadataV16 = metadata.into(); + latest.into() +} + #[cfg(test)] mod test { use super::*; @@ -81,7 +98,7 @@ mod test { pallets: vec![], extrinsic: ExtrinsicMetadataIR { ty: meta_type::<()>(), - version: 0, + versions: vec![0], address_ty: meta_type::<()>(), call_ty: meta_type::<()>(), signature_ty: meta_type::<()>(), diff --git a/substrate/primitives/metadata-ir/src/types.rs b/substrate/primitives/metadata-ir/src/types.rs index 199b692fbd8c..af217ffe16ee 100644 --- a/substrate/primitives/metadata-ir/src/types.rs +++ b/substrate/primitives/metadata-ir/src/types.rs @@ -170,8 +170,8 @@ pub struct ExtrinsicMetadataIR { /// /// Note: Field used for metadata V14 only. pub ty: T::Type, - /// Extrinsic version. - pub version: u8, + /// Extrinsic versions. + pub versions: Vec, /// The type of the address that signs the extrinsic pub address_ty: T::Type, /// The type of the outermost Call enum. @@ -191,7 +191,7 @@ impl IntoPortable for ExtrinsicMetadataIR { fn into_portable(self, registry: &mut Registry) -> Self::Output { ExtrinsicMetadataIR { ty: registry.register_type(&self.ty), - version: self.version, + versions: self.versions, address_ty: registry.register_type(&self.address_ty), call_ty: registry.register_type(&self.call_ty), signature_ty: registry.register_type(&self.signature_ty), diff --git a/substrate/primitives/metadata-ir/src/unstable.rs b/substrate/primitives/metadata-ir/src/unstable.rs new file mode 100644 index 000000000000..d46ce3ec6a7d --- /dev/null +++ b/substrate/primitives/metadata-ir/src/unstable.rs @@ -0,0 +1,211 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Convert the IR to V16 metadata. 
+ +use crate::{ + DeprecationInfoIR, DeprecationStatusIR, OuterEnumsIR, PalletAssociatedTypeMetadataIR, + PalletCallMetadataIR, PalletConstantMetadataIR, PalletErrorMetadataIR, PalletEventMetadataIR, + PalletStorageMetadataIR, StorageEntryMetadataIR, +}; + +use super::types::{ + ExtrinsicMetadataIR, MetadataIR, PalletMetadataIR, RuntimeApiMetadataIR, + RuntimeApiMethodMetadataIR, RuntimeApiMethodParamMetadataIR, TransactionExtensionMetadataIR, +}; + +use frame_metadata::v16::{ + CustomMetadata, DeprecationInfo, DeprecationStatus, ExtrinsicMetadata, OuterEnums, + PalletAssociatedTypeMetadata, PalletCallMetadata, PalletConstantMetadata, PalletErrorMetadata, + PalletEventMetadata, PalletMetadata, PalletStorageMetadata, RuntimeApiMetadata, + RuntimeApiMethodMetadata, RuntimeApiMethodParamMetadata, RuntimeMetadataV16, + StorageEntryMetadata, TransactionExtensionMetadata, +}; + +impl From for RuntimeMetadataV16 { + fn from(ir: MetadataIR) -> Self { + RuntimeMetadataV16::new( + ir.pallets.into_iter().map(Into::into).collect(), + ir.extrinsic.into(), + ir.apis.into_iter().map(Into::into).collect(), + ir.outer_enums.into(), + // Substrate does not collect yet the custom metadata fields. + // This allows us to extend the V16 easily. + CustomMetadata { map: Default::default() }, + ) + } +} + +impl From for RuntimeApiMetadata { + fn from(ir: RuntimeApiMetadataIR) -> Self { + RuntimeApiMetadata { + name: ir.name, + methods: ir.methods.into_iter().map(Into::into).collect(), + docs: ir.docs, + deprecation_info: ir.deprecation_info.into(), + } + } +} + +impl From for RuntimeApiMethodMetadata { + fn from(ir: RuntimeApiMethodMetadataIR) -> Self { + RuntimeApiMethodMetadata { + name: ir.name, + inputs: ir.inputs.into_iter().map(Into::into).collect(), + output: ir.output, + docs: ir.docs, + deprecation_info: ir.deprecation_info.into(), + } + } +} + +impl From for RuntimeApiMethodParamMetadata { + fn from(ir: RuntimeApiMethodParamMetadataIR) -> Self { + RuntimeApiMethodParamMetadata { name: ir.name, ty: ir.ty } + } +} + +impl From for PalletMetadata { + fn from(ir: PalletMetadataIR) -> Self { + PalletMetadata { + name: ir.name, + storage: ir.storage.map(Into::into), + calls: ir.calls.map(Into::into), + event: ir.event.map(Into::into), + constants: ir.constants.into_iter().map(Into::into).collect(), + error: ir.error.map(Into::into), + index: ir.index, + docs: ir.docs, + associated_types: ir.associated_types.into_iter().map(Into::into).collect(), + deprecation_info: ir.deprecation_info.into(), + } + } +} + +impl From for PalletStorageMetadata { + fn from(ir: PalletStorageMetadataIR) -> Self { + PalletStorageMetadata { + prefix: ir.prefix, + entries: ir.entries.into_iter().map(Into::into).collect(), + } + } +} + +impl From for StorageEntryMetadata { + fn from(ir: StorageEntryMetadataIR) -> Self { + StorageEntryMetadata { + name: ir.name, + modifier: ir.modifier.into(), + ty: ir.ty.into(), + default: ir.default, + docs: ir.docs, + deprecation_info: ir.deprecation_info.into(), + } + } +} + +impl From for PalletAssociatedTypeMetadata { + fn from(ir: PalletAssociatedTypeMetadataIR) -> Self { + PalletAssociatedTypeMetadata { name: ir.name, ty: ir.ty, docs: ir.docs } + } +} + +impl From for PalletErrorMetadata { + fn from(ir: PalletErrorMetadataIR) -> Self { + PalletErrorMetadata { ty: ir.ty, deprecation_info: ir.deprecation_info.into() } + } +} + +impl From for PalletEventMetadata { + fn from(ir: PalletEventMetadataIR) -> Self { + PalletEventMetadata { ty: ir.ty, deprecation_info: ir.deprecation_info.into() } + } +} 
+ +impl From for PalletCallMetadata { + fn from(ir: PalletCallMetadataIR) -> Self { + PalletCallMetadata { ty: ir.ty, deprecation_info: ir.deprecation_info.into() } + } +} + +impl From for PalletConstantMetadata { + fn from(ir: PalletConstantMetadataIR) -> Self { + PalletConstantMetadata { + name: ir.name, + ty: ir.ty, + value: ir.value, + docs: ir.docs, + deprecation_info: ir.deprecation_info.into(), + } + } +} + +impl From for TransactionExtensionMetadata { + fn from(ir: TransactionExtensionMetadataIR) -> Self { + TransactionExtensionMetadata { identifier: ir.identifier, ty: ir.ty, implicit: ir.implicit } + } +} + +impl From for ExtrinsicMetadata { + fn from(ir: ExtrinsicMetadataIR) -> Self { + // Assume version 0 for all extensions. + let indexes = (0..ir.extensions.len()).map(|index| index as u32).collect(); + let transaction_extensions_by_version = [(0, indexes)].iter().cloned().collect(); + + ExtrinsicMetadata { + versions: ir.versions, + address_ty: ir.address_ty, + signature_ty: ir.signature_ty, + transaction_extensions_by_version, + transaction_extensions: ir.extensions.into_iter().map(Into::into).collect(), + } + } +} + +impl From for OuterEnums { + fn from(ir: OuterEnumsIR) -> Self { + OuterEnums { + call_enum_ty: ir.call_enum_ty, + event_enum_ty: ir.event_enum_ty, + error_enum_ty: ir.error_enum_ty, + } + } +} + +impl From for DeprecationStatus { + fn from(ir: DeprecationStatusIR) -> Self { + match ir { + DeprecationStatusIR::NotDeprecated => DeprecationStatus::NotDeprecated, + DeprecationStatusIR::DeprecatedWithoutNote => DeprecationStatus::DeprecatedWithoutNote, + DeprecationStatusIR::Deprecated { since, note } => + DeprecationStatus::Deprecated { since, note }, + } + } +} + +impl From for DeprecationInfo { + fn from(ir: DeprecationInfoIR) -> Self { + match ir { + DeprecationInfoIR::NotDeprecated => DeprecationInfo::NotDeprecated, + DeprecationInfoIR::ItemDeprecated(status) => + DeprecationInfo::ItemDeprecated(status.into()), + DeprecationInfoIR::VariantsDeprecated(btree) => DeprecationInfo::VariantsDeprecated( + btree.into_iter().map(|(key, value)| (key.0, value.into())).collect(), + ), + } + } +} diff --git a/substrate/primitives/metadata-ir/src/v14.rs b/substrate/primitives/metadata-ir/src/v14.rs index 70e84532add9..f3cb5973f5bd 100644 --- a/substrate/primitives/metadata-ir/src/v14.rs +++ b/substrate/primitives/metadata-ir/src/v14.rs @@ -149,9 +149,12 @@ impl From for SignedExtensionMetadata { impl From for ExtrinsicMetadata { fn from(ir: ExtrinsicMetadataIR) -> Self { + let lowest_supported_version = + ir.versions.iter().min().expect("Metadata V14 supports one version; qed"); + ExtrinsicMetadata { ty: ir.ty, - version: ir.version, + version: *lowest_supported_version, signed_extensions: ir.extensions.into_iter().map(Into::into).collect(), } } diff --git a/substrate/primitives/metadata-ir/src/v15.rs b/substrate/primitives/metadata-ir/src/v15.rs index 4b3b6106d27f..ed315a31e6dc 100644 --- a/substrate/primitives/metadata-ir/src/v15.rs +++ b/substrate/primitives/metadata-ir/src/v15.rs @@ -100,7 +100,7 @@ impl From for SignedExtensionMetadata { impl From for ExtrinsicMetadata { fn from(ir: ExtrinsicMetadataIR) -> Self { ExtrinsicMetadata { - version: ir.version, + version: *ir.versions.iter().min().expect("Metadata V15 supports only one version"), address_ty: ir.address_ty, call_ty: ir.call_ty, signature_ty: ir.signature_ty, diff --git a/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs b/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs index 
91ba37451909..d8510a60a789 100644 --- a/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs +++ b/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs @@ -389,8 +389,7 @@ where impl> ExtrinsicMetadata for UncheckedExtrinsic { - // TODO: Expose both version 4 and version 5 in metadata v16. - const VERSION: u8 = LEGACY_EXTRINSIC_FORMAT_VERSION; + const VERSIONS: &'static [u8] = &[LEGACY_EXTRINSIC_FORMAT_VERSION, EXTRINSIC_FORMAT_VERSION]; type TransactionExtensions = Extension; } diff --git a/substrate/primitives/runtime/src/traits/mod.rs b/substrate/primitives/runtime/src/traits/mod.rs index 02bc7adc8ba5..cfcc3e5a354d 100644 --- a/substrate/primitives/runtime/src/traits/mod.rs +++ b/substrate/primitives/runtime/src/traits/mod.rs @@ -1410,10 +1410,10 @@ impl SignaturePayload for () { /// Implementor is an [`Extrinsic`] and provides metadata about this extrinsic. pub trait ExtrinsicMetadata { - /// The format version of the `Extrinsic`. + /// The format versions of the `Extrinsic`. /// - /// By format is meant the encoded representation of the `Extrinsic`. - const VERSION: u8; + /// By format we mean the encoded representation of the `Extrinsic`. + const VERSIONS: &'static [u8]; /// Transaction extensions attached to this `Extrinsic`. type TransactionExtensions; diff --git a/substrate/utils/frame/benchmarking-cli/src/overhead/remark_builder.rs b/substrate/utils/frame/benchmarking-cli/src/overhead/remark_builder.rs index a1d5f282d9f8..3a2d8776d1e1 100644 --- a/substrate/utils/frame/benchmarking-cli/src/overhead/remark_builder.rs +++ b/substrate/utils/frame/benchmarking-cli/src/overhead/remark_builder.rs @@ -54,13 +54,15 @@ impl> DynamicRemarkBuilder { log::debug!("Found metadata API version {}.", metadata_api_version); let opaque_metadata = if metadata_api_version > 1 { - let Ok(mut supported_metadata_versions) = api.metadata_versions(genesis) else { + let Ok(supported_metadata_versions) = api.metadata_versions(genesis) else { return Err("Unable to fetch metadata versions".to_string().into()); }; let latest = supported_metadata_versions - .pop() - .ok_or("No metadata version supported".to_string())?; + .into_iter() + .filter(|v| *v != u32::MAX) + .max() + .ok_or("No stable metadata versions supported".to_string())?; api.metadata_at_version(genesis, latest) .map_err(|e| format!("Unable to fetch metadata: {:?}", e))? diff --git a/substrate/utils/frame/benchmarking-cli/src/overhead/runtime_utilities.rs b/substrate/utils/frame/benchmarking-cli/src/overhead/runtime_utilities.rs index c498da38afb0..3081197dc033 100644 --- a/substrate/utils/frame/benchmarking-cli/src/overhead/runtime_utilities.rs +++ b/substrate/utils/frame/benchmarking-cli/src/overhead/runtime_utilities.rs @@ -35,14 +35,19 @@ pub fn fetch_latest_metadata_from_code_blob( let opaque_metadata: OpaqueMetadata = match version_result { Ok(supported_versions) => { - let latest_version = Vec::::decode(&mut supported_versions.as_slice()) - .map_err(|e| format!("Unable to decode version list: {e}"))? 
-			.pop()
-			.ok_or("No metadata versions supported".to_string())?;
+		let supported_versions = Vec::::decode(&mut supported_versions.as_slice())
+			.map_err(|e| format!("Unable to decode version list: {e}"))?;
+
+		let latest_stable = supported_versions
+			.into_iter()
+			.filter(|v| *v != u32::MAX)
+			.max()
+			.ok_or("No stable metadata versions supported".to_string())?;

 		let encoded = runtime_caller
-			.call("Metadata_metadata_at_version", latest_version)
+			.call("Metadata_metadata_at_version", latest_stable)
 			.map_err(|_| "Unable to fetch metadata from blob".to_string())?;
+
 		Option::::decode(&mut encoded.as_slice())?
 			.ok_or_else(|| "Metadata not found".to_string())?
 	},
diff --git a/substrate/utils/wasm-builder/Cargo.toml b/substrate/utils/wasm-builder/Cargo.toml
index 8f0e8a23e54a..fb15e8619a38 100644
--- a/substrate/utils/wasm-builder/Cargo.toml
+++ b/substrate/utils/wasm-builder/Cargo.toml
@@ -35,7 +35,7 @@ sc-executor = { optional = true, workspace = true, default-features = true }
 sp-core = { optional = true, workspace = true, default-features = true }
 sp-io = { optional = true, workspace = true, default-features = true }
 sp-version = { optional = true, workspace = true, default-features = true }
-frame-metadata = { features = ["current"], optional = true, workspace = true, default-features = true }
+frame-metadata = { features = ["current", "unstable"], optional = true, workspace = true, default-features = true }
 codec = { optional = true, workspace = true, default-features = true }
 array-bytes = { optional = true, workspace = true, default-features = true }
 sp-tracing = { optional = true, workspace = true, default-features = true }

From afd065fa7267494246a9a8d767dfd030e2681fce Mon Sep 17 00:00:00 2001
From: Alexandru Vasile <60601340+lexnv@users.noreply.github.com>
Date: Wed, 27 Nov 2024 20:12:39 +0200
Subject: [PATCH 25/41] rpc-v2: Implement `archive_unstable_storageDiff` (#5997)

This PR implements the `archive_unstable_storageDiff` method; a hedged client-side sketch of consuming it follows the list below. The implementation follows the rpc-v2 spec from:
- https://github.com/paritytech/json-rpc-interface-spec/pull/159.
- builds on top of https://github.com/paritytech/json-rpc-interface-spec/pull/161
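As a rough illustration of how a client might consume the new subscription, here is a minimal sketch. It assumes a `jsonrpsee` WebSocket client, simplifies the item and event payloads to raw JSON, and uses placeholder hashes; the method and unsubscribe names are taken from the API trait in the diff below, while the item field names follow the rpc-v2 spec draft and should be treated as illustrative.

```rust
use jsonrpsee::{core::client::SubscriptionClientT, rpc_params, ws_client::WsClientBuilder};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Connect to a local archive node (placeholder endpoint).
    let client = WsClientBuilder::default().build("ws://127.0.0.1:9944").await?;

    // Placeholder block hash; substitute a real one. `previous_hash` is
    // optional and defaults to the parent of the queried block when omitted.
    let block_hash = "0x0000000000000000000000000000000000000000000000000000000000000000";
    let previous_hash: Option<&str> = None;

    // One storage diff item: diff every key under the empty prefix,
    // returning values (field names per the rpc-v2 spec draft).
    let items = serde_json::json!([{ "key": "0x", "returnType": "value" }]);

    let mut sub = client
        .subscribe::<serde_json::Value, _>(
            "archive_unstable_storageDiff",
            rpc_params![block_hash, items, previous_hash],
            "archive_unstable_storageDiff_stopStorageDiff",
        )
        .await?;

    // Events arrive until a terminal event (per the spec draft,
    // `storageDiffDone` or `storageDiffError`).
    while let Some(event) = sub.next().await {
        println!("{}", event?);
    }
    Ok(())
}
```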
cc @paritytech/subxt-team

---------

Signed-off-by: Alexandru Vasile
Co-authored-by: James Wilson
---
 Cargo.lock | 7 +-
 prdoc/pr_5997.prdoc | 18 +
 substrate/client/rpc-spec-v2/Cargo.toml | 1 +
 .../client/rpc-spec-v2/src/archive/api.rs | 22 +-
 .../client/rpc-spec-v2/src/archive/archive.rs | 89 +-
 .../src/archive/archive_storage.rs | 829 +++++++++++++++++-
 .../client/rpc-spec-v2/src/archive/tests.rs | 277 +++++-
 .../client/rpc-spec-v2/src/common/events.rs | 208 ++++-
 .../client/rpc-spec-v2/src/common/storage.rs | 15 +
 substrate/client/service/src/builder.rs | 1 +
 10 files changed, 1449 insertions(+), 18 deletions(-)
 create mode 100644 prdoc/pr_5997.prdoc

diff --git a/Cargo.lock b/Cargo.lock
index 2c938ec17bd0..12e642bc9d06 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -20959,7 +20959,7 @@ checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191"
 dependencies = [
 "aho-corasick",
 "memchr",
- "regex-automata 0.4.9",
+ "regex-automata 0.4.8",
 "regex-syntax 0.8.5",
 ]
@@ -20980,9 +20980,9 @@ checksum = "fed1ceff11a1dddaee50c9dc8e4938bd106e9d89ae372f192311e7da498e3b69"

 [[package]]
 name = "regex-automata"
-version = "0.4.9"
+version = "0.4.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908"
+checksum = "368758f23274712b504848e9d5a6f010445cc8b87a7cdb4d7cbee666c1288da3"
 dependencies = [
 "aho-corasick",
 "memchr",
@@ -23277,6 +23277,7 @@ dependencies = [
 "futures",
 "futures-util",
 "hex",
+ "itertools 0.11.0",
 "jsonrpsee",
 "log",
 "parity-scale-codec",
diff --git a/prdoc/pr_5997.prdoc b/prdoc/pr_5997.prdoc
new file mode 100644
index 000000000000..6bac36a44586
--- /dev/null
+++ b/prdoc/pr_5997.prdoc
@@ -0,0 +1,18 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Implement archive_unstable_storageDiff method
+
+doc:
+  - audience: Node Dev
+    description: |
+      This PR implements the `archive_unstable_storageDiff` rpc-v2 method.
+      Developers can use this method to fetch the storage differences
+      between two blocks. This is useful for oracles and archive nodes.
+      For more details see: https://github.com/paritytech/json-rpc-interface-spec/blob/main/src/api/archive_unstable_storageDiff.md.
+
+crates:
+  - name: sc-rpc-spec-v2
+    bump: major
+  - name: sc-service
+    bump: patch
diff --git a/substrate/client/rpc-spec-v2/Cargo.toml b/substrate/client/rpc-spec-v2/Cargo.toml
index daa805912fb9..b304bc905925 100644
--- a/substrate/client/rpc-spec-v2/Cargo.toml
+++ b/substrate/client/rpc-spec-v2/Cargo.toml
@@ -42,6 +42,7 @@ log = { workspace = true, default-features = true }
 futures-util = { workspace = true }
 rand = { workspace = true, default-features = true }
 schnellru = { workspace = true }
+itertools = { workspace = true }

 [dev-dependencies]
 async-trait = { workspace = true }
diff --git a/substrate/client/rpc-spec-v2/src/archive/api.rs b/substrate/client/rpc-spec-v2/src/archive/api.rs
index b19738304000..dcfeaecb147b 100644
--- a/substrate/client/rpc-spec-v2/src/archive/api.rs
+++ b/substrate/client/rpc-spec-v2/src/archive/api.rs
@@ -19,7 +19,10 @@
 //! API trait of the archive methods.
 use crate::{
-	common::events::{ArchiveStorageResult, PaginatedStorageQuery},
+	common::events::{
+		ArchiveStorageDiffEvent, ArchiveStorageDiffItem, ArchiveStorageResult,
+		PaginatedStorageQuery,
+	},
 	MethodResult,
 };
 use jsonrpsee::{core::RpcResult, proc_macros::rpc};
@@ -104,4 +107,21 @@ pub trait ArchiveApi {
 		items: Vec>,
 		child_trie: Option,
 	) -> RpcResult;
+
+	/// Returns the storage difference between two blocks.
+	///
+	/// # Unstable
+	///
+	/// This method is unstable and can change in minor or patch releases.
+	#[subscription(
+		name = "archive_unstable_storageDiff" => "archive_unstable_storageDiffEvent",
+		unsubscribe = "archive_unstable_storageDiff_stopStorageDiff",
+		item = ArchiveStorageDiffEvent,
+	)]
+	fn archive_unstable_storage_diff(
+		&self,
+		hash: Hash,
+		items: Vec>,
+		previous_hash: Option,
+	);
 }
diff --git a/substrate/client/rpc-spec-v2/src/archive/archive.rs b/substrate/client/rpc-spec-v2/src/archive/archive.rs
index dd6c566a76ed..55054d91d85d 100644
--- a/substrate/client/rpc-spec-v2/src/archive/archive.rs
+++ b/substrate/client/rpc-spec-v2/src/archive/archive.rs
@@ -19,17 +19,29 @@
 //! API implementation for `archive`.

 use crate::{
-	archive::{error::Error as ArchiveError, ArchiveApiServer},
-	common::events::{ArchiveStorageResult, PaginatedStorageQuery},
-	hex_string, MethodResult,
+	archive::{
+		archive_storage::{ArchiveStorage, ArchiveStorageDiff},
+		error::Error as ArchiveError,
+		ArchiveApiServer,
+	},
+	common::events::{
+		ArchiveStorageDiffEvent, ArchiveStorageDiffItem, ArchiveStorageResult,
+		PaginatedStorageQuery,
+	},
+	hex_string, MethodResult, SubscriptionTaskExecutor,
 };
 use codec::Encode;
-use jsonrpsee::core::{async_trait, RpcResult};
+use futures::FutureExt;
+use jsonrpsee::{
+	core::{async_trait, RpcResult},
+	PendingSubscriptionSink,
+};
 use sc_client_api::{
 	Backend, BlockBackend, BlockchainEvents, CallExecutor, ChildInfo, ExecutorProvider, StorageKey,
 	StorageProvider,
 };
+use sc_rpc::utils::Subscription;
 use sp_api::{CallApiAt, CallContext};
 use sp_blockchain::{
 	Backend as BlockChainBackend, Error as BlockChainError, HeaderBackend, HeaderMetadata,
@@ -41,7 +53,9 @@
 };
 use std::{collections::HashSet, marker::PhantomData, sync::Arc};

-use super::archive_storage::ArchiveStorage;
+use tokio::sync::mpsc;
+
+pub(crate) const LOG_TARGET: &str = "rpc-spec-v2::archive";

 /// The configuration of [`Archive`].
 pub struct ArchiveConfig {
@@ -64,6 +78,12 @@
 const MAX_DESCENDANT_RESPONSES: usize = 5;

 /// `MAX_DESCENDANT_RESPONSES`.
 const MAX_QUERIED_ITEMS: usize = 8;

+/// The buffer capacity for each storage query.
+///
+/// This is small because the underlying JSON-RPC server has
+/// its own buffer capacity per connection as well.
+const STORAGE_QUERY_BUF: usize = 16;
+
 impl Default for ArchiveConfig {
 	fn default() -> Self {
 		Self {
@@ -79,6 +99,8 @@
 pub struct Archive, Block: BlockT, Client> {
 	client: Arc,
 	/// Backend of the chain.
 	backend: Arc,
+	/// Executor to spawn subscriptions.
+	executor: SubscriptionTaskExecutor,
 	/// The hexadecimal encoded hash of the genesis block.
genesis_hash: String, /// The maximum number of items the `archive_storage` can return for a descendant query before @@ -96,12 +118,14 @@ impl, Block: BlockT, Client> Archive { client: Arc, backend: Arc, genesis_hash: GenesisHash, + executor: SubscriptionTaskExecutor, config: ArchiveConfig, ) -> Self { let genesis_hash = hex_string(&genesis_hash.as_ref()); Self { client, backend, + executor, genesis_hash, storage_max_descendant_responses: config.max_descendant_responses, storage_max_queried_items: config.max_queried_items, @@ -278,4 +302,59 @@ where Ok(storage_client.handle_query(hash, items, child_trie)) } + + fn archive_unstable_storage_diff( + &self, + pending: PendingSubscriptionSink, + hash: Block::Hash, + items: Vec>, + previous_hash: Option, + ) { + let storage_client = ArchiveStorageDiff::new(self.client.clone()); + let client = self.client.clone(); + + log::trace!(target: LOG_TARGET, "Storage diff subscription started"); + + let fut = async move { + let Ok(mut sink) = pending.accept().await.map(Subscription::from) else { return }; + + let previous_hash = if let Some(previous_hash) = previous_hash { + previous_hash + } else { + let Ok(Some(current_header)) = client.header(hash) else { + let message = format!("Block header is not present: {hash}"); + let _ = sink.send(&ArchiveStorageDiffEvent::err(message)).await; + return + }; + *current_header.parent_hash() + }; + + let (tx, mut rx) = tokio::sync::mpsc::channel(STORAGE_QUERY_BUF); + let storage_fut = + storage_client.handle_trie_queries(hash, items, previous_hash, tx.clone()); + + // We don't care about the return value of this join: + // - process_events might encounter an error (if the client disconnected) + // - storage_fut might encounter an error while processing a trie queries and + // the error is propagated via the sink. + let _ = futures::future::join(storage_fut, process_events(&mut rx, &mut sink)).await; + }; + + self.executor.spawn("substrate-rpc-subscription", Some("rpc"), fut.boxed()); + } +} + +/// Sends all the events to the sink. +async fn process_events(rx: &mut mpsc::Receiver, sink: &mut Subscription) { + while let Some(event) = rx.recv().await { + if event.is_done() { + log::debug!(target: LOG_TARGET, "Finished processing partial trie query"); + } else if event.is_err() { + log::debug!(target: LOG_TARGET, "Error encountered while processing partial trie query"); + } + + if sink.send(&event).await.is_err() { + return + } + } } diff --git a/substrate/client/rpc-spec-v2/src/archive/archive_storage.rs b/substrate/client/rpc-spec-v2/src/archive/archive_storage.rs index 26e7c299de41..5a3920882f00 100644 --- a/substrate/client/rpc-spec-v2/src/archive/archive_storage.rs +++ b/substrate/client/rpc-spec-v2/src/archive/archive_storage.rs @@ -18,15 +18,28 @@ //! Implementation of the `archive_storage` method. 
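The subscription plumbing added above — a blocking worker pushing events into a small bounded channel that `process_events` drains into the sink — reduces to the following self-contained sketch. It illustrates the backpressure mechanism only and is not code from the patch; the channel capacity mirrors `STORAGE_QUERY_BUF`, and it assumes tokio with the `rt-multi-thread`, `macros`, and `sync` features.

```rust
use tokio::sync::mpsc;

// Mirrors STORAGE_QUERY_BUF above: a small buffer, since the JSON-RPC server
// already buffers per connection.
const BUF: usize = 16;

#[tokio::main]
async fn main() {
    let (tx, mut rx) = mpsc::channel::<u32>(BUF);

    // Producer runs on the blocking pool (the analogue of `spawn_blocking` in
    // `handle_trie_queries`). `blocking_send` parks the thread whenever the
    // consumer falls behind, which bounds memory use.
    let producer = tokio::task::spawn_blocking(move || {
        for i in 0..100u32 {
            if tx.blocking_send(i).is_err() {
                return; // receiver dropped, e.g. the client disconnected
            }
        }
    });

    // Consumer: the analogue of `process_events`, forwarding into a sink.
    while let Some(event) = rx.recv().await {
        println!("event {event}");
    }

    producer.await.expect("producer panicked");
}
```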
-use std::sync::Arc; +use std::{ + collections::{hash_map::Entry, HashMap}, + sync::Arc, +}; +use itertools::Itertools; use sc_client_api::{Backend, ChildInfo, StorageKey, StorageProvider}; use sp_runtime::traits::Block as BlockT; -use crate::common::{ - events::{ArchiveStorageResult, PaginatedStorageQuery, StorageQueryType}, - storage::{IterQueryType, QueryIter, Storage}, +use super::error::Error as ArchiveError; +use crate::{ + archive::archive::LOG_TARGET, + common::{ + events::{ + ArchiveStorageDiffEvent, ArchiveStorageDiffItem, ArchiveStorageDiffOperationType, + ArchiveStorageDiffResult, ArchiveStorageDiffType, ArchiveStorageResult, + PaginatedStorageQuery, StorageQueryType, StorageResult, + }, + storage::{IterQueryType, QueryIter, Storage}, + }, }; +use tokio::sync::mpsc; /// Generates the events of the `archive_storage` method. pub struct ArchiveStorage { @@ -127,3 +140,811 @@ where ArchiveStorageResult::ok(storage_results, discarded_items) } } + +/// Parse hex-encoded string parameter as raw bytes. +/// +/// If the parsing fails, returns an error propagated to the RPC method. +pub fn parse_hex_param(param: String) -> Result, ArchiveError> { + // Methods can accept empty parameters. + if param.is_empty() { + return Ok(Default::default()) + } + + array_bytes::hex2bytes(¶m).map_err(|_| ArchiveError::InvalidParam(param)) +} + +#[derive(Debug, PartialEq, Clone)] +pub struct DiffDetails { + key: StorageKey, + return_type: ArchiveStorageDiffType, + child_trie_key: Option, + child_trie_key_string: Option, +} + +/// The type of storage query. +#[derive(Debug, PartialEq, Clone, Copy)] +enum FetchStorageType { + /// Only fetch the value. + Value, + /// Only fetch the hash. + Hash, + /// Fetch both the value and the hash. + Both, +} + +/// The return value of the `fetch_storage` method. +#[derive(Debug, PartialEq, Clone)] +enum FetchedStorage { + /// Storage value under a key. + Value(StorageResult), + /// Storage hash under a key. + Hash(StorageResult), + /// Both storage value and hash under a key. + Both { value: StorageResult, hash: StorageResult }, +} + +pub struct ArchiveStorageDiff { + client: Storage, +} + +impl ArchiveStorageDiff { + pub fn new(client: Arc) -> Self { + Self { client: Storage::new(client) } + } +} + +impl ArchiveStorageDiff +where + Block: BlockT + 'static, + BE: Backend + 'static, + Client: StorageProvider + Send + Sync + 'static, +{ + /// Fetch the storage from the given key. + fn fetch_storage( + &self, + hash: Block::Hash, + key: StorageKey, + maybe_child_trie: Option, + ty: FetchStorageType, + ) -> Result, String> { + match ty { + FetchStorageType::Value => { + let result = self.client.query_value(hash, &key, maybe_child_trie.as_ref())?; + + Ok(result.map(FetchedStorage::Value)) + }, + + FetchStorageType::Hash => { + let result = self.client.query_hash(hash, &key, maybe_child_trie.as_ref())?; + + Ok(result.map(FetchedStorage::Hash)) + }, + + FetchStorageType::Both => { + let Some(value) = self.client.query_value(hash, &key, maybe_child_trie.as_ref())? + else { + return Ok(None); + }; + + let Some(hash) = self.client.query_hash(hash, &key, maybe_child_trie.as_ref())? + else { + return Ok(None); + }; + + Ok(Some(FetchedStorage::Both { value, hash })) + }, + } + } + + /// Check if the key belongs to the provided query items. + /// + /// A key belongs to the query items when: + /// - the provided key is a prefix of the key in the query items. + /// - the query items are empty. + /// + /// Returns an optional `FetchStorageType` based on the query items. 
+ /// If the key does not belong to the query items, returns `None`. + fn belongs_to_query(key: &StorageKey, items: &[DiffDetails]) -> Option { + // User has requested all keys, by default this fallbacks to fetching the value. + if items.is_empty() { + return Some(FetchStorageType::Value) + } + + let mut value = false; + let mut hash = false; + + for item in items { + if key.as_ref().starts_with(&item.key.as_ref()) { + match item.return_type { + ArchiveStorageDiffType::Value => value = true, + ArchiveStorageDiffType::Hash => hash = true, + } + } + } + + match (value, hash) { + (true, true) => Some(FetchStorageType::Both), + (true, false) => Some(FetchStorageType::Value), + (false, true) => Some(FetchStorageType::Hash), + (false, false) => None, + } + } + + /// Send the provided result to the `tx` sender. + /// + /// Returns `false` if the sender has been closed. + fn send_result( + tx: &mpsc::Sender, + result: FetchedStorage, + operation_type: ArchiveStorageDiffOperationType, + child_trie_key: Option, + ) -> bool { + let items = match result { + FetchedStorage::Value(storage_result) | FetchedStorage::Hash(storage_result) => + vec![storage_result], + FetchedStorage::Both { value, hash } => vec![value, hash], + }; + + for item in items { + let res = ArchiveStorageDiffEvent::StorageDiff(ArchiveStorageDiffResult { + key: item.key, + result: item.result, + operation_type, + child_trie_key: child_trie_key.clone(), + }); + if tx.blocking_send(res).is_err() { + return false + } + } + + true + } + + fn handle_trie_queries_inner( + &self, + hash: Block::Hash, + previous_hash: Block::Hash, + items: Vec, + tx: &mpsc::Sender, + ) -> Result<(), String> { + // Parse the child trie key as `ChildInfo` and `String`. + let maybe_child_trie = items.first().and_then(|item| item.child_trie_key.clone()); + let maybe_child_trie_str = + items.first().and_then(|item| item.child_trie_key_string.clone()); + + // Iterator over the current block and previous block + // at the same time to compare the keys. This approach effectively + // leverages backpressure to avoid memory consumption. + let keys_iter = self.client.raw_keys_iter(hash, maybe_child_trie.clone())?; + let previous_keys_iter = + self.client.raw_keys_iter(previous_hash, maybe_child_trie.clone())?; + + let mut diff_iter = lexicographic_diff(keys_iter, previous_keys_iter); + + while let Some(item) = diff_iter.next() { + let (operation_type, key) = match item { + Diff::Added(key) => (ArchiveStorageDiffOperationType::Added, key), + Diff::Deleted(key) => (ArchiveStorageDiffOperationType::Deleted, key), + Diff::Equal(key) => (ArchiveStorageDiffOperationType::Modified, key), + }; + + let Some(fetch_type) = Self::belongs_to_query(&key, &items) else { + // The key does not belong the the query items. + continue; + }; + + let maybe_result = match operation_type { + ArchiveStorageDiffOperationType::Added => + self.fetch_storage(hash, key.clone(), maybe_child_trie.clone(), fetch_type)?, + ArchiveStorageDiffOperationType::Deleted => self.fetch_storage( + previous_hash, + key.clone(), + maybe_child_trie.clone(), + fetch_type, + )?, + ArchiveStorageDiffOperationType::Modified => { + let Some(storage_result) = self.fetch_storage( + hash, + key.clone(), + maybe_child_trie.clone(), + fetch_type, + )? + else { + continue + }; + + let Some(previous_storage_result) = self.fetch_storage( + previous_hash, + key.clone(), + maybe_child_trie.clone(), + fetch_type, + )? + else { + continue + }; + + // For modified records we need to check the actual storage values. 
+ if storage_result == previous_storage_result { + continue + } + + Some(storage_result) + }, + }; + + if let Some(storage_result) = maybe_result { + if !Self::send_result( + &tx, + storage_result, + operation_type, + maybe_child_trie_str.clone(), + ) { + return Ok(()) + } + } + } + + Ok(()) + } + + /// This method will iterate over the keys of the main trie or a child trie and fetch the + /// given keys. The fetched keys will be sent to the provided `tx` sender to leverage + /// the backpressure mechanism. + pub async fn handle_trie_queries( + &self, + hash: Block::Hash, + items: Vec>, + previous_hash: Block::Hash, + tx: mpsc::Sender, + ) -> Result<(), tokio::task::JoinError> { + let this = ArchiveStorageDiff { client: self.client.clone() }; + + tokio::task::spawn_blocking(move || { + // Deduplicate the items. + let mut trie_items = match deduplicate_storage_diff_items(items) { + Ok(items) => items, + Err(error) => { + let _ = tx.blocking_send(ArchiveStorageDiffEvent::err(error.to_string())); + return + }, + }; + // Default to using the main storage trie if no items are provided. + if trie_items.is_empty() { + trie_items.push(Vec::new()); + } + log::trace!(target: LOG_TARGET, "Storage diff deduplicated items: {:?}", trie_items); + + for items in trie_items { + log::trace!( + target: LOG_TARGET, + "handle_trie_queries: hash={:?}, previous_hash={:?}, items={:?}", + hash, + previous_hash, + items + ); + + let result = this.handle_trie_queries_inner(hash, previous_hash, items, &tx); + + if let Err(error) = result { + log::trace!( + target: LOG_TARGET, + "handle_trie_queries: sending error={:?}", + error, + ); + + let _ = tx.blocking_send(ArchiveStorageDiffEvent::err(error)); + + return + } else { + log::trace!( + target: LOG_TARGET, + "handle_trie_queries: sending storage diff done", + ); + } + } + + let _ = tx.blocking_send(ArchiveStorageDiffEvent::StorageDiffDone); + }) + .await?; + + Ok(()) + } +} + +/// The result of the `lexicographic_diff` method. +#[derive(Debug, PartialEq)] +enum Diff { + Added(T), + Deleted(T), + Equal(T), +} + +/// Compare two iterators lexicographically and return the differences. +fn lexicographic_diff( + mut left: LeftIter, + mut right: RightIter, +) -> impl Iterator> +where + T: Ord, + LeftIter: Iterator, + RightIter: Iterator, +{ + let mut a = left.next(); + let mut b = right.next(); + + core::iter::from_fn(move || match (a.take(), b.take()) { + (Some(a_value), Some(b_value)) => + if a_value < b_value { + b = Some(b_value); + a = left.next(); + + Some(Diff::Added(a_value)) + } else if a_value > b_value { + a = Some(a_value); + b = right.next(); + + Some(Diff::Deleted(b_value)) + } else { + a = left.next(); + b = right.next(); + + Some(Diff::Equal(a_value)) + }, + (Some(a_value), None) => { + a = left.next(); + Some(Diff::Added(a_value)) + }, + (None, Some(b_value)) => { + b = right.next(); + Some(Diff::Deleted(b_value)) + }, + (None, None) => None, + }) +} + +/// Deduplicate the provided items and return a list of `DiffDetails`. +/// +/// Each list corresponds to a single child trie or the main trie. +fn deduplicate_storage_diff_items( + items: Vec>, +) -> Result>, ArchiveError> { + let mut deduplicated: HashMap, Vec> = HashMap::new(); + + for diff_item in items { + // Ensure the provided hex keys are valid before deduplication. 
+ let key = StorageKey(parse_hex_param(diff_item.key)?); + let child_trie_key_string = diff_item.child_trie_key.clone(); + let child_trie_key = diff_item + .child_trie_key + .map(|child_trie_key| parse_hex_param(child_trie_key)) + .transpose()? + .map(ChildInfo::new_default_from_vec); + + let diff_item = DiffDetails { + key, + return_type: diff_item.return_type, + child_trie_key: child_trie_key.clone(), + child_trie_key_string, + }; + + match deduplicated.entry(child_trie_key.clone()) { + Entry::Occupied(mut entry) => { + let mut should_insert = true; + + for existing in entry.get() { + // This points to a different return type. + if existing.return_type != diff_item.return_type { + continue + } + // Keys and return types are identical. + if existing.key == diff_item.key { + should_insert = false; + break + } + + // The following two conditions ensure that we keep the shortest key. + + // The current key is a longer prefix of the existing key. + if diff_item.key.as_ref().starts_with(&existing.key.as_ref()) { + should_insert = false; + break + } + + // The existing key is a longer prefix of the current key. + // We need to keep the current key and remove the existing one. + if existing.key.as_ref().starts_with(&diff_item.key.as_ref()) { + let to_remove = existing.clone(); + entry.get_mut().retain(|item| item != &to_remove); + break; + } + } + + if should_insert { + entry.get_mut().push(diff_item); + } + }, + Entry::Vacant(entry) => { + entry.insert(vec![diff_item]); + }, + } + } + + Ok(deduplicated + .into_iter() + .sorted_by_key(|(child_trie_key, _)| child_trie_key.clone()) + .map(|(_, values)| values) + .collect()) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn dedup_empty() { + let items = vec![]; + let result = deduplicate_storage_diff_items(items).unwrap(); + assert!(result.is_empty()); + } + + #[test] + fn dedup_single() { + let items = vec![ArchiveStorageDiffItem { + key: "0x01".into(), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + }]; + let result = deduplicate_storage_diff_items(items).unwrap(); + assert_eq!(result.len(), 1); + assert_eq!(result[0].len(), 1); + + let expected = DiffDetails { + key: StorageKey(vec![1]), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + child_trie_key_string: None, + }; + assert_eq!(result[0][0], expected); + } + + #[test] + fn dedup_with_different_keys() { + let items = vec![ + ArchiveStorageDiffItem { + key: "0x01".into(), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + }, + ArchiveStorageDiffItem { + key: "0x02".into(), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + }, + ]; + let result = deduplicate_storage_diff_items(items).unwrap(); + assert_eq!(result.len(), 1); + assert_eq!(result[0].len(), 2); + + let expected = vec![ + DiffDetails { + key: StorageKey(vec![1]), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + child_trie_key_string: None, + }, + DiffDetails { + key: StorageKey(vec![2]), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + child_trie_key_string: None, + }, + ]; + assert_eq!(result[0], expected); + } + + #[test] + fn dedup_with_same_keys() { + // Identical keys. 
+ let items = vec![ + ArchiveStorageDiffItem { + key: "0x01".into(), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + }, + ArchiveStorageDiffItem { + key: "0x01".into(), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + }, + ]; + let result = deduplicate_storage_diff_items(items).unwrap(); + assert_eq!(result.len(), 1); + assert_eq!(result[0].len(), 1); + + let expected = vec![DiffDetails { + key: StorageKey(vec![1]), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + child_trie_key_string: None, + }]; + assert_eq!(result[0], expected); + } + + #[test] + fn dedup_with_same_prefix() { + // Identical keys. + let items = vec![ + ArchiveStorageDiffItem { + key: "0x01".into(), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + }, + ArchiveStorageDiffItem { + key: "0x01ff".into(), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + }, + ]; + let result = deduplicate_storage_diff_items(items).unwrap(); + assert_eq!(result.len(), 1); + assert_eq!(result[0].len(), 1); + + let expected = vec![DiffDetails { + key: StorageKey(vec![1]), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + child_trie_key_string: None, + }]; + assert_eq!(result[0], expected); + } + + #[test] + fn dedup_with_different_return_types() { + let items = vec![ + ArchiveStorageDiffItem { + key: "0x01".into(), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + }, + ArchiveStorageDiffItem { + key: "0x01".into(), + return_type: ArchiveStorageDiffType::Hash, + child_trie_key: None, + }, + ]; + let result = deduplicate_storage_diff_items(items).unwrap(); + assert_eq!(result.len(), 1); + assert_eq!(result[0].len(), 2); + + let expected = vec![ + DiffDetails { + key: StorageKey(vec![1]), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + child_trie_key_string: None, + }, + DiffDetails { + key: StorageKey(vec![1]), + return_type: ArchiveStorageDiffType::Hash, + child_trie_key: None, + child_trie_key_string: None, + }, + ]; + assert_eq!(result[0], expected); + } + + #[test] + fn dedup_with_different_child_tries() { + let items = vec![ + ArchiveStorageDiffItem { + key: "0x01".into(), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: Some("0x01".into()), + }, + ArchiveStorageDiffItem { + key: "0x01".into(), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: Some("0x02".into()), + }, + ]; + let result = deduplicate_storage_diff_items(items).unwrap(); + assert_eq!(result.len(), 2); + assert_eq!(result[0].len(), 1); + assert_eq!(result[1].len(), 1); + + let expected = vec![ + vec![DiffDetails { + key: StorageKey(vec![1]), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: Some(ChildInfo::new_default_from_vec(vec![1])), + child_trie_key_string: Some("0x01".into()), + }], + vec![DiffDetails { + key: StorageKey(vec![1]), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: Some(ChildInfo::new_default_from_vec(vec![2])), + child_trie_key_string: Some("0x02".into()), + }], + ]; + assert_eq!(result, expected); + } + + #[test] + fn dedup_with_same_child_tries() { + let items = vec![ + ArchiveStorageDiffItem { + key: "0x01".into(), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: Some("0x01".into()), + }, + ArchiveStorageDiffItem { + key: "0x01".into(), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: Some("0x01".into()), + }, + ]; + let result = deduplicate_storage_diff_items(items).unwrap(); + 
assert_eq!(result.len(), 1); + assert_eq!(result[0].len(), 1); + + let expected = vec![DiffDetails { + key: StorageKey(vec![1]), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: Some(ChildInfo::new_default_from_vec(vec![1])), + child_trie_key_string: Some("0x01".into()), + }]; + assert_eq!(result[0], expected); + } + + #[test] + fn dedup_with_shorter_key_reverse_order() { + let items = vec![ + ArchiveStorageDiffItem { + key: "0x01ff".into(), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + }, + ArchiveStorageDiffItem { + key: "0x01".into(), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + }, + ]; + let result = deduplicate_storage_diff_items(items).unwrap(); + assert_eq!(result.len(), 1); + assert_eq!(result[0].len(), 1); + + let expected = vec![DiffDetails { + key: StorageKey(vec![1]), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + child_trie_key_string: None, + }]; + assert_eq!(result[0], expected); + } + + #[test] + fn dedup_multiple_child_tries() { + let items = vec![ + ArchiveStorageDiffItem { + key: "0x02".into(), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + }, + ArchiveStorageDiffItem { + key: "0x01".into(), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: Some("0x01".into()), + }, + ArchiveStorageDiffItem { + key: "0x02".into(), + return_type: ArchiveStorageDiffType::Hash, + child_trie_key: Some("0x01".into()), + }, + ArchiveStorageDiffItem { + key: "0x01".into(), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: Some("0x02".into()), + }, + ArchiveStorageDiffItem { + key: "0x01".into(), + return_type: ArchiveStorageDiffType::Hash, + child_trie_key: Some("0x02".into()), + }, + ArchiveStorageDiffItem { + key: "0x01ff".into(), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: Some("0x02".into()), + }, + ]; + + let result = deduplicate_storage_diff_items(items).unwrap(); + + let expected = vec![ + vec![DiffDetails { + key: StorageKey(vec![2]), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + child_trie_key_string: None, + }], + vec![ + DiffDetails { + key: StorageKey(vec![1]), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: Some(ChildInfo::new_default_from_vec(vec![1])), + child_trie_key_string: Some("0x01".into()), + }, + DiffDetails { + key: StorageKey(vec![2]), + return_type: ArchiveStorageDiffType::Hash, + child_trie_key: Some(ChildInfo::new_default_from_vec(vec![1])), + child_trie_key_string: Some("0x01".into()), + }, + ], + vec![ + DiffDetails { + key: StorageKey(vec![1]), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: Some(ChildInfo::new_default_from_vec(vec![2])), + child_trie_key_string: Some("0x02".into()), + }, + DiffDetails { + key: StorageKey(vec![1]), + return_type: ArchiveStorageDiffType::Hash, + child_trie_key: Some(ChildInfo::new_default_from_vec(vec![2])), + child_trie_key_string: Some("0x02".into()), + }, + ], + ]; + + assert_eq!(result, expected); + } + + #[test] + fn test_lexicographic_diff() { + let left = vec![1, 2, 3, 4, 5]; + let right = vec![2, 3, 4, 5, 6]; + + let diff = lexicographic_diff(left.into_iter(), right.into_iter()).collect::>(); + let expected = vec![ + Diff::Added(1), + Diff::Equal(2), + Diff::Equal(3), + Diff::Equal(4), + Diff::Equal(5), + Diff::Deleted(6), + ]; + assert_eq!(diff, expected); + } + + #[test] + fn test_lexicographic_diff_one_side_empty() { + let left = vec![]; + let right = vec![1, 2, 3, 4, 5, 6]; + + let diff = 
lexicographic_diff(left.into_iter(), right.into_iter()).collect::>(); + let expected = vec![ + Diff::Deleted(1), + Diff::Deleted(2), + Diff::Deleted(3), + Diff::Deleted(4), + Diff::Deleted(5), + Diff::Deleted(6), + ]; + assert_eq!(diff, expected); + + let left = vec![1, 2, 3, 4, 5, 6]; + let right = vec![]; + + let diff = lexicographic_diff(left.into_iter(), right.into_iter()).collect::>(); + let expected = vec![ + Diff::Added(1), + Diff::Added(2), + Diff::Added(3), + Diff::Added(4), + Diff::Added(5), + Diff::Added(6), + ]; + assert_eq!(diff, expected); + } +} diff --git a/substrate/client/rpc-spec-v2/src/archive/tests.rs b/substrate/client/rpc-spec-v2/src/archive/tests.rs index 078016f5b3e2..994c5d28bd61 100644 --- a/substrate/client/rpc-spec-v2/src/archive/tests.rs +++ b/substrate/client/rpc-spec-v2/src/archive/tests.rs @@ -18,8 +18,9 @@ use crate::{ common::events::{ - ArchiveStorageMethodOk, ArchiveStorageResult, PaginatedStorageQuery, StorageQueryType, - StorageResultType, + ArchiveStorageDiffEvent, ArchiveStorageDiffItem, ArchiveStorageDiffOperationType, + ArchiveStorageDiffResult, ArchiveStorageDiffType, ArchiveStorageMethodOk, + ArchiveStorageResult, PaginatedStorageQuery, StorageQueryType, StorageResultType, }, hex_string, MethodResult, }; @@ -32,10 +33,13 @@ use super::{ use assert_matches::assert_matches; use codec::{Decode, Encode}; use jsonrpsee::{ - core::EmptyServerParams as EmptyParams, rpc_params, MethodsError as Error, RpcModule, + core::{server::Subscription as RpcSubscription, EmptyServerParams as EmptyParams}, + rpc_params, MethodsError as Error, RpcModule, }; + use sc_block_builder::BlockBuilderBuilder; use sc_client_api::ChildInfo; +use sc_rpc::testing::TokioTestExecutor; use sp_blockchain::HeaderBackend; use sp_consensus::BlockOrigin; use sp_core::{Blake2Hasher, Hasher}; @@ -78,6 +82,7 @@ fn setup_api( client.clone(), backend, CHAIN_GENESIS, + Arc::new(TokioTestExecutor::default()), ArchiveConfig { max_descendant_responses, max_queried_items }, ) .into_rpc(); @@ -85,6 +90,15 @@ fn setup_api( (client, api) } +async fn get_next_event(sub: &mut RpcSubscription) -> T { + let (event, _sub_id) = tokio::time::timeout(std::time::Duration::from_secs(60), sub.next()) + .await + .unwrap() + .unwrap() + .unwrap(); + event +} + #[tokio::test] async fn archive_genesis() { let (_client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); @@ -838,3 +852,260 @@ async fn archive_storage_discarded_items() { _ => panic!("Unexpected result"), }; } + +#[tokio::test] +async fn archive_storage_diff_main_trie() { + let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); + + let mut builder = BlockBuilderBuilder::new(&*client) + .on_parent_block(client.chain_info().genesis_hash) + .with_parent_block_number(0) + .build() + .unwrap(); + builder.push_storage_change(b":A".to_vec(), Some(b"B".to_vec())).unwrap(); + builder.push_storage_change(b":AA".to_vec(), Some(b"BB".to_vec())).unwrap(); + let prev_block = builder.build().unwrap().block; + let prev_hash = format!("{:?}", prev_block.header.hash()); + client.import(BlockOrigin::Own, prev_block.clone()).await.unwrap(); + + let mut builder = BlockBuilderBuilder::new(&*client) + .on_parent_block(prev_block.hash()) + .with_parent_block_number(1) + .build() + .unwrap(); + builder.push_storage_change(b":A".to_vec(), Some(b"11".to_vec())).unwrap(); + builder.push_storage_change(b":AA".to_vec(), Some(b"22".to_vec())).unwrap(); + builder.push_storage_change(b":AAA".to_vec(), Some(b"222".to_vec())).unwrap(); + let block = 
builder.build().unwrap().block; + let block_hash = format!("{:?}", block.header.hash()); + client.import(BlockOrigin::Own, block.clone()).await.unwrap(); + + // Search for items in the main trie: + // - values of keys under ":A" + // - hashes of keys under ":AA" + let items = vec![ + ArchiveStorageDiffItem:: { + key: hex_string(b":A"), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + }, + ArchiveStorageDiffItem:: { + key: hex_string(b":AA"), + return_type: ArchiveStorageDiffType::Hash, + child_trie_key: None, + }, + ]; + let mut sub = api + .subscribe_unbounded( + "archive_unstable_storageDiff", + rpc_params![&block_hash, items.clone(), &prev_hash], + ) + .await + .unwrap(); + + let event = get_next_event::(&mut sub).await; + assert_eq!( + ArchiveStorageDiffEvent::StorageDiff(ArchiveStorageDiffResult { + key: hex_string(b":A"), + result: StorageResultType::Value(hex_string(b"11")), + operation_type: ArchiveStorageDiffOperationType::Modified, + child_trie_key: None, + }), + event, + ); + + let event = get_next_event::(&mut sub).await; + assert_eq!( + ArchiveStorageDiffEvent::StorageDiff(ArchiveStorageDiffResult { + key: hex_string(b":AA"), + result: StorageResultType::Value(hex_string(b"22")), + operation_type: ArchiveStorageDiffOperationType::Modified, + child_trie_key: None, + }), + event, + ); + + let event = get_next_event::(&mut sub).await; + assert_eq!( + ArchiveStorageDiffEvent::StorageDiff(ArchiveStorageDiffResult { + key: hex_string(b":AA"), + result: StorageResultType::Hash(format!("{:?}", Blake2Hasher::hash(b"22"))), + operation_type: ArchiveStorageDiffOperationType::Modified, + child_trie_key: None, + }), + event, + ); + + // Added key. + let event = get_next_event::(&mut sub).await; + assert_eq!( + ArchiveStorageDiffEvent::StorageDiff(ArchiveStorageDiffResult { + key: hex_string(b":AAA"), + result: StorageResultType::Value(hex_string(b"222")), + operation_type: ArchiveStorageDiffOperationType::Added, + child_trie_key: None, + }), + event, + ); + + let event = get_next_event::(&mut sub).await; + assert_eq!( + ArchiveStorageDiffEvent::StorageDiff(ArchiveStorageDiffResult { + key: hex_string(b":AAA"), + result: StorageResultType::Hash(format!("{:?}", Blake2Hasher::hash(b"222"))), + operation_type: ArchiveStorageDiffOperationType::Added, + child_trie_key: None, + }), + event, + ); + + let event = get_next_event::(&mut sub).await; + assert_eq!(ArchiveStorageDiffEvent::StorageDiffDone, event); +} + +#[tokio::test] +async fn archive_storage_diff_no_changes() { + let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); + + // Build 2 identical blocks. 
+ let mut builder = BlockBuilderBuilder::new(&*client) + .on_parent_block(client.chain_info().genesis_hash) + .with_parent_block_number(0) + .build() + .unwrap(); + builder.push_storage_change(b":A".to_vec(), Some(b"B".to_vec())).unwrap(); + builder.push_storage_change(b":AA".to_vec(), Some(b"BB".to_vec())).unwrap(); + builder.push_storage_change(b":B".to_vec(), Some(b"CC".to_vec())).unwrap(); + builder.push_storage_change(b":BA".to_vec(), Some(b"CC".to_vec())).unwrap(); + let prev_block = builder.build().unwrap().block; + let prev_hash = format!("{:?}", prev_block.header.hash()); + client.import(BlockOrigin::Own, prev_block.clone()).await.unwrap(); + + let mut builder = BlockBuilderBuilder::new(&*client) + .on_parent_block(prev_block.hash()) + .with_parent_block_number(1) + .build() + .unwrap(); + builder.push_storage_change(b":A".to_vec(), Some(b"B".to_vec())).unwrap(); + builder.push_storage_change(b":AA".to_vec(), Some(b"BB".to_vec())).unwrap(); + let block = builder.build().unwrap().block; + let block_hash = format!("{:?}", block.header.hash()); + client.import(BlockOrigin::Own, block.clone()).await.unwrap(); + + // Search for items in the main trie with keys prefixed with ":A". + let items = vec![ArchiveStorageDiffItem:: { + key: hex_string(b":A"), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + }]; + let mut sub = api + .subscribe_unbounded( + "archive_unstable_storageDiff", + rpc_params![&block_hash, items.clone(), &prev_hash], + ) + .await + .unwrap(); + + let event = get_next_event::(&mut sub).await; + assert_eq!(ArchiveStorageDiffEvent::StorageDiffDone, event); +} + +#[tokio::test] +async fn archive_storage_diff_deleted_changes() { + let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); + + // Blocks are imported as forks. + let mut builder = BlockBuilderBuilder::new(&*client) + .on_parent_block(client.chain_info().genesis_hash) + .with_parent_block_number(0) + .build() + .unwrap(); + builder.push_storage_change(b":A".to_vec(), Some(b"B".to_vec())).unwrap(); + builder.push_storage_change(b":AA".to_vec(), Some(b"BB".to_vec())).unwrap(); + builder.push_storage_change(b":B".to_vec(), Some(b"CC".to_vec())).unwrap(); + builder.push_storage_change(b":BA".to_vec(), Some(b"CC".to_vec())).unwrap(); + let prev_block = builder.build().unwrap().block; + let prev_hash = format!("{:?}", prev_block.header.hash()); + client.import(BlockOrigin::Own, prev_block.clone()).await.unwrap(); + + let mut builder = BlockBuilderBuilder::new(&*client) + .on_parent_block(client.chain_info().genesis_hash) + .with_parent_block_number(0) + .build() + .unwrap(); + builder + .push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 41, + nonce: 0, + }) + .unwrap(); + builder.push_storage_change(b":A".to_vec(), Some(b"B".to_vec())).unwrap(); + let block = builder.build().unwrap().block; + let block_hash = format!("{:?}", block.header.hash()); + client.import(BlockOrigin::Own, block.clone()).await.unwrap(); + + // Search for items in the main trie with keys prefixed with ":A". 
+ let items = vec![ArchiveStorageDiffItem:: { + key: hex_string(b":A"), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + }]; + + let mut sub = api + .subscribe_unbounded( + "archive_unstable_storageDiff", + rpc_params![&block_hash, items.clone(), &prev_hash], + ) + .await + .unwrap(); + + let event = get_next_event::(&mut sub).await; + assert_eq!( + ArchiveStorageDiffEvent::StorageDiff(ArchiveStorageDiffResult { + key: hex_string(b":AA"), + result: StorageResultType::Value(hex_string(b"BB")), + operation_type: ArchiveStorageDiffOperationType::Deleted, + child_trie_key: None, + }), + event, + ); + + let event = get_next_event::(&mut sub).await; + assert_eq!(ArchiveStorageDiffEvent::StorageDiffDone, event); +} + +#[tokio::test] +async fn archive_storage_diff_invalid_params() { + let invalid_hash = hex_string(&INVALID_HASH); + let (_, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); + + // Invalid shape for parameters. + let items: Vec> = Vec::new(); + let err = api + .subscribe_unbounded( + "archive_unstable_storageDiff", + rpc_params!["123", items.clone(), &invalid_hash], + ) + .await + .unwrap_err(); + assert_matches!(err, + Error::JsonRpc(ref err) if err.code() == crate::chain_head::error::json_rpc_spec::INVALID_PARAM_ERROR && err.message() == "Invalid params" + ); + + // The shape is right, but the block hash is invalid. + let items: Vec> = Vec::new(); + let mut sub = api + .subscribe_unbounded( + "archive_unstable_storageDiff", + rpc_params![&invalid_hash, items.clone(), &invalid_hash], + ) + .await + .unwrap(); + + let event = get_next_event::(&mut sub).await; + assert_matches!(event, + ArchiveStorageDiffEvent::StorageDiffError(ref err) if err.error.contains("Header was not found") + ); +} diff --git a/substrate/client/rpc-spec-v2/src/common/events.rs b/substrate/client/rpc-spec-v2/src/common/events.rs index b1627d74c844..198a60bf4cac 100644 --- a/substrate/client/rpc-spec-v2/src/common/events.rs +++ b/substrate/client/rpc-spec-v2/src/common/events.rs @@ -81,7 +81,7 @@ pub struct StorageResult { } /// The type of the storage query. -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub enum StorageResultType { /// Fetch the value of the provided key. @@ -136,17 +136,221 @@ pub struct ArchiveStorageMethodOk { } /// The error of a storage call. -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct ArchiveStorageMethodErr { /// Reported error. pub error: String, } +/// The type of the archive storage difference query. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub enum ArchiveStorageDiffType { + /// The result is provided as value of the key. + Value, + /// The result the hash of the value of the key. + Hash, +} + +/// The storage item to query. +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct ArchiveStorageDiffItem { + /// The provided key. + pub key: Key, + /// The type of the storage query. + pub return_type: ArchiveStorageDiffType, + /// The child trie key if provided. + #[serde(skip_serializing_if = "Option::is_none")] + #[serde(default)] + pub child_trie_key: Option, +} + +/// The result of a storage difference call. 
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct ArchiveStorageDiffMethodResult { + /// Reported results. + pub result: Vec, +} + +/// The result of a storage difference call operation type. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub enum ArchiveStorageDiffOperationType { + /// The key is added. + Added, + /// The key is modified. + Modified, + /// The key is removed. + Deleted, +} + +/// The result of an individual storage difference key. +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct ArchiveStorageDiffResult { + /// The hex-encoded key of the result. + pub key: String, + /// The result of the query. + #[serde(flatten)] + pub result: StorageResultType, + /// The operation type. + #[serde(rename = "type")] + pub operation_type: ArchiveStorageDiffOperationType, + /// The child trie key if provided. + #[serde(skip_serializing_if = "Option::is_none")] + #[serde(default)] + pub child_trie_key: Option, +} + +/// The event generated by the `archive_storageDiff` method. +/// +/// The `archive_storageDiff` can generate the following events: +/// - `storageDiff` event - generated when a `ArchiveStorageDiffResult` is produced. +/// - `storageDiffError` event - generated when an error is produced. +/// - `storageDiffDone` event - generated when the `archive_storageDiff` method completed. +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +#[serde(tag = "event")] +pub enum ArchiveStorageDiffEvent { + /// The `storageDiff` event. + StorageDiff(ArchiveStorageDiffResult), + /// The `storageDiffError` event. + StorageDiffError(ArchiveStorageMethodErr), + /// The `storageDiffDone` event. + StorageDiffDone, +} + +impl ArchiveStorageDiffEvent { + /// Create a new `ArchiveStorageDiffEvent::StorageDiffError` event. + pub fn err(error: String) -> Self { + Self::StorageDiffError(ArchiveStorageMethodErr { error }) + } + + /// Checks if the event is a `StorageDiffDone` event. + pub fn is_done(&self) -> bool { + matches!(self, Self::StorageDiffDone) + } + + /// Checks if the event is a `StorageDiffError` event. + pub fn is_err(&self) -> bool { + matches!(self, Self::StorageDiffError(_)) + } +} + #[cfg(test)] mod tests { use super::*; + #[test] + fn archive_diff_input() { + // Item with Value. + let item = ArchiveStorageDiffItem { + key: "0x1", + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + }; + // Encode + let ser = serde_json::to_string(&item).unwrap(); + let exp = r#"{"key":"0x1","returnType":"value"}"#; + assert_eq!(ser, exp); + // Decode + let dec: ArchiveStorageDiffItem<&str> = serde_json::from_str(exp).unwrap(); + assert_eq!(dec, item); + + // Item with Hash. + let item = ArchiveStorageDiffItem { + key: "0x1", + return_type: ArchiveStorageDiffType::Hash, + child_trie_key: None, + }; + // Encode + let ser = serde_json::to_string(&item).unwrap(); + let exp = r#"{"key":"0x1","returnType":"hash"}"#; + assert_eq!(ser, exp); + // Decode + let dec: ArchiveStorageDiffItem<&str> = serde_json::from_str(exp).unwrap(); + assert_eq!(dec, item); + + // Item with Value and child trie key. 
+ let item = ArchiveStorageDiffItem { + key: "0x1", + return_type: ArchiveStorageDiffType::Value, + child_trie_key: Some("0x2"), + }; + // Encode + let ser = serde_json::to_string(&item).unwrap(); + let exp = r#"{"key":"0x1","returnType":"value","childTrieKey":"0x2"}"#; + assert_eq!(ser, exp); + // Decode + let dec: ArchiveStorageDiffItem<&str> = serde_json::from_str(exp).unwrap(); + assert_eq!(dec, item); + + // Item with Hash and child trie key. + let item = ArchiveStorageDiffItem { + key: "0x1", + return_type: ArchiveStorageDiffType::Hash, + child_trie_key: Some("0x2"), + }; + // Encode + let ser = serde_json::to_string(&item).unwrap(); + let exp = r#"{"key":"0x1","returnType":"hash","childTrieKey":"0x2"}"#; + assert_eq!(ser, exp); + // Decode + let dec: ArchiveStorageDiffItem<&str> = serde_json::from_str(exp).unwrap(); + assert_eq!(dec, item); + } + + #[test] + fn archive_diff_output() { + // Item with Value. + let item = ArchiveStorageDiffResult { + key: "0x1".into(), + result: StorageResultType::Value("res".into()), + operation_type: ArchiveStorageDiffOperationType::Added, + child_trie_key: None, + }; + // Encode + let ser = serde_json::to_string(&item).unwrap(); + let exp = r#"{"key":"0x1","value":"res","type":"added"}"#; + assert_eq!(ser, exp); + // Decode + let dec: ArchiveStorageDiffResult = serde_json::from_str(exp).unwrap(); + assert_eq!(dec, item); + + // Item with Hash. + let item = ArchiveStorageDiffResult { + key: "0x1".into(), + result: StorageResultType::Hash("res".into()), + operation_type: ArchiveStorageDiffOperationType::Modified, + child_trie_key: None, + }; + // Encode + let ser = serde_json::to_string(&item).unwrap(); + let exp = r#"{"key":"0x1","hash":"res","type":"modified"}"#; + assert_eq!(ser, exp); + // Decode + let dec: ArchiveStorageDiffResult = serde_json::from_str(exp).unwrap(); + assert_eq!(dec, item); + + // Item with Hash, child trie key and removed. + let item = ArchiveStorageDiffResult { + key: "0x1".into(), + result: StorageResultType::Hash("res".into()), + operation_type: ArchiveStorageDiffOperationType::Deleted, + child_trie_key: Some("0x2".into()), + }; + // Encode + let ser = serde_json::to_string(&item).unwrap(); + let exp = r#"{"key":"0x1","hash":"res","type":"deleted","childTrieKey":"0x2"}"#; + assert_eq!(ser, exp); + // Decode + let dec: ArchiveStorageDiffResult = serde_json::from_str(exp).unwrap(); + assert_eq!(dec, item); + } + #[test] fn storage_result() { // Item with Value. diff --git a/substrate/client/rpc-spec-v2/src/common/storage.rs b/substrate/client/rpc-spec-v2/src/common/storage.rs index 2e24a8da8ca8..673e20b2bc78 100644 --- a/substrate/client/rpc-spec-v2/src/common/storage.rs +++ b/substrate/client/rpc-spec-v2/src/common/storage.rs @@ -248,4 +248,19 @@ where }); Ok((ret, maybe_next_query)) } + + /// Raw iterator over the keys. + pub fn raw_keys_iter( + &self, + hash: Block::Hash, + child_key: Option, + ) -> Result, String> { + let keys_iter = if let Some(child_key) = child_key { + self.client.child_storage_keys(hash, child_key, None, None) + } else { + self.client.storage_keys(hash, None, None) + }; + + keys_iter.map_err(|err| err.to_string()) + } } diff --git a/substrate/client/service/src/builder.rs b/substrate/client/service/src/builder.rs index ac9371a8941b..027a444012af 100644 --- a/substrate/client/service/src/builder.rs +++ b/substrate/client/service/src/builder.rs @@ -755,6 +755,7 @@ where client.clone(), backend.clone(), genesis_hash, + task_executor.clone(), // Defaults to sensible limits for the `Archive`. 
 			sc_rpc_spec_v2::archive::ArchiveConfig::default(),
 		)

From 2ef2723126584dfcd6d2a9272282ee78375dbcd3 Mon Sep 17 00:00:00 2001
From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com>
Date: Wed, 27 Nov 2024 21:40:23 +0100
Subject: [PATCH 26/41] chain-spec-guide-runtime: path to wasm blob fixed
 (#6673)

In the `chain-spec-guide-runtime` crate's tests, there was an assumption that
a release version of the wasm blob exists. This PR uses the
`chain_spec_guide_runtime::runtime::WASM_BINARY_PATH` const to resolve the
correct path to the runtime blob.

---------

Co-authored-by: GitHub Action
---
 .../tests/chain_spec_builder_tests.rs         | 14 ++++++++------
 prdoc/pr_6673.prdoc                           |  7 +++++++
 2 files changed, 15 insertions(+), 6 deletions(-)
 create mode 100644 prdoc/pr_6673.prdoc

diff --git a/docs/sdk/src/reference_docs/chain_spec_runtime/tests/chain_spec_builder_tests.rs b/docs/sdk/src/reference_docs/chain_spec_runtime/tests/chain_spec_builder_tests.rs
index c2fe5a6727e6..df400b68f79d 100644
--- a/docs/sdk/src/reference_docs/chain_spec_runtime/tests/chain_spec_builder_tests.rs
+++ b/docs/sdk/src/reference_docs/chain_spec_runtime/tests/chain_spec_builder_tests.rs
@@ -1,8 +1,10 @@
 use serde_json::{json, Value};
 use std::{process::Command, str};
 
-const WASM_FILE_PATH: &str =
-	"../../../../../target/release/wbuild/chain-spec-guide-runtime/chain_spec_guide_runtime.wasm";
+fn wasm_file_path() -> &'static str {
+	chain_spec_guide_runtime::runtime::WASM_BINARY_PATH
+		.expect("chain_spec_guide_runtime wasm should exist. qed")
+}
 
 const CHAIN_SPEC_BUILDER_PATH: &str = "../../../../../target/release/chain-spec-builder";
 
@@ -26,7 +28,7 @@ fn list_presets() {
 	let output = Command::new(get_chain_spec_builder_path())
 		.arg("list-presets")
 		.arg("-r")
-		.arg(WASM_FILE_PATH)
+		.arg(wasm_file_path())
 		.output()
 		.expect("Failed to execute command");
 
@@ -50,7 +52,7 @@ fn get_preset() {
 	let output = Command::new(get_chain_spec_builder_path())
 		.arg("display-preset")
 		.arg("-r")
-		.arg(WASM_FILE_PATH)
+		.arg(wasm_file_path())
 		.arg("-p")
 		.arg("preset_2")
 		.output()
@@ -83,7 +85,7 @@ fn generate_chain_spec() {
 		.arg("/dev/stdout")
 		.arg("create")
 		.arg("-r")
-		.arg(WASM_FILE_PATH)
+		.arg(wasm_file_path())
 		.arg("named-preset")
 		.arg("preset_2")
 		.output()
@@ -140,7 +142,7 @@ fn generate_para_chain_spec() {
 		.arg("-p")
 		.arg("1000")
 		.arg("-r")
-		.arg(WASM_FILE_PATH)
+		.arg(wasm_file_path())
 		.arg("named-preset")
 		.arg("preset_2")
 		.output()
diff --git a/prdoc/pr_6673.prdoc b/prdoc/pr_6673.prdoc
new file mode 100644
index 000000000000..d2ca3c61ff39
--- /dev/null
+++ b/prdoc/pr_6673.prdoc
@@ -0,0 +1,7 @@
+title: 'chain-spec-guide-runtime: path to wasm blob fixed'
+doc:
+- audience: Runtime Dev
+  description: In the `chain-spec-guide-runtime` crate's tests, there was an assumption
+    that a release version of the wasm blob exists. This PR uses the `chain_spec_guide_runtime::runtime::WASM_BINARY_PATH`
+    const to resolve the correct path to the runtime blob.
+crates: []

From 51c3e95a05c528b3869ad267aeb5c356551e38db Mon Sep 17 00:00:00 2001
From: Alexandru Vasile <60601340+lexnv@users.noreply.github.com>
Date: Thu, 28 Nov 2024 11:16:06 +0200
Subject: [PATCH 27/41] chore: Update litep2p to v0.8.2 (#6677)

This includes a critical fix for debug release versions of litep2p (which are
running in Kusama as validators).

While at it, we have stopped the on-call pain of alerts around
`incoming_connections_total`. We can rethink the metrics exposed by litep2p
in Q1.
## [0.8.2] - 2024-11-27 This release ensures that the provided peer identity is verified at the crypto/noise protocol level, enhancing security and preventing potential misuses. The release also includes a fix that caused `TransportService` component to panic on debug builds. ### Fixed - req-resp: Fix panic on connection closed for substream open failure ([#291](https://github.com/paritytech/litep2p/pull/291)) - crypto/noise: Verify crypto/noise signature payload ([#278](https://github.com/paritytech/litep2p/pull/278)) ### Changed - transport_service/logs: Provide less details for trace logs ([#292](https://github.com/paritytech/litep2p/pull/292)) ## Testing Done This has been extensively tested in Kusama on all validators, that are now running litep2p. Deployed PR: https://github.com/paritytech/polkadot-sdk/pull/6638 ### Litep2p Dashboards ![Screenshot 2024-11-26 at 19 19 41](https://github.com/user-attachments/assets/e00b2b2b-7e64-4d96-ab26-165e2b8d0dc9) ### Libp2p vs Litep2p CPU usage After deploying litep2p we have reduced CPU usage from around 300-400% to 200%, this is a significant boost in performance, freeing resources for other subcomponents to function more optimally. ![image(1)](https://github.com/user-attachments/assets/fa793df5-4d58-4601-963d-246e56dd2a26) cc @paritytech/sdk-node --------- Signed-off-by: Alexandru Vasile Co-authored-by: GitHub Action --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- prdoc/pr_6677.prdoc | 11 +++++++++++ substrate/client/network/src/litep2p/mod.rs | 11 ++++++++++- 4 files changed, 24 insertions(+), 4 deletions(-) create mode 100644 prdoc/pr_6677.prdoc diff --git a/Cargo.lock b/Cargo.lock index 12e642bc9d06..84477cd05416 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10216,9 +10216,9 @@ dependencies = [ [[package]] name = "litep2p" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b67484b8ac41e1cfdf012f65fa81e88c2ef5f8a7d6dec0e2678c2d06dc04530" +checksum = "569e7dbec8a0d4b08d30f4942cd579cfe8db5d3f83f8604abe61697c38d17e73" dependencies = [ "async-trait", "bs58", diff --git a/Cargo.toml b/Cargo.toml index b1a52712e736..964964908a9b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -848,7 +848,7 @@ linked-hash-map = { version = "0.5.4" } linked_hash_set = { version = "0.1.4" } linregress = { version = "0.5.1" } lite-json = { version = "0.2.0", default-features = false } -litep2p = { version = "0.8.1", features = ["websocket"] } +litep2p = { version = "0.8.2", features = ["websocket"] } log = { version = "0.4.22", default-features = false } macro_magic = { version = "0.5.1" } maplit = { version = "1.0.2" } diff --git a/prdoc/pr_6677.prdoc b/prdoc/pr_6677.prdoc new file mode 100644 index 000000000000..c6766889e68d --- /dev/null +++ b/prdoc/pr_6677.prdoc @@ -0,0 +1,11 @@ +title: 'chore: Update litep2p to v0.8.2' +doc: +- audience: Node Dev + description: |- + This includes a critical fix for debug release versions of litep2p (which are running in Kusama as validators). + + While at it, have stopped the oncall pain of alerts around `incoming_connections_total`. We can rethink the metric expose of litep2p in Q1. 
+ +crates: +- name: sc-network + bump: minor diff --git a/substrate/client/network/src/litep2p/mod.rs b/substrate/client/network/src/litep2p/mod.rs index 10cf9f4da36d..6d3575fc2b6b 100644 --- a/substrate/client/network/src/litep2p/mod.rs +++ b/substrate/client/network/src/litep2p/mod.rs @@ -986,7 +986,15 @@ impl NetworkBackend for Litep2pNetworkBac let direction = match endpoint { Endpoint::Dialer { .. } => "out", - Endpoint::Listener { .. } => "in", + Endpoint::Listener { .. } => { + // Increment incoming connections counter. + // + // Note: For litep2p these are represented by established negotiated connections, + // while for libp2p (legacy) these represent not-yet-negotiated connections. + metrics.incoming_connections_total.inc(); + + "in" + }, }; metrics.connections_opened_total.with_label_values(&[direction]).inc(); @@ -1058,6 +1066,7 @@ impl NetworkBackend for Litep2pNetworkBac NegotiationError::ParseError(_) => "parse-error", NegotiationError::IoError(_) => "io-error", NegotiationError::WebSocket(_) => "webscoket-error", + NegotiationError::BadSignature => "bad-signature", } }; From 9ec8009c5a8fdf89499fcd2a40df0292d3950efa Mon Sep 17 00:00:00 2001 From: Branislav Kontur Date: Thu, 28 Nov 2024 12:41:09 +0100 Subject: [PATCH 28/41] Multiple instances for pallet-bridge-relayers fix (#6684) Previously, we added multi-instance pallet support for `pallet-bridge-relayers`, but missed fixing it in this one place. --------- Co-authored-by: GitHub Action Co-authored-by: Adrian Catangiu --- bridges/modules/relayers/src/extension/mod.rs | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/bridges/modules/relayers/src/extension/mod.rs b/bridges/modules/relayers/src/extension/mod.rs index 34d280d26d6e..d562ed9bcd0e 100644 --- a/bridges/modules/relayers/src/extension/mod.rs +++ b/bridges/modules/relayers/src/extension/mod.rs @@ -129,7 +129,7 @@ pub struct BridgeRelayersTransactionExtension( impl BridgeRelayersTransactionExtension where Self: 'static + Send + Sync, - R: RelayersConfig + R: RelayersConfig + BridgeMessagesConfig + TransactionPaymentConfig, C: ExtensionConfig, @@ -250,7 +250,7 @@ where // let's also replace the weight of slashing relayer with the weight of rewarding relayer if call_info.is_receive_messages_proof_call() { post_info_weight = post_info_weight.saturating_sub( - ::WeightInfo::extra_weight_of_successful_receive_messages_proof_call(), + >::WeightInfo::extra_weight_of_successful_receive_messages_proof_call(), ); } @@ -278,7 +278,7 @@ impl TransactionExtension for BridgeRelayersTransactionExtension where Self: 'static + Send + Sync, - R: RelayersConfig + R: RelayersConfig + BridgeMessagesConfig + TransactionPaymentConfig, C: ExtensionConfig, @@ -326,7 +326,9 @@ where }; // we only boost priority if relayer has staked required balance - if !RelayersPallet::::is_registration_active(&data.relayer) { + if !RelayersPallet::::is_registration_active( + &data.relayer, + ) { return Ok((Default::default(), Some(data), origin)) } @@ -382,7 +384,11 @@ where match call_result { RelayerAccountAction::None => (), RelayerAccountAction::Reward(relayer, reward_account, reward) => { - RelayersPallet::::register_relayer_reward(reward_account, &relayer, reward); + RelayersPallet::::register_relayer_reward( + reward_account, + &relayer, + reward, + ); log::trace!( target: LOG_TARGET, @@ -394,7 +400,7 @@ where ); }, RelayerAccountAction::Slash(relayer, slash_account) => - RelayersPallet::::slash_and_deregister( + RelayersPallet::::slash_and_deregister( &relayer, 
 				ExplicitOrAccountParams::Params(slash_account),
 			),

From 23369acd34411cfae924718a2834df1a7ea8bc05 Mon Sep 17 00:00:00 2001
From: Ludovic_Domingues
Date: Thu, 28 Nov 2024 15:27:49 +0100
Subject: [PATCH 29/41] Migrating pallet-xcm-benchmarks to V2 (#6618)

# Description

Migrated pallet-xcm-benchmarks to benchmarking syntax V2.
This is part of #6202.

---------

Co-authored-by: Giuseppe Re
---
 .../src/generic/benchmarking.rs | 652 +++++++++++-------
 1 file changed, 394 insertions(+), 258 deletions(-)

diff --git a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs
index 87bf27e4ff18..f4836b7cdde1 100644
--- a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs
+++ b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs
@@ -13,12 +13,13 @@
 // You should have received a copy of the GNU General Public License
 // along with Polkadot. If not, see .
 
+#![cfg(feature = "runtime-benchmarks")]
 use super::*;
 use crate::{account_and_location, new_executor, EnsureDelivery, XcmCallOf};
 use alloc::{vec, vec::Vec};
 use codec::Encode;
-use frame_benchmarking::{benchmarks, BenchmarkError};
+use frame_benchmarking::v2::*;
 use frame_support::traits::fungible::Inspect;
 use xcm::{
 	latest::{prelude::*, MaxDispatchErrorLen, MaybeErrorCode, Weight, MAX_ITEMS_IN_ASSETS},
@@ -29,16 +30,21 @@ use xcm_executor::{
 	ExecutorError, FeesMode,
 };
 
-benchmarks! {
-	report_holding {
{ }; let xcm = Xcm(vec![instruction]); - } : { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } + Ok(()) } - pay_fees { + #[benchmark] + fn pay_fees() -> Result<(), BenchmarkError> { let holding = T::worst_case_holding(0).into(); let mut executor = new_executor::(Default::default()); @@ -111,40 +132,53 @@ benchmarks! { let instruction = Instruction::>::PayFees { asset: fee_asset }; let xcm = Xcm(vec![instruction]); - } : { - executor.bench_process(xcm)?; - } verify {} + #[block] + { + executor.bench_process(xcm)?; + } + Ok(()) + } - set_asset_claimer { + #[benchmark] + fn set_asset_claimer() -> Result<(), BenchmarkError> { let mut executor = new_executor::(Default::default()); let (_, sender_location) = account_and_location::(1); - let instruction = Instruction::SetAssetClaimer{ location:sender_location.clone() }; + let instruction = Instruction::SetAssetClaimer { location: sender_location.clone() }; let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } assert_eq!(executor.asset_claimer(), Some(sender_location.clone())); + + Ok(()) } - query_response { + #[benchmark] + fn query_response() -> Result<(), BenchmarkError> { let mut executor = new_executor::(Default::default()); let (query_id, response) = T::worst_case_response(); let max_weight = Weight::MAX; let querier: Option = Some(Here.into()); let instruction = Instruction::QueryResponse { query_id, response, max_weight, querier }; let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { + + #[block] + { + executor.bench_process(xcm)?; + } // The assert above is enough to show this XCM succeeded + + Ok(()) } // We don't care about the call itself, since that is accounted for in the weight parameter // and included in the final weight calculation. So this is just the overhead of submitting // a noop call. - transact { + #[benchmark] + fn transact() -> Result<(), BenchmarkError> { let (origin, noop_call) = T::transact_origin_and_runtime_call()?; let mut executor = new_executor::(origin); let double_encoded_noop_call: DoubleEncoded<_> = noop_call.encode().into(); @@ -154,119 +188,145 @@ benchmarks! { call: double_encoded_noop_call, }; let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } // TODO Make the assertion configurable? 
+ + Ok(()) } - refund_surplus { + #[benchmark] + fn refund_surplus() -> Result<(), BenchmarkError> { let mut executor = new_executor::(Default::default()); let holding_assets = T::worst_case_holding(1); // We can already buy execution since we'll load the holding register manually let asset_for_fees = T::fee_asset().unwrap(); - let previous_xcm = Xcm(vec![BuyExecution { fees: asset_for_fees, weight_limit: Limited(Weight::from_parts(1337, 1337)) }]); + let previous_xcm = Xcm(vec![BuyExecution { + fees: asset_for_fees, + weight_limit: Limited(Weight::from_parts(1337, 1337)), + }]); executor.set_holding(holding_assets.into()); executor.set_total_surplus(Weight::from_parts(1337, 1337)); executor.set_total_refunded(Weight::zero()); - executor.bench_process(previous_xcm).expect("Holding has been loaded, so we can buy execution here"); + executor + .bench_process(previous_xcm) + .expect("Holding has been loaded, so we can buy execution here"); let instruction = Instruction::>::RefundSurplus; let xcm = Xcm(vec![instruction]); - } : { - let result = executor.bench_process(xcm)?; - } verify { + #[block] + { + let _result = executor.bench_process(xcm)?; + } assert_eq!(executor.total_surplus(), &Weight::from_parts(1337, 1337)); assert_eq!(executor.total_refunded(), &Weight::from_parts(1337, 1337)); + + Ok(()) } - set_error_handler { + #[benchmark] + fn set_error_handler() -> Result<(), BenchmarkError> { let mut executor = new_executor::(Default::default()); let instruction = Instruction::>::SetErrorHandler(Xcm(vec![])); let xcm = Xcm(vec![instruction]); - } : { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } assert_eq!(executor.error_handler(), &Xcm(vec![])); + + Ok(()) } - set_appendix { + #[benchmark] + fn set_appendix() -> Result<(), BenchmarkError> { let mut executor = new_executor::(Default::default()); let appendix = Xcm(vec![]); let instruction = Instruction::>::SetAppendix(appendix); let xcm = Xcm(vec![instruction]); - } : { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } assert_eq!(executor.appendix(), &Xcm(vec![])); + Ok(()) } - clear_error { + #[benchmark] + fn clear_error() -> Result<(), BenchmarkError> { let mut executor = new_executor::(Default::default()); executor.set_error(Some((5u32, XcmError::Overflow))); let instruction = Instruction::>::ClearError; let xcm = Xcm(vec![instruction]); - } : { - executor.bench_process(xcm)?; - } verify { - assert!(executor.error().is_none()) + #[block] + { + executor.bench_process(xcm)?; + } + assert!(executor.error().is_none()); + Ok(()) } - descend_origin { + #[benchmark] + fn descend_origin() -> Result<(), BenchmarkError> { let mut executor = new_executor::(Default::default()); let who = Junctions::from([OnlyChild, OnlyChild]); let instruction = Instruction::DescendOrigin(who.clone()); let xcm = Xcm(vec![instruction]); - } : { - executor.bench_process(xcm)?; - } verify { - assert_eq!( - executor.origin(), - &Some(Location { - parents: 0, - interior: who, - }), - ); + #[block] + { + executor.bench_process(xcm)?; + } + assert_eq!(executor.origin(), &Some(Location { parents: 0, interior: who }),); + + Ok(()) } - execute_with_origin { + #[benchmark] + fn execute_with_origin() -> Result<(), BenchmarkError> { let mut executor = new_executor::(Default::default()); let who: Junctions = Junctions::from([AccountId32 { id: [0u8; 32], network: None }]); - let instruction = Instruction::ExecuteWithOrigin { descendant_origin: Some(who.clone()), xcm: Xcm(vec![]) }; + 
let instruction = Instruction::ExecuteWithOrigin { + descendant_origin: Some(who.clone()), + xcm: Xcm(vec![]), + }; let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { - assert_eq!( - executor.origin(), - &Some(Location { - parents: 0, - interior: Here, - }), - ); + #[block] + { + executor.bench_process(xcm)?; + } + assert_eq!(executor.origin(), &Some(Location { parents: 0, interior: Here }),); + + Ok(()) } - clear_origin { + #[benchmark] + fn clear_origin() -> Result<(), BenchmarkError> { let mut executor = new_executor::(Default::default()); let instruction = Instruction::ClearOrigin; let xcm = Xcm(vec![instruction]); - } : { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } assert_eq!(executor.origin(), &None); + Ok(()) } - report_error { + #[benchmark] + fn report_error() -> Result<(), BenchmarkError> { let (sender_account, sender_location) = account_and_location::(1); let query_id = Default::default(); let max_weight = Default::default(); let destination = T::valid_destination().map_err(|_| BenchmarkError::Skip)?; - let (expected_fees_mode, expected_assets_in_holding) = T::DeliveryHelper::ensure_successful_delivery( - &sender_location, - &destination, - FeeReason::Report, - ); + let (expected_fees_mode, expected_assets_in_holding) = + T::DeliveryHelper::ensure_successful_delivery( + &sender_location, + &destination, + FeeReason::Report, + ); let sender_account_balance_before = T::TransactAsset::balance(&sender_account); let mut executor = new_executor::(sender_location); @@ -278,18 +338,21 @@ benchmarks! { } executor.set_error(Some((0u32, XcmError::Unimplemented))); - let instruction = Instruction::ReportError(QueryResponseInfo { - query_id, destination, max_weight - }); + let instruction = + Instruction::ReportError(QueryResponseInfo { query_id, destination, max_weight }); let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } // Check we charged the delivery fees assert!(T::TransactAsset::balance(&sender_account) <= sender_account_balance_before); + + Ok(()) } - claim_asset { + #[benchmark] + fn claim_asset() -> Result<(), BenchmarkError> { use xcm_executor::traits::DropAssets; let (origin, ticket, assets) = T::claimable_asset()?; @@ -298,11 +361,7 @@ benchmarks! { ::AssetTrap::drop_assets( &origin, assets.clone().into(), - &XcmContext { - origin: Some(origin.clone()), - message_id: [0; 32], - topic: None, - }, + &XcmContext { origin: Some(origin.clone()), message_id: [0; 32], topic: None }, ); // Assets should be in the trap now. @@ -310,28 +369,32 @@ benchmarks! { let mut executor = new_executor::(origin); let instruction = Instruction::ClaimAsset { assets: assets.clone(), ticket }; let xcm = Xcm(vec![instruction]); - } :{ - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } assert!(executor.holding().ensure_contains(&assets).is_ok()); + Ok(()) } - trap { + #[benchmark] + fn trap() -> Result<(), BenchmarkError> { let mut executor = new_executor::(Default::default()); let instruction = Instruction::Trap(10); let xcm = Xcm(vec![instruction]); // In order to access result in the verification below, it needs to be defined here. - let mut _result = Ok(()); - } : { - _result = executor.bench_process(xcm); - } verify { - assert!(matches!(_result, Err(ExecutorError { - xcm_error: XcmError::Trap(10), - .. 
- }))); + let result; + #[block] + { + result = executor.bench_process(xcm); + } + assert!(matches!(result, Err(ExecutorError { xcm_error: XcmError::Trap(10), .. }))); + + Ok(()) } - subscribe_version { + #[benchmark] + fn subscribe_version() -> Result<(), BenchmarkError> { use xcm_executor::traits::VersionChangeNotifier; let origin = T::subscribe_origin()?; let query_id = Default::default(); @@ -339,13 +402,18 @@ benchmarks! { let mut executor = new_executor::(origin.clone()); let instruction = Instruction::SubscribeVersion { query_id, max_response_weight }; let xcm = Xcm(vec![instruction]); - } : { - executor.bench_process(xcm)?; - } verify { - assert!(::SubscriptionService::is_subscribed(&origin)); + #[block] + { + executor.bench_process(xcm)?; + } + assert!(::SubscriptionService::is_subscribed( + &origin + )); + Ok(()) } - unsubscribe_version { + #[benchmark] + fn unsubscribe_version() -> Result<(), BenchmarkError> { use xcm_executor::traits::VersionChangeNotifier; // First we need to subscribe to notifications. let (origin, _) = T::transact_origin_and_runtime_call()?; @@ -355,24 +423,28 @@ benchmarks! { &origin, query_id, max_response_weight, - &XcmContext { - origin: Some(origin.clone()), - message_id: [0; 32], - topic: None, - }, - ).map_err(|_| "Could not start subscription")?; - assert!(::SubscriptionService::is_subscribed(&origin)); + &XcmContext { origin: Some(origin.clone()), message_id: [0; 32], topic: None }, + ) + .map_err(|_| "Could not start subscription")?; + assert!(::SubscriptionService::is_subscribed( + &origin + )); let mut executor = new_executor::(origin.clone()); let instruction = Instruction::UnsubscribeVersion; let xcm = Xcm(vec![instruction]); - } : { - executor.bench_process(xcm)?; - } verify { - assert!(!::SubscriptionService::is_subscribed(&origin)); + #[block] + { + executor.bench_process(xcm)?; + } + assert!(!::SubscriptionService::is_subscribed( + &origin + )); + Ok(()) } - burn_asset { + #[benchmark] + fn burn_asset() -> Result<(), BenchmarkError> { let holding = T::worst_case_holding(0); let assets = holding.clone(); @@ -381,13 +453,16 @@ benchmarks! { let instruction = Instruction::BurnAsset(assets.into()); let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } assert!(executor.holding().is_empty()); + Ok(()) } - expect_asset { + #[benchmark] + fn expect_asset() -> Result<(), BenchmarkError> { let holding = T::worst_case_holding(0); let assets = holding.clone(); @@ -396,71 +471,86 @@ benchmarks! { let instruction = Instruction::ExpectAsset(assets.into()); let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } // `execute` completing successfully is as good as we can check. + + Ok(()) } - expect_origin { + #[benchmark] + fn expect_origin() -> Result<(), BenchmarkError> { let expected_origin = Parent.into(); let mut executor = new_executor::(Default::default()); let instruction = Instruction::ExpectOrigin(Some(expected_origin)); let xcm = Xcm(vec![instruction]); let mut _result = Ok(()); - }: { - _result = executor.bench_process(xcm); - } verify { - assert!(matches!(_result, Err(ExecutorError { - xcm_error: XcmError::ExpectationFalse, - .. - }))); + #[block] + { + _result = executor.bench_process(xcm); + } + assert!(matches!( + _result, + Err(ExecutorError { xcm_error: XcmError::ExpectationFalse, .. 
}) + )); + + Ok(()) } - expect_error { + #[benchmark] + fn expect_error() -> Result<(), BenchmarkError> { let mut executor = new_executor::(Default::default()); executor.set_error(Some((3u32, XcmError::Overflow))); let instruction = Instruction::ExpectError(None); let xcm = Xcm(vec![instruction]); let mut _result = Ok(()); - }: { - _result = executor.bench_process(xcm); - } verify { - assert!(matches!(_result, Err(ExecutorError { - xcm_error: XcmError::ExpectationFalse, - .. - }))); + #[block] + { + _result = executor.bench_process(xcm); + } + assert!(matches!( + _result, + Err(ExecutorError { xcm_error: XcmError::ExpectationFalse, .. }) + )); + + Ok(()) } - expect_transact_status { + #[benchmark] + fn expect_transact_status() -> Result<(), BenchmarkError> { let mut executor = new_executor::(Default::default()); - let worst_error = || -> MaybeErrorCode { - vec![0; MaxDispatchErrorLen::get() as usize].into() - }; + let worst_error = + || -> MaybeErrorCode { vec![0; MaxDispatchErrorLen::get() as usize].into() }; executor.set_transact_status(worst_error()); let instruction = Instruction::ExpectTransactStatus(worst_error()); let xcm = Xcm(vec![instruction]); let mut _result = Ok(()); - }: { - _result = executor.bench_process(xcm); - } verify { + #[block] + { + _result = executor.bench_process(xcm); + } assert!(matches!(_result, Ok(..))); + Ok(()) } - query_pallet { + #[benchmark] + fn query_pallet() -> Result<(), BenchmarkError> { let (sender_account, sender_location) = account_and_location::(1); let query_id = Default::default(); let destination = T::valid_destination().map_err(|_| BenchmarkError::Skip)?; let max_weight = Default::default(); - let (expected_fees_mode, expected_assets_in_holding) = T::DeliveryHelper::ensure_successful_delivery( - &sender_location, - &destination, - FeeReason::QueryPallet, - ); + let (expected_fees_mode, expected_assets_in_holding) = + T::DeliveryHelper::ensure_successful_delivery( + &sender_location, + &destination, + FeeReason::QueryPallet, + ); let sender_account_balance_before = T::TransactAsset::balance(&sender_account); let mut executor = new_executor::(sender_location); if let Some(expected_fees_mode) = expected_fees_mode { @@ -476,15 +566,19 @@ benchmarks! { response_info: QueryResponseInfo { destination, query_id, max_weight }, }; let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } // Check we charged the delivery fees assert!(T::TransactAsset::balance(&sender_account) <= sender_account_balance_before); // TODO: Potentially add new trait to XcmSender to detect a queued outgoing message. #4426 + + Ok(()) } - expect_pallet { + #[benchmark] + fn expect_pallet() -> Result<(), BenchmarkError> { let mut executor = new_executor::(Default::default()); let valid_pallet = T::valid_pallet(); let instruction = Instruction::ExpectPallet { @@ -495,23 +589,27 @@ benchmarks! 
{ min_crate_minor: valid_pallet.crate_version.minor.into(), }; let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } // the execution succeeding is all we need to verify this xcm was successful + Ok(()) } - report_transact_status { + #[benchmark] + fn report_transact_status() -> Result<(), BenchmarkError> { let (sender_account, sender_location) = account_and_location::(1); let query_id = Default::default(); let destination = T::valid_destination().map_err(|_| BenchmarkError::Skip)?; let max_weight = Default::default(); - let (expected_fees_mode, expected_assets_in_holding) = T::DeliveryHelper::ensure_successful_delivery( - &sender_location, - &destination, - FeeReason::Report, - ); + let (expected_fees_mode, expected_assets_in_holding) = + T::DeliveryHelper::ensure_successful_delivery( + &sender_location, + &destination, + FeeReason::Report, + ); let sender_account_balance_before = T::TransactAsset::balance(&sender_account); let mut executor = new_executor::(sender_location); @@ -529,84 +627,102 @@ benchmarks! { max_weight, }); let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } // Check we charged the delivery fees assert!(T::TransactAsset::balance(&sender_account) <= sender_account_balance_before); // TODO: Potentially add new trait to XcmSender to detect a queued outgoing message. #4426 + Ok(()) } - clear_transact_status { + #[benchmark] + fn clear_transact_status() -> Result<(), BenchmarkError> { let mut executor = new_executor::(Default::default()); executor.set_transact_status(b"MyError".to_vec().into()); let instruction = Instruction::ClearTransactStatus; let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } assert_eq!(executor.transact_status(), &MaybeErrorCode::Success); + Ok(()) } - set_topic { + #[benchmark] + fn set_topic() -> Result<(), BenchmarkError> { let mut executor = new_executor::(Default::default()); let instruction = Instruction::SetTopic([1; 32]); let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } assert_eq!(executor.topic(), &Some([1; 32])); + Ok(()) } - clear_topic { + #[benchmark] + fn clear_topic() -> Result<(), BenchmarkError> { let mut executor = new_executor::(Default::default()); executor.set_topic(Some([2; 32])); let instruction = Instruction::ClearTopic; let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } assert_eq!(executor.topic(), &None); + Ok(()) } - exchange_asset { + #[benchmark] + fn exchange_asset() -> Result<(), BenchmarkError> { let (give, want) = T::worst_case_asset_exchange().map_err(|_| BenchmarkError::Skip)?; let assets = give.clone(); let mut executor = new_executor::(Default::default()); executor.set_holding(give.into()); - let instruction = Instruction::ExchangeAsset { - give: assets.into(), - want: want.clone(), - maximal: true, - }; + let instruction = + Instruction::ExchangeAsset { give: assets.into(), want: want.clone(), maximal: true }; let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } assert_eq!(executor.holding(), &want.into()); + Ok(()) } - universal_origin { + #[benchmark] + fn universal_origin() -> Result<(), BenchmarkError> { let 
(origin, alias) = T::universal_alias().map_err(|_| BenchmarkError::Skip)?; let mut executor = new_executor::(origin); let instruction = Instruction::UniversalOrigin(alias); let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } use frame_support::traits::Get; let universal_location = ::UniversalLocation::get(); - assert_eq!(executor.origin(), &Some(Junctions::from([alias]).relative_to(&universal_location))); + assert_eq!( + executor.origin(), + &Some(Junctions::from([alias]).relative_to(&universal_location)) + ); + + Ok(()) } - export_message { - let x in 1 .. 1000; + #[benchmark] + fn export_message(x: Linear<1, 1000>) -> Result<(), BenchmarkError> { // The `inner_xcm` influences `ExportMessage` total weight based on // `inner_xcm.encoded_size()`, so for this benchmark use smallest encoded instruction // to approximate weight per "unit" of encoded size; then actual weight can be estimated @@ -616,11 +732,12 @@ benchmarks! { // Get `origin`, `network` and `destination` from configured runtime. let (origin, network, destination) = T::export_message_origin_and_destination()?; - let (expected_fees_mode, expected_assets_in_holding) = T::DeliveryHelper::ensure_successful_delivery( - &origin, - &destination.clone().into(), - FeeReason::Export { network, destination: destination.clone() }, - ); + let (expected_fees_mode, expected_assets_in_holding) = + T::DeliveryHelper::ensure_successful_delivery( + &origin, + &destination.clone().into(), + FeeReason::Export { network, destination: destination.clone() }, + ); let sender_account = T::AccountIdConverter::convert_location(&origin).unwrap(); let sender_account_balance_before = T::TransactAsset::balance(&sender_account); @@ -631,37 +748,39 @@ benchmarks! { if let Some(expected_assets_in_holding) = expected_assets_in_holding { executor.set_holding(expected_assets_in_holding.into()); } - let xcm = Xcm(vec![ExportMessage { - network, destination: destination.clone(), xcm: inner_xcm, - }]); - }: { - executor.bench_process(xcm)?; - } verify { + let xcm = + Xcm(vec![ExportMessage { network, destination: destination.clone(), xcm: inner_xcm }]); + #[block] + { + executor.bench_process(xcm)?; + } // Check we charged the delivery fees assert!(T::TransactAsset::balance(&sender_account) <= sender_account_balance_before); // TODO: Potentially add new trait to XcmSender to detect a queued outgoing message. 
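One more V2 detail visible in `export_message` above: ranged complexity parameters. The V1 binding `let x in 1 .. 1000;` becomes a typed function parameter, and inside the body the macro exposes it as a plain `u32` (which is why this file can write `x as usize`-style casts). A minimal sketch of the pattern; the benchmark name and body are illustrative only, assuming `codec::Encode` and the `alloc` prelude are in scope as in this file:

```rust
#[benchmark]
fn encode_payload(n: Linear<1, 1_000>) -> Result<(), BenchmarkError> {
    // `Linear<LOW, HIGH>` samples `n` linearly over [1, 1000]; in the body
    // the macro hands it to us as an ordinary `u32`.
    let payload = vec![0u8; n as usize];

    #[block]
    {
        // Measure work proportional to `n`.
        core::hint::black_box(payload.encode());
    }

    Ok(())
}
```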
#4426 + Ok(()) } - set_fees_mode { + #[benchmark] + fn set_fees_mode() -> Result<(), BenchmarkError> { let mut executor = new_executor::(Default::default()); executor.set_fees_mode(FeesMode { jit_withdraw: false }); let instruction = Instruction::SetFeesMode { jit_withdraw: true }; let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } assert_eq!(executor.fees_mode(), &FeesMode { jit_withdraw: true }); + Ok(()) } - lock_asset { + #[benchmark] + fn lock_asset() -> Result<(), BenchmarkError> { let (unlocker, owner, asset) = T::unlockable_asset()?; - let (expected_fees_mode, expected_assets_in_holding) = T::DeliveryHelper::ensure_successful_delivery( - &owner, - &unlocker, - FeeReason::LockAsset, - ); + let (expected_fees_mode, expected_assets_in_holding) = + T::DeliveryHelper::ensure_successful_delivery(&owner, &unlocker, FeeReason::LockAsset); let sender_account = T::AccountIdConverter::convert_location(&owner).unwrap(); let sender_account_balance_before = T::TransactAsset::balance(&sender_account); @@ -681,15 +800,18 @@ benchmarks! { let instruction = Instruction::LockAsset { asset, unlocker }; let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } // Check delivery fees assert!(T::TransactAsset::balance(&sender_account) <= sender_account_balance_before); // TODO: Potentially add new trait to XcmSender to detect a queued outgoing message. #4426 + Ok(()) } - unlock_asset { + #[benchmark] + fn unlock_asset() -> Result<(), BenchmarkError> { use xcm_executor::traits::{AssetLock, Enact}; let (unlocker, owner, asset) = T::unlockable_asset()?; @@ -709,13 +831,15 @@ benchmarks! { // ... then unlock them with the UnlockAsset instruction. let instruction = Instruction::UnlockAsset { asset, target: owner }; let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { - + #[block] + { + executor.bench_process(xcm)?; + } + Ok(()) } - note_unlockable { + #[benchmark] + fn note_unlockable() -> Result<(), BenchmarkError> { use xcm_executor::traits::{AssetLock, Enact}; let (unlocker, owner, asset) = T::unlockable_asset()?; @@ -735,13 +859,15 @@ benchmarks! { // ... then note them as unlockable with the NoteUnlockable instruction. let instruction = Instruction::NoteUnlockable { asset, owner }; let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { - + #[block] + { + executor.bench_process(xcm)?; + } + Ok(()) } - request_unlock { + #[benchmark] + fn request_unlock() -> Result<(), BenchmarkError> { use xcm_executor::traits::{AssetLock, Enact}; let (locker, owner, asset) = T::unlockable_asset()?; @@ -756,11 +882,12 @@ benchmarks! { .enact() .map_err(|_| BenchmarkError::Skip)?; - let (expected_fees_mode, expected_assets_in_holding) = T::DeliveryHelper::ensure_successful_delivery( - &owner, - &locker, - FeeReason::RequestUnlock, - ); + let (expected_fees_mode, expected_assets_in_holding) = + T::DeliveryHelper::ensure_successful_delivery( + &owner, + &locker, + FeeReason::RequestUnlock, + ); let sender_account = T::AccountIdConverter::convert_location(&owner).unwrap(); let sender_account_balance_before = T::TransactAsset::balance(&sender_account); @@ -774,15 +901,18 @@ benchmarks! 
{ } let instruction = Instruction::RequestUnlock { asset, locker }; let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } // Check we charged the delivery fees assert!(T::TransactAsset::balance(&sender_account) <= sender_account_balance_before); // TODO: Potentially add new trait to XcmSender to detect a queued outgoing message. #4426 + Ok(()) } - unpaid_execution { + #[benchmark] + fn unpaid_execution() -> Result<(), BenchmarkError> { let mut executor = new_executor::(Default::default()); executor.set_origin(Some(Here.into())); @@ -792,21 +922,27 @@ benchmarks! { }; let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; + #[block] + { + executor.bench_process(xcm)?; + } + Ok(()) } - alias_origin { + #[benchmark] + fn alias_origin() -> Result<(), BenchmarkError> { let (origin, target) = T::alias_origin().map_err(|_| BenchmarkError::Skip)?; let mut executor = new_executor::(origin); let instruction = Instruction::AliasOrigin(target.clone()); let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } assert_eq!(executor.origin(), &Some(target)); + Ok(()) } impl_benchmark_test_suite!( From fdb264d0df6fdbed32f001ba43c3282a01dd3d65 Mon Sep 17 00:00:00 2001 From: Ludovic_Domingues Date: Thu, 28 Nov 2024 17:18:30 +0100 Subject: [PATCH 30/41] Migrating pallet-state-trie-migration to benchmarking V2 (#6617) # Description Migrated pallet-state-trie-migration benchmarking to the new benchmarking syntax v2. This is part of #6202 Co-authored-by: Shawn Tabrizi Co-authored-by: Giuseppe Re --- .../frame/state-trie-migration/src/lib.rs | 176 +++++++++++------- 1 file changed, 108 insertions(+), 68 deletions(-) diff --git a/substrate/frame/state-trie-migration/src/lib.rs b/substrate/frame/state-trie-migration/src/lib.rs index 3fe5abb81031..61323b70b33d 100644 --- a/substrate/frame/state-trie-migration/src/lib.rs +++ b/substrate/frame/state-trie-migration/src/lib.rs @@ -249,13 +249,13 @@ pub mod pallet { if limits.item.is_zero() || limits.size.is_zero() { // handle this minor edge case, else we would call `migrate_tick` at least once. log!(warn, "limits are zero. stopping"); - return Ok(()) + return Ok(()); } while !self.exhausted(limits) && !self.finished() { if let Err(e) = self.migrate_tick() { log!(error, "migrate_until_exhaustion failed: {:?}", e); - return Err(e) + return Err(e); } } @@ -332,7 +332,7 @@ pub mod pallet { _ => { // defensive: there must be an ongoing top migration. frame_support::defensive!("cannot migrate child key."); - return Ok(()) + return Ok(()); }, }; @@ -374,7 +374,7 @@ pub mod pallet { Progress::Complete => { // defensive: there must be an ongoing top migration. frame_support::defensive!("cannot migrate top key."); - return Ok(()) + return Ok(()); }, }; @@ -669,7 +669,7 @@ pub mod pallet { // ensure that the migration witness data was correct. if real_size_upper < task.dyn_size { Self::slash(who, deposit)?; - return Ok(().into()) + return Ok(().into()); } Self::deposit_event(Event::::Migrated { @@ -957,6 +957,7 @@ pub mod pallet { mod benchmarks { use super::{pallet::Pallet as StateTrieMigration, *}; use alloc::vec; + use frame_benchmarking::v2::*; use frame_support::traits::fungible::{Inspect, Mutate}; // The size of the key seemingly makes no difference in the read/write time, so we make it @@ -970,8 +971,12 @@ mod benchmarks { stash } - frame_benchmarking::benchmarks! 
{ - continue_migrate { + #[benchmarks] + mod inner_benchmarks { + use super::*; + + #[benchmark] + fn continue_migrate() -> Result<(), BenchmarkError> { // note that this benchmark should migrate nothing, as we only want the overhead weight // of the bookkeeping, and the migration cost itself is noted via the `dynamic_weight` // function. @@ -980,116 +985,151 @@ mod benchmarks { let stash = set_balance_for_deposit::(&caller, null.item); // Allow signed migrations. SignedMigrationMaxLimits::::put(MigrationLimits { size: 1024, item: 5 }); - }: _(frame_system::RawOrigin::Signed(caller.clone()), null, 0, StateTrieMigration::::migration_process()) - verify { + + #[extrinsic_call] + _( + frame_system::RawOrigin::Signed(caller.clone()), + null, + 0, + StateTrieMigration::::migration_process(), + ); + assert_eq!(StateTrieMigration::::migration_process(), Default::default()); - assert_eq!(T::Currency::balance(&caller), stash) + assert_eq!(T::Currency::balance(&caller), stash); + + Ok(()) } - continue_migrate_wrong_witness { + #[benchmark] + fn continue_migrate_wrong_witness() -> Result<(), BenchmarkError> { let null = MigrationLimits::default(); let caller = frame_benchmarking::whitelisted_caller(); - let bad_witness = MigrationTask { progress_top: Progress::LastKey(vec![1u8].try_into().unwrap()), ..Default::default() }; - }: { - assert!( - StateTrieMigration::::continue_migrate( + let bad_witness = MigrationTask { + progress_top: Progress::LastKey(vec![1u8].try_into().unwrap()), + ..Default::default() + }; + #[block] + { + assert!(StateTrieMigration::::continue_migrate( frame_system::RawOrigin::Signed(caller).into(), null, 0, bad_witness, ) - .is_err() - ) - } - verify { - assert_eq!(StateTrieMigration::::migration_process(), Default::default()) + .is_err()); + } + + assert_eq!(StateTrieMigration::::migration_process(), Default::default()); + + Ok(()) } - migrate_custom_top_success { + #[benchmark] + fn migrate_custom_top_success() -> Result<(), BenchmarkError> { let null = MigrationLimits::default(); let caller: T::AccountId = frame_benchmarking::whitelisted_caller(); let stash = set_balance_for_deposit::(&caller, null.item); - }: migrate_custom_top(frame_system::RawOrigin::Signed(caller.clone()), Default::default(), 0) - verify { + #[extrinsic_call] + migrate_custom_top( + frame_system::RawOrigin::Signed(caller.clone()), + Default::default(), + 0, + ); + assert_eq!(StateTrieMigration::::migration_process(), Default::default()); - assert_eq!(T::Currency::balance(&caller), stash) + assert_eq!(T::Currency::balance(&caller), stash); + Ok(()) } - migrate_custom_top_fail { + #[benchmark] + fn migrate_custom_top_fail() -> Result<(), BenchmarkError> { let null = MigrationLimits::default(); let caller: T::AccountId = frame_benchmarking::whitelisted_caller(); let stash = set_balance_for_deposit::(&caller, null.item); // for tests, we need to make sure there is _something_ in storage that is being // migrated. 
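The `continue_migrate` and `migrate_custom_top_success` hunks above also show the two spellings of `#[extrinsic_call]`. A reduced sketch of the naming rule, with a hypothetical pallet and call names:

```rust
#[benchmark]
fn do_work() -> Result<(), BenchmarkError> {
    let caller: T::AccountId = whitelisted_caller();

    // `_` expands to the extrinsic that shares the benchmark's name,
    // i.e. `Pallet::<T>::do_work(origin, 0)`.
    #[extrinsic_call]
    _(RawOrigin::Signed(caller), 0u32);

    Ok(())
}

#[benchmark]
fn do_work_worst_case() -> Result<(), BenchmarkError> {
    let caller: T::AccountId = whitelisted_caller();

    // When the benchmark name differs from the extrinsic, the extrinsic is
    // named explicitly — as the `migrate_custom_top_success` hunk above does.
    #[extrinsic_call]
    do_work(RawOrigin::Signed(caller), u32::MAX);

    Ok(())
}
```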
- sp_io::storage::set(b"foo", vec![1u8;33].as_ref()); - }: { - assert!( - StateTrieMigration::::migrate_custom_top( + sp_io::storage::set(b"foo", vec![1u8; 33].as_ref()); + #[block] + { + assert!(StateTrieMigration::::migrate_custom_top( frame_system::RawOrigin::Signed(caller.clone()).into(), vec![b"foo".to_vec()], 1, - ).is_ok() - ); + ) + .is_ok()); + + frame_system::Pallet::::assert_last_event( + ::RuntimeEvent::from(crate::Event::Slashed { + who: caller.clone(), + amount: StateTrieMigration::::calculate_deposit_for(1u32), + }) + .into(), + ); + } - frame_system::Pallet::::assert_last_event( - ::RuntimeEvent::from(crate::Event::Slashed { - who: caller.clone(), - amount: StateTrieMigration::::calculate_deposit_for(1u32), - }).into(), - ); - } - verify { assert_eq!(StateTrieMigration::::migration_process(), Default::default()); // must have gotten slashed - assert!(T::Currency::balance(&caller) < stash) + assert!(T::Currency::balance(&caller) < stash); + + Ok(()) } - migrate_custom_child_success { + #[benchmark] + fn migrate_custom_child_success() -> Result<(), BenchmarkError> { let caller: T::AccountId = frame_benchmarking::whitelisted_caller(); let stash = set_balance_for_deposit::(&caller, 0); - }: migrate_custom_child( - frame_system::RawOrigin::Signed(caller.clone()), - StateTrieMigration::::childify(Default::default()), - Default::default(), - 0 - ) - verify { + + #[extrinsic_call] + migrate_custom_child( + frame_system::RawOrigin::Signed(caller.clone()), + StateTrieMigration::::childify(Default::default()), + Default::default(), + 0, + ); + assert_eq!(StateTrieMigration::::migration_process(), Default::default()); assert_eq!(T::Currency::balance(&caller), stash); + + Ok(()) } - migrate_custom_child_fail { + #[benchmark] + fn migrate_custom_child_fail() -> Result<(), BenchmarkError> { let caller: T::AccountId = frame_benchmarking::whitelisted_caller(); let stash = set_balance_for_deposit::(&caller, 1); // for tests, we need to make sure there is _something_ in storage that is being // migrated. - sp_io::default_child_storage::set(b"top", b"foo", vec![1u8;33].as_ref()); - }: { - assert!( - StateTrieMigration::::migrate_custom_child( + sp_io::default_child_storage::set(b"top", b"foo", vec![1u8; 33].as_ref()); + + #[block] + { + assert!(StateTrieMigration::::migrate_custom_child( frame_system::RawOrigin::Signed(caller.clone()).into(), StateTrieMigration::::childify("top"), vec![b"foo".to_vec()], 1, - ).is_ok() - ) - } - verify { + ) + .is_ok()); + } assert_eq!(StateTrieMigration::::migration_process(), Default::default()); // must have gotten slashed - assert!(T::Currency::balance(&caller) < stash) + assert!(T::Currency::balance(&caller) < stash); + Ok(()) } - process_top_key { - let v in 1 .. 
(4 * 1024 * 1024); - + #[benchmark] + fn process_top_key(v: Linear<1, { 4 * 1024 * 1024 }>) -> Result<(), BenchmarkError> { let value = alloc::vec![1u8; v as usize]; sp_io::storage::set(KEY, &value); - }: { - let data = sp_io::storage::get(KEY).unwrap(); - sp_io::storage::set(KEY, &data); - let _next = sp_io::storage::next_key(KEY); - assert_eq!(data, value); + #[block] + { + let data = sp_io::storage::get(KEY).unwrap(); + sp_io::storage::set(KEY, &data); + let _next = sp_io::storage::next_key(KEY); + assert_eq!(data, value); + } + + Ok(()) } impl_benchmark_test_suite!( @@ -1741,7 +1781,7 @@ pub(crate) mod remote_tests { let ((finished, weight), proof) = ext.execute_and_prove(|| { let weight = run_to_block::(now + One::one()).1; if StateTrieMigration::::migration_process().finished() { - return (true, weight) + return (true, weight); } duration += One::one(); now += One::one(); @@ -1768,7 +1808,7 @@ pub(crate) mod remote_tests { ext.commit_all().unwrap(); if finished { - break + break; } } From 6416b280a7d0032ba3c265e4506504c6d6536637 Mon Sep 17 00:00:00 2001 From: Cyrill Leutwiler Date: Thu, 28 Nov 2024 19:01:41 +0100 Subject: [PATCH 31/41] [pallet-revive] bugfix decoding 64bit args in the decoder (#6695) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The argument index of the next argument is dictated by the size of the current one. --------- Signed-off-by: xermicus Co-authored-by: GitHub Action Co-authored-by: Alexander Theißen --- prdoc/pr_6695.prdoc | 8 ++++++++ substrate/frame/revive/proc-macro/src/lib.rs | 4 +++- 2 files changed, 11 insertions(+), 1 deletion(-) create mode 100644 prdoc/pr_6695.prdoc diff --git a/prdoc/pr_6695.prdoc b/prdoc/pr_6695.prdoc new file mode 100644 index 000000000000..7a950e8546cd --- /dev/null +++ b/prdoc/pr_6695.prdoc @@ -0,0 +1,8 @@ +title: '[pallet-revive] bugfix decoding 64bit args in the decoder' +doc: +- audience: Runtime Dev + description: The argument index of the next argument is dictated by the size of + the current one. +crates: +- name: pallet-revive-proc-macro + bump: patch diff --git a/substrate/frame/revive/proc-macro/src/lib.rs b/substrate/frame/revive/proc-macro/src/lib.rs index 012b4bfab9a9..7232c6342824 100644 --- a/substrate/frame/revive/proc-macro/src/lib.rs +++ b/substrate/frame/revive/proc-macro/src/lib.rs @@ -342,7 +342,8 @@ where const ALLOWED_REGISTERS: u32 = 6; let mut registers_used = 0; let mut bindings = vec![]; - for (idx, (name, ty)) in param_names.clone().zip(param_types.clone()).enumerate() { + let mut idx = 0; + for (name, ty) in param_names.clone().zip(param_types.clone()) { let syn::Type::Path(path) = &**ty else { panic!("Type needs to be path"); }; @@ -380,6 +381,7 @@ where } }; bindings.push(binding); + idx += size; } quote! { #( #bindings )* From 72fb8bd3cd4a5051bb855415b360657d7ce247fb Mon Sep 17 00:00:00 2001 From: Rodrigo Quelhas <22591718+RomarQ@users.noreply.github.com> Date: Fri, 29 Nov 2024 10:33:46 +0000 Subject: [PATCH 32/41] Expose types from `sc-service` (#5855) # Description At Moonbeam we have worked on a `lazy-loading` feature, a client mode that forks a live parachain and fetches its state on-demand. We have been able to do this by duplicating some code from `sc_service::client`. The objective of this PR is to simplify the implementation by making public some types in polkadot-sdk.
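Editorial aside on the `pallet-revive` decoder fix in the previous patch, before continuing with this PR's details: the sketch below is a simplified, self-contained model of the corrected register assignment, not the real macro internals. It assumes, as the diff suggests, that a 64-bit argument occupies two 32-bit registers (`size = 2`) while a 32-bit argument occupies one.

```rust
// Each argument starts at the first free register and consumes `size`
// registers, so the next argument's index must advance by `size`.
fn assign_registers(param_sizes: &[u32]) -> Vec<u32> {
    let mut idx = 0; // first free register, as in the patched loop
    let mut starts = Vec::new();
    for &size in param_sizes {
        starts.push(idx); // this argument starts at register `idx`
        idx += size;      // the fix: advance by `size`, not `enumerate()`'s +1
    }
    starts
}

fn main() {
    // For (u32, u64, u32): with `enumerate()` the third argument would have
    // read register 2, which still holds the upper half of the u64. The fix
    // places it at register 3.
    assert_eq!(assign_registers(&[1, 2, 1]), vec![0, 1, 3]);
}
```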
- Modules: - `sc_service::client` **I do not see the point of exposing this type only when the `test-helpers` feature is enabled.** ## Integration Not applicable; the PR just makes some types public. ## Review Notes The changes included in this PR give more flexibility for client developers by exposing important types. --- prdoc/pr_5855.prdoc | 15 +++++++ substrate/bin/node/testing/Cargo.toml | 2 +- substrate/client/network/test/Cargo.toml | 2 +- substrate/client/rpc-spec-v2/Cargo.toml | 2 +- .../src/chain_head/subscription/inner.rs | 6 +-- .../rpc-spec-v2/src/chain_head/tests.rs | 6 +-- substrate/client/service/Cargo.toml | 2 - substrate/client/service/src/client/client.rs | 40 +------------------ substrate/client/service/src/client/mod.rs | 3 +- substrate/client/service/src/lib.rs | 5 +-- substrate/client/service/test/Cargo.toml | 2 +- .../client/service/test/src/client/mod.rs | 6 +-- substrate/test-utils/client/Cargo.toml | 4 +- substrate/test-utils/runtime/Cargo.toml | 2 +- 14 files changed, 34 insertions(+), 63 deletions(-) create mode 100644 prdoc/pr_5855.prdoc diff --git a/prdoc/pr_5855.prdoc b/prdoc/pr_5855.prdoc new file mode 100644 index 000000000000..7735cfee9f37 --- /dev/null +++ b/prdoc/pr_5855.prdoc @@ -0,0 +1,15 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Remove feature `test-helpers` from sc-service + +doc: + - audience: Node Dev + description: | + Removes feature `test-helpers` from sc-service. + +crates: + - name: sc-service + bump: major + - name: sc-rpc-spec-v2 + bump: major diff --git a/substrate/bin/node/testing/Cargo.toml b/substrate/bin/node/testing/Cargo.toml index 16112386ad7c..1972c03a368b 100644 --- a/substrate/bin/node/testing/Cargo.toml +++ b/substrate/bin/node/testing/Cargo.toml @@ -37,7 +37,7 @@ sc-client-api = { workspace = true, default-features = true } sc-client-db = { features = ["rocksdb"], workspace = true, default-features = true } sc-consensus = { workspace = true, default-features = true } sc-executor = { workspace = true, default-features = true } -sc-service = { features = ["rocksdb", "test-helpers"], workspace = true, default-features = true } +sc-service = { features = ["rocksdb"], workspace = true, default-features = true } sp-api = { workspace = true, default-features = true } sp-block-builder = { workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } diff --git a/substrate/client/network/test/Cargo.toml b/substrate/client/network/test/Cargo.toml index ebece1762f29..6340d1dfb2f4 100644 --- a/substrate/client/network/test/Cargo.toml +++ b/substrate/client/network/test/Cargo.toml @@ -33,7 +33,7 @@ sc-network-types = { workspace = true, default-features = true } sc-utils = { workspace = true, default-features = true } sc-network-light = { workspace = true, default-features = true } sc-network-sync = { workspace = true, default-features = true } -sc-service = { features = ["test-helpers"], workspace = true } +sc-service = { workspace = true } sp-blockchain = { workspace = true, default-features = true } sp-consensus = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } diff --git a/substrate/client/rpc-spec-v2/Cargo.toml b/substrate/client/rpc-spec-v2/Cargo.toml index b304bc905925..70f68436767f 100644 --- a/substrate/client/rpc-spec-v2/Cargo.toml +++ b/substrate/client/rpc-spec-v2/Cargo.toml @@ -56,7 +56,7 @@ sp-consensus = { workspace
= true, default-features = true } sp-externalities = { workspace = true, default-features = true } sp-maybe-compressed-blob = { workspace = true, default-features = true } sc-block-builder = { workspace = true, default-features = true } -sc-service = { features = ["test-helpers"], workspace = true, default-features = true } +sc-service = { workspace = true, default-features = true } sc-rpc = { workspace = true, default-features = true, features = ["test-helpers"] } assert_matches = { workspace = true } pretty_assertions = { workspace = true } diff --git a/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs b/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs index 95a7c7fe1832..3e1bd23776d3 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs @@ -784,7 +784,7 @@ mod tests { use super::*; use jsonrpsee::ConnectionId; use sc_block_builder::BlockBuilderBuilder; - use sc_service::client::new_in_mem; + use sc_service::client::new_with_backend; use sp_consensus::BlockOrigin; use sp_core::{testing::TaskExecutor, H256}; use substrate_test_runtime_client::{ @@ -811,13 +811,13 @@ mod tests { ) .unwrap(); let client = Arc::new( - new_in_mem::<_, Block, _, RuntimeApi>( + new_with_backend::<_, _, Block, _, RuntimeApi>( backend.clone(), executor, genesis_block_builder, + Box::new(TaskExecutor::new()), None, None, - Box::new(TaskExecutor::new()), client_config, ) .unwrap(), diff --git a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs index c505566d887d..21e8365622a1 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs @@ -34,7 +34,7 @@ use jsonrpsee::{ use sc_block_builder::BlockBuilderBuilder; use sc_client_api::ChildInfo; use sc_rpc::testing::TokioTestExecutor; -use sc_service::client::new_in_mem; +use sc_service::client::new_with_backend; use sp_blockchain::HeaderBackend; use sp_consensus::BlockOrigin; use sp_core::{ @@ -2547,13 +2547,13 @@ async fn pin_block_references() { .unwrap(); let client = Arc::new( - new_in_mem::<_, Block, _, RuntimeApi>( + new_with_backend::<_, _, Block, _, RuntimeApi>( backend.clone(), executor, genesis_block_builder, + Box::new(TokioTestExecutor::default()), None, None, - Box::new(TokioTestExecutor::default()), client_config, ) .unwrap(), diff --git a/substrate/client/service/Cargo.toml b/substrate/client/service/Cargo.toml index f2fc65ef2439..3981395d9768 100644 --- a/substrate/client/service/Cargo.toml +++ b/substrate/client/service/Cargo.toml @@ -20,8 +20,6 @@ default = ["rocksdb"] # The RocksDB feature activates the RocksDB database backend. If it is not activated, and you pass # a path to a database, an error will be produced at runtime. 
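For downstream consumers, the practical effect of this patch is that a client can now be constructed without any feature flag through the public `sc_service::client::new_with_backend`. The sketch below mirrors the updated call sites elsewhere in this diff; all bindings (`backend`, `executor`, `genesis_block_builder`, `client_config`, the `Block`/`RuntimeApi` types, `TaskExecutor`) are assumed to come from the same test scaffolding shown above — only the call itself is the new API surface.

```rust
use std::sync::Arc;

// Note the argument order: the spawn handle now comes fourth, before the
// optional Prometheus registry and telemetry handle.
let client = Arc::new(
    sc_service::client::new_with_backend::<_, _, Block, _, RuntimeApi>(
        backend.clone(),
        executor,
        genesis_block_builder,
        Box::new(TaskExecutor::new()),
        None, // prometheus registry
        None, // telemetry
        client_config,
    )
    .unwrap(),
);
```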
rocksdb = ["sc-client-db/rocksdb"] -# exposes the client type -test-helpers = [] runtime-benchmarks = [ "sc-client-db/runtime-benchmarks", "sp-runtime/runtime-benchmarks", diff --git a/substrate/client/service/src/client/client.rs b/substrate/client/service/src/client/client.rs index ce5b92551bf2..eddbb9260c05 100644 --- a/substrate/client/service/src/client/client.rs +++ b/substrate/client/service/src/client/client.rs @@ -85,10 +85,8 @@ use std::{ sync::Arc, }; -#[cfg(feature = "test-helpers")] -use { - super::call_executor::LocalCallExecutor, sc_client_api::in_mem, sp_core::traits::CodeExecutor, -}; +use super::call_executor::LocalCallExecutor; +use sp_core::traits::CodeExecutor; type NotificationSinks = Mutex>>; @@ -152,39 +150,6 @@ enum PrepareStorageChangesResult { Discard(ImportResult), Import(Option>), } - -/// Create an instance of in-memory client. -#[cfg(feature = "test-helpers")] -pub fn new_in_mem( - backend: Arc>, - executor: E, - genesis_block_builder: G, - prometheus_registry: Option, - telemetry: Option, - spawn_handle: Box, - config: ClientConfig, -) -> sp_blockchain::Result< - Client, LocalCallExecutor, E>, Block, RA>, -> -where - E: CodeExecutor + sc_executor::RuntimeVersionOf, - Block: BlockT, - G: BuildGenesisBlock< - Block, - BlockImportOperation = as backend::Backend>::BlockImportOperation, - >, -{ - new_with_backend( - backend, - executor, - genesis_block_builder, - spawn_handle, - prometheus_registry, - telemetry, - config, - ) -} - /// Client configuration items. #[derive(Debug, Clone)] pub struct ClientConfig { @@ -218,7 +183,6 @@ impl Default for ClientConfig { /// Create a client with the explicitly provided backend. /// This is useful for testing backend implementations. -#[cfg(feature = "test-helpers")] pub fn new_with_backend( backend: Arc, executor: E, diff --git a/substrate/client/service/src/client/mod.rs b/substrate/client/service/src/client/mod.rs index ec77a92f162f..3020b3d296f4 100644 --- a/substrate/client/service/src/client/mod.rs +++ b/substrate/client/service/src/client/mod.rs @@ -56,5 +56,4 @@ pub use call_executor::LocalCallExecutor; pub use client::{Client, ClientConfig}; pub(crate) use code_provider::CodeProvider; -#[cfg(feature = "test-helpers")] -pub use self::client::{new_in_mem, new_with_backend}; +pub use self::client::new_with_backend; diff --git a/substrate/client/service/src/lib.rs b/substrate/client/service/src/lib.rs index 9c01d7288a81..b5a38d875e3b 100644 --- a/substrate/client/service/src/lib.rs +++ b/substrate/client/service/src/lib.rs @@ -23,14 +23,11 @@ #![recursion_limit = "1024"] pub mod chain_ops; +pub mod client; pub mod config; pub mod error; mod builder; -#[cfg(feature = "test-helpers")] -pub mod client; -#[cfg(not(feature = "test-helpers"))] -mod client; mod metrics; mod task_manager; diff --git a/substrate/client/service/test/Cargo.toml b/substrate/client/service/test/Cargo.toml index 0edfc5b19314..632b98104f6b 100644 --- a/substrate/client/service/test/Cargo.toml +++ b/substrate/client/service/test/Cargo.toml @@ -31,7 +31,7 @@ sc-consensus = { workspace = true, default-features = true } sc-executor = { workspace = true, default-features = true } sc-network = { workspace = true, default-features = true } sc-network-sync = { workspace = true, default-features = true } -sc-service = { features = ["test-helpers"], workspace = true, default-features = true } +sc-service = { workspace = true, default-features = true } sc-transaction-pool-api = { workspace = true, default-features = true } sp-api = { workspace = true, 
default-features = true } sp-blockchain = { workspace = true, default-features = true } diff --git a/substrate/client/service/test/src/client/mod.rs b/substrate/client/service/test/src/client/mod.rs index 55bbfcdd8594..ead90c4c65d8 100644 --- a/substrate/client/service/test/src/client/mod.rs +++ b/substrate/client/service/test/src/client/mod.rs @@ -29,7 +29,7 @@ use sc_consensus::{ BlockCheckParams, BlockImport, BlockImportParams, ForkChoiceStrategy, ImportResult, }; use sc_executor::WasmExecutor; -use sc_service::client::{new_in_mem, Client, LocalCallExecutor}; +use sc_service::client::{new_with_backend, Client, LocalCallExecutor}; use sp_api::ProvideRuntimeApi; use sp_consensus::{BlockOrigin, Error as ConsensusError, SelectChain}; use sp_core::{testing::TaskExecutor, traits::CallContext, H256}; @@ -2087,13 +2087,13 @@ fn cleans_up_closed_notification_sinks_on_block_import() { // NOTE: we need to build the client here instead of using the client // provided by test_runtime_client otherwise we can't access the private // `import_notification_sinks` and `finality_notification_sinks` fields. - let mut client = new_in_mem::<_, Block, _, RuntimeApi>( + let mut client = new_with_backend::<_, _, Block, _, RuntimeApi>( backend, executor, genesis_block_builder, + Box::new(TaskExecutor::new()), None, None, - Box::new(TaskExecutor::new()), client_config, ) .unwrap(); diff --git a/substrate/test-utils/client/Cargo.toml b/substrate/test-utils/client/Cargo.toml index ebd1eab5980d..a67c91fc5f79 100644 --- a/substrate/test-utils/client/Cargo.toml +++ b/substrate/test-utils/client/Cargo.toml @@ -29,9 +29,7 @@ sc-client-db = { features = [ sc-consensus = { workspace = true, default-features = true } sc-executor = { workspace = true, default-features = true } sc-offchain = { workspace = true, default-features = true } -sc-service = { features = [ - "test-helpers", -], workspace = true } +sc-service = { workspace = true } sp-blockchain = { workspace = true, default-features = true } sp-consensus = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } diff --git a/substrate/test-utils/runtime/Cargo.toml b/substrate/test-utils/runtime/Cargo.toml index 1c82c73072bc..96a888052876 100644 --- a/substrate/test-utils/runtime/Cargo.toml +++ b/substrate/test-utils/runtime/Cargo.toml @@ -45,7 +45,7 @@ sp-consensus-grandpa = { features = ["serde"], workspace = true } sp-trie = { workspace = true } sp-transaction-pool = { workspace = true } trie-db = { workspace = true } -sc-service = { features = ["test-helpers"], optional = true, workspace = true } +sc-service = { optional = true, workspace = true } sp-state-machine = { workspace = true } sp-externalities = { workspace = true } From 1dd21bcc1406e0f07f70e604f9cef4dc2115c989 Mon Sep 17 00:00:00 2001 From: Alexander Samusev <41779041+alvicsam@users.noreply.github.com> Date: Fri, 29 Nov 2024 12:00:52 +0100 Subject: [PATCH 33/41] ci: update nightly in ci-unified to 2024-11-19 (#6691) cc https://github.com/paritytech/ci_cd/issues/1088 --- .github/env | 2 +- .gitlab-ci.yml | 2 +- docs/contributor/container.md | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/env b/.github/env index bb61e1f4cd99..730c37f1db80 100644 --- a/.github/env +++ b/.github/env @@ -1 +1 @@ -IMAGE="docker.io/paritytech/ci-unified:bullseye-1.81.0-2024-09-11-v202409111034" +IMAGE="docker.io/paritytech/ci-unified:bullseye-1.81.0-2024-11-19-v202411281558" diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index f508404f1efa..42a7e87bda43 
100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -22,7 +22,7 @@ workflow: variables: # CI_IMAGE: !reference [ .ci-unified, variables, CI_IMAGE ] - CI_IMAGE: "docker.io/paritytech/ci-unified:bullseye-1.81.0-2024-09-11-v202409111034" + CI_IMAGE: "docker.io/paritytech/ci-unified:bullseye-1.81.0-2024-11-19-v202411281558" # BUILDAH_IMAGE is defined in group variables BUILDAH_COMMAND: "buildah --storage-driver overlay2" RELENG_SCRIPTS_BRANCH: "master" diff --git a/docs/contributor/container.md b/docs/contributor/container.md index ec51b8b9d7cc..e387f568d7b5 100644 --- a/docs/contributor/container.md +++ b/docs/contributor/container.md @@ -24,7 +24,7 @@ The command below allows building a Linux binary without having to even install docker run --rm -it \ -w /polkadot-sdk \ -v $(pwd):/polkadot-sdk \ - docker.io/paritytech/ci-unified:bullseye-1.77.0-2024-04-10-v20240408 \ + docker.io/paritytech/ci-unified:bullseye-1.81.0-2024-11-19-v202411281558 \ cargo build --release --locked -p polkadot-parachain-bin --bin polkadot-parachain sudo chown -R $(id -u):$(id -g) target/ ``` From b3ab312724ee8c3a0c7f3d9b5ea6c98513b5c951 Mon Sep 17 00:00:00 2001 From: Xavier Lau Date: Fri, 29 Nov 2024 20:02:59 +0800 Subject: [PATCH 34/41] Migrate pallet-preimage to benchmark v2 (#6277) Part of: - #6202. --------- Co-authored-by: Giuseppe Re Co-authored-by: command-bot <> --- substrate/frame/preimage/src/benchmarking.rs | 296 ++++++++++--------- substrate/frame/preimage/src/weights.rs | 150 +++++----- 2 files changed, 231 insertions(+), 215 deletions(-) diff --git a/substrate/frame/preimage/src/benchmarking.rs b/substrate/frame/preimage/src/benchmarking.rs index 3d0c5b900579..ea635bf3ef77 100644 --- a/substrate/frame/preimage/src/benchmarking.rs +++ b/substrate/frame/preimage/src/benchmarking.rs @@ -17,14 +17,13 @@ //! Preimage pallet benchmarking. -use super::*; use alloc::vec; -use frame_benchmarking::v1::{account, benchmarks, whitelisted_caller, BenchmarkError}; +use frame_benchmarking::v2::*; use frame_support::assert_ok; use frame_system::RawOrigin; use sp_runtime::traits::Bounded; -use crate::Pallet as Preimage; +use crate::*; fn funded_account() -> T::AccountId { let caller: T::AccountId = whitelisted_caller(); @@ -43,206 +42,225 @@ fn sized_preimage_and_hash(size: u32) -> (Vec, T::Hash) { (preimage, hash) } -benchmarks! { +fn insert_old_unrequested(s: u32) -> ::Hash { + let acc = account("old", s, 0); + T::Currency::make_free_balance_be(&acc, BalanceOf::::max_value() / 2u32.into()); + + // The preimage size does not matter here as it is not touched. + let preimage = s.to_le_bytes(); + let hash = ::Hashing::hash(&preimage[..]); + + #[allow(deprecated)] + StatusFor::::insert( + &hash, + OldRequestStatus::Unrequested { deposit: (acc, 123u32.into()), len: preimage.len() as u32 }, + ); + hash +} + +#[benchmarks] +mod benchmarks { + use super::*; + // Expensive note - will reserve. - note_preimage { - let s in 0 .. MAX_SIZE; + #[benchmark] + fn note_preimage(s: Linear<0, MAX_SIZE>) { let caller = funded_account::(); let (preimage, hash) = sized_preimage_and_hash::(s); - }: _(RawOrigin::Signed(caller), preimage) - verify { - assert!(Preimage::::have_preimage(&hash)); + + #[extrinsic_call] + _(RawOrigin::Signed(caller), preimage); + + assert!(Pallet::::have_preimage(&hash)); } + // Cheap note - will not reserve since it was requested. - note_requested_preimage { - let s in 0 .. 
MAX_SIZE; + #[benchmark] + fn note_requested_preimage(s: Linear<0, MAX_SIZE>) { let caller = funded_account::(); let (preimage, hash) = sized_preimage_and_hash::(s); - assert_ok!(Preimage::::request_preimage( + assert_ok!(Pallet::::request_preimage( T::ManagerOrigin::try_successful_origin() .expect("ManagerOrigin has no successful origin required for the benchmark"), hash, )); - }: note_preimage(RawOrigin::Signed(caller), preimage) - verify { - assert!(Preimage::::have_preimage(&hash)); + + #[extrinsic_call] + note_preimage(RawOrigin::Signed(caller), preimage); + + assert!(Pallet::::have_preimage(&hash)); } + // Cheap note - will not reserve since it's the manager. - note_no_deposit_preimage { - let s in 0 .. MAX_SIZE; + #[benchmark] + fn note_no_deposit_preimage(s: Linear<0, MAX_SIZE>) { + let o = T::ManagerOrigin::try_successful_origin() + .expect("ManagerOrigin has no successful origin required for the benchmark"); let (preimage, hash) = sized_preimage_and_hash::(s); - assert_ok!(Preimage::::request_preimage( - T::ManagerOrigin::try_successful_origin() - .expect("ManagerOrigin has no successful origin required for the benchmark"), - hash, - )); - }: note_preimage( - T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, - preimage - ) verify { - assert!(Preimage::::have_preimage(&hash)); + assert_ok!(Pallet::::request_preimage(o.clone(), hash,)); + + #[extrinsic_call] + note_preimage(o as T::RuntimeOrigin, preimage); + + assert!(Pallet::::have_preimage(&hash)); } // Expensive unnote - will unreserve. - unnote_preimage { + #[benchmark] + fn unnote_preimage() { let caller = funded_account::(); let (preimage, hash) = preimage_and_hash::(); - assert_ok!(Preimage::::note_preimage(RawOrigin::Signed(caller.clone()).into(), preimage)); - }: _(RawOrigin::Signed(caller), hash) - verify { - assert!(!Preimage::::have_preimage(&hash)); + assert_ok!(Pallet::::note_preimage(RawOrigin::Signed(caller.clone()).into(), preimage)); + + #[extrinsic_call] + _(RawOrigin::Signed(caller), hash); + + assert!(!Pallet::::have_preimage(&hash)); } + // Cheap unnote - will not unreserve since there's no deposit held. - unnote_no_deposit_preimage { + #[benchmark] + fn unnote_no_deposit_preimage() { + let o = T::ManagerOrigin::try_successful_origin() + .expect("ManagerOrigin has no successful origin required for the benchmark"); let (preimage, hash) = preimage_and_hash::(); - assert_ok!(Preimage::::note_preimage( - T::ManagerOrigin::try_successful_origin() - .expect("ManagerOrigin has no successful origin required for the benchmark"), - preimage, - )); - }: unnote_preimage( - T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, - hash - ) verify { - assert!(!Preimage::::have_preimage(&hash)); + assert_ok!(Pallet::::note_preimage(o.clone(), preimage,)); + + #[extrinsic_call] + unnote_preimage(o as T::RuntimeOrigin, hash); + + assert!(!Pallet::::have_preimage(&hash)); } // Expensive request - will unreserve the noter's deposit. 
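A note on the origin handling visible in the hunks above: V2 benchmarks that dispatch with a privileged origin bind it first and pass it with an explicit `as T::RuntimeOrigin` cast, which the macro needs in order to treat the first `#[extrinsic_call]` argument as the origin. A reduced sketch with a hypothetical `privileged_op` extrinsic:

```rust
#[benchmark]
fn privileged_op() -> Result<(), BenchmarkError> {
    // Mirrors the `T::ManagerOrigin::try_successful_origin()` calls above;
    // skip the benchmark gracefully if no such origin can be materialized.
    let o = T::ManagerOrigin::try_successful_origin()
        .map_err(|_| BenchmarkError::Weightless)?;

    #[extrinsic_call]
    _(o as T::RuntimeOrigin, 42u32);

    Ok(())
}
```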
- request_preimage { + #[benchmark] + fn request_preimage() { + let o = T::ManagerOrigin::try_successful_origin() + .expect("ManagerOrigin has no successful origin required for the benchmark"); let (preimage, hash) = preimage_and_hash::(); let noter = funded_account::(); - assert_ok!(Preimage::::note_preimage(RawOrigin::Signed(noter.clone()).into(), preimage)); - }: _( - T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, - hash - ) verify { - let ticket = TicketOf::::new(¬er, Footprint { count: 1, size: MAX_SIZE as u64 }).unwrap(); - let s = RequestStatus::Requested { maybe_ticket: Some((noter, ticket)), count: 1, maybe_len: Some(MAX_SIZE) }; + assert_ok!(Pallet::::note_preimage(RawOrigin::Signed(noter.clone()).into(), preimage)); + + #[extrinsic_call] + _(o as T::RuntimeOrigin, hash); + + let ticket = + TicketOf::::new(¬er, Footprint { count: 1, size: MAX_SIZE as u64 }).unwrap(); + let s = RequestStatus::Requested { + maybe_ticket: Some((noter, ticket)), + count: 1, + maybe_len: Some(MAX_SIZE), + }; assert_eq!(RequestStatusFor::::get(&hash), Some(s)); } + // Cheap request - would unreserve the deposit but none was held. - request_no_deposit_preimage { + #[benchmark] + fn request_no_deposit_preimage() { + let o = T::ManagerOrigin::try_successful_origin() + .expect("ManagerOrigin has no successful origin required for the benchmark"); let (preimage, hash) = preimage_and_hash::(); - assert_ok!(Preimage::::note_preimage( - T::ManagerOrigin::try_successful_origin() - .expect("ManagerOrigin has no successful origin required for the benchmark"), - preimage, - )); - }: request_preimage( - T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, - hash - ) verify { - let s = RequestStatus::Requested { maybe_ticket: None, count: 2, maybe_len: Some(MAX_SIZE) }; + assert_ok!(Pallet::::note_preimage(o.clone(), preimage,)); + + #[extrinsic_call] + request_preimage(o as T::RuntimeOrigin, hash); + + let s = + RequestStatus::Requested { maybe_ticket: None, count: 2, maybe_len: Some(MAX_SIZE) }; assert_eq!(RequestStatusFor::::get(&hash), Some(s)); } + // Cheap request - the preimage is not yet noted, so deposit to unreserve. - request_unnoted_preimage { + #[benchmark] + fn request_unnoted_preimage() { + let o = T::ManagerOrigin::try_successful_origin() + .expect("ManagerOrigin has no successful origin required for the benchmark"); let (_, hash) = preimage_and_hash::(); - }: request_preimage( - T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, - hash - ) verify { + + #[extrinsic_call] + request_preimage(o as T::RuntimeOrigin, hash); + let s = RequestStatus::Requested { maybe_ticket: None, count: 1, maybe_len: None }; assert_eq!(RequestStatusFor::::get(&hash), Some(s)); } + // Cheap request - the preimage is already requested, so just a counter bump. 
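The `count` field asserted on in these hunks implements simple reference counting. As a self-contained toy model (not the pallet's real types — real statuses also track deposits and preimage lengths), the transitions the benchmarks check look like this:

```rust
#[derive(Debug, PartialEq)]
enum Status {
    Requested { count: u32 },
}

fn request(s: Option<Status>) -> Status {
    match s {
        // First request for this hash: start counting.
        None => Status::Requested { count: 1 },
        // Already requested: just a counter bump.
        Some(Status::Requested { count }) => Status::Requested { count: count + 1 },
    }
}

fn unrequest(s: Status) -> Option<Status> {
    match s {
        // Last reference: the status (and any noted preimage) is dropped.
        Status::Requested { count: 1 } => None,
        Status::Requested { count } => Some(Status::Requested { count: count - 1 }),
    }
}

fn main() {
    let s = request(None);         // like `request_unnoted_preimage`: count == 1
    let s = request(Some(s));      // like `request_requested_preimage`: count == 2
    let s = unrequest(s).unwrap(); // not the last reference: count == 1
    assert_eq!(s, Status::Requested { count: 1 });
    assert!(unrequest(s).is_none()); // last reference: entry removed
}
```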
- request_requested_preimage { + #[benchmark] + fn request_requested_preimage() { + let o = T::ManagerOrigin::try_successful_origin() + .expect("ManagerOrigin has no successful origin required for the benchmark"); let (_, hash) = preimage_and_hash::(); - assert_ok!(Preimage::::request_preimage( - T::ManagerOrigin::try_successful_origin() - .expect("ManagerOrigin has no successful origin required for the benchmark"), - hash, - )); - }: request_preimage( - T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, - hash - ) verify { + assert_ok!(Pallet::::request_preimage(o.clone(), hash,)); + + #[extrinsic_call] + request_preimage(o as T::RuntimeOrigin, hash); + let s = RequestStatus::Requested { maybe_ticket: None, count: 2, maybe_len: None }; assert_eq!(RequestStatusFor::::get(&hash), Some(s)); } // Expensive unrequest - last reference and it's noted, so will destroy the preimage. - unrequest_preimage { + #[benchmark] + fn unrequest_preimage() { + let o = T::ManagerOrigin::try_successful_origin() + .expect("ManagerOrigin has no successful origin required for the benchmark"); let (preimage, hash) = preimage_and_hash::(); - assert_ok!(Preimage::::request_preimage( - T::ManagerOrigin::try_successful_origin() - .expect("ManagerOrigin has no successful origin required for the benchmark"), - hash, - )); - assert_ok!(Preimage::::note_preimage( - T::ManagerOrigin::try_successful_origin() - .expect("ManagerOrigin has no successful origin required for the benchmark"), - preimage, - )); - }: _( - T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, - hash - ) verify { + assert_ok!(Pallet::::request_preimage(o.clone(), hash,)); + assert_ok!(Pallet::::note_preimage(o.clone(), preimage)); + + #[extrinsic_call] + _(o as T::RuntimeOrigin, hash); + assert_eq!(RequestStatusFor::::get(&hash), None); } + // Cheap unrequest - last reference, but it's not noted. - unrequest_unnoted_preimage { + #[benchmark] + fn unrequest_unnoted_preimage() { + let o = T::ManagerOrigin::try_successful_origin() + .expect("ManagerOrigin has no successful origin required for the benchmark"); let (_, hash) = preimage_and_hash::(); - assert_ok!(Preimage::::request_preimage( - T::ManagerOrigin::try_successful_origin() - .expect("ManagerOrigin has no successful origin required for the benchmark"), - hash, - )); - }: unrequest_preimage( - T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, - hash - ) verify { + assert_ok!(Pallet::::request_preimage(o.clone(), hash,)); + + #[extrinsic_call] + unrequest_preimage(o as T::RuntimeOrigin, hash); + assert_eq!(RequestStatusFor::::get(&hash), None); } + // Cheap unrequest - not the last reference. 
- unrequest_multi_referenced_preimage { + #[benchmark] + fn unrequest_multi_referenced_preimage() { + let o = T::ManagerOrigin::try_successful_origin() + .expect("ManagerOrigin has no successful origin required for the benchmark"); let (_, hash) = preimage_and_hash::(); - assert_ok!(Preimage::::request_preimage( - T::ManagerOrigin::try_successful_origin() - .expect("ManagerOrigin has no successful origin required for the benchmark"), - hash, - )); - assert_ok!(Preimage::::request_preimage( - T::ManagerOrigin::try_successful_origin() - .expect("ManagerOrigin has no successful origin required for the benchmark"), - hash, - )); - }: unrequest_preimage( - T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, - hash - ) verify { + assert_ok!(Pallet::::request_preimage(o.clone(), hash,)); + assert_ok!(Pallet::::request_preimage(o.clone(), hash,)); + + #[extrinsic_call] + unrequest_preimage(o as T::RuntimeOrigin, hash); + let s = RequestStatus::Requested { maybe_ticket: None, count: 1, maybe_len: None }; assert_eq!(RequestStatusFor::::get(&hash), Some(s)); } - ensure_updated { - let n in 1..MAX_HASH_UPGRADE_BULK_COUNT; - + #[benchmark] + fn ensure_updated(n: Linear<1, MAX_HASH_UPGRADE_BULK_COUNT>) { let caller = funded_account::(); let hashes = (0..n).map(|i| insert_old_unrequested::(i)).collect::>(); - }: _(RawOrigin::Signed(caller), hashes) - verify { + + #[extrinsic_call] + _(RawOrigin::Signed(caller), hashes); + assert_eq!(RequestStatusFor::::iter_keys().count(), n as usize); #[allow(deprecated)] let c = StatusFor::::iter_keys().count(); assert_eq!(c, 0); } - impl_benchmark_test_suite!(Preimage, crate::mock::new_test_ext(), crate::mock::Test); -} - -fn insert_old_unrequested(s: u32) -> ::Hash { - let acc = account("old", s, 0); - T::Currency::make_free_balance_be(&acc, BalanceOf::::max_value() / 2u32.into()); - - // The preimage size does not matter here as it is not touched. - let preimage = s.to_le_bytes(); - let hash = ::Hashing::hash(&preimage[..]); - - #[allow(deprecated)] - StatusFor::::insert( - &hash, - OldRequestStatus::Unrequested { deposit: (acc, 123u32.into()), len: preimage.len() as u32 }, - ); - hash + impl_benchmark_test_suite! { + Pallet, + mock::new_test_ext(), + mock::Test + } } diff --git a/substrate/frame/preimage/src/weights.rs b/substrate/frame/preimage/src/weights.rs index edb2eed9c75a..a3aec7e7546e 100644 --- a/substrate/frame/preimage/src/weights.rs +++ b/substrate/frame/preimage/src/weights.rs @@ -18,27 +18,25 @@ //! Autogenerated weights for `pallet_preimage` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-11-28, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate-node +// target/production/substrate-node // benchmark // pallet -// --chain=dev // --steps=50 // --repeat=20 -// --pallet=pallet_preimage -// --no-storage-info -// --no-median-slopes -// --no-min-squares // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --output=./substrate/frame/preimage/src/weights.rs +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_preimage +// --chain=dev // --header=./substrate/HEADER-APACHE2 +// --output=./substrate/frame/preimage/src/weights.rs // --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -84,10 +82,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `7` // Estimated: `6012` - // Minimum execution time: 51_981_000 picoseconds. - Weight::from_parts(52_228_000, 6012) - // Standard Error: 6 - .saturating_add(Weight::from_parts(2_392, 0).saturating_mul(s.into())) + // Minimum execution time: 51_305_000 picoseconds. + Weight::from_parts(51_670_000, 6012) + // Standard Error: 5 + .saturating_add(Weight::from_parts(2_337, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -102,10 +100,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `68` // Estimated: `3556` - // Minimum execution time: 15_835_000 picoseconds. - Weight::from_parts(16_429_000, 3556) - // Standard Error: 8 - .saturating_add(Weight::from_parts(2_647, 0).saturating_mul(s.into())) + // Minimum execution time: 16_204_000 picoseconds. + Weight::from_parts(16_613_000, 3556) + // Standard Error: 6 + .saturating_add(Weight::from_parts(2_503, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -120,10 +118,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `68` // Estimated: `3556` - // Minimum execution time: 15_263_000 picoseconds. - Weight::from_parts(15_578_000, 3556) - // Standard Error: 7 - .saturating_add(Weight::from_parts(2_598, 0).saturating_mul(s.into())) + // Minimum execution time: 15_118_000 picoseconds. + Weight::from_parts(15_412_000, 3556) + // Standard Error: 6 + .saturating_add(Weight::from_parts(2_411, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -139,8 +137,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `206` // Estimated: `3820` - // Minimum execution time: 64_189_000 picoseconds. - Weight::from_parts(70_371_000, 3820) + // Minimum execution time: 57_218_000 picoseconds. + Weight::from_parts(61_242_000, 3820) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -154,8 +152,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3556` - // Minimum execution time: 27_582_000 picoseconds. - Weight::from_parts(31_256_000, 3556) + // Minimum execution time: 25_140_000 picoseconds. 
+ Weight::from_parts(27_682_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -167,8 +165,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `150` // Estimated: `3556` - // Minimum execution time: 27_667_000 picoseconds. - Weight::from_parts(32_088_000, 3556) + // Minimum execution time: 25_296_000 picoseconds. + Weight::from_parts(27_413_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -180,8 +178,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3556` - // Minimum execution time: 16_065_000 picoseconds. - Weight::from_parts(20_550_000, 3556) + // Minimum execution time: 15_011_000 picoseconds. + Weight::from_parts(16_524_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -193,8 +191,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3556` - // Minimum execution time: 13_638_000 picoseconds. - Weight::from_parts(16_979_000, 3556) + // Minimum execution time: 14_649_000 picoseconds. + Weight::from_parts(15_439_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -206,8 +204,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `68` // Estimated: `3556` - // Minimum execution time: 11_383_000 picoseconds. - Weight::from_parts(12_154_000, 3556) + // Minimum execution time: 10_914_000 picoseconds. + Weight::from_parts(11_137_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -221,8 +219,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3556` - // Minimum execution time: 22_832_000 picoseconds. - Weight::from_parts(30_716_000, 3556) + // Minimum execution time: 22_512_000 picoseconds. + Weight::from_parts(24_376_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -234,8 +232,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `68` // Estimated: `3556` - // Minimum execution time: 10_685_000 picoseconds. - Weight::from_parts(12_129_000, 3556) + // Minimum execution time: 10_571_000 picoseconds. + Weight::from_parts(10_855_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -247,8 +245,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `68` // Estimated: `3556` - // Minimum execution time: 10_394_000 picoseconds. - Weight::from_parts(10_951_000, 3556) + // Minimum execution time: 10_312_000 picoseconds. + Weight::from_parts(10_653_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -267,10 +265,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0 + n * (227 ±0)` // Estimated: `6012 + n * (2830 ±0)` - // Minimum execution time: 62_203_000 picoseconds. - Weight::from_parts(63_735_000, 6012) - // Standard Error: 59_589 - .saturating_add(Weight::from_parts(59_482_352, 0).saturating_mul(n.into())) + // Minimum execution time: 61_990_000 picoseconds. 
+ Weight::from_parts(62_751_000, 6012) + // Standard Error: 44_079 + .saturating_add(Weight::from_parts(57_343_378, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes((4_u64).saturating_mul(n.into()))) @@ -295,10 +293,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `7` // Estimated: `6012` - // Minimum execution time: 51_981_000 picoseconds. - Weight::from_parts(52_228_000, 6012) - // Standard Error: 6 - .saturating_add(Weight::from_parts(2_392, 0).saturating_mul(s.into())) + // Minimum execution time: 51_305_000 picoseconds. + Weight::from_parts(51_670_000, 6012) + // Standard Error: 5 + .saturating_add(Weight::from_parts(2_337, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -313,10 +311,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `68` // Estimated: `3556` - // Minimum execution time: 15_835_000 picoseconds. - Weight::from_parts(16_429_000, 3556) - // Standard Error: 8 - .saturating_add(Weight::from_parts(2_647, 0).saturating_mul(s.into())) + // Minimum execution time: 16_204_000 picoseconds. + Weight::from_parts(16_613_000, 3556) + // Standard Error: 6 + .saturating_add(Weight::from_parts(2_503, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -331,10 +329,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `68` // Estimated: `3556` - // Minimum execution time: 15_263_000 picoseconds. - Weight::from_parts(15_578_000, 3556) - // Standard Error: 7 - .saturating_add(Weight::from_parts(2_598, 0).saturating_mul(s.into())) + // Minimum execution time: 15_118_000 picoseconds. + Weight::from_parts(15_412_000, 3556) + // Standard Error: 6 + .saturating_add(Weight::from_parts(2_411, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -350,8 +348,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `206` // Estimated: `3820` - // Minimum execution time: 64_189_000 picoseconds. - Weight::from_parts(70_371_000, 3820) + // Minimum execution time: 57_218_000 picoseconds. + Weight::from_parts(61_242_000, 3820) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -365,8 +363,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3556` - // Minimum execution time: 27_582_000 picoseconds. - Weight::from_parts(31_256_000, 3556) + // Minimum execution time: 25_140_000 picoseconds. + Weight::from_parts(27_682_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -378,8 +376,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `150` // Estimated: `3556` - // Minimum execution time: 27_667_000 picoseconds. - Weight::from_parts(32_088_000, 3556) + // Minimum execution time: 25_296_000 picoseconds. + Weight::from_parts(27_413_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -391,8 +389,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3556` - // Minimum execution time: 16_065_000 picoseconds. 
- Weight::from_parts(20_550_000, 3556) + // Minimum execution time: 15_011_000 picoseconds. + Weight::from_parts(16_524_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -404,8 +402,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3556` - // Minimum execution time: 13_638_000 picoseconds. - Weight::from_parts(16_979_000, 3556) + // Minimum execution time: 14_649_000 picoseconds. + Weight::from_parts(15_439_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -417,8 +415,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `68` // Estimated: `3556` - // Minimum execution time: 11_383_000 picoseconds. - Weight::from_parts(12_154_000, 3556) + // Minimum execution time: 10_914_000 picoseconds. + Weight::from_parts(11_137_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -432,8 +430,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3556` - // Minimum execution time: 22_832_000 picoseconds. - Weight::from_parts(30_716_000, 3556) + // Minimum execution time: 22_512_000 picoseconds. + Weight::from_parts(24_376_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -445,8 +443,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `68` // Estimated: `3556` - // Minimum execution time: 10_685_000 picoseconds. - Weight::from_parts(12_129_000, 3556) + // Minimum execution time: 10_571_000 picoseconds. + Weight::from_parts(10_855_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -458,8 +456,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `68` // Estimated: `3556` - // Minimum execution time: 10_394_000 picoseconds. - Weight::from_parts(10_951_000, 3556) + // Minimum execution time: 10_312_000 picoseconds. + Weight::from_parts(10_653_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -478,10 +476,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0 + n * (227 ±0)` // Estimated: `6012 + n * (2830 ±0)` - // Minimum execution time: 62_203_000 picoseconds. - Weight::from_parts(63_735_000, 6012) - // Standard Error: 59_589 - .saturating_add(Weight::from_parts(59_482_352, 0).saturating_mul(n.into())) + // Minimum execution time: 61_990_000 picoseconds. + Weight::from_parts(62_751_000, 6012) + // Standard Error: 44_079 + .saturating_add(Weight::from_parts(57_343_378, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes((4_u64).saturating_mul(n.into()))) From 447902eff4a574e66894ad60cb41999b05bf5e84 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Thei=C3=9Fen?= Date: Fri, 29 Nov 2024 13:46:31 +0100 Subject: [PATCH 35/41] pallet_revive: Switch to 64bit RISC-V (#6565) This PR updates pallet_revive to the newest PolkaVM version and adapts the test fixtures and syscall interface to work under 64bit. Please note that after this PR no 32bit contracts can be deployed (they will be rejected at deploy time). 
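For illustration, the deploy-time rejection amounts to a single up-front check on the uploaded PolkaVM blob. A minimal sketch of that rule (hedged: simplified from the `limits.rs` change in this diff, not the exact pallet code; `ensure_64_bit` is a hypothetical helper name):

```rust
use polkavm::ProgramBlob;

// Sketch of the new deploy-time rule: any blob that was not compiled for
// the 64bit PolkaVM target is rejected before it can be instantiated.
// The error string stands in for `Error::<T>::CodeRejected` in pallet_revive.
fn ensure_64_bit(blob: &ProgramBlob) -> Result<(), &'static str> {
    if !blob.is_64_bit() {
        return Err("32bit programs are not supported");
    }
    Ok(())
}
```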
Pre-deployed 32bit contracts are now considered defunct since we changed how parameters are passed for functions with more than 6 arguments. ## Fixtures The fixtures are now built for the 64bit target. I also removed the temporary directory mechanism that triggered a full rebuild every time. It also makes it easier to find the compiled fixtures since they are now always in `target/pallet-revive-fixtures`. ## Syscall interface ### Passing pointers Registers and pointers are now 64bit wide. This allows us to pass u64 arguments in a single register. Before we needed two registers to pass them. This means that just as before we need one register per pointer we pass. We keep pointers as `u32` arguments by truncating the register. This is done since the memory space of PolkaVM is 32bit. ### Functions with more than 6 arguments We only have 6 registers to pass arguments. This is why we pass a pointer to a struct when we need more than 6. Before this PR we expected a packed struct and interpreted it as a SCALE-encoded tuple. However, this was buggy because the `MaxEncodedLen` returned something that was larger than the packed size of the structure. This wasn't a problem before. But now the memory space changed in a way that things were placed at the edges of the memory space and those extra bytes led to an out-of-bounds access. This is why this PR drops SCALE and expects the arguments to be passed as a pointer to a `C`-aligned struct. This avoids unaligned accesses. However, revive needs to adapt its codegen to properly align the structure fields. ## TODO - [ ] Add a multi-block migration that wipes all existing contracts, as we made breaking changes to the syscall interface --------- Co-authored-by: GitHub Action --- .github/workflows/checks-quick.yml | 1 - Cargo.lock | 72 +++++++------- prdoc/pr_6565.prdoc | 35 +++++++ substrate/frame/revive/Cargo.toml | 2 +- substrate/frame/revive/fixtures/Cargo.toml | 4 +- substrate/frame/revive/fixtures/build.rs | 96 +++++++++++++------ .../build/{Cargo.toml => _Cargo.toml} | 5 +- .../fixtures/build/_rust-toolchain.toml | 4 + .../riscv32emac-unknown-none-polkavm.json | 26 ----- substrate/frame/revive/fixtures/src/lib.rs | 13 +-- substrate/frame/revive/proc-macro/src/lib.rs | 91 ++++++++++-------- substrate/frame/revive/rpc/src/tests.rs | 6 ++ substrate/frame/revive/src/chain_extension.rs | 12 +-- substrate/frame/revive/src/limits.rs | 21 +++- substrate/frame/revive/src/wasm/mod.rs | 20 +++- substrate/frame/revive/src/wasm/runtime.rs | 33 ++----- substrate/frame/revive/uapi/Cargo.toml | 6 +- substrate/frame/revive/uapi/src/host.rs | 4 +- .../uapi/src/host/{riscv32.rs => riscv64.rs} | 86 ++++++++--------- substrate/frame/revive/uapi/src/lib.rs | 6 ++ 20 files changed, 309 insertions(+), 234 deletions(-) create mode 100644 prdoc/pr_6565.prdoc rename substrate/frame/revive/fixtures/build/{Cargo.toml => _Cargo.toml} (80%) create mode 100644 substrate/frame/revive/fixtures/build/_rust-toolchain.toml delete mode 100644 substrate/frame/revive/fixtures/riscv32emac-unknown-none-polkavm.json rename substrate/frame/revive/uapi/src/host/{riscv32.rs => riscv64.rs} (93%) diff --git a/.github/workflows/checks-quick.yml b/.github/workflows/checks-quick.yml index c733a2517cb8..4c26b85a6303 100644 --- a/.github/workflows/checks-quick.yml +++ b/.github/workflows/checks-quick.yml @@ -97,7 +97,6 @@ jobs: --exclude "substrate/frame/contracts/fixtures/build" "substrate/frame/contracts/fixtures/contracts/common" - "substrate/frame/revive/fixtures/build"
"substrate/frame/revive/fixtures/contracts/common" - name: deny git deps run: python3 .github/scripts/deny-git-deps.py . diff --git a/Cargo.lock b/Cargo.lock index 84477cd05416..e1abeea49283 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5975,6 +5975,15 @@ dependencies = [ "dirs-sys-next", ] +[[package]] +name = "dirs" +version = "5.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44c45a9d03d6676652bcb5e724c7e988de1acad23a711b5217ab9cbecbec2225" +dependencies = [ + "dirs-sys", +] + [[package]] name = "dirs-sys" version = "0.4.1" @@ -14646,7 +14655,7 @@ dependencies = [ "pallet-utility 28.0.0", "parity-scale-codec", "paste", - "polkavm 0.13.0", + "polkavm 0.17.0", "pretty_assertions", "rlp 0.6.1", "scale-info", @@ -14742,12 +14751,10 @@ dependencies = [ "anyhow", "frame-system 28.0.0", "log", - "parity-wasm", - "polkavm-linker 0.14.0", + "polkavm-linker 0.17.0", "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", - "tempfile", "toml 0.8.12", ] @@ -14864,7 +14871,7 @@ dependencies = [ "bitflags 1.3.2", "parity-scale-codec", "paste", - "polkavm-derive 0.14.0", + "polkavm-derive 0.17.0", "scale-info", ] @@ -19699,15 +19706,15 @@ dependencies = [ [[package]] name = "polkavm" -version = "0.13.0" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57e79a14b15ed38cb5b9a1e38d02e933f19e3d180ae5b325fed606c5e5b9177e" +checksum = "84979be196ba2855f73616413e7b1d18258128aa396b3dc23f520a00a807720e" dependencies = [ "libc", "log", - "polkavm-assembler 0.13.0", - "polkavm-common 0.13.0", - "polkavm-linux-raw 0.13.0", + "polkavm-assembler 0.17.0", + "polkavm-common 0.17.0", + "polkavm-linux-raw 0.17.0", ] [[package]] @@ -19730,9 +19737,9 @@ dependencies = [ [[package]] name = "polkavm-assembler" -version = "0.13.0" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e8da55465000feb0a61bbf556ed03024db58f3420eca37721fc726b3b2136bf" +checksum = "0ba7b434ff630b0f73a1560e8baea807246ca22098abe49f97821e0e2d2accc4" dependencies = [ "log", ] @@ -19764,20 +19771,14 @@ dependencies = [ [[package]] name = "polkavm-common" -version = "0.13.0" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "084b4339aae7dfdaaa5aa7d634110afd95970e0737b6fb2a0cb10db8b56b753c" +checksum = "8f0dbafef4ab6ceecb4982ac3b550df430ef4f9fdbf07c108b7d4f91a0682fce" dependencies = [ "log", - "polkavm-assembler 0.13.0", + "polkavm-assembler 0.17.0", ] -[[package]] -name = "polkavm-common" -version = "0.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "711952a783e9c5ad407cdacb1ed147f36d37c5d43417c1091d86456d2999417b" - [[package]] name = "polkavm-derive" version = "0.8.0" @@ -19807,11 +19808,11 @@ dependencies = [ [[package]] name = "polkavm-derive" -version = "0.14.0" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4832a0aebf6cefc988bb7b2d74ea8c86c983164672e2fc96300f356a1babfc1" +checksum = "c0c3dbb6c8c7bd3e5f5b05aa7fc9355acf14df7ce5d392911e77d01090a38d0d" dependencies = [ - "polkavm-derive-impl-macro 0.14.0", + "polkavm-derive-impl-macro 0.17.0", ] [[package]] @@ -19852,11 +19853,11 @@ dependencies = [ [[package]] name = "polkavm-derive-impl" -version = "0.14.0" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e339fc7c11310fe5adf711d9342278ac44a75c9784947937cce12bd4f30842f2" +checksum = 
"42565aed4adbc4034612d0b17dea8db3681fb1bd1aed040d6edc5455a9f478a1" dependencies = [ - "polkavm-common 0.14.0", + "polkavm-common 0.17.0", "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.87", @@ -19894,11 +19895,11 @@ dependencies = [ [[package]] name = "polkavm-derive-impl-macro" -version = "0.14.0" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b569754b15060d03000c09e3bf11509d527f60b75d79b4c30c3625b5071d9702" +checksum = "86d9838e95241b0bce4fe269cdd4af96464160505840ed5a8ac8536119ba19e2" dependencies = [ - "polkavm-derive-impl 0.14.0", + "polkavm-derive-impl 0.17.0", "syn 2.0.87", ] @@ -19934,15 +19935,16 @@ dependencies = [ [[package]] name = "polkavm-linker" -version = "0.14.0" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0959ac3b0f4fd5caf5c245c637705f19493efe83dba31a83bbba928b93b0116a" +checksum = "d359dc721d2cc9b555ebb3558c305112ddc5bdac09d26f95f2f7b49c1f2db7e9" dependencies = [ + "dirs", "gimli 0.31.1", "hashbrown 0.14.5", "log", "object 0.36.1", - "polkavm-common 0.14.0", + "polkavm-common 0.17.0", "regalloc2 0.9.3", "rustc-demangle", ] @@ -19961,9 +19963,9 @@ checksum = "26e45fa59c7e1bb12ef5289080601e9ec9b31435f6e32800a5c90c132453d126" [[package]] name = "polkavm-linux-raw" -version = "0.13.0" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "686c4dd9c9c16cc22565b51bdbb269792318d0fd2e6b966b5f6c788534cad0e9" +checksum = "e64c3d93a58ffbc3099d1227f0da9675a025a9ea6c917038f266920c1de1e568" [[package]] name = "polling" diff --git a/prdoc/pr_6565.prdoc b/prdoc/pr_6565.prdoc new file mode 100644 index 000000000000..f9a75a16a6a7 --- /dev/null +++ b/prdoc/pr_6565.prdoc @@ -0,0 +1,35 @@ +title: 'pallet_revive: Switch to 64bit RISC-V' +doc: +- audience: Runtime Dev + description: |- + This PR updates pallet_revive to the newest PolkaVM version and adapts the test fixtures and syscall interface to work under 64bit. + + Please note that after this PR no 32bit contracts can be deployed (they will be rejected at deploy time). Pre-deployed 32bit contracts are now considered defunct since we changes how parameters are passed for functions with more than 6 arguments. + + ## Fixtures + + The fixtures are now built for the 64bit target. I also removed the temporary directory mechanism that triggered a full rebuild every time. It also makes it easier to find the compiled fixtures since they are now always in `target/pallet-revive-fixtures`. + + ## Syscall interface + + ### Passing pointer + + Registers and pointers are now 64bit wide. This allows us to pass u64 arguments in a single register. Before we needed two registers to pass them. This means that just as before we need one register per pointer we pass. We keep pointers as `u32` argument by truncating the register. This is done since the memory space of PolkaVM is 32bit. + + ### Functions with more than 6 arguments + + We only have 6 registers to pass arguments. This is why we pass a pointer to a struct when we need more than 6. Before this PR we expected a packed struct and interpreted it as SCALE encoded tuple. However, this was buggy because the `MaxEncodedLen` returned something that was larger than the packed size of the structure. This wasn't a problem before. But now the memory space changed in a way that things were placed at the edges of the memory space and those extra bytes lead to an out of bound access. 
+ + This is why this PR drops SCALE and expects the arguments to be passed as a pointer to a `C` aligned struct. This avoids unaligned accesses. However, revive needs to adapt its codegen to properly align the structure fields. + + ## TODO + - [ ] Add multi block migration that wipes all existing contracts as we made breaking changes to the syscall interface +crates: +- name: pallet-revive + bump: major +- name: pallet-revive-fixtures + bump: major +- name: pallet-revive-proc-macro + bump: major +- name: pallet-revive-uapi + bump: major diff --git a/substrate/frame/revive/Cargo.toml b/substrate/frame/revive/Cargo.toml index 81fbbc8cf38e..677ef0e1367f 100644 --- a/substrate/frame/revive/Cargo.toml +++ b/substrate/frame/revive/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] environmental = { workspace = true } paste = { workspace = true } -polkavm = { version = "0.13.0", default-features = false } +polkavm = { version = "0.17.0", default-features = false } bitflags = { workspace = true } codec = { features = ["derive", "max-encoded-len"], workspace = true } scale-info = { features = ["derive"], workspace = true } diff --git a/substrate/frame/revive/fixtures/Cargo.toml b/substrate/frame/revive/fixtures/Cargo.toml index 7a5452853d65..798ed8c75a5a 100644 --- a/substrate/frame/revive/fixtures/Cargo.toml +++ b/substrate/frame/revive/fixtures/Cargo.toml @@ -18,10 +18,8 @@ anyhow = { workspace = true, default-features = true, optional = true } log = { workspace = true } [build-dependencies] -parity-wasm = { workspace = true } -tempfile = { workspace = true } toml = { workspace = true } -polkavm-linker = { version = "0.14.0" } +polkavm-linker = { version = "0.17.0" } anyhow = { workspace = true, default-features = true } [features] diff --git a/substrate/frame/revive/fixtures/build.rs b/substrate/frame/revive/fixtures/build.rs index 3472e0846efd..46cd5760ca4e 100644 --- a/substrate/frame/revive/fixtures/build.rs +++ b/substrate/frame/revive/fixtures/build.rs @@ -20,7 +20,8 @@ use anyhow::Result; use anyhow::{bail, Context}; use std::{ - cfg, env, fs, + env, fs, + io::Write, path::{Path, PathBuf}, process::Command, }; @@ -82,7 +83,7 @@ fn create_cargo_toml<'a>( entries: impl Iterator, output_dir: &Path, ) -> Result<()> { - let mut cargo_toml: toml::Value = toml::from_str(include_str!("./build/Cargo.toml"))?; + let mut cargo_toml: toml::Value = toml::from_str(include_str!("./build/_Cargo.toml"))?; let mut set_dep = |name, path| -> Result<()> { cargo_toml["dependencies"][name]["path"] = toml::Value::String( fixtures_dir.join(path).canonicalize()?.to_str().unwrap().to_string(), @@ -108,21 +109,24 @@ fn create_cargo_toml<'a>( let cargo_toml = toml::to_string_pretty(&cargo_toml)?; fs::write(output_dir.join("Cargo.toml"), cargo_toml.clone()) .with_context(|| format!("Failed to write {cargo_toml:?}"))?; + fs::copy( + fixtures_dir.join("build/_rust-toolchain.toml"), + output_dir.join("rust-toolchain.toml"), + ) + .context("Failed to write toolchain file")?; Ok(()) } -fn invoke_build(target: &Path, current_dir: &Path) -> Result<()> { +fn invoke_build(current_dir: &Path) -> Result<()> { let encoded_rustflags = ["-Dwarnings"].join("\x1f"); - let mut build_command = Command::new(env::var("CARGO")?); + let mut build_command = Command::new("cargo"); build_command .current_dir(current_dir) .env_clear() .env("PATH", env::var("PATH").unwrap_or_default()) .env("CARGO_ENCODED_RUSTFLAGS", encoded_rustflags) - .env("RUSTC_BOOTSTRAP", "1") .env("RUSTUP_HOME", 
env::var("RUSTUP_HOME").unwrap_or_default()) - .env("RUSTUP_TOOLCHAIN", env::var("RUSTUP_TOOLCHAIN").unwrap_or_default()) .args([ "build", "--release", @@ -130,7 +134,7 @@ fn invoke_build(target: &Path, current_dir: &Path) -> Result<()> { "-Zbuild-std-features=panic_immediate_abort", ]) .arg("--target") - .arg(target); + .arg(polkavm_linker::target_json_64_path().unwrap()); if let Ok(toolchain) = env::var(OVERRIDE_RUSTUP_TOOLCHAIN_ENV_VAR) { build_command.env("RUSTUP_TOOLCHAIN", &toolchain); @@ -168,7 +172,7 @@ fn write_output(build_dir: &Path, out_dir: &Path, entries: Vec) -> Result for entry in entries { post_process( &build_dir - .join("target/riscv32emac-unknown-none-polkavm/release") + .join("target/riscv64emac-unknown-none-polkavm/release") .join(entry.name()), &out_dir.join(entry.out_filename()), )?; @@ -177,11 +181,61 @@ fn write_output(build_dir: &Path, out_dir: &Path, entries: Vec) -> Result Ok(()) } +/// Create a directory in the `target` as output directory +fn create_out_dir() -> Result { + let temp_dir: PathBuf = env::var("OUT_DIR")?.into(); + + // this is set in case the user has overriden the target directory + let out_dir = if let Ok(path) = env::var("CARGO_TARGET_DIR") { + path.into() + } else { + // otherwise just traverse up from the out dir + let mut out_dir: PathBuf = temp_dir.clone(); + loop { + if !out_dir.pop() { + bail!("Cannot find project root.") + } + if out_dir.join("Cargo.lock").exists() { + break; + } + } + out_dir.join("target") + } + .join("pallet-revive-fixtures"); + + // clean up some leftover symlink from previous versions of this script + if out_dir.exists() && !out_dir.is_dir() { + fs::remove_file(&out_dir)?; + } + fs::create_dir_all(&out_dir).context("Failed to create output directory")?; + + // write the location of the out dir so it can be found later + let mut file = fs::File::create(temp_dir.join("fixture_location.rs")) + .context("Failed to create fixture_location.rs")?; + write!( + file, + r#" + #[allow(dead_code)] + const FIXTURE_DIR: &str = "{0}"; + macro_rules! 
fixture {{ + ($name: literal) => {{ + include_bytes!(concat!("{0}", "/", $name, ".polkavm")) + }}; + }} + "#, + out_dir.display() + ) + .context("Failed to write to fixture_location.rs")?; + + Ok(out_dir) +} + pub fn main() -> Result<()> { let fixtures_dir: PathBuf = env::var("CARGO_MANIFEST_DIR")?.into(); let contracts_dir = fixtures_dir.join("contracts"); - let out_dir: PathBuf = env::var("OUT_DIR")?.into(); - let target = fixtures_dir.join("riscv32emac-unknown-none-polkavm.json"); + let out_dir = create_out_dir().context("Cannot determine output directory")?; + let build_dir = out_dir.join("build"); + fs::create_dir_all(&build_dir).context("Failed to create build directory")?; println!("cargo::rerun-if-env-changed={OVERRIDE_RUSTUP_TOOLCHAIN_ENV_VAR}"); println!("cargo::rerun-if-env-changed={OVERRIDE_STRIP_ENV_VAR}"); @@ -199,25 +253,9 @@ pub fn main() -> Result<()> { return Ok(()) } - let tmp_dir = tempfile::tempdir()?; - let tmp_dir_path = tmp_dir.path(); - - create_cargo_toml(&fixtures_dir, entries.iter(), tmp_dir.path())?; - invoke_build(&target, tmp_dir_path)?; - - write_output(tmp_dir_path, &out_dir, entries)?; - - #[cfg(unix)] - if let Ok(symlink_dir) = env::var("CARGO_WORKSPACE_ROOT_DIR") { - let symlink_dir: PathBuf = symlink_dir.into(); - let symlink_dir: PathBuf = symlink_dir.join("target").join("pallet-revive-fixtures"); - if symlink_dir.is_symlink() { - fs::remove_file(&symlink_dir) - .with_context(|| format!("Failed to remove_file {symlink_dir:?}"))?; - } - std::os::unix::fs::symlink(&out_dir, &symlink_dir) - .with_context(|| format!("Failed to symlink {out_dir:?} -> {symlink_dir:?}"))?; - } + create_cargo_toml(&fixtures_dir, entries.iter(), &build_dir)?; + invoke_build(&build_dir)?; + write_output(&build_dir, &out_dir, entries)?; Ok(()) } diff --git a/substrate/frame/revive/fixtures/build/Cargo.toml b/substrate/frame/revive/fixtures/build/_Cargo.toml similarity index 80% rename from substrate/frame/revive/fixtures/build/Cargo.toml rename to substrate/frame/revive/fixtures/build/_Cargo.toml index 5d0e256e2e73..beaabd83403e 100644 --- a/substrate/frame/revive/fixtures/build/Cargo.toml +++ b/substrate/frame/revive/fixtures/build/_Cargo.toml @@ -4,6 +4,9 @@ publish = false version = "1.0.0" edition = "2021" +# Make sure this is not included into the workspace +[workspace] + # Binary targets are injected dynamically by the build script. 
[[bin]] @@ -11,7 +14,7 @@ edition = "2021" [dependencies] uapi = { package = 'pallet-revive-uapi', path = "", default-features = false } common = { package = 'pallet-revive-fixtures-common', path = "" } -polkavm-derive = { version = "0.14.0" } +polkavm-derive = { version = "0.17.0" } [profile.release] opt-level = 3 diff --git a/substrate/frame/revive/fixtures/build/_rust-toolchain.toml b/substrate/frame/revive/fixtures/build/_rust-toolchain.toml new file mode 100644 index 000000000000..4c757c708d58 --- /dev/null +++ b/substrate/frame/revive/fixtures/build/_rust-toolchain.toml @@ -0,0 +1,4 @@ +[toolchain] +channel = "nightly-2024-11-19" +components = ["rust-src"] +profile = "minimal" diff --git a/substrate/frame/revive/fixtures/riscv32emac-unknown-none-polkavm.json b/substrate/frame/revive/fixtures/riscv32emac-unknown-none-polkavm.json deleted file mode 100644 index bbd54cdefbac..000000000000 --- a/substrate/frame/revive/fixtures/riscv32emac-unknown-none-polkavm.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "arch": "riscv32", - "cpu": "generic-rv32", - "crt-objects-fallback": "false", - "data-layout": "e-m:e-p:32:32-i64:64-n32-S32", - "eh-frame-header": false, - "emit-debug-gdb-scripts": false, - "features": "+e,+m,+a,+c,+lui-addi-fusion,+fast-unaligned-access,+xtheadcondmov", - "linker": "rust-lld", - "linker-flavor": "ld.lld", - "llvm-abiname": "ilp32e", - "llvm-target": "riscv32", - "max-atomic-width": 32, - "panic-strategy": "abort", - "relocation-model": "pie", - "target-pointer-width": "32", - "singlethread": true, - "pre-link-args": { - "ld": [ - "--emit-relocs", - "--unique", - "--relocatable" - ] - }, - "env": "polkavm" -} diff --git a/substrate/frame/revive/fixtures/src/lib.rs b/substrate/frame/revive/fixtures/src/lib.rs index cc84daec9b59..24f6ee547dc7 100644 --- a/substrate/frame/revive/fixtures/src/lib.rs +++ b/substrate/frame/revive/fixtures/src/lib.rs @@ -19,10 +19,13 @@ extern crate alloc; +// generated file that tells us where to find the fixtures +include!(concat!(env!("OUT_DIR"), "/fixture_location.rs")); + /// Load a given wasm module and returns a wasm binary contents along with it's hash. #[cfg(feature = "std")] pub fn compile_module(fixture_name: &str) -> anyhow::Result<(Vec, sp_core::H256)> { - let out_dir: std::path::PathBuf = env!("OUT_DIR").into(); + let out_dir: std::path::PathBuf = FIXTURE_DIR.into(); let fixture_path = out_dir.join(format!("{fixture_name}.polkavm")); log::debug!("Loading fixture from {fixture_path:?}"); let binary = std::fs::read(fixture_path)?; @@ -36,12 +39,6 @@ pub fn compile_module(fixture_name: &str) -> anyhow::Result<(Vec, sp_core::H /// available in no-std environments (runtime benchmarks). pub mod bench { use alloc::vec::Vec; - - macro_rules! 
fixture { - ($name: literal) => { - include_bytes!(concat!(env!("OUT_DIR"), "/", $name, ".polkavm")) - }; - } pub const DUMMY: &[u8] = fixture!("dummy"); pub const NOOP: &[u8] = fixture!("noop"); pub const INSTR: &[u8] = fixture!("instr_benchmark"); @@ -61,7 +58,7 @@ pub mod bench { mod test { #[test] fn out_dir_should_have_compiled_mocks() { - let out_dir: std::path::PathBuf = env!("OUT_DIR").into(); + let out_dir: std::path::PathBuf = crate::FIXTURE_DIR.into(); assert!(out_dir.join("dummy.polkavm").exists()); } } diff --git a/substrate/frame/revive/proc-macro/src/lib.rs b/substrate/frame/revive/proc-macro/src/lib.rs index 7232c6342824..6814add128d9 100644 --- a/substrate/frame/revive/proc-macro/src/lib.rs +++ b/substrate/frame/revive/proc-macro/src/lib.rs @@ -79,6 +79,7 @@ use syn::{parse_quote, punctuated::Punctuated, spanned::Spanned, token::Comma, F /// - `Result<(), TrapReason>`, /// - `Result`, /// - `Result`. +/// - `Result`. /// /// The macro expands to `pub struct Env` declaration, with the following traits implementations: /// - `pallet_revive::wasm::Environment> where E: Ext` @@ -127,6 +128,7 @@ struct HostFn { enum HostFnReturn { Unit, U32, + U64, ReturnCode, } @@ -134,8 +136,7 @@ impl HostFnReturn { fn map_output(&self) -> TokenStream2 { match self { Self::Unit => quote! { |_| None }, - Self::U32 => quote! { |ret_val| Some(ret_val) }, - Self::ReturnCode => quote! { |ret_code| Some(ret_code.into()) }, + _ => quote! { |ret_val| Some(ret_val.into()) }, } } @@ -143,6 +144,7 @@ impl HostFnReturn { match self { Self::Unit => syn::ReturnType::Default, Self::U32 => parse_quote! { -> u32 }, + Self::U64 => parse_quote! { -> u64 }, Self::ReturnCode => parse_quote! { -> ReturnErrorCode }, } } @@ -243,7 +245,8 @@ impl HostFn { let msg = r#"Should return one of the following: - Result<(), TrapReason>, - Result, - - Result"#; + - Result, + - Result"#; let ret_ty = match item.clone().sig.output { syn::ReturnType::Type(_, ty) => Ok(ty.clone()), _ => Err(err(span, &msg)), @@ -305,6 +308,7 @@ impl HostFn { let returns = match ok_ty_str.as_str() { "()" => Ok(HostFnReturn::Unit), "u32" => Ok(HostFnReturn::U32), + "u64" => Ok(HostFnReturn::U64), "ReturnErrorCode" => Ok(HostFnReturn::ReturnCode), _ => Err(err(arg1.span(), &msg)), }?; @@ -339,50 +343,61 @@ where P: Iterator> + Clone, I: Iterator> + Clone, { - const ALLOWED_REGISTERS: u32 = 6; - let mut registers_used = 0; - let mut bindings = vec![]; - let mut idx = 0; - for (name, ty) in param_names.clone().zip(param_types.clone()) { + const ALLOWED_REGISTERS: usize = 6; + + // all of them take one register but we truncate them before passing into the function + // it is important to not allow any type which has illegal bit patterns like 'bool' + if !param_types.clone().all(|ty| { let syn::Type::Path(path) = &**ty else { panic!("Type needs to be path"); }; let Some(ident) = path.path.get_ident() else { panic!("Type needs to be ident"); }; - let size = if ident == "i8" || - ident == "i16" || - ident == "i32" || - ident == "u8" || - ident == "u16" || - ident == "u32" - { - 1 - } else if ident == "i64" || ident == "u64" { - 2 - } else { - panic!("Pass by value only supports primitives"); - }; - registers_used += size; - if registers_used > ALLOWED_REGISTERS { - return quote! 
{ - let (#( #param_names, )*): (#( #param_types, )*) = memory.read_as(__a0__)?; - } - } - let this_reg = quote::format_ident!("__a{}__", idx); - let next_reg = quote::format_ident!("__a{}__", idx + 1); - let binding = if size == 1 { + matches!(ident.to_string().as_ref(), "u8" | "u16" | "u32" | "u64") + }) { + panic!("Only primitive unsigned integers are allowed as arguments to syscalls"); + } + + // too many arguments: pass as pointer to a struct in memory + if param_names.clone().count() > ALLOWED_REGISTERS { + let fields = param_names.clone().zip(param_types.clone()).map(|(name, ty)| { quote! { - let #name = #this_reg as #ty; + #name: #ty, } - } else { - quote! { - let #name = (#this_reg as #ty) | ((#next_reg as #ty) << 32); + }); + return quote! { + #[derive(Default)] + #[repr(C)] + struct Args { + #(#fields)* } - }; - bindings.push(binding); - idx += size; + let Args { #(#param_names,)* } = { + let len = ::core::mem::size_of::(); + let mut args = Args::default(); + let ptr = &mut args as *mut Args as *mut u8; + // Safety + // 1. The struct is initialized at all times. + // 2. We only allow primitive integers (no bools) as arguments so every bit pattern is safe. + // 3. The reference doesn't outlive the args field. + // 4. There is only the single reference to the args field. + // 5. The length of the generated slice is the same as the struct. + let reference = unsafe { + ::core::slice::from_raw_parts_mut(ptr, len) + }; + memory.read_into_buf(__a0__ as _, reference)?; + args + }; + } } + + // otherwise: one argument per register + let bindings = param_names.zip(param_types).enumerate().map(|(idx, (name, ty))| { + let reg = quote::format_ident!("__a{}__", idx); + quote! { + let #name = #reg as #ty; + } + }); quote! { #( #bindings )* } @@ -409,7 +424,7 @@ fn expand_env(def: &EnvDef) -> TokenStream2 { memory: &mut M, __syscall_symbol__: &[u8], __available_api_version__: ApiVersion, - ) -> Result, TrapReason> + ) -> Result, TrapReason> { #impls } diff --git a/substrate/frame/revive/rpc/src/tests.rs b/substrate/frame/revive/rpc/src/tests.rs index 7734c8c57209..920318b26f71 100644 --- a/substrate/frame/revive/rpc/src/tests.rs +++ b/substrate/frame/revive/rpc/src/tests.rs @@ -218,6 +218,8 @@ async fn deploy_and_call() -> anyhow::Result<()> { Ok(()) } +/// TODO: enable ( https://github.com/paritytech/contract-issues/issues/12 ) +#[ignore] #[tokio::test] async fn revert_call() -> anyhow::Result<()> { let _lock = SHARED_RESOURCES.write(); @@ -240,6 +242,8 @@ async fn revert_call() -> anyhow::Result<()> { Ok(()) } +/// TODO: enable ( https://github.com/paritytech/contract-issues/issues/12 ) +#[ignore] #[tokio::test] async fn event_logs() -> anyhow::Result<()> { let _lock = SHARED_RESOURCES.write(); @@ -279,6 +283,8 @@ async fn invalid_transaction() -> anyhow::Result<()> { Ok(()) } +/// TODO: enable ( https://github.com/paritytech/contract-issues/issues/12 ) +#[ignore] #[tokio::test] async fn native_evm_ratio_works() -> anyhow::Result<()> { let _lock = SHARED_RESOURCES.write(); diff --git a/substrate/frame/revive/src/chain_extension.rs b/substrate/frame/revive/src/chain_extension.rs index ccea12945054..5b3e886a5628 100644 --- a/substrate/frame/revive/src/chain_extension.rs +++ b/substrate/frame/revive/src/chain_extension.rs @@ -75,7 +75,7 @@ use crate::{ Error, }; use alloc::vec::Vec; -use codec::{Decode, MaxEncodedLen}; +use codec::Decode; use frame_support::weights::Weight; use sp_runtime::DispatchError; @@ -304,16 +304,6 @@ impl<'a, 'b, E: Ext, M: ?Sized + Memory> Environment<'a, 'b, E, M> { 
Ok(()) } - /// Reads and decodes a type with a size fixed at compile time from contract memory. - /// - /// This function is secure and recommended for all input types of fixed size - /// as long as the cost of reading the memory is included in the overall already charged - /// weight of the chain extension. This should usually be the case when fixed input types - /// are used. - pub fn read_as(&mut self) -> Result { - self.memory.read_as(self.input_ptr) - } - /// Reads and decodes a type with a dynamic size from contract memory. /// /// Make sure to include `len` in your weight calculations. diff --git a/substrate/frame/revive/src/limits.rs b/substrate/frame/revive/src/limits.rs index 64e66382b9ab..5ce96f59c14d 100644 --- a/substrate/frame/revive/src/limits.rs +++ b/substrate/frame/revive/src/limits.rs @@ -129,23 +129,36 @@ pub mod code { Error::::CodeRejected })?; + if !program.is_64_bit() { + log::debug!(target: LOG_TARGET, "32bit programs are not supported."); + Err(Error::::CodeRejected)?; + } + // This scans the whole program but we only do it once on code deployment. // It is safe to do unchecked math in u32 because the size of the program // was already checked above. - use polkavm::program::ISA32_V1_NoSbrk as ISA; + use polkavm::program::ISA64_V1 as ISA; let mut num_instructions: u32 = 0; let mut max_basic_block_size: u32 = 0; let mut basic_block_size: u32 = 0; for inst in program.instructions(ISA) { + use polkavm::program::Instruction; num_instructions += 1; basic_block_size += 1; if inst.kind.opcode().starts_new_basic_block() { max_basic_block_size = max_basic_block_size.max(basic_block_size); basic_block_size = 0; } - if matches!(inst.kind, polkavm::program::Instruction::invalid) { - log::debug!(target: LOG_TARGET, "invalid instruction at offset {}", inst.offset); - return Err(>::InvalidInstruction.into()) + match inst.kind { + Instruction::invalid => { + log::debug!(target: LOG_TARGET, "invalid instruction at offset {}", inst.offset); + return Err(>::InvalidInstruction.into()) + }, + Instruction::sbrk(_, _) => { + log::debug!(target: LOG_TARGET, "sbrk instruction is not allowed. offset {}", inst.offset); + return Err(>::InvalidInstruction.into()) + }, + _ => (), } } diff --git a/substrate/frame/revive/src/wasm/mod.rs b/substrate/frame/revive/src/wasm/mod.rs index f10c4f5fddf8..d87ec7112286 100644 --- a/substrate/frame/revive/src/wasm/mod.rs +++ b/substrate/frame/revive/src/wasm/mod.rs @@ -293,8 +293,15 @@ impl WasmBlob { ) -> Result, ExecError> { let mut config = polkavm::Config::default(); config.set_backend(Some(polkavm::BackendKind::Interpreter)); - let engine = - polkavm::Engine::new(&config).expect("interpreter is available on all plattforms; qed"); + config.set_cache_enabled(false); + #[cfg(feature = "std")] + if std::env::var_os("REVIVE_USE_COMPILER").is_some() { + config.set_backend(Some(polkavm::BackendKind::Compiler)); + } + let engine = polkavm::Engine::new(&config).expect( + "on-chain (no_std) use of interpreter is hard coded. + interpreter is available on all plattforms; qed", + ); let mut module_config = polkavm::ModuleConfig::new(); module_config.set_page_size(limits::PAGE_SIZE); @@ -306,6 +313,15 @@ impl WasmBlob { Error::::CodeRejected })?; + // This is checked at deploy time but we also want to reject pre-existing + // 32bit programs. + // TODO: Remove when we reset the test net. 
+ // https://github.com/paritytech/contract-issues/issues/11 + if !module.is_64_bit() { + log::debug!(target: LOG_TARGET, "32bit programs are not supported."); + Err(Error::::CodeRejected)?; + } + let entry_program_counter = module .exports() .find(|export| export.symbol().as_bytes() == entry_point.identifier().as_bytes()) diff --git a/substrate/frame/revive/src/wasm/runtime.rs b/substrate/frame/revive/src/wasm/runtime.rs index 3e2c83db1ebd..7ea518081e23 100644 --- a/substrate/frame/revive/src/wasm/runtime.rs +++ b/substrate/frame/revive/src/wasm/runtime.rs @@ -27,7 +27,7 @@ use crate::{ Config, Error, LOG_TARGET, SENTINEL, }; use alloc::{boxed::Box, vec, vec::Vec}; -use codec::{Decode, DecodeLimit, Encode, MaxEncodedLen}; +use codec::{Decode, DecodeLimit, Encode}; use core::{fmt, marker::PhantomData, mem}; use frame_support::{ dispatch::DispatchInfo, ensure, pallet_prelude::DispatchResultWithPostInfo, parameter_types, @@ -126,34 +126,13 @@ pub trait Memory { /// /// # Note /// - /// There must be an extra benchmark for determining the influence of `len` with - /// regard to the overall weight. + /// Make sure to charge a proportional amount of weight if `len` is not fixed. fn read_as_unbounded(&self, ptr: u32, len: u32) -> Result { let buf = self.read(ptr, len)?; let decoded = D::decode_all_with_depth_limit(MAX_DECODE_NESTING, &mut buf.as_ref()) .map_err(|_| DispatchError::from(Error::::DecodingFailed))?; Ok(decoded) } - - /// Reads and decodes a type with a size fixed at compile time from contract memory. - /// - /// # Only use on fixed size types - /// - /// Don't use this for types where the encoded size is not fixed but merely bounded. Otherwise - /// this implementation will out of bound access the buffer declared by the guest. Some examples - /// of those bounded but not fixed types: Enums with data, `BoundedVec` or any compact encoded - /// integer. - /// - /// # Note - /// - /// The weight of reading a fixed value is included in the overall weight of any - /// contract callable function. - fn read_as(&self, ptr: u32) -> Result { - let buf = self.read(ptr, D::max_encoded_len() as u32)?; - let decoded = D::decode_with_depth_limit(MAX_DECODE_NESTING, &mut buf.as_ref()) - .map_err(|_| DispatchError::from(Error::::DecodingFailed))?; - Ok(decoded) - } } /// Allows syscalls access to the PolkaVM instance they are executing in. @@ -164,8 +143,8 @@ pub trait Memory { pub trait PolkaVmInstance: Memory { fn gas(&self) -> polkavm::Gas; fn set_gas(&mut self, gas: polkavm::Gas); - fn read_input_regs(&self) -> (u32, u32, u32, u32, u32, u32); - fn write_output(&mut self, output: u32); + fn read_input_regs(&self) -> (u64, u64, u64, u64, u64, u64); + fn write_output(&mut self, output: u64); } // Memory implementation used in benchmarking where guest memory is mapped into the host. 
@@ -214,7 +193,7 @@ impl PolkaVmInstance for polkavm::RawInstance { self.set_gas(gas) } - fn read_input_regs(&self) -> (u32, u32, u32, u32, u32, u32) { + fn read_input_regs(&self) -> (u64, u64, u64, u64, u64, u64) { ( self.reg(polkavm::Reg::A0), self.reg(polkavm::Reg::A1), @@ -225,7 +204,7 @@ impl PolkaVmInstance for polkavm::RawInstance { ) } - fn write_output(&mut self, output: u32) { + fn write_output(&mut self, output: u64) { self.set_reg(polkavm::Reg::A0, output); } } diff --git a/substrate/frame/revive/uapi/Cargo.toml b/substrate/frame/revive/uapi/Cargo.toml index 0c7461a35d69..b55391dd5d6c 100644 --- a/substrate/frame/revive/uapi/Cargo.toml +++ b/substrate/frame/revive/uapi/Cargo.toml @@ -20,11 +20,11 @@ codec = { features = [ "max-encoded-len", ], optional = true, workspace = true } -[target.'cfg(target_arch = "riscv32")'.dependencies] -polkavm-derive = { version = "0.14.0" } +[target.'cfg(target_arch = "riscv64")'.dependencies] +polkavm-derive = { version = "0.17.0" } [package.metadata.docs.rs] -default-target = ["wasm32-unknown-unknown"] +default-target = ["riscv64imac-unknown-none-elf"] [features] default = ["scale"] diff --git a/substrate/frame/revive/uapi/src/host.rs b/substrate/frame/revive/uapi/src/host.rs index 6b3a8b07f040..d3fd4ac8d03e 100644 --- a/substrate/frame/revive/uapi/src/host.rs +++ b/substrate/frame/revive/uapi/src/host.rs @@ -14,8 +14,8 @@ use crate::{CallFlags, Result, ReturnFlags, StorageFlags}; use paste::paste; -#[cfg(target_arch = "riscv32")] -mod riscv32; +#[cfg(target_arch = "riscv64")] +mod riscv64; macro_rules! hash_fn { ( $name:ident, $bytes:literal ) => { diff --git a/substrate/frame/revive/uapi/src/host/riscv32.rs b/substrate/frame/revive/uapi/src/host/riscv64.rs similarity index 93% rename from substrate/frame/revive/uapi/src/host/riscv32.rs rename to substrate/frame/revive/uapi/src/host/riscv64.rs index e8b27057ed18..3cba14db6a04 100644 --- a/substrate/frame/revive/uapi/src/host/riscv32.rs +++ b/substrate/frame/revive/uapi/src/host/riscv64.rs @@ -26,10 +26,10 @@ mod sys { mod abi {} impl abi::FromHost for ReturnCode { - type Regs = (u32,); + type Regs = (u64,); fn from_host((a0,): Self::Regs) -> Self { - ReturnCode(a0) + ReturnCode(a0 as _) } } @@ -207,33 +207,33 @@ impl HostFn for HostFnImpl { let (output_ptr, mut output_len) = ptr_len_or_sentinel(&mut output); let deposit_limit_ptr = ptr_or_sentinel(&deposit_limit); let salt_ptr = ptr_or_sentinel(&salt); - #[repr(packed)] + #[repr(C)] #[allow(dead_code)] struct Args { - code_hash: *const u8, + code_hash: u32, ref_time_limit: u64, proof_size_limit: u64, - deposit_limit: *const u8, - value: *const u8, - input: *const u8, + deposit_limit: u32, + value: u32, + input: u32, input_len: u32, - address: *const u8, - output: *mut u8, - output_len: *mut u32, - salt: *const u8, + address: u32, + output: u32, + output_len: u32, + salt: u32, } let args = Args { - code_hash: code_hash.as_ptr(), + code_hash: code_hash.as_ptr() as _, ref_time_limit, proof_size_limit, - deposit_limit: deposit_limit_ptr, - value: value.as_ptr(), - input: input.as_ptr(), + deposit_limit: deposit_limit_ptr as _, + value: value.as_ptr() as _, + input: input.as_ptr() as _, input_len: input.len() as _, - address, - output: output_ptr, - output_len: &mut output_len as *mut _, - salt: salt_ptr, + address: address as _, + output: output_ptr as _, + output_len: &mut output_len as *mut _ as _, + salt: salt_ptr as _, }; let ret_code = { unsafe { sys::instantiate(&args as *const Args as *const _) } }; @@ -257,31 +257,31 @@ impl HostFn for 
HostFnImpl { ) -> Result { let (output_ptr, mut output_len) = ptr_len_or_sentinel(&mut output); let deposit_limit_ptr = ptr_or_sentinel(&deposit_limit); - #[repr(packed)] + #[repr(C)] #[allow(dead_code)] struct Args { flags: u32, - callee: *const u8, + callee: u32, ref_time_limit: u64, proof_size_limit: u64, - deposit_limit: *const u8, - value: *const u8, - input: *const u8, + deposit_limit: u32, + value: u32, + input: u32, input_len: u32, - output: *mut u8, - output_len: *mut u32, + output: u32, + output_len: u32, } let args = Args { flags: flags.bits(), - callee: callee.as_ptr(), + callee: callee.as_ptr() as _, ref_time_limit, proof_size_limit, - deposit_limit: deposit_limit_ptr, - value: value.as_ptr(), - input: input.as_ptr(), + deposit_limit: deposit_limit_ptr as _, + value: value.as_ptr() as _, + input: input.as_ptr() as _, input_len: input.len() as _, - output: output_ptr, - output_len: &mut output_len as *mut _, + output: output_ptr as _, + output_len: &mut output_len as *mut _ as _, }; let ret_code = { unsafe { sys::call(&args as *const Args as *const _) } }; @@ -308,29 +308,29 @@ impl HostFn for HostFnImpl { ) -> Result { let (output_ptr, mut output_len) = ptr_len_or_sentinel(&mut output); let deposit_limit_ptr = ptr_or_sentinel(&deposit_limit); - #[repr(packed)] + #[repr(C)] #[allow(dead_code)] struct Args { flags: u32, - address: *const u8, + address: u32, ref_time_limit: u64, proof_size_limit: u64, - deposit_limit: *const u8, - input: *const u8, + deposit_limit: u32, + input: u32, input_len: u32, - output: *mut u8, - output_len: *mut u32, + output: u32, + output_len: u32, } let args = Args { flags: flags.bits(), - address: address.as_ptr(), + address: address.as_ptr() as _, ref_time_limit, proof_size_limit, - deposit_limit: deposit_limit_ptr, - input: input.as_ptr(), + deposit_limit: deposit_limit_ptr as _, + input: input.as_ptr() as _, input_len: input.len() as _, - output: output_ptr, - output_len: &mut output_len as *mut _, + output: output_ptr as _, + output_len: &mut output_len as *mut _ as _, }; let ret_code = { unsafe { sys::delegate_call(&args as *const Args as *const _) } }; diff --git a/substrate/frame/revive/uapi/src/lib.rs b/substrate/frame/revive/uapi/src/lib.rs index e660ce36ef75..91c2543bb719 100644 --- a/substrate/frame/revive/uapi/src/lib.rs +++ b/substrate/frame/revive/uapi/src/lib.rs @@ -65,6 +65,12 @@ impl From for u32 { } } +impl From for u64 { + fn from(error: ReturnErrorCode) -> Self { + u32::from(error).into() + } +} + define_error_codes! { /// The called function trapped and has its state changes reverted. /// In this case no output buffer is returned. From 1e89a311471eba937a9552d7d1f55af1661feb08 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 29 Nov 2024 14:09:49 +0100 Subject: [PATCH 36/41] Fix runtime api impl detection by construct runtime (#6665) Construct runtime uses autoref-based specialization to fetch the metadata about the implemented runtime apis. This is done to not fail to compile when there are no runtime apis implemented. However, there was an issue with detecting runtime apis when they were implemented in a different file. The problem is solved by moving the trait implemented by `impl_runtime_apis!` to the metadata ir crate. 
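For reviewers unfamiliar with the trick, here is a minimal, self-contained sketch of autoref-based specialization. The names are illustrative only (not the actual macro output); it just shows why the call compiles either way and yields an empty vec when no runtime apis are implemented:

```rust
// Fallback: a blanket impl on `&T` that every type can reach via autoref.
trait NoApis {
	fn runtime_metadata(&self) -> Vec<&'static str> {
		Vec::new()
	}
}
impl<T> NoApis for &T {}

// "Specialized" impl: what `impl_runtime_apis!` would provide for the
// concrete runtime type.
struct Runtime;
trait HasApis {
	fn runtime_metadata(&self) -> Vec<&'static str>;
}
impl HasApis for Runtime {
	fn runtime_metadata(&self) -> Vec<&'static str> {
		vec!["Core", "Metadata"]
	}
}

fn main() {
	// `Runtime` implements `HasApis`, so method resolution picks it first.
	assert_eq!(Runtime.runtime_metadata(), vec!["Core", "Metadata"]);

	// A type without `HasApis` still compiles with the same call syntax:
	// resolution autorefs to `&Bare` and lands on the `NoApis` blanket
	// impl, returning the empty default.
	struct Bare;
	assert_eq!((&Bare).runtime_metadata(), Vec::<&'static str>::new());
}
```

Defining `InternalImplRuntimeApis` once in `sp-metadata-ir` gives `construct_runtime!` and `impl_runtime_apis!` one shared trait to resolve against, no matter which file each macro expands in.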
Closes: https://github.com/paritytech/polkadot-sdk/issues/6659 --------- Co-authored-by: GitHub Action --- Cargo.lock | 1 + prdoc/pr_6665.prdoc | 15 ++++++ .../src/construct_runtime/expand/metadata.rs | 2 + .../procedural/src/construct_runtime/mod.rs | 3 +- .../support/test/tests/runtime_metadata.rs | 49 ++++++++++--------- .../api/proc-macro/src/runtime_metadata.rs | 6 +-- substrate/primitives/api/test/Cargo.toml | 3 +- .../api/test/tests/decl_and_impl.rs | 2 + substrate/primitives/metadata-ir/src/lib.rs | 10 ++++ 9 files changed, 62 insertions(+), 29 deletions(-) create mode 100644 prdoc/pr_6665.prdoc diff --git a/Cargo.lock b/Cargo.lock index e1abeea49283..5e4e9c267b08 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -25548,6 +25548,7 @@ dependencies = [ "sp-api 26.0.0", "sp-consensus", "sp-core 28.0.0", + "sp-metadata-ir 0.6.0", "sp-runtime 31.0.1", "sp-state-machine 0.35.0", "sp-tracing 16.0.0", diff --git a/prdoc/pr_6665.prdoc b/prdoc/pr_6665.prdoc new file mode 100644 index 000000000000..b5aaf8a3b184 --- /dev/null +++ b/prdoc/pr_6665.prdoc @@ -0,0 +1,15 @@ +title: Fix runtime api impl detection by construct runtime +doc: +- audience: Runtime Dev + description: |- + Construct runtime uses autoref-based specialization to fetch the metadata about the implemented runtime apis. This is done to not fail to compile when there are no runtime apis implemented. However, there was an issue with detecting runtime apis when they were implemented in a different file. The problem is solved by moving the trait implemented by `impl_runtime_apis!` to the metadata ir crate. + + + Closes: https://github.com/paritytech/polkadot-sdk/issues/6659 +crates: +- name: frame-support-procedural + bump: patch +- name: sp-api-proc-macro + bump: patch +- name: sp-metadata-ir + bump: patch diff --git a/substrate/frame/support/procedural/src/construct_runtime/expand/metadata.rs b/substrate/frame/support/procedural/src/construct_runtime/expand/metadata.rs index 4590a3a7f490..0b3bd5168865 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/expand/metadata.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/expand/metadata.rs @@ -113,6 +113,8 @@ pub fn expand_runtime_metadata( <#extrinsic as #scrate::traits::SignedTransactionBuilder>::Extension >(); + use #scrate::__private::metadata_ir::InternalImplRuntimeApis; + #scrate::__private::metadata_ir::MetadataIR { pallets: #scrate::__private::vec![ #(#pallets),* ], extrinsic: #scrate::__private::metadata_ir::ExtrinsicMetadataIR { diff --git a/substrate/frame/support/procedural/src/construct_runtime/mod.rs b/substrate/frame/support/procedural/src/construct_runtime/mod.rs index 17042c248780..087faf37252d 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/mod.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/mod.rs @@ -466,7 +466,6 @@ fn construct_runtime_final_expansion( // Therefore, the `Deref` trait will resolve the `runtime_metadata` from `impl_runtime_apis!` // when both macros are called; and will resolve an empty `runtime_metadata` when only the `construct_runtime!` // is called. 
- #[doc(hidden)] trait InternalConstructRuntime { #[inline(always)] @@ -477,6 +476,8 @@ fn construct_runtime_final_expansion( #[doc(hidden)] impl InternalConstructRuntime for &#name {} + use #scrate::__private::metadata_ir::InternalImplRuntimeApis; + #outer_event #outer_error diff --git a/substrate/frame/support/test/tests/runtime_metadata.rs b/substrate/frame/support/test/tests/runtime_metadata.rs index 7523a415d458..a098643abb91 100644 --- a/substrate/frame/support/test/tests/runtime_metadata.rs +++ b/substrate/frame/support/test/tests/runtime_metadata.rs @@ -80,34 +80,39 @@ sp_api::decl_runtime_apis! { } } -sp_api::impl_runtime_apis! { - impl self::Api for Runtime { - fn test(_data: u64) { - unimplemented!() - } +// Module to emulate having the implementation in a different file. +mod apis { + use super::{Block, BlockT, Runtime}; - fn something_with_block(_: Block) -> Block { - unimplemented!() - } + sp_api::impl_runtime_apis! { + impl crate::Api for Runtime { + fn test(_data: u64) { + unimplemented!() + } - fn function_with_two_args(_: u64, _: Block) { - unimplemented!() - } + fn something_with_block(_: Block) -> Block { + unimplemented!() + } - fn same_name() {} + fn function_with_two_args(_: u64, _: Block) { + unimplemented!() + } - fn wild_card(_: u32) {} - } + fn same_name() {} - impl sp_api::Core for Runtime { - fn version() -> sp_version::RuntimeVersion { - unimplemented!() - } - fn execute_block(_: Block) { - unimplemented!() + fn wild_card(_: u32) {} } - fn initialize_block(_: &::Header) -> sp_runtime::ExtrinsicInclusionMode { - unimplemented!() + + impl sp_api::Core for Runtime { + fn version() -> sp_version::RuntimeVersion { + unimplemented!() + } + fn execute_block(_: Block) { + unimplemented!() + } + fn initialize_block(_: &::Header) -> sp_runtime::ExtrinsicInclusionMode { + unimplemented!() + } } } } diff --git a/substrate/primitives/api/proc-macro/src/runtime_metadata.rs b/substrate/primitives/api/proc-macro/src/runtime_metadata.rs index 6be396339259..1706f8ca6fbb 100644 --- a/substrate/primitives/api/proc-macro/src/runtime_metadata.rs +++ b/substrate/primitives/api/proc-macro/src/runtime_metadata.rs @@ -298,18 +298,14 @@ pub fn generate_impl_runtime_metadata(impls: &[ItemImpl]) -> Result #crate_::vec::Vec<#crate_::metadata_ir::RuntimeApiMetadataIR> { #crate_::vec![ #( #metadata, )* ] } } - #[doc(hidden)] - impl InternalImplRuntimeApis for #runtime_name {} } )) } diff --git a/substrate/primitives/api/test/Cargo.toml b/substrate/primitives/api/test/Cargo.toml index 1d21f23eb804..27f6dafa24bf 100644 --- a/substrate/primitives/api/test/Cargo.toml +++ b/substrate/primitives/api/test/Cargo.toml @@ -21,6 +21,7 @@ sp-version = { workspace = true, default-features = true } sp-tracing = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } sp-consensus = { workspace = true, default-features = true } +sp-metadata-ir = { workspace = true, default-features = true } sc-block-builder = { workspace = true, default-features = true } codec = { workspace = true, default-features = true } sp-state-machine = { workspace = true, default-features = true } @@ -40,5 +41,5 @@ name = "bench" harness = false [features] -"enable-staging-api" = [] +enable-staging-api = [] disable-ui-tests = [] diff --git a/substrate/primitives/api/test/tests/decl_and_impl.rs b/substrate/primitives/api/test/tests/decl_and_impl.rs index 890cf6eccdbc..2e5a078cb382 100644 --- a/substrate/primitives/api/test/tests/decl_and_impl.rs +++ 
b/substrate/primitives/api/test/tests/decl_and_impl.rs @@ -309,6 +309,8 @@ fn mock_runtime_api_works_with_advanced() { #[test] fn runtime_api_metadata_matches_version_implemented() { + use sp_metadata_ir::InternalImplRuntimeApis; + let rt = Runtime {}; let runtime_metadata = rt.runtime_metadata(); diff --git a/substrate/primitives/metadata-ir/src/lib.rs b/substrate/primitives/metadata-ir/src/lib.rs index bf234432a1a6..dc01f7eaadb3 100644 --- a/substrate/primitives/metadata-ir/src/lib.rs +++ b/substrate/primitives/metadata-ir/src/lib.rs @@ -87,6 +87,16 @@ pub fn into_unstable(metadata: MetadataIR) -> RuntimeMetadataPrefixed { latest.into() } +/// INTERNAL USE ONLY +/// +/// Special trait that is used together with `InternalConstructRuntime` by `construct_runtime!` to +/// fetch the runtime api metadata without exploding when there is no runtime api implementation +/// available. +#[doc(hidden)] +pub trait InternalImplRuntimeApis { + fn runtime_metadata(&self) -> alloc::vec::Vec; +} + #[cfg(test)] mod test { use super::*; From 4e7c968ae97c66812df989117ad251cba3864632 Mon Sep 17 00:00:00 2001 From: Alexandru Vasile <60601340+lexnv@users.noreply.github.com> Date: Fri, 29 Nov 2024 16:49:45 +0200 Subject: [PATCH 37/41] archive: Refactor `archive_storage` method into subscription (#6483) This PR adapts the `archive_storage` implementation from a method to a subscription. This keeps the archive APIs uniform and consistent. Builds on: https://github.com/paritytech/polkadot-sdk/pull/5997 cc @paritytech/subxt-team --------- Signed-off-by: Alexandru Vasile Co-authored-by: James Wilson --- .../client/rpc-spec-v2/src/archive/api.rs | 13 +- .../client/rpc-spec-v2/src/archive/archive.rs | 202 +++---- .../src/archive/archive_storage.rs | 105 +--- .../client/rpc-spec-v2/src/archive/mod.rs | 2 +- .../client/rpc-spec-v2/src/archive/tests.rs | 500 +++++++----------- .../rpc-spec-v2/src/chain_head/event.rs | 3 +- .../client/rpc-spec-v2/src/common/events.rs | 59 ++- .../client/rpc-spec-v2/src/common/storage.rs | 151 ++++-- substrate/client/service/src/builder.rs | 2 - 9 files changed, 458 insertions(+), 579 deletions(-) diff --git a/substrate/client/rpc-spec-v2/src/archive/api.rs b/substrate/client/rpc-spec-v2/src/archive/api.rs index dcfeaecb147b..a205d0502c93 100644 --- a/substrate/client/rpc-spec-v2/src/archive/api.rs +++ b/substrate/client/rpc-spec-v2/src/archive/api.rs @@ -20,8 +20,7 @@ use crate::{ common::events::{ - ArchiveStorageDiffEvent, ArchiveStorageDiffItem, ArchiveStorageResult, - PaginatedStorageQuery, + ArchiveStorageDiffEvent, ArchiveStorageDiffItem, ArchiveStorageEvent, StorageQuery, }, MethodResult, }; @@ -100,13 +99,17 @@ pub trait ArchiveApi { /// # Unstable /// /// This method is unstable and subject to change in the future. - #[method(name = "archive_unstable_storage", blocking)] + #[subscription( + name = "archive_unstable_storage" => "archive_unstable_storageEvent", + unsubscribe = "archive_unstable_stopStorage", + item = ArchiveStorageEvent, + )] fn archive_unstable_storage( &self, hash: Hash, - items: Vec>, + items: Vec>, child_trie: Option, - ) -> RpcResult; + ); /// Returns the storage difference between two blocks. 
 ///
diff --git a/substrate/client/rpc-spec-v2/src/archive/archive.rs b/substrate/client/rpc-spec-v2/src/archive/archive.rs
index 55054d91d85d..62e44a016241 100644
--- a/substrate/client/rpc-spec-v2/src/archive/archive.rs
+++ b/substrate/client/rpc-spec-v2/src/archive/archive.rs
@@ -20,13 +20,13 @@
 use crate::{
 	archive::{
-		archive_storage::{ArchiveStorage, ArchiveStorageDiff},
-		error::Error as ArchiveError,
-		ArchiveApiServer,
+		archive_storage::ArchiveStorageDiff, error::Error as ArchiveError, ArchiveApiServer,
 	},
-	common::events::{
-		ArchiveStorageDiffEvent, ArchiveStorageDiffItem, ArchiveStorageResult,
-		PaginatedStorageQuery,
+	common::{
+		events::{
+			ArchiveStorageDiffEvent, ArchiveStorageDiffItem, ArchiveStorageEvent, StorageQuery,
+		},
+		storage::{QueryResult, StorageSubscriptionClient},
 	},
 	hex_string, MethodResult, SubscriptionTaskExecutor,
 };
@@ -57,42 +57,12 @@ use tokio::sync::mpsc;
 
 pub(crate) const LOG_TARGET: &str = "rpc-spec-v2::archive";
 
-/// The configuration of [`Archive`].
-pub struct ArchiveConfig {
-	/// The maximum number of items the `archive_storage` can return for a descendant query before
-	/// pagination is required.
-	pub max_descendant_responses: usize,
-	/// The maximum number of queried items allowed for the `archive_storage` at a time.
-	pub max_queried_items: usize,
-}
-
-/// The maximum number of items the `archive_storage` can return for a descendant query before
-/// pagination is required.
-///
-/// Note: this is identical to the `chainHead` value.
-const MAX_DESCENDANT_RESPONSES: usize = 5;
-
-/// The maximum number of queried items allowed for the `archive_storage` at a time.
-///
-/// Note: A queried item can also be a descendant query which can return up to
-/// `MAX_DESCENDANT_RESPONSES`.
-const MAX_QUERIED_ITEMS: usize = 8;
-
 /// The buffer capacity for each storage query.
 ///
 /// This is small because the underlying JSON-RPC server has
 /// its own buffer capacity per connection as well.
 const STORAGE_QUERY_BUF: usize = 16;
 
-impl Default for ArchiveConfig {
-	fn default() -> Self {
-		Self {
-			max_descendant_responses: MAX_DESCENDANT_RESPONSES,
-			max_queried_items: MAX_QUERIED_ITEMS,
-		}
-	}
-}
-
 /// An API for archive RPC calls.
 pub struct Archive<BE: Backend<Block>, Block: BlockT, Client> {
 	/// Substrate client.
@@ -103,11 +73,6 @@ pub struct Archive<BE: Backend<Block>, Block: BlockT, Client> {
 	executor: SubscriptionTaskExecutor,
 	/// The hexadecimal encoded hash of the genesis block.
 	genesis_hash: String,
-	/// The maximum number of items the `archive_storage` can return for a descendant query before
-	/// pagination is required.
-	storage_max_descendant_responses: usize,
-	/// The maximum number of queried items allowed for the `archive_storage` at a time.
-	storage_max_queried_items: usize,
 	/// Phantom member to pin the block type.
_phantom: PhantomData, } @@ -119,18 +84,9 @@ impl, Block: BlockT, Client> Archive { backend: Arc, genesis_hash: GenesisHash, executor: SubscriptionTaskExecutor, - config: ArchiveConfig, ) -> Self { let genesis_hash = hex_string(&genesis_hash.as_ref()); - Self { - client, - backend, - executor, - genesis_hash, - storage_max_descendant_responses: config.max_descendant_responses, - storage_max_queried_items: config.max_queried_items, - _phantom: PhantomData, - } + Self { client, backend, executor, genesis_hash, _phantom: PhantomData } } } @@ -260,47 +216,53 @@ where fn archive_unstable_storage( &self, + pending: PendingSubscriptionSink, hash: Block::Hash, - items: Vec>, + items: Vec>, child_trie: Option, - ) -> RpcResult { - let items = items - .into_iter() - .map(|query| { - let key = StorageKey(parse_hex_param(query.key)?); - let pagination_start_key = query - .pagination_start_key - .map(|key| parse_hex_param(key).map(|key| StorageKey(key))) - .transpose()?; - - // Paginated start key is only supported - if pagination_start_key.is_some() && !query.query_type.is_descendant_query() { - return Err(ArchiveError::InvalidParam( - "Pagination start key is only supported for descendants queries" - .to_string(), - )) - } + ) { + let mut storage_client = + StorageSubscriptionClient::::new(self.client.clone()); + + let fut = async move { + let Ok(mut sink) = pending.accept().await.map(Subscription::from) else { return }; - Ok(PaginatedStorageQuery { - key, - query_type: query.query_type, - pagination_start_key, + let items = match items + .into_iter() + .map(|query| { + let key = StorageKey(parse_hex_param(query.key)?); + Ok(StorageQuery { key, query_type: query.query_type }) }) - }) - .collect::, ArchiveError>>()?; + .collect::, ArchiveError>>() + { + Ok(items) => items, + Err(error) => { + let _ = sink.send(&ArchiveStorageEvent::err(error.to_string())); + return + }, + }; - let child_trie = child_trie - .map(|child_trie| parse_hex_param(child_trie)) - .transpose()? - .map(ChildInfo::new_default_from_vec); + let child_trie = child_trie.map(|child_trie| parse_hex_param(child_trie)).transpose(); + let child_trie = match child_trie { + Ok(child_trie) => child_trie.map(ChildInfo::new_default_from_vec), + Err(error) => { + let _ = sink.send(&ArchiveStorageEvent::err(error.to_string())); + return + }, + }; - let storage_client = ArchiveStorage::new( - self.client.clone(), - self.storage_max_descendant_responses, - self.storage_max_queried_items, - ); + let (tx, mut rx) = tokio::sync::mpsc::channel(STORAGE_QUERY_BUF); + let storage_fut = storage_client.generate_events(hash, items, child_trie, tx); - Ok(storage_client.handle_query(hash, items, child_trie)) + // We don't care about the return value of this join: + // - process_events might encounter an error (if the client disconnected) + // - storage_fut might encounter an error while processing a trie queries and + // the error is propagated via the sink. + let _ = futures::future::join(storage_fut, process_storage_events(&mut rx, &mut sink)) + .await; + }; + + self.executor.spawn("substrate-rpc-subscription", Some("rpc"), fut.boxed()); } fn archive_unstable_storage_diff( @@ -337,24 +299,74 @@ where // - process_events might encounter an error (if the client disconnected) // - storage_fut might encounter an error while processing a trie queries and // the error is propagated via the sink. 
- let _ = futures::future::join(storage_fut, process_events(&mut rx, &mut sink)).await; + let _ = + futures::future::join(storage_fut, process_storage_diff_events(&mut rx, &mut sink)) + .await; }; self.executor.spawn("substrate-rpc-subscription", Some("rpc"), fut.boxed()); } } -/// Sends all the events to the sink. -async fn process_events(rx: &mut mpsc::Receiver, sink: &mut Subscription) { - while let Some(event) = rx.recv().await { - if event.is_done() { - log::debug!(target: LOG_TARGET, "Finished processing partial trie query"); - } else if event.is_err() { - log::debug!(target: LOG_TARGET, "Error encountered while processing partial trie query"); +/// Sends all the events of the storage_diff method to the sink. +async fn process_storage_diff_events( + rx: &mut mpsc::Receiver, + sink: &mut Subscription, +) { + loop { + tokio::select! { + _ = sink.closed() => { + return + }, + + maybe_event = rx.recv() => { + let Some(event) = maybe_event else { + break; + }; + + if event.is_done() { + log::debug!(target: LOG_TARGET, "Finished processing partial trie query"); + } else if event.is_err() { + log::debug!(target: LOG_TARGET, "Error encountered while processing partial trie query"); + } + + if sink.send(&event).await.is_err() { + return + } + } } + } +} + +/// Sends all the events of the storage method to the sink. +async fn process_storage_events(rx: &mut mpsc::Receiver, sink: &mut Subscription) { + loop { + tokio::select! { + _ = sink.closed() => { + break + } + + maybe_storage = rx.recv() => { + let Some(event) = maybe_storage else { + break; + }; + + match event { + Ok(None) => continue, + + Ok(Some(event)) => + if sink.send(&ArchiveStorageEvent::result(event)).await.is_err() { + return + }, - if sink.send(&event).await.is_err() { - return + Err(error) => { + let _ = sink.send(&ArchiveStorageEvent::err(error)).await; + return + } + } + } } } + + let _ = sink.send(&ArchiveStorageEvent::StorageDone).await; } diff --git a/substrate/client/rpc-spec-v2/src/archive/archive_storage.rs b/substrate/client/rpc-spec-v2/src/archive/archive_storage.rs index 5a3920882f00..390db765a48f 100644 --- a/substrate/client/rpc-spec-v2/src/archive/archive_storage.rs +++ b/substrate/client/rpc-spec-v2/src/archive/archive_storage.rs @@ -33,114 +33,13 @@ use crate::{ common::{ events::{ ArchiveStorageDiffEvent, ArchiveStorageDiffItem, ArchiveStorageDiffOperationType, - ArchiveStorageDiffResult, ArchiveStorageDiffType, ArchiveStorageResult, - PaginatedStorageQuery, StorageQueryType, StorageResult, + ArchiveStorageDiffResult, ArchiveStorageDiffType, StorageResult, }, - storage::{IterQueryType, QueryIter, Storage}, + storage::Storage, }, }; use tokio::sync::mpsc; -/// Generates the events of the `archive_storage` method. -pub struct ArchiveStorage { - /// Storage client. - client: Storage, - /// The maximum number of responses the API can return for a descendant query at a time. - storage_max_descendant_responses: usize, - /// The maximum number of queried items allowed for the `archive_storage` at a time. - storage_max_queried_items: usize, -} - -impl ArchiveStorage { - /// Constructs a new [`ArchiveStorage`]. 
- pub fn new( - client: Arc, - storage_max_descendant_responses: usize, - storage_max_queried_items: usize, - ) -> Self { - Self { - client: Storage::new(client), - storage_max_descendant_responses, - storage_max_queried_items, - } - } -} - -impl ArchiveStorage -where - Block: BlockT + 'static, - BE: Backend + 'static, - Client: StorageProvider + 'static, -{ - /// Generate the response of the `archive_storage` method. - pub fn handle_query( - &self, - hash: Block::Hash, - mut items: Vec>, - child_key: Option, - ) -> ArchiveStorageResult { - let discarded_items = items.len().saturating_sub(self.storage_max_queried_items); - items.truncate(self.storage_max_queried_items); - - let mut storage_results = Vec::with_capacity(items.len()); - for item in items { - match item.query_type { - StorageQueryType::Value => { - match self.client.query_value(hash, &item.key, child_key.as_ref()) { - Ok(Some(value)) => storage_results.push(value), - Ok(None) => continue, - Err(error) => return ArchiveStorageResult::err(error), - } - }, - StorageQueryType::Hash => - match self.client.query_hash(hash, &item.key, child_key.as_ref()) { - Ok(Some(value)) => storage_results.push(value), - Ok(None) => continue, - Err(error) => return ArchiveStorageResult::err(error), - }, - StorageQueryType::ClosestDescendantMerkleValue => - match self.client.query_merkle_value(hash, &item.key, child_key.as_ref()) { - Ok(Some(value)) => storage_results.push(value), - Ok(None) => continue, - Err(error) => return ArchiveStorageResult::err(error), - }, - StorageQueryType::DescendantsValues => { - match self.client.query_iter_pagination( - QueryIter { - query_key: item.key, - ty: IterQueryType::Value, - pagination_start_key: item.pagination_start_key, - }, - hash, - child_key.as_ref(), - self.storage_max_descendant_responses, - ) { - Ok((results, _)) => storage_results.extend(results), - Err(error) => return ArchiveStorageResult::err(error), - } - }, - StorageQueryType::DescendantsHashes => { - match self.client.query_iter_pagination( - QueryIter { - query_key: item.key, - ty: IterQueryType::Hash, - pagination_start_key: item.pagination_start_key, - }, - hash, - child_key.as_ref(), - self.storage_max_descendant_responses, - ) { - Ok((results, _)) => storage_results.extend(results), - Err(error) => return ArchiveStorageResult::err(error), - } - }, - }; - } - - ArchiveStorageResult::ok(storage_results, discarded_items) - } -} - /// Parse hex-encoded string parameter as raw bytes. /// /// If the parsing fails, returns an error propagated to the RPC method. 
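Editor's note on the new wire format: since the single aggregate `ArchiveStorageResult` response becomes a stream of tagged events, a small self-contained sketch of the encoding may help. It mirrors the serde attributes added to `ArchiveStorageEvent` in `common/events.rs` further down (types simplified, keys and values made up):

```rust
// Simplified mirror of the `ArchiveStorageEvent` encoding.
// Assumed deps: serde = { version = "1", features = ["derive"] }, serde_json = "1"
use serde::Serialize;

#[derive(Serialize)]
#[serde(rename_all = "camelCase", tag = "event")]
enum ArchiveStorageEvent {
	// The real `Storage` variant wraps a `StorageResult` whose fields
	// serialize inline; it is flattened into a struct variant here.
	Storage { key: String, value: String },
	StorageError { error: String },
	StorageDone,
}

fn main() {
	let events = [
		ArchiveStorageEvent::Storage { key: "0x3a6d".into(), value: "0x61".into() },
		ArchiveStorageEvent::StorageDone,
	];
	for event in &events {
		println!("{}", serde_json::to_string(event).unwrap());
	}
	// Prints one JSON object per subscription notification:
	// {"event":"storage","key":"0x3a6d","value":"0x61"}
	// {"event":"storageDone"}
}
```

Each query result is now pushed as its own `storage` event and the stream is closed with `storageDone`, instead of everything being buffered into one paginated response.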
diff --git a/substrate/client/rpc-spec-v2/src/archive/mod.rs b/substrate/client/rpc-spec-v2/src/archive/mod.rs index 5f020c203eab..14fa104c113a 100644 --- a/substrate/client/rpc-spec-v2/src/archive/mod.rs +++ b/substrate/client/rpc-spec-v2/src/archive/mod.rs @@ -32,4 +32,4 @@ pub mod archive; pub mod error; pub use api::ArchiveApiServer; -pub use archive::{Archive, ArchiveConfig}; +pub use archive::Archive; diff --git a/substrate/client/rpc-spec-v2/src/archive/tests.rs b/substrate/client/rpc-spec-v2/src/archive/tests.rs index 994c5d28bd61..cddaafde6659 100644 --- a/substrate/client/rpc-spec-v2/src/archive/tests.rs +++ b/substrate/client/rpc-spec-v2/src/archive/tests.rs @@ -19,16 +19,13 @@ use crate::{ common::events::{ ArchiveStorageDiffEvent, ArchiveStorageDiffItem, ArchiveStorageDiffOperationType, - ArchiveStorageDiffResult, ArchiveStorageDiffType, ArchiveStorageMethodOk, - ArchiveStorageResult, PaginatedStorageQuery, StorageQueryType, StorageResultType, + ArchiveStorageDiffResult, ArchiveStorageDiffType, ArchiveStorageEvent, StorageQuery, + StorageQueryType, StorageResult, StorageResultType, }, hex_string, MethodResult, }; -use super::{ - archive::{Archive, ArchiveConfig}, - *, -}; +use super::{archive::Archive, *}; use assert_matches::assert_matches; use codec::{Decode, Encode}; @@ -55,8 +52,6 @@ use substrate_test_runtime_client::{ const CHAIN_GENESIS: [u8; 32] = [0; 32]; const INVALID_HASH: [u8; 32] = [1; 32]; -const MAX_PAGINATION_LIMIT: usize = 5; -const MAX_QUERIED_LIMIT: usize = 5; const KEY: &[u8] = b":mock"; const VALUE: &[u8] = b"hello world"; const CHILD_STORAGE_KEY: &[u8] = b"child"; @@ -65,10 +60,7 @@ const CHILD_VALUE: &[u8] = b"child value"; type Header = substrate_test_runtime_client::runtime::Header; type Block = substrate_test_runtime_client::runtime::Block; -fn setup_api( - max_descendant_responses: usize, - max_queried_items: usize, -) -> (Arc>, RpcModule>>) { +fn setup_api() -> (Arc>, RpcModule>>) { let child_info = ChildInfo::new_default(CHILD_STORAGE_KEY); let builder = TestClientBuilder::new().add_extra_child_storage( &child_info, @@ -83,7 +75,6 @@ fn setup_api( backend, CHAIN_GENESIS, Arc::new(TokioTestExecutor::default()), - ArchiveConfig { max_descendant_responses, max_queried_items }, ) .into_rpc(); @@ -101,7 +92,7 @@ async fn get_next_event(sub: &mut RpcSubscriptio #[tokio::test] async fn archive_genesis() { - let (_client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); + let (_client, api) = setup_api(); let genesis: String = api.call("archive_unstable_genesisHash", EmptyParams::new()).await.unwrap(); @@ -110,7 +101,7 @@ async fn archive_genesis() { #[tokio::test] async fn archive_body() { - let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); + let (client, api) = setup_api(); // Invalid block hash. let invalid_hash = hex_string(&INVALID_HASH); @@ -144,7 +135,7 @@ async fn archive_body() { #[tokio::test] async fn archive_header() { - let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); + let (client, api) = setup_api(); // Invalid block hash. 
let invalid_hash = hex_string(&INVALID_HASH); @@ -178,7 +169,7 @@ async fn archive_header() { #[tokio::test] async fn archive_finalized_height() { - let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); + let (client, api) = setup_api(); let client_height: u32 = client.info().finalized_number.saturated_into(); @@ -190,7 +181,7 @@ async fn archive_finalized_height() { #[tokio::test] async fn archive_hash_by_height() { - let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); + let (client, api) = setup_api(); // Genesis height. let hashes: Vec = api.call("archive_unstable_hashByHeight", [0]).await.unwrap(); @@ -296,7 +287,7 @@ async fn archive_hash_by_height() { #[tokio::test] async fn archive_call() { - let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); + let (client, api) = setup_api(); let invalid_hash = hex_string(&INVALID_HASH); // Invalid parameter (non-hex). @@ -355,7 +346,7 @@ async fn archive_call() { #[tokio::test] async fn archive_storage_hashes_values() { - let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); + let (client, api) = setup_api(); let block = BlockBuilderBuilder::new(&*client) .on_parent_block(client.chain_info().genesis_hash) @@ -369,42 +360,23 @@ async fn archive_storage_hashes_values() { let block_hash = format!("{:?}", block.header.hash()); let key = hex_string(&KEY); - let items: Vec> = vec![ - PaginatedStorageQuery { - key: key.clone(), - query_type: StorageQueryType::DescendantsHashes, - pagination_start_key: None, - }, - PaginatedStorageQuery { - key: key.clone(), - query_type: StorageQueryType::DescendantsValues, - pagination_start_key: None, - }, - PaginatedStorageQuery { - key: key.clone(), - query_type: StorageQueryType::Hash, - pagination_start_key: None, - }, - PaginatedStorageQuery { - key: key.clone(), - query_type: StorageQueryType::Value, - pagination_start_key: None, - }, + let items: Vec> = vec![ + StorageQuery { key: key.clone(), query_type: StorageQueryType::DescendantsHashes }, + StorageQuery { key: key.clone(), query_type: StorageQueryType::DescendantsValues }, + StorageQuery { key: key.clone(), query_type: StorageQueryType::Hash }, + StorageQuery { key: key.clone(), query_type: StorageQueryType::Value }, ]; - let result: ArchiveStorageResult = api - .call("archive_unstable_storage", rpc_params![&block_hash, items.clone()]) + let mut sub = api + .subscribe_unbounded("archive_unstable_storage", rpc_params![&block_hash, items.clone()]) .await .unwrap(); - match result { - ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => { - // Key has not been imported yet. - assert_eq!(result.len(), 0); - assert_eq!(discarded_items, 0); - }, - _ => panic!("Unexpected result"), - }; + // Key has not been imported yet. + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::StorageDone, + ); // Import a block with the given key value pair. 
let mut builder = BlockBuilderBuilder::new(&*client) @@ -420,32 +392,103 @@ async fn archive_storage_hashes_values() { let expected_hash = format!("{:?}", Blake2Hasher::hash(&VALUE)); let expected_value = hex_string(&VALUE); - let result: ArchiveStorageResult = api - .call("archive_unstable_storage", rpc_params![&block_hash, items]) + let mut sub = api + .subscribe_unbounded("archive_unstable_storage", rpc_params![&block_hash, items]) .await .unwrap(); - match result { - ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => { - assert_eq!(result.len(), 4); - assert_eq!(discarded_items, 0); - - assert_eq!(result[0].key, key); - assert_eq!(result[0].result, StorageResultType::Hash(expected_hash.clone())); - assert_eq!(result[1].key, key); - assert_eq!(result[1].result, StorageResultType::Value(expected_value.clone())); - assert_eq!(result[2].key, key); - assert_eq!(result[2].result, StorageResultType::Hash(expected_hash)); - assert_eq!(result[3].key, key); - assert_eq!(result[3].result, StorageResultType::Value(expected_value)); - }, - _ => panic!("Unexpected result"), - }; + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::Storage(StorageResult { + key: key.clone(), + result: StorageResultType::Hash(expected_hash.clone()), + child_trie_key: None, + }), + ); + + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::Storage(StorageResult { + key: key.clone(), + result: StorageResultType::Value(expected_value.clone()), + child_trie_key: None, + }), + ); + + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::Storage(StorageResult { + key: key.clone(), + result: StorageResultType::Hash(expected_hash), + child_trie_key: None, + }), + ); + + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::Storage(StorageResult { + key: key.clone(), + result: StorageResultType::Value(expected_value), + child_trie_key: None, + }), + ); + + assert_matches!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::StorageDone + ); +} + +#[tokio::test] +async fn archive_storage_hashes_values_child_trie() { + let (client, api) = setup_api(); + + // Get child storage values set in `setup_api`. 
+ let child_info = hex_string(&CHILD_STORAGE_KEY); + let key = hex_string(&KEY); + let genesis_hash = format!("{:?}", client.genesis_hash()); + let expected_hash = format!("{:?}", Blake2Hasher::hash(&CHILD_VALUE)); + let expected_value = hex_string(&CHILD_VALUE); + + let items: Vec> = vec![ + StorageQuery { key: key.clone(), query_type: StorageQueryType::DescendantsHashes }, + StorageQuery { key: key.clone(), query_type: StorageQueryType::DescendantsValues }, + ]; + let mut sub = api + .subscribe_unbounded( + "archive_unstable_storage", + rpc_params![&genesis_hash, items, &child_info], + ) + .await + .unwrap(); + + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::Storage(StorageResult { + key: key.clone(), + result: StorageResultType::Hash(expected_hash.clone()), + child_trie_key: Some(child_info.clone()), + }) + ); + + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::Storage(StorageResult { + key: key.clone(), + result: StorageResultType::Value(expected_value.clone()), + child_trie_key: Some(child_info.clone()), + }) + ); + + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::StorageDone, + ); } #[tokio::test] async fn archive_storage_closest_merkle_value() { - let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); + let (client, api) = setup_api(); /// The core of this test. /// @@ -457,55 +500,47 @@ async fn archive_storage_closest_merkle_value() { api: &RpcModule>>, block_hash: String, ) -> HashMap { - let result: ArchiveStorageResult = api - .call( + let mut sub = api + .subscribe_unbounded( "archive_unstable_storage", rpc_params![ &block_hash, vec![ - PaginatedStorageQuery { + StorageQuery { key: hex_string(b":AAAA"), query_type: StorageQueryType::ClosestDescendantMerkleValue, - pagination_start_key: None, }, - PaginatedStorageQuery { + StorageQuery { key: hex_string(b":AAAB"), query_type: StorageQueryType::ClosestDescendantMerkleValue, - pagination_start_key: None, }, // Key with descendant. - PaginatedStorageQuery { + StorageQuery { key: hex_string(b":A"), query_type: StorageQueryType::ClosestDescendantMerkleValue, - pagination_start_key: None, }, - PaginatedStorageQuery { + StorageQuery { key: hex_string(b":AA"), query_type: StorageQueryType::ClosestDescendantMerkleValue, - pagination_start_key: None, }, // Keys below this comment do not produce a result. // Key that exceed the keyspace of the trie. - PaginatedStorageQuery { + StorageQuery { key: hex_string(b":AAAAX"), query_type: StorageQueryType::ClosestDescendantMerkleValue, - pagination_start_key: None, }, - PaginatedStorageQuery { + StorageQuery { key: hex_string(b":AAABX"), query_type: StorageQueryType::ClosestDescendantMerkleValue, - pagination_start_key: None, }, // Key that are not part of the trie. - PaginatedStorageQuery { + StorageQuery { key: hex_string(b":AAX"), query_type: StorageQueryType::ClosestDescendantMerkleValue, - pagination_start_key: None, }, - PaginatedStorageQuery { + StorageQuery { key: hex_string(b":AAAX"), query_type: StorageQueryType::ClosestDescendantMerkleValue, - pagination_start_key: None, }, ] ], @@ -513,19 +548,21 @@ async fn archive_storage_closest_merkle_value() { .await .unwrap(); - let merkle_values: HashMap<_, _> = match result { - ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, .. 
}) => result - .into_iter() - .map(|res| { - let value = match res.result { + let mut merkle_values = HashMap::new(); + loop { + let event = get_next_event::(&mut sub).await; + match event { + ArchiveStorageEvent::Storage(result) => { + let str_result = match result.result { StorageResultType::ClosestDescendantMerkleValue(value) => value, - _ => panic!("Unexpected StorageResultType"), + _ => panic!("Unexpected result type"), }; - (res.key, value) - }) - .collect(), - _ => panic!("Unexpected result"), - }; + merkle_values.insert(result.key, str_result); + }, + ArchiveStorageEvent::StorageError(err) => panic!("Unexpected error {err:?}"), + ArchiveStorageEvent::StorageDone => break, + } + } // Response for AAAA, AAAB, A and AA. assert_eq!(merkle_values.len(), 4); @@ -604,9 +641,9 @@ async fn archive_storage_closest_merkle_value() { } #[tokio::test] -async fn archive_storage_paginate_iterations() { +async fn archive_storage_iterations() { // 1 iteration allowed before pagination kicks in. - let (client, api) = setup_api(1, MAX_QUERIED_LIMIT); + let (client, api) = setup_api(); // Import a new block with storage changes. let mut builder = BlockBuilderBuilder::new(&*client) @@ -625,237 +662,94 @@ async fn archive_storage_paginate_iterations() { // Calling with an invalid hash. let invalid_hash = hex_string(&INVALID_HASH); - let result: ArchiveStorageResult = api - .call( + let mut sub = api + .subscribe_unbounded( "archive_unstable_storage", rpc_params![ &invalid_hash, - vec![PaginatedStorageQuery { - key: hex_string(b":m"), - query_type: StorageQueryType::DescendantsValues, - pagination_start_key: None, - }] - ], - ) - .await - .unwrap(); - match result { - ArchiveStorageResult::Err(_) => (), - _ => panic!("Unexpected result"), - }; - - // Valid call with storage at the key. - let result: ArchiveStorageResult = api - .call( - "archive_unstable_storage", - rpc_params![ - &block_hash, - vec![PaginatedStorageQuery { - key: hex_string(b":m"), - query_type: StorageQueryType::DescendantsValues, - pagination_start_key: None, - }] - ], - ) - .await - .unwrap(); - match result { - ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => { - assert_eq!(result.len(), 1); - assert_eq!(discarded_items, 0); - - assert_eq!(result[0].key, hex_string(b":m")); - assert_eq!(result[0].result, StorageResultType::Value(hex_string(b"a"))); - }, - _ => panic!("Unexpected result"), - }; - - // Continue with pagination. - let result: ArchiveStorageResult = api - .call( - "archive_unstable_storage", - rpc_params![ - &block_hash, - vec![PaginatedStorageQuery { - key: hex_string(b":m"), - query_type: StorageQueryType::DescendantsValues, - pagination_start_key: Some(hex_string(b":m")), - }] - ], - ) - .await - .unwrap(); - match result { - ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => { - assert_eq!(result.len(), 1); - assert_eq!(discarded_items, 0); - - assert_eq!(result[0].key, hex_string(b":mo")); - assert_eq!(result[0].result, StorageResultType::Value(hex_string(b"ab"))); - }, - _ => panic!("Unexpected result"), - }; - - // Continue with pagination. 
- let result: ArchiveStorageResult = api - .call( - "archive_unstable_storage", - rpc_params![ - &block_hash, - vec![PaginatedStorageQuery { + vec![StorageQuery { key: hex_string(b":m"), query_type: StorageQueryType::DescendantsValues, - pagination_start_key: Some(hex_string(b":mo")), }] ], ) .await .unwrap(); - match result { - ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => { - assert_eq!(result.len(), 1); - assert_eq!(discarded_items, 0); - - assert_eq!(result[0].key, hex_string(b":moD")); - assert_eq!(result[0].result, StorageResultType::Value(hex_string(b"abcmoD"))); - }, - _ => panic!("Unexpected result"), - }; - // Continue with pagination. - let result: ArchiveStorageResult = api - .call( - "archive_unstable_storage", - rpc_params![ - &block_hash, - vec![PaginatedStorageQuery { - key: hex_string(b":m"), - query_type: StorageQueryType::DescendantsValues, - pagination_start_key: Some(hex_string(b":moD")), - }] - ], - ) - .await - .unwrap(); - match result { - ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => { - assert_eq!(result.len(), 1); - assert_eq!(discarded_items, 0); - - assert_eq!(result[0].key, hex_string(b":moc")); - assert_eq!(result[0].result, StorageResultType::Value(hex_string(b"abc"))); - }, - _ => panic!("Unexpected result"), - }; + assert_matches!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::StorageError(_) + ); - // Continue with pagination. - let result: ArchiveStorageResult = api - .call( + // Valid call with storage at the key. + let mut sub = api + .subscribe_unbounded( "archive_unstable_storage", rpc_params![ &block_hash, - vec![PaginatedStorageQuery { + vec![StorageQuery { key: hex_string(b":m"), query_type: StorageQueryType::DescendantsValues, - pagination_start_key: Some(hex_string(b":moc")), }] ], ) .await .unwrap(); - match result { - ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => { - assert_eq!(result.len(), 1); - assert_eq!(discarded_items, 0); - assert_eq!(result[0].key, hex_string(b":mock")); - assert_eq!(result[0].result, StorageResultType::Value(hex_string(b"abcd"))); - }, - _ => panic!("Unexpected result"), - }; + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::Storage(StorageResult { + key: hex_string(b":m"), + result: StorageResultType::Value(hex_string(b"a")), + child_trie_key: None, + }) + ); - // Continue with pagination until no keys are returned. 
- let result: ArchiveStorageResult = api - .call( - "archive_unstable_storage", - rpc_params![ - &block_hash, - vec![PaginatedStorageQuery { - key: hex_string(b":m"), - query_type: StorageQueryType::DescendantsValues, - pagination_start_key: Some(hex_string(b":mock")), - }] - ], - ) - .await - .unwrap(); - match result { - ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => { - assert_eq!(result.len(), 0); - assert_eq!(discarded_items, 0); - }, - _ => panic!("Unexpected result"), - }; -} + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::Storage(StorageResult { + key: hex_string(b":mo"), + result: StorageResultType::Value(hex_string(b"ab")), + child_trie_key: None, + }) + ); -#[tokio::test] -async fn archive_storage_discarded_items() { - // One query at a time - let (client, api) = setup_api(MAX_PAGINATION_LIMIT, 1); + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::Storage(StorageResult { + key: hex_string(b":moD"), + result: StorageResultType::Value(hex_string(b"abcmoD")), + child_trie_key: None, + }) + ); - // Import a new block with storage changes. - let mut builder = BlockBuilderBuilder::new(&*client) - .on_parent_block(client.chain_info().genesis_hash) - .with_parent_block_number(0) - .build() - .unwrap(); - builder.push_storage_change(b":m".to_vec(), Some(b"a".to_vec())).unwrap(); - let block = builder.build().unwrap().block; - let block_hash = format!("{:?}", block.header.hash()); - client.import(BlockOrigin::Own, block.clone()).await.unwrap(); + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::Storage(StorageResult { + key: hex_string(b":moc"), + result: StorageResultType::Value(hex_string(b"abc")), + child_trie_key: None, + }) + ); - // Valid call with storage at the key. - let result: ArchiveStorageResult = api - .call( - "archive_unstable_storage", - rpc_params![ - &block_hash, - vec![ - PaginatedStorageQuery { - key: hex_string(b":m"), - query_type: StorageQueryType::Value, - pagination_start_key: None, - }, - PaginatedStorageQuery { - key: hex_string(b":m"), - query_type: StorageQueryType::Hash, - pagination_start_key: None, - }, - PaginatedStorageQuery { - key: hex_string(b":m"), - query_type: StorageQueryType::Hash, - pagination_start_key: None, - } - ] - ], - ) - .await - .unwrap(); - match result { - ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => { - assert_eq!(result.len(), 1); - assert_eq!(discarded_items, 2); + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::Storage(StorageResult { + key: hex_string(b":mock"), + result: StorageResultType::Value(hex_string(b"abcd")), + child_trie_key: None, + }) + ); - assert_eq!(result[0].key, hex_string(b":m")); - assert_eq!(result[0].result, StorageResultType::Value(hex_string(b"a"))); - }, - _ => panic!("Unexpected result"), - }; + assert_matches!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::StorageDone + ); } #[tokio::test] async fn archive_storage_diff_main_trie() { - let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); + let (client, api) = setup_api(); let mut builder = BlockBuilderBuilder::new(&*client) .on_parent_block(client.chain_info().genesis_hash) @@ -965,7 +859,7 @@ async fn archive_storage_diff_main_trie() { #[tokio::test] async fn archive_storage_diff_no_changes() { - let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); + let (client, api) = setup_api(); // Build 2 identical blocks. 
let mut builder = BlockBuilderBuilder::new(&*client) @@ -1012,7 +906,7 @@ async fn archive_storage_diff_no_changes() { #[tokio::test] async fn archive_storage_diff_deleted_changes() { - let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); + let (client, api) = setup_api(); // Blocks are imported as forks. let mut builder = BlockBuilderBuilder::new(&*client) @@ -1079,7 +973,7 @@ async fn archive_storage_diff_deleted_changes() { #[tokio::test] async fn archive_storage_diff_invalid_params() { let invalid_hash = hex_string(&INVALID_HASH); - let (_, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); + let (_, api) = setup_api(); // Invalid shape for parameters. let items: Vec> = Vec::new(); diff --git a/substrate/client/rpc-spec-v2/src/chain_head/event.rs b/substrate/client/rpc-spec-v2/src/chain_head/event.rs index bd9863060910..de74145a3f08 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/event.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/event.rs @@ -235,7 +235,7 @@ pub struct OperationCallDone { pub output: String, } -/// The response of the `chainHead_call` method. +/// The response of the `chainHead_storage` method. #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct OperationStorageItems { @@ -536,6 +536,7 @@ mod tests { items: vec![StorageResult { key: "0x1".into(), result: StorageResultType::Value("0x123".to_string()), + child_trie_key: None, }], }); diff --git a/substrate/client/rpc-spec-v2/src/common/events.rs b/substrate/client/rpc-spec-v2/src/common/events.rs index 198a60bf4cac..44f722c0c61b 100644 --- a/substrate/client/rpc-spec-v2/src/common/events.rs +++ b/substrate/client/rpc-spec-v2/src/common/events.rs @@ -78,6 +78,10 @@ pub struct StorageResult { /// The result of the query. #[serde(flatten)] pub result: StorageResultType, + /// The child trie key if provided. + #[serde(skip_serializing_if = "Option::is_none")] + #[serde(default)] + pub child_trie_key: Option, } /// The type of the storage query. @@ -105,23 +109,41 @@ pub struct StorageResultErr { /// The result of a storage call. #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -#[serde(untagged)] -pub enum ArchiveStorageResult { +#[serde(rename_all = "camelCase")] +#[serde(tag = "event")] +pub enum ArchiveStorageEvent { /// Query generated a result. - Ok(ArchiveStorageMethodOk), + Storage(StorageResult), /// Query encountered an error. - Err(ArchiveStorageMethodErr), + StorageError(ArchiveStorageMethodErr), + /// Operation storage is done. + StorageDone, } -impl ArchiveStorageResult { - /// Create a new `ArchiveStorageResult::Ok` result. - pub fn ok(result: Vec, discarded_items: usize) -> Self { - Self::Ok(ArchiveStorageMethodOk { result, discarded_items }) +impl ArchiveStorageEvent { + /// Create a new `ArchiveStorageEvent::StorageErr` event. + pub fn err(error: String) -> Self { + Self::StorageError(ArchiveStorageMethodErr { error }) } - /// Create a new `ArchiveStorageResult::Err` result. - pub fn err(error: String) -> Self { - Self::Err(ArchiveStorageMethodErr { error }) + /// Create a new `ArchiveStorageEvent::StorageResult` event. + pub fn result(result: StorageResult) -> Self { + Self::Storage(result) + } + + /// Checks if the event is a `StorageDone` event. + pub fn is_done(&self) -> bool { + matches!(self, Self::StorageDone) + } + + /// Checks if the event is a `StorageErr` event. + pub fn is_err(&self) -> bool { + matches!(self, Self::StorageError(_)) + } + + /// Checks if the event is a `StorageResult` event. 
+ pub fn is_result(&self) -> bool { + matches!(self, Self::Storage(_)) } } @@ -354,8 +376,11 @@ mod tests { #[test] fn storage_result() { // Item with Value. - let item = - StorageResult { key: "0x1".into(), result: StorageResultType::Value("res".into()) }; + let item = StorageResult { + key: "0x1".into(), + result: StorageResultType::Value("res".into()), + child_trie_key: None, + }; // Encode let ser = serde_json::to_string(&item).unwrap(); let exp = r#"{"key":"0x1","value":"res"}"#; @@ -365,8 +390,11 @@ mod tests { assert_eq!(dec, item); // Item with Hash. - let item = - StorageResult { key: "0x1".into(), result: StorageResultType::Hash("res".into()) }; + let item = StorageResult { + key: "0x1".into(), + result: StorageResultType::Hash("res".into()), + child_trie_key: None, + }; // Encode let ser = serde_json::to_string(&item).unwrap(); let exp = r#"{"key":"0x1","hash":"res"}"#; @@ -379,6 +407,7 @@ mod tests { let item = StorageResult { key: "0x1".into(), result: StorageResultType::ClosestDescendantMerkleValue("res".into()), + child_trie_key: None, }; // Encode let ser = serde_json::to_string(&item).unwrap(); diff --git a/substrate/client/rpc-spec-v2/src/common/storage.rs b/substrate/client/rpc-spec-v2/src/common/storage.rs index 673e20b2bc78..a1e34d51530e 100644 --- a/substrate/client/rpc-spec-v2/src/common/storage.rs +++ b/substrate/client/rpc-spec-v2/src/common/storage.rs @@ -24,7 +24,7 @@ use sc_client_api::{Backend, ChildInfo, StorageKey, StorageProvider}; use sp_runtime::traits::Block as BlockT; use tokio::sync::mpsc; -use super::events::{StorageResult, StorageResultType}; +use super::events::{StorageQuery, StorageQueryType, StorageResult, StorageResultType}; use crate::hex_string; /// Call into the storage of blocks. @@ -70,9 +70,6 @@ pub enum IterQueryType { /// The result of making a query call. pub type QueryResult = Result, String>; -/// The result of iterating over keys. -pub type QueryIterResult = Result<(Vec, Option), String>; - impl Storage where Block: BlockT + 'static, @@ -97,6 +94,7 @@ where QueryResult::Ok(opt.map(|storage_data| StorageResult { key: hex_string(&key.0), result: StorageResultType::Value(hex_string(&storage_data.0)), + child_trie_key: child_key.map(|c| hex_string(&c.storage_key())), })) }) .unwrap_or_else(|error| QueryResult::Err(error.to_string())) @@ -120,6 +118,7 @@ where QueryResult::Ok(opt.map(|storage_data| StorageResult { key: hex_string(&key.0), result: StorageResultType::Hash(hex_string(&storage_data.as_ref())), + child_trie_key: child_key.map(|c| hex_string(&c.storage_key())), })) }) .unwrap_or_else(|error| QueryResult::Err(error.to_string())) @@ -149,6 +148,7 @@ where StorageResult { key: hex_string(&key.0), result: StorageResultType::ClosestDescendantMerkleValue(result), + child_trie_key: child_key.map(|c| hex_string(&c.storage_key())), } })) }) @@ -199,56 +199,6 @@ where } } - /// Iterate over at most the provided number of keys. - /// - /// Returns the storage result with a potential next key to resume iteration. 
- pub fn query_iter_pagination( - &self, - query: QueryIter, - hash: Block::Hash, - child_key: Option<&ChildInfo>, - count: usize, - ) -> QueryIterResult { - let QueryIter { ty, query_key, pagination_start_key } = query; - - let mut keys_iter = if let Some(child_key) = child_key { - self.client.child_storage_keys( - hash, - child_key.to_owned(), - Some(&query_key), - pagination_start_key.as_ref(), - ) - } else { - self.client.storage_keys(hash, Some(&query_key), pagination_start_key.as_ref()) - } - .map_err(|err| err.to_string())?; - - let mut ret = Vec::with_capacity(count); - let mut next_pagination_key = None; - for _ in 0..count { - let Some(key) = keys_iter.next() else { break }; - - next_pagination_key = Some(key.clone()); - - let result = match ty { - IterQueryType::Value => self.query_value(hash, &key, child_key), - IterQueryType::Hash => self.query_hash(hash, &key, child_key), - }?; - - if let Some(value) = result { - ret.push(value); - } - } - - // Save the next key if any to continue the iteration. - let maybe_next_query = keys_iter.next().map(|_| QueryIter { - ty, - query_key, - pagination_start_key: next_pagination_key, - }); - Ok((ret, maybe_next_query)) - } - /// Raw iterator over the keys. pub fn raw_keys_iter( &self, @@ -264,3 +214,96 @@ where keys_iter.map_err(|err| err.to_string()) } } + +/// Generates storage events for `chainHead_storage` and `archive_storage` subscriptions. +pub struct StorageSubscriptionClient { + /// Storage client. + client: Storage, + _phandom: PhantomData<(BE, Block)>, +} + +impl Clone for StorageSubscriptionClient { + fn clone(&self) -> Self { + Self { client: self.client.clone(), _phandom: PhantomData } + } +} + +impl StorageSubscriptionClient { + /// Constructs a new [`StorageSubscriptionClient`]. + pub fn new(client: Arc) -> Self { + Self { client: Storage::new(client), _phandom: PhantomData } + } +} + +impl StorageSubscriptionClient +where + Block: BlockT + 'static, + BE: Backend + 'static, + Client: StorageProvider + Send + Sync + 'static, +{ + /// Generate storage events to the provided sender. 
+ pub async fn generate_events( + &mut self, + hash: Block::Hash, + items: Vec>, + child_key: Option, + tx: mpsc::Sender, + ) -> Result<(), tokio::task::JoinError> { + let this = self.clone(); + + tokio::task::spawn_blocking(move || { + for item in items { + match item.query_type { + StorageQueryType::Value => { + let rp = this.client.query_value(hash, &item.key, child_key.as_ref()); + if tx.blocking_send(rp).is_err() { + break; + } + }, + StorageQueryType::Hash => { + let rp = this.client.query_hash(hash, &item.key, child_key.as_ref()); + if tx.blocking_send(rp).is_err() { + break; + } + }, + StorageQueryType::ClosestDescendantMerkleValue => { + let rp = + this.client.query_merkle_value(hash, &item.key, child_key.as_ref()); + if tx.blocking_send(rp).is_err() { + break; + } + }, + StorageQueryType::DescendantsValues => { + let query = QueryIter { + query_key: item.key, + ty: IterQueryType::Value, + pagination_start_key: None, + }; + this.client.query_iter_pagination_with_producer( + query, + hash, + child_key.as_ref(), + &tx, + ) + }, + StorageQueryType::DescendantsHashes => { + let query = QueryIter { + query_key: item.key, + ty: IterQueryType::Hash, + pagination_start_key: None, + }; + this.client.query_iter_pagination_with_producer( + query, + hash, + child_key.as_ref(), + &tx, + ) + }, + } + } + }) + .await?; + + Ok(()) + } +} diff --git a/substrate/client/service/src/builder.rs b/substrate/client/service/src/builder.rs index 027a444012af..a47a05c0a190 100644 --- a/substrate/client/service/src/builder.rs +++ b/substrate/client/service/src/builder.rs @@ -756,8 +756,6 @@ where backend.clone(), genesis_hash, task_executor.clone(), - // Defaults to sensible limits for the `Archive`. - sc_rpc_spec_v2::archive::ArchiveConfig::default(), ) .into_rpc(); rpc_api.merge(archive_v2).map_err(|e| Error::Application(e.into()))?; From 1d519a1054d2edb8fc0b868eba6318fb3d448b33 Mon Sep 17 00:00:00 2001 From: Pavlo Khrystenko <45178695+pkhry@users.noreply.github.com> Date: Fri, 29 Nov 2024 16:24:58 +0100 Subject: [PATCH 38/41] Update scale-info to 2.11.6 (#6681) # Description Updates scale-info to from 2.11.5 2.11.6, so that generated code is annotated with `allow(deprecated)` Pre-requisite for https://github.com/paritytech/polkadot-sdk/pull/6312 --- Cargo.lock | 8 +- Cargo.toml | 2 +- prdoc/pr_6681.prdoc | 406 ++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 411 insertions(+), 5 deletions(-) create mode 100644 prdoc/pr_6681.prdoc diff --git a/Cargo.lock b/Cargo.lock index 5e4e9c267b08..1fe2d766f16a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -23715,9 +23715,9 @@ dependencies = [ [[package]] name = "scale-info" -version = "2.11.5" +version = "2.11.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1aa7ffc1c0ef49b0452c6e2986abf2b07743320641ffd5fc63d552458e3b779b" +checksum = "346a3b32eba2640d17a9cb5927056b08f3de90f65b72fe09402c2ad07d684d0b" dependencies = [ "bitvec", "cfg-if", @@ -23729,9 +23729,9 @@ dependencies = [ [[package]] name = "scale-info-derive" -version = "2.11.5" +version = "2.11.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46385cc24172cf615450267463f937c10072516359b3ff1cb24228a4a08bf951" +checksum = "c6630024bf739e2179b91fb424b28898baf819414262c5d376677dbff1fe7ebf" dependencies = [ "proc-macro-crate 3.1.0", "proc-macro2 1.0.86", diff --git a/Cargo.toml b/Cargo.toml index 964964908a9b..ecc385504181 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1197,7 +1197,7 @@ sc-tracing-proc-macro = { path = 
"substrate/client/tracing/proc-macro", default- sc-transaction-pool = { path = "substrate/client/transaction-pool", default-features = false } sc-transaction-pool-api = { path = "substrate/client/transaction-pool/api", default-features = false } sc-utils = { path = "substrate/client/utils", default-features = false } -scale-info = { version = "2.11.1", default-features = false } +scale-info = { version = "2.11.6", default-features = false } schemars = { version = "0.8.13", default-features = false } schnellru = { version = "0.2.3" } schnorrkel = { version = "0.11.4", default-features = false } diff --git a/prdoc/pr_6681.prdoc b/prdoc/pr_6681.prdoc new file mode 100644 index 000000000000..93a967d4a66c --- /dev/null +++ b/prdoc/pr_6681.prdoc @@ -0,0 +1,406 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: update scale-info to 2.11.6 + +doc: + - audience: Runtime Dev + description: | + Updates scale-info to 2.11.1 from 2.11.5. + Updated version of scale-info annotates generated code with `allow(deprecated)` + +crates: + - name: bridge-runtime-common + bump: none + - name: bp-header-chain + bump: none + - name: bp-runtime + bump: none + - name: frame-support + bump: none + - name: sp-core + bump: none + - name: sp-trie + bump: none + - name: sp-runtime + bump: none + - name: sp-application-crypto + bump: none + - name: sp-arithmetic + bump: none + - name: sp-weights + bump: none + - name: sp-api + bump: none + - name: sp-metadata-ir + bump: none + - name: sp-version + bump: none + - name: sp-inherents + bump: none + - name: frame-executive + bump: none + - name: frame-system + bump: none + - name: pallet-balances + bump: none + - name: frame-benchmarking + bump: none + - name: pallet-migrations + bump: none + - name: cumulus-pallet-parachain-system + bump: none + - name: cumulus-primitives-core + bump: none + - name: polkadot-core-primitives + bump: none + - name: polkadot-parachain-primitives + bump: none + - name: polkadot-primitives + bump: none + - name: sp-authority-discovery + bump: none + - name: sp-consensus-slots + bump: none + - name: sp-staking + bump: none + - name: staging-xcm + bump: none + - name: cumulus-primitives-parachain-inherent + bump: none + - name: pallet-message-queue + bump: none + - name: polkadot-runtime-common + bump: none + - name: frame-election-provider-support + bump: none + - name: sp-npos-elections + bump: none + - name: sp-consensus-grandpa + bump: none + - name: polkadot-primitives + bump: none + - name: sp-authority-discovery + bump: none + - name: sp-consensus-grandpa + bump: none + - name: sp-genesis-builder + bump: none + - name: sp-consensus-babe + bump: none + - name: sp-mixnet + bump: none + - name: sc-rpc-api + bump: none + - name: sp-session + bump: none + - name: sp-statement-store + bump: none + - name: sp-transaction-storage-proof + bump: none + - name: pallet-asset-rate + bump: none + - name: pallet-authorship + bump: none + - name: pallet-babe + bump: none + - name: pallet-session + bump: none + - name: pallet-timestamp + bump: none + - name: pallet-offences + bump: none + - name: pallet-staking + bump: none + - name: pallet-bags-list + bump: none + - name: pallet-broker + bump: none + - name: pallet-election-provider-multi-phase + bump: none + - name: pallet-fast-unstake + bump: none + - name: pallet-identity + bump: none + - name: pallet-transaction-payment + bump: none + - name: pallet-treasury + bump: none + - name: 
+    bump: none
+  - name: pallet-collective
+    bump: none
+  - name: pallet-root-testing
+    bump: none
+  - name: pallet-vesting
+    bump: none
+  - name: polkadot-runtime-parachains
+    bump: none
+  - name: pallet-authority-discovery
+    bump: none
+  - name: pallet-mmr
+    bump: none
+  - name: sp-mmr-primitives
+    bump: none
+  - name: staging-xcm-executor
+    bump: none
+  - name: staging-xcm-builder
+    bump: none
+  - name: pallet-asset-conversion
+    bump: none
+  - name: pallet-assets
+    bump: none
+  - name: pallet-salary
+    bump: none
+  - name: pallet-ranked-collective
+    bump: none
+  - name: pallet-xcm
+    bump: none
+  - name: xcm-runtime-apis
+    bump: none
+  - name: pallet-grandpa
+    bump: none
+  - name: pallet-indices
+    bump: none
+  - name: pallet-sudo
+    bump: none
+  - name: sp-consensus-beefy
+    bump: none
+  - name: cumulus-primitives-storage-weight-reclaim
+    bump: none
+  - name: cumulus-pallet-aura-ext
+    bump: none
+  - name: pallet-aura
+    bump: none
+  - name: sp-consensus-aura
+    bump: none
+  - name: pallet-collator-selection
+    bump: none
+  - name: pallet-glutton
+    bump: none
+  - name: staging-parachain-info
+    bump: none
+  - name: westend-runtime
+    bump: none
+  - name: frame-metadata-hash-extension
+    bump: none
+  - name: frame-system-benchmarking
+    bump: none
+  - name: pallet-beefy
+    bump: none
+  - name: pallet-beefy-mmr
+    bump: none
+  - name: pallet-conviction-voting
+    bump: none
+  - name: pallet-scheduler
+    bump: none
+  - name: pallet-preimage
+    bump: none
+  - name: pallet-delegated-staking
+    bump: none
+  - name: pallet-nomination-pools
+    bump: none
+  - name: pallet-democracy
+    bump: none
+  - name: pallet-elections-phragmen
+    bump: none
+  - name: pallet-membership
+    bump: none
+  - name: pallet-multisig
+    bump: none
+  - name: polkadot-sdk-frame
+    bump: none
+  - name: pallet-dev-mode
+    bump: none
+  - name: pallet-verify-signature
+    bump: none
+  - name: pallet-nomination-pools-benchmarking
+    bump: none
+  - name: pallet-offences-benchmarking
+    bump: none
+  - name: pallet-im-online
+    bump: none
+  - name: pallet-parameters
+    bump: none
+  - name: pallet-proxy
+    bump: none
+  - name: pallet-recovery
+    bump: none
+  - name: pallet-referenda
+    bump: none
+  - name: pallet-society
+    bump: none
+  - name: pallet-state-trie-migration
+    bump: none
+  - name: pallet-whitelist
+    bump: none
+  - name: pallet-xcm-benchmarks
+    bump: none
+  - name: rococo-runtime
+    bump: none
+  - name: pallet-bounties
+    bump: none
+  - name: pallet-child-bounties
+    bump: none
+  - name: pallet-nis
+    bump: none
+  - name: pallet-tips
+    bump: none
+  - name: parachains-common
+    bump: none
+  - name: pallet-asset-tx-payment
+    bump: none
+  - name: cumulus-pallet-xcmp-queue
+    bump: none
+  - name: bp-xcm-bridge-hub-router
+    bump: none
+  - name: pallet-xcm-bridge-hub-router
+    bump: none
+  - name: assets-common
+    bump: none
+  - name: bp-messages
+    bump: none
+  - name: bp-parachains
+    bump: none
+  - name: bp-polkadot-core
+    bump: none
+  - name: bp-relayers
+    bump: none
+  - name: bp-xcm-bridge-hub
+    bump: none
+  - name: bridge-hub-common
+    bump: none
+  - name: snowbridge-core
+    bump: none
+  - name: snowbridge-beacon-primitives
+    bump: none
+  - name: snowbridge-ethereum
+    bump: none
+  - name: pallet-bridge-grandpa
+    bump: none
+  - name: pallet-bridge-messages
+    bump: none
+  - name: pallet-bridge-parachains
+    bump: none
+  - name: pallet-bridge-relayers
+    bump: none
+  - name: pallet-xcm-bridge-hub
+    bump: none
+  - name: cumulus-pallet-dmp-queue
+    bump: none
+  - name: cumulus-pallet-solo-to-para
+    bump: none
+  - name: cumulus-pallet-xcm
+    bump: none
+  - name: cumulus-ping
+    bump: none
+  - name: frame-benchmarking-pallet-pov
+    bump: none
+  - name: pallet-alliance
+    bump: none
+  - name: pallet-asset-conversion-ops
+    bump: none
+  - name: pallet-asset-conversion-tx-payment
+    bump: none
+  - name: pallet-assets-freezer
+    bump: none
+  - name: pallet-atomic-swap
+    bump: none
+  - name: pallet-collective-content
+    bump: none
+  - name: pallet-contracts
+    bump: none
+  - name: pallet-contracts-uapi
+    bump: none
+  - name: pallet-insecure-randomness-collective-flip
+    bump: none
+  - name: pallet-contracts-mock-network
+    bump: none
+  - name: xcm-simulator
+    bump: none
+  - name: pallet-core-fellowship
+    bump: none
+  - name: pallet-lottery
+    bump: none
+  - name: pallet-mixnet
+    bump: none
+  - name: pallet-nft-fractionalization
+    bump: none
+  - name: pallet-nfts
+    bump: none
+  - name: pallet-node-authorization
+    bump: none
+  - name: pallet-paged-list
+    bump: none
+  - name: pallet-remark
+    bump: none
+  - name: pallet-revive
+    bump: none
+  - name: pallet-revive-uapi
+    bump: none
+  - name: pallet-revive-eth-rpc
+    bump: none
+  - name: pallet-skip-feeless-payment
+    bump: none
+  - name: pallet-revive-mock-network
+    bump: none
+  - name: pallet-root-offences
+    bump: none
+  - name: pallet-safe-mode
+    bump: none
+  - name: pallet-scored-pool
+    bump: none
+  - name: pallet-statement
+    bump: none
+  - name: pallet-transaction-storage
+    bump: none
+  - name: pallet-tx-pause
+    bump: none
+  - name: pallet-uniques
+    bump: none
+  - name: snowbridge-outbound-queue-merkle-tree
+    bump: none
+  - name: snowbridge-pallet-ethereum-client
+    bump: none
+  - name: snowbridge-pallet-inbound-queue
+    bump: none
+  - name: snowbridge-router-primitives
+    bump: none
+  - name: snowbridge-pallet-outbound-queue
+    bump: none
+  - name: snowbridge-pallet-system
+    bump: none
+  - name: bp-asset-hub-rococo
+    bump: none
+  - name: bp-asset-hub-westend
+    bump: none
+  - name: bp-polkadot-bulletin
+    bump: none
+  - name: asset-hub-rococo-runtime
+    bump: none
+  - name: asset-hub-westend-runtime
+    bump: none
+  - name: bridge-hub-rococo-runtime
+    bump: none
+  - name: bridge-hub-westend-runtime
+    bump: none
+  - name: collectives-westend-runtime
+    bump: none
+  - name: coretime-rococo-runtime
+    bump: none
+  - name: coretime-westend-runtime
+    bump: none
+  - name: people-rococo-runtime
+    bump: none
+  - name: people-westend-runtime
+    bump: none
+  - name: penpal-runtime
+    bump: none
+  - name: contracts-rococo-runtime
+    bump: none
+  - name: glutton-westend-runtime
+    bump: none
+  - name: rococo-parachain-runtime
+    bump: none
+  - name: xcm-simulator-example
+    bump: none
\ No newline at end of file

From 5ad8780b653350050c6a854205de20c439aa7b65 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Alexandre=20R=2E=20Bald=C3=A9?=
Date: Fri, 29 Nov 2024 19:35:06 +0000
Subject: [PATCH 39/41] People chain integration tests (#6377)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

# Description

Made as a follow-up of https://github.com/polkadot-fellows/runtimes/pull/499

## Integration

N/A

## Review Notes

N/A

---------

Co-authored-by: Dónal Murray
---
 Cargo.lock                                     |   1 +
 .../tests/people/people-westend/Cargo.toml     |   1 +
 .../people-westend/src/tests/governance.rs    | 503 ++++++++++++++++++
 .../people/people-westend/src/tests/mod.rs     |   1 +
 4 files changed, 506 insertions(+)
 create mode 100644 cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/governance.rs

diff --git a/Cargo.lock b/Cargo.lock
index 1fe2d766f16a..a945d148e051 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -16662,6 +16662,7 @@ dependencies = [
  "sp-runtime 31.0.1",
  "staging-xcm 7.0.0",
  "staging-xcm-executor 7.0.0",
+ "westend-runtime",
  "westend-runtime-constants 7.0.0",
  "westend-system-emulated-network",
 ]
diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/Cargo.toml
index aa6eebc5458f..53acd038cdf5 100644
--- a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/Cargo.toml
+++ b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/Cargo.toml
@@ -21,6 +21,7 @@ sp-runtime = { workspace = true }
 
 # Polkadot
 polkadot-runtime-common = { workspace = true, default-features = true }
 westend-runtime-constants = { workspace = true, default-features = true }
+westend-runtime = { workspace = true }
 xcm = { workspace = true }
 xcm-executor = { workspace = true }
diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/governance.rs b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/governance.rs
new file mode 100644
index 000000000000..3dadcdd94870
--- /dev/null
+++ b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/governance.rs
@@ -0,0 +1,503 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use crate::imports::*;
+use frame_support::traits::ProcessMessageError;
+
+use codec::Encode;
+use frame_support::sp_runtime::traits::Dispatchable;
+use parachains_common::AccountId;
+use people_westend_runtime::people::IdentityInfo;
+use westend_runtime::governance::pallet_custom_origins::Origin::GeneralAdmin as GeneralAdminOrigin;
+use westend_system_emulated_network::people_westend_emulated_chain::people_westend_runtime;
+
+use pallet_identity::Data;
+
+use emulated_integration_tests_common::accounts::{ALICE, BOB};
+
+#[test]
+fn relay_commands_add_registrar() {
+	let (origin_kind, origin) = (OriginKind::Superuser, <Westend as Chain>::RuntimeOrigin::root());
+
+	let registrar: AccountId = [1; 32].into();
+	Westend::execute_with(|| {
+		type Runtime = <Westend as Chain>::Runtime;
+		type RuntimeCall = <Westend as Chain>::RuntimeCall;
+		type RuntimeEvent = <Westend as Chain>::RuntimeEvent;
+		type PeopleCall = <PeopleWestend as Chain>::RuntimeCall;
+		type PeopleRuntime = <PeopleWestend as Chain>::Runtime;
+
+		let add_registrar_call =
+			PeopleCall::Identity(pallet_identity::Call::<PeopleRuntime>::add_registrar {
+				account: registrar.into(),
+			});
+
+		let xcm_message = RuntimeCall::XcmPallet(pallet_xcm::Call::<Runtime>::send {
+			dest: bx!(VersionedLocation::from(Location::new(0, [Parachain(1004)]))),
+			message: bx!(VersionedXcm::from(Xcm(vec![
+				UnpaidExecution { weight_limit: Unlimited, check_origin: None },
+				Transact { origin_kind, call: add_registrar_call.encode().into() }
+			]))),
+		});
+
+		assert_ok!(xcm_message.dispatch(origin));
+
+		assert_expected_events!(
+			Westend,
+			vec![
+				RuntimeEvent::XcmPallet(pallet_xcm::Event::Sent { .. }) => {},
+			]
+		);
+	});
+
+	PeopleWestend::execute_with(|| {
+		type RuntimeEvent = <PeopleWestend as Chain>::RuntimeEvent;
+
+		assert_expected_events!(
+			PeopleWestend,
+			vec![
+				RuntimeEvent::Identity(pallet_identity::Event::RegistrarAdded { .. }) => {},
+				RuntimeEvent::MessageQueue(pallet_message_queue::Event::Processed { success: true, .. }) => {},
+			]
+		);
+	});
+}
+
+#[test]
+fn relay_commands_add_registrar_wrong_origin() {
+	let people_westend_alice = PeopleWestend::account_id_of(ALICE);
+
+	let origins = vec![
+		(
+			OriginKind::SovereignAccount,
+			<Westend as Chain>::RuntimeOrigin::signed(people_westend_alice),
+		),
+		(OriginKind::Xcm, GeneralAdminOrigin.into()),
+	];
+
+	let mut signed_origin = true;
+
+	for (origin_kind, origin) in origins {
+		let registrar: AccountId = [1; 32].into();
+		Westend::execute_with(|| {
+			type Runtime = <Westend as Chain>::Runtime;
+			type RuntimeCall = <Westend as Chain>::RuntimeCall;
+			type RuntimeEvent = <Westend as Chain>::RuntimeEvent;
+			type PeopleCall = <PeopleWestend as Chain>::RuntimeCall;
+			type PeopleRuntime = <PeopleWestend as Chain>::Runtime;
+
+			let add_registrar_call =
+				PeopleCall::Identity(pallet_identity::Call::<PeopleRuntime>::add_registrar {
+					account: registrar.into(),
+				});
+
+			let xcm_message = RuntimeCall::XcmPallet(pallet_xcm::Call::<Runtime>::send {
+				dest: bx!(VersionedLocation::from(Location::new(0, [Parachain(1004)]))),
+				message: bx!(VersionedXcm::from(Xcm(vec![
+					UnpaidExecution { weight_limit: Unlimited, check_origin: None },
+					Transact { origin_kind, call: add_registrar_call.encode().into() }
+				]))),
+			});
+
+			assert_ok!(xcm_message.dispatch(origin));
+			assert_expected_events!(
+				Westend,
+				vec![
+					RuntimeEvent::XcmPallet(pallet_xcm::Event::Sent { .. }) => {},
+				]
+			);
+		});
+
+		PeopleWestend::execute_with(|| {
+			type RuntimeEvent = <PeopleWestend as Chain>::RuntimeEvent;
+
+			if signed_origin {
+				assert_expected_events!(
+					PeopleWestend,
+					vec![
+						RuntimeEvent::MessageQueue(pallet_message_queue::Event::ProcessingFailed { error: ProcessMessageError::Unsupported, .. }) => {},
+					]
+				);
+			} else {
+				assert_expected_events!(
+					PeopleWestend,
+					vec![
+						RuntimeEvent::MessageQueue(pallet_message_queue::Event::Processed { success: true, .. }) => {},
+					]
+				);
+			}
+		});
+
+		signed_origin = false;
+	}
+}
+
+#[test]
+fn relay_commands_kill_identity() {
+	// To kill an identity, first one must be set
+	PeopleWestend::execute_with(|| {
+		type PeopleRuntime = <PeopleWestend as Chain>::Runtime;
+		type PeopleRuntimeEvent = <PeopleWestend as Chain>::RuntimeEvent;
+
+		let people_westend_alice =
+			<PeopleWestend as Chain>::RuntimeOrigin::signed(PeopleWestend::account_id_of(ALICE));
+
+		let identity_info = IdentityInfo {
+			email: Data::Raw(b"test@test.io".to_vec().try_into().unwrap()),
+			..Default::default()
+		};
+		let identity: Box<<PeopleRuntime as pallet_identity::Config>::IdentityInformation> =
+			Box::new(identity_info);
+
+		assert_ok!(<PeopleWestend as PeopleWestendPallet>::Identity::set_identity(
+			people_westend_alice,
+			identity
+		));
+
+		assert_expected_events!(
+			PeopleWestend,
+			vec![
+				PeopleRuntimeEvent::Identity(pallet_identity::Event::IdentitySet { .. }) => {},
+			]
+		);
+	});
+
+	let (origin_kind, origin) = (OriginKind::Superuser, <Westend as Chain>::RuntimeOrigin::root());
+
+	Westend::execute_with(|| {
+		type Runtime = <Westend as Chain>::Runtime;
+		type RuntimeCall = <Westend as Chain>::RuntimeCall;
+		type PeopleCall = <PeopleWestend as Chain>::RuntimeCall;
+		type RuntimeEvent = <Westend as Chain>::RuntimeEvent;
+		type PeopleRuntime = <PeopleWestend as Chain>::Runtime;
+
+		let kill_identity_call =
+			PeopleCall::Identity(pallet_identity::Call::<PeopleRuntime>::kill_identity {
+				target: people_westend_runtime::MultiAddress::Id(PeopleWestend::account_id_of(
+					ALICE,
+				)),
+			});
+
+		let xcm_message = RuntimeCall::XcmPallet(pallet_xcm::Call::<Runtime>::send {
+			dest: bx!(VersionedLocation::from(Location::new(0, [Parachain(1004)]))),
+			message: bx!(VersionedXcm::from(Xcm(vec![
+				UnpaidExecution { weight_limit: Unlimited, check_origin: None },
+				Transact { origin_kind, call: kill_identity_call.encode().into() }
+			]))),
+		});
+
+		assert_ok!(xcm_message.dispatch(origin));
+
+		assert_expected_events!(
+			Westend,
+			vec![
+				RuntimeEvent::XcmPallet(pallet_xcm::Event::Sent { .. }) => {},
+			]
+		);
+	});
+
+	PeopleWestend::execute_with(|| {
+		type RuntimeEvent = <PeopleWestend as Chain>::RuntimeEvent;
+
+		assert_expected_events!(
+			PeopleWestend,
+			vec![
+				RuntimeEvent::Identity(pallet_identity::Event::IdentityKilled { .. }) => {},
+				RuntimeEvent::MessageQueue(pallet_message_queue::Event::Processed { success: true, .. }) => {},
+			]
+		);
+	});
+}
+
+#[test]
+fn relay_commands_kill_identity_wrong_origin() {
+	let people_westend_alice = PeopleWestend::account_id_of(BOB);
+
+	let origins = vec![
+		(
+			OriginKind::SovereignAccount,
+			<Westend as Chain>::RuntimeOrigin::signed(people_westend_alice),
+		),
+		(OriginKind::Xcm, GeneralAdminOrigin.into()),
+	];
+
+	for (origin_kind, origin) in origins {
+		Westend::execute_with(|| {
+			type Runtime = <Westend as Chain>::Runtime;
+			type RuntimeCall = <Westend as Chain>::RuntimeCall;
+			type PeopleCall = <PeopleWestend as Chain>::RuntimeCall;
+			type RuntimeEvent = <Westend as Chain>::RuntimeEvent;
+			type PeopleRuntime = <PeopleWestend as Chain>::Runtime;
+
+			let kill_identity_call =
+				PeopleCall::Identity(pallet_identity::Call::<PeopleRuntime>::kill_identity {
+					target: people_westend_runtime::MultiAddress::Id(PeopleWestend::account_id_of(
+						ALICE,
+					)),
+				});
+
+			let xcm_message = RuntimeCall::XcmPallet(pallet_xcm::Call::<Runtime>::send {
+				dest: bx!(VersionedLocation::from(Location::new(0, [Parachain(1004)]))),
+				message: bx!(VersionedXcm::from(Xcm(vec![
+					UnpaidExecution { weight_limit: Unlimited, check_origin: None },
+					Transact { origin_kind, call: kill_identity_call.encode().into() }
+				]))),
+			});
+
+			assert_ok!(xcm_message.dispatch(origin));
+			assert_expected_events!(
+				Westend,
+				vec![
+					RuntimeEvent::XcmPallet(pallet_xcm::Event::Sent { .. }) => {},
+				]
+			);
+		});
+
+		PeopleWestend::execute_with(|| {
+			assert_expected_events!(PeopleWestend, vec![]);
+		});
+	}
+}
+
+#[test]
+fn relay_commands_add_remove_username_authority() {
+	let people_westend_alice = PeopleWestend::account_id_of(ALICE);
+	let people_westend_bob = PeopleWestend::account_id_of(BOB);
+
+	let (origin_kind, origin, usr) =
+		(OriginKind::Superuser, <Westend as Chain>::RuntimeOrigin::root(), "rootusername");
+
+	// First, add a username authority.
+	Westend::execute_with(|| {
+		type Runtime = <Westend as Chain>::Runtime;
+		type RuntimeCall = <Westend as Chain>::RuntimeCall;
+		type RuntimeEvent = <Westend as Chain>::RuntimeEvent;
+		type PeopleCall = <PeopleWestend as Chain>::RuntimeCall;
+		type PeopleRuntime = <PeopleWestend as Chain>::Runtime;
+
+		let add_username_authority =
+			PeopleCall::Identity(pallet_identity::Call::<PeopleRuntime>::add_username_authority {
+				authority: people_westend_runtime::MultiAddress::Id(people_westend_alice.clone()),
+				suffix: b"suffix1".into(),
+				allocation: 10,
+			});
+
+		let add_authority_xcm_msg = RuntimeCall::XcmPallet(pallet_xcm::Call::<Runtime>::send {
+			dest: bx!(VersionedLocation::from(Location::new(0, [Parachain(1004)]))),
+			message: bx!(VersionedXcm::from(Xcm(vec![
+				UnpaidExecution { weight_limit: Unlimited, check_origin: None },
+				Transact { origin_kind, call: add_username_authority.encode().into() }
+			]))),
+		});
+
+		assert_ok!(add_authority_xcm_msg.dispatch(origin.clone()));
+
+		assert_expected_events!(
+			Westend,
+			vec![
+				RuntimeEvent::XcmPallet(pallet_xcm::Event::Sent { .. }) => {},
+			]
+		);
+	});
+
+	// Check events system-parachain-side
+	PeopleWestend::execute_with(|| {
+		type RuntimeEvent = <PeopleWestend as Chain>::RuntimeEvent;
+
+		assert_expected_events!(
+			PeopleWestend,
+			vec![
+				RuntimeEvent::Identity(pallet_identity::Event::AuthorityAdded { .. }) => {},
+				RuntimeEvent::MessageQueue(pallet_message_queue::Event::Processed { success: true, .. }) => {},
+			]
+		);
+	});
+
+	// Now, use the previously added username authority to concede a username to an account.
+	PeopleWestend::execute_with(|| {
+		type PeopleRuntimeEvent = <PeopleWestend as Chain>::RuntimeEvent;
+		let full_username = [usr.to_owned(), ".suffix1".to_owned()].concat().into_bytes();
+
+		assert_ok!(<PeopleWestend as PeopleWestendPallet>::Identity::set_username_for(
+			<PeopleWestend as Chain>::RuntimeOrigin::signed(people_westend_alice.clone()),
+			people_westend_runtime::MultiAddress::Id(people_westend_bob.clone()),
+			full_username,
+			None,
+			true
+		));
+
+		assert_expected_events!(
+			PeopleWestend,
+			vec![
+				PeopleRuntimeEvent::Identity(pallet_identity::Event::UsernameQueued { .. }) => {},
+			]
+		);
+	});
+
+	// Accept the given username
+	PeopleWestend::execute_with(|| {
+		type PeopleRuntimeEvent = <PeopleWestend as Chain>::RuntimeEvent;
+		let full_username = [usr.to_owned(), ".suffix1".to_owned()].concat().into_bytes();
+
+		assert_ok!(<PeopleWestend as PeopleWestendPallet>::Identity::accept_username(
+			<PeopleWestend as Chain>::RuntimeOrigin::signed(people_westend_bob.clone()),
+			full_username.try_into().unwrap(),
+		));
+
+		assert_expected_events!(
+			PeopleWestend,
+			vec![
+				PeopleRuntimeEvent::Identity(pallet_identity::Event::UsernameSet { .. }) => {},
+			]
+		);
+	});
+
+	// Now, remove the username authority with another privileged XCM call.
+	Westend::execute_with(|| {
+		type Runtime = <Westend as Chain>::Runtime;
+		type RuntimeCall = <Westend as Chain>::RuntimeCall;
+		type RuntimeEvent = <Westend as Chain>::RuntimeEvent;
+		type PeopleCall = <PeopleWestend as Chain>::RuntimeCall;
+		type PeopleRuntime = <PeopleWestend as Chain>::Runtime;
+
+		let remove_username_authority = PeopleCall::Identity(pallet_identity::Call::<
+			PeopleRuntime,
+		>::remove_username_authority {
+			authority: people_westend_runtime::MultiAddress::Id(people_westend_alice.clone()),
+			suffix: b"suffix1".into(),
+		});
+
+		let remove_authority_xcm_msg = RuntimeCall::XcmPallet(pallet_xcm::Call::<Runtime>::send {
+			dest: bx!(VersionedLocation::from(Location::new(0, [Parachain(1004)]))),
+			message: bx!(VersionedXcm::from(Xcm(vec![
+				UnpaidExecution { weight_limit: Unlimited, check_origin: None },
+				Transact { origin_kind, call: remove_username_authority.encode().into() }
+			]))),
+		});
+
+		assert_ok!(remove_authority_xcm_msg.dispatch(origin));
+
+		assert_expected_events!(
+			Westend,
+			vec![
+				RuntimeEvent::XcmPallet(pallet_xcm::Event::Sent { .. }) => {},
+			]
+		);
+	});
+
+	// Final event check.
+	PeopleWestend::execute_with(|| {
+		type RuntimeEvent = <PeopleWestend as Chain>::RuntimeEvent;
+
+		assert_expected_events!(
+			PeopleWestend,
+			vec![
+				RuntimeEvent::Identity(pallet_identity::Event::AuthorityRemoved { .. }) => {},
+				RuntimeEvent::MessageQueue(pallet_message_queue::Event::Processed { success: true, .. }) => {},
+			]
+		);
+	});
+}
+
+#[test]
+fn relay_commands_add_remove_username_authority_wrong_origin() {
+	let people_westend_alice = PeopleWestend::account_id_of(ALICE);
+
+	let origins = vec![
+		(
+			OriginKind::SovereignAccount,
+			<Westend as Chain>::RuntimeOrigin::signed(people_westend_alice.clone()),
+		),
+		(OriginKind::Xcm, GeneralAdminOrigin.into()),
+	];
+
+	for (origin_kind, origin) in origins {
+		Westend::execute_with(|| {
+			type Runtime = <Westend as Chain>::Runtime;
+			type RuntimeCall = <Westend as Chain>::RuntimeCall;
+			type RuntimeEvent = <Westend as Chain>::RuntimeEvent;
+			type PeopleCall = <PeopleWestend as Chain>::RuntimeCall;
+			type PeopleRuntime = <PeopleWestend as Chain>::Runtime;
+
+			let add_username_authority = PeopleCall::Identity(pallet_identity::Call::<
+				PeopleRuntime,
+			>::add_username_authority {
+				authority: people_westend_runtime::MultiAddress::Id(people_westend_alice.clone()),
+				suffix: b"suffix1".into(),
+				allocation: 10,
+			});
+
+			let add_authority_xcm_msg = RuntimeCall::XcmPallet(pallet_xcm::Call::<Runtime>::send {
+				dest: bx!(VersionedLocation::from(Location::new(0, [Parachain(1004)]))),
+				message: bx!(VersionedXcm::from(Xcm(vec![
+					UnpaidExecution { weight_limit: Unlimited, check_origin: None },
+					Transact { origin_kind, call: add_username_authority.encode().into() }
+				]))),
+			});
+
+			assert_ok!(add_authority_xcm_msg.dispatch(origin.clone()));
+			assert_expected_events!(
+				Westend,
+				vec![
+					RuntimeEvent::XcmPallet(pallet_xcm::Event::Sent { .. }) => {},
+				]
+			);
+		});
+
+		// Check events system-parachain-side
+		PeopleWestend::execute_with(|| {
+			assert_expected_events!(PeopleWestend, vec![]);
+		});
+
+		Westend::execute_with(|| {
+			type Runtime = <Westend as Chain>::Runtime;
+			type RuntimeCall = <Westend as Chain>::RuntimeCall;
+			type RuntimeEvent = <Westend as Chain>::RuntimeEvent;
+			type PeopleCall = <PeopleWestend as Chain>::RuntimeCall;
+			type PeopleRuntime = <PeopleWestend as Chain>::Runtime;
+
+			let remove_username_authority = PeopleCall::Identity(pallet_identity::Call::<
+				PeopleRuntime,
+			>::remove_username_authority {
+				authority: people_westend_runtime::MultiAddress::Id(people_westend_alice.clone()),
+				suffix: b"suffix1".into(),
+			});
+
+			let remove_authority_xcm_msg =
+				RuntimeCall::XcmPallet(pallet_xcm::Call::<Runtime>::send {
+					dest: bx!(VersionedLocation::from(Location::new(0, [Parachain(1004)]))),
+					message: bx!(VersionedXcm::from(Xcm(vec![
+						UnpaidExecution { weight_limit: Unlimited, check_origin: None },
+						Transact {
+							origin_kind: OriginKind::SovereignAccount,
+							call: remove_username_authority.encode().into(),
+						}
+					]))),
+				});
+
+			assert_ok!(remove_authority_xcm_msg.dispatch(origin));
+			assert_expected_events!(
+				Westend,
+				vec![
+					RuntimeEvent::XcmPallet(pallet_xcm::Event::Sent { .. }) => {},
+				]
+			);
+		});
+
+		PeopleWestend::execute_with(|| {
+			assert_expected_events!(PeopleWestend, vec![]);
+		});
+	}
+}
diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/mod.rs b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/mod.rs
index 08749b295dc2..b9ad9e3db467 100644
--- a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/mod.rs
+++ b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/mod.rs
@@ -14,4 +14,5 @@
 // limitations under the License.
 
 mod claim_assets;
+mod governance;
 mod teleport;

From 8eac4e887c827ea0bac8915901c305a05457a8d9 Mon Sep 17 00:00:00 2001
From: Dmitry Markin
Date: Fri, 29 Nov 2024 23:28:34 +0200
Subject: [PATCH 40/41] network/libp2p-backend: Suppress warning adding
 already reserved node as reserved (#6703)

Fixes https://github.com/paritytech/polkadot-sdk/issues/6598.

---------

Co-authored-by: GitHub Action
---
 prdoc/pr_6703.prdoc                                 | 7 +++++++
 substrate/client/network/src/protocol_controller.rs | 2 +-
 2 files changed, 8 insertions(+), 1 deletion(-)
 create mode 100644 prdoc/pr_6703.prdoc

diff --git a/prdoc/pr_6703.prdoc b/prdoc/pr_6703.prdoc
new file mode 100644
index 000000000000..2dd0962a3eea
--- /dev/null
+++ b/prdoc/pr_6703.prdoc
@@ -0,0 +1,7 @@
+title: 'network/libp2p-backend: Suppress warning adding already reserved node as reserved'
+doc:
+- audience: Node Dev
+  description: Fixes https://github.com/paritytech/polkadot-sdk/issues/6598.
+crates:
+- name: sc-network
+  bump: patch
diff --git a/substrate/client/network/src/protocol_controller.rs b/substrate/client/network/src/protocol_controller.rs
index af7adb50907f..11f5321294d0 100644
--- a/substrate/client/network/src/protocol_controller.rs
+++ b/substrate/client/network/src/protocol_controller.rs
@@ -464,7 +464,7 @@ impl ProtocolController {
 	/// maintain connections with such peers.
 	fn on_add_reserved_peer(&mut self, peer_id: PeerId) {
 		if self.reserved_nodes.contains_key(&peer_id) {
-			warn!(
+			debug!(
 				target: LOG_TARGET,
 				"Trying to add an already reserved node {peer_id} as reserved on {:?}.",
 				self.set_id,

From 5e0bcb0ee9788b7bb16ccfbda4fdc153b24c6386 Mon Sep 17 00:00:00 2001
From: eskimor
Date: Sat, 30 Nov 2024 00:31:27 +0100
Subject: [PATCH 41/41] Let's be a bit less strict here. (#6662)

This might actually happen in non malicious cases.

Co-authored-by: eskimor
---
 polkadot/node/network/collator-protocol/src/error.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/polkadot/node/network/collator-protocol/src/error.rs b/polkadot/node/network/collator-protocol/src/error.rs
index ae7f9a8c1fbc..598cdcf43900 100644
--- a/polkadot/node/network/collator-protocol/src/error.rs
+++ b/polkadot/node/network/collator-protocol/src/error.rs
@@ -122,7 +122,7 @@ impl SecondingError {
 			PersistedValidationDataMismatch |
 				CandidateHashMismatch |
 				RelayParentMismatch |
-				Duplicate | ParentHeadDataMismatch |
+				ParentHeadDataMismatch |
 				InvalidCoreIndex(_, _) |
 				InvalidSessionIndex(_, _) |
 				InvalidReceiptVersion(_)