diff --git a/.config/lychee.toml b/.config/lychee.toml
index 1de9fcd559dd..b7bb6f0ce495 100644
--- a/.config/lychee.toml
+++ b/.config/lychee.toml
@@ -32,7 +32,6 @@ exclude = [
"https://github.com/paritytech/polkadot-sdk/substrate/frame/timestamp",
"https://github.com/paritytech/substrate/frame/fast-unstake",
"https://github.com/zkcrypto/bls12_381/blob/e224ad4ea1babfc582ccd751c2bf128611d10936/src/test-data/mod.rs",
- "https://polkadot-try-runtime-node.parity-chains.parity.io/",
"https://polkadot.network/the-path-of-a-parachain-block/",
"https://research.web3.foundation/en/latest/polkadot/NPoS/3.%20Balancing.html",
"https://research.web3.foundation/en/latest/polkadot/Token%20Economics.html#inflation-model",
@@ -41,6 +40,7 @@ exclude = [
"https://research.web3.foundation/en/latest/polkadot/overview/2-token-economics.html#inflation-model",
"https://research.web3.foundation/en/latest/polkadot/slashing/npos.html",
"https://rpc.polkadot.io/",
+ "https://try-runtime.polkadot.io/",
"https://w3f.github.io/parachain-implementers-guide/node/approval/approval-distribution.html",
"https://w3f.github.io/parachain-implementers-guide/node/index.html",
"https://w3f.github.io/parachain-implementers-guide/protocol-chain-selection.html",
diff --git a/.config/zepter.yaml b/.config/zepter.yaml
index 24441e90b1a0..7a67ba2695cf 100644
--- a/.config/zepter.yaml
+++ b/.config/zepter.yaml
@@ -27,7 +27,7 @@ workflows:
]
# The umbrella crate uses more features, so we need to check those too:
check_umbrella:
- - [ $check.0, '--features=serde,experimental,runtime,with-tracing,tuples-96,with-tracing', '-p=polkadot-sdk' ]
+ - [ $check.0, '--features=serde,experimental,riscv,runtime,with-tracing,tuples-96,with-tracing', '-p=polkadot-sdk' ]
# Same as `check_*`, but with the `--fix` flag.
default:
- [ $check.0, '--fix' ]
diff --git a/.github/command-screnshot.png b/.github/command-screnshot.png
deleted file mode 100644
index 1451fabca8b9..000000000000
Binary files a/.github/command-screnshot.png and /dev/null differ
diff --git a/.github/commands-readme.md b/.github/commands-readme.md
deleted file mode 100644
index ce4e0fd0d789..000000000000
--- a/.github/commands-readme.md
+++ /dev/null
@@ -1,276 +0,0 @@
-# Running commands
-
-Command bot has been migrated: it is no longer a comment parser, but a GitHub Action that runs as a [`workflow_dispatch`](https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#workflow_dispatch) event.
-
-## How to run an action
-
-To run an action, go to the [_actions tab_](https://github.com/paritytech/polkadot-sdk/actions) and pick the one you want to run.
-
-The current available command actions are:
-
-- [Command FMT](https://github.com/paritytech/polkadot-sdk/actions/workflows/command-fmt.yml)
-- [Command Update UI](https://github.com/paritytech/polkadot-sdk/actions/workflows/command-update-ui.yml)
-- [Command Prdoc](https://github.com/paritytech/polkadot-sdk/actions/workflows/command-prdoc.yml)
-- [Command Sync](https://github.com/paritytech/polkadot-sdk/actions/workflows/command-sync.yml)
-- [Command Bench](https://github.com/paritytech/polkadot-sdk/actions/workflows/command-bench.yml)
-- [Command Bench All](https://github.com/paritytech/polkadot-sdk/actions/workflows/command-bench-all.yml)
-- [Command Bench Overhead](https://github.com/paritytech/polkadot-sdk/actions/workflows/command-bench-overhead.yml)
-
-You need to select the action, and click on the dropdown that says: `Run workflow`. It is located in the upper right.
-
-If this dropdown is not visible, you may not have permission to run the action. Contact IT for help.
-
-![command screenshot](command-screnshot.png)
-
-Each command has the same two required inputs, but may accept more.
-
-GitHub's official documentation: [Manually running a workflow](https://docs.github.com/en/actions/using-workflows/manually-running-a-workflow)
-
-#### Running from CLI
-
-You can use [`gh cli`](https://cli.github.com/) to run the commands too. Refer to the [`gh workflow run`](https://cli.github.com/manual/gh_workflow_run) section of the documentation for more information.
-
-### Number of the Pull Request
-
-The number of the pull request. Required so the action can fetch the correct branch and comment if it fails.
-
-## Action configurations
-
-### FMT
-
-For FMT you only need the PR number.
-
-You can use the following [`gh cli`](https://cli.github.com/) inside the repo:
-
-```bash
-gh workflow run command-fmt.yml -f pr=1000
-```
-
-### Update UI
-
-For Update UI you only need the PR number.
-
-You can use the following [`gh cli`](https://cli.github.com/) inside the repo:
-
-```bash
-gh workflow run command-update-ui.yml -f pr=1000
-```
-
-### Bench
-
-Runs `benchmark pallet` or `benchmark overhead` against your PR and commits the updated weights back.
-
-Possible combinations, based on the `benchmark` dropdown:
-
-- `substrate-pallet`: Pallet Benchmark for Substrate for a specific pallet
- - Requires `Subcommand` to be `pallet`
- - Requires `Runtime` to be `dev`
- - Requires field `Pallet` to have an input that applies to `^([a-z_]+)([:]{2}[a-z_]+)?$`
- - Requires `Target Directory` to be `substrate`
-- `polkadot-pallet`: Pallet Benchmark for Polkadot for a specific pallet
- - Requires `Subcommand` to be one of the following:
- - `pallet`
- - `xcm`
- - Requires `Runtime` to be one of the following:
- - `rococo`
- - `westend`
- - Requires field `Pallet` to have an input that applies to `^([a-z_]+)([:]{2}[a-z_]+)?$`
- - Requires `Target Directory` to be `polkadot`
-- `cumulus-assets`: Pallet Benchmark for Cumulus assets
- - Requires `Subcommand` to be one of the following:
- - `pallet`
- - `xcm`
- - Requires `Runtime` to be one of the following:
- - `asset-hub-westend`
- - `asset-hub-rococo`
- - Requires field `Pallet` to have an input that applies to `^([a-z_]+)([:]{2}[a-z_]+)?$`
- - Requires `Runtime Dir` to be `assets`
- - Requires `Target Directory` to be `cumulus`
-- `cumulus-collectives`: Pallet Benchmark for Cumulus collectives
- - Requires `Subcommand` to be one of the following:
- - `pallet`
- - `xcm`
- - Requires `Runtime` to be `collectives-westend`
- - Requires field `Pallet` to have an input that applies to `^([a-z_]+)([:]{2}[a-z_]+)?$`
- - Requires `Runtime Dir` to be `collectives`
- - Requires `Target Directory` to be `cumulus`
-- `cumulus-coretime`: Pallet Benchmark for Cumulus coretime
- - Requires `Subcommand` to be one of the following:
- - `pallet`
- - `xcm`
- - Requires `Runtime` to be one of the following:
- - `coretime-rococo`
- - `coretime-westend`
- - Requires field `Pallet` to have an input that applies to `^([a-z_]+)([:]{2}[a-z_]+)?$`
- - Requires `Runtime Dir` to be `coretime`
- - Requires `Target Directory` to be `cumulus`
-- `cumulus-bridge-hubs`: Pallet Benchmark for Cumulus bridge-hubs
- - Requires `Subcommand` to be one of the following:
- - `pallet`
- - `xcm`
- - Requires `Runtime` to be one of the following:
- - `bridge-hub-rococo`
- - `bridge-hub-westend`
- - Requires field `Pallet` to have an input that applies to `^([a-z_]+)([:]{2}[a-z_]+)?$`
- - Requires `Runtime Dir` to be `bridge-hub`
- - Requires `Target Directory` to be `cumulus`
-- `cumulus-contracts`: Pallet Benchmark for Cumulus contracts
- - Requires `Subcommand` to be one of the following:
- - `pallet`
- - `xcm`
-  - Requires `Runtime` to be `contracts-rococo`
- - Requires field `Pallet` to have an input that applies to `^([a-z_]+)([:]{2}[a-z_]+)?$`
- - Requires `Runtime Dir` to be `contracts`
- - Requires `Target Directory` to be `cumulus`
-- `cumulus-glutton`: Pallet Benchmark for Cumulus glutton
- - Requires `Subcommand` to be `pallet`
- - Requires `Runtime` to be one of the following:
- - `glutton-westend`
- - `glutton-westend-dev-1300`
- - Requires field `Pallet` to have an input that applies to `^([a-z_]+)([:]{2}[a-z_]+)?$`
- - Requires `Runtime Dir` to be `glutton`
- - Requires `Target Directory` to be `cumulus`
-- `cumulus-starters`: Pallet Benchmark for Cumulus starters
- - Requires `Subcommand` to be one of the following:
- - `pallet`
- - `xcm`
- - Requires `Runtime` to be one of the following:
- - `seedling`
- - `shell`
- - Requires field `Pallet` to have an input that applies to `^([a-z_]+)([:]{2}[a-z_]+)?$`
- - Requires `Runtime Dir` to be `starters`
- - Requires `Target Directory` to be `cumulus`
-- `cumulus-people`: Pallet Benchmark for Cumulus people
- - Requires `Subcommand` to be one of the following:
- - `pallet`
- - `xcm`
- - Requires `Runtime` to be one of the following:
- - `people-westend`
- - `people-rococo`
- - Requires field `Pallet` to have an input that applies to `^([a-z_]+)([:]{2}[a-z_]+)?$`
- - Requires `Runtime Dir` to be `people`
- - Requires `Target Directory` to be `cumulus`
-- `cumulus-testing`: Pallet Benchmark for Cumulus testing
- - Requires `Subcommand` to be one of the following:
- - `pallet`
- - `xcm`
- - Requires `Runtime` to be one of the following:
- - `penpal`
- - `rococo-parachain`
- - Requires field `Pallet` to have an input that applies to `^([a-z_]+)([:]{2}[a-z_]+)?$`
- - Requires `Runtime Dir` to be `testing`
- - Requires `Target Directory` to be `cumulus`
-
-You can use the following [`gh cli`](https://cli.github.com/) inside the repo:
-
-```bash
-gh workflow run command-bench.yml -f pr=1000 -f benchmark=polkadot-pallet -f subcommand=pallet -f runtime=rococo -f pallet=pallet_name -f target_dir=polkadot
-```
-
-### Bench-all
-
-This is a wrapper to run `bench` for all pallets.
-
-Possible combinations, based on the `benchmark` dropdown:
-
-- `pallet`: Benchmark for Substrate/Polkadot/Cumulus/Trappist for a specific pallet
- - Requires field `Pallet` to have an input that applies to `^([a-z_]+)([:]{2}[a-z_]+)?$`
-- `substrate`: Pallet + Overhead + Machine Benchmark for Substrate for all pallets
- - Requires `Target Directory` to be `substrate`
-- `polkadot`: Pallet + Overhead Benchmark for Polkadot
- - Requires `Runtime` to be one of the following:
- - `rococo`
- - `westend`
- - Requires `Target Directory` to be `polkadot`
-- `cumulus`: Pallet Benchmark for Cumulus
- - Requires `Runtime` to be one of the following:
- - `rococo`
- - `westend`
- - `asset-hub-kusama`
- - `asset-hub-polkadot`
- - `asset-hub-rococo`
- - `asset-hub-westend`
- - `bridge-hub-kusama`
- - `bridge-hub-polkadot`
- - `bridge-hub-rococo`
- - `bridge-hub-westend`
- - `collectives-polkadot`
- - `collectives-westend`
- - `coretime-rococo`
- - `coretime-westend`
- - `contracts-rococo`
- - `glutton-kusama`
- - `glutton-westend`
- - `people-rococo`
- - `people-westend`
- - Requires `Target Directory` to be `cumulus`
-
-You can use the following [`gh cli`](https://cli.github.com/) inside the repo:
-
-```bash
-gh workflow run command-bench-all.yml -f pr=1000 -f benchmark=pallet -f pallet=pallet_name -f target_dir=polkadot -f runtime=rococo
-```
-
-### Bench-overhead
-
-Runs `benchmark overhead` and commits the results back to the PR.
-
-Possible combinations, based on the `benchmark` dropdown:
-
-- `default`: Runs `benchmark overhead` and commits the updated `extrinsic_weights.rs` files back to the PR
- - Requires `Runtime` to be one of the following:
- - `rococo`
- - `westend`
- - Requires `Target directory` to be `polkadot`
-- `substrate`: Runs `benchmark overhead` and commits the updated `extrinsic_weights.rs` files back to the PR
- - Requires `Target directory` to be `substrate`
-- `cumulus`: Runs `benchmark overhead` and commits the updated `extrinsic_weights.rs` files back to the PR
- - Requires `Runtime` to be one of the following:
- - `asset-hub-rococo`
- - `asset-hub-westend`
- - Requires `Target directory` to be `cumulus`
-
-You can use the following [`gh cli`](https://cli.github.com/) inside the repo:
-
-```bash
-gh workflow run command-bench-overhead.yml -f pr=1000 -f benchmark=substrate -f runtime=rococo -f target_dir=substrate
-```
-
-### PrDoc
-
-Generates a PrDoc with the crates section populated from all modified crates.
-
-Options:
-- `pr`: The PR number to generate the PrDoc for.
-- `audience`: The audience whom the changes concern.
-- `bump`: A default bump level for all crates. The PrDoc will likely need to be edited to reflect the actual changes after generation.
-- `overwrite`: Whether to overwrite any existing PrDoc.
-
-### Sync
-
-Runs sync and commits the results back to the PR.
-
-Possible combinations, based on the `chain` and `sync-type` dropdowns:
-
-- `chain`
- - Requires one of the following:
- - `rococo`
- - `westend`
-- `sync-type`
- - Requires one of the following:
- - `warp`
- - `full`
- - `fast`
- - `fast-unsafe`
-
-You can use the following [`gh cli`](https://cli.github.com/) inside the repo:
-
-```bash
-gh workflow run command-sync.yml -f pr=1000 -f chain=rococo -f sync-type=full
-```
-
-## How to modify an action
-
-If you want to modify an action and test it, you can do so by pushing your changes and then selecting your branch in the `Use workflow from` option.
-
-This will use the workflow file from the selected branch.
diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md
new file mode 120000
index 000000000000..7b6b3498755f
--- /dev/null
+++ b/.github/pull_request_template.md
@@ -0,0 +1 @@
+../docs/contributor/PULL_REQUEST_TEMPLATE.md
\ No newline at end of file
diff --git a/.github/scripts/cmd/_help.py b/.github/scripts/cmd/_help.py
new file mode 100644
index 000000000000..8ad49dad8461
--- /dev/null
+++ b/.github/scripts/cmd/_help.py
@@ -0,0 +1,26 @@
+import argparse
+
+"""
+
+Custom help action for argparse: it prints the help message for the main parser and all subparsers.
+
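+Usage (as wired up in cmd.py):
+
+    parser = argparse.ArgumentParser(add_help=False)
+    parser.add_argument('--help', action=_HelpAction)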
+"""
+
+
+class _HelpAction(argparse._HelpAction):
+ def __call__(self, parser, namespace, values, option_string=None):
+ parser.print_help()
+
+ # retrieve subparsers from parser
+ subparsers_actions = [
+ action for action in parser._actions
+ if isinstance(action, argparse._SubParsersAction)]
+    # there will probably only be one subparsers_action,
+    # but better safe than sorry
+ for subparsers_action in subparsers_actions:
+ # get all subparsers and print help
+ for choice, subparser in subparsers_action.choices.items():
+ print("\n### Command '{}'".format(choice))
+ print(subparser.format_help())
+
+ parser.exit()
diff --git a/.github/scripts/cmd/cmd.py b/.github/scripts/cmd/cmd.py
new file mode 100755
index 000000000000..63bd6a2795aa
--- /dev/null
+++ b/.github/scripts/cmd/cmd.py
@@ -0,0 +1,196 @@
+#!/usr/bin/env python3
+
+import os
+import sys
+import json
+import argparse
+import _help
+
+_HelpAction = _help._HelpAction
+
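+# runtimes-matrix.json describes every runtime the bot knows about:
+# name, package, path, uri and is_relay (see .github/workflows/runtimes-matrix.json).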
+f = open('.github/workflows/runtimes-matrix.json', 'r')
+runtimesMatrix = json.load(f)
+
+runtimeNames = list(map(lambda x: x['name'], runtimesMatrix))
+
+common_args = {
+    '--continue-on-fail': {"action": "store_true", "help": "Don't exit(1) on a failed command; continue with the next steps."},
+    '--quiet': {"action": "store_true", "help": "Don't print start/end/failed messages in the PR"},
+    '--clean': {"action": "store_true", "help": "Clean up the previous bot's & author's comments in the PR"},
+    '--image': {"help": "Override the docker image, e.g. '--image docker.io/paritytech/ci-unified:latest'"},
+}
+
+parser = argparse.ArgumentParser(prog="/cmd ", description='A command runner for polkadot-sdk repo', add_help=False)
+parser.add_argument('--help', action=_HelpAction, help='help for help if you need some help') # help for help
+for arg, config in common_args.items():
+ parser.add_argument(arg, **config)
+
+subparsers = parser.add_subparsers(help='a command to run', dest='command')
+
+"""
+BENCH
+"""
+
+bench_example = '''**Examples**:
+ Runs all benchmarks
+ %(prog)s
+
+    Runs benchmarks for pallet_balances and pallet_multisig for all runtimes which have these pallets. **--quiet** makes it output nothing to the PR but reactions
+ %(prog)s --pallet pallet_balances pallet_xcm_benchmarks::generic --quiet
+
+ Runs bench for all pallets for westend runtime and continues even if some benchmarks fail
+ %(prog)s --runtime westend --continue-on-fail
+
+    Does not output anything and cleans up the previous bot's & author's command-triggering comments in the PR
+ %(prog)s --runtime westend rococo --pallet pallet_balances pallet_multisig --quiet --clean
+'''
+
+parser_bench = subparsers.add_parser('bench', help='Runs benchmarks', epilog=bench_example, formatter_class=argparse.RawDescriptionHelpFormatter)
+
+for arg, config in common_args.items():
+ parser_bench.add_argument(arg, **config)
+
+parser_bench.add_argument('--runtime', help='Runtime(s) space separated', choices=runtimeNames, nargs='*', default=runtimeNames)
+parser_bench.add_argument('--pallet', help='Pallet(s) space separated', nargs='*', default=[])
+
+"""
+FMT
+"""
+parser_fmt = subparsers.add_parser('fmt', help='Formats code (cargo +nightly-VERSION fmt) and configs (taplo format)')
+for arg, config in common_args.items():
+ parser_fmt.add_argument(arg, **config)
+
+"""
+Update UI
+"""
+parser_ui = subparsers.add_parser('update-ui', help='Updates UI tests')
+for arg, config in common_args.items():
+ parser_ui.add_argument(arg, **config)
+
+
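+# parse_known_args: unrecognized flags from the PR comment end up in `unknown`
+# instead of aborting the run.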
+args, unknown = parser.parse_known_args()
+
+print(f'args: {args}')
+
+if args.command == 'bench':
+ runtime_pallets_map = {}
+ failed_benchmarks = {}
+ successful_benchmarks = {}
+
+ profile = "release"
+
+ print(f'Provided runtimes: {args.runtime}')
+ # convert to mapped dict
+ runtimesMatrix = list(filter(lambda x: x['name'] in args.runtime, runtimesMatrix))
+ runtimesMatrix = {x['name']: x for x in runtimesMatrix}
+    print(f'Filtered runtimes: {runtimesMatrix}')
+
+ # loop over remaining runtimes to collect available pallets
+ for runtime in runtimesMatrix.values():
+ os.system(f"forklift cargo build -p {runtime['package']} --profile {profile} --features runtime-benchmarks")
+ print(f'-- listing pallets for benchmark for {runtime["name"]}')
+ wasm_file = f"target/{profile}/wbuild/{runtime['package']}/{runtime['package'].replace('-', '_')}.wasm"
+ output = os.popen(
+ f"frame-omni-bencher v1 benchmark pallet --no-csv-header --no-storage-info --no-min-squares --no-median-slopes --all --list --runtime={wasm_file}").read()
+ raw_pallets = output.strip().split('\n')
+
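+        # `--list` prints one comma-separated row per benchmark, with the pallet
+        # name in the first column; keep only the unique pallet names.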
+ all_pallets = set()
+ for pallet in raw_pallets:
+ if pallet:
+ all_pallets.add(pallet.split(',')[0].strip())
+
+ pallets = list(all_pallets)
+        print(f"Pallets in {runtime['name']}: {pallets}")
+ runtime_pallets_map[runtime['name']] = pallets
+
+ # filter out only the specified pallets from collected runtimes/pallets
+ if args.pallet:
+ print(f'Pallet: {args.pallet}')
+ new_pallets_map = {}
+ # keep only specified pallets if they exist in the runtime
+ for runtime in runtime_pallets_map:
+ if set(args.pallet).issubset(set(runtime_pallets_map[runtime])):
+ new_pallets_map[runtime] = args.pallet
+
+ runtime_pallets_map = new_pallets_map
+
+        print(f'Filtered runtimes & pallets: {runtime_pallets_map}')
+
+ if not runtime_pallets_map:
+ if args.pallet and not args.runtime:
+ print(f"No pallets {args.pallet} found in any runtime")
+ elif args.runtime and not args.pallet:
+ print(f"{args.runtime} runtime does not have any pallets")
+ elif args.runtime and args.pallet:
+ print(f"No pallets {args.pallet} found in {args.runtime}")
+ else:
+ print('No runtimes found')
+ sys.exit(1)
+
+ header_path = os.path.abspath('./substrate/HEADER-APACHE2')
+
+ for runtime in runtime_pallets_map:
+ for pallet in runtime_pallets_map[runtime]:
+ config = runtimesMatrix[runtime]
+ print(f'-- config: {config}')
+ if runtime == 'dev':
+ # to support sub-modules (https://github.com/paritytech/command-bot/issues/275)
+ search_manifest_path = f"cargo metadata --locked --format-version 1 --no-deps | jq -r '.packages[] | select(.name == \"{pallet.replace('_', '-')}\") | .manifest_path'"
+ print(f'-- running: {search_manifest_path}')
+ manifest_path = os.popen(search_manifest_path).read()
+ if not manifest_path:
+ print(f'-- pallet {pallet} not found in dev runtime')
+                    sys.exit(1)
+ package_dir = os.path.dirname(manifest_path)
+ print(f'-- package_dir: {package_dir}')
+ print(f'-- manifest_path: {manifest_path}')
+ output_path = os.path.join(package_dir, "src", "weights.rs")
+ else:
+ default_path = f"./{config['path']}/src/weights"
+ xcm_path = f"./{config['path']}/src/weights/xcm"
+ output_path = default_path if not pallet.startswith("pallet_xcm_benchmarks") else xcm_path
+ print(f'-- benchmarking {pallet} in {runtime} into {output_path}')
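+            # Standard weight-generation flags: 50 steps, 20 repeats, compiled wasm execution.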
+ cmd = f"frame-omni-bencher v1 benchmark pallet --extrinsic=* --runtime=target/{profile}/wbuild/{config['package']}/{config['package'].replace('-', '_')}.wasm --pallet={pallet} --header={header_path} --output={output_path} --wasm-execution=compiled --steps=50 --repeat=20 --heap-pages=4096 --no-storage-info --no-min-squares --no-median-slopes"
+ print(f'-- Running: {cmd}')
+ status = os.system(cmd)
+ if status != 0 and not args.continue_on_fail:
+ print(f'Failed to benchmark {pallet} in {runtime}')
+ sys.exit(1)
+
+ # Otherwise collect failed benchmarks and print them at the end
+ # push failed pallets to failed_benchmarks
+ if status != 0:
+ failed_benchmarks[f'{runtime}'] = failed_benchmarks.get(f'{runtime}', []) + [pallet]
+ else:
+ successful_benchmarks[f'{runtime}'] = successful_benchmarks.get(f'{runtime}', []) + [pallet]
+
+ if failed_benchmarks:
+ print('❌ Failed benchmarks of runtimes/pallets:')
+ for runtime, pallets in failed_benchmarks.items():
+ print(f'-- {runtime}: {pallets}')
+
+ if successful_benchmarks:
+ print('✅ Successful benchmarks of runtimes/pallets:')
+ for runtime, pallets in successful_benchmarks.items():
+ print(f'-- {runtime}: {pallets}')
+
+elif args.command == 'fmt':
+ command = f"cargo +nightly fmt"
+ print(f'Formatting with `{command}`')
+ nightly_status = os.system(f'{command}')
+ taplo_status = os.system('taplo format --config .config/taplo.toml')
+
+ if (nightly_status != 0 or taplo_status != 0) and not args.continue_on_fail:
+ print('❌ Failed to format code')
+ sys.exit(1)
+
+elif args.command == 'update-ui':
+ command = 'sh ./scripts/update-ui-tests.sh'
+ print(f'Updating ui with `{command}`')
+    status = os.system(command)
+
+ if status != 0 and not args.continue_on_fail:
+        print('❌ Failed to update UI tests')
+ sys.exit(1)
+
+print('🚀 Done')
diff --git a/.github/scripts/generate-prdoc.py b/.github/scripts/generate-prdoc.py
index b7b2e6f970fa..ba7def20fcb9 100644
--- a/.github/scripts/generate-prdoc.py
+++ b/.github/scripts/generate-prdoc.py
@@ -96,6 +96,7 @@ def create_prdoc(pr, audience, title, description, patch, bump, force):
# write the parsed PR documentation back to the file
with open(path, "w") as f:
yaml.dump(prdoc, f)
+ print(f"PrDoc for PR {pr} written to {path}")
def parse_args():
parser = argparse.ArgumentParser()
diff --git a/.github/scripts/generate-readmes.py b/.github/scripts/generate-readmes.py
new file mode 100755
index 000000000000..f838eaa29a74
--- /dev/null
+++ b/.github/scripts/generate-readmes.py
@@ -0,0 +1,136 @@
+#!/usr/bin/env python3
+
+"""
+A script to generate READMEs for all public crates,
+if they do not already have one.
+
+It relies on functions from the `check-workspace.py` script.
+
+The resulting README is based on a template defined below,
+and includes the crate name, description, license,
+and, optionally, the SDK release version.
+
+# Example
+
+```sh
+python3 -m pip install toml
+.github/scripts/generate-readmes.py . --sdk-version 1.15.0
+```
+"""
+
+import os
+import toml
+import importlib
+import argparse
+
+check_workspace = importlib.import_module("check-workspace")
+
+README_TEMPLATE = """
diff --git a/.github/workflows/checks-quick.yml b/.github/workflows/checks-quick.yml
index ee4bd62a558d..ee5ac31e9caa 100644
--- a/.github/workflows/checks-quick.yml
+++ b/.github/workflows/checks-quick.yml
@@ -100,6 +100,8 @@ jobs:
--exclude
"substrate/frame/contracts/fixtures/build"
"substrate/frame/contracts/fixtures/contracts/common"
+ "substrate/frame/revive/fixtures/build"
+ "substrate/frame/revive/fixtures/contracts/common"
- name: deny git deps
run: python3 .github/scripts/deny-git-deps.py .
check-markdown:
@@ -154,3 +156,28 @@ jobs:
git diff
exit 1
fi
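+  # Greps the tree for literal "FAIL-CI" markers and fails if any are found.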
+ check-fail-ci:
+ runs-on: ubuntu-latest
+ container:
+ # there's no "rg" in ci-unified, and tools is a smaller image anyway
+ image: "paritytech/tools:latest"
+ # paritytech/tools uses "nonroot" user by default, which doesn't have enough
+ # permissions to create GHA context
+ options: --user root
+ steps:
+ - name: Fetch latest code
+ uses: actions/checkout@v4
+ - name: Check
+ run: |
+ set +e
+ rg --line-number --hidden --type rust --glob '!{.git,target}' "$ASSERT_REGEX" .; exit_status=$?
+ if [ $exit_status -eq 0 ]; then
+ echo "$ASSERT_REGEX was found, exiting with 1";
+ exit 1;
+ else
+ echo "No $ASSERT_REGEX was found, exiting with 0";
+ exit 0;
+ fi
+ env:
+ ASSERT_REGEX: "FAIL-CI"
+ GIT_DEPTH: 1
diff --git a/.github/workflows/cmd.yml b/.github/workflows/cmd.yml
new file mode 100644
index 000000000000..dac46cf435a6
--- /dev/null
+++ b/.github/workflows/cmd.yml
@@ -0,0 +1,411 @@
+name: Command
+
+on:
+ issue_comment: # listen for comments on issues
+ types: [ created ]
+
+permissions: # allow the action to comment on the PR
+ contents: write
+ issues: write
+ pull-requests: write
+ actions: read
+
+jobs:
+ is-org-member:
+ if: startsWith(github.event.comment.body, '/cmd')
+ runs-on: ubuntu-latest
+ outputs:
+ member: ${{ steps.is-member.outputs.result }}
+ steps:
+ - name: Generate token
+ id: generate_token
+ uses: tibdex/github-app-token@v2.1.0
+ with:
+ app_id: ${{ secrets.CMD_BOT_APP_ID }}
+ private_key: ${{ secrets.CMD_BOT_APP_KEY }}
+
+ - name: Check if user is a member of the organization
+ id: is-member
+ uses: actions/github-script@v7
+ with:
+ github-token: ${{ steps.generate_token.outputs.token }}
+ result-encoding: string
+ script: |
+ const fs = require("fs");
+ try {
+ const org = '${{ github.event.repository.owner.login }}';
+ const username = '${{ github.event.comment.user.login }}';
+
+ const membership = await github.rest.orgs.checkMembershipForUser({
+ org: org,
+ username: username
+ });
+
+ console.log(membership, membership.status, membership.status === 204);
+
+ if (membership.status === 204) {
+ return 'true';
+ } else {
+ console.log(membership);
+ fs.appendFileSync(process.env["GITHUB_STEP_SUMMARY"], `${membership.data && membership.data.message || 'Unknown error happened, please check logs'}`);
+ }
+ } catch (error) {
+ console.log(error)
+ }
+
+ return 'false';
+
+ reject-non-members:
+ needs: is-org-member
+ if: ${{ startsWith(github.event.comment.body, '/cmd') && needs.is-org-member.outputs.member != 'true' }}
+ runs-on: ubuntu-latest
+ steps:
+ - name: Add reaction to rejected comment
+ uses: actions/github-script@v7
+ with:
+ github-token: ${{ secrets.GITHUB_TOKEN }}
+ script: |
+ github.rest.reactions.createForIssueComment({
+ comment_id: ${{ github.event.comment.id }},
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ content: 'confused'
+ })
+
+ - name: Comment PR (Rejected)
+ uses: actions/github-script@v7
+ with:
+ github-token: ${{ secrets.GITHUB_TOKEN }}
+ script: |
+ github.rest.issues.createComment({
+ issue_number: context.issue.number,
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+              body: `Sorry, only members of the ${{ github.event.repository.owner.login }} organization can run commands.`
+ })
+
+ acknowledge:
+ needs: is-org-member
+ if: ${{ startsWith(github.event.comment.body, '/cmd') && needs.is-org-member.outputs.member == 'true' }}
+ runs-on: ubuntu-latest
+ steps:
+ - name: Add reaction to triggered comment
+ uses: actions/github-script@v7
+ with:
+ github-token: ${{ secrets.GITHUB_TOKEN }}
+ script: |
+ github.rest.reactions.createForIssueComment({
+ comment_id: ${{ github.event.comment.id }},
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ content: 'eyes'
+ })
+
+ clean:
+ needs: is-org-member
+ runs-on: ubuntu-latest
+ steps:
+ - name: Clean previous comments
+ if: ${{ startsWith(github.event.comment.body, '/cmd') && contains(github.event.comment.body, '--clean') && needs.is-org-member.outputs.member == 'true' }}
+ uses: actions/github-script@v7
+ with:
+ github-token: ${{ secrets.GITHUB_TOKEN }}
+ script: |
+ github.rest.issues.listComments({
+ issue_number: context.issue.number,
+ owner: context.repo.owner,
+ repo: context.repo.repo
+ }).then(comments => {
+ for (let comment of comments.data) {
+ console.log(comment)
+ if (
+ ${{ github.event.comment.id }} !== comment.id &&
+ (
+ (
+ (
+ comment.body.startsWith('Command') ||
+                      comment.body.startsWith('<details><summary>Command') ||
+ comment.body.startsWith('Sorry, only ')
+ ) && comment.user.type === 'Bot'
+ ) ||
+ (comment.body.startsWith('/cmd') && comment.user.login === context.actor)
+ )
+ ) {
+ github.rest.issues.deleteComment({
+ comment_id: comment.id,
+ owner: context.repo.owner,
+ repo: context.repo.repo
+ })
+ }
+ }
+ })
+ help:
+ needs: [ clean, is-org-member ]
+ if: ${{ startsWith(github.event.comment.body, '/cmd') && contains(github.event.comment.body, '--help') && needs.is-org-member.outputs.member == 'true' }}
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+
+ - name: Get command
+ uses: actions-ecosystem/action-regex-match@v2
+ id: get-pr-comment
+ with:
+ text: ${{ github.event.comment.body }}
+ regex: '^(\/cmd )([-\/\s\w.=:]+)$' # see explanation in docs/contributor/commands-readme.md#examples
+
+ - name: Save output of help
+ id: help
+ env:
+ CMD: ${{ steps.get-pr-comment.outputs.group2 }} # to avoid "" around the command
+ run: |
+        echo 'help<<EOF' >> $GITHUB_OUTPUT
+ python3 .github/scripts/cmd/cmd.py $CMD >> $GITHUB_OUTPUT
+ echo 'EOF' >> $GITHUB_OUTPUT
+
+ - name: Comment PR (Help)
+ uses: actions/github-script@v7
+ with:
+ github-token: ${{ secrets.GITHUB_TOKEN }}
+ script: |
+ github.rest.issues.createComment({
+ issue_number: context.issue.number,
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ body: `Command help:
+
+ \`\`\`
+ ${{ steps.help.outputs.help }}
+ \`\`\`
+
+ `
+ })
+
+ - name: Add confused reaction on failure
+ uses: actions/github-script@v7
+ if: ${{ failure() }}
+ with:
+ github-token: ${{ secrets.GITHUB_TOKEN }}
+ script: |
+ github.rest.reactions.createForIssueComment({
+ comment_id: ${{ github.event.comment.id }},
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ content: 'confused'
+ })
+
+ - name: Add 👍 reaction on success
+ uses: actions/github-script@v7
+ if: ${{ !failure() }}
+ with:
+ github-token: ${{ secrets.GITHUB_TOKEN }}
+ script: |
+ github.rest.reactions.createForIssueComment({
+ comment_id: ${{ github.event.comment.id }},
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ content: '+1'
+ })
+
+ set-image:
+ needs: [ clean, is-org-member ]
+ if: ${{ startsWith(github.event.comment.body, '/cmd') && !contains(github.event.comment.body, '--help') && needs.is-org-member.outputs.member == 'true' }}
+ runs-on: ubuntu-latest
+ outputs:
+ IMAGE: ${{ steps.set-image.outputs.IMAGE }}
+ RUNNER: ${{ steps.set-image.outputs.RUNNER }}
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+
+ - id: set-image
+ run: |
+ BODY=$(echo "${{ github.event.comment.body }}" | xargs)
+ IMAGE_OVERRIDE=$(echo $BODY | grep -oe 'docker.io/paritytech/ci-unified:.*\s' | xargs)
+
+ cat .github/env >> $GITHUB_OUTPUT
+
+ if [ -n "$IMAGE_OVERRIDE" ]; then
+ echo "IMAGE=$IMAGE_OVERRIDE" >> $GITHUB_OUTPUT
+ fi
+
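+          # bench needs dedicated benchmark hardware and update-ui a beefier runner;
+          # everything else fits on the default GitHub runner.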
+ if [[ $BODY == "/cmd bench"* ]]; then
+ echo "RUNNER=arc-runners-polkadot-sdk-benchmark" >> $GITHUB_OUTPUT
+ elif [[ $BODY == "/cmd update-ui"* ]]; then
+ echo "RUNNER=arc-runners-polkadot-sdk-beefy" >> $GITHUB_OUTPUT
+ else
+ echo "RUNNER=ubuntu-latest" >> $GITHUB_OUTPUT
+ fi
+
+ cmd:
+ needs: [ set-image ]
+ env:
+ JOB_NAME: 'cmd'
+ runs-on: ${{ needs.set-image.outputs.RUNNER }}
+ container:
+ image: ${{ needs.set-image.outputs.IMAGE }}
+ steps:
+ - name: Get command
+ uses: actions-ecosystem/action-regex-match@v2
+ id: get-pr-comment
+ with:
+ text: ${{ github.event.comment.body }}
+ regex: '^(\/cmd )([-\/\s\w.=:]+)$' # see explanation in docs/contributor/commands-readme.md#examples
+
+ - name: Build workflow link
+ if: ${{ !contains(github.event.comment.body, '--quiet') }}
+ id: build-link
+ run: |
+ # Get exactly the CMD job link, filtering out the other jobs
+ jobLink=$(curl -s \
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ https://api.github.com/repos/${{ github.repository }}/actions/runs/${{ github.run_id }}/jobs | jq '.jobs[] | select(.name | contains("${{ env.JOB_NAME }}")) | .html_url')
+
+ runLink=$(curl -s \
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ https://api.github.com/repos/${{ github.repository }}/actions/runs/${{ github.run_id }} | jq '.html_url')
+
+ echo "job_url=${jobLink}"
+ echo "run_url=${runLink}"
+ echo "job_url=$jobLink" >> $GITHUB_OUTPUT
+ echo "run_url=$runLink" >> $GITHUB_OUTPUT
+
+
+ - name: Comment PR (Start)
+ if: ${{ !contains(github.event.comment.body, '--quiet') }}
+ uses: actions/github-script@v7
+ with:
+ github-token: ${{ secrets.GITHUB_TOKEN }}
+ script: |
+ let job_url = ${{ steps.build-link.outputs.job_url }}
+
+ github.rest.issues.createComment({
+ issue_number: context.issue.number,
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ body: `Command "${{ steps.get-pr-comment.outputs.group2 }}" has started 🚀 [See logs here](${job_url})`
+ })
+
+ - name: Checkout
+ uses: actions/checkout@v4
+ with:
+ ref: ${{ github.head_ref }}
+
+ - name: Install dependencies for bench
+ if: startsWith(steps.get-pr-comment.outputs.group2, 'bench')
+ run: cargo install subweight frame-omni-bencher --locked
+
+ - name: Run cmd
+ id: cmd
+ env:
+ CMD: ${{ steps.get-pr-comment.outputs.group2 }} # to avoid "" around the command
+ run: |
+ echo "Running command: '$CMD' on '${{ needs.set-image.outputs.RUNNER }}' runner, container: '${{ needs.set-image.outputs.IMAGE }}'"
+ echo "RUST_NIGHTLY_VERSION: $RUST_NIGHTLY_VERSION"
+ # Fixes "detected dubious ownership" error in the ci
+ git config --global --add safe.directory '*'
+ git remote -v
+ python3 .github/scripts/cmd/cmd.py $CMD
+ git status
+ git diff
+
+ - name: Commit changes
+ run: |
+ if [ -n "$(git status --porcelain)" ]; then
+ git config --local user.email "action@github.com"
+ git config --local user.name "GitHub Action"
+
+ git pull origin ${{ github.head_ref }}
+ git add .
+ git restore --staged Cargo.lock # ignore changes in Cargo.lock
+ git commit -m "Update from ${{ github.actor }} running command '${{ steps.get-pr-comment.outputs.group2 }}'" || true
+ git push origin ${{ github.head_ref }}
+ else
+ echo "Nothing to commit";
+ fi
+
+ - name: Run Subweight
+ id: subweight
+ if: startsWith(steps.get-pr-comment.outputs.group2, 'bench')
+ shell: bash
+ run: |
+ git fetch
+ result=$(subweight compare commits \
+ --path-pattern "./**/weights/**/*.rs,./**/weights.rs" \
+ --method asymptotic \
+ --format markdown \
+ --no-color \
+ --change added changed \
+ --ignore-errors \
+ refs/remotes/origin/master ${{ github.ref }})
+
+ # Save the multiline result to the output
+        {
+          echo "result<<EOF"
+          echo "$result"
+          echo "EOF"
+        } >> $GITHUB_OUTPUT
+
+ - name: Comment PR (End)
+ if: ${{ !failure() && !contains(github.event.comment.body, '--quiet') }}
+ uses: actions/github-script@v7
+ env:
+ SUBWEIGHT: '${{ steps.subweight.outputs.result }}'
+ with:
+ github-token: ${{ secrets.GITHUB_TOKEN }}
+ script: |
+ let runUrl = ${{ steps.build-link.outputs.run_url }}
+ let subweight = process.env.SUBWEIGHT;
+
+ let subweightCollapsed = subweight
+            ? `\n\n<details><summary>Subweight results:</summary>\n\n${subweight}\n\n</details>`
+ : '';
+
+ github.rest.issues.createComment({
+ issue_number: context.issue.number,
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ body: `Command "${{ steps.get-pr-comment.outputs.group2 }}" has finished ✅ [See logs here](${runUrl})${subweightCollapsed}`
+ })
+
+ - name: Comment PR (Failure)
+ if: ${{ failure() && !contains(github.event.comment.body, '--quiet') }}
+ uses: actions/github-script@v7
+ with:
+ github-token: ${{ secrets.GITHUB_TOKEN }}
+ script: |
+ let jobUrl = ${{ steps.build-link.outputs.job_url }}
+ github.rest.issues.createComment({
+ issue_number: context.issue.number,
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ body: `Command "${{ steps.get-pr-comment.outputs.group2 }}" has failed ❌! [See logs here](${jobUrl})`
+ })
+
+ - name: Add 😕 reaction on failure
+ uses: actions/github-script@v7
+ if: ${{ failure() }}
+ with:
+ github-token: ${{ secrets.GITHUB_TOKEN }}
+ script: |
+ github.rest.reactions.createForIssueComment({
+ comment_id: ${{ github.event.comment.id }},
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ content: 'confused'
+ })
+
+ - name: Add 👍 reaction on success
+ uses: actions/github-script@v7
+ if: ${{ !failure() }}
+ with:
+ github-token: ${{ secrets.GITHUB_TOKEN }}
+ script: |
+ github.rest.reactions.createForIssueComment({
+ comment_id: ${{ github.event.comment.id }},
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ content: '+1'
+ })
diff --git a/.github/workflows/command-backport.yml b/.github/workflows/command-backport.yml
new file mode 100644
index 000000000000..4c63297efc18
--- /dev/null
+++ b/.github/workflows/command-backport.yml
@@ -0,0 +1,62 @@
+name: Backport into stable
+
+on:
+ # This trigger can be problematic, see: https://securitylab.github.com/resources/github-actions-preventing-pwn-requests/
+ # In our case it is fine since we only run it on merged Pull Requests and do not execute any of the repo code itself.
+ pull_request_target:
+ types: [ closed, labeled ]
+
+permissions:
+ contents: write # so it can comment
+ pull-requests: write # so it can create pull requests
+
+jobs:
+ backport:
+ name: Backport pull request
+ runs-on: ubuntu-latest
+
+ # The 'github.event.pull_request.merged' ensures that it got into master:
+ if: >
+ ( !startsWith(github.event.pull_request.base.ref, 'stable') ) &&
+ (
+ github.event_name == 'pull_request_target' &&
+ github.event.pull_request.merged &&
+ github.event.pull_request.base.ref == 'master' &&
+ contains(github.event.pull_request.labels.*.name, 'A4-needs-backport')
+ )
+ steps:
+ - uses: actions/checkout@v4
+
+ - name: Create backport pull requests
+ uses: korthout/backport-action@v3
+ id: backport
+ with:
+ target_branches: stable2407
+ merge_commits: skip
+ github_token: ${{ secrets.GITHUB_TOKEN }}
+ pull_description: |
+ Backport #${pull_number} into `${target_branch}` (cc @${pull_author}).
+
+
+ pull_title: |
+ [${target_branch}] Backport #${pull_number}
+
+ - name: Label Backports
+ if: ${{ steps.backport.outputs.created_pull_numbers != '' }}
+ uses: actions/github-script@v7
+ with:
+ script: |
+ const pullNumbers = '${{ steps.backport.outputs.created_pull_numbers }}'.split(' ');
+
+ for (const pullNumber of pullNumbers) {
+ await github.rest.issues.addLabels({
+ issue_number: parseInt(pullNumber),
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ labels: ['A3-backport']
+ });
+ console.log(`Added A3-backport label to PR #${pullNumber}`);
+ }
diff --git a/.github/workflows/command-bench-all.yml b/.github/workflows/command-bench-all.yml
deleted file mode 100644
index 4128f86fb7c8..000000000000
--- a/.github/workflows/command-bench-all.yml
+++ /dev/null
@@ -1,99 +0,0 @@
-name: Command Bench All
-
-on:
- workflow_dispatch:
- inputs:
- pr:
- description: Number of the Pull Request
- required: true
- benchmark:
- description: Pallet benchmark
- type: choice
- required: true
- options:
- - pallet
- - substrate
- - polkadot
- - cumulus
- pallet:
- description: Pallet
- required: false
- type: string
- default: pallet_name
- target_dir:
- description: Target directory
- type: choice
- options:
- - substrate
- - polkadot
- - cumulus
- runtime:
- description: Runtime
- type: choice
- options:
- - rococo
- - westend
- - asset-hub-kusama
- - asset-hub-polkadot
- - asset-hub-rococo
- - asset-hub-westend
- - bridge-hub-kusama
- - bridge-hub-polkadot
- - bridge-hub-rococo
- - bridge-hub-westend
- - collectives-polkadot
- - collectives-westend
- - coretime-rococo
- - coretime-westend
- - contracts-rococo
- - glutton-kusama
- - glutton-westend
- - people-rococo
- - people-westend
-
-jobs:
- set-image:
- runs-on: ubuntu-latest
- outputs:
- IMAGE: ${{ steps.set_image.outputs.IMAGE }}
- steps:
- - name: Checkout
- uses: actions/checkout@v4
- - id: set_image
- run: cat .github/env >> $GITHUB_OUTPUT
- cmd-bench-all:
- needs: [set-image]
- runs-on: arc-runners-polkadot-sdk-weights
- container:
- image: ${{ needs.set-image.outputs.IMAGE }}
- permissions:
- contents: write
- pull-requests: write
- steps:
- - name: Download repo
- uses: actions/checkout@v4
- - name: Install gh cli
- id: gh
- uses: ./.github/actions/set-up-gh
- with:
- pr-number: ${{ inputs.pr }}
- GH_TOKEN: ${{ github.token }}
- - name: Run bench all
- run: |
- "./scripts/bench-all.sh" "${{ inputs.benchmark }}" --runtime "${{ inputs.runtime }}" --pallet "${{ inputs.pallet }}" --target_dir "${{ inputs.target_dir }}"
- - name: Report failure
- if: ${{ failure() }}
-        run: gh pr comment ${{ inputs.pr }} --body "Command failed ❌ Run by @${{ github.actor }} for ${{ github.workflow }} failed. See logs <a href=\"$RUN\">here</a>."
- env:
- RUN: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
- GH_TOKEN: ${{ github.token }}
- - run: git pull --rebase
- - uses: stefanzweifel/git-auto-commit-action@v5
- with:
- commit_message: cmd-action - ${{ github.workflow }}
- branch: ${{ steps.gh.outputs.branch }}
- - name: Report succeed
-        run: gh pr comment ${{ inputs.pr }} --body "Action completed 🎉🎉 Run by @${{ github.actor }} for ${{ github.workflow }} completed 🎉. See logs <a href=\"$RUN\">here</a>."
- env:
- RUN: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
- GH_TOKEN: ${{ github.token }}
diff --git a/.github/workflows/command-bench-overhead.yml b/.github/workflows/command-bench-overhead.yml
deleted file mode 100644
index fec8d37bb9ef..000000000000
--- a/.github/workflows/command-bench-overhead.yml
+++ /dev/null
@@ -1,78 +0,0 @@
-name: Command Bench Overhead
-
-on:
- workflow_dispatch:
- inputs:
- pr:
- description: Number of the Pull Request
- required: true
- benchmark:
- description: Pallet benchmark
- type: choice
- required: true
- options:
- - default
- - substrate
- - cumulus
- runtime:
- description: Runtime
- type: choice
- options:
- - rococo
- - westend
- - asset-hub-rococo
- - asset-hub-westend
- target_dir:
- description: Target directory
- type: choice
- options:
- - polkadot
- - substrate
- - cumulus
-
-jobs:
- set-image:
- runs-on: ubuntu-latest
- outputs:
- IMAGE: ${{ steps.set_image.outputs.IMAGE }}
- steps:
- - name: Checkout
- uses: actions/checkout@v4
- - id: set_image
- run: cat .github/env >> $GITHUB_OUTPUT
- cmd-bench-overhead:
- needs: [set-image]
- runs-on: arc-runners-polkadot-sdk-benchmark
- container:
- image: ${{ needs.set-image.outputs.IMAGE }}
- permissions:
- contents: write
- pull-requests: write
- steps:
- - name: Download repo
- uses: actions/checkout@v4
- - name: Install gh cli
- id: gh
- uses: ./.github/actions/set-up-gh
- with:
- pr-number: ${{ inputs.pr }}
- GH_TOKEN: ${{ github.token }}
- - name: Run bench overhead
- run: |
- "./scripts/bench.sh" "${{ inputs.benchmark }}" --subcommand "overhead" --runtime "${{ inputs.runtime }}" --target_dir "${{ inputs.target_dir }}"
- - name: Report failure
- if: ${{ failure() }}
-        run: gh pr comment ${{ inputs.pr }} --body "Command failed ❌ Run by @${{ github.actor }} for ${{ github.workflow }} failed. See logs <a href=\"$RUN\">here</a>."
- env:
- RUN: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
- GH_TOKEN: ${{ github.token }}
- - run: git pull --rebase
- - uses: stefanzweifel/git-auto-commit-action@v5
- with:
- commit_message: cmd-action - ${{ github.workflow }}
- branch: ${{ steps.gh.outputs.branch }}
- - name: Report succeed
-        run: gh pr comment ${{ inputs.pr }} --body "Action completed 🎉🎉 Run by @${{ github.actor }} for ${{ github.workflow }} completed 🎉. See logs <a href=\"$RUN\">here</a>."
- env:
- RUN: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
- GH_TOKEN: ${{ github.token }}
diff --git a/.github/workflows/command-bench.yml b/.github/workflows/command-bench.yml
deleted file mode 100644
index ac879f443755..000000000000
--- a/.github/workflows/command-bench.yml
+++ /dev/null
@@ -1,124 +0,0 @@
-name: Command Bench
-
-on:
- workflow_dispatch:
- inputs:
- pr:
- description: Number of the Pull Request
- required: true
- benchmark:
- description: Pallet benchmark
- type: choice
- required: true
- options:
- - substrate-pallet
- - polkadot-pallet
- - cumulus-assets
- - cumulus-collectives
- - cumulus-coretime
- - cumulus-bridge-hubs
- - cumulus-contracts
- - cumulus-glutton
- - cumulus-starters
- - cumulus-people
- - cumulus-testing
- subcommand:
- description: Subcommand
- type: choice
- required: true
- options:
- - pallet
- - xcm
- runtime:
- description: Runtime
- type: choice
- options:
- - dev
- - rococo
- - westend
- - asset-hub-westend
- - asset-hub-rococo
- - collectives-westend
- - coretime-rococo
- - coretime-westend
- - bridge-hub-rococo
- - bridge-hub-westend
- - contracts-rococo
- - glutton-westend
- - glutton-westend-dev-1300
- - seedling
- - shell
- - people-westend
- - people-rococo
- - penpal
- - rococo-parachain
- pallet:
- description: Pallet
- type: string
- default: pallet_name
- target_dir:
- description: Target directory
- type: choice
- options:
- - substrate
- - polkadot
- - cumulus
- runtime_dir:
- description: Runtime directory
- type: choice
- options:
- - people
- - collectives
- - coretime
- - bridge-hubs
- - contracts
- - glutton
- - starters
- - testing
-
-jobs:
- set-image:
- runs-on: ubuntu-latest
- outputs:
- IMAGE: ${{ steps.set_image.outputs.IMAGE }}
- steps:
- - name: Checkout
- uses: actions/checkout@v4
- - id: set_image
- run: cat .github/env >> $GITHUB_OUTPUT
- cmd-bench:
- needs: [set-image]
- runs-on: arc-runners-polkadot-sdk-benchmark
- container:
- image: ${{ needs.set-image.outputs.IMAGE }}
- permissions:
- contents: write
- pull-requests: write
- steps:
- - name: Download repo
- uses: actions/checkout@v4
- - name: Install gh cli
- id: gh
- uses: ./.github/actions/set-up-gh
- with:
- pr-number: ${{ inputs.pr }}
- GH_TOKEN: ${{ github.token }}
- - name: Run bench
- run: |
- "./scripts/bench.sh" "${{ inputs.benchmark }}" --runtime "${{ inputs.runtime }}" --pallet "${{ inputs.pallet }}" --target_dir "${{ inputs.target_dir }}" --subcommand "${{ inputs.subcommand }}" --runtime_dir "${{ inputs.runtime_dir }}"
- - name: Report failure
- if: ${{ failure() }}
-        run: gh pr comment ${{ inputs.pr }} --body "Command failed ❌ Run by @${{ github.actor }} for ${{ github.workflow }} failed. See logs <a href=\"$RUN\">here</a>."
- env:
- RUN: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
- GH_TOKEN: ${{ github.token }}
- - run: git pull --rebase
- - uses: stefanzweifel/git-auto-commit-action@v5
- with:
- commit_message: cmd-action - ${{ github.workflow }}
- branch: ${{ steps.gh.outputs.branch }}
- - name: Report succeed
-        run: gh pr comment ${{ inputs.pr }} --body "Action completed 🎉🎉 Run by @${{ github.actor }} for ${{ github.workflow }} completed 🎉. See logs <a href=\"$RUN\">here</a>."
- env:
- RUN: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
- GH_TOKEN: ${{ github.token }}
diff --git a/.github/workflows/command-fmt.yml b/.github/workflows/command-fmt.yml
deleted file mode 100644
index fc37a17ac549..000000000000
--- a/.github/workflows/command-fmt.yml
+++ /dev/null
@@ -1,65 +0,0 @@
-name: Command FMT
-
-on:
- workflow_dispatch:
- inputs:
- pr:
- description: Number of the Pull Request
- required: true
-
-jobs:
- set-image:
- runs-on: ubuntu-latest
- outputs:
- IMAGE: ${{ steps.set_image.outputs.IMAGE }}
- steps:
- - name: Checkout
- uses: actions/checkout@v4
- - id: set_image
- run: cat .github/env >> $GITHUB_OUTPUT
- cmd-fmt:
- needs: [set-image]
- runs-on: ubuntu-latest
- timeout-minutes: 20
- container:
- image: ${{ needs.set-image.outputs.IMAGE }}
- permissions:
- contents: write
- pull-requests: write
- steps:
- - name: Download repo
- uses: actions/checkout@v4
- - name: Install gh cli
- id: gh
- uses: ./.github/actions/set-up-gh
- with:
- pr-number: ${{ inputs.pr }}
- GH_TOKEN: ${{ github.token }}
- - name: Run FMT
- run: |
- cargo --version
- rustc --version
- cargo +nightly --version
- rustc +nightly --version
-
- cargo +nightly fmt
-
- # format toml.
- # since paritytech/ci-unified:bullseye-1.73.0-2023-11-01-v20231204 includes taplo-cli
- taplo format --config .config/taplo.toml
- - name: Report failure
- if: ${{ failure() }}
-        run: gh pr comment ${{ inputs.pr }} --body "Command failed ❌ Run by @${{ github.actor }} for ${{ github.workflow }} failed. See logs <a href=\"$RUN\">here</a>."
- env:
- RUN: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
- GH_TOKEN: ${{ github.token }}
- - run: git pull --rebase
- - uses: stefanzweifel/git-auto-commit-action@v5
- with:
- commit_message: cmd-action - ${{ github.workflow }}
- branch: ${{ steps.gh.outputs.branch }}
- - name: Report succeed
-        run: gh pr comment ${{ inputs.pr }} --body "Action completed 🎉🎉 Run by @${{ github.actor }} for ${{ github.workflow }} completed 🎉. See logs <a href=\"$RUN\">here</a>."
- env:
- RUN: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
- GH_TOKEN: ${{ github.token }}
diff --git a/.github/workflows/command-inform.yml b/.github/workflows/command-inform.yml
deleted file mode 100644
index afdcf4c1b7b9..000000000000
--- a/.github/workflows/command-inform.yml
+++ /dev/null
@@ -1,22 +0,0 @@
-name: Inform of new command action
-
-on:
- issue_comment:
- types: [created]
-
-jobs:
- comment:
- runs-on: ubuntu-latest
- # Temporary disable the bot until the new command bot works properly
- if: github.event.issue.pull_request && startsWith(github.event.comment.body, 'bot ') && false
- steps:
- - name: Inform that the new command exist
- uses: actions/github-script@v7
- with:
- script: |
- github.rest.issues.createComment({
- issue_number: context.issue.number,
- owner: context.repo.owner,
- repo: context.repo.repo,
-          body: 'We are migrating the command bot to be a GitHub Action. Please see the documentation on how to use it.'
- })
diff --git a/.github/workflows/command-prdoc.yml b/.github/workflows/command-prdoc.yml
deleted file mode 100644
index 3a08b9a5fb28..000000000000
--- a/.github/workflows/command-prdoc.yml
+++ /dev/null
@@ -1,90 +0,0 @@
-name: Command PrDoc
-
-on:
- workflow_dispatch:
- inputs:
- pr:
- type: number
- description: Number of the Pull Request
- required: true
- bump:
- type: choice
- description: Default bump level for all crates
- default: "TODO"
- required: true
- options:
- - "TODO"
- - "no change"
- - "patch"
- - "minor"
- - "major"
- audience:
- type: choice
- description: Audience of the PrDoc
- default: "TODO"
- required: true
- options:
- - "TODO"
- - "Runtime Dev"
- - "Runtime User"
- - "Node Dev"
- - "Node User"
- overwrite:
- type: choice
- description: Overwrite existing PrDoc
- default: "true"
- required: true
- options:
- - "true"
- - "false"
-
-concurrency:
- group: command-prdoc
- cancel-in-progress: true
-
-jobs:
- set-image:
- runs-on: ubuntu-latest
- outputs:
- IMAGE: ${{ steps.set_image.outputs.IMAGE }}
- steps:
- - name: Checkout
- uses: actions/checkout@v4
- - id: set_image
- run: cat .github/env >> $GITHUB_OUTPUT
- cmd-prdoc:
- needs: [set-image]
- runs-on: ubuntu-latest
- timeout-minutes: 20
- container:
- image: ${{ needs.set-image.outputs.IMAGE }}
- permissions:
- contents: write
- pull-requests: write
- steps:
- - name: Download repo
- uses: actions/checkout@v4
- - name: Install gh cli
- id: gh
- uses: ./.github/actions/set-up-gh
- with:
- pr-number: ${{ inputs.pr }}
- GH_TOKEN: ${{ github.token }}
- - name: Generate PrDoc
- run: |
- python3 -m pip install -q cargo-workspace PyGithub whatthepatch pyyaml toml
-
- python3 .github/scripts/generate-prdoc.py --pr "${{ inputs.pr }}" --bump "${{ inputs.bump }}" --audience "${{ inputs.audience }}" --force "${{ inputs.overwrite }}"
-
- - name: Report failure
- if: ${{ failure() }}
-        run: gh pr comment ${{ inputs.pr }} --body "Command failed ❌ Run by @${{ github.actor }} for ${{ github.workflow }} failed. See logs <a href=\"$RUN\">here</a>."
- env:
- RUN: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
- GH_TOKEN: ${{ github.token }}
- - name: Push Commit
- uses: stefanzweifel/git-auto-commit-action@v5
- with:
- commit_message: Add PrDoc (auto generated)
- branch: ${{ steps.gh.outputs.branch }}
- file_pattern: 'prdoc/*.prdoc'
diff --git a/.github/workflows/command-sync.yml b/.github/workflows/command-sync.yml
deleted file mode 100644
index c610f4066a87..000000000000
--- a/.github/workflows/command-sync.yml
+++ /dev/null
@@ -1,71 +0,0 @@
-name: Command Sync
-
-on:
- workflow_dispatch:
- inputs:
- pr:
- description: Number of the Pull Request
- required: true
- chain:
- description: Chain
- type: choice
- required: true
- options:
- - westend
- - rococo
- sync-type:
- description: Sync type
- type: choice
- required: true
- options:
- - warp
- - full
- - fast
- - fast-unsafe
-
-jobs:
- set-image:
- runs-on: ubuntu-latest
- outputs:
- IMAGE: ${{ steps.set_image.outputs.IMAGE }}
- steps:
- - name: Checkout
- uses: actions/checkout@v4
- - id: set_image
- run: cat .github/env >> $GITHUB_OUTPUT
- cmd-sync:
- needs: [set-image]
- runs-on: arc-runners-polkadot-sdk-warpsync
- container:
- image: ${{ needs.set-image.outputs.IMAGE }}
- permissions:
- contents: write
- pull-requests: write
- steps:
- - name: Download repo
- uses: actions/checkout@v4
- - name: Install gh cli
- id: gh
- uses: ./.github/actions/set-up-gh
- with:
- pr-number: ${{ inputs.pr }}
- GH_TOKEN: ${{ github.token }}
- - name: Run sync
- run: |
- "./scripts/sync.sh" --chain "${{ inputs.chain }}" --type "${{ inputs.sync-type }}"
- - name: Report failure
- if: ${{ failure() }}
-        run: gh pr comment ${{ inputs.pr }} --body "Command failed ❌ Run by @${{ github.actor }} for ${{ github.workflow }} failed. See logs <a href=\"$RUN\">here</a>."
- env:
- RUN: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
- GH_TOKEN: ${{ github.token }}
- - run: git pull --rebase
- - uses: stefanzweifel/git-auto-commit-action@v5
- with:
- commit_message: cmd-action - ${{ github.workflow }}
- branch: ${{ steps.gh.outputs.branch }}
- - name: Report succeed
-        run: gh pr comment ${{ inputs.pr }} --body "Action completed 🎉🎉 Run by @${{ github.actor }} for ${{ github.workflow }} completed 🎉. See logs <a href=\"$RUN\">here</a>."
- env:
- RUN: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
- GH_TOKEN: ${{ github.token }}
diff --git a/.github/workflows/command-update-ui.yml b/.github/workflows/command-update-ui.yml
deleted file mode 100644
index 860177adc879..000000000000
--- a/.github/workflows/command-update-ui.yml
+++ /dev/null
@@ -1,59 +0,0 @@
-name: Command Update UI
-
-on:
- workflow_dispatch:
- inputs:
- pr:
- description: Number of the Pull Request
- required: true
- rust-version:
- description: Version of rust. Example 1.70
- required: false
-
-jobs:
- set-image:
- runs-on: ubuntu-latest
- outputs:
- IMAGE: ${{ steps.set_image.outputs.IMAGE }}
- steps:
- - name: Checkout
- uses: actions/checkout@v4
- - id: set_image
- run: cat .github/env >> $GITHUB_OUTPUT
- cmd-update-ui:
- needs: [set-image]
- runs-on: arc-runners-polkadot-sdk-beefy
- timeout-minutes: 90
- container:
- image: ${{ needs.set-image.outputs.IMAGE }}
- permissions:
- contents: write
- pull-requests: write
- steps:
- - name: Download repo
- uses: actions/checkout@v4
- - name: Install gh cli
- id: gh
- uses: ./.github/actions/set-up-gh
- with:
- pr-number: ${{ inputs.pr }}
- GH_TOKEN: ${{ github.token }}
- - name: Run update-ui
- run: |
- "./scripts/update-ui-tests.sh" "${{ inputs.rust-version }}"
- - name: Report failure
- if: ${{ failure() }}
-        run: gh pr comment ${{ inputs.pr }} --body "Command failed ❌ Run by @${{ github.actor }} for ${{ github.workflow }} failed. See logs <a href=\"$RUN\">here</a>."
- env:
- RUN: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
- GH_TOKEN: ${{ github.token }}
- - run: git pull --rebase
- - uses: stefanzweifel/git-auto-commit-action@v5
- with:
- commit_message: cmd-action - ${{ github.workflow }}
- branch: ${{ steps.gh.outputs.branch }}
- - name: Report succeed
-        run: gh pr comment ${{ inputs.pr }} --body "Action completed 🎉🎉 Run by @${{ github.actor }} for ${{ github.workflow }} completed 🎉. See logs <a href=\"$RUN\">here</a>."
- env:
- RUN: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
- GH_TOKEN: ${{ github.token }}
diff --git a/.github/workflows/release-30_publish_release_draft.yml b/.github/workflows/release-30_publish_release_draft.yml
index 6d31ca7a7365..4343dbf915a9 100644
--- a/.github/workflows/release-30_publish_release_draft.yml
+++ b/.github/workflows/release-30_publish_release_draft.yml
@@ -26,6 +26,7 @@ jobs:
uses: "./.github/workflows/release-srtool.yml"
with:
excluded_runtimes: "substrate-test bp cumulus-test kitchensink minimal-template parachain-template penpal polkadot-test seedling shell frame-try sp solochain-template"
+ build_opts: "--features on-chain-release-build"
build-binaries:
runs-on: ubuntu-latest
diff --git a/.github/workflows/release-50_publish-docker.yml b/.github/workflows/release-50_publish-docker.yml
index a749c86faa0c..c5d214ec68ab 100644
--- a/.github/workflows/release-50_publish-docker.yml
+++ b/.github/workflows/release-50_publish-docker.yml
@@ -298,7 +298,7 @@ jobs:
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- name: Set up Docker Buildx
- uses: docker/setup-buildx-action@aa33708b10e362ff993539393ff100fa93ed6a27 # v3.5.0
+ uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1
- name: Cache Docker layers
uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2
@@ -322,7 +322,7 @@ jobs:
- name: Build and push
id: docker_build
- uses: docker/build-push-action@5176d81f87c23d6fc96624dfdbcd9f3830bbe445 # v6.5.0
+ uses: docker/build-push-action@5cd11c3a4ced054e52742c5fd54dca954e0edd85 # v6.7.0
with:
push: true
file: docker/dockerfiles/polkadot/polkadot_injected_debian.Dockerfile
diff --git a/.github/workflows/release-srtool.yml b/.github/workflows/release-srtool.yml
index e1dc42afc6e9..262203f05005 100644
--- a/.github/workflows/release-srtool.yml
+++ b/.github/workflows/release-srtool.yml
@@ -9,6 +9,8 @@ on:
inputs:
excluded_runtimes:
type: string
+ build_opts:
+ type: string
outputs:
published_runtimes:
value: ${{ jobs.find-runtimes.outputs.runtime }}
@@ -74,6 +76,8 @@ jobs:
- name: Srtool build
id: srtool_build
uses: chevdor/srtool-actions@v0.9.2
+ env:
+ BUILD_OPTS: ${{ inputs.build_opts }}
with:
chain: ${{ matrix.chain }}
runtime_dir: ${{ matrix.runtime_dir }}
diff --git a/.github/workflows/review-bot.yml b/.github/workflows/review-bot.yml
index 80c96b0ef537..3dd5b1114813 100644
--- a/.github/workflows/review-bot.yml
+++ b/.github/workflows/review-bot.yml
@@ -15,7 +15,6 @@ on:
jobs:
review-approvals:
runs-on: ubuntu-latest
- environment: master
steps:
- name: Generate token
id: app_token
diff --git a/.github/workflows/runtimes-matrix.json b/.github/workflows/runtimes-matrix.json
new file mode 100644
index 000000000000..45a3acd3f166
--- /dev/null
+++ b/.github/workflows/runtimes-matrix.json
@@ -0,0 +1,100 @@
+[
+ {
+ "name": "dev",
+ "package": "kitchensink-runtime",
+ "path": "substrate/frame",
+ "uri": null,
+ "is_relay": false
+ },
+ {
+ "name": "westend",
+ "package": "westend-runtime",
+ "path": "polkadot/runtime/westend",
+ "uri": "wss://try-runtime-westend.polkadot.io:443",
+ "is_relay": true
+ },
+ {
+ "name": "rococo",
+ "package": "rococo-runtime",
+ "path": "polkadot/runtime/rococo",
+ "uri": "wss://try-runtime-rococo.polkadot.io:443",
+ "is_relay": true
+ },
+ {
+ "name": "asset-hub-westend",
+ "package": "asset-hub-westend-runtime",
+ "path": "cumulus/parachains/runtimes/assets/asset-hub-westend",
+ "uri": "wss://westend-asset-hub-rpc.polkadot.io:443",
+ "is_relay": false
+ },
+ {
+ "name": "asset-hub-rococo",
+ "package": "asset-hub-rococo-runtime",
+ "path": "cumulus/parachains/runtimes/assets/asset-hub-rococo",
+ "uri": "wss://rococo-asset-hub-rpc.polkadot.io:443",
+ "is_relay": false
+ },
+ {
+ "name": "bridge-hub-rococo",
+ "package": "bridge-hub-rococo-runtime",
+ "path": "cumulus/parachains/runtimes/bridges/bridge-hub-rococo",
+ "uri": "wss://rococo-bridge-hub-rpc.polkadot.io:443",
+ "is_relay": false
+ },
+ {
+ "name": "bridge-hub-westend",
+ "package": "bridge-hub-rococo-runtime",
+ "path": "cumulus/parachains/runtimes/bridges/bridge-hub-westend",
+ "uri": "wss://westend-bridge-hub-rpc.polkadot.io:443",
+ "is_relay": false
+ },
+ {
+ "name": "collectives-westend",
+ "package": "collectives-westend-runtime",
+ "path": "cumulus/parachains/runtimes/collectives/collectives-westend",
+ "uri": "wss://westend-collectives-rpc.polkadot.io:443"
+ },
+ {
+ "name": "contracts-rococo",
+ "package": "contracts-rococo-runtime",
+ "path": "cumulus/parachains/runtimes/contracts/contracts-rococo",
+ "uri": "wss://rococo-contracts-rpc.polkadot.io:443",
+ "is_relay": false
+ },
+ {
+ "name": "coretime-rococo",
+ "package": "coretime-rococo-runtime",
+ "path": "cumulus/parachains/runtimes/coretime/coretime-rococo",
+ "uri": "wss://rococo-coretime-rpc.polkadot.io:443",
+ "is_relay": false
+ },
+ {
+ "name": "coretime-westend",
+ "package": "coretime-westend-runtime",
+ "path": "cumulus/parachains/runtimes/coretime/coretime-westend",
+ "uri": "wss://westend-coretime-rpc.polkadot.io:443",
+ "is_relay": false
+ },
+ {
+ "name": "glutton-westend",
+ "package": "glutton-westend-runtime",
+ "path": "cumulus/parachains/runtimes/gluttons/glutton-westend",
+ "is_relay": false
+ },
+ {
+ "name": "people-rococo",
+ "package": "people-rococo-runtime",
+ "path": "cumulus/parachains/runtimes/people/people-rococo",
+ "uri": "wss://rococo-people-rpc.polkadot.io:443",
+ "is_relay": false
+ },
+ {
+ "name": "people-westend",
+ "package": "people-westend-runtime",
+ "path": "cumulus/parachains/runtimes/people/people-westend",
+ "uri": "wss://westend-people-rpc.polkadot.io:443",
+ "is_relay": false
+ }
+]
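
The new matrix file doubles as machine-readable input for CI. As a rough sketch of how a workflow step could consume it (only `jq` is assumed; the step and output names here are hypothetical, not part of this change):

```bash
#!/usr/bin/env bash
# Hypothetical sketch of a CI step reading runtimes-matrix.json.
# Entries without a live RPC endpoint (uri == null) are filtered out
# before fanning out per-runtime jobs.
set -euo pipefail

MATRIX_FILE=".github/workflows/runtimes-matrix.json"

# Keep only runtimes that expose a websocket URI.
live_matrix="$(jq -c '[.[] | select(.uri != null)]' "$MATRIX_FILE")"

# Expose the matrix to later jobs (GitHub Actions output convention).
echo "runtime-matrix=${live_matrix}" >> "${GITHUB_OUTPUT:-/dev/stdout}"
```

A consuming job could then expand this with `strategy.matrix.include: ${{ fromJson(needs.<job>.outputs.runtime-matrix) }}` to get one job per runtime.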
diff --git a/.gitignore b/.gitignore
index e3e382af6195..0263626d832d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -23,6 +23,7 @@
**/node_modules
**/target/
**/wip/*.stderr
+**/__pycache__/
/.cargo/config
/.envrc
artifacts
diff --git a/.gitlab/pipeline/check.yml b/.gitlab/pipeline/check.yml
index 2b8b90ef19a4..3e94eb77c7b4 100644
--- a/.gitlab/pipeline/check.yml
+++ b/.gitlab/pipeline/check.yml
@@ -106,7 +106,7 @@ check-runtime-migration-westend:
NETWORK: "westend"
PACKAGE: "westend-runtime"
WASM: "westend_runtime.compact.compressed.wasm"
- URI: "wss://westend-try-runtime-node.parity-chains.parity.io:443"
+ URI: "wss://try-runtime-westend.polkadot.io:443"
SUBCOMMAND_EXTRA_ARGS: "--no-weight-warnings"
check-runtime-migration-rococo:
@@ -119,29 +119,9 @@ check-runtime-migration-rococo:
NETWORK: "rococo"
PACKAGE: "rococo-runtime"
WASM: "rococo_runtime.compact.compressed.wasm"
- URI: "wss://rococo-try-runtime-node.parity-chains.parity.io:443"
+ URI: "wss://try-runtime-rococo.polkadot.io:443"
SUBCOMMAND_EXTRA_ARGS: "--no-weight-warnings"
-find-fail-ci-phrase:
- stage: check
- variables:
- CI_IMAGE: "paritytech/tools:latest"
- ASSERT_REGEX: "FAIL-CI"
- GIT_DEPTH: 1
- extends:
- - .kubernetes-env
- - .test-pr-refs
- script:
- - set +e
- - rg --line-number --hidden --type rust --glob '!{.git,target}' "$ASSERT_REGEX" .; exit_status=$?
- - if [ $exit_status -eq 0 ]; then
- echo "$ASSERT_REGEX was found, exiting with 1";
- exit 1;
- else
- echo "No $ASSERT_REGEX was found, exiting with 0";
- exit 0;
- fi
-
check-core-crypto-features:
stage: check
extends:
diff --git a/.gitlab/pipeline/zombienet/bridges.yml b/.gitlab/pipeline/zombienet/bridges.yml
index 9d7a8b931193..070bfc8472d5 100644
--- a/.gitlab/pipeline/zombienet/bridges.yml
+++ b/.gitlab/pipeline/zombienet/bridges.yml
@@ -52,12 +52,12 @@ zombienet-bridges-0001-asset-transfer-works:
extends:
- .zombienet-bridges-common
script:
- - /home/nonroot/bridges-polkadot-sdk/bridges/testing/run-new-test.sh 0001-asset-transfer --docker
+ - /home/nonroot/bridges-polkadot-sdk/bridges/testing/run-test.sh 0001-asset-transfer --docker
- echo "Done"
zombienet-bridges-0002-free-headers-synced-while-idle:
extends:
- .zombienet-bridges-common
script:
- - /home/nonroot/bridges-polkadot-sdk/bridges/testing/run-new-test.sh 0002-free-headers-synced-while-idle --docker
+ - /home/nonroot/bridges-polkadot-sdk/bridges/testing/run-test.sh 0002-free-headers-synced-while-idle --docker
- echo "Done"
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
new file mode 120000
index 000000000000..63b2a0dc1abc
--- /dev/null
+++ b/CODE_OF_CONDUCT.md
@@ -0,0 +1 @@
+docs/contributor/CODE_OF_CONDUCT.md
\ No newline at end of file
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 120000
index 000000000000..0f645512e8e4
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1 @@
+docs/contributor/CONTRIBUTING.md
\ No newline at end of file
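
A note on the two hunks above: file mode `120000` is git's marker for a symbolic link, and the single content line is the link target, so `CODE_OF_CONDUCT.md` and `CONTRIBUTING.md` become repo-root symlinks into `docs/contributor/` rather than file copies. Recreating them locally would look like:

```bash
# Create the same repo-root symlinks these hunks record (mode 120000
# entries store the link target as content, hence "No newline at end of file").
ln -s docs/contributor/CODE_OF_CONDUCT.md CODE_OF_CONDUCT.md
ln -s docs/contributor/CONTRIBUTING.md CONTRIBUTING.md
git add CODE_OF_CONDUCT.md CONTRIBUTING.md
```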
diff --git a/Cargo.lock b/Cargo.lock
index a67f9fdee31b..bb0f01542d31 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1465,15 +1465,6 @@ dependencies = [
"serde",
]
-[[package]]
-name = "beef"
-version = "0.5.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3a8241f3ebb85c056b509d4327ad0358fbbba6ffb340bf388f26350aeda225b1"
-dependencies = [
- "serde",
-]
-
[[package]]
name = "binary-merkle-tree"
version = "13.0.0"
@@ -1511,7 +1502,7 @@ dependencies = [
"proc-macro2 1.0.82",
"quote 1.0.36",
"regex",
- "rustc-hash",
+ "rustc-hash 1.1.0",
"shlex",
"syn 2.0.61",
]
@@ -1622,13 +1613,13 @@ dependencies = [
[[package]]
name = "blake2b_simd"
-version = "1.0.1"
+version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3c2f0dc9a68c6317d884f97cc36cf5a3d20ba14ce404227df55e1af708ab04bc"
+checksum = "23285ad32269793932e830392f2fe2f83e26488fd3ec778883a93c8323735780"
dependencies = [
"arrayref",
"arrayvec 0.7.4",
- "constant_time_eq 0.2.6",
+ "constant_time_eq 0.3.0",
]
[[package]]
@@ -5991,7 +5982,7 @@ dependencies = [
"sc-cli",
"sp-runtime",
"sp-statement-store",
- "sp-tracing 16.0.0",
+ "tracing-subscriber 0.3.18",
]
[[package]]
@@ -7416,9 +7407,9 @@ dependencies = [
[[package]]
name = "jsonrpsee"
-version = "0.23.2"
+version = "0.24.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "62b089779ad7f80768693755a031cc14a7766aba707cbe886674e3f79e9b7e47"
+checksum = "5ec465b607a36dc5dd45d48b7689bc83f679f66a3ac6b6b21cc787a11e0f8685"
dependencies = [
"jsonrpsee-core",
"jsonrpsee-http-client",
@@ -7432,9 +7423,9 @@ dependencies = [
[[package]]
name = "jsonrpsee-client-transport"
-version = "0.23.2"
+version = "0.24.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "08163edd8bcc466c33d79e10f695cdc98c00d1e6ddfb95cec41b6b0279dd5432"
+checksum = "90f0977f9c15694371b8024c35ab58ca043dbbf4b51ccb03db8858a021241df1"
dependencies = [
"base64 0.22.1",
"futures-util",
@@ -7455,13 +7446,11 @@ dependencies = [
[[package]]
name = "jsonrpsee-core"
-version = "0.23.2"
+version = "0.24.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "79712302e737d23ca0daa178e752c9334846b08321d439fd89af9a384f8c830b"
+checksum = "e942c55635fbf5dc421938b8558a8141c7e773720640f4f1dbe1f4164ca4e221"
dependencies = [
- "anyhow",
"async-trait",
- "beef",
"bytes",
"futures-timer",
"futures-util",
@@ -7472,7 +7461,7 @@ dependencies = [
"parking_lot 0.12.3",
"pin-project",
"rand",
- "rustc-hash",
+ "rustc-hash 2.0.0",
"serde",
"serde_json",
"thiserror",
@@ -7483,9 +7472,9 @@ dependencies = [
[[package]]
name = "jsonrpsee-http-client"
-version = "0.23.2"
+version = "0.24.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2d90064e04fb9d7282b1c71044ea94d0bbc6eff5621c66f1a0bce9e9de7cf3ac"
+checksum = "e33774602df12b68a2310b38a535733c477ca4a498751739f89fe8dbbb62ec4c"
dependencies = [
"async-trait",
"base64 0.22.1",
@@ -7508,9 +7497,9 @@ dependencies = [
[[package]]
name = "jsonrpsee-proc-macros"
-version = "0.23.2"
+version = "0.24.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7895f186d5921065d96e16bd795e5ca89ac8356ec423fafc6e3d7cf8ec11aee4"
+checksum = "6b07a2daf52077ab1b197aea69a5c990c060143835bf04c77070e98903791715"
dependencies = [
"heck 0.5.0",
"proc-macro-crate 3.1.0",
@@ -7521,11 +7510,10 @@ dependencies = [
[[package]]
name = "jsonrpsee-server"
-version = "0.23.2"
+version = "0.24.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "654afab2e92e5d88ebd8a39d6074483f3f2bfdf91c5ac57fe285e7127cdd4f51"
+checksum = "038fb697a709bec7134e9ccbdbecfea0e2d15183f7140254afef7c5610a3f488"
dependencies = [
- "anyhow",
"futures-util",
"http 1.1.0",
"http-body 1.0.0",
@@ -7549,11 +7537,10 @@ dependencies = [
[[package]]
name = "jsonrpsee-types"
-version = "0.23.2"
+version = "0.24.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d9c465fbe385238e861fdc4d1c85e04ada6c1fd246161d26385c1b311724d2af"
+checksum = "23b67d6e008164f027afbc2e7bb79662650158d26df200040282d2aa1cbb093b"
dependencies = [
- "beef",
"http 1.1.0",
"serde",
"serde_json",
@@ -7562,9 +7549,9 @@ dependencies = [
[[package]]
name = "jsonrpsee-ws-client"
-version = "0.23.2"
+version = "0.24.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1c28759775f5cb2f1ea9667672d3fe2b0e701d1f4b7b67954e60afe7fd058b5e"
+checksum = "992bf67d1132f88edf4a4f8cff474cf01abb2be203004a2b8e11c2b20795b99e"
dependencies = [
"http 1.1.0",
"jsonrpsee-client-transport",
@@ -8739,48 +8726,18 @@ dependencies = [
"futures-timer",
"jsonrpsee",
"minimal-template-runtime",
- "polkadot-sdk-frame",
- "sc-basic-authorship",
- "sc-cli",
- "sc-client-api",
- "sc-consensus",
- "sc-consensus-manual-seal",
- "sc-executor",
- "sc-network",
- "sc-offchain",
- "sc-rpc-api",
- "sc-service",
- "sc-telemetry",
- "sc-transaction-pool",
- "sc-transaction-pool-api",
+ "polkadot-sdk",
"serde_json",
- "sp-api",
- "sp-block-builder",
- "sp-blockchain",
- "sp-io",
- "sp-keyring",
- "sp-runtime",
- "sp-timestamp",
- "substrate-build-script-utils",
- "substrate-frame-rpc-system",
]
[[package]]
name = "minimal-template-runtime"
version = "0.0.0"
dependencies = [
- "pallet-balances",
"pallet-minimal-template",
- "pallet-sudo",
- "pallet-timestamp",
- "pallet-transaction-payment",
- "pallet-transaction-payment-rpc-runtime-api",
"parity-scale-codec",
- "polkadot-sdk-frame",
+ "polkadot-sdk",
"scale-info",
- "sp-genesis-builder",
- "sp-runtime",
- "substrate-wasm-builder",
]
[[package]]
@@ -9283,7 +9240,6 @@ dependencies = [
"sc-consensus-grandpa-rpc",
"sc-mixnet",
"sc-rpc",
- "sc-rpc-api",
"sc-sync-state-rpc",
"sc-transaction-pool-api",
"sp-api",
@@ -9571,6 +9527,15 @@ dependencies = [
"memchr",
]
+[[package]]
+name = "object"
+version = "0.36.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "081b846d1d56ddfc18fdf1a922e4f6e07a11768ea1b92dec44e42b72712ccfce"
+dependencies = [
+ "memchr",
+]
+
[[package]]
name = "oid-registry"
version = "0.6.1"
@@ -10357,7 +10322,7 @@ dependencies = [
"anyhow",
"frame-system",
"parity-wasm",
- "polkavm-linker",
+ "polkavm-linker 0.9.2",
"sp-runtime",
"tempfile",
"toml 0.8.8",
@@ -10417,7 +10382,7 @@ dependencies = [
"bitflags 1.3.2",
"parity-scale-codec",
"paste",
- "polkavm-derive",
+ "polkavm-derive 0.9.1",
"scale-info",
]
@@ -10963,7 +10928,7 @@ name = "pallet-minimal-template"
version = "0.0.0"
dependencies = [
"parity-scale-codec",
- "polkadot-sdk-frame",
+ "polkadot-sdk",
"scale-info",
]
@@ -11412,6 +11377,115 @@ dependencies = [
"sp-runtime",
]
+[[package]]
+name = "pallet-revive"
+version = "0.1.0"
+dependencies = [
+ "array-bytes",
+ "assert_matches",
+ "bitflags 1.3.2",
+ "environmental",
+ "frame-benchmarking",
+ "frame-support",
+ "frame-system",
+ "impl-trait-for-tuples",
+ "log",
+ "pallet-assets",
+ "pallet-balances",
+ "pallet-message-queue",
+ "pallet-proxy",
+ "pallet-revive-fixtures",
+ "pallet-revive-proc-macro",
+ "pallet-revive-uapi",
+ "pallet-timestamp",
+ "pallet-utility",
+ "parity-scale-codec",
+ "paste",
+ "polkavm 0.10.0",
+ "pretty_assertions",
+ "scale-info",
+ "serde",
+ "sp-api",
+ "sp-core",
+ "sp-io",
+ "sp-keystore",
+ "sp-runtime",
+ "sp-std 14.0.0",
+ "sp-tracing 16.0.0",
+ "staging-xcm",
+ "staging-xcm-builder",
+ "wat",
+]
+
+[[package]]
+name = "pallet-revive-fixtures"
+version = "0.1.0"
+dependencies = [
+ "anyhow",
+ "frame-system",
+ "parity-wasm",
+ "polkavm-linker 0.10.0",
+ "sp-runtime",
+ "tempfile",
+ "toml 0.8.8",
+]
+
+[[package]]
+name = "pallet-revive-mock-network"
+version = "0.1.0"
+dependencies = [
+ "assert_matches",
+ "frame-support",
+ "frame-system",
+ "pallet-assets",
+ "pallet-balances",
+ "pallet-message-queue",
+ "pallet-proxy",
+ "pallet-revive",
+ "pallet-revive-fixtures",
+ "pallet-revive-proc-macro",
+ "pallet-revive-uapi",
+ "pallet-timestamp",
+ "pallet-utility",
+ "pallet-xcm",
+ "parity-scale-codec",
+ "polkadot-parachain-primitives",
+ "polkadot-primitives",
+ "polkadot-runtime-parachains",
+ "pretty_assertions",
+ "scale-info",
+ "sp-api",
+ "sp-core",
+ "sp-io",
+ "sp-keystore",
+ "sp-runtime",
+ "sp-tracing 16.0.0",
+ "staging-xcm",
+ "staging-xcm-builder",
+ "staging-xcm-executor",
+ "xcm-simulator",
+]
+
+[[package]]
+name = "pallet-revive-proc-macro"
+version = "0.1.0"
+dependencies = [
+ "proc-macro2 1.0.82",
+ "quote 1.0.36",
+ "syn 2.0.61",
+]
+
+[[package]]
+name = "pallet-revive-uapi"
+version = "0.1.0"
+dependencies = [
+ "bitflags 1.3.2",
+ "parity-scale-codec",
+ "paste",
+ "polkavm-derive 0.10.0",
+ "scale-info",
+]
+
[[package]]
name = "pallet-root-offences"
version = "25.0.0"
@@ -13918,6 +13992,7 @@ dependencies = [
"sc-chain-spec",
"sc-cli",
"sc-client-api",
+ "sc-client-db",
"sc-consensus",
"sc-executor",
"sc-network",
@@ -14304,6 +14379,11 @@ dependencies = [
"pallet-recovery",
"pallet-referenda",
"pallet-remark",
+ "pallet-revive",
+ "pallet-revive-fixtures",
+ "pallet-revive-mock-network",
+ "pallet-revive-proc-macro",
+ "pallet-revive-uapi",
"pallet-root-offences",
"pallet-root-testing",
"pallet-safe-mode",
@@ -15093,9 +15173,22 @@ checksum = "8a3693e5efdb2bf74e449cd25fd777a28bd7ed87e41f5d5da75eb31b4de48b94"
dependencies = [
"libc",
"log",
- "polkavm-assembler",
- "polkavm-common",
- "polkavm-linux-raw",
+ "polkavm-assembler 0.9.0",
+ "polkavm-common 0.9.0",
+ "polkavm-linux-raw 0.9.0",
+]
+
+[[package]]
+name = "polkavm"
+version = "0.10.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b7ec0c5935f2eff23cfc4653002f4f8d12b37f87a720e0631282d188c32089d6"
+dependencies = [
+ "libc",
+ "log",
+ "polkavm-assembler 0.10.0",
+ "polkavm-common 0.10.0",
+ "polkavm-linux-raw 0.10.0",
]
[[package]]
@@ -15107,6 +15200,15 @@ dependencies = [
"log",
]
+[[package]]
+name = "polkavm-assembler"
+version = "0.10.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d8e4fd5a43100bf1afe9727b8130d01f966f5cfc9144d5604b21e795c2bcd80e"
+dependencies = [
+ "log",
+]
+
[[package]]
name = "polkavm-common"
version = "0.9.0"
@@ -15116,13 +15218,32 @@ dependencies = [
"log",
]
+[[package]]
+name = "polkavm-common"
+version = "0.10.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0097b48bc0bedf9f3f537ce8f37e8f1202d8d83f9b621bdb21ff2c59b9097c50"
+dependencies = [
+ "log",
+ "polkavm-assembler 0.10.0",
+]
+
[[package]]
name = "polkavm-derive"
version = "0.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ae8c4bea6f3e11cd89bb18bcdddac10bd9a24015399bd1c485ad68a985a19606"
dependencies = [
- "polkavm-derive-impl-macro",
+ "polkavm-derive-impl-macro 0.9.0",
+]
+
+[[package]]
+name = "polkavm-derive"
+version = "0.10.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0dcc701385c08c31bdb0569f0c51a290c580d892fa77f1dd88a7352a62679ecf"
+dependencies = [
+ "polkavm-derive-impl-macro 0.10.0",
]
[[package]]
@@ -15131,7 +15252,19 @@ version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5c4fdfc49717fb9a196e74a5d28e0bc764eb394a2c803eb11133a31ac996c60c"
dependencies = [
- "polkavm-common",
+ "polkavm-common 0.9.0",
+ "proc-macro2 1.0.82",
+ "quote 1.0.36",
+ "syn 2.0.61",
+]
+
+[[package]]
+name = "polkavm-derive-impl"
+version = "0.10.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7855353a5a783dd5d09e3b915474bddf66575f5a3cf45dec8d1c5e051ba320dc"
+dependencies = [
+ "polkavm-common 0.10.0",
"proc-macro2 1.0.82",
"quote 1.0.36",
"syn 2.0.61",
@@ -15143,7 +15276,17 @@ version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8ba81f7b5faac81e528eb6158a6f3c9e0bb1008e0ffa19653bc8dea925ecb429"
dependencies = [
- "polkavm-derive-impl",
+ "polkavm-derive-impl 0.9.0",
+ "syn 2.0.61",
+]
+
+[[package]]
+name = "polkavm-derive-impl-macro"
+version = "0.10.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9324fe036de37c17829af233b46ef6b5562d4a0c09bb7fdb9f8378856dee30cf"
+dependencies = [
+ "polkavm-derive-impl 0.10.0",
"syn 2.0.61",
]
@@ -15157,7 +15300,22 @@ dependencies = [
"hashbrown 0.14.3",
"log",
"object 0.32.2",
- "polkavm-common",
+ "polkavm-common 0.9.0",
+ "regalloc2 0.9.3",
+ "rustc-demangle",
+]
+
+[[package]]
+name = "polkavm-linker"
+version = "0.10.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5d704edfe7bdcc876784f19436d53d515b65eb07bc9a0fae77085d552c2dbbb5"
+dependencies = [
+ "gimli 0.28.0",
+ "hashbrown 0.14.3",
+ "log",
+ "object 0.36.1",
+ "polkavm-common 0.10.0",
"regalloc2 0.9.3",
"rustc-demangle",
]
@@ -15168,6 +15326,12 @@ version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "26e85d3456948e650dff0cfc85603915847faf893ed1e66b020bb82ef4557120"
+[[package]]
+name = "polkavm-linux-raw"
+version = "0.10.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "26e45fa59c7e1bb12ef5289080601e9ec9b31435f6e32800a5c90c132453d126"
+
[[package]]
name = "polling"
version = "2.8.0"
@@ -15777,7 +15941,7 @@ dependencies = [
"pin-project-lite",
"quinn-proto 0.9.6",
"quinn-udp 0.3.2",
- "rustc-hash",
+ "rustc-hash 1.1.0",
"rustls 0.20.9",
"thiserror",
"tokio",
@@ -15796,7 +15960,7 @@ dependencies = [
"pin-project-lite",
"quinn-proto 0.10.6",
"quinn-udp 0.4.1",
- "rustc-hash",
+ "rustc-hash 1.1.0",
"rustls 0.21.7",
"thiserror",
"tokio",
@@ -15812,7 +15976,7 @@ dependencies = [
"bytes",
"rand",
"ring 0.16.20",
- "rustc-hash",
+ "rustc-hash 1.1.0",
"rustls 0.20.9",
"slab",
"thiserror",
@@ -15830,7 +15994,7 @@ dependencies = [
"bytes",
"rand",
"ring 0.16.20",
- "rustc-hash",
+ "rustc-hash 1.1.0",
"rustls 0.21.7",
"slab",
"thiserror",
@@ -16103,7 +16267,7 @@ checksum = "ad156d539c879b7a24a363a2016d77961786e71f48f2e2fc8302a92abd2429a6"
dependencies = [
"hashbrown 0.13.2",
"log",
- "rustc-hash",
+ "rustc-hash 1.1.0",
"slice-group-by",
"smallvec",
]
@@ -16704,6 +16868,12 @@ version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2"
+[[package]]
+name = "rustc-hash"
+version = "2.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "583034fd73374156e66797ed8e5b0d5690409c9226b22d87cb7f19821c05d152"
+
[[package]]
name = "rustc-hex"
version = "2.1.0"
@@ -16925,9 +17095,9 @@ dependencies = [
[[package]]
name = "rustversion"
-version = "1.0.14"
+version = "1.0.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4"
+checksum = "955d28af4278de8121b7ebeb796b6a45735dc01436d898801014aced2773a3d6"
[[package]]
name = "rusty-fork"
@@ -17638,7 +17808,7 @@ dependencies = [
name = "sc-executor-common"
version = "0.29.0"
dependencies = [
- "polkavm",
+ "polkavm 0.9.3",
"sc-allocator",
"sp-maybe-compressed-blob",
"sp-wasm-interface 20.0.0",
@@ -17651,7 +17821,7 @@ name = "sc-executor-polkavm"
version = "0.29.0"
dependencies = [
"log",
- "polkavm",
+ "polkavm 0.9.3",
"sc-executor-common",
"sp-wasm-interface 20.0.0",
]
@@ -18074,11 +18244,9 @@ dependencies = [
"sp-runtime",
"sp-session",
"sp-statement-store",
- "sp-tracing 16.0.0",
"sp-version",
"substrate-test-runtime-client",
"tokio",
- "tracing-subscriber 0.3.18",
]
[[package]]
@@ -18104,6 +18272,7 @@ dependencies = [
name = "sc-rpc-server"
version = "11.0.0"
dependencies = [
+ "dyn-clone",
"forwarded-header-value",
"futures",
"governor",
@@ -18113,6 +18282,7 @@ dependencies = [
"ip_network",
"jsonrpsee",
"log",
+ "sc-rpc-api",
"serde",
"serde_json",
"substrate-prometheus-endpoint",
@@ -18390,7 +18560,7 @@ dependencies = [
"parity-scale-codec",
"parking_lot 0.12.3",
"regex",
- "rustc-hash",
+ "rustc-hash 1.1.0",
"sc-client-api",
"sc-tracing-proc-macro",
"serde",
@@ -19750,7 +19920,6 @@ dependencies = [
"sc-executor",
"sc-network",
"sc-offchain",
- "sc-rpc-api",
"sc-service",
"sc-telemetry",
"sc-transaction-pool",
@@ -20312,7 +20481,7 @@ dependencies = [
"libsecp256k1",
"log",
"parity-scale-codec",
- "polkavm-derive",
+ "polkavm-derive 0.9.1",
"rustversion",
"secp256k1",
"sp-core",
@@ -20439,7 +20608,7 @@ dependencies = [
name = "sp-rpc"
version = "26.0.0"
dependencies = [
- "rustc-hash",
+ "rustc-hash 1.1.0",
"serde",
"serde_json",
"sp-core",
@@ -20501,7 +20670,7 @@ dependencies = [
"bytes",
"impl-trait-for-tuples",
"parity-scale-codec",
- "polkavm-derive",
+ "polkavm-derive 0.9.1",
"primitive-types",
"rustversion",
"sp-core",
@@ -21546,7 +21715,7 @@ dependencies = [
"merkleized-metadata",
"parity-scale-codec",
"parity-wasm",
- "polkavm-linker",
+ "polkavm-linker 0.9.2",
"sc-executor",
"sp-core",
"sp-io",
diff --git a/Cargo.toml b/Cargo.toml
index 397163b3cce1..f26a894960a7 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -395,6 +395,11 @@ members = [
"substrate/frame/recovery",
"substrate/frame/referenda",
"substrate/frame/remark",
+ "substrate/frame/revive",
+ "substrate/frame/revive/fixtures",
+ "substrate/frame/revive/mock-network",
+ "substrate/frame/revive/proc-macro",
+ "substrate/frame/revive/uapi",
"substrate/frame/root-offences",
"substrate/frame/root-testing",
"substrate/frame/safe-mode",
@@ -579,7 +584,7 @@ ahash = { version = "0.8.2" }
alloy-primitives = { version = "0.4.2", default-features = false }
alloy-sol-types = { version = "0.4.2", default-features = false }
always-assert = { version = "0.1" }
-anyhow = { version = "1.0.81" }
+anyhow = { version = "1.0.81", default-features = false }
aquamarine = { version = "0.5.0" }
arbitrary = { version = "1.3.2" }
ark-bls12-377 = { version = "0.4.0", default-features = false }
@@ -616,7 +621,7 @@ bip39 = { version = "2.0.0" }
bitflags = { version = "1.3.2" }
bitvec = { version = "1.0.1", default-features = false }
blake2 = { version = "0.10.4", default-features = false }
-blake2b_simd = { version = "1.0.1", default-features = false }
+blake2b_simd = { version = "1.0.2", default-features = false }
blake3 = { version = "1.5" }
bounded-collections = { version = "0.2.0", default-features = false }
bounded-vec = { version = "0.7" }
@@ -807,8 +812,8 @@ isahc = { version = "1.2" }
itertools = { version = "0.11" }
jobserver = { version = "0.1.26" }
jsonpath_lib = { version = "0.3" }
-jsonrpsee = { version = "0.23.2" }
-jsonrpsee-core = { version = "0.23.2" }
+jsonrpsee = { version = "0.24.3" }
+jsonrpsee-core = { version = "0.24.3" }
k256 = { version = "0.13.3", default-features = false }
kitchensink-runtime = { path = "substrate/bin/node/runtime" }
kvdb = { version = "0.13.0" }
@@ -894,7 +899,7 @@ pallet-collator-selection = { path = "cumulus/pallets/collator-selection", defau
pallet-collective = { path = "substrate/frame/collective", default-features = false }
pallet-collective-content = { path = "cumulus/parachains/pallets/collective-content", default-features = false }
pallet-contracts = { path = "substrate/frame/contracts", default-features = false }
-pallet-contracts-fixtures = { path = "substrate/frame/contracts/fixtures" }
+pallet-contracts-fixtures = { path = "substrate/frame/contracts/fixtures", default-features = false }
pallet-contracts-mock-network = { default-features = false, path = "substrate/frame/contracts/mock-network" }
pallet-contracts-proc-macro = { path = "substrate/frame/contracts/proc-macro", default-features = false }
pallet-contracts-uapi = { path = "substrate/frame/contracts/uapi", default-features = false }
@@ -950,6 +955,11 @@ pallet-ranked-collective = { path = "substrate/frame/ranked-collective", default
pallet-recovery = { path = "substrate/frame/recovery", default-features = false }
pallet-referenda = { path = "substrate/frame/referenda", default-features = false }
pallet-remark = { default-features = false, path = "substrate/frame/remark" }
+pallet-revive = { path = "substrate/frame/revive", default-features = false }
+pallet-revive-fixtures = { path = "substrate/frame/revive/fixtures", default-features = false }
+pallet-revive-mock-network = { default-features = false, path = "substrate/frame/revive/mock-network" }
+pallet-revive-proc-macro = { path = "substrate/frame/revive/proc-macro", default-features = false }
+pallet-revive-uapi = { path = "substrate/frame/revive/uapi", default-features = false }
pallet-root-offences = { default-features = false, path = "substrate/frame/root-offences" }
pallet-root-testing = { path = "substrate/frame/root-testing", default-features = false }
pallet-safe-mode = { default-features = false, path = "substrate/frame/safe-mode" }
@@ -1060,7 +1070,7 @@ polkadot-subsystem-bench = { path = "polkadot/node/subsystem-bench" }
polkadot-test-client = { path = "polkadot/node/test/client" }
polkadot-test-runtime = { path = "polkadot/runtime/test-runtime" }
polkadot-test-service = { path = "polkadot/node/test/service" }
-polkavm = "0.9.3"
+polkavm = { version = "0.9.3", default-features = false }
polkavm-derive = "0.9.1"
polkavm-linker = "0.9.2"
portpicker = { version = "0.1.1" }
@@ -1105,7 +1115,7 @@ rstest = { version = "0.18.2" }
rustc-hash = { version = "1.1.0" }
rustc-hex = { version = "2.1.0", default-features = false }
rustix = { version = "0.36.7", default-features = false }
-rustversion = { version = "1.0.6" }
+rustversion = { version = "1.0.17" }
rusty-fork = { version = "0.3.0", default-features = false }
safe-mix = { version = "1.0", default-features = false }
sc-allocator = { path = "substrate/client/allocator", default-features = false }
diff --git a/bridges/modules/messages/src/lib.rs b/bridges/modules/messages/src/lib.rs
index bf105b140401..c36313a14764 100644
--- a/bridges/modules/messages/src/lib.rs
+++ b/bridges/modules/messages/src/lib.rs
@@ -70,7 +70,6 @@ use bp_runtime::{
};
use codec::{Decode, Encode, MaxEncodedLen};
use frame_support::{dispatch::PostDispatchInfo, ensure, fail, traits::Get, DefaultNoBound};
-use sp_runtime::traits::UniqueSaturatedFrom;
use sp_std::{marker::PhantomData, prelude::*};
mod inbound_lane;
@@ -153,40 +152,6 @@ pub mod pallet {
type OperatingModeStorage = PalletOperatingMode<T, I>;
}
- #[pallet::hooks]
- impl<T: Config<I>, I: 'static> Hooks<BlockNumberFor<T>> for Pallet<T, I>
- where
- u32: TryFrom<BlockNumberFor<T>>,
- {
- fn on_idle(_block: BlockNumberFor<T>, remaining_weight: Weight) -> Weight {
- // we'll need at least to read outbound lane state, kill a message and update lane state
- let db_weight = T::DbWeight::get();
- if !remaining_weight.all_gte(db_weight.reads_writes(1, 2)) {
- return Weight::zero()
- }
-
- // messages from lane with index `i` in `ActiveOutboundLanes` are pruned when
- // `System::block_number() % lanes.len() == i`. Otherwise we need to read lane states on
- // every block, wasting the whole `remaining_weight` for nothing and causing starvation
- // of the last lane pruning
- let active_lanes = T::ActiveOutboundLanes::get();
- let active_lanes_len = (active_lanes.len() as u32).into();
- let active_lane_index = u32::unique_saturated_from(
- frame_system::Pallet::<T>::block_number() % active_lanes_len,
- );
- let active_lane_id = active_lanes[active_lane_index as usize];
-
- // first db read - outbound lane state
- let mut active_lane = outbound_lane::<T, I>(active_lane_id);
- let mut used_weight = db_weight.reads(1);
- // and here we'll have writes
- used_weight += active_lane.prune_messages(db_weight, remaining_weight - used_weight);
-
- // we already checked we have enough `remaining_weight` to cover this `used_weight`
- used_weight
- }
- }
-
#[pallet::call]
impl<T: Config<I>, I: 'static> Pallet<T, I> {
/// Change `PalletOwner`.
@@ -610,6 +575,14 @@ pub mod pallet {
}
}
+ #[pallet::hooks]
+ impl<T: Config<I>, I: 'static> Hooks<BlockNumberFor<T>> for Pallet<T, I> {
+ #[cfg(feature = "try-runtime")]
+ fn try_state(_n: BlockNumberFor<T>) -> Result<(), sp_runtime::TryRuntimeError> {
+ Self::do_try_state()
+ }
+ }
+
impl<T: Config<I>, I: 'static> Pallet<T, I> {
/// Get stored data of the outbound message with given nonce.
pub fn outbound_message_data(lane: LaneId, nonce: MessageNonce) -> Option<StoredMessagePayload<T, I>> {
@@ -644,6 +617,58 @@ pub mod pallet {
}
}
+ #[cfg(any(feature = "try-runtime", test))]
+ impl<T: Config<I>, I: 'static> Pallet<T, I> {
+ /// Ensure the correctness of the state of this pallet.
+ pub fn do_try_state() -> Result<(), sp_runtime::TryRuntimeError> {
+ Self::do_try_state_for_outbound_lanes()
+ }
+
+ /// Ensure the correctness of the state of outbound lanes.
+ pub fn do_try_state_for_outbound_lanes() -> Result<(), sp_runtime::TryRuntimeError> {
+ use sp_runtime::traits::One;
+ use sp_std::vec::Vec;
+
+ // collect unpruned lanes
+ let mut unpruned_lanes = Vec::new();
+ for (lane_id, lane_data) in OutboundLanes::<T, I>::iter() {
+ let Some(expected_last_pruned_nonce) =
+ lane_data.oldest_unpruned_nonce.checked_sub(One::one())
+ else {
+ continue;
+ };
+
+ // collect message_nonces that were supposed to be pruned
+ let mut unpruned_message_nonces = Vec::new();
+ const MAX_MESSAGES_ITERATION: u64 = 16;
+ let start_nonce =
+ expected_last_pruned_nonce.checked_sub(MAX_MESSAGES_ITERATION).unwrap_or(0);
+ for current_nonce in start_nonce..=expected_last_pruned_nonce {
+ // check a message for current_nonce
+ if OutboundMessages::<T, I>::contains_key(MessageKey {
+ lane_id,
+ nonce: current_nonce,
+ }) {
+ unpruned_message_nonces.push(current_nonce);
+ }
+ }
+
+ if !unpruned_message_nonces.is_empty() {
+ log::warn!(
+ target: LOG_TARGET,
+ "do_try_state_for_outbound_lanes for lane_id: {lane_id:?} with lane_data: {lane_data:?} found unpruned_message_nonces: {unpruned_message_nonces:?}",
+ );
+ unpruned_lanes.push((lane_id, lane_data, unpruned_message_nonces));
+ }
+ }
+
+ // ensure messages before `oldest_unpruned_nonce` are really pruned.
+ ensure!(unpruned_lanes.is_empty(), "Found unpruned lanes!");
+
+ Ok(())
+ }
+ }
+
/// Get-parameter that returns number of active outbound lanes that the pallet maintains.
pub struct MaybeOutboundLanesCount<T, I>(PhantomData<(T, I)>);
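
With pruning now done eagerly on delivery confirmation and the invariant captured in the new `try_state` hook, the check runs whenever the runtime is exercised under try-runtime. A minimal sketch of such a run against one of the endpoints referenced in this diff (flags per try-runtime-cli as I recall them; verify against your installed version, and the wasm path is illustrative):

```bash
# Hedged sketch: exercise the new do_try_state() via try-runtime-cli against
# a live bridge hub node. --checks=all asks the CLI to run try_state hooks
# alongside the pre/post migration checks.
try-runtime \
  --runtime ./bridge_hub_westend_runtime.compact.compressed.wasm \
  on-runtime-upgrade --checks=all \
  live --uri wss://westend-bridge-hub-rpc.polkadot.io:443
```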
diff --git a/bridges/modules/messages/src/outbound_lane.rs b/bridges/modules/messages/src/outbound_lane.rs
index fcdddf199dc6..788a13e82b1b 100644
--- a/bridges/modules/messages/src/outbound_lane.rs
+++ b/bridges/modules/messages/src/outbound_lane.rs
@@ -22,13 +22,9 @@ use bp_messages::{
ChainWithMessages, DeliveredMessages, LaneId, MessageNonce, OutboundLaneData, UnrewardedRelayer,
};
use codec::{Decode, Encode};
-use frame_support::{
- traits::Get,
- weights::{RuntimeDbWeight, Weight},
- BoundedVec, PalletError,
-};
+use frame_support::{traits::Get, BoundedVec, PalletError};
use scale_info::TypeInfo;
-use sp_runtime::{traits::Zero, RuntimeDebug};
+use sp_runtime::RuntimeDebug;
use sp_std::{collections::vec_deque::VecDeque, marker::PhantomData};
/// Outbound lane storage.
@@ -143,41 +139,17 @@ impl<S: OutboundLaneStorage> OutboundLane<S> {
ensure_unrewarded_relayers_are_correct(confirmed_messages.end, relayers)?;
+ // prune all confirmed messages
+ for nonce in confirmed_messages.begin..=confirmed_messages.end {
+ self.storage.remove_message(&nonce);
+ }
+
data.latest_received_nonce = confirmed_messages.end;
+ data.oldest_unpruned_nonce = data.latest_received_nonce.saturating_add(1);
self.storage.set_data(data);
Ok(Some(confirmed_messages))
}
-
- /// Prune at most `max_messages_to_prune` already received messages.
- ///
- /// Returns weight, consumed by messages pruning and lane state update.
- pub fn prune_messages(
- &mut self,
- db_weight: RuntimeDbWeight,
- mut remaining_weight: Weight,
- ) -> Weight {
- let write_weight = db_weight.writes(1);
- let two_writes_weight = write_weight + write_weight;
- let mut spent_weight = Weight::zero();
- let mut data = self.storage.data();
- while remaining_weight.all_gte(two_writes_weight) &&
- data.oldest_unpruned_nonce <= data.latest_received_nonce
- {
- self.storage.remove_message(&data.oldest_unpruned_nonce);
-
- spent_weight += write_weight;
- remaining_weight -= write_weight;
- data.oldest_unpruned_nonce += 1;
- }
-
- if !spent_weight.is_zero() {
- spent_weight += write_weight;
- self.storage.set_data(data);
- }
-
- spent_weight
- }
}
/// Verifies unrewarded relayers vec.
@@ -221,7 +193,6 @@ mod tests {
REGULAR_PAYLOAD, TEST_LANE_ID,
},
};
- use frame_support::weights::constants::RocksDbWeight;
use sp_std::ops::RangeInclusive;
fn unrewarded_relayers(
@@ -281,7 +252,7 @@ mod tests {
);
assert_eq!(lane.storage.data().latest_generated_nonce, 3);
assert_eq!(lane.storage.data().latest_received_nonce, 3);
- assert_eq!(lane.storage.data().oldest_unpruned_nonce, 1);
+ assert_eq!(lane.storage.data().oldest_unpruned_nonce, 4);
});
}
@@ -302,7 +273,7 @@ mod tests {
);
assert_eq!(lane.storage.data().latest_generated_nonce, 3);
assert_eq!(lane.storage.data().latest_received_nonce, 2);
- assert_eq!(lane.storage.data().oldest_unpruned_nonce, 1);
+ assert_eq!(lane.storage.data().oldest_unpruned_nonce, 3);
assert_eq!(
lane.confirm_delivery(3, 3, &unrewarded_relayers(3..=3)),
@@ -310,7 +281,7 @@ mod tests {
);
assert_eq!(lane.storage.data().latest_generated_nonce, 3);
assert_eq!(lane.storage.data().latest_received_nonce, 3);
- assert_eq!(lane.storage.data().oldest_unpruned_nonce, 1);
+ assert_eq!(lane.storage.data().oldest_unpruned_nonce, 4);
});
}
@@ -331,12 +302,12 @@ mod tests {
assert_eq!(lane.confirm_delivery(3, 3, &unrewarded_relayers(1..=3)), Ok(None),);
assert_eq!(lane.storage.data().latest_generated_nonce, 3);
assert_eq!(lane.storage.data().latest_received_nonce, 3);
- assert_eq!(lane.storage.data().oldest_unpruned_nonce, 1);
+ assert_eq!(lane.storage.data().oldest_unpruned_nonce, 4);
assert_eq!(lane.confirm_delivery(1, 2, &unrewarded_relayers(1..=1)), Ok(None),);
assert_eq!(lane.storage.data().latest_generated_nonce, 3);
assert_eq!(lane.storage.data().latest_received_nonce, 3);
- assert_eq!(lane.storage.data().oldest_unpruned_nonce, 1);
+ assert_eq!(lane.storage.data().oldest_unpruned_nonce, 4);
});
}
@@ -394,57 +365,6 @@ mod tests {
);
}
- #[test]
- fn prune_messages_works() {
- run_test(|| {
- let mut lane = outbound_lane::<TestRuntime, ()>(TEST_LANE_ID);
- // when lane is empty, nothing is pruned
- assert_eq!(
- lane.prune_messages(RocksDbWeight::get(), RocksDbWeight::get().writes(101)),
- Weight::zero()
- );
- assert_eq!(lane.storage.data().oldest_unpruned_nonce, 1);
- // when nothing is confirmed, nothing is pruned
- lane.send_message(outbound_message_data(REGULAR_PAYLOAD));
- lane.send_message(outbound_message_data(REGULAR_PAYLOAD));
- lane.send_message(outbound_message_data(REGULAR_PAYLOAD));
- assert!(lane.storage.message(&1).is_some());
- assert!(lane.storage.message(&2).is_some());
- assert!(lane.storage.message(&3).is_some());
- assert_eq!(
- lane.prune_messages(RocksDbWeight::get(), RocksDbWeight::get().writes(101)),
- Weight::zero()
- );
- assert_eq!(lane.storage.data().oldest_unpruned_nonce, 1);
- // after confirmation, some messages are received
- assert_eq!(
- lane.confirm_delivery(2, 2, &unrewarded_relayers(1..=2)),
- Ok(Some(delivered_messages(1..=2))),
- );
- assert_eq!(
- lane.prune_messages(RocksDbWeight::get(), RocksDbWeight::get().writes(101)),
- RocksDbWeight::get().writes(3),
- );
- assert!(lane.storage.message(&1).is_none());
- assert!(lane.storage.message(&2).is_none());
- assert!(lane.storage.message(&3).is_some());
- assert_eq!(lane.storage.data().oldest_unpruned_nonce, 3);
- // after last message is confirmed, everything is pruned
- assert_eq!(
- lane.confirm_delivery(1, 3, &unrewarded_relayers(3..=3)),
- Ok(Some(delivered_messages(3..=3))),
- );
- assert_eq!(
- lane.prune_messages(RocksDbWeight::get(), RocksDbWeight::get().writes(101)),
- RocksDbWeight::get().writes(2),
- );
- assert!(lane.storage.message(&1).is_none());
- assert!(lane.storage.message(&2).is_none());
- assert!(lane.storage.message(&3).is_none());
- assert_eq!(lane.storage.data().oldest_unpruned_nonce, 4);
- });
- }
-
#[test]
fn confirm_delivery_detects_when_more_than_expected_messages_are_confirmed() {
run_test(|| {
diff --git a/bridges/modules/messages/src/tests/pallet_tests.rs b/bridges/modules/messages/src/tests/pallet_tests.rs
index 42e1042717de..f7a288d649a9 100644
--- a/bridges/modules/messages/src/tests/pallet_tests.rs
+++ b/bridges/modules/messages/src/tests/pallet_tests.rs
@@ -38,15 +38,14 @@ use bp_runtime::{BasicOperatingMode, PreComputedSize, RangeInclusiveExt, Size};
use bp_test_utils::generate_owned_bridge_module_tests;
use codec::Encode;
use frame_support::{
- assert_noop, assert_ok,
+ assert_err, assert_noop, assert_ok,
dispatch::Pays,
storage::generator::{StorageMap, StorageValue},
- traits::Hooks,
weights::Weight,
};
use frame_system::{EventRecord, Pallet as System, Phase};
use sp_core::Get;
-use sp_runtime::DispatchError;
+use sp_runtime::{BoundedVec, DispatchError};
fn get_ready_for_events() {
System::<TestRuntime>::set_block_number(1);
@@ -99,6 +98,7 @@ fn receive_messages_delivery_proof() {
last_delivered_nonce: 1,
},
));
+ assert_ok!(Pallet::<TestRuntime, ()>::do_try_state());
assert_eq!(
System::<TestRuntime>::events(),
@@ -160,6 +160,7 @@ fn pallet_rejects_transactions_if_halted() {
),
Error::<TestRuntime, ()>::BridgeModule(bp_runtime::OwnedBridgeModuleError::Halted),
);
+ assert_ok!(Pallet::<TestRuntime, ()>::do_try_state());
});
}
@@ -220,6 +221,7 @@ fn pallet_rejects_new_messages_in_rejecting_outbound_messages_operating_mode() {
last_delivered_nonce: 1,
},
));
+ assert_ok!(Pallet::<TestRuntime, ()>::do_try_state());
});
}
@@ -395,10 +397,14 @@ fn receive_messages_proof_rejects_proof_with_too_many_messages() {
#[test]
fn receive_messages_delivery_proof_works() {
run_test(|| {
+ assert_eq!(OutboundLanes::<TestRuntime, ()>::get(TEST_LANE_ID).latest_received_nonce, 0);
+ assert_eq!(OutboundLanes::<TestRuntime, ()>::get(TEST_LANE_ID).oldest_unpruned_nonce, 1);
+
send_regular_message(TEST_LANE_ID);
receive_messages_delivery_proof();
- assert_eq!(OutboundLanes::<TestRuntime, ()>::get(TEST_LANE_ID).latest_received_nonce, 1,);
+ assert_eq!(OutboundLanes::<TestRuntime, ()>::get(TEST_LANE_ID).latest_received_nonce, 1);
+ assert_eq!(OutboundLanes::<TestRuntime, ()>::get(TEST_LANE_ID).oldest_unpruned_nonce, 2);
});
}
@@ -428,6 +434,7 @@ fn receive_messages_delivery_proof_rewards_relayers() {
},
);
assert_ok!(result);
+ assert_ok!(Pallet::<TestRuntime, ()>::do_try_state());
assert_eq!(
result.unwrap().actual_weight.unwrap(),
TestWeightInfo::receive_messages_delivery_proof_weight(
@@ -467,6 +474,7 @@ fn receive_messages_delivery_proof_rewards_relayers() {
},
);
assert_ok!(result);
+ assert_ok!(Pallet::<TestRuntime, ()>::do_try_state());
// even though the pre-dispatch weight was for two messages, the actual weight is
// for single message only
assert_eq!(
@@ -852,129 +860,6 @@ fn inbound_message_details_works() {
});
}
-#[test]
-fn on_idle_callback_respects_remaining_weight() {
- run_test(|| {
- send_regular_message(TEST_LANE_ID);
- send_regular_message(TEST_LANE_ID);
- send_regular_message(TEST_LANE_ID);
- send_regular_message(TEST_LANE_ID);
-
- assert_ok!(Pallet::<TestRuntime, ()>::receive_messages_delivery_proof(
- RuntimeOrigin::signed(1),
- prepare_messages_delivery_proof(
- TEST_LANE_ID,
- InboundLaneData {
- last_confirmed_nonce: 4,
- relayers: vec![unrewarded_relayer(1, 4, TEST_RELAYER_A)].into(),
- },
- ),
- UnrewardedRelayersState {
- unrewarded_relayer_entries: 1,
- messages_in_oldest_entry: 4,
- total_messages: 4,
- last_delivered_nonce: 4,
- },
- ));
-
- // all 4 messages may be pruned now
- assert_eq!(outbound_lane::<TestRuntime, ()>(TEST_LANE_ID).data().latest_received_nonce, 4);
- assert_eq!(outbound_lane::<TestRuntime, ()>(TEST_LANE_ID).data().oldest_unpruned_nonce, 1);
- System::<TestRuntime>::set_block_number(2);
-
- // if passed wight is too low to do anything
- let dbw = DbWeight::get();
- assert_eq!(Pallet::<TestRuntime, ()>::on_idle(0, dbw.reads_writes(1, 1)), Weight::zero(),);
- assert_eq!(outbound_lane::<TestRuntime, ()>(TEST_LANE_ID).data().oldest_unpruned_nonce, 1);
-
- // if passed wight is enough to prune single message
- assert_eq!(
- Pallet::<TestRuntime, ()>::on_idle(0, dbw.reads_writes(1, 2)),
- dbw.reads_writes(1, 2),
- );
- assert_eq!(outbound_lane::<TestRuntime, ()>(TEST_LANE_ID).data().oldest_unpruned_nonce, 2);
-
- // if passed wight is enough to prune two more messages
- assert_eq!(
- Pallet::<TestRuntime, ()>::on_idle(0, dbw.reads_writes(1, 3)),
- dbw.reads_writes(1, 3),
- );
- assert_eq!(outbound_lane::<TestRuntime, ()>(TEST_LANE_ID).data().oldest_unpruned_nonce, 4);
-
- // if passed wight is enough to prune many messages
- assert_eq!(
- Pallet::<TestRuntime, ()>::on_idle(0, dbw.reads_writes(100, 100)),
- dbw.reads_writes(1, 2),
- );
- assert_eq!(outbound_lane::<TestRuntime, ()>(TEST_LANE_ID).data().oldest_unpruned_nonce, 5);
- });
-}
-
-#[test]
-fn on_idle_callback_is_rotating_lanes_to_prune() {
- run_test(|| {
- // send + receive confirmation for lane 1
- send_regular_message(TEST_LANE_ID);
- receive_messages_delivery_proof();
- // send + receive confirmation for lane 2
- send_regular_message(TEST_LANE_ID_2);
- assert_ok!(Pallet::<TestRuntime, ()>::receive_messages_delivery_proof(
- RuntimeOrigin::signed(1),
- prepare_messages_delivery_proof(
- TEST_LANE_ID_2,
- InboundLaneData {
- last_confirmed_nonce: 1,
- relayers: vec![unrewarded_relayer(1, 1, TEST_RELAYER_A)].into(),
- },
- ),
- UnrewardedRelayersState {
- unrewarded_relayer_entries: 1,
- messages_in_oldest_entry: 1,
- total_messages: 1,
- last_delivered_nonce: 1,
- },
- ));
-
- // nothing is pruned yet
- assert_eq!(outbound_lane::<TestRuntime, ()>(TEST_LANE_ID).data().latest_received_nonce, 1);
- assert_eq!(outbound_lane::<TestRuntime, ()>(TEST_LANE_ID).data().oldest_unpruned_nonce, 1);
- assert_eq!(
- outbound_lane::<TestRuntime, ()>(TEST_LANE_ID_2).data().latest_received_nonce,
- 1
- );
- assert_eq!(
- outbound_lane::<TestRuntime, ()>(TEST_LANE_ID_2).data().oldest_unpruned_nonce,
- 1
- );
-
- // in block#2.on_idle lane messages of lane 1 are pruned
- let dbw = DbWeight::get();
- System::<TestRuntime>::set_block_number(2);
- assert_eq!(
- Pallet::<TestRuntime, ()>::on_idle(0, dbw.reads_writes(100, 100)),
- dbw.reads_writes(1, 2),
- );
- assert_eq!(outbound_lane::<TestRuntime, ()>(TEST_LANE_ID).data().oldest_unpruned_nonce, 2);
- assert_eq!(
- outbound_lane::<TestRuntime, ()>(TEST_LANE_ID_2).data().oldest_unpruned_nonce,
- 1
- );
-
- // in block#3.on_idle lane messages of lane 2 are pruned
- System::<TestRuntime>::set_block_number(3);
-
- assert_eq!(
- Pallet::<TestRuntime, ()>::on_idle(0, dbw.reads_writes(100, 100)),
- dbw.reads_writes(1, 2),
- );
- assert_eq!(outbound_lane::<TestRuntime, ()>(TEST_LANE_ID).data().oldest_unpruned_nonce, 2);
- assert_eq!(
- outbound_lane::<TestRuntime, ()>(TEST_LANE_ID_2).data().oldest_unpruned_nonce,
- 2
- );
- });
-}
-
#[test]
fn outbound_message_from_unconfigured_lane_is_rejected() {
run_test(|| {
@@ -1098,3 +983,33 @@ fn maybe_outbound_lanes_count_returns_correct_value() {
Some(mock::ActiveOutboundLanes::get().len() as u32)
);
}
+
+#[test]
+fn do_try_state_for_outbound_lanes_works() {
+ run_test(|| {
+ let lane_id = TEST_LANE_ID;
+
+ // setup delivered nonce 1
+ OutboundLanes::<TestRuntime, ()>::insert(
+ lane_id,
+ OutboundLaneData {
+ oldest_unpruned_nonce: 2,
+ latest_received_nonce: 1,
+ latest_generated_nonce: 0,
+ },
+ );
+ // store message for nonce 1
+ OutboundMessages::<TestRuntime, ()>::insert(
+ MessageKey { lane_id, nonce: 1 },
+ BoundedVec::default(),
+ );
+ assert_err!(
+ Pallet::<TestRuntime, ()>::do_try_state(),
+ sp_runtime::TryRuntimeError::Other("Found unpruned lanes!")
+ );
+
+ // remove message for nonce 1
+ OutboundMessages::<TestRuntime, ()>::remove(MessageKey { lane_id, nonce: 1 });
+ assert_ok!(Pallet::<TestRuntime, ()>::do_try_state());
+ })
+}
diff --git a/bridges/relays/utils/Cargo.toml b/bridges/relays/utils/Cargo.toml
index beb03b9381d4..4c25566607dc 100644
--- a/bridges/relays/utils/Cargo.toml
+++ b/bridges/relays/utils/Cargo.toml
@@ -11,7 +11,7 @@ publish = false
workspace = true
[dependencies]
-anyhow = { workspace = true }
+anyhow = { workspace = true, default-features = true }
async-std = { workspace = true }
async-trait = { workspace = true }
backoff = { workspace = true }
diff --git a/bridges/testing/README.md b/bridges/testing/README.md
index bd467a410d01..158dfd73b1ad 100644
--- a/bridges/testing/README.md
+++ b/bridges/testing/README.md
@@ -1,31 +1,29 @@
# Bridges Tests for Local Rococo <> Westend Bridge
This folder contains [zombienet](https://github.com/paritytech/zombienet/) based integration tests for both
-onchain and offchain bridges code. Due to some
-[technical difficulties](https://github.com/paritytech/parity-bridges-common/pull/2649#issue-1965339051), we
-are using native zombienet provider, which means that you need to build some binaries locally.
+onchain and offchain bridges code.
-To start those tests, you need to:
+Prerequisites for running the tests locally:
- download latest [zombienet release](https://github.com/paritytech/zombienet/releases);
- build Polkadot binary by running `cargo build -p polkadot --release --features fast-runtime` command in the
-[`polkadot-sdk`](https://github.com/paritytech/polkadot-sdk) repository clone;
+ [`polkadot-sdk`](https://github.com/paritytech/polkadot-sdk) repository clone;
- build Polkadot Parachain binary by running `cargo build -p polkadot-parachain-bin --release` command in the
-[`polkadot-sdk`](https://github.com/paritytech/polkadot-sdk) repository clone;
+ [`polkadot-sdk`](https://github.com/paritytech/polkadot-sdk) repository clone;
- ensure that you have [`node`](https://nodejs.org/en) installed. Additionally, we'll need globally installed
-`polkadot/api-cli` package (use `npm install -g @polkadot/api-cli@beta` to install it);
+ `polkadot/api-cli` package (use `npm install -g @polkadot/api-cli@beta` to install it);
- build Substrate relay by running `cargo build -p substrate-relay --release` command in the
-[`parity-bridges-common`](https://github.com/paritytech/parity-bridges-common) repository clone.
+ [`parity-bridges-common`](https://github.com/paritytech/parity-bridges-common) repository clone;
-- copy fresh `substrate-relay` binary, built in previous point, to the `~/local_bridge_testing/bin/substrate-relay`;
+- copy the `substrate-relay` binary, built in the previous step, to `~/local_bridge_testing/bin/substrate-relay`;
-- change the `POLKADOT_SDK_PATH` and `ZOMBIENET_BINARY_PATH` (and ensure that the nearby variables
-have correct values) in the `./run-tests.sh`.
+After that, any test can be run using the `run-test.sh` command.
+Example: `./run-test.sh 0001-asset-transfer`
-After that, you could run tests with the `./run-tests.sh` command. Hopefully, it'll show the
+Hopefully, it'll show the
"All tests have completed successfully" message in the end. Otherwise, it'll print paths to zombienet
process logs, which, in turn, may be used to track locations of all spinned relay and parachain nodes.
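
For illustration, the same entry point as used by the GitLab zombienet jobs earlier in this diff (the `--docker` flag switches to container paths):

```bash
# Run a single bridges test locally, using natively built binaries:
./bridges/testing/run-test.sh 0001-asset-transfer

# Or with container paths, as the zombienet-bridges-* CI jobs do:
./bridges/testing/run-test.sh 0001-asset-transfer --docker
```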
diff --git a/bridges/testing/framework/js-helpers/only-required-headers-synced-when-idle.js b/bridges/testing/framework/js-helpers/only-required-headers-synced-when-active.js
similarity index 94%
rename from bridges/testing/framework/js-helpers/only-required-headers-synced-when-idle.js
rename to bridges/testing/framework/js-helpers/only-required-headers-synced-when-active.js
index 8c3130e4fd96..61738a21e38e 100644
--- a/bridges/testing/framework/js-helpers/only-required-headers-synced-when-idle.js
+++ b/bridges/testing/framework/js-helpers/only-required-headers-synced-when-active.js
@@ -65,8 +65,12 @@ async function run(nodeName, networkInfo, args) {
// wait until we have received + delivered messages OR until timeout
await utils.pollUntil(
exitAfterSeconds,
- () => { return atLeastOneMessageReceived && atLeastOneMessageDelivered; },
- () => { unsubscribe(); },
+ () => {
+ return atLeastOneMessageReceived && atLeastOneMessageDelivered;
+ },
+ () => {
+ unsubscribe();
+ },
() => {
if (!atLeastOneMessageReceived) {
throw new Error("No messages received from bridged chain");
@@ -78,4 +82,4 @@ async function run(nodeName, networkInfo, args) {
);
}
-module.exports = { run }
+module.exports = {run}
diff --git a/bridges/testing/run-new-test.sh b/bridges/testing/run-test.sh
similarity index 100%
rename from bridges/testing/run-new-test.sh
rename to bridges/testing/run-test.sh
diff --git a/bridges/testing/run-tests.sh b/bridges/testing/run-tests.sh
deleted file mode 100755
index fd12b57f5334..000000000000
--- a/bridges/testing/run-tests.sh
+++ /dev/null
@@ -1,138 +0,0 @@
-#!/bin/bash
-set -x
-shopt -s nullglob
-
-trap "trap - SIGINT SIGTERM EXIT && killall -q -9 substrate-relay && kill -- -$$" SIGINT SIGTERM EXIT
-
-# run tests in range [TESTS_BEGIN; TESTS_END)
-TESTS_BEGIN=1
-TESTS_END=1000
-# whether to use paths for zombienet+bridges tests container or for local testing
-ZOMBIENET_DOCKER_PATHS=0
-while [ $# -ne 0 ]
-do
- arg="$1"
- case "$arg" in
- --docker)
- ZOMBIENET_DOCKER_PATHS=1
- ;;
- --test)
- shift
- TESTS_BEGIN="$1"
- TESTS_END="$1"
- ;;
- esac
- shift
-done
-
-# assuming that we'll be using native provide && all processes will be executing locally
-# (we need absolute paths here, because they're used when scripts are called by zombienet from tmp folders)
-export POLKADOT_SDK_PATH=`realpath $(dirname "$0")/../..`
-export BRIDGE_TESTS_FOLDER=$POLKADOT_SDK_PATH/bridges/testing/tests
-
-# set path to binaries
-if [ "$ZOMBIENET_DOCKER_PATHS" -eq 1 ]; then
- export POLKADOT_BINARY=/usr/local/bin/polkadot
- export POLKADOT_PARACHAIN_BINARY=/usr/local/bin/polkadot-parachain
-
- export SUBSTRATE_RELAY_BINARY=/usr/local/bin/substrate-relay
- export ZOMBIENET_BINARY_PATH=/usr/local/bin/zombie
-else
- export POLKADOT_BINARY=$POLKADOT_SDK_PATH/target/release/polkadot
- export POLKADOT_PARACHAIN_BINARY=$POLKADOT_SDK_PATH/target/release/polkadot-parachain
-
- export SUBSTRATE_RELAY_BINARY=~/local_bridge_testing/bin/substrate-relay
- export ZOMBIENET_BINARY_PATH=~/local_bridge_testing/bin/zombienet-linux
-fi
-
-# check if `wait` supports -p flag
-if [ `printf "$BASH_VERSION\n5.1" | sort -V | head -n 1` = "5.1" ]; then IS_BASH_5_1=1; else IS_BASH_5_1=0; fi
-
-# bridge configuration
-export LANE_ID="00000002"
-
-# tests configuration
-ALL_TESTS_FOLDER=`mktemp -d /tmp/bridges-zombienet-tests.XXXXX`
-
-function start_coproc() {
- local command=$1
- local name=$2
- local logname=`basename $name`
- local coproc_log=`mktemp -p $TEST_FOLDER $logname.XXXXX`
- coproc COPROC {
- # otherwise zombienet uses some hardcoded paths
- unset RUN_IN_CONTAINER
- unset ZOMBIENET_IMAGE
-
- $command >$coproc_log 2>&1
- }
- TEST_COPROCS[$COPROC_PID, 0]=$name
- TEST_COPROCS[$COPROC_PID, 1]=$coproc_log
- echo "Spawned $name coprocess. StdOut + StdErr: $coproc_log"
-
- return $COPROC_PID
-}
-
-# execute every test from tests folder
-TEST_INDEX=$TESTS_BEGIN
-while true
-do
- declare -A TEST_COPROCS
- TEST_COPROCS_COUNT=0
- TEST_PREFIX=$(printf "%04d" $TEST_INDEX)
-
- # it'll be used by the `sync-exit.sh` script
- export TEST_FOLDER=`mktemp -d -p $ALL_TESTS_FOLDER test-$TEST_PREFIX.XXXXX`
-
- # check if there are no more tests
- zndsl_files=($BRIDGE_TESTS_FOLDER/$TEST_PREFIX-*.zndsl)
- if [ ${#zndsl_files[@]} -eq 0 ]; then
- break
- fi
-
- # start tests
- for zndsl_file in "${zndsl_files[@]}"; do
- start_coproc "$ZOMBIENET_BINARY_PATH --provider native test $zndsl_file" "$zndsl_file"
- echo -n "1">>$TEST_FOLDER/exit-sync
- ((TEST_COPROCS_COUNT++))
- done
- # wait until all tests are completed
- for n in `seq 1 $TEST_COPROCS_COUNT`; do
- if [ "$IS_BASH_5_1" -eq 1 ]; then
- wait -n -p COPROC_PID
- exit_code=$?
- coproc_name=${TEST_COPROCS[$COPROC_PID, 0]}
- coproc_log=${TEST_COPROCS[$COPROC_PID, 1]}
- coproc_stdout=$(cat $coproc_log)
- else
- wait -n
- exit_code=$?
- coproc_name=""
- coproc_stdout=""
- fi
- echo "Process $coproc_name has finished with exit code: $exit_code"
-
- # if exit code is not zero, exit
- if [ $exit_code -ne 0 ]; then
- echo "====================================================================="
- echo "=== Shutting down. Log of failed process below ==="
- echo "====================================================================="
- echo "$coproc_stdout"
-
- exit 1
- fi
- done
-
- # proceed to next index
- ((TEST_INDEX++))
- if [ "$TEST_INDEX" -ge "$TESTS_END" ]; then
- break
- fi
-
- # kill relay here - it is started manually by tests
- killall substrate-relay
-done
-
-echo "====================================================================="
-echo "=== All tests have completed successfully ==="
-echo "====================================================================="
diff --git a/bridges/testing/tests/0003-required-headers-synced-while-active-rococo-to-westend.zndsl b/bridges/testing/tests/0003-required-headers-synced-while-active-rococo-to-westend.zndsl
deleted file mode 100644
index 07b91481dc7c..000000000000
--- a/bridges/testing/tests/0003-required-headers-synced-while-active-rococo-to-westend.zndsl
+++ /dev/null
@@ -1,26 +0,0 @@
-Description: While relayer is active, we only sync mandatory and required Rococo (and Rococo BH) headers to Westend BH.
-Network: ../environments/rococo-westend/bridge_hub_westend_local_network.toml
-Creds: config
-
-# step 1: initialize Westend AH
-asset-hub-westend-collator1: run ../scripts/invoke-script.sh with "init-asset-hub-westend-local" within 60 seconds
-
-# step 2: initialize Westend bridge hub
-bridge-hub-westend-collator1: run ../scripts/invoke-script.sh with "init-bridge-hub-westend-local" within 60 seconds
-
-# step 3: ensure that initialization has completed
-asset-hub-westend-collator1: js-script ../js-helpers/wait-hrmp-channel-opened.js with "1002" within 600 seconds
-
-# step 4: send message from Westend to Rococo
-asset-hub-westend-collator1: run ../scripts/invoke-script.sh with "reserve-transfer-assets-from-asset-hub-westend-local" within 60 seconds
-
-# step 5: start relayer
-# (we are starting it after sending the message to be sure that relayer won't relay messages before our js script
-# will be started at step 6)
-# (it is started by sibling 0003-required-headers-synced-while-active-westend-to-rococo.zndsl)
-
-# step 6: ensure that relayer won't sync any extra headers while delivering messages and confirmations
-bridge-hub-westend-collator1: js-script ../js-helpers/only-required-headers-synced-when-active.js with "500,rococo-at-westend" within 600 seconds
-
-# wait until other network test has completed OR exit with an error too
-asset-hub-westend-collator1: run ../scripts/sync-exit.sh within 600 seconds
diff --git a/bridges/testing/tests/0003-required-headers-synced-while-active-westend-to-rococo.zndsl b/bridges/testing/tests/0003-required-headers-synced-while-active-westend-to-rococo.zndsl
deleted file mode 100644
index a6b11fc24052..000000000000
--- a/bridges/testing/tests/0003-required-headers-synced-while-active-westend-to-rococo.zndsl
+++ /dev/null
@@ -1,26 +0,0 @@
-Description: While relayer is active, we only sync mandatory and required Westend (and Westend BH) headers to Rococo BH.
-Network: ../environments/rococo-westend/bridge_hub_rococo_local_network.toml
-Creds: config
-
-# step 1: initialize Rococo AH
-asset-hub-rococo-collator1: run ../scripts/invoke-script.sh with "init-asset-hub-rococo-local" within 60 seconds
-
-# step 2: initialize Rococo bridge hub
-bridge-hub-rococo-collator1: run ../scripts/invoke-script.sh with "init-bridge-hub-rococo-local" within 60 seconds
-
-# step 3: ensure that initialization has completed
-asset-hub-rococo-collator1: js-script ../js-helpers/wait-hrmp-channel-opened.js with "1013" within 600 seconds
-
-# step 4: send message from Rococo to Westend
-asset-hub-rococo-collator1: run ../scripts/invoke-script.sh with "reserve-transfer-assets-from-asset-hub-rococo-local" within 60 seconds
-
-# step 5: start relayer
-# (we are starting it after sending the message to be sure that relayer won't relay messages before our js script
-# will be started at step 6)
-bridge-hub-rococo-collator1: run ../scripts/start-relayer.sh within 60 seconds
-
-# step 6: ensure that relayer won't sync any extra headers while delivering messages and confirmations
-bridge-hub-rococo-collator1: js-script ../js-helpers/only-required-headers-synced-when-active.js with "500,westend-at-rococo" within 600 seconds
-
-# wait until other network test has completed OR exit with an error too
-asset-hub-rococo-collator1: run ../scripts/sync-exit.sh within 600 seconds
diff --git a/bridges/testing/tests/0003-required-headers-synced-while-active/rococo-to-westend.zndsl b/bridges/testing/tests/0003-required-headers-synced-while-active/rococo-to-westend.zndsl
new file mode 100644
index 000000000000..897b79eeff23
--- /dev/null
+++ b/bridges/testing/tests/0003-required-headers-synced-while-active/rococo-to-westend.zndsl
@@ -0,0 +1,7 @@
+Description: While relayer is active, we only sync mandatory and required Rococo (and Rococo BH) headers to Westend BH.
+Network: {{ENV_PATH}}/bridge_hub_westend_local_network.toml
+Creds: config
+
+# ensure that relayer won't sync any extra headers while delivering messages and confirmations
+bridge-hub-westend-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/only-required-headers-synced-when-active.js with "500,rococo-at-westend" within 600 seconds
+
diff --git a/bridges/testing/tests/0003-required-headers-synced-while-active/run.sh b/bridges/testing/tests/0003-required-headers-synced-while-active/run.sh
new file mode 100755
index 000000000000..8fad38f22052
--- /dev/null
+++ b/bridges/testing/tests/0003-required-headers-synced-while-active/run.sh
@@ -0,0 +1,44 @@
+#!/bin/bash
+
+set -e
+
+# TODO: This test doesn't work. It was added at a time when we couldn't run it because we didn't have the scaffolding.
+# It needs to be fixed. For the moment we keep it in the repo as is, since the idea has value.
+# But we don't run it in the CI.
+
+source "${BASH_SOURCE%/*}/../../framework/utils/common.sh"
+source "${BASH_SOURCE%/*}/../../framework/utils/zombienet.sh"
+
+export ENV_PATH=`realpath ${BASH_SOURCE%/*}/../../environments/rococo-westend`
+
+logs_dir=$TEST_DIR/logs
+
+$ENV_PATH/spawn.sh --init &
+env_pid=$!
+
+ensure_process_file $env_pid $TEST_DIR/rococo.env 600
+rococo_dir=`cat $TEST_DIR/rococo.env`
+echo
+
+ensure_process_file $env_pid $TEST_DIR/westend.env 300
+westend_dir=`cat $TEST_DIR/westend.env`
+echo
+
+echo "Sending message from Rococo to Westend"
+$ENV_PATH/helper.sh auto-log reserve-transfer-assets-from-asset-hub-rococo-local 5000000000000
+echo
+
+echo "Sending message from Westend to Rococo"
+$ENV_PATH/helper.sh auto-log reserve-transfer-assets-from-asset-hub-westend-local 5000000000000
+echo
+
+
+# Start the relayer with a 30s delay
+# We want to be sure that the messages won't be relayed before starting the js script in `rococo-to-westend.zndsl`
+start_relayer_log=$logs_dir/start_relayer.log
+echo -e "The rococo-westend relayer will be started in 30s. Logs will be available at: $start_relayer_log\n"
+(sleep 30 && $ENV_PATH/start_relayer.sh \
+ $rococo_dir $westend_dir finality_relayer_pid parachains_relayer_pid messages_relayer_pid > $start_relayer_log)&
+
+run_zndsl ${BASH_SOURCE%/*}/rococo-to-westend.zndsl $westend_dir
+
diff --git a/cumulus/client/cli/src/lib.rs b/cumulus/client/cli/src/lib.rs
index a7b2eb19de88..564d7b58c94d 100644
--- a/cumulus/client/cli/src/lib.rs
+++ b/cumulus/client/cli/src/lib.rs
@@ -21,13 +21,13 @@
use std::{
fs,
io::{self, Write},
- net::SocketAddr,
path::PathBuf,
sync::Arc,
};
use codec::Encode;
use sc_chain_spec::ChainSpec;
+use sc_cli::RpcEndpoint;
use sc_client_api::HeaderBackend;
use sc_service::{
config::{PrometheusConfig, RpcBatchRequestConfig, TelemetryEndpoints},
@@ -423,7 +423,7 @@ impl sc_cli::CliConfiguration for NormalizedRunCmd {
self.base.rpc_cors(is_dev)
}
- fn rpc_addr(&self, default_listen_port: u16) -> sc_cli::Result<Option<SocketAddr>> {
+ fn rpc_addr(&self, default_listen_port: u16) -> sc_cli::Result<Option<Vec<RpcEndpoint>>> {
self.base.rpc_addr(default_listen_port)
}
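Editor's note on the `rpc_addr` change above: the node can now expose the RPC server on several endpoints, so the method returns `Option<Vec<RpcEndpoint>>` instead of `Option<SocketAddr>`. A minimal sketch of a delegating implementation under the new signature (only the signature comes from this diff; the free-function form and its name are illustrative):

```rust
use sc_cli::RpcEndpoint;

// Sketch: forward to an inner CLI object, as the diff does with `self.base`.
// `Ok(None)` keeps the default listen address on `default_listen_port`;
// `Ok(Some(endpoints))` can now carry multiple RPC endpoints.
fn rpc_addr_passthrough(
    base: &impl sc_cli::CliConfiguration,
    default_listen_port: u16,
) -> sc_cli::Result<Option<Vec<RpcEndpoint>>> {
    base.rpc_addr(default_listen_port)
}
```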
diff --git a/cumulus/client/consensus/aura/Cargo.toml b/cumulus/client/consensus/aura/Cargo.toml
index 01e07cb395a9..47e2d8572c3f 100644
--- a/cumulus/client/consensus/aura/Cargo.toml
+++ b/cumulus/client/consensus/aura/Cargo.toml
@@ -54,3 +54,7 @@ polkadot-primitives = { workspace = true, default-features = true }
polkadot-node-primitives = { workspace = true, default-features = true }
polkadot-node-subsystem = { workspace = true, default-features = true }
polkadot-overseer = { workspace = true, default-features = true }
+
+[features]
+# Allows collator to use full PoV size for block building
+full-pov-size = []
diff --git a/cumulus/client/consensus/aura/src/collators/basic.rs b/cumulus/client/consensus/aura/src/collators/basic.rs
index 0f9583cd0eb0..d843483b79fa 100644
--- a/cumulus/client/consensus/aura/src/collators/basic.rs
+++ b/cumulus/client/consensus/aura/src/collators/basic.rs
@@ -237,6 +237,16 @@ where
.await
);
+ let allowed_pov_size = if cfg!(feature = "full-pov-size") {
+ validation_data.max_pov_size
+ } else {
+ // Set the block limit to 50% of the maximum PoV size.
+ //
+ // TODO: If we got benchmarking that includes the proof size,
+ // we should be able to use the maximum pov size.
+ validation_data.max_pov_size / 2
+ } as usize;
+
let maybe_collation = try_request!(
collator
.collate(
@@ -245,11 +255,7 @@ where
None,
(parachain_inherent_data, other_inherent_data),
params.authoring_duration,
- // Set the block limit to 50% of the maximum PoV size.
- //
- // TODO: If we got benchmarking that includes the proof size,
- // we should be able to use the maximum pov size.
- (validation_data.max_pov_size / 2) as usize,
+ allowed_pov_size,
)
.await
);
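The `full-pov-size` gate above also appears in the lookahead and slot-based collators below. Condensed, the selection reduces to this sketch (the function name is illustrative; `max_pov_size` is the relay chain's byte limit from `PersistedValidationData`):

```rust
// `cfg!(...)` is evaluated at compile time, so the cargo feature picks the
// branch, but both branches must still typecheck.
fn allowed_pov_size(max_pov_size: u32) -> usize {
    if cfg!(feature = "full-pov-size") {
        // Opt-in: use the full PoV budget the relay chain allows.
        max_pov_size as usize
    } else {
        // Default: stay at 50% until benchmarking accounts for proof size.
        (max_pov_size / 2) as usize
    }
}
```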
diff --git a/cumulus/client/consensus/aura/src/collators/lookahead.rs b/cumulus/client/consensus/aura/src/collators/lookahead.rs
index 02d60538a732..0be1e0a23ca5 100644
--- a/cumulus/client/consensus/aura/src/collators/lookahead.rs
+++ b/cumulus/client/consensus/aura/src/collators/lookahead.rs
@@ -412,6 +412,16 @@ where
)
.await;
+ let allowed_pov_size = if cfg!(feature = "full-pov-size") {
+ validation_data.max_pov_size
+ } else {
+ // Set the block limit to 50% of the maximum PoV size.
+ //
+ // TODO: If we got benchmarking that includes the proof size,
+ // we should be able to use the maximum pov size.
+ validation_data.max_pov_size / 2
+ } as usize;
+
match collator
.collate(
&parent_header,
@@ -419,11 +429,7 @@ where
None,
(parachain_inherent_data, other_inherent_data),
params.authoring_duration,
- // Set the block limit to 50% of the maximum PoV size.
- //
- // TODO: If we got benchmarking that includes the proof size,
- // we should be able to use the maximum pov size.
- (validation_data.max_pov_size / 2) as usize,
+ allowed_pov_size,
)
.await
{
diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs
index 1fbc0689da86..b70cfe3841b7 100644
--- a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs
+++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs
@@ -350,6 +350,16 @@ where
)
.await;
+ let allowed_pov_size = if cfg!(feature = "full-pov-size") {
+ validation_data.max_pov_size
+ } else {
+ // Set the block limit to 50% of the maximum PoV size.
+ //
+ // TODO: If we got benchmarking that includes the proof size,
+ // we should be able to use the maximum pov size.
+ validation_data.max_pov_size / 2
+ } as usize;
+
let Ok(Some(candidate)) = collator
.build_block_and_import(
&parent_header,
@@ -357,11 +367,7 @@ where
None,
(parachain_inherent_data, other_inherent_data),
authoring_duration,
- // Set the block limit to 50% of the maximum PoV size.
- //
- // TODO: If we got benchmarking that includes the proof size,
- // we should be able to use the maximum pov size.
- (validation_data.max_pov_size / 2) as usize,
+ allowed_pov_size,
)
.await
else {
diff --git a/cumulus/client/consensus/proposer/Cargo.toml b/cumulus/client/consensus/proposer/Cargo.toml
index ce91d48bf589..bb760ae03f4d 100644
--- a/cumulus/client/consensus/proposer/Cargo.toml
+++ b/cumulus/client/consensus/proposer/Cargo.toml
@@ -10,7 +10,7 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
workspace = true
[dependencies]
-anyhow = { workspace = true }
+anyhow = { workspace = true, default-features = true }
async-trait = { workspace = true }
thiserror = { workspace = true }
diff --git a/cumulus/pallets/collator-selection/src/lib.rs b/cumulus/pallets/collator-selection/src/lib.rs
index 17dc1a552c2d..9d7e62af3c68 100644
--- a/cumulus/pallets/collator-selection/src/lib.rs
+++ b/cumulus/pallets/collator-selection/src/lib.rs
@@ -972,7 +972,7 @@ pub mod pallet {
let result = Self::assemble_collators();
frame_system::Pallet::<T>::register_extra_weight_unchecked(
- T::WeightInfo::new_session(candidates_len_before, removed),
+ T::WeightInfo::new_session(removed, candidates_len_before),
DispatchClass::Mandatory,
);
Some(result)
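The one-line change above fixes transposed arguments: judging by the fix, the generated `new_session` weight function takes the removed-candidate count first and the prior candidate count second. A hedged sketch of the assumed signature (the exact autogenerated trait is not shown in this diff):

```rust
use frame_support::weights::Weight;

// Assumed parameter order that the fix aligns with. Both parameters are plain
// u32s, so the compiler cannot catch a transposition; only the weight
// registered for the new session ends up wrong.
pub trait WeightInfo {
    fn new_session(removed: u32, candidates: u32) -> Weight;
}
```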
diff --git a/cumulus/pallets/parachain-system/src/lib.rs b/cumulus/pallets/parachain-system/src/lib.rs
index 9e0a68d09a14..bf136dc0644c 100644
--- a/cumulus/pallets/parachain-system/src/lib.rs
+++ b/cumulus/pallets/parachain-system/src/lib.rs
@@ -53,9 +53,6 @@ use polkadot_runtime_parachains::FeeTracker;
use scale_info::TypeInfo;
use sp_runtime::{
traits::{Block as BlockT, BlockNumberProvider, Hash},
- transaction_validity::{
- InvalidTransaction, TransactionSource, TransactionValidity, ValidTransaction,
- },
BoundedSlice, FixedU128, RuntimeDebug, Saturating,
};
use xcm::{latest::XcmHash, VersionedLocation, VersionedXcm};
@@ -193,7 +190,7 @@ pub mod ump_constants {
pub mod pallet {
use super::*;
use frame_support::pallet_prelude::*;
- use frame_system::{pallet_prelude::*, WeightInfo as SystemWeightInfo};
+ use frame_system::pallet_prelude::*;
#[pallet::pallet]
#[pallet::storage_version(migration::STORAGE_VERSION)]
@@ -653,52 +650,8 @@ pub mod pallet {
Ok(())
}
- /// Authorize an upgrade to a given `code_hash` for the runtime. The runtime can be supplied
- /// later.
- ///
- /// The `check_version` parameter sets a boolean flag for whether or not the runtime's spec
- /// version and name should be verified on upgrade. Since the authorization only has a hash,
- /// it cannot actually perform the verification.
- ///
- /// This call requires Root origin.
- #[pallet::call_index(2)]
- #[pallet::weight(<T as frame_system::Config>::SystemWeightInfo::authorize_upgrade())]
- #[allow(deprecated)]
- #[deprecated(
- note = "To be removed after June 2024. Migrate to `frame_system::authorize_upgrade`."
- )]
- pub fn authorize_upgrade(
- origin: OriginFor<T>,
- code_hash: T::Hash,
- check_version: bool,
- ) -> DispatchResult {
- ensure_root(origin)?;
- frame_system::Pallet::<T>::do_authorize_upgrade(code_hash, check_version);
- Ok(())
- }
-
- /// Provide the preimage (runtime binary) `code` for an upgrade that has been authorized.
- ///
- /// If the authorization required a version check, this call will ensure the spec name
- /// remains unchanged and that the spec version has increased.
- ///
- /// Note that this function will not apply the new `code`, but only attempt to schedule the
- /// upgrade with the Relay Chain.
- ///
- /// All origins are allowed.
- #[pallet::call_index(3)]
- #[pallet::weight(<T as frame_system::Config>::SystemWeightInfo::apply_authorized_upgrade())]
- #[allow(deprecated)]
- #[deprecated(
- note = "To be removed after June 2024. Migrate to `frame_system::apply_authorized_upgrade`."
- )]
- pub fn enact_authorized_upgrade(
- _: OriginFor<T>,
- code: Vec<u8>,
- ) -> DispatchResultWithPostInfo {
- let post = frame_system::Pallet::<T>::do_apply_authorize_upgrade(code)?;
- Ok(post)
- }
+ // WARNING: call indices 2 and 3 were used in a former version of this pallet. Using them
+ // again will require bumping the transaction version of runtimes using this pallet.
}
#[pallet::event]
@@ -951,30 +904,6 @@ pub mod pallet {
sp_io::storage::set(b":c", &[]);
}
}
-
- #[pallet::validate_unsigned]
- impl<T: Config> sp_runtime::traits::ValidateUnsigned for Pallet<T> {
- type Call = Call<T>;
-
- fn validate_unsigned(_source: TransactionSource, call: &Self::Call) -> TransactionValidity {
- if let Call::enact_authorized_upgrade { ref code } = call {
- if let Ok(hash) = frame_system::Pallet::<T>::validate_authorized_upgrade(&code[..])
- {
- return Ok(ValidTransaction {
- priority: 100,
- requires: Vec::new(),
- provides: vec![hash.as_ref().to_vec()],
- longevity: TransactionLongevity::max_value(),
- propagate: true,
- })
- }
- }
- if let Call::set_validation_data { .. } = call {
- return Ok(Default::default())
- }
- Err(InvalidTransaction::Call.into())
- }
- }
}
impl<T: Config> Pallet<T> {
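Why the removed calls leave a `WARNING` behind: call indices are part of the SCALE encoding of extrinsics, so reusing 2 or 3 would change how already-encoded transactions decode. A standalone sketch of the mechanism (`codec` is the `parity-scale-codec` alias this diff already imports; the enum and variants are illustrative):

```rust
use codec::{Decode, Encode};

// A pallet call is encoded by its pinned index, like an enum variant tag.
// If a new call reused index 2 or 3, bytes encoded for the retired
// `authorize_upgrade`/`enact_authorized_upgrade` calls would decode as the
// new call, which is why reuse requires a transaction-version bump.
#[derive(Encode, Decode, Debug, PartialEq)]
enum Call {
    #[codec(index = 0)]
    SetData { value: u32 },
    // Indices 2 and 3 intentionally left unassigned.
    #[codec(index = 4)]
    Ping,
}

fn main() {
    let encoded = Call::Ping.encode();
    assert_eq!(encoded[0], 4); // the pinned index is the first byte on the wire
    assert_eq!(Call::decode(&mut &encoded[..]).unwrap(), Call::Ping);
}
```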
diff --git a/cumulus/pallets/parachain-system/src/mock.rs b/cumulus/pallets/parachain-system/src/mock.rs
index 7bea72224b8b..b4d118aadf04 100644
--- a/cumulus/pallets/parachain-system/src/mock.rs
+++ b/cumulus/pallets/parachain-system/src/mock.rs
@@ -49,9 +49,9 @@ type Block = frame_system::mocking::MockBlock<Test>;
frame_support::construct_runtime!(
pub enum Test {
- System: frame_system::{Pallet, Call, Config<T>, Storage, Event<T>},
- ParachainSystem: parachain_system::{Pallet, Call, Config<T>, Storage, Inherent, Event<T>, ValidateUnsigned},
- MessageQueue: pallet_message_queue::{Pallet, Call, Storage, Event<T>},
+ System: frame_system,
+ ParachainSystem: parachain_system,
+ MessageQueue: pallet_message_queue,
}
);
diff --git a/cumulus/pallets/parachain-system/src/tests.rs b/cumulus/pallets/parachain-system/src/tests.rs
index 51c6e83c1131..548231966e42 100755
--- a/cumulus/pallets/parachain-system/src/tests.rs
+++ b/cumulus/pallets/parachain-system/src/tests.rs
@@ -1127,10 +1127,8 @@ fn upgrade_version_checks_should_work() {
let new_code = vec![1, 2, 3, 4];
let new_code_hash = H256(sp_crypto_hashing::blake2_256(&new_code));
- #[allow(deprecated)]
- let _authorize = ParachainSystem::authorize_upgrade(RawOrigin::Root.into(), new_code_hash, true);
- #[allow(deprecated)]
- let res = ParachainSystem::enact_authorized_upgrade(RawOrigin::None.into(), new_code);
+ let _authorize = System::authorize_upgrade(RawOrigin::Root.into(), new_code_hash);
+ let res = System::apply_authorized_upgrade(RawOrigin::None.into(), new_code);
assert_eq!(expected.map_err(DispatchErrorWithPostInfo::from), res);
});
diff --git a/cumulus/pallets/xcmp-queue/src/mock.rs b/cumulus/pallets/xcmp-queue/src/mock.rs
index 7fb96de7a4ea..348939de1f14 100644
--- a/cumulus/pallets/xcmp-queue/src/mock.rs
+++ b/cumulus/pallets/xcmp-queue/src/mock.rs
@@ -45,7 +45,7 @@ frame_support::construct_runtime!(
System: frame_system::{Pallet, Call, Config<T>, Storage, Event<T>},
Balances: pallet_balances::{Pallet, Call, Storage, Config<T>, Event<T>},
ParachainSystem: cumulus_pallet_parachain_system::{
- Pallet, Call, Config<T>, Storage, Inherent, Event<T>, ValidateUnsigned,
+ Pallet, Call, Config<T>, Storage, Inherent, Event<T>,
},
XcmpQueue: xcmp_queue::{Pallet, Call, Storage, Event<T>},
}
diff --git a/cumulus/parachains/chain-specs/coretime-polkadot.json b/cumulus/parachains/chain-specs/coretime-polkadot.json
index 73e104b38290..806231db7646 100644
--- a/cumulus/parachains/chain-specs/coretime-polkadot.json
+++ b/cumulus/parachains/chain-specs/coretime-polkadot.json
@@ -8,7 +8,9 @@
"/dns/polkadot-coretime-connect-a-0.polkadot.io/tcp/443/wss/p2p/12D3KooWKjnixAHbKMsPTJwGx8SrBeGEJLHA8KmKcEDYMp3YmWgR",
"/dns/polkadot-coretime-connect-a-1.polkadot.io/tcp/443/wss/p2p/12D3KooWQ7B7p4DFv1jWqaKfhrZBcMmi5g8bWFnmskguLaGEmT6n",
"/dns4/coretime-polkadot.boot.stake.plus/tcp/30332/wss/p2p/12D3KooWFJ2yBTKFKYwgKUjfY3F7XfaxHV8hY6fbJu5oMkpP7wZ9",
- "/dns4/coretime-polkadot.boot.stake.plus/tcp/31332/wss/p2p/12D3KooWCy5pToLafcQzPHn5kadxAftmF6Eh8ZJGPXhSeXSUDfjv"
+ "/dns4/coretime-polkadot.boot.stake.plus/tcp/31332/wss/p2p/12D3KooWCy5pToLafcQzPHn5kadxAftmF6Eh8ZJGPXhSeXSUDfjv",
+ "/dns/coretime-polkadot-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWGpmytHjdthrkKgkXDZyKm9ABtJ2PtGk9NStJDG4pChy9",
+ "/dns/coretime-polkadot-boot-ng.dwellir.com/tcp/30361/p2p/12D3KooWGpmytHjdthrkKgkXDZyKm9ABtJ2PtGk9NStJDG4pChy9"
],
"telemetryEndpoints": null,
"protocolId": null,
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml
index 98df41090a40..0143c09036d2 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml
+++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml
@@ -258,4 +258,4 @@ metadata-hash = ["substrate-wasm-builder/metadata-hash"]
# A feature that should be enabled when the runtime should be built for on-chain
# deployment. This will disable stuff that shouldn't be part of the on-chain wasm
# to make it smaller, like logging for example.
-on-chain-release-build = ["metadata-hash", "sp-api/disable-logging"]
+on-chain-release-build = ["metadata-hash"]
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml
index 2f244a07e8f1..77130ff846b5 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml
+++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml
@@ -262,4 +262,4 @@ metadata-hash = ["substrate-wasm-builder/metadata-hash"]
# A feature that should be enabled when the runtime should be built for on-chain
# deployment. This will disable stuff that shouldn't be part of the on-chain wasm
# to make it smaller, like logging for example.
-on-chain-release-build = ["metadata-hash", "sp-api/disable-logging"]
+on-chain-release-build = ["metadata-hash"]
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml
index f85861bdaa54..6d0fbd7d5c66 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml
@@ -305,4 +305,4 @@ fast-runtime = []
# A feature that should be enabled when the runtime should be built for on-chain
# deployment. This will disable stuff that shouldn't be part of the on-chain wasm
# to make it smaller, like logging for example.
-on-chain-release-build = ["sp-api/disable-logging"]
+on-chain-release-build = []
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_messages_rococo_to_rococo_bulletin.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_messages_rococo_to_rococo_bulletin.rs
index 5522a325f192..d0a7ed25363d 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_messages_rococo_to_rococo_bulletin.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_messages_rococo_to_rococo_bulletin.rs
@@ -17,7 +17,7 @@
//! Autogenerated weights for `pallet_bridge_messages`
//!
//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
-//! DATE: 2024-07-03, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2024-07-04, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
//! WORST CASE MAP SIZE: `1000000`
//! HOSTNAME: `runner-7wrmsoux-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024
@@ -60,8 +60,8 @@ impl<T: frame_system::Config> pallet_bridge_messages::WeightInfo for WeightInfo<
// Proof Size summary in bytes:
// Measured: `654`
// Estimated: `52645`
- // Minimum execution time: 37_206_000 picoseconds.
- Weight::from_parts(38_545_000, 0)
+ // Minimum execution time: 36_836_000 picoseconds.
+ Weight::from_parts(37_858_000, 0)
.saturating_add(Weight::from_parts(0, 52645))
.saturating_add(T::DbWeight::get().reads(4))
.saturating_add(T::DbWeight::get().writes(1))
@@ -80,11 +80,11 @@ impl<T: frame_system::Config> pallet_bridge_messages::WeightInfo for WeightInfo<
// Proof Size summary in bytes:
// Measured: `654`
// Estimated: `52645`
- // Minimum execution time: 37_075_000 picoseconds.
- Weight::from_parts(37_757_000, 0)
+ // Minimum execution time: 36_587_000 picoseconds.
+ Weight::from_parts(37_516_000, 0)
.saturating_add(Weight::from_parts(0, 52645))
- // Standard Error: 5_776
- .saturating_add(Weight::from_parts(11_586_768, 0).saturating_mul(n.into()))
+ // Standard Error: 8_655
+ .saturating_add(Weight::from_parts(11_649_169, 0).saturating_mul(n.into()))
.saturating_add(T::DbWeight::get().reads(4))
.saturating_add(T::DbWeight::get().writes(1))
}
@@ -100,8 +100,8 @@ impl<T: frame_system::Config> pallet_bridge_messages::WeightInfo for WeightInfo<
// Proof Size summary in bytes:
// Measured: `654`
// Estimated: `52645`
- // Minimum execution time: 42_087_000 picoseconds.
- Weight::from_parts(42_970_000, 0)
+ // Minimum execution time: 42_157_000 picoseconds.
+ Weight::from_parts(43_105_000, 0)
.saturating_add(Weight::from_parts(0, 52645))
.saturating_add(T::DbWeight::get().reads(4))
.saturating_add(T::DbWeight::get().writes(1))
@@ -120,11 +120,11 @@ impl<T: frame_system::Config> pallet_bridge_messages::WeightInfo for WeightInfo<
// Proof Size summary in bytes:
// Measured: `654`
// Estimated: `52645`
- // Minimum execution time: 35_055_000 picoseconds.
- Weight::from_parts(36_987_740, 0)
+ // Minimum execution time: 35_536_000 picoseconds.
+ Weight::from_parts(37_452_828, 0)
.saturating_add(Weight::from_parts(0, 52645))
- // Standard Error: 4
- .saturating_add(Weight::from_parts(2_316, 0).saturating_mul(n.into()))
+ // Standard Error: 3
+ .saturating_add(Weight::from_parts(2_269, 0).saturating_mul(n.into()))
.saturating_add(T::DbWeight::get().reads(4))
.saturating_add(T::DbWeight::get().writes(1))
}
@@ -134,15 +134,17 @@ impl<T: frame_system::Config> pallet_bridge_messages::WeightInfo for WeightInfo<
/// Proof: `BridgePolkadotBulletinGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`)
/// Storage: `BridgePolkadotBulletinMessages::OutboundLanes` (r:1 w:1)
/// Proof: `BridgePolkadotBulletinMessages::OutboundLanes` (`max_values`: Some(1), `max_size`: Some(44), added: 539, mode: `MaxEncodedLen`)
+ /// Storage: `BridgePolkadotBulletinMessages::OutboundMessages` (r:0 w:1)
+ /// Proof: `BridgePolkadotBulletinMessages::OutboundMessages` (`max_values`: None, `max_size`: Some(65568), added: 68043, mode: `MaxEncodedLen`)
fn receive_delivery_proof_for_single_message() -> Weight {
// Proof Size summary in bytes:
// Measured: `621`
// Estimated: `2543`
- // Minimum execution time: 24_326_000 picoseconds.
- Weight::from_parts(25_169_000, 0)
+ // Minimum execution time: 25_800_000 picoseconds.
+ Weight::from_parts(26_666_000, 0)
.saturating_add(Weight::from_parts(0, 2543))
.saturating_add(T::DbWeight::get().reads(3))
- .saturating_add(T::DbWeight::get().writes(1))
+ .saturating_add(T::DbWeight::get().writes(2))
}
/// Storage: `BridgePolkadotBulletinMessages::PalletOperatingMode` (r:1 w:0)
/// Proof: `BridgePolkadotBulletinMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`)
@@ -150,15 +152,17 @@ impl<T: frame_system::Config> pallet_bridge_messages::WeightInfo for WeightInfo<
/// Proof: `BridgePolkadotBulletinGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`)
/// Storage: `BridgePolkadotBulletinMessages::OutboundLanes` (r:1 w:1)
/// Proof: `BridgePolkadotBulletinMessages::OutboundLanes` (`max_values`: Some(1), `max_size`: Some(44), added: 539, mode: `MaxEncodedLen`)
+ /// Storage: `BridgePolkadotBulletinMessages::OutboundMessages` (r:0 w:2)
+ /// Proof: `BridgePolkadotBulletinMessages::OutboundMessages` (`max_values`: None, `max_size`: Some(65568), added: 68043, mode: `MaxEncodedLen`)
fn receive_delivery_proof_for_two_messages_by_single_relayer() -> Weight {
// Proof Size summary in bytes:
// Measured: `621`
// Estimated: `2543`
- // Minimum execution time: 24_484_000 picoseconds.
- Weight::from_parts(25_130_000, 0)
+ // Minimum execution time: 27_262_000 picoseconds.
+ Weight::from_parts(27_997_000, 0)
.saturating_add(Weight::from_parts(0, 2543))
.saturating_add(T::DbWeight::get().reads(3))
- .saturating_add(T::DbWeight::get().writes(1))
+ .saturating_add(T::DbWeight::get().writes(3))
}
/// Storage: `BridgePolkadotBulletinMessages::PalletOperatingMode` (r:1 w:0)
/// Proof: `BridgePolkadotBulletinMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`)
@@ -166,15 +170,17 @@ impl<T: frame_system::Config> pallet_bridge_messages::WeightInfo for WeightInfo<
/// Proof: `BridgePolkadotBulletinGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`)
/// Storage: `BridgePolkadotBulletinMessages::OutboundLanes` (r:1 w:1)
/// Proof: `BridgePolkadotBulletinMessages::OutboundLanes` (`max_values`: Some(1), `max_size`: Some(44), added: 539, mode: `MaxEncodedLen`)
+ /// Storage: `BridgePolkadotBulletinMessages::OutboundMessages` (r:0 w:2)
+ /// Proof: `BridgePolkadotBulletinMessages::OutboundMessages` (`max_values`: None, `max_size`: Some(65568), added: 68043, mode: `MaxEncodedLen`)
fn receive_delivery_proof_for_two_messages_by_two_relayers() -> Weight {
// Proof Size summary in bytes:
// Measured: `621`
// Estimated: `2543`
- // Minimum execution time: 24_450_000 picoseconds.
- Weight::from_parts(25_164_000, 0)
+ // Minimum execution time: 26_992_000 picoseconds.
+ Weight::from_parts(27_921_000, 0)
.saturating_add(Weight::from_parts(0, 2543))
.saturating_add(T::DbWeight::get().reads(3))
- .saturating_add(T::DbWeight::get().writes(1))
+ .saturating_add(T::DbWeight::get().writes(3))
}
/// Storage: `BridgePolkadotBulletinMessages::PalletOperatingMode` (r:1 w:0)
/// Proof: `BridgePolkadotBulletinMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`)
@@ -204,11 +210,11 @@ impl<T: frame_system::Config> pallet_bridge_messages::WeightInfo for WeightInfo<
// Proof Size summary in bytes:
// Measured: `813`
// Estimated: `52645`
- // Minimum execution time: 54_317_000 picoseconds.
- Weight::from_parts(59_171_547, 0)
+ // Minimum execution time: 55_509_000 picoseconds.
+ Weight::from_parts(59_826_763, 0)
.saturating_add(Weight::from_parts(0, 52645))
// Standard Error: 7
- .saturating_add(Weight::from_parts(7_566, 0).saturating_mul(n.into()))
+ .saturating_add(Weight::from_parts(7_565, 0).saturating_mul(n.into()))
.saturating_add(T::DbWeight::get().reads(10))
.saturating_add(T::DbWeight::get().writes(4))
}
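For readers diffing the regenerated numbers above: each weight function combines a measured base ref-time, a proof-size estimate, an optional per-item slope, and database access costs; the extra write in this regeneration is the newly tracked `OutboundMessages` access. A sketch of how the post-change pieces of `receive_delivery_proof_for_single_message` compose (constants copied from the hunk above; the function here is illustrative, not the generated code):

```rust
use frame_support::weights::Weight;

fn sketch_single_message_delivery_proof_weight<T: frame_system::Config>() -> Weight {
    Weight::from_parts(26_666_000, 0)                 // measured ref-time, picoseconds
        .saturating_add(Weight::from_parts(0, 2543))  // estimated proof size, bytes
        .saturating_add(T::DbWeight::get().reads(3))  // storage reads
        .saturating_add(T::DbWeight::get().writes(2)) // storage writes, now incl. OutboundMessages
}
```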
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_messages_rococo_to_westend.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_messages_rococo_to_westend.rs
index 9c05dae979da..dc6c917c6d00 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_messages_rococo_to_westend.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_messages_rococo_to_westend.rs
@@ -17,7 +17,7 @@
//! Autogenerated weights for `pallet_bridge_messages`
//!
//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
-//! DATE: 2024-07-03, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2024-07-04, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
//! WORST CASE MAP SIZE: `1000000`
//! HOSTNAME: `runner-7wrmsoux-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024
@@ -62,8 +62,8 @@ impl<T: frame_system::Config> pallet_bridge_messages::WeightInfo for WeightInfo<
// Proof Size summary in bytes:
// Measured: `658`
// Estimated: `52645`
- // Minimum execution time: 41_396_000 picoseconds.
- Weight::from_parts(43_141_000, 0)
+ // Minimum execution time: 40_198_000 picoseconds.
+ Weight::from_parts(42_079_000, 0)
.saturating_add(Weight::from_parts(0, 52645))
.saturating_add(T::DbWeight::get().reads(5))
.saturating_add(T::DbWeight::get().writes(1))
@@ -84,11 +84,11 @@ impl<T: frame_system::Config> pallet_bridge_messages::WeightInfo for WeightInfo<
// Proof Size summary in bytes:
// Measured: `658`
// Estimated: `52645`
- // Minimum execution time: 41_095_000 picoseconds.
- Weight::from_parts(42_030_000, 0)
+ // Minimum execution time: 39_990_000 picoseconds.
+ Weight::from_parts(41_381_000, 0)
.saturating_add(Weight::from_parts(0, 52645))
- // Standard Error: 5_702
- .saturating_add(Weight::from_parts(11_627_951, 0).saturating_mul(n.into()))
+ // Standard Error: 8_459
+ .saturating_add(Weight::from_parts(11_710_167, 0).saturating_mul(n.into()))
.saturating_add(T::DbWeight::get().reads(5))
.saturating_add(T::DbWeight::get().writes(1))
}
@@ -106,8 +106,8 @@ impl<T: frame_system::Config> pallet_bridge_messages::WeightInfo for WeightInfo<
// Proof Size summary in bytes:
// Measured: `658`
// Estimated: `52645`
- // Minimum execution time: 45_912_000 picoseconds.
- Weight::from_parts(47_564_000, 0)
+ // Minimum execution time: 45_940_000 picoseconds.
+ Weight::from_parts(47_753_000, 0)
.saturating_add(Weight::from_parts(0, 52645))
.saturating_add(T::DbWeight::get().reads(5))
.saturating_add(T::DbWeight::get().writes(1))
@@ -128,11 +128,11 @@ impl<T: frame_system::Config> pallet_bridge_messages::WeightInfo for WeightInfo<
// Proof Size summary in bytes:
// Measured: `658`
// Estimated: `52645`
- // Minimum execution time: 39_175_000 picoseconds.
- Weight::from_parts(41_674_095, 0)
+ // Minimum execution time: 39_067_000 picoseconds.
+ Weight::from_parts(41_787_019, 0)
.saturating_add(Weight::from_parts(0, 52645))
- // Standard Error: 4
- .saturating_add(Weight::from_parts(2_305, 0).saturating_mul(n.into()))
+ // Standard Error: 5
+ .saturating_add(Weight::from_parts(2_295, 0).saturating_mul(n.into()))
.saturating_add(T::DbWeight::get().reads(5))
.saturating_add(T::DbWeight::get().writes(1))
}
@@ -146,15 +146,17 @@ impl<T: frame_system::Config> pallet_bridge_messages::WeightInfo for WeightInfo<
/// Proof: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0)
/// Storage: `BridgeRelayers::RelayerRewards` (r:1 w:1)
/// Proof: `BridgeRelayers::RelayerRewards` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`)
+ /// Storage: `BridgeWestendMessages::OutboundMessages` (r:0 w:1)
+ /// Proof: `BridgeWestendMessages::OutboundMessages` (`max_values`: None, `max_size`: Some(65568), added: 68043, mode: `MaxEncodedLen`)
fn receive_delivery_proof_for_single_message() -> Weight {
// Proof Size summary in bytes:
// Measured: `501`
// Estimated: `3966`
- // Minimum execution time: 32_033_000 picoseconds.
- Weight::from_parts(33_131_000, 0)
+ // Minimum execution time: 33_107_000 picoseconds.
+ Weight::from_parts(34_364_000, 0)
.saturating_add(Weight::from_parts(0, 3966))
.saturating_add(T::DbWeight::get().reads(5))
- .saturating_add(T::DbWeight::get().writes(2))
+ .saturating_add(T::DbWeight::get().writes(3))
}
/// Storage: `BridgeWestendMessages::PalletOperatingMode` (r:1 w:0)
/// Proof: `BridgeWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`)
@@ -166,15 +168,17 @@ impl<T: frame_system::Config> pallet_bridge_messages::WeightInfo for WeightInfo<
/// Proof: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0)
/// Storage: `BridgeRelayers::RelayerRewards` (r:1 w:1)
/// Proof: `BridgeRelayers::RelayerRewards` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`)
+ /// Storage: `BridgeWestendMessages::OutboundMessages` (r:0 w:2)
+ /// Proof: `BridgeWestendMessages::OutboundMessages` (`max_values`: None, `max_size`: Some(65568), added: 68043, mode: `MaxEncodedLen`)
fn receive_delivery_proof_for_two_messages_by_single_relayer() -> Weight {
// Proof Size summary in bytes:
// Measured: `501`
// Estimated: `3966`
- // Minimum execution time: 32_153_000 picoseconds.
- Weight::from_parts(33_126_000, 0)
+ // Minimum execution time: 34_826_000 picoseconds.
+ Weight::from_parts(35_563_000, 0)
.saturating_add(Weight::from_parts(0, 3966))
.saturating_add(T::DbWeight::get().reads(5))
- .saturating_add(T::DbWeight::get().writes(2))
+ .saturating_add(T::DbWeight::get().writes(4))
}
/// Storage: `BridgeWestendMessages::PalletOperatingMode` (r:1 w:0)
/// Proof: `BridgeWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`)
@@ -186,15 +190,17 @@ impl<T: frame_system::Config> pallet_bridge_messages::WeightInfo for WeightInfo<
/// Proof: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0)
/// Storage: `BridgeRelayers::RelayerRewards` (r:2 w:2)
/// Proof: `BridgeRelayers::RelayerRewards` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`)
+ /// Storage: `BridgeWestendMessages::OutboundMessages` (r:0 w:2)
+ /// Proof: `BridgeWestendMessages::OutboundMessages` (`max_values`: None, `max_size`: Some(65568), added: 68043, mode: `MaxEncodedLen`)
fn receive_delivery_proof_for_two_messages_by_two_relayers() -> Weight {
// Proof Size summary in bytes:
// Measured: `501`
// Estimated: `6086`
- // Minimum execution time: 36_387_000 picoseconds.
- Weight::from_parts(37_396_000, 0)
+ // Minimum execution time: 38_725_000 picoseconds.
+ Weight::from_parts(39_727_000, 0)
.saturating_add(Weight::from_parts(0, 6086))
.saturating_add(T::DbWeight::get().reads(6))
- .saturating_add(T::DbWeight::get().writes(3))
+ .saturating_add(T::DbWeight::get().writes(5))
}
/// Storage: `BridgeWestendMessages::PalletOperatingMode` (r:1 w:0)
/// Proof: `BridgeWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`)
@@ -224,11 +230,11 @@ impl<T: frame_system::Config> pallet_bridge_messages::WeightInfo for WeightInfo<
// Proof Size summary in bytes:
// Measured: `789`
// Estimated: `52645`
- // Minimum execution time: 56_562_000 picoseconds.
- Weight::from_parts(61_452_871, 0)
+ // Minimum execution time: 56_892_000 picoseconds.
+ Weight::from_parts(61_941_659, 0)
.saturating_add(Weight::from_parts(0, 52645))
- // Standard Error: 9
- .saturating_add(Weight::from_parts(7_587, 0).saturating_mul(n.into()))
+ // Standard Error: 8
+ .saturating_add(Weight::from_parts(7_580, 0).saturating_mul(n.into()))
.saturating_add(T::DbWeight::get().reads(10))
.saturating_add(T::DbWeight::get().writes(4))
}
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml
index a9381501359e..1c9d8c0207b9 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml
@@ -292,6 +292,6 @@ try-runtime = [
# A feature that should be enabled when the runtime should be built for on-chain
# deployment. This will disable stuff that shouldn't be part of the on-chain wasm
# to make it smaller, like logging for example.
-on-chain-release-build = ["sp-api/disable-logging"]
+on-chain-release-build = []
fast-runtime = []
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_bridge_messages.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_bridge_messages.rs
index 386342d7ea5d..1033387b527e 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_bridge_messages.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_bridge_messages.rs
@@ -17,7 +17,7 @@
//! Autogenerated weights for `pallet_bridge_messages`
//!
//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
-//! DATE: 2024-07-03, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2024-07-04, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
//! WORST CASE MAP SIZE: `1000000`
//! HOSTNAME: `runner-7wrmsoux-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-westend-dev")`, DB CACHE: 1024
@@ -62,8 +62,8 @@ impl<T: frame_system::Config> pallet_bridge_messages::WeightInfo for WeightInfo<
// Proof Size summary in bytes:
// Measured: `522`
// Estimated: `52645`
- // Minimum execution time: 40_748_000 picoseconds.
- Weight::from_parts(41_836_000, 0)
+ // Minimum execution time: 40_289_000 picoseconds.
+ Weight::from_parts(42_150_000, 0)
.saturating_add(Weight::from_parts(0, 52645))
.saturating_add(T::DbWeight::get().reads(5))
.saturating_add(T::DbWeight::get().writes(1))
@@ -83,11 +83,11 @@ impl<T: frame_system::Config> pallet_bridge_messages::WeightInfo for WeightInfo<
// Proof Size summary in bytes:
// Measured: `522`
// Estimated: `52645`
- // Minimum execution time: 40_923_000 picoseconds.
- Weight::from_parts(41_287_000, 0)
+ // Minimum execution time: 40_572_000 picoseconds.
+ Weight::from_parts(41_033_000, 0)
.saturating_add(Weight::from_parts(0, 52645))
- // Standard Error: 9_774
- .saturating_add(Weight::from_parts(11_469_207, 0).saturating_mul(n.into()))
+ // Standard Error: 12_000
+ .saturating_add(Weight::from_parts(11_710_588, 0).saturating_mul(n.into()))
.saturating_add(T::DbWeight::get().reads(5))
.saturating_add(T::DbWeight::get().writes(1))
}
@@ -105,8 +105,8 @@ impl<T: frame_system::Config> pallet_bridge_messages::WeightInfo for WeightInfo<
// Proof Size summary in bytes:
// Measured: `522`
// Estimated: `52645`
- // Minimum execution time: 45_946_000 picoseconds.
- Weight::from_parts(47_547_000, 0)
+ // Minimum execution time: 46_655_000 picoseconds.
+ Weight::from_parts(49_576_000, 0)
.saturating_add(Weight::from_parts(0, 52645))
.saturating_add(T::DbWeight::get().reads(5))
.saturating_add(T::DbWeight::get().writes(1))
@@ -126,11 +126,11 @@ impl<T: frame_system::Config> pallet_bridge_messages::WeightInfo for WeightInfo<
// Proof Size summary in bytes:
// Measured: `522`
// Estimated: `52645`
- // Minimum execution time: 39_668_000 picoseconds.
- Weight::from_parts(41_908_980, 0)
+ // Minimum execution time: 40_245_000 picoseconds.
+ Weight::from_parts(43_461_320, 0)
.saturating_add(Weight::from_parts(0, 52645))
- // Standard Error: 11
- .saturating_add(Weight::from_parts(2_209, 0).saturating_mul(n.into()))
+ // Standard Error: 21
+ .saturating_add(Weight::from_parts(2_246, 0).saturating_mul(n.into()))
.saturating_add(T::DbWeight::get().reads(5))
.saturating_add(T::DbWeight::get().writes(1))
}
@@ -144,15 +144,17 @@ impl<T: frame_system::Config> pallet_bridge_messages::WeightInfo for WeightInfo<
/// Proof: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0)
/// Storage: `BridgeRelayers::RelayerRewards` (r:1 w:1)
/// Proof: `BridgeRelayers::RelayerRewards` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`)
+ /// Storage: `BridgeRococoMessages::OutboundMessages` (r:0 w:1)
+ /// Proof: `BridgeRococoMessages::OutboundMessages` (`max_values`: None, `max_size`: Some(65568), added: 68043, mode: `MaxEncodedLen`)
fn receive_delivery_proof_for_single_message() -> Weight {
// Proof Size summary in bytes:
// Measured: `357`
// Estimated: `3822`
- // Minimum execution time: 30_544_000 picoseconds.
- Weight::from_parts(31_171_000, 0)
+ // Minimum execution time: 32_001_000 picoseconds.
+ Weight::from_parts(32_842_000, 0)
.saturating_add(Weight::from_parts(0, 3822))
.saturating_add(T::DbWeight::get().reads(5))
- .saturating_add(T::DbWeight::get().writes(2))
+ .saturating_add(T::DbWeight::get().writes(3))
}
/// Storage: `BridgeRococoMessages::PalletOperatingMode` (r:1 w:0)
/// Proof: `BridgeRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`)
@@ -164,15 +166,17 @@ impl<T: frame_system::Config> pallet_bridge_messages::WeightInfo for WeightInfo<
/// Proof: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0)
/// Storage: `BridgeRelayers::RelayerRewards` (r:1 w:1)
/// Proof: `BridgeRelayers::RelayerRewards` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`)
+ /// Storage: `BridgeRococoMessages::OutboundMessages` (r:0 w:2)
+ /// Proof: `BridgeRococoMessages::OutboundMessages` (`max_values`: None, `max_size`: Some(65568), added: 68043, mode: `MaxEncodedLen`)
fn receive_delivery_proof_for_two_messages_by_single_relayer() -> Weight {
// Proof Size summary in bytes:
// Measured: `357`
// Estimated: `3822`
- // Minimum execution time: 30_593_000 picoseconds.
- Weight::from_parts(31_261_000, 0)
+ // Minimum execution time: 33_287_000 picoseconds.
+ Weight::from_parts(33_769_000, 0)
.saturating_add(Weight::from_parts(0, 3822))
.saturating_add(T::DbWeight::get().reads(5))
- .saturating_add(T::DbWeight::get().writes(2))
+ .saturating_add(T::DbWeight::get().writes(4))
}
/// Storage: `BridgeRococoMessages::PalletOperatingMode` (r:1 w:0)
/// Proof: `BridgeRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`)
@@ -184,15 +188,17 @@ impl<T: frame_system::Config> pallet_bridge_messages::WeightInfo for WeightInfo<
/// Proof: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0)
/// Storage: `BridgeRelayers::RelayerRewards` (r:2 w:2)
/// Proof: `BridgeRelayers::RelayerRewards` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`)
+ /// Storage: `BridgeRococoMessages::OutboundMessages` (r:0 w:2)
+ /// Proof: `BridgeRococoMessages::OutboundMessages` (`max_values`: None, `max_size`: Some(65568), added: 68043, mode: `MaxEncodedLen`)
fn receive_delivery_proof_for_two_messages_by_two_relayers() -> Weight {
// Proof Size summary in bytes:
// Measured: `357`
// Estimated: `6086`
- // Minimum execution time: 34_682_000 picoseconds.
- Weight::from_parts(35_277_000, 0)
+ // Minimum execution time: 37_136_000 picoseconds.
+ Weight::from_parts(38_294_000, 0)
.saturating_add(Weight::from_parts(0, 6086))
.saturating_add(T::DbWeight::get().reads(6))
- .saturating_add(T::DbWeight::get().writes(3))
+ .saturating_add(T::DbWeight::get().writes(5))
}
/// Storage: `BridgeRococoMessages::PalletOperatingMode` (r:1 w:0)
/// Proof: `BridgeRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`)
@@ -221,11 +227,11 @@ impl<T: frame_system::Config> pallet_bridge_messages::WeightInfo for WeightInfo<
// Proof Size summary in bytes:
// Measured: `653`
// Estimated: `52645`
- // Minimum execution time: 56_465_000 picoseconds.
- Weight::from_parts(61_575_775, 0)
+ // Minimum execution time: 55_942_000 picoseconds.
+ Weight::from_parts(60_615_769, 0)
.saturating_add(Weight::from_parts(0, 52645))
- // Standard Error: 15
- .saturating_add(Weight::from_parts(7_197, 0).saturating_mul(n.into()))
+ // Standard Error: 14
+ .saturating_add(Weight::from_parts(7_225, 0).saturating_mul(n.into()))
.saturating_add(T::DbWeight::get().reads(10))
.saturating_add(T::DbWeight::get().writes(4))
}
diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml b/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml
index 43fc9083937c..e98508ea02e6 100644
--- a/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml
+++ b/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml
@@ -243,4 +243,4 @@ std = [
# A feature that should be enabled when the runtime should be built for on-chain
# deployment. This will disable stuff that shouldn't be part of the on-chain wasm
# to make it smaller, like logging for example.
-on-chain-release-build = ["sp-api/disable-logging"]
+on-chain-release-build = []
diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml b/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml
index 1fcebb3f16a9..dfa75b8d3cf3 100644
--- a/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml
+++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml
@@ -203,4 +203,4 @@ try-runtime = [
# A feature that should be enabled when the runtime should be built for on-chain
# deployment. This will disable stuff that shouldn't be part of the on-chain wasm
# to make it smaller, like logging for example.
-on-chain-release-build = ["sp-api/disable-logging"]
+on-chain-release-build = []
diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml b/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml
index 2920bc428d90..07d133c80be7 100644
--- a/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml
+++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml
@@ -209,4 +209,4 @@ metadata-hash = ["substrate-wasm-builder/metadata-hash"]
# A feature that should be enabled when the runtime should be built for on-chain
# deployment. This will disable stuff that shouldn't be part of the on-chain wasm
# to make it smaller, like logging for example.
-on-chain-release-build = ["metadata-hash", "sp-api/disable-logging"]
+on-chain-release-build = ["metadata-hash"]
diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml b/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml
index 07a4332800d7..5029c82f971d 100644
--- a/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml
+++ b/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml
@@ -206,4 +206,4 @@ metadata-hash = ["substrate-wasm-builder/metadata-hash"]
# A feature that should be enabled when the runtime should be built for on-chain
# deployment. This will disable stuff that shouldn't be part of the on-chain wasm
# to make it smaller, like logging for example.
-on-chain-release-build = ["metadata-hash", "sp-api/disable-logging"]
+on-chain-release-build = ["metadata-hash"]
diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml b/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml
index d20b62a557b9..09b4ef679d24 100644
--- a/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml
+++ b/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml
@@ -136,4 +136,4 @@ try-runtime = [
# A feature that should be enabled when the runtime should be built for on-chain
# deployment. This will disable stuff that shouldn't be part of the on-chain wasm
# to make it smaller, like logging for example.
-on-chain-release-build = ["sp-api/disable-logging"]
+on-chain-release-build = []
diff --git a/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml b/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml
index a732bec2352d..c676587b1de4 100644
--- a/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml
+++ b/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml
@@ -191,3 +191,8 @@ try-runtime = [
"polkadot-runtime-common/try-runtime",
"sp-runtime/try-runtime",
]
+
+# A feature that should be enabled when the runtime should be built for on-chain
+# deployment. This will disable stuff that shouldn't be part of the on-chain wasm
+# to make it smaller, like logging for example.
+on-chain-release-build = []
diff --git a/cumulus/parachains/runtimes/people/people-westend/Cargo.toml b/cumulus/parachains/runtimes/people/people-westend/Cargo.toml
index 20c7e691ebc8..ab7dd04bb78a 100644
--- a/cumulus/parachains/runtimes/people/people-westend/Cargo.toml
+++ b/cumulus/parachains/runtimes/people/people-westend/Cargo.toml
@@ -191,3 +191,8 @@ try-runtime = [
"polkadot-runtime-common/try-runtime",
"sp-runtime/try-runtime",
]
+
+# A feature that should be enabled when the runtime should be built for on-chain
+# deployment. This will disable stuff that shouldn't be part of the on-chain wasm
+# to make it smaller, like logging for example.
+on-chain-release-build = []
diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml b/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml
index a0ad248bb704..9c905c876277 100644
--- a/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml
+++ b/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml
@@ -134,3 +134,8 @@ runtime-benchmarks = [
"xcm-builder/runtime-benchmarks",
"xcm-executor/runtime-benchmarks",
]
+
+# A feature that should be enabled when the runtime should be built for on-chain
+# deployment. This will disable stuff that shouldn't be part of the on-chain wasm
+# to make it smaller, like logging for example.
+on-chain-release-build = []
diff --git a/cumulus/polkadot-parachain/chain-specs/contracts-rococo.json b/cumulus/polkadot-parachain/chain-specs/contracts-rococo.json
deleted file mode 120000
index b9f8e8f31e84..000000000000
--- a/cumulus/polkadot-parachain/chain-specs/contracts-rococo.json
+++ /dev/null
@@ -1 +0,0 @@
-../../parachains/chain-specs/contracts-rococo.json
\ No newline at end of file
diff --git a/cumulus/polkadot-parachain/polkadot-parachain-lib/Cargo.toml b/cumulus/polkadot-parachain/polkadot-parachain-lib/Cargo.toml
index 09bde034cf26..066cbfae53ae 100644
--- a/cumulus/polkadot-parachain/polkadot-parachain-lib/Cargo.toml
+++ b/cumulus/polkadot-parachain/polkadot-parachain-lib/Cargo.toml
@@ -38,6 +38,7 @@ sc-consensus = { workspace = true, default-features = true }
frame-support = { optional = true, workspace = true, default-features = true }
sc-cli = { workspace = true, default-features = true }
sc-client-api = { workspace = true, default-features = true }
+sc-client-db = { workspace = true, default-features = true }
sc-executor = { workspace = true, default-features = true }
sc-service = { workspace = true, default-features = true }
sc-telemetry = { workspace = true, default-features = true }
@@ -105,6 +106,7 @@ runtime-benchmarks = [
"parachains-common/runtime-benchmarks",
"polkadot-cli/runtime-benchmarks",
"polkadot-primitives/runtime-benchmarks",
+ "sc-client-db/runtime-benchmarks",
"sc-service/runtime-benchmarks",
"sp-runtime/runtime-benchmarks",
]
diff --git a/cumulus/polkadot-parachain/polkadot-parachain-lib/src/cli.rs b/cumulus/polkadot-parachain/polkadot-parachain-lib/src/cli.rs
index 2aa2b10fbb67..15d21235d1a1 100644
--- a/cumulus/polkadot-parachain/polkadot-parachain-lib/src/cli.rs
+++ b/cumulus/polkadot-parachain/polkadot-parachain-lib/src/cli.rs
@@ -25,10 +25,10 @@ use clap::{Command, CommandFactory, FromArgMatches};
use sc_chain_spec::ChainSpec;
use sc_cli::{
CliConfiguration, DefaultConfigurationValues, ImportParams, KeystoreParams, NetworkParams,
- SharedParams, SubstrateCli,
+ RpcEndpoint, SharedParams, SubstrateCli,
};
use sc_service::{config::PrometheusConfig, BasePath};
-use std::{fmt::Debug, marker::PhantomData, net::SocketAddr, path::PathBuf};
+use std::{fmt::Debug, marker::PhantomData, path::PathBuf};
/// Trait that can be used to customize some of the customer-facing info related to the node binary
/// that is being built using this library.
@@ -36,8 +36,10 @@ use std::{fmt::Debug, marker::PhantomData, net::SocketAddr, path::PathBuf};
/// The related info is shown to the customer as part of logs or help messages.
/// It does not impact functionality.
pub trait CliConfig {
+ /// The version of the resulting node binary.
fn impl_version() -> String;
+ /// The description of the resulting node binary.
fn description(executable_name: String) -> String {
format!(
"The command-line arguments provided first will be passed to the parachain node, \n\
@@ -50,10 +52,13 @@ pub trait CliConfig {
)
}
+ /// The author of the resulting node binary.
fn author() -> String;
+ /// The support URL for the resulting node binary.
fn support_url() -> String;
+ /// The starting copyright year of the resulting node binary.
fn copyright_start_year() -> u16;
}
@@ -300,7 +305,7 @@ impl<Config: CliConfig> CliConfiguration for RelayChainCli<Config> {
.or_else(|| self.base_path.clone().map(Into::into)))
}
- fn rpc_addr(&self, default_listen_port: u16) -> sc_cli::Result<Option<SocketAddr>> {
+ fn rpc_addr(&self, default_listen_port: u16) -> sc_cli::Result<Option<Vec<RpcEndpoint>>> {
self.base.base.rpc_addr(default_listen_port)
}
diff --git a/cumulus/polkadot-parachain/polkadot-parachain-lib/src/command.rs b/cumulus/polkadot-parachain/polkadot-parachain-lib/src/command.rs
index 7f915b729e0a..320511ece5e5 100644
--- a/cumulus/polkadot-parachain/polkadot-parachain-lib/src/command.rs
+++ b/cumulus/polkadot-parachain/polkadot-parachain-lib/src/command.rs
@@ -14,8 +14,6 @@
// You should have received a copy of the GNU General Public License
// along with Cumulus. If not, see <http://www.gnu.org/licenses/>.
-#[cfg(feature = "runtime-benchmarks")]
-use crate::service::Block;
use crate::{
cli::{Cli, RelayChainCli, Subcommand},
common::{
@@ -24,32 +22,56 @@ use crate::{
AuraConsensusId, Consensus, Runtime, RuntimeResolver as RuntimeResolverT,
RuntimeResolver,
},
- NodeExtraArgs,
- },
- fake_runtime_api::{
- asset_hub_polkadot_aura::RuntimeApi as AssetHubPolkadotRuntimeApi,
- aura::RuntimeApi as AuraRuntimeApi,
+ spec::DynNodeSpec,
+ types::Block,
+ NodeBlock, NodeExtraArgs,
},
- service::{new_aura_node_spec, DynNodeSpec, ShellNode},
+ fake_runtime_api,
+ runtime::BlockNumber,
+ service::ShellNode,
};
#[cfg(feature = "runtime-benchmarks")]
use cumulus_client_service::storage_proof_size::HostFunctions as ReclaimHostFunctions;
use cumulus_primitives_core::ParaId;
use frame_benchmarking_cli::{BenchmarkCmd, SUBSTRATE_REFERENCE_HARDWARE};
use log::info;
-use parachains_common::{AssetHubPolkadotAuraId, AuraId};
use sc_cli::{Result, SubstrateCli};
use sp_runtime::traits::AccountIdConversion;
#[cfg(feature = "runtime-benchmarks")]
use sp_runtime::traits::HashingFor;
+use std::panic::{RefUnwindSafe, UnwindSafe};
/// Structure that can be used in order to provide customizers for different functionalities of the
/// node binary that is being built using this library.
pub struct RunConfig {
+ /// A custom chain spec loader.
pub chain_spec_loader: Box<dyn LoadSpec>,
+ /// A custom runtime resolver.
pub runtime_resolver: Box<dyn RuntimeResolver>,
}
+pub fn new_aura_node_spec<Block>(
+ aura_id: AuraConsensusId,
+ extra_args: &NodeExtraArgs,
+) -> Box<dyn DynNodeSpec>
+where
+ Block: NodeBlock + UnwindSafe + RefUnwindSafe,
+ Block::BoundedHeader: UnwindSafe + RefUnwindSafe,
+{
+ match aura_id {
+ AuraConsensusId::Sr25519 => crate::service::new_aura_node_spec::<
+ Block,
+ fake_runtime_api::aura_sr25519::RuntimeApi,
+ sp_consensus_aura::sr25519::AuthorityId,
+ >(extra_args),
+ AuraConsensusId::Ed25519 => crate::service::new_aura_node_spec::<
+ Block,
+ fake_runtime_api::aura_ed25519::RuntimeApi,
+ sp_consensus_aura::ed25519::AuthorityId,
+ >(extra_args),
+ }
+}
+
fn new_node_spec(
config: &sc_service::Configuration,
runtime_resolver: &Box<dyn RuntimeResolver>,
@@ -59,11 +81,11 @@ fn new_node_spec(
Ok(match runtime {
Runtime::Shell => Box::new(ShellNode),
- Runtime::Omni(consensus) => match consensus {
- Consensus::Aura(AuraConsensusId::Sr25519) =>
- new_aura_node_spec::<AuraRuntimeApi, AuraId>(extra_args),
- Consensus::Aura(AuraConsensusId::Ed25519) =>
- new_aura_node_spec::<AssetHubPolkadotRuntimeApi, AssetHubPolkadotAuraId>(extra_args),
+ Runtime::Omni(block_number, consensus) => match (block_number, consensus) {
+ (BlockNumber::U32, Consensus::Aura(aura_id)) =>
+ new_aura_node_spec::<Block<u32>>(aura_id, extra_args),
+ (BlockNumber::U64, Consensus::Aura(aura_id)) =>
+ new_aura_node_spec::<Block<u64>>(aura_id, extra_args),
},
})
}
@@ -156,7 +178,7 @@ pub fn run<CliConfig: crate::cli::CliConfig>(cmd_config: RunConfig) -> Result<()
match cmd {
#[cfg(feature = "runtime-benchmarks")]
BenchmarkCmd::Pallet(cmd) => runner.sync_run(|config| {
- cmd.run_with_spec::<HashingFor<Block>, ReclaimHostFunctions>(Some(
+ cmd.run_with_spec::<HashingFor<Block<u32>>, ReclaimHostFunctions>(Some(
config.chain_spec,
))
}),
diff --git a/cumulus/polkadot-parachain/polkadot-parachain-lib/src/common/command.rs b/cumulus/polkadot-parachain/polkadot-parachain-lib/src/common/command.rs
new file mode 100644
index 000000000000..e2826826d40e
--- /dev/null
+++ b/cumulus/polkadot-parachain/polkadot-parachain-lib/src/common/command.rs
@@ -0,0 +1,161 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Cumulus.
+
+// Cumulus is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Cumulus is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Cumulus. If not, see <https://www.gnu.org/licenses/>.
+
+use crate::common::spec::NodeSpec;
+use cumulus_client_cli::ExportGenesisHeadCommand;
+use frame_benchmarking_cli::BlockCmd;
+#[cfg(any(feature = "runtime-benchmarks"))]
+use frame_benchmarking_cli::StorageCmd;
+use sc_cli::{CheckBlockCmd, ExportBlocksCmd, ExportStateCmd, ImportBlocksCmd, RevertCmd};
+use sc_service::{Configuration, TaskManager};
+use std::{future::Future, pin::Pin};
+
+type SyncCmdResult = sc_cli::Result<()>;
+
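+/// Result of an asynchronous chain-operation command: the future that executes it,
+/// paired with the `TaskManager` that must be kept alive while it runs.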
+type AsyncCmdResult<'a> =
+ sc_cli::Result<(Pin<Box<dyn Future<Output = SyncCmdResult> + 'a>>, TaskManager)>;
+
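+/// Type-erased interface for running chain-operation commands (check block,
+/// export/import, revert, export genesis head, benchmarks) against a node
+/// without naming its concrete `NodeSpec` at the call site.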
+pub trait NodeCommandRunner {
+ fn prepare_check_block_cmd(
+ self: Box<Self>,
+ config: Configuration,
+ cmd: &CheckBlockCmd,
+ ) -> AsyncCmdResult<'_>;
+
+ fn prepare_export_blocks_cmd(
+ self: Box<Self>,
+ config: Configuration,
+ cmd: &ExportBlocksCmd,
+ ) -> AsyncCmdResult<'_>;
+
+ fn prepare_export_state_cmd(
+ self: Box<Self>,
+ config: Configuration,
+ cmd: &ExportStateCmd,
+ ) -> AsyncCmdResult<'_>;
+
+ fn prepare_import_blocks_cmd(
+ self: Box<Self>,
+ config: Configuration,
+ cmd: &ImportBlocksCmd,
+ ) -> AsyncCmdResult<'_>;
+
+ fn prepare_revert_cmd(
+ self: Box<Self>,
+ config: Configuration,
+ cmd: &RevertCmd,
+ ) -> AsyncCmdResult<'_>;
+
+ fn run_export_genesis_head_cmd(
+ self: Box<Self>,
+ config: Configuration,
+ cmd: &ExportGenesisHeadCommand,
+ ) -> SyncCmdResult;
+
+ fn run_benchmark_block_cmd(
+ self: Box<Self>,
+ config: Configuration,
+ cmd: &BlockCmd,
+ ) -> SyncCmdResult;
+
+ #[cfg(any(feature = "runtime-benchmarks"))]
+ fn run_benchmark_storage_cmd(
+ self: Box<Self>,
+ config: Configuration,
+ cmd: &StorageCmd,
+ ) -> SyncCmdResult;
+}
+
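+// Blanket implementation: every `NodeSpec` can run the standard chain-operation
+// commands by first building its partial components via `new_partial`.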
+impl<T> NodeCommandRunner for T
+where
+ T: NodeSpec,
+{
+ fn prepare_check_block_cmd(
+ self: Box<Self>,
+ config: Configuration,
+ cmd: &CheckBlockCmd,
+ ) -> AsyncCmdResult<'_> {
+ let partial = T::new_partial(&config).map_err(sc_cli::Error::Service)?;
+ Ok((Box::pin(cmd.run(partial.client, partial.import_queue)), partial.task_manager))
+ }
+
+ fn prepare_export_blocks_cmd(
+ self: Box<Self>,
+ config: Configuration,
+ cmd: &ExportBlocksCmd,
+ ) -> AsyncCmdResult<'_> {
+ let partial = T::new_partial(&config).map_err(sc_cli::Error::Service)?;
+ Ok((Box::pin(cmd.run(partial.client, config.database)), partial.task_manager))
+ }
+
+ fn prepare_export_state_cmd(
+ self: Box<Self>,
+ config: Configuration,
+ cmd: &ExportStateCmd,
+ ) -> AsyncCmdResult<'_> {
+ let partial = T::new_partial(&config).map_err(sc_cli::Error::Service)?;
+ Ok((Box::pin(cmd.run(partial.client, config.chain_spec)), partial.task_manager))
+ }
+
+ fn prepare_import_blocks_cmd(
+ self: Box<Self>,
+ config: Configuration,
+ cmd: &ImportBlocksCmd,
+ ) -> AsyncCmdResult<'_> {
+ let partial = T::new_partial(&config).map_err(sc_cli::Error::Service)?;
+ Ok((Box::pin(cmd.run(partial.client, partial.import_queue)), partial.task_manager))
+ }
+
+ fn prepare_revert_cmd(
+ self: Box<Self>,
+ config: Configuration,
+ cmd: &RevertCmd,
+ ) -> AsyncCmdResult<'_> {
+ let partial = T::new_partial(&config).map_err(sc_cli::Error::Service)?;
+ Ok((Box::pin(cmd.run(partial.client, partial.backend, None)), partial.task_manager))
+ }
+
+ fn run_export_genesis_head_cmd(
+ self: Box<Self>,
+ config: Configuration,
+ cmd: &ExportGenesisHeadCommand,
+ ) -> SyncCmdResult {
+ let partial = T::new_partial(&config).map_err(sc_cli::Error::Service)?;
+ cmd.run(partial.client)
+ }
+
+ fn run_benchmark_block_cmd(
+ self: Box<Self>,
+ config: Configuration,
+ cmd: &BlockCmd,
+ ) -> SyncCmdResult {
+ let partial = T::new_partial(&config).map_err(sc_cli::Error::Service)?;
+ cmd.run(partial.client)
+ }
+
+ #[cfg(any(feature = "runtime-benchmarks"))]
+ fn run_benchmark_storage_cmd(
+ self: Box<Self>,
+ config: Configuration,
+ cmd: &StorageCmd,
+ ) -> SyncCmdResult {
+ let partial = T::new_partial(&config).map_err(sc_cli::Error::Service)?;
+ let db = partial.backend.expose_db();
+ let storage = partial.backend.expose_storage();
+
+ cmd.run(config, partial.client, db, storage)
+ }
+}
diff --git a/cumulus/polkadot-parachain/polkadot-parachain-lib/src/common/mod.rs b/cumulus/polkadot-parachain/polkadot-parachain-lib/src/common/mod.rs
index 89bc7511dac3..907f09263fc1 100644
--- a/cumulus/polkadot-parachain/polkadot-parachain-lib/src/common/mod.rs
+++ b/cumulus/polkadot-parachain/polkadot-parachain-lib/src/common/mod.rs
@@ -20,15 +20,43 @@
pub(crate) mod aura;
pub mod chain_spec;
+pub mod command;
+pub mod rpc;
pub mod runtime;
+pub mod spec;
+pub mod types;
use cumulus_primitives_core::CollectCollationInfo;
+use sc_client_db::DbHash;
use sp_api::{ApiExt, CallApiAt, ConstructRuntimeApi, Metadata};
use sp_block_builder::BlockBuilder;
-use sp_runtime::traits::Block as BlockT;
+use sp_runtime::{
+ traits::{Block as BlockT, BlockNumber, Header as HeaderT, NumberFor},
+ OpaqueExtrinsic,
+};
use sp_session::SessionKeys;
use sp_transaction_pool::runtime_api::TaggedTransactionQueue;
-use std::path::PathBuf;
+use std::{fmt::Debug, path::PathBuf, str::FromStr};
+
+pub trait NodeBlock:
+ BlockT<Extrinsic = OpaqueExtrinsic, Header = Self::BoundedHeader, Hash = DbHash>
+ + for<'de> serde::Deserialize<'de>
+{
+ type BoundedFromStrErr: Debug;
+ type BoundedNumber: FromStr<Err = Self::BoundedFromStrErr> + BlockNumber;
+ type BoundedHeader: HeaderT<Number = Self::BoundedNumber> + Unpin;
+}
+
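+// Blanket implementation: any opaque-extrinsic block whose header is `Unpin` and
+// whose block number can be parsed from a string satisfies `NodeBlock`.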
+impl<T> NodeBlock for T
+where
+ T: BlockT<Extrinsic = OpaqueExtrinsic, Hash = DbHash> + for<'de> serde::Deserialize<'de>,
+ <T as BlockT>::Header: Unpin,
+ <NumberFor<T> as FromStr>::Err: Debug,
+{
+ type BoundedFromStrErr = <NumberFor<T> as FromStr>::Err;
+ type BoundedNumber = NumberFor<T>;
+ type BoundedHeader = <T as BlockT>::Header;
+}
/// Convenience trait that defines the basic bounds for the `RuntimeApi` of a parachain node.
pub trait NodeRuntimeApi:
diff --git a/cumulus/polkadot-parachain/polkadot-parachain-lib/src/rpc.rs b/cumulus/polkadot-parachain/polkadot-parachain-lib/src/common/rpc.rs
similarity index 59%
rename from cumulus/polkadot-parachain/polkadot-parachain-lib/src/rpc.rs
rename to cumulus/polkadot-parachain/polkadot-parachain-lib/src/common/rpc.rs
index 283a73d931d7..a4e157e87216 100644
--- a/cumulus/polkadot-parachain/polkadot-parachain-lib/src/rpc.rs
+++ b/cumulus/polkadot-parachain/polkadot-parachain-lib/src/common/rpc.rs
@@ -18,16 +18,13 @@
#![warn(missing_docs)]
-use crate::{
- common::ConstructNodeRuntimeApi,
- service::{ParachainBackend, ParachainClient},
+use crate::common::{
+ types::{AccountId, Balance, Nonce, ParachainBackend, ParachainClient},
+ ConstructNodeRuntimeApi,
};
use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApiServer};
-use parachains_common::{AccountId, Balance, Block, Nonce};
-use sc_rpc::{
- dev::{Dev, DevApiServer},
- DenyUnsafe,
-};
+use sc_rpc::dev::{Dev, DevApiServer};
+use sp_runtime::traits::Block as BlockT;
use std::{marker::PhantomData, sync::Arc};
use substrate_frame_rpc_system::{System, SystemApiServer};
use substrate_state_trie_migration_rpc::{StateMigration, StateMigrationApiServer};
@@ -37,60 +34,59 @@ pub type RpcExtension = jsonrpsee::RpcModule<()>;
pub(crate) trait BuildRpcExtensions<Client, Backend, Pool> {
fn build_rpc_extensions(
- deny_unsafe: DenyUnsafe,
client: Arc<Client>,
backend: Arc<Backend>,
pool: Arc<Pool>,
) -> sc_service::error::Result<RpcExtension>;
}
-pub(crate) struct BuildEmptyRpcExtensions<RuntimeApi>(PhantomData<RuntimeApi>);
+pub(crate) struct BuildEmptyRpcExtensions<Block, RuntimeApi>(PhantomData<(Block, RuntimeApi)>);
-impl<RuntimeApi>
+impl<Block: BlockT, RuntimeApi>
BuildRpcExtensions<
- ParachainClient<RuntimeApi>,
- ParachainBackend,
- sc_transaction_pool::FullPool<Block, ParachainClient<RuntimeApi>>,
- > for BuildEmptyRpcExtensions<RuntimeApi>
+ ParachainClient<Block, RuntimeApi>,
+ ParachainBackend<Block>,
+ sc_transaction_pool::FullPool<Block, ParachainClient<Block, RuntimeApi>>,
+ > for BuildEmptyRpcExtensions<Block, RuntimeApi>
where
- RuntimeApi: ConstructNodeRuntimeApi<Block, ParachainClient<RuntimeApi>> + Send + Sync + 'static,
+ RuntimeApi:
+ ConstructNodeRuntimeApi<Block, ParachainClient<Block, RuntimeApi>> + Send + Sync + 'static,
{
fn build_rpc_extensions(
- _deny_unsafe: DenyUnsafe,
- _client: Arc<ParachainClient<RuntimeApi>>,
- _backend: Arc<ParachainBackend>,
- _pool: Arc<sc_transaction_pool::FullPool<Block, ParachainClient<RuntimeApi>>>,
+ _client: Arc<ParachainClient<Block, RuntimeApi>>,
+ _backend: Arc<ParachainBackend<Block>>,
+ _pool: Arc<sc_transaction_pool::FullPool<Block, ParachainClient<Block, RuntimeApi>>>,
) -> sc_service::error::Result<RpcExtension> {
Ok(RpcExtension::new(()))
}
}
-pub(crate) struct BuildParachainRpcExtensions<RuntimeApi>(PhantomData<RuntimeApi>);
+pub(crate) struct BuildParachainRpcExtensions<Block, RuntimeApi>(PhantomData<(Block, RuntimeApi)>);
-impl<RuntimeApi>
+impl<Block: BlockT, RuntimeApi>
BuildRpcExtensions<
- ParachainClient<RuntimeApi>,
- ParachainBackend,
- sc_transaction_pool::FullPool<Block, ParachainClient<RuntimeApi>>,
- > for BuildParachainRpcExtensions<RuntimeApi>
+ ParachainClient<Block, RuntimeApi>,
+ ParachainBackend<Block>,
+ sc_transaction_pool::FullPool<Block, ParachainClient<Block, RuntimeApi>>,
+ > for BuildParachainRpcExtensions<Block, RuntimeApi>
where
- RuntimeApi: ConstructNodeRuntimeApi<Block, ParachainClient<RuntimeApi>> + Send + Sync + 'static,
+ RuntimeApi:
+ ConstructNodeRuntimeApi<Block, ParachainClient<Block, RuntimeApi>> + Send + Sync + 'static,
RuntimeApi::RuntimeApi: pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi<Block, Balance>
+ substrate_frame_rpc_system::AccountNonceApi<Block, AccountId, Nonce>,
{
fn build_rpc_extensions(
- deny_unsafe: DenyUnsafe,
- client: Arc<ParachainClient<RuntimeApi>>,
- backend: Arc<ParachainBackend>,
- pool: Arc<sc_transaction_pool::FullPool<Block, ParachainClient<RuntimeApi>>>,
+ client: Arc<ParachainClient<Block, RuntimeApi>>,
+ backend: Arc<ParachainBackend<Block>>,
+ pool: Arc<sc_transaction_pool::FullPool<Block, ParachainClient<Block, RuntimeApi>>>,
) -> sc_service::error::Result<RpcExtension> {
let build = || -> Result<RpcExtension, Box<dyn std::error::Error + Send + Sync>> {
let mut module = RpcExtension::new(());
- module.merge(System::new(client.clone(), pool, deny_unsafe).into_rpc())?;
+ module.merge(System::new(client.clone(), pool).into_rpc())?;
module.merge(TransactionPayment::new(client.clone()).into_rpc())?;
- module.merge(StateMigration::new(client.clone(), backend, deny_unsafe).into_rpc())?;
- module.merge(Dev::new(client, deny_unsafe).into_rpc())?;
+ module.merge(StateMigration::new(client.clone(), backend).into_rpc())?;
+ module.merge(Dev::new(client).into_rpc())?;
Ok(module)
};
diff --git a/cumulus/polkadot-parachain/polkadot-parachain-lib/src/common/runtime.rs b/cumulus/polkadot-parachain/polkadot-parachain-lib/src/common/runtime.rs
index c64eda12d5ef..bddbb0a85d03 100644
--- a/cumulus/polkadot-parachain/polkadot-parachain-lib/src/common/runtime.rs
+++ b/cumulus/polkadot-parachain/polkadot-parachain-lib/src/common/runtime.rs
@@ -34,12 +34,21 @@ pub enum Consensus {
Aura(AuraConsensusId),
}
+/// The choice of block number for the parachain omni-node.
+#[derive(PartialEq)]
+pub enum BlockNumber {
+ /// u32
+ U32,
+ /// u64
+ U64,
+}
+
/// Helper enum listing the supported Runtime types
#[derive(PartialEq)]
pub enum Runtime {
/// None of the system-chain runtimes: rather, the node will be agnostic to the runtime, i.e. act
/// as an omni-node, and simply run with the given consensus algorithm.
- Omni(Consensus),
+ Omni(BlockNumber, Consensus),
/// Shell
Shell,
}
@@ -51,11 +60,11 @@ pub trait RuntimeResolver {
}
/// Default implementation for `RuntimeResolver` that just returns
-/// `Runtime::Omni(Consensus::Aura(AuraConsensusId::Sr25519))`.
+/// `Runtime::Omni(BlockNumber::U32, Consensus::Aura(AuraConsensusId::Sr25519))`.
pub struct DefaultRuntimeResolver;
impl RuntimeResolver for DefaultRuntimeResolver {
fn runtime(&self, _chain_spec: &dyn ChainSpec) -> sc_cli::Result<Runtime> {
- Ok(Runtime::Omni(Consensus::Aura(AuraConsensusId::Sr25519)))
+ Ok(Runtime::Omni(BlockNumber::U32, Consensus::Aura(AuraConsensusId::Sr25519)))
}
}
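+
+// As a sketch (not part of this change set), a downstream custom resolver that
+// opts into 64-bit block numbers only needs to return the new enum variant:
+//
+// struct U64RuntimeResolver;
+//
+// impl RuntimeResolver for U64RuntimeResolver {
+//     fn runtime(&self, _chain_spec: &dyn ChainSpec) -> sc_cli::Result<Runtime> {
+//         Ok(Runtime::Omni(BlockNumber::U64, Consensus::Aura(AuraConsensusId::Sr25519)))
+//     }
+// }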
diff --git a/cumulus/polkadot-parachain/polkadot-parachain-lib/src/common/spec.rs b/cumulus/polkadot-parachain/polkadot-parachain-lib/src/common/spec.rs
new file mode 100644
index 000000000000..55e042aed87e
--- /dev/null
+++ b/cumulus/polkadot-parachain/polkadot-parachain-lib/src/common/spec.rs
@@ -0,0 +1,392 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Cumulus.
+
+// Cumulus is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Cumulus is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Cumulus. If not, see <https://www.gnu.org/licenses/>.
+
+use crate::common::{
+ command::NodeCommandRunner,
+ rpc::BuildRpcExtensions,
+ types::{
+ ParachainBackend, ParachainBlockImport, ParachainClient, ParachainHostFunctions,
+ ParachainService,
+ },
+ ConstructNodeRuntimeApi, NodeBlock, NodeExtraArgs,
+};
+use cumulus_client_cli::CollatorOptions;
+use cumulus_client_service::{
+ build_network, build_relay_chain_interface, prepare_node_config, start_relay_chain_tasks,
+ BuildNetworkParams, CollatorSybilResistance, DARecoveryProfile, StartRelayChainTasksParams,
+};
+use cumulus_primitives_core::{BlockT, ParaId};
+use cumulus_relay_chain_interface::{OverseerHandle, RelayChainInterface};
+use parachains_common::Hash;
+use polkadot_primitives::CollatorPair;
+use prometheus_endpoint::Registry;
+use sc_consensus::DefaultImportQueue;
+use sc_executor::{HeapAllocStrategy, DEFAULT_HEAP_ALLOC_STRATEGY};
+use sc_network::{config::FullNetworkConfiguration, NetworkBackend, NetworkBlock};
+use sc_service::{Configuration, ImportQueue, PartialComponents, TaskManager};
+use sc_sysinfo::HwBench;
+use sc_telemetry::{TelemetryHandle, TelemetryWorker};
+use sc_transaction_pool::FullPool;
+use sp_keystore::KeystorePtr;
+use std::{future::Future, pin::Pin, sync::Arc, time::Duration};
+
+pub(crate) trait BuildImportQueue<Block: BlockT, RuntimeApi> {
+ fn build_import_queue(
+ client: Arc<ParachainClient<Block, RuntimeApi>>,
+ block_import: ParachainBlockImport<Block>,
+ config: &Configuration,
+ telemetry_handle: Option<TelemetryHandle>,
+ task_manager: &TaskManager,
+ ) -> sc_service::error::Result<DefaultImportQueue<Block>>;
+}
+
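+/// Strategy for starting the collator consensus task; invoked with all the
+/// service parts it needs once the node runs as a collator.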
+pub(crate) trait StartConsensus<Block: BlockT, RuntimeApi>
+where
+ RuntimeApi: ConstructNodeRuntimeApi<Block, ParachainClient<Block, RuntimeApi>>,
+{
+ fn start_consensus(
+ client: Arc<ParachainClient<Block, RuntimeApi>>,
+ block_import: ParachainBlockImport<Block>,
+ prometheus_registry: Option<&Registry>,
+ telemetry: Option<TelemetryHandle>,
+ task_manager: &TaskManager,
+ relay_chain_interface: Arc<dyn RelayChainInterface>,
+ transaction_pool: Arc<FullPool<Block, ParachainClient<Block, RuntimeApi>>>,
+ keystore: KeystorePtr,
+ relay_chain_slot_duration: Duration,
+ para_id: ParaId,
+ collator_key: CollatorPair,
+ overseer_handle: OverseerHandle,
+ announce_block: Arc<dyn Fn(Hash, Option<Vec<u8>>) + Send + Sync>,
+ backend: Arc<ParachainBackend<Block>>,
+ node_extra_args: NodeExtraArgs,
+ ) -> Result<(), sc_service::Error>;
+}
+
+/// Checks that the hardware meets the requirements and prints a warning otherwise.
+fn warn_if_slow_hardware(hwbench: &sc_sysinfo::HwBench) {
+ // Polkadot para-chains should generally use these requirements to ensure that the relay-chain
+ // will not take longer than expected to import its blocks.
+ if let Err(err) = frame_benchmarking_cli::SUBSTRATE_REFERENCE_HARDWARE.check_hardware(hwbench) {
+ log::warn!(
+ "⚠️ The hardware does not meet the minimal requirements {} for role 'Authority' find out more at:\n\
+ https://wiki.polkadot.network/docs/maintain-guides-how-to-validate-polkadot#reference-hardware",
+ err
+ );
+ }
+}
+
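+/// Describes a concrete parachain node: its block type, runtime API, import-queue
+/// and RPC builders, and how consensus is started. The provided methods assemble
+/// the partial components and the full node from these pieces.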
+pub(crate) trait NodeSpec {
+ type Block: NodeBlock;
+
+ type RuntimeApi: ConstructNodeRuntimeApi<
+ Self::Block,
+ ParachainClient<Self::Block, Self::RuntimeApi>,
+ >;
+
+ type BuildImportQueue: BuildImportQueue<Self::Block, Self::RuntimeApi>;
+
+ type BuildRpcExtensions: BuildRpcExtensions<
+ ParachainClient<Self::Block, Self::RuntimeApi>,
+ ParachainBackend<Self::Block>,
+ FullPool<Self::Block, ParachainClient<Self::Block, Self::RuntimeApi>>,
+ >;
+
+ type StartConsensus: StartConsensus<Self::Block, Self::RuntimeApi>;
+
+ const SYBIL_RESISTANCE: CollatorSybilResistance;
+
+ /// Builds the partial components of a full service.
+ ///
+ /// Use this function if you don't actually need the full service, but just its parts in order
+ /// to be able to perform chain operations.
+ fn new_partial(
+ config: &Configuration,
+ ) -> sc_service::error::Result<ParachainService<Self::Block, Self::RuntimeApi>> {
+ let telemetry = config
+ .telemetry_endpoints
+ .clone()
+ .filter(|x| !x.is_empty())
+ .map(|endpoints| -> Result<_, sc_telemetry::Error> {
+ let worker = TelemetryWorker::new(16)?;
+ let telemetry = worker.handle().new_telemetry(endpoints);
+ Ok((worker, telemetry))
+ })
+ .transpose()?;
+
+ let heap_pages = config.default_heap_pages.map_or(DEFAULT_HEAP_ALLOC_STRATEGY, |h| {
+ HeapAllocStrategy::Static { extra_pages: h as _ }
+ });
+
+ let executor = sc_executor::WasmExecutor::<ParachainHostFunctions>::builder()
+ .with_execution_method(config.wasm_method)
+ .with_max_runtime_instances(config.max_runtime_instances)
+ .with_runtime_cache_size(config.runtime_cache_size)
+ .with_onchain_heap_alloc_strategy(heap_pages)
+ .with_offchain_heap_alloc_strategy(heap_pages)
+ .build();
+
+ let (client, backend, keystore_container, task_manager) =
+ sc_service::new_full_parts_record_import::<Self::Block, Self::RuntimeApi, _>(
+ config,
+ telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
+ executor,
+ true,
+ )?;
+ let client = Arc::new(client);
+
+ let telemetry_worker_handle = telemetry.as_ref().map(|(worker, _)| worker.handle());
+
+ let telemetry = telemetry.map(|(worker, telemetry)| {
+ task_manager.spawn_handle().spawn("telemetry", None, worker.run());
+ telemetry
+ });
+
+ let transaction_pool = sc_transaction_pool::BasicPool::new_full(
+ config.transaction_pool.clone(),
+ config.role.is_authority().into(),
+ config.prometheus_registry(),
+ task_manager.spawn_essential_handle(),
+ client.clone(),
+ );
+
+ let block_import = ParachainBlockImport::new(client.clone(), backend.clone());
+
+ let import_queue = Self::BuildImportQueue::build_import_queue(
+ client.clone(),
+ block_import.clone(),
+ config,
+ telemetry.as_ref().map(|telemetry| telemetry.handle()),
+ &task_manager,
+ )?;
+
+ Ok(PartialComponents {
+ backend,
+ client,
+ import_queue,
+ keystore_container,
+ task_manager,
+ transaction_pool,
+ select_chain: (),
+ other: (block_import, telemetry, telemetry_worker_handle),
+ })
+ }
+
+ /// Start a node with the given parachain spec.
+ ///
+ /// This is the actual implementation that is abstract over the executor and the runtime api.
+ fn start_node(
+ parachain_config: Configuration,
+ polkadot_config: Configuration,
+ collator_options: CollatorOptions,
+ para_id: ParaId,
+ hwbench: Option<HwBench>,
+ node_extra_args: NodeExtraArgs,
+ ) -> Pin<Box<dyn Future<Output = sc_service::error::Result<TaskManager>>>>
+ where
+ Net: NetworkBackend<Self::Block, Hash>,
+ {
+ Box::pin(async move {
+ let parachain_config = prepare_node_config(parachain_config);
+
+ let params = Self::new_partial(&parachain_config)?;
+ let (block_import, mut telemetry, telemetry_worker_handle) = params.other;
+
+ let client = params.client.clone();
+ let backend = params.backend.clone();
+
+ let mut task_manager = params.task_manager;
+ let (relay_chain_interface, collator_key) = build_relay_chain_interface(
+ polkadot_config,
+ &parachain_config,
+ telemetry_worker_handle,
+ &mut task_manager,
+ collator_options.clone(),
+ hwbench.clone(),
+ )
+ .await
+ .map_err(|e| sc_service::Error::Application(Box::new(e) as Box<_>))?;
+
+ let validator = parachain_config.role.is_authority();
+ let prometheus_registry = parachain_config.prometheus_registry().cloned();
+ let transaction_pool = params.transaction_pool.clone();
+ let import_queue_service = params.import_queue.service();
+ let net_config = FullNetworkConfiguration::<_, _, Net>::new(
+ &parachain_config.network,
+ prometheus_registry.clone(),
+ );
+
+ let (network, system_rpc_tx, tx_handler_controller, start_network, sync_service) =
+ build_network(BuildNetworkParams {
+ parachain_config: &parachain_config,
+ net_config,
+ client: client.clone(),
+ transaction_pool: transaction_pool.clone(),
+ para_id,
+ spawn_handle: task_manager.spawn_handle(),
+ relay_chain_interface: relay_chain_interface.clone(),
+ import_queue: params.import_queue,
+ sybil_resistance_level: Self::SYBIL_RESISTANCE,
+ })
+ .await?;
+
+ let rpc_builder = {
+ let client = client.clone();
+ let transaction_pool = transaction_pool.clone();
+ let backend_for_rpc = backend.clone();
+
+ Box::new(move |_| {
+ Self::BuildRpcExtensions::build_rpc_extensions(
+ client.clone(),
+ backend_for_rpc.clone(),
+ transaction_pool.clone(),
+ )
+ })
+ };
+
+ sc_service::spawn_tasks(sc_service::SpawnTasksParams {
+ rpc_builder,
+ client: client.clone(),
+ transaction_pool: transaction_pool.clone(),
+ task_manager: &mut task_manager,
+ config: parachain_config,
+ keystore: params.keystore_container.keystore(),
+ backend: backend.clone(),
+ network: network.clone(),
+ sync_service: sync_service.clone(),
+ system_rpc_tx,
+ tx_handler_controller,
+ telemetry: telemetry.as_mut(),
+ })?;
+
+ if let Some(hwbench) = hwbench {
+ sc_sysinfo::print_hwbench(&hwbench);
+ if validator {
+ warn_if_slow_hardware(&hwbench);
+ }
+
+ if let Some(ref mut telemetry) = telemetry {
+ let telemetry_handle = telemetry.handle();
+ task_manager.spawn_handle().spawn(
+ "telemetry_hwbench",
+ None,
+ sc_sysinfo::initialize_hwbench_telemetry(telemetry_handle, hwbench),
+ );
+ }
+ }
+
+ let announce_block = {
+ let sync_service = sync_service.clone();
+ Arc::new(move |hash, data| sync_service.announce_block(hash, data))
+ };
+
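+ // Both the Polkadot and Kusama relay chains run 6-second slots.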
+ let relay_chain_slot_duration = Duration::from_secs(6);
+
+ let overseer_handle = relay_chain_interface
+ .overseer_handle()
+ .map_err(|e| sc_service::Error::Application(Box::new(e)))?;
+
+ start_relay_chain_tasks(StartRelayChainTasksParams {
+ client: client.clone(),
+ announce_block: announce_block.clone(),
+ para_id,
+ relay_chain_interface: relay_chain_interface.clone(),
+ task_manager: &mut task_manager,
+ da_recovery_profile: if validator {
+ DARecoveryProfile::Collator
+ } else {
+ DARecoveryProfile::FullNode
+ },
+ import_queue: import_queue_service,
+ relay_chain_slot_duration,
+ recovery_handle: Box::new(overseer_handle.clone()),
+ sync_service,
+ })?;
+
+ if validator {
+ Self::StartConsensus::start_consensus(
+ client.clone(),
+ block_import,
+ prometheus_registry.as_ref(),
+ telemetry.as_ref().map(|t| t.handle()),
+ &task_manager,
+ relay_chain_interface.clone(),
+ transaction_pool,
+ params.keystore_container.keystore(),
+ relay_chain_slot_duration,
+ para_id,
+ collator_key.expect("Command line arguments do not allow this. qed"),
+ overseer_handle,
+ announce_block,
+ backend.clone(),
+ node_extra_args,
+ )?;
+ }
+
+ start_network.start_network();
+
+ Ok(task_manager)
+ })
+ }
+}
+
+pub(crate) trait DynNodeSpec: NodeCommandRunner {
+ fn start_node(
+ self: Box<Self>,
+ parachain_config: Configuration,
+ polkadot_config: Configuration,
+ collator_options: CollatorOptions,
+ para_id: ParaId,
+ hwbench: Option<HwBench>,