From c1fbdeee68d9efb586bf04becc13f403d42cee56 Mon Sep 17 00:00:00 2001 From: asynchronous rob Date: Wed, 18 May 2022 18:29:55 -0500 Subject: [PATCH] Prospective Parachains Subsystem (#4913) * docs and skeleton * subsystem skeleton * main loop * fragment tree basics & fmt * begin fragment trees & view * flesh out more of view update logic * further flesh out update logic * some refcount functions for fragment trees * add fatal/non-fatal errors * use non-fatal results * clear up some TODOs * ideal format for scheduling info * add a bunch of TODOs * some more fluff * extract fragment graph to submodule * begin fragment graph API * trees, not graphs * improve docs * scope and constructor for trees * add some test TODOs * limit max ancestors and store constraints * constructor * constraints: fix bug in HRMP watermarks * fragment tree population logic * set::retain * extract population logic * implement add_and_populate * fmt * add some TODOs in tests * implement child-selection * strip out old stuff based on wrong assumptions * use fatality * implement pruning * remove unused ancestor constraints * fragment tree instantiation * remove outdated comment * add message/request types and skeleton for handling * fmt * implement handle_candidate_seconded * candidate storage: handle backed * implement handle_candidate_backed * implement answer_get_backable_candidate * remove async where not needed * implement fetch_ancestry * add logic for run_iteration * add some docs * remove global allow(unused), fix warnings * make spellcheck happy (despite English) * fmt * bump Cargo.lock * replace tracing with gum * introduce PopulateFrom trait * implement GetHypotheticalDepths * revise docs slightly * first fragment tree scope test * more scope tests * test add_candidate * fmt * test retain * refactor test code * test populate is recursive * test contiguity of depth 0 is maintained * add_and_populate tests * cycle tests * remove PopulateFrom trait * fmt * test hypothetical depths 
(non-recursive) * have CandidateSeconded return membership * tree membership requests * Add a ProspectiveParachainsSubsystem struct * add a staging API for base constraints * add a `From` impl * add runtime API for staging_validity_constraints * implement fetch_base_constraints * implement `fetch_upcoming_paras` * remove reconstruction of candidate receipt; no obvious usecase * fmt * export message to broader module * remove last TODO * correctly export * fix compilation and add GetMinimumRelayParent request * make provisioner into a real subsystem with proper mesage bounds * fmt * fix ChannelsOut in overseer test * fix overseer tests * fix again * fmt --- Cargo.lock | 479 +++--- Cargo.toml | 1 + node/core/prospective-parachains/Cargo.toml | 26 + node/core/prospective-parachains/src/error.rs | 83 ++ .../src/fragment_tree.rs | 1312 +++++++++++++++++ node/core/prospective-parachains/src/lib.rs | 591 ++++++++ node/core/runtime-api/src/cache.rs | 44 +- node/core/runtime-api/src/lib.rs | 11 + node/core/runtime-api/src/tests.rs | 19 +- node/overseer/src/dummy.rs | 8 +- node/overseer/src/lib.rs | 10 +- node/overseer/src/tests.rs | 18 +- node/service/src/overseer.rs | 2 + node/subsystem-types/src/messages.rs | 83 +- .../src/inclusion_emulator/staging.rs | 106 +- primitives/src/runtime_api.rs | 6 +- primitives/src/v2/mod.rs | 1 + primitives/src/vstaging/mod.rs | 64 + runtime/kusama/src/lib.rs | 4 + runtime/polkadot/src/lib.rs | 4 + runtime/rococo/src/lib.rs | 4 + runtime/test-runtime/src/lib.rs | 4 + runtime/westend/src/lib.rs | 4 + 23 files changed, 2637 insertions(+), 247 deletions(-) create mode 100644 node/core/prospective-parachains/Cargo.toml create mode 100644 node/core/prospective-parachains/src/error.rs create mode 100644 node/core/prospective-parachains/src/fragment_tree.rs create mode 100644 node/core/prospective-parachains/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 2f4d62410042..53d7911958ef 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -441,7 +441,7 @@ 
dependencies = [ "futures-timer", "hex", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parking_lot 0.12.0", "sc-chain-spec", "sc-client-api", @@ -474,7 +474,7 @@ dependencies = [ "futures 0.3.21", "jsonrpsee", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parking_lot 0.12.0", "sc-rpc", "sc-utils", @@ -494,7 +494,7 @@ name = "beefy-primitives" version = "4.0.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30e7730cf037518ec921c336fcbb0f" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-api", "sp-application-crypto", @@ -543,16 +543,28 @@ version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" +[[package]] +name = "bitvec" +version = "0.20.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7774144344a4faa177370406a7ff5f1da24303817368584c6206c8303eb07848" +dependencies = [ + "funty 1.1.0", + "radium 0.6.2", + "tap", + "wyz 0.2.0", +] + [[package]] name = "bitvec" version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1489fcb93a5bb47da0462ca93ad252ad6af2145cce58d10d46a83931ba9f016b" dependencies = [ - "funty", - "radium", + "funty 2.0.0", + "radium 0.7.0", "tap", - "wyz", + "wyz 0.5.0", ] [[package]] @@ -690,7 +702,7 @@ dependencies = [ "frame-support", "hex", "hex-literal", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "serde", "sp-core", @@ -705,7 +717,7 @@ version = "0.1.0" dependencies = [ "bp-runtime", "frame-support", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-std", ] @@ -714,14 +726,14 @@ dependencies = [ name = "bp-messages" version = "0.1.0" dependencies = [ - "bitvec", + "bitvec 1.0.0", "bp-runtime", "frame-support", "frame-system", "hex", "hex-literal", "impl-trait-for-tuples", - "parity-scale-codec", + "parity-scale-codec 
3.1.2", "scale-info", "serde", "sp-core", @@ -737,7 +749,7 @@ dependencies = [ "frame-support", "frame-system", "hex", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-api", "sp-core", @@ -754,7 +766,7 @@ dependencies = [ "bp-polkadot-core", "bp-runtime", "frame-support", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "smallvec", "sp-api", "sp-runtime", @@ -770,7 +782,7 @@ dependencies = [ "hash-db", "hex-literal", "num-traits", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-core", "sp-io", @@ -787,7 +799,7 @@ dependencies = [ "bp-header-chain", "ed25519-dalek", "finality-grandpa", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sp-application-crypto", "sp-finality-grandpa", "sp-runtime", @@ -802,7 +814,7 @@ dependencies = [ "bp-polkadot-core", "bp-rococo", "bp-runtime", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sp-api", "sp-runtime", "sp-std", @@ -824,7 +836,7 @@ dependencies = [ "pallet-bridge-grandpa", "pallet-bridge-messages", "pallet-transaction-payment", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-api", "sp-core", @@ -2024,7 +2036,7 @@ dependencies = [ "futures-timer", "log", "num-traits", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parking_lot 0.11.2", "scale-info", ] @@ -2098,7 +2110,7 @@ name = "fork-tree" version = "3.0.0" source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30e7730cf037518ec921c336fcbb0f" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.1.2", ] [[package]] @@ -2120,7 +2132,7 @@ dependencies = [ "frame-system", "linregress", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "paste", "scale-info", "serde", @@ -2154,7 +2166,7 @@ dependencies = [ "linked-hash-map", "log", "memory-db", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "rand 0.8.5", "rand_pcg 0.3.1", "sc-block-builder", @@ -2202,7 +2214,7 @@ dependencies = [ "frame-election-provider-solution-type", 
"frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-arithmetic", "sp-npos-elections", @@ -2217,7 +2229,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30 dependencies = [ "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-core", "sp-io", @@ -2233,7 +2245,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df6bb8542ef006ef0de09a5c4420787d79823c0ed7924225822362fd2bf2ff2d" dependencies = [ "cfg-if 1.0.0", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "serde", ] @@ -2250,7 +2262,7 @@ dependencies = [ "k256", "log", "once_cell", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "paste", "scale-info", "serde", @@ -2310,7 +2322,7 @@ dependencies = [ "frame-support", "frame-support-test-pallet", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "pretty_assertions", "rustversion", "scale-info", @@ -2332,7 +2344,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30 dependencies = [ "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", ] @@ -2343,7 +2355,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30 dependencies = [ "frame-support", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "serde", "sp-core", @@ -2361,7 +2373,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-core", "sp-runtime", @@ -2373,7 +2385,7 @@ name = "frame-system-rpc-runtime-api" version = "4.0.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30e7730cf037518ec921c336fcbb0f" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sp-api", ] @@ -2422,6 +2434,12 @@ version = "1.2.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "2022715d62ab30faffd124d40b76f4134a550a87792276512b18d63272333394" +[[package]] +name = "funty" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fed34cd105917e91daa4da6b3728c47b068749d6a62c59811f06ed2ac71d9da7" + [[package]] name = "funty" version = "2.0.0" @@ -3000,7 +3018,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba6a270039626615617f3f36d15fc827041df3b78c439da2cadfa47455a77f2f" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.1.2", ] [[package]] @@ -3292,7 +3310,7 @@ name = "kusama-runtime" version = "0.9.22" dependencies = [ "beefy-primitives", - "bitvec", + "bitvec 1.0.0", "frame-benchmarking", "frame-election-provider-support", "frame-executive", @@ -3346,7 +3364,7 @@ dependencies = [ "pallet-vesting", "pallet-xcm", "pallet-xcm-benchmarks", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-primitives", "polkadot-runtime-common", "polkadot-runtime-parachains", @@ -4875,7 +4893,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-runtime", "sp-std", @@ -4889,7 +4907,7 @@ dependencies = [ "frame-support", "frame-system", "pallet-session", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-application-crypto", "sp-authority-discovery", @@ -4905,7 +4923,7 @@ dependencies = [ "frame-support", "frame-system", "impl-trait-for-tuples", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-authorship", "sp-runtime", @@ -4924,7 +4942,7 @@ dependencies = [ "pallet-authorship", "pallet-session", "pallet-timestamp", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-application-crypto", "sp-consensus-babe", @@ -4947,7 +4965,7 @@ dependencies = [ "frame-system", "log", "pallet-balances", - "parity-scale-codec", + 
"parity-scale-codec 3.1.2", "scale-info", "sp-core", "sp-io", @@ -4984,7 +5002,7 @@ dependencies = [ "frame-support", "frame-system", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-runtime", "sp-std", @@ -4999,7 +5017,7 @@ dependencies = [ "frame-support", "frame-system", "pallet-session", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "serde", "sp-runtime", @@ -5020,7 +5038,7 @@ dependencies = [ "pallet-beefy", "pallet-mmr", "pallet-session", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "serde", "sp-core", @@ -5039,7 +5057,7 @@ dependencies = [ "frame-system", "log", "pallet-treasury", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-core", "sp-io", @@ -5056,7 +5074,7 @@ dependencies = [ "frame-support", "frame-system", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-core", "sp-io", @@ -5077,7 +5095,7 @@ dependencies = [ "frame-system", "log", "num-traits", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "serde", "sp-core", @@ -5092,7 +5110,7 @@ dependencies = [ name = "pallet-bridge-messages" version = "0.1.0" dependencies = [ - "bitvec", + "bitvec 1.0.0", "bp-message-dispatch", "bp-messages", "bp-runtime", @@ -5102,7 +5120,7 @@ dependencies = [ "log", "num-traits", "pallet-balances", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "serde", "sp-core", @@ -5122,7 +5140,7 @@ dependencies = [ "log", "pallet-bounties", "pallet-treasury", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-core", "sp-io", @@ -5139,7 +5157,7 @@ dependencies = [ "frame-support", "frame-system", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-core", "sp-io", @@ -5155,7 +5173,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "serde", "sp-io", @@ -5173,7 +5191,7 @@ dependencies = 
[ "frame-support", "frame-system", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "rand 0.7.3", "scale-info", "sp-arithmetic", @@ -5194,7 +5212,7 @@ dependencies = [ "frame-benchmarking", "frame-election-provider-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sp-npos-elections", "sp-runtime", ] @@ -5208,7 +5226,7 @@ dependencies = [ "frame-support", "frame-system", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-core", "sp-io", @@ -5225,7 +5243,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-arithmetic", "sp-runtime", @@ -5243,7 +5261,7 @@ dependencies = [ "log", "pallet-authorship", "pallet-session", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-application-crypto", "sp-core", @@ -5264,7 +5282,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-io", "sp-runtime", @@ -5281,7 +5299,7 @@ dependencies = [ "frame-system", "log", "pallet-authorship", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-application-crypto", "sp-core", @@ -5299,7 +5317,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-core", "sp-io", @@ -5317,7 +5335,7 @@ dependencies = [ "frame-support", "frame-system", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-core", "sp-io", @@ -5334,7 +5352,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-core", "sp-io", @@ -5349,7 +5367,7 @@ version = "3.0.0" source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30e7730cf037518ec921c336fcbb0f" dependencies = [ "jsonrpsee", - "parity-scale-codec", 
+ "parity-scale-codec 3.1.2", "serde", "sp-api", "sp-blockchain", @@ -5366,7 +5384,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-io", "sp-runtime", @@ -5380,7 +5398,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30 dependencies = [ "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-io", "sp-runtime", @@ -5395,7 +5413,7 @@ dependencies = [ "frame-support", "frame-system", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-core", "sp-runtime", @@ -5415,7 +5433,7 @@ dependencies = [ "pallet-bags-list", "pallet-nomination-pools", "pallet-staking", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-runtime", "sp-staking", @@ -5431,7 +5449,7 @@ dependencies = [ "frame-system", "log", "pallet-balances", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "serde", "sp-runtime", @@ -5455,7 +5473,7 @@ dependencies = [ "pallet-offences", "pallet-session", "pallet-staking", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-runtime", "sp-staking", @@ -5470,7 +5488,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-core", "sp-io", @@ -5486,7 +5504,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-io", "sp-runtime", @@ -5501,7 +5519,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-io", "sp-runtime", @@ -5517,7 +5535,7 @@ dependencies = [ "frame-support", "frame-system", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-io", "sp-runtime", @@ -5534,7 +5552,7 @@ dependencies = [ 
"impl-trait-for-tuples", "log", "pallet-timestamp", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-core", "sp-io", @@ -5568,7 +5586,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30 dependencies = [ "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "rand_chacha 0.2.2", "scale-info", "sp-runtime", @@ -5587,7 +5605,7 @@ dependencies = [ "log", "pallet-authorship", "pallet-session", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "rand_chacha 0.2.2", "scale-info", "serde", @@ -5625,7 +5643,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30 dependencies = [ "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-io", "sp-runtime", @@ -5641,7 +5659,7 @@ dependencies = [ "frame-support", "frame-system", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-inherents", "sp-io", @@ -5660,7 +5678,7 @@ dependencies = [ "frame-system", "log", "pallet-treasury", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "serde", "sp-core", @@ -5676,7 +5694,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30 dependencies = [ "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "serde", "smallvec", @@ -5693,7 +5711,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30 dependencies = [ "jsonrpsee", "pallet-transaction-payment-rpc-runtime-api", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sp-api", "sp-blockchain", "sp-core", @@ -5707,7 +5725,7 @@ version = "4.0.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30e7730cf037518ec921c336fcbb0f" dependencies = [ "pallet-transaction-payment", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sp-api", "sp-runtime", ] @@ -5722,7 +5740,7 @@ dependencies = [ 
"frame-system", "impl-trait-for-tuples", "pallet-balances", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "serde", "sp-runtime", @@ -5737,7 +5755,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-core", "sp-io", @@ -5754,7 +5772,7 @@ dependencies = [ "frame-support", "frame-system", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-runtime", "sp-std", @@ -5768,7 +5786,7 @@ dependencies = [ "frame-system", "log", "pallet-balances", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-parachain", "polkadot-runtime-parachains", "scale-info", @@ -5793,7 +5811,7 @@ dependencies = [ "pallet-assets", "pallet-balances", "pallet-xcm", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-primitives", "polkadot-runtime-common", "scale-info", @@ -5826,6 +5844,19 @@ dependencies = [ "snap", ] +[[package]] +name = "parity-scale-codec" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "373b1a4c1338d9cd3d1fa53b3a11bdab5ab6bd80a20f7f7becd76953ae2be909" +dependencies = [ + "arrayvec 0.7.2", + "bitvec 0.20.4", + "byte-slice-cast", + "impl-trait-for-tuples", + "serde", +] + [[package]] name = "parity-scale-codec" version = "3.1.2" @@ -5833,7 +5864,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e8b44461635bbb1a0300f100a841e571e7d919c81c73075ef5d152ffdb521066" dependencies = [ "arrayvec 0.7.2", - "bitvec", + "bitvec 1.0.0", "byte-slice-cast", "impl-trait-for-tuples", "parity-scale-codec-derive", @@ -6158,7 +6189,7 @@ name = "polkadot-availability-bitfield-distribution" version = "0.9.22" dependencies = [ "assert_matches", - "bitvec", + "bitvec 1.0.0", "env_logger 0.9.0", "futures 0.3.21", "log", @@ -6187,7 +6218,7 @@ dependencies = [ "futures 0.3.21", "futures-timer", "lru 0.7.5", - "parity-scale-codec", + "parity-scale-codec 3.1.2", 
"polkadot-erasure-coding", "polkadot-node-network-protocol", "polkadot-node-primitives", @@ -6217,7 +6248,7 @@ dependencies = [ "futures-timer", "log", "lru 0.7.5", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-erasure-coding", "polkadot-node-network-protocol", "polkadot-node-primitives", @@ -6313,7 +6344,7 @@ dependencies = [ "futures 0.3.21", "futures-timer", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-node-network-protocol", "polkadot-node-primitives", "polkadot-node-subsystem", @@ -6334,7 +6365,7 @@ dependencies = [ name = "polkadot-core-primitives" version = "0.9.22" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parity-util-mem", "scale-info", "sp-core", @@ -6354,7 +6385,7 @@ dependencies = [ "futures-timer", "lazy_static", "lru 0.7.5", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-erasure-coding", "polkadot-node-network-protocol", "polkadot-node-primitives", @@ -6377,7 +6408,7 @@ dependencies = [ name = "polkadot-erasure-coding" version = "0.9.22" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-node-primitives", "polkadot-primitives", "reed-solomon-novelpoly", @@ -6423,7 +6454,7 @@ dependencies = [ "bytes", "futures 0.3.21", "futures-timer", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parking_lot 0.12.0", "polkadot-node-network-protocol", "polkadot-node-subsystem", @@ -6444,7 +6475,7 @@ name = "polkadot-node-collation-generation" version = "0.9.22" dependencies = [ "futures 0.3.21", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-erasure-coding", "polkadot-node-primitives", "polkadot-node-subsystem", @@ -6463,7 +6494,7 @@ name = "polkadot-node-core-approval-voting" version = "0.9.22" dependencies = [ "assert_matches", - "bitvec", + "bitvec 1.0.0", "derive_more", "futures 0.3.21", "futures-timer", @@ -6471,7 +6502,7 @@ dependencies = [ "kvdb-memorydb", "lru 0.7.5", "merlin", - "parity-scale-codec", + 
"parity-scale-codec 3.1.2", "parking_lot 0.12.0", "polkadot-node-jaeger", "polkadot-node-primitives", @@ -6501,14 +6532,14 @@ name = "polkadot-node-core-av-store" version = "0.9.22" dependencies = [ "assert_matches", - "bitvec", + "bitvec 1.0.0", "env_logger 0.9.0", "futures 0.3.21", "futures-timer", "kvdb", "kvdb-memorydb", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parking_lot 0.12.0", "polkadot-erasure-coding", "polkadot-node-primitives", @@ -6529,7 +6560,7 @@ name = "polkadot-node-core-backing" version = "0.9.22" dependencies = [ "assert_matches", - "bitvec", + "bitvec 1.0.0", "fatality", "futures 0.3.21", "polkadot-erasure-coding", @@ -6573,7 +6604,7 @@ dependencies = [ "assert_matches", "async-trait", "futures 0.3.21", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-node-core-pvf", "polkadot-node-primitives", "polkadot-node-subsystem", @@ -6594,7 +6625,7 @@ version = "0.9.22" dependencies = [ "futures 0.3.21", "maplit", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-node-primitives", "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", @@ -6616,7 +6647,7 @@ dependencies = [ "futures-timer", "kvdb", "kvdb-memorydb", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parking_lot 0.12.0", "polkadot-node-primitives", "polkadot-node-subsystem", @@ -6639,7 +6670,7 @@ dependencies = [ "kvdb", "kvdb-memorydb", "lru 0.7.5", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-node-primitives", "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", @@ -6670,11 +6701,29 @@ dependencies = [ "tracing-gum", ] +[[package]] +name = "polkadot-node-core-prospective-parachains" +version = "0.9.16" +dependencies = [ + "assert_matches", + "bitvec 1.0.0", + "fatality", + "futures 0.3.21", + "parity-scale-codec 2.3.1", + "polkadot-node-primitives", + "polkadot-node-subsystem", + "polkadot-node-subsystem-util", + "polkadot-primitives", + "polkadot-primitives-test-helpers", + "thiserror", + 
"tracing-gum", +] + [[package]] name = "polkadot-node-core-provisioner" version = "0.9.22" dependencies = [ - "bitvec", + "bitvec 1.0.0", "fatality", "futures 0.3.21", "futures-timer", @@ -6702,7 +6751,7 @@ dependencies = [ "futures 0.3.21", "futures-timer", "hex-literal", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "pin-project 1.0.10", "polkadot-core-primitives", "polkadot-node-subsystem-util", @@ -6776,7 +6825,7 @@ dependencies = [ "lazy_static", "log", "mick-jaeger", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parking_lot 0.12.0", "polkadot-node-primitives", "polkadot-primitives", @@ -6797,7 +6846,7 @@ dependencies = [ "log", "metered-channel", "nix 0.24.1", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-primitives", "polkadot-test-service", "prometheus-parse", @@ -6821,7 +6870,7 @@ dependencies = [ "derive_more", "fatality", "futures 0.3.21", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-node-jaeger", "polkadot-node-primitives", "polkadot-primitives", @@ -6840,7 +6889,7 @@ version = "0.9.22" dependencies = [ "bounded-vec", "futures 0.3.21", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-erasure-coding", "polkadot-parachain", "polkadot-primitives", @@ -6919,7 +6968,7 @@ dependencies = [ "lru 0.7.5", "metered-channel", "parity-db", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parity-util-mem", "parking_lot 0.11.2", "pin-project 1.0.10", @@ -7006,7 +7055,7 @@ version = "0.9.22" dependencies = [ "derive_more", "frame-support", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parity-util-mem", "polkadot-core-primitives", "scale-info", @@ -7034,10 +7083,10 @@ dependencies = [ name = "polkadot-primitives" version = "0.9.22" dependencies = [ - "bitvec", + "bitvec 1.0.0", "frame-system", "hex-literal", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parity-util-mem", "polkadot-core-primitives", "polkadot-parachain", @@ -7106,7 +7155,7 @@ name = "polkadot-runtime" version = 
"0.9.22" dependencies = [ "beefy-primitives", - "bitvec", + "bitvec 1.0.0", "frame-benchmarking", "frame-election-provider-support", "frame-executive", @@ -7153,7 +7202,7 @@ dependencies = [ "pallet-utility", "pallet-vesting", "pallet-xcm", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-primitives", "polkadot-runtime-common", "polkadot-runtime-constants", @@ -7197,7 +7246,7 @@ name = "polkadot-runtime-common" version = "0.9.22" dependencies = [ "beefy-primitives", - "bitvec", + "bitvec 1.0.0", "frame-benchmarking", "frame-election-provider-support", "frame-support", @@ -7219,7 +7268,7 @@ dependencies = [ "pallet-transaction-payment", "pallet-treasury", "pallet-vesting", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-primitives", "polkadot-primitives-test-helpers", "polkadot-runtime-parachains", @@ -7260,7 +7309,7 @@ name = "polkadot-runtime-metrics" version = "0.9.22" dependencies = [ "bs58", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-primitives", "sp-std", "sp-tracing", @@ -7272,7 +7321,7 @@ version = "0.9.22" dependencies = [ "assert_matches", "bitflags", - "bitvec", + "bitvec 1.0.0", "derive_more", "frame-benchmarking", "frame-support", @@ -7289,7 +7338,7 @@ dependencies = [ "pallet-staking", "pallet-timestamp", "pallet-vesting", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-primitives", "polkadot-primitives-test-helpers", "polkadot-runtime-metrics", @@ -7437,7 +7486,7 @@ dependencies = [ "futures 0.3.21", "futures-timer", "indexmap", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-node-network-protocol", "polkadot-node-primitives", "polkadot-node-subsystem", @@ -7462,7 +7511,7 @@ dependencies = [ name = "polkadot-statement-table" version = "0.9.22" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-primitives", "sp-core", ] @@ -7472,7 +7521,7 @@ name = "polkadot-test-client" version = "0.9.22" dependencies = [ "futures 0.3.21", - 
"parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-node-subsystem", "polkadot-primitives", "polkadot-test-runtime", @@ -7526,7 +7575,7 @@ name = "polkadot-test-runtime" version = "0.9.22" dependencies = [ "beefy-primitives", - "bitvec", + "bitvec 1.0.0", "frame-election-provider-support", "frame-executive", "frame-support", @@ -7551,7 +7600,7 @@ dependencies = [ "pallet-transaction-payment-rpc-runtime-api", "pallet-vesting", "pallet-xcm", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-parachain", "polkadot-primitives", "polkadot-runtime-common", @@ -7989,6 +8038,12 @@ dependencies = [ "proc-macro2", ] +[[package]] +name = "radium" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "643f8f41a8ebc4c5dc4515c82bb8abd397b527fc20fd681b7c011c2aee5d44fb" + [[package]] name = "radium" version = "0.7.0" @@ -8254,7 +8309,7 @@ dependencies = [ "env_logger 0.9.0", "jsonrpsee", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "serde", "serde_json", "sp-core", @@ -8405,7 +8460,7 @@ dependencies = [ "pallet-transaction-payment-rpc-runtime-api", "pallet-utility", "pallet-xcm", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-parachain", "polkadot-primitives", "polkadot-runtime-common", @@ -8634,7 +8689,7 @@ dependencies = [ "ip_network", "libp2p", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "prost 0.10.3", "prost-build", "rand 0.7.3", @@ -8658,7 +8713,7 @@ dependencies = [ "futures 0.3.21", "futures-timer", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sc-block-builder", "sc-client-api", "sc-proposer-metrics", @@ -8678,7 +8733,7 @@ name = "sc-block-builder" version = "0.10.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30e7730cf037518ec921c336fcbb0f" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sc-client-api", "sp-api", "sp-block-builder", @@ -8696,7 +8751,7 @@ source = 
"git+https://github.com/paritytech/substrate?branch=master#19b44f087b30 dependencies = [ "impl-trait-for-tuples", "memmap2 0.5.0", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sc-chain-spec-derive", "sc-network", "sc-telemetry", @@ -8730,7 +8785,7 @@ dependencies = [ "libp2p", "log", "names", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "rand 0.7.3", "regex", "rpassword", @@ -8765,7 +8820,7 @@ dependencies = [ "futures 0.3.21", "hash-db", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parking_lot 0.12.0", "sc-executor", "sc-transaction-pool-api", @@ -8796,7 +8851,7 @@ dependencies = [ "linked-hash-map", "log", "parity-db", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parking_lot 0.12.0", "sc-client-api", "sc-state-db", @@ -8846,7 +8901,7 @@ dependencies = [ "num-bigint", "num-rational 0.2.4", "num-traits", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parking_lot 0.12.0", "rand 0.7.3", "retain_mut", @@ -8904,7 +8959,7 @@ version = "0.10.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30e7730cf037518ec921c336fcbb0f" dependencies = [ "fork-tree", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sc-client-api", "sc-consensus", "sp-blockchain", @@ -8920,7 +8975,7 @@ dependencies = [ "futures 0.3.21", "futures-timer", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sc-client-api", "sc-consensus", "sc-telemetry", @@ -8954,7 +9009,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30 dependencies = [ "lazy_static", "lru 0.7.5", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parking_lot 0.12.0", "sc-executor-common", "sc-executor-wasmi", @@ -8980,7 +9035,7 @@ version = "0.10.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30e7730cf037518ec921c336fcbb0f" dependencies = [ "environmental", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sc-allocator", "sp-maybe-compressed-blob", 
"sp-sandbox", @@ -8997,7 +9052,7 @@ version = "0.10.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30e7730cf037518ec921c336fcbb0f" dependencies = [ "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sc-allocator", "sc-executor-common", "sp-runtime-interface", @@ -9014,7 +9069,7 @@ dependencies = [ "cfg-if 1.0.0", "libc", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parity-wasm 0.42.2", "sc-allocator", "sc-executor-common", @@ -9038,7 +9093,7 @@ dependencies = [ "futures-timer", "hex", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parking_lot 0.12.0", "rand 0.8.5", "sc-block-builder", @@ -9073,7 +9128,7 @@ dependencies = [ "futures 0.3.21", "jsonrpsee", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sc-client-api", "sc-finality-grandpa", "sc-rpc", @@ -9139,7 +9194,7 @@ dependencies = [ "linked_hash_set", "log", "lru 0.7.5", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parking_lot 0.12.0", "pin-project 1.0.10", "prost 0.10.3", @@ -9176,7 +9231,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30 dependencies = [ "futures 0.3.21", "libp2p", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "prost-build", "sc-peerset", "smallvec", @@ -9207,7 +9262,7 @@ dependencies = [ "futures 0.3.21", "libp2p", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "prost 0.10.3", "prost-build", "sc-client-api", @@ -9231,7 +9286,7 @@ dependencies = [ "libp2p", "log", "lru 0.7.5", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "prost 0.10.3", "prost-build", "sc-client-api", @@ -9262,7 +9317,7 @@ dependencies = [ "hyper-rustls", "num_cpus", "once_cell", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parking_lot 0.12.0", "rand 0.7.3", "sc-client-api", @@ -9307,7 +9362,7 @@ dependencies = [ "hash-db", "jsonrpsee", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parking_lot 0.12.0", "sc-block-builder", 
"sc-chain-spec", @@ -9336,7 +9391,7 @@ dependencies = [ "futures 0.3.21", "jsonrpsee", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parking_lot 0.12.0", "sc-chain-spec", "sc-transaction-pool-api", @@ -9377,7 +9432,7 @@ dependencies = [ "hash-db", "jsonrpsee", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parity-util-mem", "parking_lot 0.12.0", "pin-project 1.0.10", @@ -9435,7 +9490,7 @@ version = "0.10.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30e7730cf037518ec921c336fcbb0f" dependencies = [ "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parity-util-mem", "parity-util-mem-derive", "parking_lot 0.12.0", @@ -9449,7 +9504,7 @@ version = "0.10.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30e7730cf037518ec921c336fcbb0f" dependencies = [ "jsonrpsee", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sc-chain-spec", "sc-client-api", "sc-consensus-babe", @@ -9550,7 +9605,7 @@ dependencies = [ "futures-timer", "linked-hash-map", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parity-util-mem", "parking_lot 0.12.0", "retain_mut", @@ -9600,10 +9655,10 @@ version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8980cafbe98a7ee7a9cc16b32ebce542c77883f512d83fbf2ddc8f6a85ea74c9" dependencies = [ - "bitvec", + "bitvec 1.0.0", "cfg-if 1.0.0", "derive_more", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info-derive", "serde", ] @@ -9988,7 +10043,7 @@ name = "slot-range-helper" version = "0.9.22" dependencies = [ "enumn", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "paste", "sp-runtime", "sp-std", @@ -10065,7 +10120,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30 dependencies = [ "hash-db", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sp-api-proc-macro", "sp-core", "sp-runtime", @@ -10092,7 +10147,7 @@ name = 
"sp-application-crypto" version = "6.0.0" source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30e7730cf037518ec921c336fcbb0f" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "serde", "sp-core", @@ -10107,7 +10162,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30 dependencies = [ "integer-sqrt", "num-traits", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "serde", "sp-debug-derive", @@ -10120,7 +10175,7 @@ name = "sp-authority-discovery" version = "4.0.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30e7730cf037518ec921c336fcbb0f" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-api", "sp-application-crypto", @@ -10134,7 +10189,7 @@ version = "4.0.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30e7730cf037518ec921c336fcbb0f" dependencies = [ "async-trait", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sp-inherents", "sp-runtime", "sp-std", @@ -10145,7 +10200,7 @@ name = "sp-block-builder" version = "4.0.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30e7730cf037518ec921c336fcbb0f" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sp-api", "sp-inherents", "sp-runtime", @@ -10160,7 +10215,7 @@ dependencies = [ "futures 0.3.21", "log", "lru 0.7.5", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parking_lot 0.12.0", "sp-api", "sp-consensus", @@ -10179,7 +10234,7 @@ dependencies = [ "futures 0.3.21", "futures-timer", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sp-core", "sp-inherents", "sp-runtime", @@ -10196,7 +10251,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30 dependencies = [ "async-trait", "merlin", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "serde", "sp-api", @@ -10217,7 
+10272,7 @@ name = "sp-consensus-slots" version = "0.10.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30e7730cf037518ec921c336fcbb0f" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "serde", "sp-arithmetic", @@ -10231,7 +10286,7 @@ name = "sp-consensus-vrf" version = "0.10.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30e7730cf037518ec921c336fcbb0f" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "schnorrkel", "sp-core", @@ -10260,7 +10315,7 @@ dependencies = [ "log", "merlin", "num-traits", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parity-util-mem", "parking_lot 0.12.0", "primitive-types", @@ -10335,7 +10390,7 @@ version = "0.12.0" source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30e7730cf037518ec921c336fcbb0f" dependencies = [ "environmental", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sp-std", "sp-storage", ] @@ -10347,7 +10402,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30 dependencies = [ "finality-grandpa", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "serde", "sp-api", @@ -10365,7 +10420,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30 dependencies = [ "async-trait", "impl-trait-for-tuples", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sp-core", "sp-runtime", "sp-std", @@ -10381,7 +10436,7 @@ dependencies = [ "hash-db", "libsecp256k1", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parking_lot 0.12.0", "secp256k1", "sp-core", @@ -10416,7 +10471,7 @@ dependencies = [ "async-trait", "futures 0.3.21", "merlin", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parking_lot 0.12.0", "schnorrkel", "serde", @@ -10440,7 +10495,7 @@ version = "4.0.0-dev" source = 
"git+https://github.com/paritytech/substrate?branch=master#19b44f087b30e7730cf037518ec921c336fcbb0f" dependencies = [ "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "serde", "sp-api", "sp-core", @@ -10454,7 +10509,7 @@ name = "sp-npos-elections" version = "4.0.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30e7730cf037518ec921c336fcbb0f" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "serde", "sp-arithmetic", @@ -10502,7 +10557,7 @@ dependencies = [ "hash256-std-hasher", "impl-trait-for-tuples", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parity-util-mem", "paste", "rand 0.7.3", @@ -10521,7 +10576,7 @@ version = "6.0.0" source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30e7730cf037518ec921c336fcbb0f" dependencies = [ "impl-trait-for-tuples", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "primitive-types", "sp-externalities", "sp-runtime-interface-proc-macro", @@ -10550,7 +10605,7 @@ version = "0.10.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30e7730cf037518ec921c336fcbb0f" dependencies = [ "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sp-core", "sp-io", "sp-std", @@ -10572,7 +10627,7 @@ name = "sp-session" version = "4.0.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30e7730cf037518ec921c336fcbb0f" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-api", "sp-core", @@ -10586,7 +10641,7 @@ name = "sp-staking" version = "4.0.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30e7730cf037518ec921c336fcbb0f" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-runtime", "sp-std", @@ -10600,7 +10655,7 @@ dependencies = [ "hash-db", "log", "num-traits", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parking_lot 0.12.0", 
"rand 0.7.3", "smallvec", @@ -10625,7 +10680,7 @@ version = "6.0.0" source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30e7730cf037518ec921c336fcbb0f" dependencies = [ "impl-serde", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "ref-cast", "serde", "sp-debug-derive", @@ -10653,7 +10708,7 @@ dependencies = [ "async-trait", "futures-timer", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sp-api", "sp-inherents", "sp-runtime", @@ -10666,7 +10721,7 @@ name = "sp-tracing" version = "5.0.0" source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30e7730cf037518ec921c336fcbb0f" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sp-std", "tracing", "tracing-core", @@ -10689,7 +10744,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30 dependencies = [ "async-trait", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-core", "sp-inherents", @@ -10705,7 +10760,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30 dependencies = [ "hash-db", "memory-db", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-core", "sp-std", @@ -10720,7 +10775,7 @@ version = "5.0.0" source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30e7730cf037518ec921c336fcbb0f" dependencies = [ "impl-serde", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parity-wasm 0.42.2", "scale-info", "serde", @@ -10736,7 +10791,7 @@ name = "sp-version-proc-macro" version = "4.0.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30e7730cf037518ec921c336fcbb0f" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.1.2", "proc-macro2", "quote", "syn", @@ -10749,7 +10804,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30 dependencies = [ "impl-trait-for-tuples", "log", - "parity-scale-codec", + 
"parity-scale-codec 3.1.2", "sp-std", "wasmi", "wasmtime", @@ -10798,7 +10853,7 @@ dependencies = [ "pallet-election-provider-multi-phase", "pallet-staking", "pallet-transaction-payment", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "paste", "polkadot-core-primitives", "polkadot-runtime", @@ -10951,7 +11006,7 @@ dependencies = [ "futures 0.3.21", "jsonrpsee", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sc-client-api", "sc-rpc-api", "sc-transaction-pool-api", @@ -10983,7 +11038,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30 dependencies = [ "jsonrpsee", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sc-client-api", "sc-rpc-api", "scale-info", @@ -11005,7 +11060,7 @@ dependencies = [ "async-trait", "futures 0.3.21", "hex", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sc-client-api", "sc-client-db", "sc-consensus", @@ -11188,7 +11243,7 @@ name = "test-parachain-adder" version = "0.9.22" dependencies = [ "dlmalloc", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-parachain", "sp-io", "sp-std", @@ -11204,7 +11259,7 @@ dependencies = [ "futures 0.3.21", "futures-timer", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-cli", "polkadot-node-core-pvf", "polkadot-node-primitives", @@ -11235,7 +11290,7 @@ version = "0.9.22" dependencies = [ "dlmalloc", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-parachain", "sp-io", "sp-std", @@ -11251,7 +11306,7 @@ dependencies = [ "futures 0.3.21", "futures-timer", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-cli", "polkadot-node-core-pvf", "polkadot-node-primitives", @@ -11273,7 +11328,7 @@ dependencies = [ name = "test-parachains" version = "0.9.22" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sp-core", "test-parachain-adder", "test-parachain-halt", @@ -11767,7 +11822,7 @@ dependencies = [ "clap", "jsonrpsee", "log", - 
"parity-scale-codec", + "parity-scale-codec 3.1.2", "remote-externalities", "sc-chain-spec", "sc-cli", @@ -11831,7 +11886,7 @@ version = "1.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ee73e6e4924fe940354b8d4d98cad5231175d615cd855b758adc658c0aac6a0" dependencies = [ - "cfg-if 0.1.10", + "cfg-if 1.0.0", "digest 0.10.3", "rand 0.8.5", "static_assertions", @@ -12407,7 +12462,7 @@ name = "westend-runtime" version = "0.9.22" dependencies = [ "beefy-primitives", - "bitvec", + "bitvec 1.0.0", "frame-benchmarking", "frame-election-provider-support", "frame-executive", @@ -12457,7 +12512,7 @@ dependencies = [ "pallet-vesting", "pallet-xcm", "pallet-xcm-benchmarks", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-parachain", "polkadot-primitives", "polkadot-runtime-common", @@ -12657,6 +12712,12 @@ dependencies = [ "winapi", ] +[[package]] +name = "wyz" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85e60b0d1b5f99db2556934e21937020776a5d31520bf169e851ac44e6420214" + [[package]] name = "wyz" version = "0.5.0" @@ -12684,7 +12745,7 @@ dependencies = [ "derivative", "impl-trait-for-tuples", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "xcm-procedural", ] @@ -12699,7 +12760,7 @@ dependencies = [ "pallet-balances", "pallet-transaction-payment", "pallet-xcm", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-parachain", "polkadot-runtime-parachains", "scale-info", @@ -12720,7 +12781,7 @@ dependencies = [ "frame-support", "impl-trait-for-tuples", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sp-arithmetic", "sp-core", "sp-io", @@ -12764,7 +12825,7 @@ name = "xcm-simulator" version = "0.9.22" dependencies = [ "frame-support", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "paste", "polkadot-core-primitives", "polkadot-parachain", @@ -12783,7 +12844,7 @@ dependencies = [ "frame-system", "pallet-balances", 
"pallet-xcm", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-core-primitives", "polkadot-parachain", "polkadot-runtime-parachains", @@ -12807,7 +12868,7 @@ dependencies = [ "honggfuzz", "pallet-balances", "pallet-xcm", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-core-primitives", "polkadot-parachain", "polkadot-runtime-parachains", @@ -12863,7 +12924,7 @@ version = "0.9.22" dependencies = [ "futures-util", "lazy_static", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "reqwest", "serde", "serde_json", diff --git a/Cargo.toml b/Cargo.toml index b02f6ac1b500..5a5f567fe57d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -68,6 +68,7 @@ members = [ "node/core/chain-selection", "node/core/dispute-coordinator", "node/core/parachains-inherent", + "node/core/prospective-parachains", "node/core/provisioner", "node/core/pvf", "node/core/pvf-checker", diff --git a/node/core/prospective-parachains/Cargo.toml b/node/core/prospective-parachains/Cargo.toml new file mode 100644 index 000000000000..71374285707b --- /dev/null +++ b/node/core/prospective-parachains/Cargo.toml @@ -0,0 +1,26 @@ +[package] +name = "polkadot-node-core-prospective-parachains" +version = "0.9.16" +authors = ["Parity Technologies "] +edition = "2018" + +[dependencies] +futures = "0.3.19" +gum = { package = "tracing-gum", path = "../../gum" } +parity-scale-codec = "2" +thiserror = "1.0.30" +fatality = "0.0.6" +bitvec = "1" + +polkadot-primitives = { path = "../../../primitives" } +polkadot-node-primitives = { path = "../../primitives" } +polkadot-node-subsystem = { path = "../../subsystem" } +polkadot-node-subsystem-util = { path = "../../subsystem-util" } + +[dev-dependencies] +assert_matches = "1" +polkadot-primitives-test-helpers = { path = "../../../primitives/test-helpers" } + +[features] +# If not enabled, the dispute coordinator will do nothing. 
+disputes = [] diff --git a/node/core/prospective-parachains/src/error.rs b/node/core/prospective-parachains/src/error.rs new file mode 100644 index 000000000000..e7fa2f0e9641 --- /dev/null +++ b/node/core/prospective-parachains/src/error.rs @@ -0,0 +1,83 @@ +// Copyright 2022 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Error types. + +use futures::channel::oneshot; + +use polkadot_node_subsystem::{ + errors::{ChainApiError, RuntimeApiError}, + SubsystemError, +}; + +use crate::LOG_TARGET; +use fatality::Nested; + +#[allow(missing_docs)] +#[fatality::fatality(splitable)] +pub enum Error { + #[fatal] + #[error("SubsystemError::Context error: {0}")] + SubsystemContext(String), + + #[fatal] + #[error("Spawning a task failed: {0}")] + SpawnFailed(SubsystemError), + + #[fatal] + #[error("Participation worker receiver exhausted.")] + ParticipationWorkerReceiverExhausted, + + #[fatal] + #[error("Receiving message from overseer failed: {0}")] + SubsystemReceive(#[source] SubsystemError), + + #[error(transparent)] + RuntimeApi(#[from] RuntimeApiError), + + #[error(transparent)] + ChainApi(#[from] ChainApiError), + + #[error(transparent)] + Subsystem(SubsystemError), + + #[error("Request to chain API subsystem dropped")] + ChainApiRequestCanceled(oneshot::Canceled), + + #[error("Request to runtime API subsystem dropped")] + 
RuntimeApiRequestCanceled(oneshot::Canceled), +} + +/// General `Result` type. +pub type Result = std::result::Result; +/// Result for non-fatal only failures. +pub type JfyiErrorResult = std::result::Result; +/// Result for fatal only failures. +pub type FatalResult = std::result::Result; + +/// Utility for eating top level errors and log them. +/// +/// We basically always want to try and continue on error. This utility function is meant to +/// consume top-level errors by simply logging them +pub fn log_error(result: Result<()>, ctx: &'static str) -> FatalResult<()> { + match result.into_nested()? { + Ok(()) => Ok(()), + Err(jfyi) => { + gum::debug!(target: LOG_TARGET, error = ?jfyi, ctx); + Ok(()) + }, + } +} diff --git a/node/core/prospective-parachains/src/fragment_tree.rs b/node/core/prospective-parachains/src/fragment_tree.rs new file mode 100644 index 000000000000..9972b60490a1 --- /dev/null +++ b/node/core/prospective-parachains/src/fragment_tree.rs @@ -0,0 +1,1312 @@ +// Copyright 2022 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! A tree utility for managing parachain fragments not referenced by the relay-chain. +//! +//! This module exposes two main types: [`FragmentTree`] and [`CandidateStorage`] +//! which are meant to be used in close conjunction. Each tree is associated with a particular +//! 
relay-parent, and it's expected that higher-level code will have a tree for each +//! relay-chain block which might reasonably have blocks built upon it. +//! +//! Trees only store indices into the [`CandidateStorage`] and the storage is meant to +//! be pruned when trees are dropped by higher-level code. +//! +//! Each node in the tree represents a candidate. Nodes do not uniquely refer to a parachain +//! block for two reasons. +//! 1. There's no requirement that head-data is unique +//! for a parachain. Furthermore, a parachain is under no obligation to be acyclic, and this is mostly +//! just because it's totally inefficient to enforce it. Practical use-cases are acyclic, but there is +//! still more than one way to reach the same head-data. +//! 2. and candidates only refer to their parent by its head-data. This whole issue could be +//! resolved by having candidates reference their parent by candidate hash. +//! +//! The implication is that when we receive a candidate receipt, there are actually multiple +//! possibilities for any candidates between the para-head recorded in the relay parent's state +//! and the candidate in question. +//! +//! This means that our candidates need to handle multiple parents and that depth is an +//! attribute of a node in a tree, not a candidate. Put another way, the same candidate might +//! have different depths in different parts of the tree. +//! +//! As an extreme example, a candidate which produces head-data which is the same as its parent +//! can correspond to multiple nodes within the same [`FragmentTree`]. Such cycles are bounded +//! by the maximum depth allowed by the tree. +//! +//! As long as the [`CandidateStorage`] has bounded input on the number of candidates supplied, +//! [`FragmentTree`] complexity is bounded. This means that higher-level code needs to be selective +//! about limiting the amount of candidates that are considered. +//! +//! 
The code in this module is not designed for speed or efficiency, but conceptual simplicity. +//! Our assumption is that the amount of candidates and parachains we consider will be reasonably +//! bounded and in practice will not exceed a few thousand at any time. This naive implementation +//! will still perform fairly well under these conditions, despite being somewhat wasteful of memory. + +use std::collections::{BTreeMap, HashMap, HashSet}; + +use super::LOG_TARGET; +use bitvec::prelude::*; +use polkadot_node_subsystem_util::inclusion_emulator::staging::{ + ConstraintModifications, Constraints, Fragment, ProspectiveCandidate, RelayChainBlockInfo, +}; +use polkadot_primitives::vstaging::{ + BlockNumber, CandidateHash, CommittedCandidateReceipt, Hash, Id as ParaId, + PersistedValidationData, +}; + +/// Kinds of failures to import a candidate into storage. +#[derive(Debug, Clone, PartialEq)] +pub enum CandidateStorageInsertionError { + /// An error indicating that a supplied candidate didn't match the persisted + /// validation data provided alongside it. + PersistedValidationDataMismatch, + /// The candidate was already known. + CandidateAlreadyKnown(CandidateHash), +} + +pub(crate) struct CandidateStorage { + // Index from parent head hash to candidate hashes. + by_parent_head: HashMap>, + + // Index from candidate hash to fragment node. + by_candidate_hash: HashMap, +} + +impl CandidateStorage { + /// Create a new `CandidateStorage`. + pub fn new() -> Self { + CandidateStorage { by_parent_head: HashMap::new(), by_candidate_hash: HashMap::new() } + } + + /// Introduce a new candidate. The candidate passed to this function + /// should have been seconded before introduction. 
+ pub fn add_candidate( + &mut self, + candidate: CommittedCandidateReceipt, + persisted_validation_data: PersistedValidationData, + ) -> Result { + let candidate_hash = candidate.hash(); + + if self.by_candidate_hash.contains_key(&candidate_hash) { + return Err(CandidateStorageInsertionError::CandidateAlreadyKnown(candidate_hash)) + } + + if persisted_validation_data.hash() != candidate.descriptor.persisted_validation_data_hash { + return Err(CandidateStorageInsertionError::PersistedValidationDataMismatch) + } + + let parent_head_hash = persisted_validation_data.parent_head.hash(); + + let entry = CandidateEntry { + candidate_hash, + relay_parent: candidate.descriptor.relay_parent, + state: CandidateState::Seconded, + candidate: ProspectiveCandidate { + commitments: candidate.commitments, + collator: candidate.descriptor.collator, + collator_signature: candidate.descriptor.signature, + persisted_validation_data, + pov_hash: candidate.descriptor.pov_hash, + validation_code_hash: candidate.descriptor.validation_code_hash, + }, + }; + + self.by_parent_head.entry(parent_head_hash).or_default().insert(candidate_hash); + // sanity-checked already. + self.by_candidate_hash.insert(candidate_hash, entry); + + Ok(candidate_hash) + } + + /// Note that an existing candidate has been backed. + pub fn mark_backed(&mut self, candidate_hash: &CandidateHash) { + if let Some(entry) = self.by_candidate_hash.get_mut(candidate_hash) { + entry.state = CandidateState::Backed; + } + } + + /// Whether a candidate is recorded as being backed. + pub fn is_backed(&self, candidate_hash: &CandidateHash) -> bool { + self.by_candidate_hash + .get(candidate_hash) + .map_or(false, |e| e.state == CandidateState::Backed) + } + + /// Whether a candidate is contained within the storage already. + pub fn contains(&self, candidate_hash: &CandidateHash) -> bool { + self.by_candidate_hash.contains_key(candidate_hash) + } + + /// Retain only candidates which pass the predicate. 
+ pub(crate) fn retain(&mut self, pred: impl Fn(&CandidateHash) -> bool) { + self.by_candidate_hash.retain(|h, _v| pred(h)); + self.by_parent_head.retain(|_parent, children| { + children.retain(|h| pred(h)); + !children.is_empty() + }) + } + + fn iter_para_children<'a>( + &'a self, + parent_head_hash: &Hash, + ) -> impl Iterator + 'a { + let by_candidate_hash = &self.by_candidate_hash; + self.by_parent_head + .get(parent_head_hash) + .into_iter() + .flat_map(|hashes| hashes.iter()) + .filter_map(move |h| by_candidate_hash.get(h)) + } + + fn get(&'_ self, candidate_hash: &CandidateHash) -> Option<&'_ CandidateEntry> { + self.by_candidate_hash.get(candidate_hash) + } +} + +/// The state of a candidate. +/// +/// Candidates aren't even considered until they've at least been seconded. +#[derive(Debug, PartialEq)] +enum CandidateState { + /// The candidate has been seconded. + Seconded, + /// The candidate has been completely backed by the group. + Backed, +} + +struct CandidateEntry { + candidate_hash: CandidateHash, + relay_parent: Hash, + candidate: ProspectiveCandidate, + state: CandidateState, +} + +/// The scope of a [`FragmentTree`]. +#[derive(Debug)] +pub(crate) struct Scope { + para: ParaId, + relay_parent: RelayChainBlockInfo, + ancestors: BTreeMap, + ancestors_by_hash: HashMap, + base_constraints: Constraints, + max_depth: usize, +} + +/// An error variant indicating that ancestors provided to a scope +/// had unexpected order. +#[derive(Debug)] +pub struct UnexpectedAncestor; + +impl Scope { + /// Define a new [`Scope`]. + /// + /// All arguments are straightforward except the ancestors. + /// + /// Ancestors should be in reverse order, starting with the parent + /// of the `relay_parent`, and proceeding backwards in block number + /// increments of 1. Ancestors not following these conditions will be + /// rejected. + /// + /// This function will only consume ancestors up to the `min_relay_parent_number` of + /// the `base_constraints`. 
+ /// + /// Only ancestors whose children have the same session as the relay-parent's + /// children should be provided. + /// + /// It is allowed to provide zero ancestors. + pub fn with_ancestors( + para: ParaId, + relay_parent: RelayChainBlockInfo, + base_constraints: Constraints, + max_depth: usize, + ancestors: impl IntoIterator, + ) -> Result { + let mut ancestors_map = BTreeMap::new(); + let mut ancestors_by_hash = HashMap::new(); + { + let mut prev = relay_parent.number; + for ancestor in ancestors { + if prev == 0 { + return Err(UnexpectedAncestor) + } else if ancestor.number != prev - 1 { + return Err(UnexpectedAncestor) + } else if prev == base_constraints.min_relay_parent_number { + break + } else { + prev = ancestor.number; + ancestors_by_hash.insert(ancestor.hash, ancestor.clone()); + ancestors_map.insert(ancestor.number, ancestor); + } + } + } + + Ok(Scope { + para, + relay_parent, + base_constraints, + max_depth, + ancestors: ancestors_map, + ancestors_by_hash, + }) + } + + /// Get the earliest relay-parent allowed in the scope of the fragment tree. + pub fn earliest_relay_parent(&self) -> RelayChainBlockInfo { + self.ancestors + .iter() + .next() + .map(|(_, v)| v.clone()) + .unwrap_or_else(|| self.relay_parent.clone()) + } + + fn ancestor_by_hash(&self, hash: &Hash) -> Option { + if hash == &self.relay_parent.hash { + return Some(self.relay_parent.clone()) + } + + self.ancestors_by_hash.get(hash).map(|info| info.clone()) + } +} + +// We use indices into a flat vector to refer to nodes in the tree. +// Every tree also has an implicit root. +#[derive(Debug, Clone, Copy, PartialEq)] +enum NodePointer { + Root, + Storage(usize), +} + +/// This is a tree of candidates based on some underlying storage of candidates +/// and a scope. +pub(crate) struct FragmentTree { + scope: Scope, + + // Invariant: a contiguous prefix of the 'nodes' storage will contain + // the top-level children. 
+ nodes: Vec, + + // The candidates stored in this tree, mapped to a bitvec indicating the depths + // where the candidate is stored. + candidates: HashMap>, +} + +impl FragmentTree { + /// Create a new [`FragmentTree`] with given scope and populated from the + /// storage. + pub fn populate(scope: Scope, storage: &CandidateStorage) -> Self { + gum::trace!( + target: LOG_TARGET, + relay_parent = ?scope.relay_parent.hash, + relay_parent_num = scope.relay_parent.number, + para_id = ?scope.para, + ancestors = scope.ancestors.len(), + "Instantiating Fragment Tree", + ); + + let mut tree = FragmentTree { scope, nodes: Vec::new(), candidates: HashMap::new() }; + + tree.populate_from_bases(storage, vec![NodePointer::Root]); + + tree + } + + /// Get the scope of the Fragment Tree. + pub fn scope(&self) -> &Scope { + &self.scope + } + + // Inserts a node and updates child references in a non-root parent. + fn insert_node(&mut self, node: FragmentNode) { + let pointer = NodePointer::Storage(self.nodes.len()); + let parent_pointer = node.parent; + let candidate_hash = node.candidate_hash; + + let max_depth = self.scope.max_depth; + + self.candidates + .entry(candidate_hash) + .or_insert_with(|| bitvec![u16, Msb0; 0; max_depth + 1]) + .set(node.depth, true); + + match parent_pointer { + NodePointer::Storage(ptr) => { + self.nodes.push(node); + self.nodes[ptr].children.push((pointer, candidate_hash)) + }, + NodePointer::Root => { + // Maintain the invariant of node storage beginning with depth-0. 
+ if self.nodes.last().map_or(true, |last| last.parent == NodePointer::Root) { + self.nodes.push(node); + } else { + let pos = + self.nodes.iter().take_while(|n| n.parent == NodePointer::Root).count(); + self.nodes.insert(pos, node); + } + }, + } + } + + fn node_has_candidate_child( + &self, + pointer: NodePointer, + candidate_hash: &CandidateHash, + ) -> bool { + self.node_candidate_child(pointer, candidate_hash).is_some() + } + + fn node_candidate_child( + &self, + pointer: NodePointer, + candidate_hash: &CandidateHash, + ) -> Option { + match pointer { + NodePointer::Root => self + .nodes + .iter() + .take_while(|n| n.parent == NodePointer::Root) + .enumerate() + .find(|(_, n)| &n.candidate_hash == candidate_hash) + .map(|(i, _)| NodePointer::Storage(i)), + NodePointer::Storage(ptr) => + self.nodes.get(ptr).and_then(|n| n.candidate_child(candidate_hash)), + } + } + + /// Returns an O(n) iterator over the hashes of candidates contained in the + /// tree. + pub(crate) fn candidates<'a>(&'a self) -> impl Iterator + 'a { + self.candidates.keys().cloned() + } + + /// Whether the candidate exists and at what depths. + pub(crate) fn candidate(&self, candidate: &CandidateHash) -> Option> { + self.candidates.get(candidate).map(|d| d.iter_ones().collect()) + } + + /// Add a candidate and recursively populate from storage. + pub(crate) fn add_and_populate(&mut self, hash: CandidateHash, storage: &CandidateStorage) { + let candidate_entry = match storage.get(&hash) { + None => return, + Some(e) => e, + }; + + let candidate_parent = &candidate_entry.candidate.persisted_validation_data.parent_head; + + // Select an initial set of bases, whose required relay-parent matches that of the candidate. 
+ let root_base = if &self.scope.base_constraints.required_parent == candidate_parent { + Some(NodePointer::Root) + } else { + None + }; + + let non_root_bases = self + .nodes + .iter() + .enumerate() + .filter(|(_, n)| { + n.cumulative_modifications.required_parent.as_ref() == Some(candidate_parent) + }) + .map(|(i, _)| NodePointer::Storage(i)); + + let bases = root_base.into_iter().chain(non_root_bases).collect(); + + // Pass this into the population function, which will sanity-check stuff like depth, fragments, + // etc. and then recursively populate. + self.populate_from_bases(storage, bases); + } + + /// Returns the hypothetical depths where a candidate with the given hash and parent head data + /// would be added to the tree, without applying other candidates recursively on top of it. + /// + /// If the candidate is already known, this returns the actual depths where this + /// candidate is part of the tree. + pub(crate) fn hypothetical_depths( + &self, + hash: CandidateHash, + parent_head_data_hash: Hash, + candidate_relay_parent: Hash, + ) -> Vec { + // if known. + if let Some(depths) = self.candidates.get(&hash) { + return depths.iter_ones().collect() + } + + // if out of scope. + let candidate_relay_parent_number = + if self.scope.relay_parent.hash == candidate_relay_parent { + self.scope.relay_parent.number + } else if let Some(info) = self.scope.ancestors_by_hash.get(&candidate_relay_parent) { + info.number + } else { + return Vec::new() + }; + + let max_depth = self.scope.max_depth; + let mut depths = bitvec![u16, Msb0; 0; max_depth + 1]; + + // iterate over all nodes < max_depth where parent head-data matches, + // relay-parent number is <= candidate, and depth < max_depth. 
+ for node in &self.nodes { + if node.depth == max_depth { + continue + } + if node.fragment.relay_parent().number > candidate_relay_parent_number { + continue + } + if node.head_data_hash == parent_head_data_hash { + depths.set(node.depth + 1, true); + } + } + + // compare against root as well. + if self.scope.base_constraints.required_parent.hash() == parent_head_data_hash { + depths.set(0, true); + } + + depths.iter_ones().collect() + } + + /// Select a candidate after the given `required_path` which pass + /// the predicate. + /// + /// If there are multiple possibilities, this will select the first one. + /// + /// This returns `None` if there is no candidate meeting those criteria. + /// + /// The intention of the `required_path` is to allow queries on the basis of + /// one or more candidates which were previously pending availability becoming + /// available and opening up more room on the core. + pub(crate) fn select_child( + &self, + required_path: &[CandidateHash], + pred: impl Fn(&CandidateHash) -> bool, + ) -> Option { + let base_node = { + // traverse the required path. + let mut node = NodePointer::Root; + for required_step in required_path { + node = self.node_candidate_child(node, &required_step)?; + } + + node + }; + + // TODO [now]: taking the first selection might introduce bias + // or become gameable. + // + // For plausibly unique parachains, this shouldn't matter much. + // figure out alternative selection criteria? + match base_node { + NodePointer::Root => self + .nodes + .iter() + .take_while(|n| n.parent == NodePointer::Root) + .filter(|n| pred(&n.candidate_hash)) + .map(|n| n.candidate_hash) + .next(), + NodePointer::Storage(ptr) => + self.nodes[ptr].children.iter().filter(|n| pred(&n.1)).map(|n| n.1).next(), + } + } + + fn populate_from_bases<'a>( + &mut self, + storage: &'a CandidateStorage, + initial_bases: Vec, + ) { + // Populate the tree breadth-first. 
+ let mut last_sweep_start = None; + + loop { + let sweep_start = self.nodes.len(); + + if Some(sweep_start) == last_sweep_start { + break + } + + let parents: Vec = if let Some(last_start) = last_sweep_start { + (last_start..self.nodes.len()).map(NodePointer::Storage).collect() + } else { + initial_bases.clone() + }; + + // 1. get parent head and find constraints + // 2. iterate all candidates building on the right head and viable relay parent + // 3. add new node + for parent_pointer in parents { + let (modifications, child_depth, earliest_rp) = match parent_pointer { + NodePointer::Root => + (ConstraintModifications::identity(), 0, self.scope.earliest_relay_parent()), + NodePointer::Storage(ptr) => { + let node = &self.nodes[ptr]; + let parent_rp = self + .scope + .ancestor_by_hash(&node.relay_parent()) + .expect("nodes in tree can only contain ancestors within scope; qed"); + + (node.cumulative_modifications.clone(), node.depth + 1, parent_rp) + }, + }; + + if child_depth > self.scope.max_depth { + continue + } + + let child_constraints = + match self.scope.base_constraints.apply_modifications(&modifications) { + Err(e) => { + gum::debug!( + target: LOG_TARGET, + new_parent_head = ?modifications.required_parent, + err = ?e, + "Failed to apply modifications", + ); + + continue + }, + Ok(c) => c, + }; + + // Add nodes to tree wherever + // 1. parent hash is correct + // 2. relay-parent does not move backwards + // 3. candidate outputs fulfill constraints + let required_head_hash = child_constraints.required_parent.hash(); + for candidate in storage.iter_para_children(&required_head_hash) { + let relay_parent = match self.scope.ancestor_by_hash(&candidate.relay_parent) { + None => continue, // not in chain + Some(info) => { + if info.number < earliest_rp.number { + // moved backwards + continue + } + + info + }, + }; + + // don't add candidates where the parent already has it as a child. 
+ if self.node_has_candidate_child(parent_pointer, &candidate.candidate_hash) { + continue + } + + let fragment = { + let f = Fragment::new( + relay_parent.clone(), + child_constraints.clone(), + candidate.candidate.clone(), + ); + + match f { + Ok(f) => f, + Err(e) => { + gum::debug!( + target: LOG_TARGET, + err = ?e, + ?relay_parent, + candidate_hash = ?candidate.candidate_hash, + "Failed to instantiate fragment", + ); + + continue + }, + } + }; + + let mut cumulative_modifications = modifications.clone(); + cumulative_modifications.stack(fragment.constraint_modifications()); + + let head_data_hash = fragment.candidate().commitments.head_data.hash(); + let node = FragmentNode { + parent: parent_pointer, + fragment, + candidate_hash: candidate.candidate_hash.clone(), + depth: child_depth, + cumulative_modifications, + children: Vec::new(), + head_data_hash, + }; + + self.insert_node(node); + } + } + + last_sweep_start = Some(sweep_start); + } + } +} + +struct FragmentNode { + // A pointer to the parent node. 
+ parent: NodePointer, + fragment: Fragment, + candidate_hash: CandidateHash, + depth: usize, + cumulative_modifications: ConstraintModifications, + head_data_hash: Hash, + children: Vec<(NodePointer, CandidateHash)>, +} + +impl FragmentNode { + fn relay_parent(&self) -> Hash { + self.fragment.relay_parent().hash + } + + fn candidate_child(&self, candidate_hash: &CandidateHash) -> Option { + self.children.iter().find(|(_, c)| c == candidate_hash).map(|(p, _)| *p) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use assert_matches::assert_matches; + use polkadot_node_subsystem_util::inclusion_emulator::staging::InboundHrmpLimitations; + use polkadot_primitives::vstaging::{ + BlockNumber, CandidateCommitments, CandidateDescriptor, HeadData, + }; + use polkadot_primitives_test_helpers as test_helpers; + + fn make_constraints( + min_relay_parent_number: BlockNumber, + valid_watermarks: Vec, + required_parent: HeadData, + ) -> Constraints { + Constraints { + min_relay_parent_number, + max_pov_size: 1_000_000, + max_code_size: 1_000_000, + ump_remaining: 10, + ump_remaining_bytes: 1_000, + dmp_remaining_messages: 10, + hrmp_inbound: InboundHrmpLimitations { valid_watermarks }, + hrmp_channels_out: HashMap::new(), + max_hrmp_num_per_candidate: 0, + required_parent, + validation_code_hash: Hash::repeat_byte(42).into(), + upgrade_restriction: None, + future_validation_code: None, + } + } + + fn make_committed_candidate( + para_id: ParaId, + relay_parent: Hash, + relay_parent_number: BlockNumber, + parent_head: HeadData, + para_head: HeadData, + hrmp_watermark: BlockNumber, + ) -> (PersistedValidationData, CommittedCandidateReceipt) { + let persisted_validation_data = PersistedValidationData { + parent_head, + relay_parent_number, + relay_parent_storage_root: Hash::repeat_byte(69), + max_pov_size: 1_000_000, + }; + + let candidate = CommittedCandidateReceipt { + descriptor: CandidateDescriptor { + para_id, + relay_parent, + collator: test_helpers::dummy_collator(), + 
persisted_validation_data_hash: persisted_validation_data.hash(), + pov_hash: Hash::repeat_byte(1), + erasure_root: Hash::repeat_byte(1), + signature: test_helpers::dummy_collator_signature(), + para_head: para_head.hash(), + validation_code_hash: Hash::repeat_byte(42).into(), + }, + commitments: CandidateCommitments { + upward_messages: Vec::new(), + horizontal_messages: Vec::new(), + new_validation_code: None, + head_data: para_head, + processed_downward_messages: 0, + hrmp_watermark, + }, + }; + + (persisted_validation_data, candidate) + } + + #[test] + fn scope_rejects_ancestors_that_skip_blocks() { + let para_id = ParaId::from(5u32); + let relay_parent = RelayChainBlockInfo { + number: 10, + hash: Hash::repeat_byte(10), + storage_root: Hash::repeat_byte(69), + }; + + let ancestors = vec![RelayChainBlockInfo { + number: 8, + hash: Hash::repeat_byte(8), + storage_root: Hash::repeat_byte(69), + }]; + + let max_depth = 2; + let base_constraints = make_constraints(8, vec![8, 9], vec![1, 2, 3].into()); + + assert_matches!( + Scope::with_ancestors(para_id, relay_parent, base_constraints, max_depth, ancestors,), + Err(UnexpectedAncestor) + ); + } + + #[test] + fn scope_rejects_ancestor_for_0_block() { + let para_id = ParaId::from(5u32); + let relay_parent = RelayChainBlockInfo { + number: 0, + hash: Hash::repeat_byte(0), + storage_root: Hash::repeat_byte(69), + }; + + let ancestors = vec![RelayChainBlockInfo { + number: 99999, + hash: Hash::repeat_byte(99), + storage_root: Hash::repeat_byte(69), + }]; + + let max_depth = 2; + let base_constraints = make_constraints(0, vec![], vec![1, 2, 3].into()); + + assert_matches!( + Scope::with_ancestors(para_id, relay_parent, base_constraints, max_depth, ancestors,), + Err(UnexpectedAncestor) + ); + } + + #[test] + fn scope_only_takes_ancestors_up_to_min() { + let para_id = ParaId::from(5u32); + let relay_parent = RelayChainBlockInfo { + number: 5, + hash: Hash::repeat_byte(0), + storage_root: Hash::repeat_byte(69), + }; + + let 
ancestors = vec![ + RelayChainBlockInfo { + number: 4, + hash: Hash::repeat_byte(4), + storage_root: Hash::repeat_byte(69), + }, + RelayChainBlockInfo { + number: 3, + hash: Hash::repeat_byte(3), + storage_root: Hash::repeat_byte(69), + }, + RelayChainBlockInfo { + number: 2, + hash: Hash::repeat_byte(2), + storage_root: Hash::repeat_byte(69), + }, + ]; + + let max_depth = 2; + let base_constraints = make_constraints(3, vec![2], vec![1, 2, 3].into()); + + let scope = + Scope::with_ancestors(para_id, relay_parent, base_constraints, max_depth, ancestors) + .unwrap(); + + assert_eq!(scope.ancestors.len(), 2); + assert_eq!(scope.ancestors_by_hash.len(), 2); + } + + #[test] + fn storage_add_candidate() { + let mut storage = CandidateStorage::new(); + + let (pvd, candidate) = make_committed_candidate( + ParaId::from(5u32), + Hash::repeat_byte(69), + 8, + vec![4, 5, 6].into(), + vec![1, 2, 3].into(), + 7, + ); + + let candidate_hash = candidate.hash(); + let parent_head_hash = pvd.parent_head.hash(); + + storage.add_candidate(candidate, pvd).unwrap(); + assert!(storage.contains(&candidate_hash)); + assert_eq!(storage.iter_para_children(&parent_head_hash).count(), 1); + } + + #[test] + fn storage_retain() { + let mut storage = CandidateStorage::new(); + + let (pvd, candidate) = make_committed_candidate( + ParaId::from(5u32), + Hash::repeat_byte(69), + 8, + vec![4, 5, 6].into(), + vec![1, 2, 3].into(), + 7, + ); + + let candidate_hash = candidate.hash(); + let parent_head_hash = pvd.parent_head.hash(); + + storage.add_candidate(candidate, pvd).unwrap(); + storage.retain(|_| true); + assert!(storage.contains(&candidate_hash)); + assert_eq!(storage.iter_para_children(&parent_head_hash).count(), 1); + + storage.retain(|_| false); + assert!(!storage.contains(&candidate_hash)); + assert_eq!(storage.iter_para_children(&parent_head_hash).count(), 0); + } + + #[test] + fn populate_works_recursively() { + let mut storage = CandidateStorage::new(); + + let para_id = 
ParaId::from(5u32); + let relay_parent_a = Hash::repeat_byte(1); + let relay_parent_b = Hash::repeat_byte(2); + + let (pvd_a, candidate_a) = make_committed_candidate( + para_id, + relay_parent_a, + 0, + vec![0x0a].into(), + vec![0x0b].into(), + 0, + ); + let candidate_a_hash = candidate_a.hash(); + + let (pvd_b, candidate_b) = make_committed_candidate( + para_id, + relay_parent_b, + 1, + vec![0x0b].into(), + vec![0x0c].into(), + 1, + ); + let candidate_b_hash = candidate_b.hash(); + + let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); + + let ancestors = vec![RelayChainBlockInfo { + number: pvd_a.relay_parent_number, + hash: relay_parent_a, + storage_root: pvd_a.relay_parent_storage_root, + }]; + + let relay_parent_b_info = RelayChainBlockInfo { + number: pvd_b.relay_parent_number, + hash: relay_parent_b, + storage_root: pvd_b.relay_parent_storage_root, + }; + + storage.add_candidate(candidate_a, pvd_a).unwrap(); + storage.add_candidate(candidate_b, pvd_b).unwrap(); + let scope = + Scope::with_ancestors(para_id, relay_parent_b_info, base_constraints, 4, ancestors) + .unwrap(); + let tree = FragmentTree::populate(scope, &storage); + + let candidates: Vec<_> = tree.candidates().collect(); + assert_eq!(candidates.len(), 2); + assert!(candidates.contains(&candidate_a_hash)); + assert!(candidates.contains(&candidate_b_hash)); + + assert_eq!(tree.nodes.len(), 2); + assert_eq!(tree.nodes[0].parent, NodePointer::Root); + assert_eq!(tree.nodes[0].candidate_hash, candidate_a_hash); + assert_eq!(tree.nodes[0].depth, 0); + + assert_eq!(tree.nodes[1].parent, NodePointer::Storage(0)); + assert_eq!(tree.nodes[1].candidate_hash, candidate_b_hash); + assert_eq!(tree.nodes[1].depth, 1); + } + + #[test] + fn children_of_root_are_contiguous() { + let mut storage = CandidateStorage::new(); + + let para_id = ParaId::from(5u32); + let relay_parent_a = Hash::repeat_byte(1); + let relay_parent_b = Hash::repeat_byte(2); + + let (pvd_a, candidate_a) = 
make_committed_candidate( + para_id, + relay_parent_a, + 0, + vec![0x0a].into(), + vec![0x0b].into(), + 0, + ); + + let (pvd_b, candidate_b) = make_committed_candidate( + para_id, + relay_parent_b, + 1, + vec![0x0b].into(), + vec![0x0c].into(), + 1, + ); + + let (pvd_a2, candidate_a2) = make_committed_candidate( + para_id, + relay_parent_a, + 0, + vec![0x0a].into(), + vec![0x0b, 1].into(), + 0, + ); + let candidate_a2_hash = candidate_a2.hash(); + + let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); + + let ancestors = vec![RelayChainBlockInfo { + number: pvd_a.relay_parent_number, + hash: relay_parent_a, + storage_root: pvd_a.relay_parent_storage_root, + }]; + + let relay_parent_b_info = RelayChainBlockInfo { + number: pvd_b.relay_parent_number, + hash: relay_parent_b, + storage_root: pvd_b.relay_parent_storage_root, + }; + + storage.add_candidate(candidate_a, pvd_a).unwrap(); + storage.add_candidate(candidate_b, pvd_b).unwrap(); + let scope = + Scope::with_ancestors(para_id, relay_parent_b_info, base_constraints, 4, ancestors) + .unwrap(); + let mut tree = FragmentTree::populate(scope, &storage); + + storage.add_candidate(candidate_a2, pvd_a2).unwrap(); + tree.add_and_populate(candidate_a2_hash, &storage); + let candidates: Vec<_> = tree.candidates().collect(); + assert_eq!(candidates.len(), 3); + + assert_eq!(tree.nodes[0].parent, NodePointer::Root); + assert_eq!(tree.nodes[1].parent, NodePointer::Root); + assert_eq!(tree.nodes[2].parent, NodePointer::Storage(0)); + } + + #[test] + fn add_candidate_child_of_root() { + let mut storage = CandidateStorage::new(); + + let para_id = ParaId::from(5u32); + let relay_parent_a = Hash::repeat_byte(1); + + let (pvd_a, candidate_a) = make_committed_candidate( + para_id, + relay_parent_a, + 0, + vec![0x0a].into(), + vec![0x0b].into(), + 0, + ); + + let (pvd_b, candidate_b) = make_committed_candidate( + para_id, + relay_parent_a, + 0, + vec![0x0a].into(), + vec![0x0c].into(), + 0, + ); + let 
candidate_b_hash = candidate_b.hash(); + + let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); + + let relay_parent_a_info = RelayChainBlockInfo { + number: pvd_a.relay_parent_number, + hash: relay_parent_a, + storage_root: pvd_a.relay_parent_storage_root, + }; + + storage.add_candidate(candidate_a, pvd_a).unwrap(); + let scope = + Scope::with_ancestors(para_id, relay_parent_a_info, base_constraints, 4, vec![]) + .unwrap(); + let mut tree = FragmentTree::populate(scope, &storage); + + storage.add_candidate(candidate_b, pvd_b).unwrap(); + tree.add_and_populate(candidate_b_hash, &storage); + let candidates: Vec<_> = tree.candidates().collect(); + assert_eq!(candidates.len(), 2); + + assert_eq!(tree.nodes[0].parent, NodePointer::Root); + assert_eq!(tree.nodes[1].parent, NodePointer::Root); + } + + #[test] + fn add_candidate_child_of_non_root() { + let mut storage = CandidateStorage::new(); + + let para_id = ParaId::from(5u32); + let relay_parent_a = Hash::repeat_byte(1); + + let (pvd_a, candidate_a) = make_committed_candidate( + para_id, + relay_parent_a, + 0, + vec![0x0a].into(), + vec![0x0b].into(), + 0, + ); + + let (pvd_b, candidate_b) = make_committed_candidate( + para_id, + relay_parent_a, + 0, + vec![0x0b].into(), + vec![0x0c].into(), + 0, + ); + let candidate_b_hash = candidate_b.hash(); + + let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); + + let relay_parent_a_info = RelayChainBlockInfo { + number: pvd_a.relay_parent_number, + hash: relay_parent_a, + storage_root: pvd_a.relay_parent_storage_root, + }; + + storage.add_candidate(candidate_a, pvd_a).unwrap(); + let scope = + Scope::with_ancestors(para_id, relay_parent_a_info, base_constraints, 4, vec![]) + .unwrap(); + let mut tree = FragmentTree::populate(scope, &storage); + + storage.add_candidate(candidate_b, pvd_b).unwrap(); + tree.add_and_populate(candidate_b_hash, &storage); + let candidates: Vec<_> = tree.candidates().collect(); + assert_eq!(candidates.len(), 
2); + + assert_eq!(tree.nodes[0].parent, NodePointer::Root); + assert_eq!(tree.nodes[1].parent, NodePointer::Storage(0)); + } + + #[test] + fn graceful_cycle_of_0() { + let mut storage = CandidateStorage::new(); + + let para_id = ParaId::from(5u32); + let relay_parent_a = Hash::repeat_byte(1); + + let (pvd_a, candidate_a) = make_committed_candidate( + para_id, + relay_parent_a, + 0, + vec![0x0a].into(), + vec![0x0a].into(), // input same as output + 0, + ); + let candidate_a_hash = candidate_a.hash(); + let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); + + let relay_parent_a_info = RelayChainBlockInfo { + number: pvd_a.relay_parent_number, + hash: relay_parent_a, + storage_root: pvd_a.relay_parent_storage_root, + }; + + let max_depth = 4; + storage.add_candidate(candidate_a, pvd_a).unwrap(); + let scope = Scope::with_ancestors( + para_id, + relay_parent_a_info, + base_constraints, + max_depth, + vec![], + ) + .unwrap(); + let tree = FragmentTree::populate(scope, &storage); + + let candidates: Vec<_> = tree.candidates().collect(); + assert_eq!(candidates.len(), 1); + assert_eq!(tree.nodes.len(), max_depth + 1); + + assert_eq!(tree.nodes[0].parent, NodePointer::Root); + assert_eq!(tree.nodes[1].parent, NodePointer::Storage(0)); + assert_eq!(tree.nodes[2].parent, NodePointer::Storage(1)); + assert_eq!(tree.nodes[3].parent, NodePointer::Storage(2)); + assert_eq!(tree.nodes[4].parent, NodePointer::Storage(3)); + + assert_eq!(tree.nodes[0].candidate_hash, candidate_a_hash); + assert_eq!(tree.nodes[1].candidate_hash, candidate_a_hash); + assert_eq!(tree.nodes[2].candidate_hash, candidate_a_hash); + assert_eq!(tree.nodes[3].candidate_hash, candidate_a_hash); + assert_eq!(tree.nodes[4].candidate_hash, candidate_a_hash); + } + + #[test] + fn graceful_cycle_of_1() { + let mut storage = CandidateStorage::new(); + + let para_id = ParaId::from(5u32); + let relay_parent_a = Hash::repeat_byte(1); + + let (pvd_a, candidate_a) = make_committed_candidate( + 
para_id, + relay_parent_a, + 0, + vec![0x0a].into(), + vec![0x0b].into(), // output becomes candidate B's input (cycle of length 2) + 0, + ); + let candidate_a_hash = candidate_a.hash(); + + let (pvd_b, candidate_b) = make_committed_candidate( + para_id, + relay_parent_a, + 0, + vec![0x0b].into(), + vec![0x0a].into(), // output cycles back to candidate A's input + 0, + ); + let candidate_b_hash = candidate_b.hash(); + + let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); + + let relay_parent_a_info = RelayChainBlockInfo { + number: pvd_a.relay_parent_number, + hash: relay_parent_a, + storage_root: pvd_a.relay_parent_storage_root, + }; + + let max_depth = 4; + storage.add_candidate(candidate_a, pvd_a).unwrap(); + storage.add_candidate(candidate_b, pvd_b).unwrap(); + let scope = Scope::with_ancestors( + para_id, + relay_parent_a_info, + base_constraints, + max_depth, + vec![], + ) + .unwrap(); + let tree = FragmentTree::populate(scope, &storage); + + let candidates: Vec<_> = tree.candidates().collect(); + assert_eq!(candidates.len(), 2); + assert_eq!(tree.nodes.len(), max_depth + 1); + + assert_eq!(tree.nodes[0].parent, NodePointer::Root); + assert_eq!(tree.nodes[1].parent, NodePointer::Storage(0)); + assert_eq!(tree.nodes[2].parent, NodePointer::Storage(1)); + assert_eq!(tree.nodes[3].parent, NodePointer::Storage(2)); + assert_eq!(tree.nodes[4].parent, NodePointer::Storage(3)); + + assert_eq!(tree.nodes[0].candidate_hash, candidate_a_hash); + assert_eq!(tree.nodes[1].candidate_hash, candidate_b_hash); + assert_eq!(tree.nodes[2].candidate_hash, candidate_a_hash); + assert_eq!(tree.nodes[3].candidate_hash, candidate_b_hash); + assert_eq!(tree.nodes[4].candidate_hash, candidate_a_hash); + } + + #[test] + fn hypothetical_depths_known_and_unknown() { + let mut storage = CandidateStorage::new(); + + let para_id = ParaId::from(5u32); + let relay_parent_a = Hash::repeat_byte(1); + + let (pvd_a, candidate_a) = make_committed_candidate( + para_id, + relay_parent_a, + 0, + vec![0x0a].into(), + 
vec![0x0b].into(), // input same as output + 0, + ); + let candidate_a_hash = candidate_a.hash(); + + let (pvd_b, candidate_b) = make_committed_candidate( + para_id, + relay_parent_a, + 0, + vec![0x0b].into(), + vec![0x0a].into(), // input same as output + 0, + ); + let candidate_b_hash = candidate_b.hash(); + + let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); + + let relay_parent_a_info = RelayChainBlockInfo { + number: pvd_a.relay_parent_number, + hash: relay_parent_a, + storage_root: pvd_a.relay_parent_storage_root, + }; + + let max_depth = 4; + storage.add_candidate(candidate_a, pvd_a).unwrap(); + storage.add_candidate(candidate_b, pvd_b).unwrap(); + let scope = Scope::with_ancestors( + para_id, + relay_parent_a_info, + base_constraints, + max_depth, + vec![], + ) + .unwrap(); + let tree = FragmentTree::populate(scope, &storage); + + let candidates: Vec<_> = tree.candidates().collect(); + assert_eq!(candidates.len(), 2); + assert_eq!(tree.nodes.len(), max_depth + 1); + + assert_eq!( + tree.hypothetical_depths( + candidate_a_hash, + HeadData::from(vec![0x0a]).hash(), + relay_parent_a, + ), + vec![0, 2, 4], + ); + + assert_eq!( + tree.hypothetical_depths( + candidate_b_hash, + HeadData::from(vec![0x0b]).hash(), + relay_parent_a, + ), + vec![1, 3], + ); + + assert_eq!( + tree.hypothetical_depths( + CandidateHash(Hash::repeat_byte(21)), + HeadData::from(vec![0x0a]).hash(), + relay_parent_a, + ), + vec![0, 2, 4], + ); + + assert_eq!( + tree.hypothetical_depths( + CandidateHash(Hash::repeat_byte(22)), + HeadData::from(vec![0x0b]).hash(), + relay_parent_a, + ), + vec![1, 3] + ); + } +} diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs new file mode 100644 index 000000000000..0e447aa69b1f --- /dev/null +++ b/node/core/prospective-parachains/src/lib.rs @@ -0,0 +1,591 @@ +// Copyright 2022 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. 
+ +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Implementation of the Prospective Parachains subsystem - this tracks and handles +//! prospective parachain fragments and informs other backing-stage subsystems +//! of work to be done. +//! +//! This is the main coordinator of work within the node for the collation and +//! backing phases of parachain consensus. +//! +//! This is primarily an implementation of "Fragment Trees", as described in +//! [`polkadot_node_subsystem_util::inclusion_emulator::staging`]. +//! +//! This also handles concerns such as the relay-chain being forkful, +//! session changes, predicting validator group assignments. 
+ +use std::collections::{HashMap, HashSet}; + +use futures::{channel::oneshot, prelude::*}; + +use polkadot_node_subsystem::{ + messages::{ + ChainApiMessage, FragmentTreeMembership, HypotheticalDepthRequest, + ProspectiveParachainsMessage, RuntimeApiMessage, RuntimeApiRequest, + }, + overseer, ActiveLeavesUpdate, FromOverseer, OverseerSignal, SpawnedSubsystem, SubsystemError, +}; +use polkadot_node_subsystem_util::inclusion_emulator::staging::{Constraints, RelayChainBlockInfo}; +use polkadot_primitives::vstaging::{ + BlockNumber, CandidateHash, CommittedCandidateReceipt, CoreState, Hash, Id as ParaId, + PersistedValidationData, +}; + +use crate::{ + error::{FatalError, FatalResult, JfyiError, JfyiErrorResult, Result}, + fragment_tree::{CandidateStorage, FragmentTree, Scope as TreeScope}, +}; + +mod error; +mod fragment_tree; + +const LOG_TARGET: &str = "parachain::prospective-parachains"; + +// The maximum depth the subsystem will allow. 'depth' is defined as the +// amount of blocks between the para head in a relay-chain block's state +// and a candidate with a particular relay-parent. +// +// This value is chosen mostly for reasons of resource-limitation. +// Without it, a malicious validator group could create arbitrarily long, +// useless prospective parachains and DoS honest nodes. +const MAX_DEPTH: usize = 4; + +// The maximum ancestry we support. +const MAX_ANCESTRY: usize = 5; + +struct RelayBlockViewData { + // Scheduling info for paras and upcoming paras. + fragment_trees: HashMap, +} + +struct View { + // Active or recent relay-chain blocks by block hash. + active_leaves: HashMap, + candidate_storage: HashMap, +} + +impl View { + fn new() -> Self { + View { active_leaves: HashMap::new(), candidate_storage: HashMap::new() } + } +} + +/// The prospective parachains subsystem. 
+#[derive(Default)] +pub struct ProspectiveParachainsSubsystem; + +#[overseer::subsystem(ProspectiveParachains, error = SubsystemError, prefix = self::overseer)] +impl ProspectiveParachainsSubsystem +where + Context: Send + Sync, +{ + fn start(self, ctx: Context) -> SpawnedSubsystem { + SpawnedSubsystem { + future: run(ctx) + .map_err(|e| SubsystemError::with_origin("prospective-parachains", e)) + .boxed(), + name: "prospective-parachains-subsystem", + } + } +} + +#[overseer::contextbounds(ProspectiveParachains, prefix = self::overseer)] +async fn run(mut ctx: Context) -> FatalResult<()> { + let mut view = View::new(); + loop { + crate::error::log_error( + run_iteration(&mut ctx, &mut view).await, + "Encountered issue during run iteration", + )?; + } +} + +#[overseer::contextbounds(ProspectiveParachains, prefix = self::overseer)] +async fn run_iteration(ctx: &mut Context, view: &mut View) -> Result<()> { + loop { + match ctx.recv().await.map_err(FatalError::SubsystemReceive)? { + FromOverseer::Signal(OverseerSignal::Conclude) => return Ok(()), + FromOverseer::Signal(OverseerSignal::ActiveLeaves(update)) => { + handle_active_leaves_update(&mut *ctx, view, update).await?; + }, + FromOverseer::Signal(OverseerSignal::BlockFinalized(..)) => {}, + FromOverseer::Communication { msg } => match msg { + ProspectiveParachainsMessage::CandidateSeconded(para, candidate, pvd, tx) => + handle_candidate_seconded(&mut *ctx, view, para, candidate, pvd, tx).await?, + ProspectiveParachainsMessage::CandidateBacked(para, candidate_hash) => + handle_candidate_backed(&mut *ctx, view, para, candidate_hash).await?, + ProspectiveParachainsMessage::GetBackableCandidate( + relay_parent, + para, + required_path, + tx, + ) => answer_get_backable_candidate(&view, relay_parent, para, required_path, tx), + ProspectiveParachainsMessage::GetHypotheticalDepth(request, tx) => + answer_hypothetical_depths_request(&view, request, tx), + ProspectiveParachainsMessage::GetTreeMembership(para, candidate, tx) 
=> + answer_tree_membership_request(&view, para, candidate, tx), + ProspectiveParachainsMessage::GetMinimumRelayParent(para, relay_parent, tx) => + answer_minimum_relay_parent_request(&view, para, relay_parent, tx), + }, + } + } +} + +#[overseer::contextbounds(ProspectiveParachains, prefix = self::overseer)] +async fn handle_active_leaves_update( + ctx: &mut Context, + view: &mut View, + update: ActiveLeavesUpdate, +) -> JfyiErrorResult<()> { + // 1. clean up inactive leaves + // 2. determine all scheduled para at new block + // 3. construct new fragment tree for each para for each new leaf + // 4. prune candidate storage. + + for deactivated in &update.deactivated { + view.active_leaves.remove(deactivated); + } + + for activated in update.activated.into_iter() { + let hash = activated.hash; + let scheduled_paras = fetch_upcoming_paras(&mut *ctx, hash).await?; + + let block_info: RelayChainBlockInfo = match fetch_block_info(&mut *ctx, hash).await? { + None => { + gum::warn!( + target: LOG_TARGET, + block_hash = ?hash, + "Failed to get block info for newly activated leaf block." + ); + + // `update.activated` is an option, but we can use this + // to exit the 'loop' and skip this block without skipping + // pruning logic. + continue + }, + Some(info) => info, + }; + + let ancestry = fetch_ancestry(&mut *ctx, hash, MAX_ANCESTRY).await?; + + // Find constraints. + let mut fragment_trees = HashMap::new(); + for para in scheduled_paras { + let candidate_storage = + view.candidate_storage.entry(para).or_insert_with(CandidateStorage::new); + + let constraints = fetch_base_constraints(&mut *ctx, hash, para).await?; + + let constraints = match constraints { + Some(c) => c, + None => { + // This indicates a runtime conflict of some kind. + + gum::debug!( + target: LOG_TARGET, + para_id = ?para, + relay_parent = ?hash, + "Failed to get inclusion constraints." 
+ ); + + continue + }, + }; + + let scope = TreeScope::with_ancestors( + para, + block_info.clone(), + constraints, + MAX_DEPTH, + ancestry.iter().cloned(), + ) + .expect("ancestors are provided in reverse order and correctly; qed"); + + let tree = FragmentTree::populate(scope, &*candidate_storage); + fragment_trees.insert(para, tree); + } + + view.active_leaves.insert(hash, RelayBlockViewData { fragment_trees }); + } + + if !update.deactivated.is_empty() { + // This has potential to be a hotspot. + prune_view_candidate_storage(view); + } + + Ok(()) +} + +fn prune_view_candidate_storage(view: &mut View) { + let active_leaves = &view.active_leaves; + view.candidate_storage.retain(|para_id, storage| { + let mut coverage = HashSet::new(); + let mut contained = false; + for head in active_leaves.values() { + if let Some(tree) = head.fragment_trees.get(¶_id) { + coverage.extend(tree.candidates()); + contained = true; + } + } + + if !contained { + return false + } + + storage.retain(|h| coverage.contains(&h)); + + // Even if `storage` is now empty, we retain. + // This maintains a convenient invariant that para-id storage exists + // as long as there's an active head which schedules the para. + true + }) +} + +#[overseer::contextbounds(ProspectiveParachains, prefix = self::overseer)] +async fn handle_candidate_seconded( + _ctx: &mut Context, + view: &mut View, + para: ParaId, + candidate: CommittedCandidateReceipt, + pvd: PersistedValidationData, + tx: oneshot::Sender, +) -> JfyiErrorResult<()> { + // Add the candidate to storage. + // Then attempt to add it to all trees. 
+	let storage = match view.candidate_storage.get_mut(&para) {
+		None => {
+			gum::warn!(
+				target: LOG_TARGET,
+				para_id = ?para,
+				candidate_hash = ?candidate.hash(),
+				"Received seconded candidate for inactive para",
+			);
+
+			let _ = tx.send(Vec::new());
+			return Ok(())
+		},
+		Some(storage) => storage,
+	};
+
+	let candidate_hash = match storage.add_candidate(candidate, pvd) {
+		Ok(c) => c,
+		Err(crate::fragment_tree::CandidateStorageInsertionError::CandidateAlreadyKnown(_)) => {
+			let _ = tx.send(Vec::new());
+			return Ok(())
+		},
+		Err(
+			crate::fragment_tree::CandidateStorageInsertionError::PersistedValidationDataMismatch,
+		) => {
+			// We can't log the candidate hash here without doing more ~expensive
+			// hashing, but this branch indicates something is seriously wrong elsewhere
+			// so it's doubtful that it would affect debugging.
+
+			gum::warn!(
+				target: LOG_TARGET,
+				para = ?para,
+				"Received seconded candidate had mismatching validation data",
+			);
+
+			let _ = tx.send(Vec::new());
+			return Ok(())
+		},
+	};
+
+	let mut membership = Vec::new();
+	for (relay_parent, leaf_data) in &mut view.active_leaves {
+		if let Some(tree) = leaf_data.fragment_trees.get_mut(&para) {
+			tree.add_and_populate(candidate_hash, &*storage);
+			if let Some(depths) = tree.candidate(&candidate_hash) {
+				membership.push((*relay_parent, depths));
+			}
+		}
+	}
+	let _ = tx.send(membership);
+
+	Ok(())
+}
+
+#[overseer::contextbounds(ProspectiveParachains, prefix = self::overseer)]
+async fn handle_candidate_backed(
+	_ctx: &mut Context,
+	view: &mut View,
+	para: ParaId,
+	candidate_hash: CandidateHash,
+) -> JfyiErrorResult<()> {
+	let storage = match view.candidate_storage.get_mut(&para) {
+		None => {
+			gum::warn!(
+				target: LOG_TARGET,
+				para_id = ?para,
+				?candidate_hash,
+				"Received instruction to back a candidate for unknown para",
+			);
+
+			return Ok(())
+		},
+		Some(storage) => storage,
+	};
+
+	if !storage.contains(&candidate_hash) {
+		gum::warn!(
+			target: LOG_TARGET,
+			para_id = ?para,
+			?candidate_hash,
+			
"Received instruction to mark unknown candidate as backed.", + ); + + return Ok(()) + } + + if storage.is_backed(&candidate_hash) { + gum::debug!( + target: LOG_TARGET, + para_id = ?para, + ?candidate_hash, + "Received redundant instruction to mark candidate as backed", + ); + + return Ok(()) + } + + storage.mark_backed(&candidate_hash); + Ok(()) +} + +fn answer_get_backable_candidate( + view: &View, + relay_parent: Hash, + para: ParaId, + required_path: Vec, + tx: oneshot::Sender>, +) { + let data = match view.active_leaves.get(&relay_parent) { + None => { + gum::debug!( + target: LOG_TARGET, + ?relay_parent, + para_id = ?para, + "Requested backable candidate for inactive relay-parent." + ); + + let _ = tx.send(None); + return + }, + Some(d) => d, + }; + + let tree = match data.fragment_trees.get(¶) { + None => { + gum::debug!( + target: LOG_TARGET, + ?relay_parent, + para_id = ?para, + "Requested backable candidate for inactive para." + ); + + let _ = tx.send(None); + return + }, + Some(tree) => tree, + }; + + let storage = match view.candidate_storage.get(¶) { + None => { + gum::warn!( + target: LOG_TARGET, + ?relay_parent, + para_id = ?para, + "No candidate storage for active para", + ); + + let _ = tx.send(None); + return + }, + Some(s) => s, + }; + + let _ = tx.send(tree.select_child(&required_path, |candidate| storage.is_backed(candidate))); +} + +fn answer_hypothetical_depths_request( + view: &View, + request: HypotheticalDepthRequest, + tx: oneshot::Sender>, +) { + match view + .active_leaves + .get(&request.fragment_tree_relay_parent) + .and_then(|l| l.fragment_trees.get(&request.candidate_para)) + { + Some(fragment_tree) => { + let depths = fragment_tree.hypothetical_depths( + request.candidate_hash, + request.parent_head_data_hash, + request.candidate_relay_parent, + ); + let _ = tx.send(depths); + }, + None => { + let _ = tx.send(Vec::new()); + }, + } +} + +fn answer_tree_membership_request( + view: &View, + para: ParaId, + candidate: CandidateHash, + 
tx: oneshot::Sender, +) { + let mut membership = Vec::new(); + for (relay_parent, view_data) in &view.active_leaves { + if let Some(tree) = view_data.fragment_trees.get(¶) { + if let Some(depths) = tree.candidate(&candidate) { + membership.push((*relay_parent, depths)); + } + } + } + let _ = tx.send(membership); +} + +fn answer_minimum_relay_parent_request( + view: &View, + para: ParaId, + relay_parent: Hash, + tx: oneshot::Sender>, +) { + let res = view + .active_leaves + .get(&relay_parent) + .and_then(|data| data.fragment_trees.get(¶)) + .map(|tree| tree.scope().earliest_relay_parent().number); + + let _ = tx.send(res); +} + +#[overseer::contextbounds(ProspectiveParachains, prefix = self::overseer)] +async fn fetch_base_constraints( + ctx: &mut Context, + relay_parent: Hash, + para_id: ParaId, +) -> JfyiErrorResult> { + let (tx, rx) = oneshot::channel(); + ctx.send_message(RuntimeApiMessage::Request( + relay_parent, + RuntimeApiRequest::StagingValidityConstraints(para_id, tx), + )) + .await; + + Ok(rx.await.map_err(JfyiError::RuntimeApiRequestCanceled)??.map(From::from)) +} + +#[overseer::contextbounds(ProspectiveParachains, prefix = self::overseer)] +async fn fetch_upcoming_paras( + ctx: &mut Context, + relay_parent: Hash, +) -> JfyiErrorResult> { + let (tx, rx) = oneshot::channel(); + + // This'll have to get more sophisticated with parathreads, + // but for now we can just use the `AvailabilityCores`. 
+	ctx.send_message(RuntimeApiMessage::Request(
+		relay_parent,
+		RuntimeApiRequest::AvailabilityCores(tx),
+	))
+	.await;
+
+	let cores = rx.await.map_err(JfyiError::RuntimeApiRequestCanceled)??;
+	let mut upcoming = HashSet::new();
+	for core in cores {
+		match core {
+			CoreState::Occupied(occupied) => {
+				if let Some(next_up_on_available) = occupied.next_up_on_available {
+					upcoming.insert(next_up_on_available.para_id);
+				}
+				if let Some(next_up_on_time_out) = occupied.next_up_on_time_out {
+					upcoming.insert(next_up_on_time_out.para_id);
+				}
+			},
+			CoreState::Scheduled(scheduled) => {
+				upcoming.insert(scheduled.para_id);
+			},
+			CoreState::Free => {},
+		}
+	}
+
+	Ok(upcoming.into_iter().collect())
+}
+
+// Fetch ancestors in descending order, up to the amount requested.
+#[overseer::contextbounds(ProspectiveParachains, prefix = self::overseer)]
+async fn fetch_ancestry(
+	ctx: &mut Context,
+	relay_hash: Hash,
+	ancestors: usize,
+) -> JfyiErrorResult<Vec<RelayChainBlockInfo>> {
+	let (tx, rx) = oneshot::channel();
+	ctx.send_message(ChainApiMessage::Ancestors {
+		hash: relay_hash,
+		k: ancestors,
+		response_channel: tx,
+	})
+	.await;
+
+	let hashes = rx.map_err(JfyiError::ChainApiRequestCanceled).await??;
+	let mut block_info = Vec::with_capacity(hashes.len());
+	for hash in hashes {
+		match fetch_block_info(ctx, hash).await? {
+			None => {
+				gum::warn!(
+					target: LOG_TARGET,
+					relay_hash = ?hash,
+					"Failed to fetch info for hash returned from ancestry.",
+				);
+
+				// Return, however far we got.
+ return Ok(block_info) + }, + Some(info) => { + block_info.push(info); + }, + } + } + + Ok(block_info) +} + +#[overseer::contextbounds(ProspectiveParachains, prefix = self::overseer)] +async fn fetch_block_info( + ctx: &mut Context, + relay_hash: Hash, +) -> JfyiErrorResult> { + let (tx, rx) = oneshot::channel(); + + ctx.send_message(ChainApiMessage::BlockHeader(relay_hash, tx)).await; + let header = rx.map_err(JfyiError::ChainApiRequestCanceled).await??; + Ok(header.map(|header| RelayChainBlockInfo { + hash: relay_hash, + number: header.number, + storage_root: header.state_root, + })) +} + +#[derive(Clone)] +struct MetricsInner; + +/// Prospective parachain metrics. +#[derive(Default, Clone)] +pub struct Metrics(Option); diff --git a/node/core/runtime-api/src/cache.rs b/node/core/runtime-api/src/cache.rs index 6f5fdc5d4657..4df2206a9e76 100644 --- a/node/core/runtime-api/src/cache.rs +++ b/node/core/runtime-api/src/cache.rs @@ -20,12 +20,15 @@ use memory_lru::{MemoryLruCache, ResidentSize}; use parity_util_mem::{MallocSizeOf, MallocSizeOfExt}; use sp_consensus_babe::Epoch; -use polkadot_primitives::v2::{ - AuthorityDiscoveryId, BlockNumber, CandidateCommitments, CandidateEvent, CandidateHash, - CommittedCandidateReceipt, CoreState, DisputeState, GroupRotationInfo, Hash, Id as ParaId, - InboundDownwardMessage, InboundHrmpMessage, OccupiedCoreAssumption, PersistedValidationData, - PvfCheckStatement, ScrapedOnChainVotes, SessionIndex, SessionInfo, ValidationCode, - ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, +use polkadot_primitives::{ + v2::{ + AuthorityDiscoveryId, BlockNumber, CandidateCommitments, CandidateEvent, CandidateHash, + CommittedCandidateReceipt, CoreState, DisputeState, GroupRotationInfo, Hash, Id as ParaId, + InboundDownwardMessage, InboundHrmpMessage, OccupiedCoreAssumption, + PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, SessionIndex, SessionInfo, + ValidationCode, ValidationCodeHash, ValidatorId, 
ValidatorIndex, ValidatorSignature, + }, + vstaging as vstaging_primitives, }; const AUTHORITIES_CACHE_SIZE: usize = 128 * 1024; @@ -49,6 +52,8 @@ const VALIDATION_CODE_HASH_CACHE_SIZE: usize = 64 * 1024; const VERSION_CACHE_SIZE: usize = 4 * 1024; const DISPUTES_CACHE_SIZE: usize = 64 * 1024; +const STAGING_VALIDITY_CONSTRAINTS_CACHE_SIZE: usize = 10 * 1024; + struct ResidentSizeOf(T); impl ResidentSize for ResidentSizeOf { @@ -115,6 +120,10 @@ pub(crate) struct RequestResultCache { (Hash, ParaId, OccupiedCoreAssumption), ResidentSizeOf>, >, + + staging_validity_constraints: + MemoryLruCache<(Hash, ParaId), ResidentSizeOf>>, + version: MemoryLruCache>, disputes: MemoryLruCache< Hash, @@ -146,6 +155,11 @@ impl Default for RequestResultCache { on_chain_votes: MemoryLruCache::new(ON_CHAIN_VOTES_CACHE_SIZE), pvfs_require_precheck: MemoryLruCache::new(PVFS_REQUIRE_PRECHECK_SIZE), validation_code_hash: MemoryLruCache::new(VALIDATION_CODE_HASH_CACHE_SIZE), + + staging_validity_constraints: MemoryLruCache::new( + STAGING_VALIDITY_CONSTRAINTS_CACHE_SIZE, + ), + version: MemoryLruCache::new(VERSION_CACHE_SIZE), disputes: MemoryLruCache::new(DISPUTES_CACHE_SIZE), } @@ -406,6 +420,21 @@ impl RequestResultCache { self.validation_code_hash.insert(key, ResidentSizeOf(value)); } + pub(crate) fn staging_validity_constraints( + &mut self, + key: (Hash, ParaId), + ) -> Option<&Option> { + self.staging_validity_constraints.get(&key).map(|v| &v.0) + } + + pub(crate) fn cache_staging_validity_constraints( + &mut self, + key: (Hash, ParaId), + value: Option, + ) { + self.staging_validity_constraints.insert(key, ResidentSizeOf(value)); + } + pub(crate) fn version(&mut self, relay_parent: &Hash) -> Option<&u32> { self.version.get(&relay_parent).map(|v| &v.0) } @@ -462,6 +491,9 @@ pub(crate) enum RequestResult { // This is a request with side-effects and no result, hence (). 
SubmitPvfCheckStatement(Hash, PvfCheckStatement, ValidatorSignature, ()), ValidationCodeHash(Hash, ParaId, OccupiedCoreAssumption, Option), + + StagingValidityConstraints(Hash, ParaId, Option), + Version(Hash, u32), StagingDisputes(Hash, Vec<(SessionIndex, CandidateHash, DisputeState)>), } diff --git a/node/core/runtime-api/src/lib.rs b/node/core/runtime-api/src/lib.rs index 1e8908ebe544..dd924675fdb8 100644 --- a/node/core/runtime-api/src/lib.rs +++ b/node/core/runtime-api/src/lib.rs @@ -160,6 +160,11 @@ where ValidationCodeHash(relay_parent, para_id, assumption, hash) => self .requests_cache .cache_validation_code_hash((relay_parent, para_id, assumption), hash), + + StagingValidityConstraints(relay_parent, para_id, constraints) => self + .requests_cache + .cache_staging_validity_constraints((relay_parent, para_id), constraints), + Version(relay_parent, version) => self.requests_cache.cache_version(relay_parent, version), StagingDisputes(relay_parent, disputes) => @@ -267,6 +272,9 @@ where .map(|sender| Request::ValidationCodeHash(para, assumption, sender)), Request::StagingDisputes(sender) => query!(disputes(), sender).map(|sender| Request::StagingDisputes(sender)), + Request::StagingValidityConstraints(para, sender) => + query!(staging_validity_constraints(para), sender) + .map(|sender| Request::StagingValidityConstraints(para, sender)), } } @@ -521,5 +529,8 @@ where query!(ValidationCodeHash, validation_code_hash(para, assumption), ver = 2, sender), Request::StagingDisputes(sender) => query!(StagingDisputes, staging_get_disputes(), ver = 2, sender), + Request::StagingValidityConstraints(para, sender) => { + query!(StagingValidityConstraints, staging_validity_constraints(para), ver = 2, sender) + }, } } diff --git a/node/core/runtime-api/src/tests.rs b/node/core/runtime-api/src/tests.rs index 4e75df100504..0762f364a732 100644 --- a/node/core/runtime-api/src/tests.rs +++ b/node/core/runtime-api/src/tests.rs @@ -19,12 +19,15 @@ use super::*; use 
::test_helpers::{dummy_committed_candidate_receipt, dummy_validation_code}; use polkadot_node_primitives::{BabeAllowedSlots, BabeEpoch, BabeEpochConfiguration}; use polkadot_node_subsystem_test_helpers::make_subsystem_context; -use polkadot_primitives::v2::{ - AuthorityDiscoveryId, BlockNumber, CandidateEvent, CandidateHash, CommittedCandidateReceipt, - CoreState, DisputeState, GroupRotationInfo, Id as ParaId, InboundDownwardMessage, - InboundHrmpMessage, OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, - ScrapedOnChainVotes, SessionIndex, SessionInfo, ValidationCode, ValidationCodeHash, - ValidatorId, ValidatorIndex, ValidatorSignature, +use polkadot_primitives::{ + v2::{ + AuthorityDiscoveryId, BlockNumber, CandidateEvent, CandidateHash, + CommittedCandidateReceipt, CoreState, DisputeState, GroupRotationInfo, Id as ParaId, + InboundDownwardMessage, InboundHrmpMessage, OccupiedCoreAssumption, + PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, SessionIndex, SessionInfo, + ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, + }, + vstaging, }; use sp_core::testing::TaskExecutor; use std::{ @@ -193,6 +196,10 @@ sp_api::mock_impl_runtime_apis! 
{ fn staging_get_disputes() -> Vec<(SessionIndex, CandidateHash, DisputeState)> { unimplemented!() } + + fn staging_validity_constraints(_: ParaId) -> Option { + unimplemented!("Staging API not implemented"); + } } impl BabeApi for MockRuntimeApi { diff --git a/node/overseer/src/dummy.rs b/node/overseer/src/dummy.rs index b4a97c3e6321..19d24fb82dfa 100644 --- a/node/overseer/src/dummy.rs +++ b/node/overseer/src/dummy.rs @@ -86,6 +86,7 @@ pub fn dummy_overseer_builder<'a, Spawner, SupportsParachains>( DummySubsystem, DummySubsystem, DummySubsystem, + DummySubsystem, >, SubsystemError, > @@ -127,6 +128,7 @@ pub fn one_for_all_overseer_builder<'a, Spawner, SupportsParachains, Sub>( Sub, Sub, Sub, + Sub, >, SubsystemError, > @@ -154,7 +156,8 @@ where + Subsystem, SubsystemError> + Subsystem, SubsystemError> + Subsystem, SubsystemError> - + Subsystem, SubsystemError>, + + Subsystem, SubsystemError> + + Subsystem, SubsystemError>, { let metrics = ::register(registry)?; @@ -179,7 +182,8 @@ where .gossip_support(subsystem.clone()) .dispute_coordinator(subsystem.clone()) .dispute_distribution(subsystem.clone()) - .chain_selection(subsystem) + .chain_selection(subsystem.clone()) + .prospective_parachains(subsystem.clone()) .activation_external_listeners(Default::default()) .span_per_active_leaf(Default::default()) .active_leaves(Default::default()) diff --git a/node/overseer/src/lib.rs b/node/overseer/src/lib.rs index bcf486d2a0db..21cd09aee03d 100644 --- a/node/overseer/src/lib.rs +++ b/node/overseer/src/lib.rs @@ -83,8 +83,8 @@ use polkadot_node_subsystem_types::messages::{ BitfieldSigningMessage, CandidateBackingMessage, CandidateValidationMessage, ChainApiMessage, ChainSelectionMessage, CollationGenerationMessage, CollatorProtocolMessage, DisputeCoordinatorMessage, DisputeDistributionMessage, GossipSupportMessage, - NetworkBridgeMessage, ProvisionerMessage, PvfCheckerMessage, RuntimeApiMessage, - StatementDistributionMessage, + NetworkBridgeMessage, 
ProspectiveParachainsMessage, ProvisionerMessage, PvfCheckerMessage, + RuntimeApiMessage, StatementDistributionMessage, }; pub use polkadot_node_subsystem_types::{ errors::{SubsystemError, SubsystemResult}, @@ -564,6 +564,12 @@ pub struct Overseer { #[subsystem(blocking, ChainSelectionMessage, sends: [ChainApiMessage])] chain_selection: ChainSelection, + #[subsystem(ProspectiveParachainsMessage, sends: [ + RuntimeApiMessage, + ChainApiMessage, + ])] + prospective_parachains: ProspectiveParachains, + /// External listeners waiting for a hash to be in the active-leave set. pub activation_external_listeners: HashMap>>>, diff --git a/node/overseer/src/tests.rs b/node/overseer/src/tests.rs index 9fb030140191..ab7303297aea 100644 --- a/node/overseer/src/tests.rs +++ b/node/overseer/src/tests.rs @@ -29,7 +29,7 @@ use polkadot_node_subsystem_types::{ ActivatedLeaf, LeafStatus, }; use polkadot_primitives::v2::{ - CandidateHash, CandidateReceipt, CollatorPair, InvalidDisputeStatementKind, + CandidateHash, CandidateReceipt, CollatorPair, Id as ParaId, InvalidDisputeStatementKind, ValidDisputeStatementKind, ValidatorIndex, }; @@ -910,10 +910,17 @@ fn test_chain_selection_msg() -> ChainSelectionMessage { ChainSelectionMessage::Approved(Default::default()) } +fn test_prospective_parachains_msg() -> ProspectiveParachainsMessage { + ProspectiveParachainsMessage::CandidateBacked( + ParaId::from(5), + CandidateHash(Hash::repeat_byte(0)), + ) +} + // Checks that `stop`, `broadcast_signal` and `broadcast_message` are implemented correctly. #[test] fn overseer_all_subsystems_receive_signals_and_messages() { - const NUM_SUBSYSTEMS: usize = 21; + const NUM_SUBSYSTEMS: usize = 22; // -4 for BitfieldSigning, GossipSupport, AvailabilityDistribution and PvfCheckerSubsystem. 
const NUM_SUBSYSTEMS_MESSAGED: usize = NUM_SUBSYSTEMS - 4; @@ -998,6 +1005,9 @@ fn overseer_all_subsystems_receive_signals_and_messages() { handle .send_msg_anon(AllMessages::ChainSelection(test_chain_selection_msg())) .await; + handle + .send_msg_anon(AllMessages::ProspectiveParachains(test_prospective_parachains_msg())) + .await; // handle.send_msg_anon(AllMessages::PvfChecker(test_pvf_checker_msg())).await; // Wait until all subsystems have received. Otherwise the messages might race against @@ -1053,6 +1063,7 @@ fn context_holds_onto_message_until_enough_signals_received() { let (dispute_distribution_bounded_tx, _) = metered::channel(CHANNEL_CAPACITY); let (chain_selection_bounded_tx, _) = metered::channel(CHANNEL_CAPACITY); let (pvf_checker_bounded_tx, _) = metered::channel(CHANNEL_CAPACITY); + let (prospective_parachains_bounded_tx, _) = metered::channel(CHANNEL_CAPACITY); let (candidate_validation_unbounded_tx, _) = metered::unbounded(); let (candidate_backing_unbounded_tx, _) = metered::unbounded(); @@ -1075,6 +1086,7 @@ fn context_holds_onto_message_until_enough_signals_received() { let (dispute_distribution_unbounded_tx, _) = metered::unbounded(); let (chain_selection_unbounded_tx, _) = metered::unbounded(); let (pvf_checker_unbounded_tx, _) = metered::unbounded(); + let (prospective_parachains_unbounded_tx, _) = metered::unbounded(); let channels_out = ChannelsOut { candidate_validation: candidate_validation_bounded_tx.clone(), @@ -1098,6 +1110,7 @@ fn context_holds_onto_message_until_enough_signals_received() { dispute_distribution: dispute_distribution_bounded_tx.clone(), chain_selection: chain_selection_bounded_tx.clone(), pvf_checker: pvf_checker_bounded_tx.clone(), + prospective_parachains: prospective_parachains_bounded_tx.clone(), candidate_validation_unbounded: candidate_validation_unbounded_tx.clone(), candidate_backing_unbounded: candidate_backing_unbounded_tx.clone(), @@ -1120,6 +1133,7 @@ fn 
context_holds_onto_message_until_enough_signals_received() { dispute_distribution_unbounded: dispute_distribution_unbounded_tx.clone(), chain_selection_unbounded: chain_selection_unbounded_tx.clone(), pvf_checker_unbounded: pvf_checker_unbounded_tx.clone(), + prospective_parachains_unbounded: prospective_parachains_unbounded_tx.clone(), }; let (mut signal_tx, signal_rx) = metered::channel(CHANNEL_CAPACITY); diff --git a/node/service/src/overseer.rs b/node/service/src/overseer.rs index bb3d9e840f1c..e0dce76b9393 100644 --- a/node/service/src/overseer.rs +++ b/node/service/src/overseer.rs @@ -178,6 +178,7 @@ pub fn prepared_overseer_builder<'a, Spawner, RuntimeClient>( DisputeCoordinatorSubsystem, DisputeDistributionSubsystem, ChainSelectionSubsystem, + polkadot_overseer::DummySubsystem, // TODO [now]: use real prospective parachains >, Error, > @@ -291,6 +292,7 @@ where Metrics::register(registry)?, )) .chain_selection(ChainSelectionSubsystem::new(chain_selection_config, parachains_db)) + .prospective_parachains(polkadot_overseer::DummySubsystem) .leaves(Vec::from_iter( leaves .into_iter() diff --git a/node/subsystem-types/src/messages.rs b/node/subsystem-types/src/messages.rs index db74ab11cd4d..db2bd89286b7 100644 --- a/node/subsystem-types/src/messages.rs +++ b/node/subsystem-types/src/messages.rs @@ -38,14 +38,17 @@ use polkadot_node_primitives::{ CollationSecondedSignal, DisputeMessage, ErasureChunk, PoV, SignedDisputeStatement, SignedFullStatement, ValidationResult, }; -use polkadot_primitives::v2::{ - AuthorityDiscoveryId, BackedCandidate, BlockNumber, CandidateEvent, CandidateHash, - CandidateIndex, CandidateReceipt, CollatorId, CommittedCandidateReceipt, CoreState, - DisputeState, GroupIndex, GroupRotationInfo, Hash, Header as BlockHeader, Id as ParaId, - InboundDownwardMessage, InboundHrmpMessage, MultiDisputeStatementSet, OccupiedCoreAssumption, - PersistedValidationData, PvfCheckStatement, SessionIndex, SessionInfo, - SignedAvailabilityBitfield, 
SignedAvailabilityBitfields, ValidationCode, ValidationCodeHash, - ValidatorId, ValidatorIndex, ValidatorSignature, +use polkadot_primitives::{ + v2::{ + AuthorityDiscoveryId, BackedCandidate, BlockNumber, CandidateEvent, CandidateHash, + CandidateIndex, CandidateReceipt, CollatorId, CommittedCandidateReceipt, CoreState, + DisputeState, GroupIndex, GroupRotationInfo, Hash, Header as BlockHeader, Id as ParaId, + InboundDownwardMessage, InboundHrmpMessage, MultiDisputeStatementSet, + OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, SessionIndex, + SessionInfo, SignedAvailabilityBitfield, SignedAvailabilityBitfields, ValidationCode, + ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, + }, + vstaging as vstaging_primitives, }; use polkadot_statement_table::v2::Misbehavior; use std::{ @@ -697,6 +700,9 @@ pub enum RuntimeApiRequest { StagingDisputes( RuntimeApiSender)>>, ), + /// Get the validity constraints of the given para. + /// This is a staging API that will not be available on production runtimes. + StagingValidityConstraints(ParaId, RuntimeApiSender>), } /// A message to the Runtime API subsystem. @@ -933,3 +939,64 @@ pub enum GossipSupportMessage { /// Currently non-instantiable. #[derive(Debug)] pub enum PvfCheckerMessage {} + +/// A request for the depths a hypothetical candidate would occupy within +/// some fragment tree. +#[derive(Debug)] +pub struct HypotheticalDepthRequest { + /// The hash of the potential candidate. + pub candidate_hash: CandidateHash, + /// The para of the candidate. + pub candidate_para: ParaId, + /// The hash of the parent head-data of the candidate. + pub parent_head_data_hash: Hash, + /// The relay-parent of the candidate. + pub candidate_relay_parent: Hash, + /// The relay-parent of the fragment tree we are comparing to. 
+ pub fragment_tree_relay_parent: Hash, +} + +/// Indicates the relay-parents whose fragment tree a candidate +/// is present in and the depths of that tree the candidate is present in. +pub type FragmentTreeMembership = Vec<(Hash, Vec)>; + +/// Messages sent to the Prospective Parachains subsystem. +#[derive(Debug)] +pub enum ProspectiveParachainsMessage { + /// Inform the Prospective Parachains Subsystem of a new candidate. + /// + /// The response sender accepts the candidate membership, which is empty + /// if the candidate was already known. + CandidateSeconded( + ParaId, + CommittedCandidateReceipt, + PersistedValidationData, + oneshot::Sender, + ), + /// Inform the Prospective Parachains Subsystem that a previously seconded candidate + /// has been backed. This requires that `CandidateSeconded` was sent for the candidate + /// some time in the past. + CandidateBacked(ParaId, CandidateHash), + /// Get a backable candidate hash for the given parachain, under the given relay-parent hash, + /// which is a descendant of the given candidate hashes. Returns `None` on the channel + /// if no such candidate exists. + GetBackableCandidate(Hash, ParaId, Vec, oneshot::Sender>), + /// Get the hypothetical depths that a candidate with the given properties would + /// occupy in the fragment tree for the given relay-parent. + /// + /// If the candidate is already known, this returns the depths the candidate + /// occupies. + /// + /// Returns an empty vector either if there is no such depth or the fragment tree relay-parent + /// is unknown. + GetHypotheticalDepth(HypotheticalDepthRequest, oneshot::Sender>), + /// Get the membership of the candidate in all fragment trees. + GetTreeMembership(ParaId, CandidateHash, oneshot::Sender), + /// Get the minimum accepted relay-parent number in the fragment tree + /// for the given relay-parent and para-id. 
+ /// + /// That is, if the relay-parent is known and there's a fragment tree for it, + /// in this para-id, this returns the minimum relay-parent block number in the + /// same chain which is accepted in the fragment tree for the para-id. + GetMinimumRelayParent(ParaId, Hash, oneshot::Sender>), +} diff --git a/node/subsystem-util/src/inclusion_emulator/staging.rs b/node/subsystem-util/src/inclusion_emulator/staging.rs index e886a9a0ff22..60eecb9b5180 100644 --- a/node/subsystem-util/src/inclusion_emulator/staging.rs +++ b/node/subsystem-util/src/inclusion_emulator/staging.rs @@ -114,8 +114,9 @@ //! in practice at most once every few weeks. use polkadot_primitives::vstaging::{ - BlockNumber, CandidateCommitments, CollatorId, CollatorSignature, Hash, HeadData, Id as ParaId, - PersistedValidationData, UpgradeRestriction, ValidationCodeHash, + BlockNumber, CandidateCommitments, CollatorId, CollatorSignature, + Constraints as PrimitiveConstraints, Hash, HeadData, Id as ParaId, PersistedValidationData, + UpgradeRestriction, ValidationCodeHash, }; use std::collections::HashMap; @@ -169,6 +170,40 @@ pub struct Constraints { pub future_validation_code: Option<(BlockNumber, ValidationCodeHash)>, } +impl From for Constraints { + fn from(c: PrimitiveConstraints) -> Self { + Constraints { + min_relay_parent_number: c.min_relay_parent_number, + max_pov_size: c.max_pov_size as _, + max_code_size: c.max_code_size as _, + ump_remaining: c.ump_remaining as _, + ump_remaining_bytes: c.ump_remaining_bytes as _, + dmp_remaining_messages: c.dmp_remaining_messages as _, + hrmp_inbound: InboundHrmpLimitations { + valid_watermarks: c.hrmp_inbound.valid_watermarks, + }, + hrmp_channels_out: c + .hrmp_channels_out + .into_iter() + .map(|(para_id, limits)| { + ( + para_id, + OutboundHrmpChannelLimitations { + bytes_remaining: limits.bytes_remaining as _, + messages_remaining: limits.messages_remaining as _, + }, + ) + }) + .collect(), + max_hrmp_num_per_candidate: 
c.max_hrmp_num_per_candidate as _, + required_parent: c.required_parent, + validation_code_hash: c.validation_code_hash, + upgrade_restriction: c.upgrade_restriction, + future_validation_code: c.future_validation_code, + } + } +} + /// Kinds of errors that can occur when modifying constraints. #[derive(Debug, Clone, PartialEq)] pub enum ModificationError { @@ -225,7 +260,8 @@ impl Constraints { &self, modifications: &ConstraintModifications, ) -> Result<(), ModificationError> { - if let Some(hrmp_watermark) = modifications.hrmp_watermark { + if let Some(HrmpWatermarkUpdate::Trunk(hrmp_watermark)) = modifications.hrmp_watermark { + // head updates are always valid. if self .hrmp_inbound .valid_watermarks @@ -300,12 +336,22 @@ impl Constraints { new.required_parent = required_parent.clone(); } - if let Some(hrmp_watermark) = modifications.hrmp_watermark { - match new.hrmp_inbound.valid_watermarks.iter().position(|w| w == &hrmp_watermark) { - Some(pos) => { + if let Some(ref hrmp_watermark) = modifications.hrmp_watermark { + match new.hrmp_inbound.valid_watermarks.binary_search(&hrmp_watermark.watermark()) { + Ok(pos) => { + // Exact match, so this is OK in all cases. let _ = new.hrmp_inbound.valid_watermarks.drain(..pos + 1); }, - None => return Err(ModificationError::DisallowedHrmpWatermark(hrmp_watermark)), + Err(pos) => match hrmp_watermark { + HrmpWatermarkUpdate::Head(_) => { + // Updates to Head are always OK. + let _ = new.hrmp_inbound.valid_watermarks.drain(..pos); + }, + HrmpWatermarkUpdate::Trunk(n) => { + // Trunk update landing on disallowed watermark is not OK. + return Err(ModificationError::DisallowedHrmpWatermark(*n)) + }, + }, } } @@ -388,13 +434,33 @@ pub struct OutboundHrmpChannelModification { pub messages_submitted: usize, } +/// An update to the HRMP Watermark. +#[derive(Debug, Clone, PartialEq)] +pub enum HrmpWatermarkUpdate { + /// This is an update placing the watermark at the head of the chain, + /// which is always legal. 
+ Head(BlockNumber), + /// This is an update placing the watermark behind the head of the + /// chain, which is only legal if it lands on a block where messages + /// were queued. + Trunk(BlockNumber), +} + +impl HrmpWatermarkUpdate { + fn watermark(&self) -> BlockNumber { + match *self { + HrmpWatermarkUpdate::Head(n) | HrmpWatermarkUpdate::Trunk(n) => n, + } + } +} + /// Modifications to constraints as a result of prospective candidates. #[derive(Debug, Clone, PartialEq)] pub struct ConstraintModifications { /// The required parent head to build upon. pub required_parent: Option, /// The new HRMP watermark - pub hrmp_watermark: Option, + pub hrmp_watermark: Option, /// Outbound HRMP channel modifications. pub outbound_hrmp: HashMap, /// The amount of UMP messages sent. @@ -546,7 +612,13 @@ impl Fragment { let commitments = &candidate.commitments; ConstraintModifications { required_parent: Some(commitments.head_data.clone()), - hrmp_watermark: Some(commitments.hrmp_watermark), + hrmp_watermark: Some({ + if commitments.hrmp_watermark == relay_parent.number { + HrmpWatermarkUpdate::Head(commitments.hrmp_watermark) + } else { + HrmpWatermarkUpdate::Trunk(commitments.hrmp_watermark) + } + }), outbound_hrmp: { let mut outbound_hrmp = HashMap::<_, OutboundHrmpChannelModification>::new(); @@ -843,10 +915,10 @@ mod tests { } #[test] - fn constraints_disallowed_watermark() { + fn constraints_disallowed_trunk_watermark() { let constraints = make_constraints(); let mut modifications = ConstraintModifications::identity(); - modifications.hrmp_watermark = Some(7); + modifications.hrmp_watermark = Some(HrmpWatermarkUpdate::Trunk(7)); assert_eq!( constraints.check_modifications(&modifications), @@ -859,6 +931,18 @@ mod tests { ); } + #[test] + fn constraints_always_allow_head_watermark() { + let constraints = make_constraints(); + let mut modifications = ConstraintModifications::identity(); + modifications.hrmp_watermark = Some(HrmpWatermarkUpdate::Head(7)); + + 
assert!(constraints.check_modifications(&modifications).is_ok()); + + let new_constraints = constraints.apply_modifications(&modifications).unwrap(); + assert_eq!(new_constraints.hrmp_inbound.valid_watermarks, vec![8]); + } + #[test] fn constraints_no_such_hrmp_channel() { let constraints = make_constraints(); diff --git a/primitives/src/runtime_api.rs b/primitives/src/runtime_api.rs index 84d2cf0ec4ca..fe695336eb82 100644 --- a/primitives/src/runtime_api.rs +++ b/primitives/src/runtime_api.rs @@ -44,7 +44,7 @@ //! For more details about how the API versioning works refer to `spi_api` //! documentation [here](https://docs.substrate.io/rustdocs/latest/sp_api/macro.decl_runtime_apis.html). -use crate::v2; +use crate::{v2, vstaging}; use parity_scale_codec::{Decode, Encode}; use polkadot_core_primitives as pcp; use polkadot_parachain::primitives as ppp; @@ -155,5 +155,9 @@ sp_api::decl_runtime_apis! { /// Returns all onchain disputes. /// This is a staging method! Do not use on production runtimes! fn staging_get_disputes() -> Vec<(v2::SessionIndex, v2::CandidateHash, v2::DisputeState)>; + + /// Returns the base constraints of the given para, if they exist. + /// This is a staging method! Do not use on production runtimes! + fn staging_validity_constraints(_: ppp::Id) -> Option; } } diff --git a/primitives/src/v2/mod.rs b/primitives/src/v2/mod.rs index 649ffd2f375e..0a9de44480f4 100644 --- a/primitives/src/v2/mod.rs +++ b/primitives/src/v2/mod.rs @@ -1129,6 +1129,7 @@ pub struct AbridgedHrmpChannel { /// A possible upgrade restriction that prevents a parachain from performing an upgrade. #[derive(Copy, Clone, Encode, Decode, PartialEq, RuntimeDebug, TypeInfo)] +#[cfg_attr(feature = "std", derive(MallocSizeOf))] pub enum UpgradeRestriction { /// There is an upgrade restriction and there are no details about its specifics nor how long /// it could last. 
diff --git a/primitives/src/vstaging/mod.rs b/primitives/src/vstaging/mod.rs index c6dd4d1bb76a..87cf8c8ba85c 100644 --- a/primitives/src/vstaging/mod.rs +++ b/primitives/src/vstaging/mod.rs @@ -18,3 +18,67 @@ // Put any primitives used by staging API functions here pub use crate::v2::*; +use sp_std::prelude::*; + +use parity_scale_codec::{Decode, Encode}; +use primitives::RuntimeDebug; +use scale_info::TypeInfo; + +#[cfg(feature = "std")] +use parity_util_mem::MallocSizeOf; + +/// Useful type alias for Para IDs. +pub type ParaId = Id; + +/// Constraints on inbound HRMP channels. +#[derive(RuntimeDebug, Clone, PartialEq, Encode, Decode, TypeInfo)] +#[cfg_attr(feature = "std", derive(MallocSizeOf))] +pub struct InboundHrmpLimitations { + /// An exhaustive set of all valid watermarks, sorted ascending + pub valid_watermarks: Vec, +} + +/// Constraints on outbound HRMP channels. +#[derive(RuntimeDebug, Clone, PartialEq, Encode, Decode, TypeInfo)] +#[cfg_attr(feature = "std", derive(MallocSizeOf))] +pub struct OutboundHrmpChannelLimitations { + /// The maximum bytes that can be written to the channel. + pub bytes_remaining: u32, + /// The maximum messages that can be written to the channel. + pub messages_remaining: u32, +} + +/// Constraints on the actions that can be taken by a new parachain +/// block. These limitations are implicitly associated with some particular +/// parachain, which should be apparent from usage. +#[derive(RuntimeDebug, Clone, PartialEq, Encode, Decode, TypeInfo)] +#[cfg_attr(feature = "std", derive(MallocSizeOf))] +pub struct Constraints { + /// The minimum relay-parent number accepted under these constraints. + pub min_relay_parent_number: BlockNumber, + /// The maximum Proof-of-Validity size allowed, in bytes. + pub max_pov_size: u32, + /// The maximum new validation code size allowed, in bytes. + pub max_code_size: u32, + /// The amount of UMP messages remaining. + pub ump_remaining: u32, + /// The amount of UMP bytes remaining. 
+ pub ump_remaining_bytes: u32, + /// The amount of remaining DMP messages. + pub dmp_remaining_messages: u32, + /// The limitations of all registered inbound HRMP channels. + pub hrmp_inbound: InboundHrmpLimitations, + /// The limitations of all registered outbound HRMP channels. + pub hrmp_channels_out: Vec<(ParaId, OutboundHrmpChannelLimitations)>, + /// The maximum number of HRMP messages allowed per candidate. + pub max_hrmp_num_per_candidate: u32, + /// The required parent head-data of the parachain. + pub required_parent: HeadData, + /// The expected validation-code-hash of this parachain. + pub validation_code_hash: ValidationCodeHash, + /// The code upgrade restriction signal as-of this parachain. + pub upgrade_restriction: Option, + /// The future validation code hash, if any, and at what relay-parent + /// number the upgrade would be minimally applied. + pub future_validation_code: Option<(BlockNumber, ValidationCodeHash)>, +} diff --git a/runtime/kusama/src/lib.rs b/runtime/kusama/src/lib.rs index 935863d683ad..35247456ecea 100644 --- a/runtime/kusama/src/lib.rs +++ b/runtime/kusama/src/lib.rs @@ -1849,6 +1849,10 @@ sp_api::impl_runtime_apis! { fn staging_get_disputes() -> Vec<(SessionIndex, CandidateHash, DisputeState)> { unimplemented!() } + + fn staging_validity_constraints(_: ParaId) -> Option { + unimplemented!("Staging API not implemented"); + } } impl beefy_primitives::BeefyApi for Runtime { diff --git a/runtime/polkadot/src/lib.rs b/runtime/polkadot/src/lib.rs index 78d2862e91bd..6c6e854839e0 100644 --- a/runtime/polkadot/src/lib.rs +++ b/runtime/polkadot/src/lib.rs @@ -1730,6 +1730,10 @@ sp_api::impl_runtime_apis! 
{ fn staging_get_disputes() -> Vec<(SessionIndex, CandidateHash, DisputeState)> { unimplemented!() } + + fn staging_validity_constraints(_: ParaId) -> Option { + unimplemented!("Staging API not implemented"); + } } impl beefy_primitives::BeefyApi for Runtime { diff --git a/runtime/rococo/src/lib.rs b/runtime/rococo/src/lib.rs index 2b98e31dd497..9ae752fc6388 100644 --- a/runtime/rococo/src/lib.rs +++ b/runtime/rococo/src/lib.rs @@ -1261,6 +1261,10 @@ sp_api::impl_runtime_apis! { fn staging_get_disputes() -> Vec<(SessionIndex, CandidateHash, DisputeState)> { unimplemented!() } + + fn staging_validity_constraints(_: ParaId) -> Option { + unimplemented!("Staging API not implemented"); + } } impl fg_primitives::GrandpaApi for Runtime { diff --git a/runtime/test-runtime/src/lib.rs b/runtime/test-runtime/src/lib.rs index 783c1801a8e9..9311e90e7122 100644 --- a/runtime/test-runtime/src/lib.rs +++ b/runtime/test-runtime/src/lib.rs @@ -908,6 +908,10 @@ sp_api::impl_runtime_apis! { fn staging_get_disputes() -> Vec<(SessionIndex, CandidateHash, DisputeState)> { polkadot_runtime_parachains::runtime_api_impl::vstaging::get_session_disputes::() } + + fn staging_validity_constraints(_: ParaId) -> Option { + unimplemented!("Staging API not implemented"); + } } impl beefy_primitives::BeefyApi for Runtime { diff --git a/runtime/westend/src/lib.rs b/runtime/westend/src/lib.rs index cd266a91667a..aff9c6b4b242 100644 --- a/runtime/westend/src/lib.rs +++ b/runtime/westend/src/lib.rs @@ -1375,6 +1375,10 @@ sp_api::impl_runtime_apis! { fn staging_get_disputes() -> Vec<(SessionIndex, CandidateHash, DisputeState)> { runtime_parachains::runtime_api_impl::vstaging::get_session_disputes::() } + + fn staging_validity_constraints(_: ParaId) -> Option { + unimplemented!("Staging API not implemented"); + } } impl beefy_primitives::BeefyApi for Runtime {