rewrite first test using zombienet-sdk
Showing 6 changed files with 284 additions and 52 deletions.
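For orientation, the rewritten test follows the usual zombienet-sdk shape: build a network config, spawn it through the provider picked from the environment, grab a subxt client for a relay chain validator, and run assertions over finalized blocks. The snippet below is a minimal sketch of that pattern, not part of the commit; the test name, node name, and the stand-in "assertion" at the end are illustrative placeholders.

use anyhow::anyhow;
use subxt::{OnlineClient, PolkadotConfig};
use zombienet_sdk::NetworkConfigBuilder;

#[tokio::test(flavor = "multi_thread")]
async fn minimal_zombienet_sdk_test() -> Result<(), anyhow::Error> {
    // Node images (polkadot, cumulus) come from the environment, as in the real test below.
    let images = zombienet_sdk::environment::get_images_from_env();

    let config = NetworkConfigBuilder::new()
        .with_relaychain(|r| {
            r.with_chain("rococo-local")
                .with_default_command("polkadot")
                .with_default_image(images.polkadot.as_str())
                .with_node(|node| node.with_name("validator-0"))
        })
        .build()
        .map_err(|errs| {
            // `build()` reports all configuration errors at once.
            anyhow!("config errs: {}", errs.into_iter().map(|e| e.to_string()).collect::<Vec<_>>().join(" "))
        })?;

    // The spawn function is chosen from the environment (e.g. native vs. k8s provider).
    let spawn_fn = zombienet_sdk::environment::get_spawn_fn();
    let network = spawn_fn(config).await?;

    // Get a subxt client for one of the relay chain validators; fetch the latest block as a
    // stand-in for real assertions.
    let relay_client: OnlineClient<PolkadotConfig> = network.get_node("validator-0")?.wait_client().await?;
    let _latest_block = relay_client.blocks().at_latest().await?;

    Ok(())
}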
@@ -0,0 +1,5 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0

#[cfg(feature = "zombie-metadata")]
mod slot_based_3cores;
polkadot/zombienet-sdk-tests/tests/elastic_scaling/slot_based_3cores.rs (218 additions, 0 deletions)
@@ -0,0 +1,218 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0

// Test that parachains that use a single slot-based collator, one with the elastic scaling MVP
// and one with RFC103-style elastic scaling, can achieve full throughput of 3 candidates per
// block.

use anyhow::anyhow;
#[subxt::subxt(runtime_metadata_path = "metadata-files/rococo-local.scale")]
pub mod rococo {}

#[subxt::subxt(
    runtime_metadata_path = "metadata-files/cumulus-test-runtime-elastic-scaling-local.scale"
)]
mod elastic_scaling_para {}

#[subxt::subxt(
    runtime_metadata_path = "metadata-files/cumulus-test-runtime-elastic-scaling-mvp-local.scale"
)]
mod elastic_scaling_mvp_para {}

use rococo::runtime_types::{
    pallet_broker::coretime_interface::CoreAssignment,
    polkadot_runtime_parachains::assigner_coretime::PartsOf57600,
};
use serde_json::json;
use std::collections::HashMap;
use subxt::{OnlineClient, PolkadotConfig};
use subxt_signer::sr25519::dev;
use zombienet_sdk::NetworkConfigBuilder;

use rococo::{self as rococo_api};

#[tokio::test(flavor = "multi_thread")]
async fn slot_based_3cores_test() -> Result<(), anyhow::Error> {
    env_logger::init_from_env(
        env_logger::Env::default().filter_or(env_logger::DEFAULT_FILTER_ENV, "info"),
    );

    let images = zombienet_sdk::environment::get_images_from_env();

    let config = NetworkConfigBuilder::new()
        .with_relaychain(|r| {
            let r = r
                .with_chain("rococo-local")
                .with_default_command("polkadot")
                .with_default_image(images.polkadot.as_str())
                .with_genesis_overrides(json!({
                    "configuration": {
                        "config": {
                            "scheduler_params": {
                                // Num cores is 4, because 2 extra will be added automatically when registering the paras.
                                "num_cores": 4,
                                "max_validators_per_core": 1
                            },
                            "async_backing_params": {
                                "max_candidate_depth": 6,
                                "allowed_ancestry_len": 2
                            }
                        }
                    }
                }))
                // Have to set a `with_node` outside of the loop below, so that `r` has the right
                // type.
                .with_node(|node| node.with_name("validator-0"));

            (1..6)
                .into_iter()
                .fold(r, |acc, i| acc.with_node(|node| node.with_name(&format!("validator-{i}"))))
        })
        .with_parachain(|p| {
            // Para 2100 uses the old elastic scaling MVP, which doesn't send the new UMP signal
            // commitment for selecting the core index.
            p.with_id(2100)
                .with_default_command("test-parachain")
                .with_default_image(images.cumulus.as_str())
                .with_chain("elastic-scaling-mvp")
                .with_default_args(vec![("--experimental-use-slot-based").into()])
                .with_collator(|n| n.with_name("collator-elastic-mvp"))
        })
        .with_parachain(|p| {
            // Para 2200 uses the new RFC103-enabled collator, which sends the UMP signal
            // commitment for selecting the core index.
            p.with_id(2200)
                .with_default_command("test-parachain")
                .with_default_image(images.cumulus.as_str())
                .with_chain("elastic-scaling")
                .with_default_args(vec![("--experimental-use-slot-based").into()])
                .with_collator(|n| n.with_name("collator-elastic"))
        })
        .build()
        .map_err(|e| {
            let errs = e.into_iter().map(|e| e.to_string()).collect::<Vec<_>>().join(" ");
            anyhow!("config errs: {errs}")
        })?;

    let spawn_fn = zombienet_sdk::environment::get_spawn_fn();
    let network = spawn_fn(config).await?;

    let relay_node = network.get_node("validator-0")?;

    let relay_client: OnlineClient<PolkadotConfig> = relay_node.wait_client().await?;
    let alice = dev::alice();

    // Assign two extra cores to each parachain.
    relay_client
        .tx()
        .sign_and_submit_then_watch_default(
            &rococo_api::tx().sudo().sudo(
                rococo::runtime_types::rococo_runtime::RuntimeCall::Utility(
                    rococo::runtime_types::pallet_utility::pallet::Call::batch {
                        calls: vec![
                            rococo::runtime_types::rococo_runtime::RuntimeCall::Coretime(
                                rococo::runtime_types::polkadot_runtime_parachains::coretime::pallet::Call::assign_core {
                                    core: 0,
                                    begin: 0,
                                    assignment: vec![(CoreAssignment::Task(2100), PartsOf57600(57600))],
                                    end_hint: None
                                }
                            ),
                            rococo::runtime_types::rococo_runtime::RuntimeCall::Coretime(
                                rococo::runtime_types::polkadot_runtime_parachains::coretime::pallet::Call::assign_core {
                                    core: 1,
                                    begin: 0,
                                    assignment: vec![(CoreAssignment::Task(2100), PartsOf57600(57600))],
                                    end_hint: None
                                }
                            ),
                            rococo::runtime_types::rococo_runtime::RuntimeCall::Coretime(
                                rococo::runtime_types::polkadot_runtime_parachains::coretime::pallet::Call::assign_core {
                                    core: 2,
                                    begin: 0,
                                    assignment: vec![(CoreAssignment::Task(2200), PartsOf57600(57600))],
                                    end_hint: None
                                }
                            ),
                            rococo::runtime_types::rococo_runtime::RuntimeCall::Coretime(
                                rococo::runtime_types::polkadot_runtime_parachains::coretime::pallet::Call::assign_core {
                                    core: 3,
                                    begin: 0,
                                    assignment: vec![(CoreAssignment::Task(2200), PartsOf57600(57600))],
                                    end_hint: None
                                }
                            )
                        ],
                    },
                ),
            ),
            &alice,
        )
        .await?
        .wait_for_finalized_success()
        .await?;

    log::info!("2 more cores assigned to each parachain");

    // Expect a backed candidate count of 40 for each parachain in 15 relay chain blocks (2.66
    // candidates per para per relay chain block). With 3 cores per para the theoretical maximum
    // is 3 * 15 = 45, so 40 leaves a bit of slack.
    // Note that only blocks after the first session change and blocks that don't contain a session
    // change will be counted.
    assert_para_throughput(&relay_client, 15, [(2100, 40), (2200, 40)].into_iter().collect())
        .await?;

    log::info!("Test finished successfully");

    Ok(())
}

/// Subscribe to finalized relay chain blocks and count `CandidateBacked` events per para.
/// Only blocks seen after the first session change, and which are not themselves session-change
/// blocks, are counted. After `stop_at` such blocks, assert that each para reached at least its
/// expected number of backed candidates.
async fn assert_para_throughput(
    relay_client: &OnlineClient<PolkadotConfig>,
    stop_at: u32,
    expected_candidate_counts: HashMap<u32, u32>,
) -> Result<(), anyhow::Error> {
    let mut blocks_sub = relay_client.blocks().subscribe_finalized().await?;
    let mut candidate_count: HashMap<u32, u32> = HashMap::new();
    let mut current_block_count = 0;
    let mut had_first_session_change = false;

    while let Some(block) = blocks_sub.next().await {
        let block = block?;
        log::debug!("Finalized relay chain block {}", block.number());
        let events = block.events().await?;
        let is_session_change = events.has::<rococo::session::events::NewSession>()?;

        if !had_first_session_change && is_session_change {
            had_first_session_change = true;
        }

        if had_first_session_change && !is_session_change {
            current_block_count += 1;

            for event in events.find::<rococo::para_inclusion::events::CandidateBacked>() {
                *(candidate_count.entry(event?.0.descriptor.para_id.0).or_default()) += 1;
            }
        }

        if current_block_count == stop_at {
            break;
        }
    }

    log::info!(
        "Reached {} finalized relay chain blocks that contain backed candidates. The per-parachain distribution is: {:#?}",
        stop_at,
        candidate_count
    );

    for (para_id, expected_candidate_count) in expected_candidate_counts {
        let actual = *candidate_count
            .get(&para_id)
            .expect("ParaId did not have any backed candidates");
        assert!(
            actual >= expected_candidate_count,
            "Expected at least {expected_candidate_count} backed candidates for para {para_id}, got {actual}"
        );
    }

    Ok(())
}
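The helper is self-contained, so it could be reused for other throughput expectations in later tests. A hypothetical call is shown below; the para id and thresholds are made up for illustration and are not part of this commit.

// Require at least 10 backed candidates for a single para (2300 is a placeholder id)
// over 20 counted relay chain blocks.
assert_para_throughput(&relay_client, 20, [(2300, 10)].into_iter().collect()).await?;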