diff --git a/stacks-common/src/types/mod.rs b/stacks-common/src/types/mod.rs index fafe84a6c32..c50cca29682 100644 --- a/stacks-common/src/types/mod.rs +++ b/stacks-common/src/types/mod.rs @@ -28,7 +28,12 @@ use crate::address::{ C32_ADDRESS_VERSION_MAINNET_MULTISIG, C32_ADDRESS_VERSION_MAINNET_SINGLESIG, C32_ADDRESS_VERSION_TESTNET_MULTISIG, C32_ADDRESS_VERSION_TESTNET_SINGLESIG, }; -use crate::consts::MICROSTACKS_PER_STACKS; +use crate::consts::{ + MICROSTACKS_PER_STACKS, PEER_VERSION_EPOCH_1_0, PEER_VERSION_EPOCH_2_0, + PEER_VERSION_EPOCH_2_05, PEER_VERSION_EPOCH_2_1, PEER_VERSION_EPOCH_2_2, + PEER_VERSION_EPOCH_2_3, PEER_VERSION_EPOCH_2_4, PEER_VERSION_EPOCH_2_5, PEER_VERSION_EPOCH_3_0, + PEER_VERSION_EPOCH_3_1, PEER_VERSION_EPOCH_3_2, PEER_VERSION_EPOCH_3_3, +}; use crate::types::chainstate::{StacksAddress, StacksPublicKey}; use crate::util::hash::Hash160; use crate::util::secp256k1::{MessageSignature, Secp256k1PublicKey}; @@ -95,9 +100,23 @@ pub const MINING_COMMITMENT_WINDOW: u8 = 6; // Only relevant for Nakamoto (epoch 3.x) pub const MINING_COMMITMENT_FREQUENCY_NAKAMOTO: u8 = 3; -#[repr(u32)] -#[derive(Debug, Clone, Eq, PartialEq, PartialOrd, Ord, Hash, Copy, Serialize, Deserialize)] -pub enum StacksEpochId { +macro_rules! define_stacks_epochs { + ($($variant:ident = $value:expr),* $(,)?) => { + #[repr(u32)] + #[derive(Debug, Clone, Copy, Eq, PartialEq, PartialOrd, Ord, Hash, Serialize, Deserialize)] + pub enum StacksEpochId { + $($variant = $value),* + } + + impl StacksEpochId { + pub const ALL: &'static [StacksEpochId] = &[ + $(StacksEpochId::$variant),* + ]; + } + }; +} + +define_stacks_epochs! 
{ Epoch10 = 0x01000, Epoch20 = 0x02000, Epoch2_05 = 0x02005, @@ -112,6 +131,26 @@ pub enum StacksEpochId { Epoch33 = 0x03003, } +impl StacksEpochId { + /// Return the network epoch associated with the StacksEpochId + pub fn network_epoch(epoch: StacksEpochId) -> u8 { + match epoch { + StacksEpochId::Epoch10 => PEER_VERSION_EPOCH_1_0, + StacksEpochId::Epoch20 => PEER_VERSION_EPOCH_2_0, + StacksEpochId::Epoch2_05 => PEER_VERSION_EPOCH_2_05, + StacksEpochId::Epoch21 => PEER_VERSION_EPOCH_2_1, + StacksEpochId::Epoch22 => PEER_VERSION_EPOCH_2_2, + StacksEpochId::Epoch23 => PEER_VERSION_EPOCH_2_3, + StacksEpochId::Epoch24 => PEER_VERSION_EPOCH_2_4, + StacksEpochId::Epoch25 => PEER_VERSION_EPOCH_2_5, + StacksEpochId::Epoch30 => PEER_VERSION_EPOCH_3_0, + StacksEpochId::Epoch31 => PEER_VERSION_EPOCH_3_1, + StacksEpochId::Epoch32 => PEER_VERSION_EPOCH_3_2, + StacksEpochId::Epoch33 => PEER_VERSION_EPOCH_3_3, + } + } +} + #[derive(Debug)] pub enum MempoolCollectionBehavior { ByStacksHeight, @@ -447,13 +486,6 @@ impl StacksEpochId { StacksEpochId::Epoch33 } - pub const ALL_GTE_30: &'static [StacksEpochId] = &[ - StacksEpochId::Epoch30, - StacksEpochId::Epoch31, - StacksEpochId::Epoch32, - StacksEpochId::Epoch33, - ]; - /// In this epoch, how should the mempool perform garbage collection? 
pub fn mempool_garbage_behavior(&self) -> MempoolCollectionBehavior { match self { diff --git a/stackslib/src/chainstate/coordinator/mod.rs b/stackslib/src/chainstate/coordinator/mod.rs index f25b2e5fc20..26f22dab0dd 100644 --- a/stackslib/src/chainstate/coordinator/mod.rs +++ b/stackslib/src/chainstate/coordinator/mod.rs @@ -1732,6 +1732,16 @@ impl< Ok(None) } + /// A helper function for exposing the private process_new_pox_anchor_test function + #[cfg(test)] + pub fn process_new_pox_anchor_test( + &mut self, + block_id: BlockHeaderHash, + already_processed_burn_blocks: &mut HashSet, + ) -> Result, Error> { + self.process_new_pox_anchor(block_id, already_processed_burn_blocks) + } + /// Process a new PoX anchor block, possibly resulting in the PoX history being unwound and /// replayed through a different sequence of consensus hashes. If the new anchor block causes /// the node to reach a prepare-phase that elects a network-affirmed anchor block that we don't diff --git a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs index ce736616418..a154c330623 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs @@ -363,28 +363,13 @@ pub fn load_nakamoto_reward_set( provider: &U, ) -> Result, Error> { let cycle_start_height = burnchain.nakamoto_first_block_of_cycle(reward_cycle); - - let epoch_at_height = SortitionDB::get_stacks_epoch(sort_db.conn(), cycle_start_height)? - .unwrap_or_else(|| { - panic!( - "FATAL: no epoch defined for burn height {}", - cycle_start_height - ) - }); - - // Find the first Stacks block in this reward cycle's preceding prepare phase. - // This block will have invoked `.signers.stackerdb-set-signer-slots()` with the reward set. - // Note that we may not have processed it yet. 
But, if we do find it, then it's - // unique (and since Nakamoto Stacks blocks are processed in order, the anchor block - // cannot change later). - let first_epoch30_reward_cycle = burnchain - .block_height_to_reward_cycle(epoch_at_height.start_height) - .expect("FATAL: no reward cycle for epoch 3.0 start height"); - - if !epoch_at_height - .epoch_id - .uses_nakamoto_reward_set(reward_cycle, first_epoch30_reward_cycle) - { + let prepare_phase_start_height = + cycle_start_height.saturating_sub(u64::from(burnchain.pox_constants.prepare_length)); + let epoch_at_height = + SortitionDB::get_stacks_epoch(sort_db.conn(), prepare_phase_start_height)?.unwrap_or_else( + || panic!("FATAL: no epoch defined for burn height {prepare_phase_start_height}"), + ); + if epoch_at_height.epoch_id < StacksEpochId::Epoch30 { // in epoch 2.5, and in the first reward cycle of epoch 3.0, the reward set can *only* be found in the sortition DB. // The nakamoto chain-processing rules aren't active yet, so we can't look for the reward // cycle info in the nakamoto chain state. @@ -392,7 +377,7 @@ pub fn load_nakamoto_reward_set( get_ancestor_sort_id(&sort_db.index_conn(), cycle_start_height, sortition_tip)? 
else { // reward cycle is too far in the future - warn!("Requested reward cycle start ancestor sortition ID for cycle {} prepare-end height {}, but tip is {}", reward_cycle, cycle_start_height, sortition_tip); + warn!("Requested reward cycle start ancestor sortition ID for cycle {reward_cycle} prepare-end height {cycle_start_height}, but tip is {sortition_tip}"); return Ok(None); }; diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index 24e813f1e05..e7094c9499b 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -942,7 +942,7 @@ fn block_descendant() { pox_constants.v2_unlock_height = 21; pox_constants.pox_3_activation_height = 26; pox_constants.v3_unlock_height = 27; - pox_constants.pox_4_activation_height = 28; + pox_constants.pox_4_activation_height = 33; let mut boot_plan = NakamotoBootPlan::new(function_name!()) .with_test_stackers(test_stackers) @@ -1031,7 +1031,7 @@ fn block_info_tests(use_primary_testnet: bool) { pox_constants.v2_unlock_height = 21; pox_constants.pox_3_activation_height = 26; pox_constants.v3_unlock_height = 27; - pox_constants.pox_4_activation_height = 28; + pox_constants.pox_4_activation_height = 33; let chain_id = if use_primary_testnet { CHAIN_ID_TESTNET @@ -1466,7 +1466,7 @@ fn pox_treatment() { pox_constants.v2_unlock_height = 21; pox_constants.pox_3_activation_height = 26; pox_constants.v3_unlock_height = 27; - pox_constants.pox_4_activation_height = 28; + pox_constants.pox_4_activation_height = 33; let mut boot_plan = NakamotoBootPlan::new(function_name!()) .with_test_stackers(test_stackers.clone()) @@ -1719,7 +1719,7 @@ fn transactions_indexing() { pox_constants.v2_unlock_height = 21; pox_constants.pox_3_activation_height = 26; pox_constants.v3_unlock_height = 27; - pox_constants.pox_4_activation_height = 28; + pox_constants.pox_4_activation_height = 33; let mut boot_plan = 
NakamotoBootPlan::new(function_name!()) .with_test_stackers(test_stackers.clone()) @@ -1784,7 +1784,7 @@ fn transactions_not_indexing() { pox_constants.v2_unlock_height = 21; pox_constants.pox_3_activation_height = 26; pox_constants.v3_unlock_height = 27; - pox_constants.pox_4_activation_height = 28; + pox_constants.pox_4_activation_height = 33; let mut boot_plan = NakamotoBootPlan::new(function_name!()) .with_test_stackers(test_stackers.clone()) @@ -3897,7 +3897,7 @@ fn process_next_nakamoto_block_deadlock() { pox_constants.v2_unlock_height = 21; pox_constants.pox_3_activation_height = 26; pox_constants.v3_unlock_height = 27; - pox_constants.pox_4_activation_height = 28; + pox_constants.pox_4_activation_height = 33; let mut boot_plan = NakamotoBootPlan::new(function_name!()) .with_test_stackers(test_stackers) diff --git a/stackslib/src/chainstate/nakamoto/tests/node.rs b/stackslib/src/chainstate/nakamoto/tests/node.rs index 9bb0aef5de9..c6836446231 100644 --- a/stackslib/src/chainstate/nakamoto/tests/node.rs +++ b/stackslib/src/chainstate/nakamoto/tests/node.rs @@ -1064,6 +1064,75 @@ impl TestStacksNode { Ok((block, size, cost)) } + /// Insert a staging pre-Nakamoto block and microblocks + /// then process them as the next ready block + /// NOTE: Will panic if called with unprocessed staging + /// blocks already in the queue. 
+ pub fn process_pre_nakamoto_next_ready_block<'a>( + stacks_node: &mut TestStacksNode, + sortdb: &mut SortitionDB, + miner: &mut TestMiner, + tenure_id_consensus_hash: &ConsensusHash, + coord: &mut ChainsCoordinator< + 'a, + TestEventObserver, + (), + OnChainRewardSetProvider<'a, TestEventObserver>, + (), + (), + BitcoinIndexer, + >, + block: &StacksBlock, + microblocks: &[StacksMicroblock], + ) -> Result, ChainstateError> { + // First append the block to the staging blocks + { + let ic = sortdb.index_conn(); + let tip = SortitionDB::get_canonical_burn_chain_tip(&ic).unwrap(); + stacks_node + .chainstate + .preprocess_stacks_epoch(&ic, &tip, block, microblocks) + .unwrap(); + } + + let canonical_sortition_tip = coord.canonical_sortition_tip.clone().expect( + "FAIL: processing a new Stacks block, but don't have a canonical sortition tip", + ); + let mut sort_tx = sortdb.tx_begin_at_tip(); + let res = stacks_node + .chainstate + .process_next_staging_block(&mut sort_tx, coord.dispatcher) + .map(|(epoch_receipt, _)| epoch_receipt)?; + sort_tx.commit()?; + if let Some(block_receipt) = res.as_ref() { + let in_sortition_set = coord + .sortition_db + .is_stacks_block_in_sortition_set( + &canonical_sortition_tip, + &block_receipt.header.anchored_header.block_hash(), + ) + .unwrap(); + if in_sortition_set { + let block_hash = block_receipt.header.anchored_header.block_hash(); + // Was this block sufficiently confirmed by the prepare phase that it was a PoX + // anchor block? And if we're in epoch 2.1, does it match the heaviest-confirmed + // block-commit in the burnchain DB, and is it affirmed by the majority of the + // network? 
+ if let Some(pox_anchor) = coord + .sortition_db + .is_stacks_block_pox_anchor(&block_hash, &canonical_sortition_tip) + .unwrap() + { + debug!("Discovered PoX anchor block {block_hash} off of canonical sortition tip {canonical_sortition_tip}"); + coord + .process_new_pox_anchor_test(pox_anchor, &mut HashSet::new()) + .unwrap(); + } + } + } + Ok(res) + } + /// Insert a staging Nakamoto block as a pushed block and /// then process it as the next ready block /// NOTE: Will panic if called with unprocessed staging diff --git a/stackslib/src/chainstate/tests/consensus.rs b/stackslib/src/chainstate/tests/consensus.rs index 55cc74d53cc..8e71d159813 100644 --- a/stackslib/src/chainstate/tests/consensus.rs +++ b/stackslib/src/chainstate/tests/consensus.rs @@ -17,14 +17,11 @@ use std::sync::LazyLock; use clarity::boot_util::boot_code_addr; use clarity::codec::StacksMessageCodec; -use clarity::consts::{ - CHAIN_ID_TESTNET, PEER_VERSION_EPOCH_1_0, PEER_VERSION_EPOCH_2_0, PEER_VERSION_EPOCH_2_05, - PEER_VERSION_EPOCH_2_1, PEER_VERSION_EPOCH_2_2, PEER_VERSION_EPOCH_2_3, PEER_VERSION_EPOCH_2_4, - PEER_VERSION_EPOCH_2_5, PEER_VERSION_EPOCH_3_0, PEER_VERSION_EPOCH_3_1, PEER_VERSION_EPOCH_3_2, - PEER_VERSION_EPOCH_3_3, STACKS_EPOCH_MAX, +use clarity::consts::{CHAIN_ID_TESTNET, STACKS_EPOCH_MAX}; +use clarity::types::chainstate::{ + StacksAddress, StacksBlockId, StacksPrivateKey, StacksPublicKey, TrieHash, }; -use clarity::types::chainstate::{StacksAddress, StacksPrivateKey, StacksPublicKey, TrieHash}; -use clarity::types::{StacksEpoch, StacksEpochId}; +use clarity::types::{EpochList, StacksEpoch, StacksEpochId}; use clarity::util::hash::{MerkleTree, Sha512Trunc256Sum}; use clarity::util::secp256k1::MessageSignature; use clarity::vm::ast::stack_depth_checker::AST_CALL_STACK_DEPTH_BUFFER; @@ -41,15 +38,14 @@ use crate::chainstate::stacks::db::{ClarityTx, StacksChainState, StacksEpochRece use crate::chainstate::stacks::events::TransactionOrigin; use 
crate::chainstate::stacks::tests::TestStacksNode; use crate::chainstate::stacks::{ - Error as ChainstateError, StacksTransaction, TenureChangeCause, TransactionContractCall, - TransactionPayload, TransactionSmartContract, MINER_BLOCK_CONSENSUS_HASH, - MINER_BLOCK_HEADER_HASH, + Error as ChainstateError, StacksTransaction, TransactionContractCall, TransactionPayload, + TransactionSmartContract, MINER_BLOCK_CONSENSUS_HASH, MINER_BLOCK_HEADER_HASH, }; use crate::chainstate::tests::TestChainstate; use crate::core::test_util::{ make_contract_call, make_contract_publish_versioned, make_stacks_transfer_tx, to_addr, }; -use crate::core::{EpochList, BLOCK_LIMIT_MAINNET_21}; +use crate::core::BLOCK_LIMIT_MAINNET_21; use crate::net::tests::NakamotoBootPlan; /// The epochs to test for consensus are the current and upcoming epochs. @@ -94,993 +90,1292 @@ const fn clarity_versions_for_epoch(epoch: StacksEpochId) -> &'static [ClarityVe } } -/// A high-level test harness for running consensus-critical smart contract tests. -/// -/// This struct combines a [`ConsensusTest`] instance for chainstate management and a -/// [`TestTxFactory`] for transaction generation. It provides convenience methods to -/// automate test scenarios involving contract deployments and calls across multiple -/// epochs and Clarity versions. -struct ContractConsensusTest<'a> { - tx_factory: TestTxFactory, - consensus_test: ConsensusTest<'a>, +/// Custom serializer for `Option` to improve snapshot readability. +/// This avoids large diffs in snapshots due to code body changes and focuses on key fields. 
+fn serialize_opt_tx_payload( + value: &Option, + serializer: S, +) -> Result +where + S: Serializer, +{ + let changed = match value { + None => "BitcoinTx".to_string(), + Some(TransactionPayload::TokenTransfer(sender, amount, memo)) => { + format!("TokenTransfer(from: {sender}, amount: {amount}, memo: {memo})") + } + Some(TransactionPayload::SmartContract( + TransactionSmartContract { name, code_body }, + clarity_version, + )) => { + format!("SmartContract(name: {name}, code_body: [..], clarity_version: {clarity_version:?})") + } + Some(TransactionPayload::ContractCall(TransactionContractCall { + address, + contract_name, + function_name, + function_args, + })) => { + format!("ContractCall(address: {address}, contract_name: {contract_name}, function_name: {function_name}, function_args: [{function_args:?}])") + } + Some(payload) => { + format!("{payload:?}") + } + }; + serializer.serialize_str(&changed) } -impl ContractConsensusTest<'_> { - /// Creates a new `ContractConsensusTest`. - pub fn new(test_name: &str) -> Self { - Self { - tx_factory: TestTxFactory::new(CHAIN_ID_TESTNET), - consensus_test: ConsensusTest::new(test_name, vec![]), - } - } +/// Serialize an optional string field appending a non-consensus breaking info message. +fn serialize_opt_string_ncb(value: &Option, serializer: S) -> Result +where + S: Serializer, +{ + let original = match value.as_deref() { + Some(str) => format!("Some({str})"), + None => "None".to_string(), + }; + let changed = format!("{original} [NON-CONSENSUS BREAKING]"); + serializer.serialize_str(&changed) +} - /// Generates and executes the given transaction in a new block. - /// Increases the nonce if the transaction succeeds. - fn append_tx_block(&mut self, tx_spec: &TestTxSpec) -> ExpectedResult { - let tx = self.tx_factory.generate_tx(tx_spec); - let block = TestBlock { - transactions: vec![tx], - }; +/// Represents the expected output of a transaction in a test. 
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ExpectedTransactionOutput { + /// The transaction that was executed. + /// `None` for bitcoin transactions. + #[serde(serialize_with = "serialize_opt_tx_payload")] + pub tx: Option, + /// The possible Clarity VM error message associated to the transaction (non-consensus breaking) + #[serde(serialize_with = "serialize_opt_string_ncb")] + pub vm_error: Option, + /// The expected return value of the transaction. + pub return_type: ClarityValue, + /// The expected execution cost of the transaction. + pub cost: ExecutionCost, +} - let result = self.consensus_test.append_block(block); +/// Represents the expected outputs for a block's execution. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ExpectedBlockOutput { + /// The expected block marf + pub marf_hash: TrieHash, + /// The epoch in which the test block was expected to be evaluated + pub evaluated_epoch: StacksEpochId, + /// The expected outputs for each transaction, in input order. + pub transactions: Vec, + /// The total execution cost of the block. + pub total_block_cost: ExecutionCost, +} - if let ExpectedResult::Success(_) = result { - self.tx_factory.increase_nonce_for_tx(tx_spec); - } +/// Represents the expected result of a consensus test. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum ExpectedResult { + /// The test should succeed with the specified outputs. 
+ Success(ExpectedBlockOutput), + /// The test should fail with an error matching the specified string + /// Cannot match on the exact Error directly as they do not implement + /// Serialize/Deserialize or PartialEq + Failure(String), +} - result +impl ExpectedResult { + fn create_from( + result: Result, + marf_hash: TrieHash, + ) -> Self { + match result { + Ok(epoch_receipt) => { + let transactions: Vec = epoch_receipt + .tx_receipts + .into_iter() + .map(|r| { + let tx = match r.transaction { + TransactionOrigin::Stacks(tx) => Some(tx.payload), + TransactionOrigin::Burn(..) => None, + }; + ExpectedTransactionOutput { + tx, + return_type: r.result, + cost: r.execution_cost, + vm_error: r.vm_error, + } + }) + .collect(); + ExpectedResult::Success(ExpectedBlockOutput { + marf_hash, + evaluated_epoch: epoch_receipt.evaluated_epoch, + transactions, + total_block_cost: epoch_receipt.anchored_block_cost, + }) + } + Err(e) => ExpectedResult::Failure(e.to_string()), + } } +} + +/// Represents a block to be appended in a test and its expected result. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct TestBlock { + /// Transactions to include in the block + pub transactions: Vec, +} + +/// Manages a `TestChainstate` tailored for consensus-rule verification. +/// +/// Initialises the chain with enough burn-chain blocks per epoch to run +/// the requested number of Stacks blocks per epoch. +/// +/// Provides high-level helpers for: +/// - Appending Nakamoto or pre-Nakamoto blocks +pub struct ConsensusChain<'a> { + pub test_chainstate: TestChainstate<'a>, +} - /// Executes a consensus test for a contract function across multiple Stacks epochs. +impl ConsensusChain<'_> { + /// Creates a new [`ConsensusChain`]. /// - /// This helper automates deploying a contract and invoking one of its public functions - /// across different epochs and Clarity versions, ensuring consistent consensus behavior. 
+ /// # Arguments + /// + /// * `test_name` – identifier used for logging / snapshot names / database names + /// * `initial_balances` – `(principal, amount)` pairs that receive an initial STX balance + /// * `num_blocks_per_epoch` – how many **Stacks** blocks must fit into each epoch + /// + /// # Panics /// - /// # Behavior + /// * If `Epoch10` is requested (unsupported) + /// * If any requested epoch is given `0` blocks + pub fn new( + test_name: &str, + initial_balances: Vec<(PrincipalData, u64)>, + num_blocks_per_epoch: HashMap, + ) -> Self { + // Validate blocks + for (epoch_id, num_blocks) in &num_blocks_per_epoch { + assert_ne!( + *epoch_id, + StacksEpochId::Epoch10, + "Epoch10 is not supported" + ); + assert!( + *num_blocks > 0, + "Each epoch must have at least one block. {epoch_id} is empty" + ); + } + // Set up chainstate to support Naka. + let mut boot_plan = NakamotoBootPlan::new(test_name) + .with_pox_constants(7, 1) + .with_initial_balances(initial_balances) + .with_private_key(FAUCET_PRIV_KEY.clone()); + let (epochs, first_burnchain_height) = + Self::calculate_epochs(&boot_plan.pox_constants, num_blocks_per_epoch); + boot_plan = boot_plan.with_epochs(epochs); + let test_chainstate = boot_plan.to_chainstate(None, Some(first_burnchain_height)); + Self { test_chainstate } + } + + /// Calculates a valid [`EpochList`] and starting burnchain height for the test harness. /// - /// The function performs two main phases: - /// 1. **Deployment:** Deploys `contract_code` in each epoch listed in `deploy_epochs` for all - /// applicable Clarity versions. - /// 2. **Execution:** Calls `function_name` in each epoch listed in `call_epochs` on every - /// previously deployed contract. + /// The resulting EpochList satisfies the following: + /// - Each epoch has enough burnchain blocks to accommodate all test blocks. + /// - Epoch 2.5 → 3.0 transition satisfies the following constraints: + /// - 2.5 and 3.0 are in **different reward cycles**. 
+ /// - 2.5 starts **before** the prepare phase of the cycle prior to 3.0 activation. + /// - 3.0 does not start on a reward cycle boundary. + /// - All epoch heights are contiguous and correctly ordered. /// - /// ## Example - /// If `deploy_epochs` = `[2.0, 3.0]` and `call_epochs` = `[3.1]`, the following sequence occurs: - /// - Deploy contract in epoch 2.0 with Clarity 1. - /// - Deploy contract in epoch 3.0 with Clarity 1, 2, and 3. - /// - Call the function in epoch 3.1 on all four deployed contracts. + /// The resulting [`EpochList`] is used to initialize the test chainstate with correct + /// epoch boundaries, enabling accurate simulation of epoch transitions and consensus rules. /// /// # Arguments /// - /// * `contract_name` - Base name for the contract. - /// * `contract_code` - Clarity source code of the contract. - /// * `function_name` - Public function to invoke. - /// * `function_args` - Arguments to pass to the function call. - /// * `deploy_epochs` - Epochs during which the contract should be deployed. - /// * `call_epochs` - Epochs during which the function should be executed. + /// * `pox_constants` - PoX configuration (reward cycle length, prepare phase, etc.). + /// * `num_blocks_per_epoch` - Map of epoch IDs to the number of test blocks to run in each. /// /// # Returns /// - /// A `Vec` with the outcome of each block for snapshot testing. + /// `(EpochList, first_burnchain_height)` — the epoch list and the burnchain + /// height at which the first Stacks block is mined. 
+ fn calculate_epochs( + pox_constants: &PoxConstants, + num_blocks_per_epoch: HashMap, + ) -> (EpochList, u64) { + // Helper function to check if a height is at a reward cycle boundary + let is_reward_cycle_boundary = |height: u64, reward_cycle_length: u64| -> bool { + height % reward_cycle_length <= 1 // Covers both 0 (end of cycle) and 1 (start of cycle) + }; + + let first_burnchain_height = + (pox_constants.pox_4_activation_height + pox_constants.reward_cycle_length + 1) as u64; + info!("StacksEpoch calculate_epochs first_burn_height = {first_burnchain_height}"); + let reward_cycle_length = pox_constants.reward_cycle_length as u64; + let prepare_length = pox_constants.prepare_length as u64; + // Initialize heights + let mut epochs = vec![]; + let mut current_height = 0; + for epoch_id in StacksEpochId::ALL.iter() { + let start_height = current_height; + let end_height = match *epoch_id { + StacksEpochId::Epoch10 => first_burnchain_height, + StacksEpochId::Epoch20 + | StacksEpochId::Epoch2_05 + | StacksEpochId::Epoch21 + | StacksEpochId::Epoch22 + | StacksEpochId::Epoch23 + | StacksEpochId::Epoch24 => { + // Use test vector block count + // Always add 1 so we can ensure we are fully in the epoch before we then execute + // the corresponding test blocks in their own blocks + let num_blocks = num_blocks_per_epoch + .get(epoch_id) + .map(|num_blocks| *num_blocks + 1) + .unwrap_or(0); + start_height + num_blocks + } + StacksEpochId::Epoch25 => { + // Calculate Epoch 2.5 end height and Epoch 3.0 start height. + // Epoch 2.5 must start before the prepare phase of the cycle prior to Epoch 3.0's activation. + // Epoch 2.5 end must equal Epoch 3.0 start + // Epoch 3.0 must not start at a cycle boundary + // Epoch 2.5 and 3.0 cannot be in the same reward cycle. + let num_blocks = num_blocks_per_epoch + .get(epoch_id) + .copied() + .unwrap_or(0) + .saturating_add(1); // Add one block for pox lockups. 
+ + let epoch_25_start = current_height; + let epoch_30_start = epoch_25_start + num_blocks; + + let epoch_25_reward_cycle = epoch_25_start / reward_cycle_length; + let mut epoch_30_start = epoch_30_start; + let mut epoch_30_reward_cycle = epoch_30_start / reward_cycle_length; + // Ensure different reward cycles and Epoch 2.5 starts before prior cycle's prepare phase + let mut prior_cycle = epoch_30_reward_cycle.saturating_sub(1); + let mut prior_prepare_phase_start = + prior_cycle * reward_cycle_length + (reward_cycle_length - prepare_length); + while epoch_25_start + num_blocks >= prior_prepare_phase_start + || epoch_25_reward_cycle >= epoch_30_reward_cycle + || is_reward_cycle_boundary(epoch_30_start, reward_cycle_length) + { + // Advance to 3.0 start so it is not in a reward cycle boundary and to ensure + // 2.5 starts prior to the prepare phase of epoch 30 reward cycle activation + epoch_30_start += 1; + epoch_30_reward_cycle = epoch_30_start / reward_cycle_length; + prior_cycle = epoch_30_reward_cycle.saturating_sub(1); + prior_prepare_phase_start = prior_cycle * reward_cycle_length + + (reward_cycle_length - prepare_length); + } + current_height = epoch_30_start; + epoch_30_start // Epoch 2.5 ends where Epoch 3.0 starts + } + StacksEpochId::Epoch30 | StacksEpochId::Epoch31 | StacksEpochId::Epoch32 => { + // Only need 1 block per Epoch + if num_blocks_per_epoch.contains_key(epoch_id) { + start_height + 1 + } else { + // If we don't care to have any blocks in this epoch + // don't bother giving it an epoch height + start_height + } + } + StacksEpochId::Epoch33 => { + // The last epoch extends to max + STACKS_EPOCH_MAX + } + }; + // Create epoch + let block_limit = if *epoch_id == StacksEpochId::Epoch10 { + ExecutionCost::max_value() + } else { + BLOCK_LIMIT_MAINNET_21.clone() + }; + let network_epoch = StacksEpochId::network_epoch(*epoch_id); + epochs.push(StacksEpoch { + epoch_id: *epoch_id, + start_height, + end_height, + block_limit, + network_epoch, + }); 
+ current_height = end_height; + } + // Validate test vector block counts + for (epoch_id, num_blocks) in num_blocks_per_epoch { + let epoch = epochs + .iter() + .find(|e| e.epoch_id == epoch_id) + .expect("Epoch not found"); + let epoch_length = epoch.end_height - epoch.start_height; + if epoch_id > StacksEpochId::Epoch25 { + assert!( + epoch_length > 0, + "{epoch_id:?} must have at least 1 burn block." + ); + } else { + assert!( + epoch_length >= num_blocks, + "{epoch_id:?} must have at least {num_blocks} burn blocks, got {epoch_length}" + ); + } + } + let epoch_list = EpochList::new(&epochs); + info!("Calculated EpochList from pox constants with first burnchain height of {first_burnchain_height}."; + "epochs" => ?epoch_list, + "first_burnchain_height" => first_burnchain_height + ); + (epoch_list, first_burnchain_height) + } + + /// Appends a single block to the chain as a Nakamoto block and returns the result. /// - /// # Panics + /// This method takes a [`TestBlock`] containing a list of transactions, constructs + /// a fully valid [`NakamotoBlock`], processes it against the current chainstate. /// - /// * If `deploy_epochs` is empty. - /// * If any `call_epoch` precedes the earliest `deploy_epoch`. - pub fn run( - &mut self, - contract_name: &str, - contract_code: &str, - function_name: &str, - function_args: &[ClarityValue], - deploy_epochs: &[StacksEpochId], - call_epochs: &[StacksEpochId], - ) -> Vec { - assert!( - !deploy_epochs.is_empty(), - "At least one deploy epoch is required" + /// # Arguments + /// + /// * `block` - The test block to be processed and appended to the chain. + /// + /// # Returns + /// + /// A [`ExpectedResult`] with the outcome of the block processing. 
+ fn append_nakamoto_block(&mut self, block: TestBlock) -> ExpectedResult { + debug!("--------- Running block {block:?} ---------"); + let (nakamoto_block, _block_size) = self.construct_nakamoto_block(block); + let mut sortdb = self.test_chainstate.sortdb.take().unwrap(); + let mut stacks_node = self.test_chainstate.stacks_node.take().unwrap(); + let chain_tip = + NakamotoChainState::get_canonical_block_header(stacks_node.chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + let sig_hash = nakamoto_block.header.signer_signature_hash(); + debug!( + "--------- Processing block {sig_hash} ---------"; + "block" => ?nakamoto_block ); - let min_deploy_epoch = deploy_epochs.iter().min().unwrap(); - assert!( - call_epochs.iter().all(|e| e >= min_deploy_epoch), - "All call epochs must be >= the minimum deploy epoch" + let expected_marf = nakamoto_block.header.state_index_root; + let res = TestStacksNode::process_pushed_next_ready_block( + &mut stacks_node, + &mut sortdb, + &mut self.test_chainstate.miner, + &chain_tip.consensus_hash, + &mut self.test_chainstate.coord, + nakamoto_block.clone(), + ); + debug!( + "--------- Processed block: {sig_hash} ---------"; + "block" => ?nakamoto_block ); + let remapped_result = res.map(|receipt| receipt.unwrap()); + // Restore chainstate for the next block + self.test_chainstate.sortdb = Some(sortdb); + self.test_chainstate.stacks_node = Some(stacks_node); + ExpectedResult::create_from(remapped_result, expected_marf) + } - let all_epochs: BTreeSet = - deploy_epochs.iter().chain(call_epochs).cloned().collect(); + /// Appends a single block to the chain as a Pre-Nakamoto block and returns the result. + /// + /// This method takes a [`TestBlock`] containing a list of transactions, constructs + /// a fully valid [`StacksBlock`], processes it against the current chainstate. + /// + /// # Arguments + /// + /// * `block` - The test block to be processed and appended to the chain. 
+ /// * `coinbase_nonce` - The coinbase nonce to use and increment + /// + /// # Returns + /// + /// A [`ExpectedResult`] with the outcome of the block processing. + fn append_pre_nakamoto_block(&mut self, block: TestBlock) -> ExpectedResult { + debug!("--------- Running Pre-Nakamoto block {block:?} ---------"); + let (ch, bh) = SortitionDB::get_canonical_stacks_chain_tip_hash( + self.test_chainstate.sortdb_ref().conn(), + ) + .unwrap(); + let tip_id = StacksBlockId::new(&ch, &bh); + let (burn_ops, stacks_block, microblocks) = self + .test_chainstate + .make_pre_nakamoto_tenure_with_txs(&block.transactions); + let (_, _, consensus_hash) = self.test_chainstate.next_burnchain_block(burn_ops); - let mut contract_names = vec![]; - let sender = &FAUCET_PRIV_KEY; - let contract_addr = to_addr(sender); - // Create epoch blocks by pairing each epoch with its corresponding transactions - let mut results = vec![]; - all_epochs.into_iter().for_each(|epoch| { - self.consensus_test.advance_to_epoch(epoch); - if deploy_epochs.contains(&epoch) { - let clarity_versions = clarity_versions_for_epoch(epoch); - let epoch_name = format!("Epoch{}", epoch.to_string().replace(".", "_")); - clarity_versions.iter().for_each(|version| { - let name = format!( - "{contract_name}-{epoch_name}-{}", - version.to_string().replace(" ", "") - ); - contract_names.push(name.clone()); - let result = self.append_tx_block(&TestTxSpec::ContractDeploy { - sender, - name: &name, - code: contract_code, - clarity_version: Some(*version), - }); - results.push(result); - }); - } - if call_epochs.contains(&epoch) { - contract_names.iter().for_each(|contract_name| { - let result = self.append_tx_block(&TestTxSpec::ContractCall { - sender, - contract_addr: &contract_addr, - contract_name, - function_name, - args: function_args, - }); - results.push(result); - }); + debug!( + "--------- Processing Pre-Nakamoto block ---------"; + "block" => ?stacks_block + ); + + let mut stacks_node = 
self.test_chainstate.stacks_node.take().unwrap(); + let mut sortdb = self.test_chainstate.sortdb.take().unwrap(); + let expected_marf = stacks_block.header.state_index_root; + let res = TestStacksNode::process_pre_nakamoto_next_ready_block( + &mut stacks_node, + &mut sortdb, + &mut self.test_chainstate.miner, + &ch, + &mut self.test_chainstate.coord, + &stacks_block, + µblocks, + ); + debug!( + "--------- Processed Pre-Nakamoto block ---------"; + "block" => ?stacks_block + ); + let remapped_result = res.map(|receipt| { + let mut receipt = receipt.unwrap(); + let mut sanitized_receipts = vec![]; + for tx_receipt in &receipt.tx_receipts { + // Remove any coinbase transactions from the output + if tx_receipt.is_coinbase_tx() { + continue; + } + sanitized_receipts.push(tx_receipt.clone()); } + receipt.tx_receipts = sanitized_receipts; + receipt }); - results + // Restore chainstate for the next block + self.test_chainstate.sortdb = Some(sortdb); + self.test_chainstate.stacks_node = Some(stacks_node); + ExpectedResult::create_from(remapped_result, expected_marf) } -} -/// Generates a consensus test for executing a contract function across multiple Stacks epochs. -/// -/// This macro automates both contract deployment and function invocation across different -/// epochs and Clarity versions. -/// It simplifies the setup of consensus-critical tests involving versioned smart contracts. -/// -/// # Behavior -/// -/// - **Deployment:** Deploys `contract_code` in each epoch specified in `deploy_epochs` -/// for every applicable [`ClarityVersion`]. -/// - **Execution:** Calls `function_name` in each epoch from `call_epochs` on all previously -/// deployed contract instances. -/// - **Structure:** Each deployment and function call is executed in its own block, ensuring -/// clear separation between transactions. -/// -/// # Arguments -/// -/// * `$name` — Name of the generated test function. -/// * `contract_name` — The name of the contract. 
-/// * `contract_code` — The Clarity source code for the contract. -/// * `function_name` — The public function to call. -/// * `function_args` — Function arguments, provided as a slice of [`ClarityValue`]. -/// * `deploy_epochs` — *(optional)* Epochs in which to deploy the contract. Defaults to all epochs ≥ 3.0. -/// * `call_epochs` — *(optional)* Epochs in which to call the function. Defaults to [`EPOCHS_TO_TEST`]. -/// -/// # Example -/// -/// ```rust,ignore -/// contract_call_consensus_test!( -/// my_test, -/// contract_name: "my-contract", -/// contract_code: "(define-public (get-message) (ok \"hello\"))", -/// function_name: "get-message", -/// function_args: &[], -/// ); -/// ``` -macro_rules! contract_call_consensus_test { - ( - $name:ident, - contract_name: $contract_name:expr, - contract_code: $contract_code:expr, - function_name: $function_name:expr, - function_args: $function_args:expr, - $(deploy_epochs: $deploy_epochs:expr,)? - $(call_epochs: $call_epochs:expr,)? - ) => { - #[test] - fn $name() { - let contract_name = $contract_name; - - // Handle deploy_epochs parameter (default to all epochs >= 3.0 if not provided) - let deploy_epochs = StacksEpochId::ALL_GTE_30; - $(let deploy_epochs = $deploy_epochs;)? - - // Handle call_epochs parameter (default to EPOCHS_TO_TEST if not provided) - let call_epochs = EPOCHS_TO_TEST; - $(let call_epochs = $call_epochs;)? - - let mut contract_test = ContractConsensusTest::new(function_name!()); - let result = contract_test.run( - contract_name, - $contract_code, - $function_name, - $function_args, - deploy_epochs, - call_epochs, - ); - - insta::assert_ron_snapshot!(result); - } - }; -} - -/// Generates a consensus test for contract deployment across multiple Stacks epochs. -/// -/// This macro automates deploying a contract across different Stacks epochs and -/// Clarity versions. It is primarily used for consensus-critical testing of contract -/// deployment behavior. 
-/// -/// # Behavior -/// -/// - **Deployment:** Deploys `contract_code` in each epoch specified by `deploy_epochs` -/// for all applicable [`ClarityVersion`]s. -/// - **Structure:** Each deployment is executed in its own block, ensuring clear -/// separation between transactions. -/// -/// # Arguments -/// -/// * `$name` — Name of the generated test function. -/// * `contract_name` — Name of the contract being tested. -/// * `contract_code` — The Clarity source code of the contract. -/// * `deploy_epochs` — *(optional)* Epochs in which to deploy the contract. Defaults to [`EPOCHS_TO_TEST`]. -/// -/// # Example -/// -/// ```rust,ignore -/// contract_deploy_consensus_test!( -/// deploy_test, -/// contract_name: "my-contract", -/// contract_code: "(define-public (init) (ok true))", -/// ); -/// ``` -macro_rules! contract_deploy_consensus_test { - // Handle the case where deploy_epochs is not provided - ( - $name:ident, - contract_name: $contract_name:expr, - contract_code: $contract_code:expr, - ) => { - contract_deploy_consensus_test!( - $name, - contract_name: $contract_name, - contract_code: $contract_code, - deploy_epochs: EPOCHS_TO_TEST, - ); - }; - ( - $name:ident, - contract_name: $contract_name:expr, - contract_code: $contract_code:expr, - deploy_epochs: $deploy_epochs:expr, - ) => { - contract_call_consensus_test!( - $name, - contract_name: $contract_name, - contract_code: $contract_code, - function_name: "", // No function calls, just deploys - function_args: &[], // No function calls, just deploys - deploy_epochs: $deploy_epochs, - call_epochs: &[], // No function calls, just deploys - ); - }; -} - -/// The type of transaction to create. 
-pub enum TestTxSpec<'a> { - Transfer { - from: &'a StacksPrivateKey, - to: &'a PrincipalData, - amount: u64, - }, - ContractDeploy { - sender: &'a StacksPrivateKey, - name: &'a str, - code: &'a str, - clarity_version: Option, - }, - ContractCall { - sender: &'a StacksPrivateKey, - contract_addr: &'a StacksAddress, - contract_name: &'a str, - function_name: &'a str, - args: &'a [ClarityValue], - }, -} - -/// A helper to create transactions with incrementing nonces for each account. -pub struct TestTxFactory { - /// Map of address to next nonce - nonce_counter: HashMap, - /// The default chain ID to use for transactions - default_chain_id: u32, -} - -impl TestTxFactory { - /// Creates a new [`TransactionFactory`] with the specified default chain ID. - pub fn new(default_chain_id: u32) -> Self { - Self { - nonce_counter: HashMap::new(), - default_chain_id, - } - } - - /// Manually increments the nonce for the sender of the specified transaction. + /// Appends a single block to the chain and returns the result. /// - /// This method should be called *after* a transaction has been successfully - /// processed to ensure the factory uses the correct next nonce for subsequent - /// transactions from the same sender. + /// This method takes a [`TestBlock`] containing a list of transactions, whether the epoch [`is_naka_epoch`] , + /// constructing a fully valid [`StacksBlock`] or [`NakamotoBlock`] accordingly, processes it against the current chainstate. /// /// # Arguments /// - /// * `tx_spec` - The original specification of the transaction whose sender's - /// nonce should be incremented. - /// - /// # Panics - /// - /// Panics if the sender's address is not found in the nonce counter map. - pub fn increase_nonce_for_tx(&mut self, tx_spec: &TestTxSpec) { - let sender_privk = match tx_spec { - TestTxSpec::Transfer { from, .. } => from, - TestTxSpec::ContractDeploy { sender, .. } => sender, - TestTxSpec::ContractCall { sender, .. 
} => sender, - }; - let address = StacksAddress::p2pkh(false, &StacksPublicKey::from_private(sender_privk)); - let nonce = self - .nonce_counter - .get_mut(&address) - .unwrap_or_else(|| panic!("Nonce not found for address {address}")); - *nonce += 1; - } - - /// Generates a new transaction of the specified type. + /// * `block` - The test block to be processed and appended to the chain. + /// * `coinbase_nonce` - The coinbase nonce to use and increment /// - /// Arguments: - /// - `tx_type`: The type of transaction to create. + /// # Returns /// - /// Returns: - /// A [`StacksTransaction`] representing the created transaction. - pub fn generate_tx(&mut self, tx_spec: &TestTxSpec) -> StacksTransaction { - match tx_spec { - TestTxSpec::Transfer { from, to, amount } => self.transfer(from, to, *amount), - TestTxSpec::ContractDeploy { - sender, - name, - code, - clarity_version, - } => self.contract_deploy(sender, name, code, *clarity_version), - TestTxSpec::ContractCall { - sender, - contract_addr, - contract_name, - function_name, - args, - } => self.contract_call(sender, contract_addr, contract_name, function_name, args), + /// A [`ExpectedResult`] with the outcome of the block processing. + pub fn append_block(&mut self, block: TestBlock, is_naka_epoch: bool) -> ExpectedResult { + if is_naka_epoch { + self.append_nakamoto_block(block) + } else { + self.append_pre_nakamoto_block(block) } } - /// Create a STX transfer transaction. - /// - /// Arguments: - /// - `from`: The sender's private key. - /// - `to`: The recipient's principal data. - /// - `amount`: The amount of STX to transfer. - /// - /// Returns: - /// A [`StacksTransaction`] representing the transfer. - /// - /// Note: The transaction fee is set to 180 micro-STX. 
- pub fn transfer( - &mut self, - from: &StacksPrivateKey, - to: &PrincipalData, - amount: u64, - ) -> StacksTransaction { - let address = StacksAddress::p2pkh(false, &StacksPublicKey::from_private(from)); - let nonce = self.nonce_counter.entry(address).or_insert(0); - make_stacks_transfer_tx(from, *nonce, 180, self.default_chain_id, to, amount) - } + /// Constructs a Nakamoto block with the given [`TestBlock`] configuration. + fn construct_nakamoto_block(&mut self, test_block: TestBlock) -> (NakamotoBlock, usize) { + let chain_tip = NakamotoChainState::get_canonical_block_header( + self.test_chainstate + .stacks_node + .as_ref() + .unwrap() + .chainstate + .db(), + self.test_chainstate.sortdb.as_ref().unwrap(), + ) + .unwrap() + .unwrap(); + let cycle = self.test_chainstate.get_reward_cycle(); + let burn_spent = SortitionDB::get_block_snapshot_consensus( + self.test_chainstate.sortdb_ref().conn(), + &chain_tip.consensus_hash, + ) + .unwrap() + .map(|sn| sn.total_burn) + .unwrap(); + let mut block = NakamotoBlock { + header: NakamotoBlockHeader { + version: 1, + chain_length: chain_tip.stacks_block_height + 1, + burn_spent, + consensus_hash: chain_tip.consensus_hash.clone(), + parent_block_id: chain_tip.index_block_hash(), + tx_merkle_root: Sha512Trunc256Sum::from_data(&[]), + state_index_root: TrieHash::from_empty_data(), + timestamp: 1, + miner_signature: MessageSignature::empty(), + signer_signature: vec![], + pox_treatment: BitVec::ones(1).unwrap(), + }, + txs: test_block.transactions, + }; - /// Create a contract deployment transaction. - /// - /// Arguments: - /// `sender`: The sender's private key. - /// `name`: The name of the contract. - /// `code`: The contract code as a string. - /// - /// Returns: - /// A [`StacksTransaction`] representing the contract deployment. - /// - /// Note: The transaction fee is set based on the contract code length. 
- pub fn contract_deploy( - &mut self, - sender: &StacksPrivateKey, - name: &str, - code: &str, - clarity_version: Option, - ) -> StacksTransaction { - let address = StacksAddress::p2pkh(false, &StacksPublicKey::from_private(sender)); - let nonce = self.nonce_counter.entry(address).or_insert(0); - let tx_bytes = make_contract_publish_versioned( - sender, - *nonce, - (code.len() * 100) as u64, - self.default_chain_id, - name, - code, - clarity_version, - ); - StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap() + let tx_merkle_root = { + let txid_vecs: Vec<_> = block + .txs + .iter() + .map(|tx| tx.txid().as_bytes().to_vec()) + .collect(); + MerkleTree::<Sha512Trunc256Sum>::new(&txid_vecs).root() + }; + block.header.tx_merkle_root = tx_merkle_root; + + // Set the MARF root hash or use an all-zero hash in case of failure. + // NOTE: It is expected to fail when trying to compute the MARF for an invalid block/transactions. + let marf_result = self.compute_block_marf_root_hash(block.header.timestamp, &block.txs); + block.header.state_index_root = match marf_result { + Ok(marf) => marf, + Err(_) => TrieHash::from_bytes(&[0; 32]).unwrap(), + }; + + self.test_chainstate.miner.sign_nakamoto_block(&mut block); + let mut signers = self + .test_chainstate + .config + .test_signers + .clone() + .unwrap_or_default(); + signers.sign_nakamoto_block(&mut block, cycle); + let block_len = block.serialize_to_vec().len(); + (block, block_len) } - /// Create a contract call transaction. - /// - /// Arguments: - /// `sender`: The sender's private key. - /// `contract_addr`: The address of the contract. - /// `contract_name`: The name of the contract. - /// `function_name`: The name of the function to call. - /// `args`: The arguments to pass to the function. - /// - /// Returns: - /// A [`StacksTransaction`] representing the contract call. + /// Computes the MARF root hash for a block. /// - /// Note: The transaction fee is set to 200 micro-STX. 
- pub fn contract_call( + /// This function is intended for use in success test cases only, where all + /// transactions are valid. In other scenarios, the computation may fail. + /// + /// The implementation is deliberately minimal: it does not cover every + /// possible situation (such as new tenure handling), but it should be + /// sufficient for the scope of our test cases. + fn compute_block_marf_root_hash( &mut self, - sender: &StacksPrivateKey, - contract_addr: &StacksAddress, - contract_name: &str, - function_name: &str, - args: &[ClarityValue], - ) -> StacksTransaction { - let address = StacksAddress::p2pkh(false, &StacksPublicKey::from_private(sender)); - let nonce = self.nonce_counter.entry(address).or_insert(0); - let tx_bytes = make_contract_call( - sender, - *nonce, - 200, - self.default_chain_id, - contract_addr, - contract_name, - function_name, - args, + block_time: u64, + block_txs: &[StacksTransaction], + ) -> Result { + let node = self.test_chainstate.stacks_node.as_mut().unwrap(); + let sortdb = self.test_chainstate.sortdb.as_ref().unwrap(); + let burndb_conn = sortdb.index_handle_at_tip(); + let chainstate = &mut node.chainstate; + + let chain_tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), sortdb) + .unwrap() + .unwrap(); + + let (chainstate_tx, clarity_instance) = chainstate.chainstate_tx_begin().unwrap(); + let burndb_conn = sortdb.index_handle_at_tip(); + + let mut clarity_tx = StacksChainState::chainstate_block_begin( + &chainstate_tx, + clarity_instance, + &burndb_conn, + &chain_tip.consensus_hash, + &chain_tip.anchored_header.block_hash(), + &MINER_BLOCK_CONSENSUS_HASH, + &MINER_BLOCK_HEADER_HASH, ); - StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap() + let result = Self::inner_compute_block_marf_root_hash( + &mut clarity_tx, + block_time, + block_txs, + chain_tip.burn_header_height, + ); + clarity_tx.rollback_block(); + result + } + + /// This is where the real MARF computation happens. 
+ /// It is extracted into an _inner_ method to simplify rollback handling, + /// ensuring that rollback can be applied consistently on both success and failure + /// in the _outer_ method. + fn inner_compute_block_marf_root_hash( + clarity_tx: &mut ClarityTx, + block_time: u64, + block_txs: &[StacksTransaction], + burn_header_height: u32, + ) -> Result<TrieHash, String> { + clarity_tx + .connection() + .as_free_transaction(|clarity_tx_conn| { + clarity_tx_conn.with_clarity_db(|db| { + db.setup_block_metadata(Some(block_time))?; + Ok(()) + }) + }) + .map_err(|e| e.to_string())?; + + StacksChainState::process_block_transactions(clarity_tx, block_txs, 0) + .map_err(|e| e.to_string())?; + + NakamotoChainState::finish_block(clarity_tx, None, false, burn_header_height) + .map_err(|e| e.to_string())?; + + Ok(clarity_tx.seal()) } } -fn epoch_3_0_onwards(first_burnchain_height: u64) -> EpochList { - info!("StacksEpoch unit_test first_burn_height = {first_burnchain_height}"); - - EpochList::new(&[ - StacksEpoch { - epoch_id: StacksEpochId::Epoch10, - start_height: 0, - end_height: 0, - block_limit: ExecutionCost::max_value(), - network_epoch: PEER_VERSION_EPOCH_1_0, - }, - StacksEpoch { - epoch_id: StacksEpochId::Epoch20, - start_height: 0, - end_height: 0, - block_limit: ExecutionCost::max_value(), - network_epoch: PEER_VERSION_EPOCH_2_0, - }, - StacksEpoch { - epoch_id: StacksEpochId::Epoch2_05, - start_height: 0, - end_height: 0, - block_limit: ExecutionCost::max_value(), - network_epoch: PEER_VERSION_EPOCH_2_05, - }, - StacksEpoch { - epoch_id: StacksEpochId::Epoch21, - start_height: 0, - end_height: 0, - block_limit: ExecutionCost::max_value(), - network_epoch: PEER_VERSION_EPOCH_2_1, - }, - StacksEpoch { - epoch_id: StacksEpochId::Epoch22, - start_height: 0, - end_height: 0, - block_limit: ExecutionCost::max_value(), - network_epoch: PEER_VERSION_EPOCH_2_2, - }, - StacksEpoch { - epoch_id: StacksEpochId::Epoch23, - start_height: 0, - end_height: 0, - block_limit: 
ExecutionCost::max_value(), - network_epoch: PEER_VERSION_EPOCH_2_3, - }, - StacksEpoch { - epoch_id: StacksEpochId::Epoch24, - start_height: 0, - end_height: 0, - block_limit: ExecutionCost::max_value(), - network_epoch: PEER_VERSION_EPOCH_2_4, - }, - StacksEpoch { - epoch_id: StacksEpochId::Epoch25, - start_height: 0, - end_height: first_burnchain_height, - block_limit: BLOCK_LIMIT_MAINNET_21, - network_epoch: PEER_VERSION_EPOCH_2_5, - }, - StacksEpoch { - epoch_id: StacksEpochId::Epoch30, - start_height: first_burnchain_height, - end_height: first_burnchain_height + 1, - block_limit: BLOCK_LIMIT_MAINNET_21, - network_epoch: PEER_VERSION_EPOCH_3_0, - }, - StacksEpoch { - epoch_id: StacksEpochId::Epoch31, - start_height: first_burnchain_height + 1, - end_height: first_burnchain_height + 2, - block_limit: BLOCK_LIMIT_MAINNET_21, - network_epoch: PEER_VERSION_EPOCH_3_1, - }, - StacksEpoch { - epoch_id: StacksEpochId::Epoch32, - start_height: first_burnchain_height + 2, - end_height: first_burnchain_height + 3, - block_limit: BLOCK_LIMIT_MAINNET_21, - network_epoch: PEER_VERSION_EPOCH_3_2, - }, - StacksEpoch { - epoch_id: StacksEpochId::Epoch33, - start_height: first_burnchain_height + 3, - end_height: STACKS_EPOCH_MAX, - block_limit: BLOCK_LIMIT_MAINNET_21, - network_epoch: PEER_VERSION_EPOCH_3_3, - }, - ]) +/// A complete consensus test that drives a [`ConsensusChain`] through a series of epochs. +/// +/// It stores the blocks to execute per epoch and runs them in chronological order, +/// producing a vector of [`ExpectedResult`] suitable for snapshot testing. +pub struct ConsensusTest<'a> { + pub chain: ConsensusChain<'a>, + epoch_blocks: HashMap>, } -/// Custom serializer for `Option` to improve snapshot readability. -/// This avoids large diffs in snapshots due to code body changes and focuses on key fields. 
-fn serialize_opt_tx_payload( - value: &Option, - serializer: S, -) -> Result -where - S: Serializer, -{ - let changed = match value { - None => "BitcoinTx".to_string(), - Some(TransactionPayload::TokenTransfer(sender, amount, memo)) => { - format!("TokenTransfer(from: {sender}, amount: {amount}, memo: {memo})") - } - Some(TransactionPayload::SmartContract( - TransactionSmartContract { name, code_body }, - clarity_version, - )) => { - format!("SmartContract(name: {name}, code_body: [..], clarity_version: {clarity_version:?})") - } - Some(TransactionPayload::ContractCall(TransactionContractCall { - address, - contract_name, - function_name, - function_args, - })) => { - format!("ContractCall(address: {address}, contract_name: {contract_name}, function_name: {function_name}, function_args: [{function_args:?}])") +impl ConsensusTest<'_> { + /// Constructs a [`ConsensusTest`] from a map of **epoch → blocks**. + /// + /// The map is converted into `num_blocks_per_epoch` for chain initialisation. + pub fn new( + test_name: &str, + initial_balances: Vec<(PrincipalData, u64)>, + epoch_blocks: HashMap>, + ) -> Self { + let mut num_blocks_per_epoch = HashMap::new(); + for (epoch, blocks) in &epoch_blocks { + num_blocks_per_epoch.insert(*epoch, blocks.len() as u64); } - Some(payload) => { - format!("{payload:?}") + Self { + chain: ConsensusChain::new(test_name, initial_balances, num_blocks_per_epoch), + epoch_blocks, } - }; - serializer.serialize_str(&changed) -} + } -/// Serialize an optional string field appending a non-consensus breaking info message. -fn serialize_opt_string_ncb(value: &Option, serializer: S) -> Result -where - S: Serializer, -{ - let original = match value.as_deref() { - Some(str) => format!("Some({str})"), - None => "None".to_string(), - }; - let changed = format!("{original} [NON-CONSENSUS BREAKING]"); - serializer.serialize_str(&changed) -} + /// Executes a full test plan by processing blocks across multiple epochs. 
+ /// + /// This function serves as the primary test runner. It iterates through the + /// provided epochs in chronological order, automatically advancing the + /// chainstate to the start of each epoch. It then processes all [`TestBlock`]s + /// associated with that epoch and collects their results. + /// + /// # Returns + /// + /// A `Vec` of [`ExpectedResult`] with the outcome of each block for snapshot testing. + pub fn run(mut self) -> Vec<ExpectedResult> { + let mut sorted_epochs: Vec<_> = self.epoch_blocks.clone().into_iter().collect(); + sorted_epochs.sort_by_key(|(epoch_id, _)| *epoch_id); -/// Represents the expected output of a transaction in a test. -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] -pub struct ExpectedTransactionOutput { - /// The transaction that was executed. - /// `None` for bitcoin transactions. - #[serde(serialize_with = "serialize_opt_tx_payload")] - pub tx: Option, - /// The possible Clarity VM error message associated to the transaction (non-consensus breaking) - #[serde(serialize_with = "serialize_opt_string_ncb")] - pub vm_error: Option, - /// The expected return value of the transaction. - pub return_type: ClarityValue, - /// The expected execution cost of the transaction. - pub cost: ExecutionCost, -} + let mut results = vec![]; -/// Represents the expected outputs for a block's execution. -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] -pub struct ExpectedBlockOutput { - /// The expected block marf - pub marf_hash: TrieHash, - /// The epoch in which the test block was expected to be evaluated - pub evaluated_epoch: StacksEpochId, - /// The expected outputs for each transaction, in input order. - pub transactions: Vec, - /// The total execution cost of the block. - pub total_block_cost: ExecutionCost, + for (epoch, blocks) in sorted_epochs { + debug!( + "--------- Processing epoch {epoch:?} with {} blocks ---------", + blocks.len() + ); + // Use the miner key to prevent messing with FAUCET nonces. 
+ let miner_key = self.chain.test_chainstate.miner.nakamoto_miner_key(); + self.chain + .test_chainstate + .advance_into_epoch(&miner_key, epoch); + + for block in blocks { + results.push(self.chain.append_block(block, epoch.uses_nakamoto_blocks())); + } + } + results + } } -/// Represents the expected result of a consensus test. -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] -pub enum ExpectedResult { - /// The test should succeed with the specified outputs. - Success(ExpectedBlockOutput), - /// The test should fail with an error matching the specified string - /// Cannot match on the exact Error directly as they do not implement - /// Serialize/Deserialize or PartialEq - Failure(String), +/// A high-level test harness for running consensus-critical smart contract tests. +/// +/// This struct enables end-to-end testing of Clarity smart contracts under varying epoch conditions, +/// including different Clarity language versions and block rule sets. It automates: +/// +/// - Contract deployment in specified epochs (with epoch-appropriate Clarity versions) +/// - Function execution in subsequent or same epochs +/// - Block-by-block execution with precise control over transaction ordering and nonces +/// - Snapshot testing of execution outcomes via [`ExpectedResult`] +/// +/// It integrates: +/// - [`ConsensusChain`] for chain simulation and block production +/// - [`TestTxFactory`] for deterministic transaction generation +/// +/// NOTE: The **majority of logic and state computation occurs during construction to enable a deterministic TestChainstate** (`new()`): +/// - All contract names are generated and versioned +/// - Block counts per epoch are precomputed +/// - Epoch order is finalized +/// - Transaction sequencing is fully planned +struct ContractConsensusTest<'a> { + /// Factory for generating signed, nonce-managed transactions. + tx_factory: TestTxFactory, + /// Underlying chainstate used for block execution and consensus checks. 
+ chain: ConsensusChain<'a>, + /// Address of the contract deployer (the test faucet). + contract_addr: StacksAddress, + /// Mapping of epoch → list of `(contract_name, ClarityVersion)` deployed in that epoch. + /// Multiple versions may exist per epoch (e.g., Clarity 1, 2, 3 in Epoch 3.0). + contract_deploys_per_epoch: HashMap>, + /// Mapping of epoch → list of `contract_names` that should be called in that epoch. + contract_calls_per_epoch: HashMap>, + /// Source code of the Clarity contract being deployed and called. + contract_code: String, + /// Name of the public function to invoke during the call phase. + function_name: String, + /// Arguments to pass to `function_name` on every call. + function_args: Vec, + /// Sorted, deduplicated set of all epochs involved. + /// Used to iterate through test phases in chronological order. + all_epochs: BTreeSet, } -impl ExpectedResult { - fn create_from( - result: Result, - marf_hash: TrieHash, +impl ContractConsensusTest<'_> { + /// Creates a new [`ContractConsensusTest`] instance. 
+ /// + /// Initializes the test environment to: + /// - Deploy `contract_code` under `contract_name` in each `deploy_epochs` + /// - Call `function_name` with `function_args` in each `call_epochs` + /// - Track all contract instances per epoch and Clarity version + /// - Precompute block counts per epoch for stable chain simulation + /// + /// # Arguments + /// + /// * `test_name` - Unique identifier for the test run (used in logging and snapshots) + /// * `initial_balances` - Initial STX balances for principals (e.g., faucet, users) + /// * `deploy_epochs` - List of epochs where contract deployment should occur + /// * `call_epochs` - List of epochs where function calls should be executed + /// * `contract_name` - Base name for deployed contracts (versioned suffixes added automatically) + /// * `contract_code` - Clarity source code of the contract + /// * `function_name` - Contract function to test + /// * `function_args` - Arguments passed to `function_name` on every call + /// + /// # Panics + /// + /// - If `deploy_epochs` is empty. + /// - If any `call_epoch` is less than the minimum `deploy_epoch`. + #[allow(clippy::too_many_arguments)] + pub fn new( + test_name: &str, + initial_balances: Vec<(PrincipalData, u64)>, + deploy_epochs: &[StacksEpochId], + call_epochs: &[StacksEpochId], + contract_name: &str, + contract_code: &str, + function_name: &str, + function_args: &[ClarityValue], ) -> Self { - match result { - Ok(epoch_receipt) => { - let transactions: Vec = epoch_receipt - .tx_receipts - .into_iter() - .map(|r| { - let tx = match r.transaction { - TransactionOrigin::Stacks(tx) => Some(tx.payload), - TransactionOrigin::Burn(..) 
=> None, - }; - ExpectedTransactionOutput { - tx, - return_type: r.result, - cost: r.execution_cost, - vm_error: r.vm_error, - } - }) - .collect(); - ExpectedResult::Success(ExpectedBlockOutput { - marf_hash, - evaluated_epoch: epoch_receipt.evaluated_epoch, - transactions, - total_block_cost: epoch_receipt.anchored_block_cost, - }) + assert!( + !deploy_epochs.is_empty(), + "At least one deploy epoch is required" + ); + let min_deploy_epoch = deploy_epochs.iter().min().unwrap(); + assert!( + call_epochs.iter().all(|e| e >= min_deploy_epoch), + "All call epochs must be >= the minimum deploy epoch" + ); + + // Build epoch_blocks map based on deploy and call epochs + let mut num_blocks_per_epoch: HashMap<StacksEpochId, u64> = HashMap::new(); + let mut contract_deploys_per_epoch: HashMap<StacksEpochId, Vec<(String, ClarityVersion)>> = + HashMap::new(); + let mut contract_calls_per_epoch: HashMap<StacksEpochId, Vec<String>> = HashMap::new(); + let mut contract_names = vec![]; + + // Combine and sort unique epochs + let all_epochs: BTreeSet<StacksEpochId> = + deploy_epochs.iter().chain(call_epochs).cloned().collect(); + + // Precompute contract names and block counts + for epoch in &all_epochs { + let mut num_blocks = 0; + + if deploy_epochs.contains(epoch) { + let clarity_versions = clarity_versions_for_epoch(*epoch); + let epoch_name = format!("Epoch{}", epoch.to_string().replace('.', "_")); + + // Each deployment is a separate TestBlock + for &version in clarity_versions { + let version_tag = version.to_string().replace(' ', ""); + let name = format!("{contract_name}-{epoch_name}-{version_tag}"); + contract_deploys_per_epoch + .entry(*epoch) + .or_default() + .push((name.clone(), version)); + contract_names.push(name.clone()); + num_blocks += 1; + } } - Err(e) => ExpectedResult::Failure(e.to_string()), + + if call_epochs.contains(epoch) { + // Each call is a separate TestBlock + for name in &contract_names { + // One call per previously deployed contract instance + contract_calls_per_epoch + .entry(*epoch) + .or_default() + .push(name.clone()); + num_blocks += 1; + } + } + if num_blocks > 
0 { + num_blocks_per_epoch.insert(*epoch, num_blocks); + } + } + + Self { + tx_factory: TestTxFactory::new(CHAIN_ID_TESTNET), + chain: ConsensusChain::new(test_name, initial_balances, num_blocks_per_epoch), + contract_addr: to_addr(&FAUCET_PRIV_KEY), + contract_deploys_per_epoch, + contract_calls_per_epoch, + contract_code: contract_code.to_string(), + function_name: function_name.to_string(), + function_args: function_args.to_vec(), + all_epochs, } } -} -/// Represents a block to be appended in a test and its expected result. -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] -pub struct TestBlock { - /// Transactions to include in the block - pub transactions: Vec, -} + /// Generates a transaction, appends it to a new test block, and executes the block. + /// + /// If the transaction succeeds, this function automatically increments the sender's + /// nonce for subsequent transactions. + /// + /// # Arguments + /// + /// - `tx_spec`: The transaction specification to generate and execute. + /// - `is_naka_block`: Whether this block is mined under Nakamoto consensus rules. + /// + /// # Returns + /// + /// The [`ExpectedResult`] of block execution (success/failure with VM output) + fn append_tx_block(&mut self, tx_spec: &TestTxSpec, is_naka_block: bool) -> ExpectedResult { + let tx = self.tx_factory.generate_tx(tx_spec); + let block = TestBlock { + transactions: vec![tx], + }; -/// Represents a consensus test with chainstate. -pub struct ConsensusTest<'a> { - pub chain: TestChainstate<'a>, -} + let result = self.chain.append_block(block, is_naka_block); -impl ConsensusTest<'_> { - /// Creates a new `ConsensusTest` with the given test name and initial balances. - pub fn new(test_name: &str, initial_balances: Vec<(PrincipalData, u64)>) -> Self { - // Set up chainstate to start at Epoch 3.0 - let mut boot_plan = NakamotoBootPlan::new(test_name) - // These are the minimum values found for the fastest test execution. 
- // - // If changing these values, ensure the following conditions are met: - // 1. Min 6 reward blocks (test framework limitation). - // 2. Epoch 3.0 starts in the reward phase. - // 3. Tests bypass mainnet's prepare_length >= 3 (allowing 1). - // - Current boot sequence: - // - Cycle 3: Signers at height 27 register for 12 reward cycles - // - Cycle 4: Epoch 3.0 starts at height 30 - // Tests generate 1 bitcoin block per epoch transition after 3.0 - // staying within the registration window - .with_pox_constants(7, 1) - .with_initial_balances(initial_balances) - .with_private_key(FAUCET_PRIV_KEY.clone()); - let epochs = epoch_3_0_onwards( - (boot_plan.pox_constants.pox_4_activation_height - + boot_plan.pox_constants.reward_cycle_length - + 1) as u64, - ); - boot_plan = boot_plan.with_epochs(epochs); - let chain = boot_plan.boot_nakamoto_chainstate(None); + if let ExpectedResult::Success(_) = result { + self.tx_factory.increase_nonce_for_tx(tx_spec); + } - Self { chain } + result } - /// Advances the chainstate to the specified epoch. Creating a tenure change block per burn block height - pub fn advance_to_epoch(&mut self, target_epoch: StacksEpochId) { - let burn_block_height = self.chain.get_burn_block_height(); - let mut current_epoch = - SortitionDB::get_stacks_epoch(self.chain.sortdb().conn(), burn_block_height) - .unwrap() - .unwrap() - .epoch_id; - assert!(current_epoch <= target_epoch, "Chainstate is already at a higher epoch than the target. Current epoch: {current_epoch}. 
Target epoch: {target_epoch}"); - while current_epoch < target_epoch { - let (burn_ops, mut tenure_change, miner_key) = self - .chain - .begin_nakamoto_tenure(TenureChangeCause::BlockFound); - let (_, header_hash, consensus_hash) = self.chain.next_burnchain_block(burn_ops); - let vrf_proof = self.chain.make_nakamoto_vrf_proof(miner_key); - - tenure_change.tenure_consensus_hash = consensus_hash.clone(); - tenure_change.burn_view_consensus_hash = consensus_hash.clone(); - let tenure_change_tx = self.chain.miner.make_nakamoto_tenure_change(tenure_change); - let coinbase_tx = self.chain.miner.make_nakamoto_coinbase(None, vrf_proof); - - let blocks_and_sizes = self - .chain - .make_nakamoto_tenure(tenure_change_tx, coinbase_tx, Some(0)) - .unwrap(); - assert_eq!( - blocks_and_sizes.len(), - 1, - "Mined more than one Nakamoto block" - ); - let burn_block_height = self.chain.get_burn_block_height(); - current_epoch = - SortitionDB::get_stacks_epoch(self.chain.sortdb().conn(), burn_block_height) - .unwrap() - .unwrap() - .epoch_id; - } + /// Deploys all contract versions scheduled for the given epoch. + /// + /// For each Clarity version supported in the epoch: + /// - Generates a unique contract name (e.g., `my-contract-Epoch30-Clarity3`) + /// - Deploys in a **separate block** + /// - Uses `None` for Clarity version in pre-2.1 epochs (behaviour defaults to Clarity 1) + /// + /// # Returns + /// A vector of [`ExpectedResult`] values, one per deployment block. + fn deploy_contracts(&mut self, epoch: StacksEpochId) -> Vec { + let Some(contract_names) = self.contract_deploys_per_epoch.get(&epoch) else { + warn!("No contract deployments found for {epoch}."); + return vec![]; + }; + + let is_naka_block = epoch.uses_nakamoto_blocks(); + contract_names + .clone() + .iter() + .map(|(name, version)| { + let clarity_version = if epoch < StacksEpochId::Epoch21 { + // Old epochs have no concept of clarity version. It defaults to + // clarity version 1 behaviour. 
+ None + } else { + Some(*version) + }; + self.append_tx_block( + &TestTxSpec::ContractDeploy { + sender: &FAUCET_PRIV_KEY, + name, + code: &self.contract_code.clone(), + clarity_version, + }, + is_naka_block, + ) + }) + .collect() } - /// Appends a single block to the chain and returns the result. + /// Executes the test function on **all** contracts deployed in the given epoch. /// - /// This method takes a [`TestBlock`] containing a list of transactions, constructs - /// a fully valid [`NakamotoBlock`], processes it against the current chainstate. + /// Each call occurs in a **separate block** to isolate side effects and enable + /// fine-grained snapshot assertions. All prior deployments (even from earlier epochs) + /// are callable if they exist in the chain state. /// /// # Arguments /// - /// * `block` - The test block to be processed and appended to the chain. + /// - `epoch`: The epoch in which to perform contract calls. /// /// # Returns /// - /// A [`ExpectedResult`] with the outcome of the block processing. 
- pub fn append_block(&mut self, block: TestBlock) -> ExpectedResult {
- debug!("--------- Running block {block:?} ---------");
- let (nakamoto_block, block_size) = self.construct_nakamoto_block(block);
- let mut sortdb = self.chain.sortdb.take().unwrap();
- let mut stacks_node = self.chain.stacks_node.take().unwrap();
- let chain_tip =
- NakamotoChainState::get_canonical_block_header(stacks_node.chainstate.db(), &sortdb)
- .unwrap()
- .unwrap();
- let pox_constants = PoxConstants::test_default();
- let sig_hash = nakamoto_block.header.signer_signature_hash();
- debug!(
- "--------- Processing block {sig_hash} ---------";
- "block" => ?nakamoto_block
- );
- let expected_marf = nakamoto_block.header.state_index_root;
- let res = TestStacksNode::process_pushed_next_ready_block(
- &mut stacks_node,
- &mut sortdb,
- &mut self.chain.miner,
- &chain_tip.consensus_hash,
- &mut self.chain.coord,
- nakamoto_block.clone(),
- );
- debug!(
- "--------- Processed block: {sig_hash} ---------";
- "block" => ?nakamoto_block
- );
- let remapped_result = res.map(|receipt| receipt.unwrap());
- // Restore chainstate for the next block
- self.chain.sortdb = Some(sortdb);
- self.chain.stacks_node = Some(stacks_node);
- ExpectedResult::create_from(remapped_result, expected_marf)
+ /// A `Vec` of [`ExpectedResult`] with one entry per function call
+ fn call_contracts(&mut self, epoch: StacksEpochId) -> Vec {
+ let Some(contract_names) = self.contract_calls_per_epoch.get(&epoch) else {
+ warn!("No contract calls found for {epoch}.");
+ return vec![];
+ };
+
+ let is_naka_block = epoch.uses_nakamoto_blocks();
+ contract_names
+ .clone()
+ .iter()
+ .map(|contract_name| {
+ self.append_tx_block(
+ &TestTxSpec::ContractCall {
+ sender: &FAUCET_PRIV_KEY,
+ contract_addr: &self.contract_addr.clone(),
+ contract_name,
+ function_name: &self.function_name.clone(),
+ args: &self.function_args.clone(),
+ },
+ is_naka_block,
+ )
+ })
+ .collect()
 }

- /// Executes a full test plan by processing blocks
across multiple epochs. + /// Executes the full consensus test: deploy in [`Self::contract_deploys_per_epoch`], call in [`Self::contract_calls_per_epoch`]. /// - /// This function serves as the primary test runner. It iterates through the - /// provided epochs in chronological order, automatically advancing the - /// chainstate to the start of each epoch. It then processes all [`TestBlock`]'s - /// associated with that epoch and collects their results. + /// Processes epochs in **sorted order** using [`Self::all_epochs`]. For each epoch: + /// - Advances the chain into the target epoch + /// - Deploys contracts (if scheduled) + /// - Executes function calls (if scheduled) /// - /// # Arguments + /// # Execution Order Example /// - /// * `epoch_blocks` - A map where keys are [`StacksEpochId`]s and values are the - /// sequence of blocks to be executed during that epoch. + /// Given at test instantiation: + /// ```rust,ignore + /// deploy_epochs = [Epoch20, Epoch30] + /// call_epochs = [Epoch30, Epoch31] + /// ``` /// - /// # Returns + /// The sequence is: + /// 1. Enter Epoch 2.0 → Deploy `contract-v1` + /// 2. Enter Epoch 3.0 → Deploy `contract-v1`, `contract-v2`, `contract-v3` + /// 3. Enter Epoch 3.0 → Call function on all 4 deployed contracts + /// 4. Enter Epoch 3.1 → Call function on all 4 deployed contracts /// - /// A `Vec` with the outcome of each block for snapshot testing. 
- pub fn run(
- mut self,
- epoch_blocks: HashMap>,
- ) -> Vec {
- // Validate blocks
- for (epoch_id, blocks) in epoch_blocks.iter() {
- assert!(
- !matches!(
- *epoch_id,
- StacksEpochId::Epoch10
- | StacksEpochId::Epoch20
- | StacksEpochId::Epoch2_05
- | StacksEpochId::Epoch21
- | StacksEpochId::Epoch22
- | StacksEpochId::Epoch23
- | StacksEpochId::Epoch24
- | StacksEpochId::Epoch25
- ),
- "Pre-Nakamoto Tenures are not Supported"
- );
- assert!(
- !blocks.is_empty(),
- "Each epoch must have at least one block"
- );
+ /// # Returns
+ ///
+ /// A `Vec` of [`ExpectedResult`] with the outcome of each block for snapshot testing.
+ pub fn run(mut self) -> Vec {
+ let mut results = Vec::new();
+
+ // Process epochs in order
+ for epoch in self.all_epochs.clone() {
+ // Use the miner as the sender to prevent messing with the block transaction nonces of the deployer/callers
+ let private_key = self.chain.test_chainstate.miner.nakamoto_miner_key();
+
+ // Advance the chain into the target epoch
+ self.chain
+ .test_chainstate
+ .advance_into_epoch(&private_key, epoch);
+
+ results.extend(self.deploy_contracts(epoch));
+ results.extend(self.call_contracts(epoch));
+ }
- let mut sorted_epochs: Vec<_> = epoch_blocks.into_iter().collect();
- sorted_epochs.sort_by_key(|(epoch_id, _)| *epoch_id);
+ results
+ }
+}

- let mut results = vec![];

+/// The type of transaction to create.
+pub enum TestTxSpec<'a> {
+ Transfer {
+ from: &'a StacksPrivateKey,
+ to: &'a PrincipalData,
+ amount: u64,
+ },
+ ContractDeploy {
+ sender: &'a StacksPrivateKey,
+ name: &'a str,
+ code: &'a str,
+ clarity_version: Option,
+ },
+ ContractCall {
+ sender: &'a StacksPrivateKey,
+ contract_addr: &'a StacksAddress,
+ contract_name: &'a str,
+ function_name: &'a str,
+ args: &'a [ClarityValue],
+ },
+}

- for (epoch, blocks) in sorted_epochs {
- debug!(
- "--------- Processing epoch {epoch:?} with {} blocks ---------",
- blocks.len()
- );
- self.advance_to_epoch(epoch);

+/// A helper to create transactions with incrementing nonces for each account.
+pub struct TestTxFactory {
+ /// Map of address to next nonce
+ nonce_counter: HashMap,
+ /// The default chain ID to use for transactions
+ default_chain_id: u32,
+}

- for block in blocks {
- results.push(self.append_block(block));
- }

+impl TestTxFactory {
+ /// Creates a new [`TestTxFactory`] with the specified default chain ID.
+ pub fn new(default_chain_id: u32) -> Self {
+ Self {
+ nonce_counter: HashMap::new(),
+ default_chain_id,
 }
- results
 }

- /// Constructs a Nakamoto block with the given [`TestBlock`] configuration.
- fn construct_nakamoto_block(&mut self, test_block: TestBlock) -> (NakamotoBlock, usize) { - let chain_tip = NakamotoChainState::get_canonical_block_header( - self.chain.stacks_node.as_ref().unwrap().chainstate.db(), - self.chain.sortdb.as_ref().unwrap(), - ) - .unwrap() - .unwrap(); - let cycle = self.chain.get_reward_cycle(); - let burn_spent = SortitionDB::get_block_snapshot_consensus( - self.chain.sortdb_ref().conn(), - &chain_tip.consensus_hash, - ) - .unwrap() - .map(|sn| sn.total_burn) - .unwrap(); - let mut block = NakamotoBlock { - header: NakamotoBlockHeader { - version: 1, - chain_length: chain_tip.stacks_block_height + 1, - burn_spent, - consensus_hash: chain_tip.consensus_hash.clone(), - parent_block_id: chain_tip.index_block_hash(), - tx_merkle_root: Sha512Trunc256Sum::from_data(&[]), - state_index_root: TrieHash::from_empty_data(), - timestamp: 1, - miner_signature: MessageSignature::empty(), - signer_signature: vec![], - pox_treatment: BitVec::ones(1).unwrap(), - }, - txs: test_block.transactions, - }; - - let tx_merkle_root = { - let txid_vecs: Vec<_> = block - .txs - .iter() - .map(|tx| tx.txid().as_bytes().to_vec()) - .collect(); - MerkleTree::::new(&txid_vecs).root() + /// Manually increments the nonce for the sender of the specified transaction. + /// + /// This method should be called *after* a transaction has been successfully + /// processed to ensure the factory uses the correct next nonce for subsequent + /// transactions from the same sender. + /// + /// # Arguments + /// + /// * `tx_spec` - The original specification of the transaction whose sender's + /// nonce should be incremented. + /// + /// # Panics + /// + /// Panics if the sender's address is not found in the nonce counter map. + pub fn increase_nonce_for_tx(&mut self, tx_spec: &TestTxSpec) { + let sender_privk = match tx_spec { + TestTxSpec::Transfer { from, .. } => from, + TestTxSpec::ContractDeploy { sender, .. } => sender, + TestTxSpec::ContractCall { sender, .. 
} => sender, }; - block.header.tx_merkle_root = tx_merkle_root; + let address = StacksAddress::p2pkh(false, &StacksPublicKey::from_private(sender_privk)); + let nonce = self + .nonce_counter + .get_mut(&address) + .unwrap_or_else(|| panic!("Nonce not found for address {address}")); + *nonce += 1; + } - // Set the MARF root hash or use an all-zero hash in case of failure. - // NOTE: It is expected to fail when trying computing the marf for invalid block/transactions. - let marf_result = self.compute_block_marf_root_hash(block.header.timestamp, &block.txs); - block.header.state_index_root = match marf_result { - Ok(marf) => marf, - Err(_) => TrieHash::from_bytes(&[0; 32]).unwrap(), - }; + /// Generates a new transaction of the specified type. + /// + /// Arguments: + /// - `tx_type`: The type of transaction to create. + /// + /// Returns: + /// A [`StacksTransaction`] representing the created transaction. + pub fn generate_tx(&mut self, tx_spec: &TestTxSpec) -> StacksTransaction { + match tx_spec { + TestTxSpec::Transfer { from, to, amount } => self.transfer(from, to, *amount), + TestTxSpec::ContractDeploy { + sender, + name, + code, + clarity_version, + } => self.contract_deploy(sender, name, code, *clarity_version), + TestTxSpec::ContractCall { + sender, + contract_addr, + contract_name, + function_name, + args, + } => self.contract_call(sender, contract_addr, contract_name, function_name, args), + } + } - self.chain.miner.sign_nakamoto_block(&mut block); - let mut signers = self.chain.config.test_signers.clone().unwrap_or_default(); - signers.sign_nakamoto_block(&mut block, cycle); - let block_len = block.serialize_to_vec().len(); - (block, block_len) + /// Create a STX transfer transaction. + /// + /// Arguments: + /// - `from`: The sender's private key. + /// - `to`: The recipient's principal data. + /// - `amount`: The amount of STX to transfer. + /// + /// Returns: + /// A [`StacksTransaction`] representing the transfer. 
+ /// + /// Note: The transaction fee is set to 180 micro-STX. + pub fn transfer( + &mut self, + from: &StacksPrivateKey, + to: &PrincipalData, + amount: u64, + ) -> StacksTransaction { + let address = StacksAddress::p2pkh(false, &StacksPublicKey::from_private(from)); + let nonce = self.nonce_counter.entry(address).or_insert(0); + make_stacks_transfer_tx(from, *nonce, 180, self.default_chain_id, to, amount) } - /// Computes the MARF root hash for a block. + /// Create a contract deployment transaction. /// - /// This function is intended for use in success test cases only, where all - /// transactions are valid. In other scenarios, the computation may fail. + /// Arguments: + /// `sender`: The sender's private key. + /// `name`: The name of the contract. + /// `code`: The contract code as a string. /// - /// The implementation is deliberately minimal: it does not cover every - /// possible situation (such as new tenure handling), but it should be - /// sufficient for the scope of our test cases. - fn compute_block_marf_root_hash( + /// Returns: + /// A [`StacksTransaction`] representing the contract deployment. + /// + /// Note: The transaction fee is set based on the contract code length. 
+ pub fn contract_deploy( &mut self, - block_time: u64, - block_txs: &[StacksTransaction], - ) -> Result { - let node = self.chain.stacks_node.as_mut().unwrap(); - let sortdb = self.chain.sortdb.as_ref().unwrap(); - let burndb_conn = sortdb.index_handle_at_tip(); - let chainstate = &mut node.chainstate; - - let chain_tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), sortdb) - .unwrap() - .unwrap(); - - let (chainstate_tx, clarity_instance) = chainstate.chainstate_tx_begin().unwrap(); - let burndb_conn = sortdb.index_handle_at_tip(); - - let mut clarity_tx = StacksChainState::chainstate_block_begin( - &chainstate_tx, - clarity_instance, - &burndb_conn, - &chain_tip.consensus_hash, - &chain_tip.anchored_header.block_hash(), - &MINER_BLOCK_CONSENSUS_HASH, - &MINER_BLOCK_HEADER_HASH, - ); - let result = Self::inner_compute_block_marf_root_hash( - &mut clarity_tx, - block_time, - block_txs, - chain_tip.burn_header_height, + sender: &StacksPrivateKey, + name: &str, + code: &str, + clarity_version: Option, + ) -> StacksTransaction { + let address = StacksAddress::p2pkh(false, &StacksPublicKey::from_private(sender)); + let nonce = self.nonce_counter.entry(address).or_insert(0); + let tx_bytes = make_contract_publish_versioned( + sender, + *nonce, + (code.len() * 100) as u64, + self.default_chain_id, + name, + code, + clarity_version, ); - clarity_tx.rollback_block(); - result + StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap() } - /// This is where the real MARF computation happens. - /// It is extrapolated into an _inner_ method to simplify rollback handling, - /// ensuring that rollback can be applied consistently on both success and failure - /// in the _outer_ method. 
- fn inner_compute_block_marf_root_hash( - clarity_tx: &mut ClarityTx, - block_time: u64, - block_txs: &[StacksTransaction], - burn_header_height: u32, - ) -> Result { - clarity_tx - .connection() - .as_free_transaction(|clarity_tx_conn| { - clarity_tx_conn.with_clarity_db(|db| { - db.setup_block_metadata(Some(block_time))?; - Ok(()) - }) - }) - .map_err(|e| e.to_string())?; + /// Create a contract call transaction. + /// + /// Arguments: + /// `sender`: The sender's private key. + /// `contract_addr`: The address of the contract. + /// `contract_name`: The name of the contract. + /// `function_name`: The name of the function to call. + /// `args`: The arguments to pass to the function. + /// + /// Returns: + /// A [`StacksTransaction`] representing the contract call. + /// + /// Note: The transaction fee is set to 200 micro-STX. + pub fn contract_call( + &mut self, + sender: &StacksPrivateKey, + contract_addr: &StacksAddress, + contract_name: &str, + function_name: &str, + args: &[ClarityValue], + ) -> StacksTransaction { + let address = StacksAddress::p2pkh(false, &StacksPublicKey::from_private(sender)); + let nonce = self.nonce_counter.entry(address).or_insert(0); + let tx_bytes = make_contract_call( + sender, + *nonce, + 200, + self.default_chain_id, + contract_addr, + contract_name, + function_name, + args, + ); + StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap() + } +} - StacksChainState::process_block_transactions(clarity_tx, block_txs, 0) - .map_err(|e| e.to_string())?; +/// Generates a consensus test for executing a contract function across multiple Stacks epochs. +/// +/// This macro automates both contract deployment and function invocation across different +/// epochs and Clarity versions. +/// It simplifies the setup of consensus-critical tests involving versioned smart contracts. 
+/// +/// # Behavior +/// +/// - **Deployment:** Deploys `contract_code` in each epoch specified in `deploy_epochs` +/// for every applicable [`ClarityVersion`]. +/// - **Execution:** Calls `function_name` in each epoch from `call_epochs` on all previously +/// deployed contract instances. +/// - **Structure:** Each deployment and function call is executed in its own block, ensuring +/// clear separation between transactions. +/// +/// # Arguments +/// +/// * `$name` — Name of the generated test function. +/// * `contract_name` — The name of the contract. +/// * `contract_code` — The Clarity source code for the contract. +/// * `function_name` — The public function to call. +/// * `function_args` — Function arguments, provided as a slice of [`ClarityValue`]. +/// * `deploy_epochs` — *(optional)* Epochs in which to deploy the contract. Defaults to all epochs ≥ 2.0. +/// * `call_epochs` — *(optional)* Epochs in which to call the function. Defaults to [`EPOCHS_TO_TEST`]. +/// +/// # Example +/// +/// ```rust,ignore +/// contract_call_consensus_test!( +/// my_test, +/// contract_name: "my-contract", +/// contract_code: "(define-public (get-message) (ok \"hello\"))", +/// function_name: "get-message", +/// function_args: &[], +/// ); +/// ``` +macro_rules! contract_call_consensus_test { + ( + $name:ident, + contract_name: $contract_name:expr, + contract_code: $contract_code:expr, + function_name: $function_name:expr, + function_args: $function_args:expr, + $(deploy_epochs: $deploy_epochs:expr,)? + $(call_epochs: $call_epochs:expr,)? + ) => { + #[test] + fn $name() { + // Handle deploy_epochs parameter (default to all epochs >= 2.0 if not provided) + let deploy_epochs = &StacksEpochId::ALL[1..]; + $(let deploy_epochs = $deploy_epochs;)? 
- NakamotoChainState::finish_block(clarity_tx, None, false, burn_header_height) - .map_err(|e| e.to_string())?; + // Handle call_epochs parameter (default to EPOCHS_TO_TEST if not provided) + let call_epochs = EPOCHS_TO_TEST; + $(let call_epochs = $call_epochs;)? + let contract_test = ContractConsensusTest::new( + function_name!(), + vec![], + deploy_epochs, + call_epochs, + $contract_name, + $contract_code, + $function_name, + $function_args, + ); + let result = contract_test.run(); + insta::assert_ron_snapshot!(result); + } + }; +} - Ok(clarity_tx.seal()) - } +/// Generates a consensus test for contract deployment across multiple Stacks epochs. +/// +/// This macro automates deploying a contract across different Stacks epochs and +/// Clarity versions. It is primarily used for consensus-critical testing of contract +/// deployment behavior. +/// +/// # Behavior +/// +/// - **Deployment:** Deploys `contract_code` in each epoch specified by `deploy_epochs` +/// for all applicable [`ClarityVersion`]s. +/// - **Structure:** Each deployment is executed in its own block, ensuring clear +/// separation between transactions. +/// +/// # Arguments +/// +/// * `$name` — Name of the generated test function. +/// * `contract_name` — Name of the contract being tested. +/// * `contract_code` — The Clarity source code of the contract. +/// * `deploy_epochs` — *(optional)* Epochs in which to deploy the contract. Defaults to [`EPOCHS_TO_TEST`]. +/// +/// # Example +/// +/// ```rust,ignore +/// contract_deploy_consensus_test!( +/// deploy_test, +/// contract_name: "my-contract", +/// contract_code: "(define-public (init) (ok true))", +/// ); +/// ``` +macro_rules! 
contract_deploy_consensus_test { + // Handle the case where deploy_epochs is not provided + ( + $name:ident, + contract_name: $contract_name:expr, + contract_code: $contract_code:expr, + ) => { + contract_deploy_consensus_test!( + $name, + contract_name: $contract_name, + contract_code: $contract_code, + deploy_epochs: EPOCHS_TO_TEST, + ); + }; + ( + $name:ident, + contract_name: $contract_name:expr, + contract_code: $contract_code:expr, + deploy_epochs: $deploy_epochs:expr, + ) => { + contract_call_consensus_test!( + $name, + contract_name: $contract_name, + contract_code: $contract_code, + function_name: "", // No function calls, just deploys + function_args: &[], // No function calls, just deploys + deploy_epochs: $deploy_epochs, + call_epochs: &[], // No function calls, just deploys + ); + }; } #[test] @@ -1093,7 +1388,7 @@ fn test_append_empty_blocks() { epoch_blocks.insert(*epoch, empty_test_blocks.clone()); } - let result = ConsensusTest::new(function_name!(), vec![]).run(epoch_blocks); + let result = ConsensusTest::new(function_name!(), vec![], epoch_blocks).run(); insta::assert_ron_snapshot!(result); } @@ -1137,11 +1432,10 @@ fn test_append_stx_transfers_success() { tx }) .collect(); - epoch_blocks.insert(*epoch, vec![TestBlock { transactions }]); } - let result = ConsensusTest::new(function_name!(), initial_balances).run(epoch_blocks); + let result = ConsensusTest::new(function_name!(), initial_balances, epoch_blocks).run(); insta::assert_ron_snapshot!(result); } diff --git a/stackslib/src/chainstate/tests/mod.rs b/stackslib/src/chainstate/tests/mod.rs index 7b2dcfdb5cc..02df4fefc24 100644 --- a/stackslib/src/chainstate/tests/mod.rs +++ b/stackslib/src/chainstate/tests/mod.rs @@ -16,6 +16,12 @@ pub mod consensus; use std::fs; +use clarity::consts::{ + PEER_VERSION_EPOCH_1_0, PEER_VERSION_EPOCH_2_0, PEER_VERSION_EPOCH_2_05, + PEER_VERSION_EPOCH_2_1, PEER_VERSION_EPOCH_2_2, PEER_VERSION_EPOCH_2_3, PEER_VERSION_EPOCH_2_4, + PEER_VERSION_EPOCH_2_5, 
PEER_VERSION_EPOCH_3_0, PEER_VERSION_EPOCH_3_1, PEER_VERSION_EPOCH_3_2, + PEER_VERSION_EPOCH_3_3, STACKS_EPOCH_MAX, +}; use clarity::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, StacksAddress, StacksBlockId, }; @@ -53,9 +59,12 @@ use crate::chainstate::stacks::boot::test::{get_parent_tip, make_pox_4_lockup_ch use crate::chainstate::stacks::db::{StacksChainState, *}; use crate::chainstate::stacks::tests::*; use crate::chainstate::stacks::{Error as ChainstateError, StacksMicroblockHeader, *}; -use crate::core::{EpochList, StacksEpoch, StacksEpochExtension, BOOT_BLOCK_HASH}; +use crate::core::{ + EpochList, StacksEpoch, StacksEpochExtension, BLOCK_LIMIT_MAINNET_21, BOOT_BLOCK_HASH, +}; use crate::net::relay::Relayer; use crate::net::test::TestEventObserver; +use crate::net::tests::NakamotoBootPlan; use crate::util_lib::boot::{boot_code_test_addr, boot_code_tx_auth}; use crate::util_lib::signed_structured_data::pox4::{ make_pox_4_signer_key_signature, Pox4SignatureTopic, @@ -363,55 +372,122 @@ impl<'a> TestChainstate<'a> { } } - // Advances a TestChainstate to the Nakamoto epoch - pub fn advance_to_nakamoto_epoch(&mut self, private_key: &StacksPrivateKey, nonce: &mut usize) { - let addr = StacksAddress::p2pkh(false, &StacksPublicKey::from_private(private_key)); - let default_pox_addr = - PoxAddress::from_legacy(AddressHashMode::SerializeP2PKH, addr.bytes().clone()); + /// Advances the chainstate to the specified epoch boundary by creating a tenure change block per burn block height. + /// Panics if already past the target epoch activation height. 
+ pub fn advance_to_epoch_boundary( + &mut self, + private_key: &StacksPrivateKey, + target_epoch: StacksEpochId, + ) { + let mut burn_block_height = self.get_burn_block_height(); + let mut target_height = self + .config + .epochs + .as_ref() + .expect("Epoch configuration missing") + .iter() + .find(|e| e.epoch_id == target_epoch) + .expect("Target epoch not found") + .start_height; + + assert!( + burn_block_height <= target_height, + "Already advanced past target epoch ({target_epoch}) activation height ({target_height}). Current burn block height: {burn_block_height}." + ); + target_height = target_height.saturating_sub(1); - let mut sortition_height = self.get_burn_block_height(); - debug!("\n\n======================"); - debug!("PoxConstants = {:#?}", &self.config.burnchain.pox_constants); - debug!("tip = {sortition_height}"); - debug!("========================\n\n"); + debug!("Advancing to epoch {target_epoch} boundary at {target_height}. Current burn block height: {burn_block_height}"); let epoch_25_height = self .config .epochs .as_ref() - .unwrap() + .expect("Epoch configuration missing") .iter() - .find(|e| e.epoch_id == StacksEpochId::Epoch25) - .unwrap() - .start_height; + .find_map(|e| { + if e.epoch_id == StacksEpochId::Epoch25 { + Some(e.start_height) + } else { + None + } + }) + .unwrap_or(u64::MAX); let epoch_30_height = self .config .epochs .as_ref() - .unwrap() + .expect("Epoch configuration missing") .iter() - .find(|e| e.epoch_id == StacksEpochId::Epoch30) - .unwrap() - .start_height; + .find_map(|e| { + if e.epoch_id == StacksEpochId::Epoch30 { + Some(e.start_height) + } else { + None + } + }) + .unwrap_or(u64::MAX); - // Advance to just past PoX-4 instantiation - let mut blocks_produced = false; - while sortition_height <= epoch_25_height { - self.tenure_with_txs(&[], nonce); - sortition_height = self.get_burn_block_height(); - blocks_produced = true; + let epoch_30_reward_cycle = self + .config + .burnchain + 
.block_height_to_reward_cycle(epoch_30_height) + .unwrap_or(u64::MAX); + + let mut mined_pox_4_lockup = false; + while burn_block_height < target_height { + if burn_block_height < epoch_30_height - 1 { + let current_reward_cycle = self.get_reward_cycle(); + // Before we can mine pox 4 lockup, make sure we mine at least one block. + // If we have mined the lockup already, just mine a regular tenure + // Note, we cannot mine a pox 4 lockup, if it isn't activated yet + // And must mine it in the reward cycle directly prior to the Nakamoto + // activated reward cycle + if !mined_pox_4_lockup + && burn_block_height > self.config.current_block + && burn_block_height + 1 >= epoch_25_height + && current_reward_cycle + 1 == epoch_30_reward_cycle + { + debug!("Mining pox-4 lockup"); + self.mine_pox_4_lockup(private_key); + mined_pox_4_lockup = true; + } else { + debug!("Mining pre-nakamoto tenure"); + let stacks_block = self.mine_pre_nakamoto_tenure_with_txs(&[]); + let (stacks_tip_ch, stacks_tip_bh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortdb().conn()) + .expect("Failed to get canonical chain tip"); + let stacks_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); + assert_eq!(stacks_block, stacks_tip); + } + } else { + debug!("Mining post-nakamoto tenure"); + self.mine_nakamoto_tenure(); + } + burn_block_height = self.get_burn_block_height(); } + } - // Ensure at least one block is produced before PoX-4 lockups - if !blocks_produced { - self.tenure_with_txs(&[], nonce); - sortition_height = self.get_burn_block_height(); - } + /// This must be called after pox 4 activation and at or past the Epoch 2.5 boundary + fn mine_pox_4_lockup(&mut self, private_key: &StacksPrivateKey) { + let sortition_height = self.get_burn_block_height(); + let epoch_25_height = self + .config + .epochs + .as_ref() + .unwrap() + .iter() + .find(|e| e.epoch_id == StacksEpochId::Epoch25) + .unwrap() + .start_height; + assert!( + sortition_height + 1 >= epoch_25_height, + 
"Cannot mine pox-4 lockups if not at or past Epoch 2.5 boundary"
+ );

- debug!("\n\n======================");
- debug!("Make PoX-4 lockups");
- debug!("========================\n\n");

+ let addr = StacksAddress::p2pkh(false, &StacksPublicKey::from_private(private_key));
+ let default_pox_addr =
+ PoxAddress::from_legacy(AddressHashMode::SerializeP2PKH, addr.bytes().clone());

 let reward_cycle = self
 .config
@@ -460,49 +536,69 @@ impl<'a> TestChainstate<'a> {
 })
 .collect();

- let stacks_block = self.tenure_with_txs(&stack_txs, nonce);
+ let stacks_block = self.mine_pre_nakamoto_tenure_with_txs(&stack_txs);
 let (stacks_tip_ch, stacks_tip_bh) =
 SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortdb().conn()).unwrap();
 let stacks_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh);
 assert_eq!(stacks_block, stacks_tip);
+ }

- debug!("\n\n======================");
- debug!("Advance to the Prepare Phase");
- debug!("========================\n\n");
-
- // Advance to the prepare phase
- while !self.config.burnchain.is_in_prepare_phase(sortition_height) {
- let (stacks_tip_ch, stacks_tip_bh) =
- SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortdb().conn()).unwrap();
- let old_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh);
- let stacks_block = self.tenure_with_txs(&[], nonce);
- let (stacks_tip_ch, stacks_tip_bh) =
- SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortdb().conn()).unwrap();
- let stacks_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh);
- assert_ne!(old_tip, stacks_tip);
- sortition_height = self.get_burn_block_height();
- }
+ /// Mines a new bitcoin block with a new tenure block-commit, using it to mine the start of a new Stacks Nakamoto tenure.
+ /// It will subsequently mine the coinbase and tenure change Stacks txs.
+ /// NOTE: mines a total of one Bitcoin block and one Stacks block.
+ fn mine_nakamoto_tenure(&mut self) { + let burn_block_height = self.get_burn_block_height(); + let (burn_ops, mut tenure_change, miner_key) = + self.begin_nakamoto_tenure(TenureChangeCause::BlockFound); + let (_, header_hash, consensus_hash) = self.next_burnchain_block(burn_ops); + let vrf_proof = self.make_nakamoto_vrf_proof(miner_key); + + tenure_change.tenure_consensus_hash = consensus_hash.clone(); + tenure_change.burn_view_consensus_hash = consensus_hash.clone(); + let tenure_change_tx = self.miner.make_nakamoto_tenure_change(tenure_change); + let coinbase_tx = self.miner.make_nakamoto_coinbase(None, vrf_proof); + + let blocks_and_sizes = self + .make_nakamoto_tenure(tenure_change_tx, coinbase_tx, Some(0)) + .unwrap(); + assert_eq!( + blocks_and_sizes.len(), + 1, + "Mined more than one Nakamoto block" + ); + } - debug!("\n\n======================"); - debug!("Advance to Epoch 3.0"); - debug!("========================\n\n"); - - // Advance to Epoch 3.0 - while sortition_height < epoch_30_height - 1 { - let (stacks_tip_ch, stacks_tip_bh) = - SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortdb().conn()).unwrap(); - let old_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); - self.tenure_with_txs(&[], nonce); - let (stacks_tip_ch, stacks_tip_bh) = - SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortdb().conn()).unwrap(); - let stacks_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); - assert_ne!(old_tip, stacks_tip); - sortition_height = self.get_burn_block_height(); + /// Advance a TestChainstate into the provided epoch. + /// Does nothing if chainstate is already in the target epoch. Panics if it is past the epoch. 
+ pub fn advance_into_epoch( + &mut self, + private_key: &StacksPrivateKey, + target_epoch: StacksEpochId, + ) { + let burn_block_height = self.get_burn_block_height(); + let target_height = self + .config + .epochs + .as_ref() + .expect("Epoch configuration missing") + .iter() + .find(|e| e.epoch_id == target_epoch) + .expect("Target epoch not found") + .start_height; + assert!(burn_block_height <= target_height, "We cannot advance backwards. Examine your bootstrap setup. Current burn block height: {burn_block_height}. Target height: {target_height}"); + // Don't bother advancing to the boundary if we are already at it. + if burn_block_height < target_height { + self.advance_to_epoch_boundary(private_key, target_epoch); + if target_epoch < StacksEpochId::Epoch30 { + self.mine_pre_nakamoto_tenure_with_txs(&[]); + } else { + self.mine_nakamoto_tenure(); + } } - - debug!("\n\n======================"); - debug!("Welcome to Nakamoto!"); - debug!("========================\n\n"); + let burn_block_height = self.get_burn_block_height(); + debug!( + "Advanced into epoch {target_epoch}. Current burn block height: {burn_block_height}" + ); } pub fn get_burnchain_db(&self, readwrite: bool) -> BurnchainDB { @@ -1044,21 +1140,33 @@ impl<'a> TestChainstate<'a> { self.stacks_node.as_ref().unwrap() } - /// Make a tenure with the given transactions. Creates a coinbase tx with the given nonce, and then increments - /// the provided reference. - pub fn tenure_with_txs( + /// Mines a pre-naka tenure with the given transactions. Creates a coinbase tx. Processes the tenure + /// NOTE: mines one burnchain block and one Stacks block. 
+ fn mine_pre_nakamoto_tenure_with_txs(&mut self, txs: &[StacksTransaction]) -> StacksBlockId { + let (burn_ops, stacks_block, microblocks) = self.make_pre_nakamoto_tenure_with_txs(txs); + + let (_, _, consensus_hash) = self.next_burnchain_block(burn_ops); + self.process_stacks_epoch_at_tip(&stacks_block, µblocks); + + StacksBlockId::new(&consensus_hash, &stacks_block.block_hash()) + } + + /// Make a pre-naka tenure with the given transactions + pub fn make_pre_nakamoto_tenure_with_txs( &mut self, txs: &[StacksTransaction], - coinbase_nonce: &mut usize, - ) -> StacksBlockId { + ) -> ( + Vec, + StacksBlock, + Vec, + ) { let microblock_privkey = self.miner.next_microblock_privkey(); let microblock_pubkeyhash = Hash160::from_node_public_key(&StacksPublicKey::from_private(µblock_privkey)); let tip = SortitionDB::get_canonical_burn_chain_tip(self.sortdb.as_ref().unwrap().conn()) .unwrap(); let burnchain = self.config.burnchain.clone(); - - let (burn_ops, stacks_block, microblocks) = self.make_tenure( + self.make_tenure( |ref mut miner, ref mut sortdb, ref mut chainstate, @@ -1066,7 +1174,7 @@ impl<'a> TestChainstate<'a> { ref parent_opt, ref parent_microblock_header_opt| { let parent_tip = get_parent_tip(parent_opt, chainstate, sortdb); - let coinbase_tx = make_coinbase(miner, *coinbase_nonce); + let coinbase_tx = make_coinbase(miner, tip.block_height.try_into().unwrap()); let mut block_txs = vec![coinbase_tx]; block_txs.extend_from_slice(txs); @@ -1089,14 +1197,7 @@ impl<'a> TestChainstate<'a> { .unwrap(); (anchored_block, vec![]) }, - ); - - let (_, _, consensus_hash) = self.next_burnchain_block(burn_ops); - self.process_stacks_epoch_at_tip(&stacks_block, µblocks); - - *coinbase_nonce += 1; - - StacksBlockId::new(&consensus_hash, &stacks_block.block_hash()) + ) } /// Make a tenure, using `tenure_builder` to generate a Stacks block and a list of @@ -1518,4 +1619,308 @@ impl<'a> TestChainstate<'a> { self.stacks_node = Some(stacks_node); Ok(block_data) } + + /// Create 
an epoch list for testing Epoch 2.5 onwards + pub fn epoch_2_5_onwards(first_burnchain_height: u64) -> EpochList { + info!( + "StacksEpoch 2.5 onwards unit test first_burnchain_height = {first_burnchain_height}" + ); + EpochList::new(&[ + StacksEpoch { + epoch_id: StacksEpochId::Epoch10, + start_height: 0, + end_height: 0, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_1_0, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch20, + start_height: 0, + end_height: 0, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_0, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch2_05, + start_height: 0, + end_height: 0, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_05, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch21, + start_height: 0, + end_height: 0, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_1, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch22, + start_height: 0, + end_height: 0, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_2, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch23, + start_height: 0, + end_height: 0, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_3, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch24, + start_height: 0, + end_height: 0, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_4, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch25, + start_height: 0, + end_height: first_burnchain_height, + block_limit: BLOCK_LIMIT_MAINNET_21.clone(), + network_epoch: PEER_VERSION_EPOCH_2_5, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch30, + start_height: first_burnchain_height, + end_height: first_burnchain_height + 1, + block_limit: BLOCK_LIMIT_MAINNET_21.clone(), + network_epoch: PEER_VERSION_EPOCH_3_0, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch31, + start_height: first_burnchain_height 
+ 1, + end_height: first_burnchain_height + 2, + block_limit: BLOCK_LIMIT_MAINNET_21.clone(), + network_epoch: PEER_VERSION_EPOCH_3_1, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch32, + start_height: first_burnchain_height + 2, + end_height: first_burnchain_height + 3, + block_limit: BLOCK_LIMIT_MAINNET_21.clone(), + network_epoch: PEER_VERSION_EPOCH_3_2, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch33, + start_height: first_burnchain_height + 3, + end_height: STACKS_EPOCH_MAX, + block_limit: BLOCK_LIMIT_MAINNET_21.clone(), + network_epoch: PEER_VERSION_EPOCH_3_3, + }, + ]) + } + + pub fn all_epochs(first_burnchain_height: u64) -> EpochList { + info!("StacksEpoch all_epochs first_burn_height = {first_burnchain_height}"); + + EpochList::new(&[ + StacksEpoch { + epoch_id: StacksEpochId::Epoch10, + start_height: 0, + end_height: first_burnchain_height, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_1_0, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch20, + start_height: first_burnchain_height, + end_height: first_burnchain_height + 1, + block_limit: BLOCK_LIMIT_MAINNET_21.clone(), + network_epoch: PEER_VERSION_EPOCH_2_0, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch2_05, + start_height: first_burnchain_height + 1, + end_height: first_burnchain_height + 2, + block_limit: BLOCK_LIMIT_MAINNET_21.clone(), + network_epoch: PEER_VERSION_EPOCH_2_05, + }, + StacksEpoch { + // Give a few extra blocks for pre naka blocks + // Since we may want to create multiple stacks blocks + // per epoch (especially for clarity version testing) + epoch_id: StacksEpochId::Epoch21, + start_height: first_burnchain_height + 2, + end_height: first_burnchain_height + 4, + block_limit: BLOCK_LIMIT_MAINNET_21.clone(), + network_epoch: PEER_VERSION_EPOCH_2_1, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch22, + start_height: first_burnchain_height + 4, + end_height: first_burnchain_height + 8, + block_limit: 
BLOCK_LIMIT_MAINNET_21.clone(), + network_epoch: PEER_VERSION_EPOCH_2_2, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch23, + start_height: first_burnchain_height + 8, + end_height: first_burnchain_height + 12, + block_limit: BLOCK_LIMIT_MAINNET_21.clone(), + network_epoch: PEER_VERSION_EPOCH_2_3, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch24, + start_height: first_burnchain_height + 12, + end_height: first_burnchain_height + 16, + block_limit: BLOCK_LIMIT_MAINNET_21.clone(), + network_epoch: PEER_VERSION_EPOCH_2_4, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch25, + // Give an extra couple burn blocks for epoch 25 to activate pox-4 + start_height: first_burnchain_height + 16, + end_height: first_burnchain_height + 22, + block_limit: BLOCK_LIMIT_MAINNET_21.clone(), + network_epoch: PEER_VERSION_EPOCH_2_5, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch30, + start_height: first_burnchain_height + 22, + end_height: first_burnchain_height + 23, + block_limit: BLOCK_LIMIT_MAINNET_21.clone(), + network_epoch: PEER_VERSION_EPOCH_3_0, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch31, + start_height: first_burnchain_height + 23, + end_height: first_burnchain_height + 24, + block_limit: BLOCK_LIMIT_MAINNET_21.clone(), + network_epoch: PEER_VERSION_EPOCH_3_1, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch32, + start_height: first_burnchain_height + 24, + end_height: first_burnchain_height + 25, + block_limit: BLOCK_LIMIT_MAINNET_21.clone(), + network_epoch: PEER_VERSION_EPOCH_3_2, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch33, + start_height: first_burnchain_height + 25, + end_height: STACKS_EPOCH_MAX, + block_limit: BLOCK_LIMIT_MAINNET_21.clone(), + network_epoch: PEER_VERSION_EPOCH_3_3, + }, + ]) + } +} + +#[test] +/// Tests that we can instantiate a chainstate from nothing and advance sequentially through every epoch +fn advance_through_all_epochs() { + let privk = StacksPrivateKey::random(); + let mut boot_plan = 
NakamotoBootPlan::new(function_name!()) + .with_pox_constants(7, 1) + .with_private_key(privk.clone()); + let first_burnchain_height = (boot_plan.pox_constants.pox_4_activation_height + + boot_plan.pox_constants.reward_cycle_length + + 1) as u64; + + let epochs = TestChainstate::all_epochs(first_burnchain_height); + boot_plan = boot_plan.with_epochs(epochs); + let mut chainstate = boot_plan.to_chainstate(None, Some(first_burnchain_height)); + let burn_block_height = chainstate.get_burn_block_height(); + let current_epoch = + SortitionDB::get_stacks_epoch(chainstate.sortdb().conn(), burn_block_height) + .unwrap() + .unwrap() + .epoch_id; + assert_eq!(current_epoch, StacksEpochId::Epoch20); + + // Make sure we can advance through every single epoch. + for target_epoch in [ + StacksEpochId::Epoch2_05, + StacksEpochId::Epoch21, + StacksEpochId::Epoch22, + StacksEpochId::Epoch23, + StacksEpochId::Epoch24, + StacksEpochId::Epoch25, + StacksEpochId::Epoch30, + StacksEpochId::Epoch31, + StacksEpochId::Epoch32, + StacksEpochId::Epoch33, + ] { + chainstate.advance_to_epoch_boundary(&privk, target_epoch); + let burn_block_height = chainstate.get_burn_block_height(); + let current_epoch = + SortitionDB::get_stacks_epoch(chainstate.sortdb().conn(), burn_block_height) + .unwrap() + .unwrap() + .epoch_id; + assert!(current_epoch < target_epoch); + let next_epoch = + SortitionDB::get_stacks_epoch(chainstate.sortdb().conn(), burn_block_height + 1) + .unwrap() + .unwrap() + .epoch_id; + assert_eq!(next_epoch, target_epoch); + } +} + +#[test] +/// Tests that we can instantiate a chainstate from nothing and +/// bootstrap to nakamoto +fn advance_to_nakamoto_bootstrapped() { + let privk = StacksPrivateKey::random(); + let mut boot_plan = NakamotoBootPlan::new(function_name!()) + .with_pox_constants(7, 1) + .with_private_key(privk.clone()); + let epochs = TestChainstate::epoch_2_5_onwards( + (boot_plan.pox_constants.pox_4_activation_height + + boot_plan.pox_constants.reward_cycle_length 
+ + 1) as u64, + ); + boot_plan = boot_plan.with_epochs(epochs); + let mut chainstate = boot_plan.to_chainstate(None, None); + chainstate.advance_to_epoch_boundary(&privk, StacksEpochId::Epoch30); + let burn_block_height = chainstate.get_burn_block_height(); + let current_epoch = + SortitionDB::get_stacks_epoch(chainstate.sortdb().conn(), burn_block_height) + .unwrap() + .unwrap() + .epoch_id; + assert_eq!(current_epoch, StacksEpochId::Epoch25); + let next_epoch = + SortitionDB::get_stacks_epoch(chainstate.sortdb().conn(), burn_block_height + 1) + .unwrap() + .unwrap() + .epoch_id; + assert_eq!(next_epoch, StacksEpochId::Epoch30); +} + +#[test] +/// Tests that we can instantiate a chainstate from nothing and +/// bootstrap directly from nakamoto and across it +fn advance_through_nakamoto_bootstrapped() { + let privk = StacksPrivateKey::random(); + let mut boot_plan = NakamotoBootPlan::new(function_name!()) + .with_pox_constants(7, 1) + .with_private_key(privk.clone()); + let epochs = TestChainstate::epoch_2_5_onwards( + (boot_plan.pox_constants.pox_4_activation_height + + boot_plan.pox_constants.reward_cycle_length + + 1) as u64, + ); + let activation_height = boot_plan.pox_constants.pox_4_activation_height; + boot_plan = boot_plan.with_epochs(epochs); + let mut chainstate = boot_plan.to_chainstate(None, Some(activation_height.into())); + // Make sure we can advance through every single epoch. 
+ chainstate.advance_to_epoch_boundary(&privk, StacksEpochId::Epoch33); + let burn_block_height = chainstate.get_burn_block_height(); + let current_epoch = + SortitionDB::get_stacks_epoch(chainstate.sortdb().conn(), burn_block_height) + .unwrap() + .unwrap() + .epoch_id; + assert_eq!(current_epoch, StacksEpochId::Epoch32); + let next_epoch = + SortitionDB::get_stacks_epoch(chainstate.sortdb().conn(), burn_block_height + 1) + .unwrap() + .unwrap() + .epoch_id; + assert_eq!(next_epoch, StacksEpochId::Epoch33); } diff --git a/stackslib/src/chainstate/tests/snapshots/blockstack_lib__chainstate__tests__consensus__append_stx_transfers_success.snap b/stackslib/src/chainstate/tests/snapshots/blockstack_lib__chainstate__tests__consensus__append_stx_transfers_success.snap index 4e4f738e68f..afc4d2aa573 100644 --- a/stackslib/src/chainstate/tests/snapshots/blockstack_lib__chainstate__tests__consensus__append_stx_transfers_success.snap +++ b/stackslib/src/chainstate/tests/snapshots/blockstack_lib__chainstate__tests__consensus__append_stx_transfers_success.snap @@ -4,7 +4,7 @@ expression: result --- [ Success(ExpectedBlockOutput( - marf_hash: "095b12065b5aa5f0cc29dc004b16507be9fb7964a1d656e891a232931db1810a", + marf_hash: "82d8f3918bafbdc48320936b9c949749539ecc30eb233ab25eefd56541c1b4c6", evaluated_epoch: Epoch32, transactions: [ ExpectedTransactionOutput( @@ -62,7 +62,7 @@ expression: result ), )), Success(ExpectedBlockOutput( - marf_hash: "180457415b54799e13905b076af575c96c77614e13e26f8211aa32e7cb1e893e", + marf_hash: "9b89306b88a2f5c4918c42fbfc3ade6cf3835755e49c755734119a0ac28c995f", evaluated_epoch: Epoch33, transactions: [ ExpectedTransactionOutput( diff --git a/stackslib/src/chainstate/tests/snapshots/blockstack_lib__chainstate__tests__consensus__chainstate_error_expression_stack_depth_too_deep.snap b/stackslib/src/chainstate/tests/snapshots/blockstack_lib__chainstate__tests__consensus__chainstate_error_expression_stack_depth_too_deep.snap index b35f1d4c795..6f94a6c104c 
100644 --- a/stackslib/src/chainstate/tests/snapshots/blockstack_lib__chainstate__tests__consensus__chainstate_error_expression_stack_depth_too_deep.snap +++ b/stackslib/src/chainstate/tests/snapshots/blockstack_lib__chainstate__tests__consensus__chainstate_error_expression_stack_depth_too_deep.snap @@ -3,11 +3,11 @@ source: stackslib/src/chainstate/tests/consensus.rs expression: result --- [ - Failure("Invalid Stacks block 4283815e1f66aa52f455cfe8a415c8ff1c3d28794b83a08384534e30650554e2: ClarityError(Parse(ParseError { err: ExpressionStackDepthTooDeep, pre_expressions: None, diagnostic: Diagnostic { level: Error, message: \"AST has too deep of an expression nesting. The maximum stack depth is 64\", spans: [], suggestion: None } }))"), - Failure("Invalid Stacks block b97c37a0da184764a8eb0769d6c8a9af0a6c98b0e6c950423d324286a349bf0c: ClarityError(Parse(ParseError { err: ExpressionStackDepthTooDeep, pre_expressions: None, diagnostic: Diagnostic { level: Error, message: \"AST has too deep of an expression nesting. The maximum stack depth is 64\", spans: [], suggestion: None } }))"), - Failure("Invalid Stacks block 121b03507be0248b0abd05ccf93403e3c50bd8969e8b1c4d2d3f215fd6243576: ClarityError(Parse(ParseError { err: ExpressionStackDepthTooDeep, pre_expressions: None, diagnostic: Diagnostic { level: Error, message: \"AST has too deep of an expression nesting. The maximum stack depth is 64\", spans: [], suggestion: None } }))"), - Failure("Invalid Stacks block 0cb105df328cc9565f4817071f27374a10d73f1fd88a2a91ccf2885b41bef3e3: ClarityError(Parse(ParseError { err: ExpressionStackDepthTooDeep, pre_expressions: None, diagnostic: Diagnostic { level: Error, message: \"AST has too deep of an expression nesting. 
The maximum stack depth is 64\", spans: [], suggestion: None } }))"), - Failure("Invalid Stacks block 516690436438389acc8b58ea7bbbd0a04d844f4f69f8d1ea0500458d9fa8e4d7: ClarityError(Parse(ParseError { err: ExpressionStackDepthTooDeep, pre_expressions: None, diagnostic: Diagnostic { level: Error, message: \"AST has too deep of an expression nesting. The maximum stack depth is 64\", spans: [], suggestion: None } }))"), - Failure("Invalid Stacks block 96d7c8d5ce7b5452a16079b20a97effd3d3c839e0d97af86484df1f5e48c2b74: ClarityError(Parse(ParseError { err: ExpressionStackDepthTooDeep, pre_expressions: None, diagnostic: Diagnostic { level: Error, message: \"AST has too deep of an expression nesting. The maximum stack depth is 64\", spans: [], suggestion: None } }))"), - Failure("Invalid Stacks block 57ed3ad80ddb289a20f936cc27ad6c07640b5f3f5acfbfcda39a900dc38735f3: ClarityError(Parse(ParseError { err: ExpressionStackDepthTooDeep, pre_expressions: None, diagnostic: Diagnostic { level: Error, message: \"AST has too deep of an expression nesting. The maximum stack depth is 64\", spans: [], suggestion: None } }))"), + Failure("Invalid Stacks block 89ffe2098a1819614dbe39713ee71a253ddb084a3726ace97687a5d2a98e5d3f: ClarityError(Parse(ParseError { err: ExpressionStackDepthTooDeep, pre_expressions: None, diagnostic: Diagnostic { level: Error, message: \"AST has too deep of an expression nesting. The maximum stack depth is 64\", spans: [], suggestion: None } }))"), + Failure("Invalid Stacks block 11fd9c1a6704fe0308238004527fbf085b6dd5cb19911042d1f2355085fe26d7: ClarityError(Parse(ParseError { err: ExpressionStackDepthTooDeep, pre_expressions: None, diagnostic: Diagnostic { level: Error, message: \"AST has too deep of an expression nesting. 
The maximum stack depth is 64\", spans: [], suggestion: None } }))"), + Failure("Invalid Stacks block 5922d7841de6eb9e4ec86c9f7b2dbaddbb557251557fc002d706f01e3f289685: ClarityError(Parse(ParseError { err: ExpressionStackDepthTooDeep, pre_expressions: None, diagnostic: Diagnostic { level: Error, message: \"AST has too deep of an expression nesting. The maximum stack depth is 64\", spans: [], suggestion: None } }))"), + Failure("Invalid Stacks block be26e479f7bc6acb1b0c7fe513d69b9a6c5ee23d5a2d02cdb5b5f31642dbf269: ClarityError(Parse(ParseError { err: ExpressionStackDepthTooDeep, pre_expressions: None, diagnostic: Diagnostic { level: Error, message: \"AST has too deep of an expression nesting. The maximum stack depth is 64\", spans: [], suggestion: None } }))"), + Failure("Invalid Stacks block 4b5c7c4849c322772ef3249251f5b21176c25fd3259318889e811db72ebcb389: ClarityError(Parse(ParseError { err: ExpressionStackDepthTooDeep, pre_expressions: None, diagnostic: Diagnostic { level: Error, message: \"AST has too deep of an expression nesting. The maximum stack depth is 64\", spans: [], suggestion: None } }))"), + Failure("Invalid Stacks block 54704f76511cab356262297b98acd78acbebcc1506ada34c926418d89079cb04: ClarityError(Parse(ParseError { err: ExpressionStackDepthTooDeep, pre_expressions: None, diagnostic: Diagnostic { level: Error, message: \"AST has too deep of an expression nesting. The maximum stack depth is 64\", spans: [], suggestion: None } }))"), + Failure("Invalid Stacks block ec782f5cc2b99824b3ba3793f817b75674a40ef9c60c8895d50f428b021c4310: ClarityError(Parse(ParseError { err: ExpressionStackDepthTooDeep, pre_expressions: None, diagnostic: Diagnostic { level: Error, message: \"AST has too deep of an expression nesting. 
The maximum stack depth is 64\", spans: [], suggestion: None } }))"), ] diff --git a/stackslib/src/chainstate/tests/snapshots/blockstack_lib__chainstate__tests__consensus__successfully_deploy_and_call.snap b/stackslib/src/chainstate/tests/snapshots/blockstack_lib__chainstate__tests__consensus__successfully_deploy_and_call.snap index 9827a42a1df..055bf8f7443 100644 --- a/stackslib/src/chainstate/tests/snapshots/blockstack_lib__chainstate__tests__consensus__successfully_deploy_and_call.snap +++ b/stackslib/src/chainstate/tests/snapshots/blockstack_lib__chainstate__tests__consensus__successfully_deploy_and_call.snap @@ -4,11 +4,291 @@ expression: result --- [ Success(ExpectedBlockOutput( - marf_hash: "a590886094b514abf4406e1a5c3a26978e4b7e8dd159aa310382be65b632db1a", - evaluated_epoch: Epoch30, + marf_hash: "c740de32d7b9273518899f798a6c66ea543dc67c4df2c97f428e37cf86f36857", + evaluated_epoch: Epoch20, transactions: [ ExpectedTransactionOutput( - tx: "SmartContract(name: foo_contract-Epoch3_0-Clarity1, code_body: [..], clarity_version: Some(Clarity1))", + tx: "SmartContract(name: foo_contract-Epoch2_0-Clarity1, code_body: [..], clarity_version: None)", + vm_error: "None [NON-CONSENSUS BREAKING]", + return_type: Response(ResponseData( + committed: true, + data: Bool(true), + )), + cost: ExecutionCost( + write_length: 121, + write_count: 2, + read_length: 1, + read_count: 1, + runtime: 1175000, + ), + ), + ], + total_block_cost: ExecutionCost( + write_length: 121, + write_count: 2, + read_length: 1, + read_count: 1, + runtime: 1175000, + ), + )), + Success(ExpectedBlockOutput( + marf_hash: "c197b006221151c65e298beaf88adcc9532f8b494b7f7564b8ef60fb217a4eb5", + evaluated_epoch: Epoch2_05, + transactions: [ + ExpectedTransactionOutput( + tx: "SmartContract(name: foo_contract-Epoch2_05-Clarity1, code_body: [..], clarity_version: None)", + vm_error: "None [NON-CONSENSUS BREAKING]", + return_type: Response(ResponseData( + committed: true, + data: Bool(true), + )), + cost: 
ExecutionCost( + write_length: 121, + write_count: 2, + read_length: 1, + read_count: 1, + runtime: 315491, + ), + ), + ], + total_block_cost: ExecutionCost( + write_length: 121, + write_count: 2, + read_length: 1, + read_count: 1, + runtime: 315491, + ), + )), + Success(ExpectedBlockOutput( + marf_hash: "e568aa031e6c9a4c39dc23202fbc80a267eba760160ce288f3315dd30d9bb4a4", + evaluated_epoch: Epoch21, + transactions: [ + ExpectedTransactionOutput( + tx: "SmartContract(name: foo_contract-Epoch2_1-Clarity1, code_body: [..], clarity_version: Some(Clarity1))", + vm_error: "None [NON-CONSENSUS BREAKING]", + return_type: Response(ResponseData( + committed: true, + data: Bool(true), + )), + cost: ExecutionCost( + write_length: 121, + write_count: 2, + read_length: 1, + read_count: 1, + runtime: 11968, + ), + ), + ], + total_block_cost: ExecutionCost( + write_length: 121, + write_count: 2, + read_length: 1, + read_count: 1, + runtime: 11968, + ), + )), + Success(ExpectedBlockOutput( + marf_hash: "0d25118932d001ca3b324614c28b2de4353557df43ec0c8868904a87b1ee9f9a", + evaluated_epoch: Epoch21, + transactions: [ + ExpectedTransactionOutput( + tx: "SmartContract(name: foo_contract-Epoch2_1-Clarity2, code_body: [..], clarity_version: Some(Clarity2))", + vm_error: "None [NON-CONSENSUS BREAKING]", + return_type: Response(ResponseData( + committed: true, + data: Bool(true), + )), + cost: ExecutionCost( + write_length: 121, + write_count: 2, + read_length: 1, + read_count: 1, + runtime: 11968, + ), + ), + ], + total_block_cost: ExecutionCost( + write_length: 121, + write_count: 2, + read_length: 1, + read_count: 1, + runtime: 11968, + ), + )), + Success(ExpectedBlockOutput( + marf_hash: "211b8b332246d41b5927b4a19ba3d6af7c527dd4f619d92fde6dfcd049503b41", + evaluated_epoch: Epoch22, + transactions: [ + ExpectedTransactionOutput( + tx: "SmartContract(name: foo_contract-Epoch2_2-Clarity1, code_body: [..], clarity_version: Some(Clarity1))", + vm_error: "None [NON-CONSENSUS BREAKING]", + 
return_type: Response(ResponseData( + committed: true, + data: Bool(true), + )), + cost: ExecutionCost( + write_length: 121, + write_count: 2, + read_length: 1, + read_count: 1, + runtime: 11968, + ), + ), + ], + total_block_cost: ExecutionCost( + write_length: 121, + write_count: 2, + read_length: 1, + read_count: 1, + runtime: 11968, + ), + )), + Success(ExpectedBlockOutput( + marf_hash: "4dccf47885edf1f17736a78f772717bea62ea5136e99a981114eacf4ee1c292b", + evaluated_epoch: Epoch22, + transactions: [ + ExpectedTransactionOutput( + tx: "SmartContract(name: foo_contract-Epoch2_2-Clarity2, code_body: [..], clarity_version: Some(Clarity2))", + vm_error: "None [NON-CONSENSUS BREAKING]", + return_type: Response(ResponseData( + committed: true, + data: Bool(true), + )), + cost: ExecutionCost( + write_length: 121, + write_count: 2, + read_length: 1, + read_count: 1, + runtime: 11968, + ), + ), + ], + total_block_cost: ExecutionCost( + write_length: 121, + write_count: 2, + read_length: 1, + read_count: 1, + runtime: 11968, + ), + )), + Success(ExpectedBlockOutput( + marf_hash: "f06090bf6277fba5c9e95d3ccea4fdf8d2efe978e435684c38d1216f0f378ff9", + evaluated_epoch: Epoch23, + transactions: [ + ExpectedTransactionOutput( + tx: "SmartContract(name: foo_contract-Epoch2_3-Clarity1, code_body: [..], clarity_version: Some(Clarity1))", + vm_error: "None [NON-CONSENSUS BREAKING]", + return_type: Response(ResponseData( + committed: true, + data: Bool(true), + )), + cost: ExecutionCost( + write_length: 121, + write_count: 2, + read_length: 1, + read_count: 1, + runtime: 11968, + ), + ), + ], + total_block_cost: ExecutionCost( + write_length: 121, + write_count: 2, + read_length: 1, + read_count: 1, + runtime: 11968, + ), + )), + Success(ExpectedBlockOutput( + marf_hash: "af69aa70ce09a12e0bac066b84f24f103bb3cd4cd49b45b2711fa25e90b70c25", + evaluated_epoch: Epoch23, + transactions: [ + ExpectedTransactionOutput( + tx: "SmartContract(name: foo_contract-Epoch2_3-Clarity2, code_body: [..], 
clarity_version: Some(Clarity2))", + vm_error: "None [NON-CONSENSUS BREAKING]", + return_type: Response(ResponseData( + committed: true, + data: Bool(true), + )), + cost: ExecutionCost( + write_length: 121, + write_count: 2, + read_length: 1, + read_count: 1, + runtime: 11968, + ), + ), + ], + total_block_cost: ExecutionCost( + write_length: 121, + write_count: 2, + read_length: 1, + read_count: 1, + runtime: 11968, + ), + )), + Success(ExpectedBlockOutput( + marf_hash: "bf8243ccf29da7518f4e4f1cb894c17814463806f7ccd9d693e41175329bf017", + evaluated_epoch: Epoch24, + transactions: [ + ExpectedTransactionOutput( + tx: "SmartContract(name: foo_contract-Epoch2_4-Clarity1, code_body: [..], clarity_version: Some(Clarity1))", + vm_error: "None [NON-CONSENSUS BREAKING]", + return_type: Response(ResponseData( + committed: true, + data: Bool(true), + )), + cost: ExecutionCost( + write_length: 121, + write_count: 2, + read_length: 1, + read_count: 1, + runtime: 11968, + ), + ), + ], + total_block_cost: ExecutionCost( + write_length: 121, + write_count: 2, + read_length: 1, + read_count: 1, + runtime: 11968, + ), + )), + Success(ExpectedBlockOutput( + marf_hash: "7a009ae7dfd79b021f74d17d1b1ae9015b9f1d8984734099b9d7c1388d254316", + evaluated_epoch: Epoch24, + transactions: [ + ExpectedTransactionOutput( + tx: "SmartContract(name: foo_contract-Epoch2_4-Clarity2, code_body: [..], clarity_version: Some(Clarity2))", + vm_error: "None [NON-CONSENSUS BREAKING]", + return_type: Response(ResponseData( + committed: true, + data: Bool(true), + )), + cost: ExecutionCost( + write_length: 121, + write_count: 2, + read_length: 1, + read_count: 1, + runtime: 11968, + ), + ), + ], + total_block_cost: ExecutionCost( + write_length: 121, + write_count: 2, + read_length: 1, + read_count: 1, + runtime: 11968, + ), + )), + Success(ExpectedBlockOutput( + marf_hash: "46990434b77ba670bf7bc908be33b4083bbd7147cd86dffa52a821fc88abbf2f", + evaluated_epoch: Epoch25, + transactions: [ + 
ExpectedTransactionOutput( + tx: "SmartContract(name: foo_contract-Epoch2_5-Clarity1, code_body: [..], clarity_version: Some(Clarity1))", vm_error: "None [NON-CONSENSUS BREAKING]", return_type: Response(ResponseData( committed: true, @@ -24,131 +304,887 @@ expression: result ), ], total_block_cost: ExecutionCost( - write_length: 121, - write_count: 2, - read_length: 1, - read_count: 1, - runtime: 11968, + write_length: 121, + write_count: 2, + read_length: 1, + read_count: 1, + runtime: 11968, + ), + )), + Success(ExpectedBlockOutput( + marf_hash: "ca156312aaf28bd8d37bf879d936a7590fe8b09723f6f5bf65da38c9d8982929", + evaluated_epoch: Epoch25, + transactions: [ + ExpectedTransactionOutput( + tx: "SmartContract(name: foo_contract-Epoch2_5-Clarity2, code_body: [..], clarity_version: Some(Clarity2))", + vm_error: "None [NON-CONSENSUS BREAKING]", + return_type: Response(ResponseData( + committed: true, + data: Bool(true), + )), + cost: ExecutionCost( + write_length: 121, + write_count: 2, + read_length: 1, + read_count: 1, + runtime: 11968, + ), + ), + ], + total_block_cost: ExecutionCost( + write_length: 121, + write_count: 2, + read_length: 1, + read_count: 1, + runtime: 11968, + ), + )), + Success(ExpectedBlockOutput( + marf_hash: "85ac414f14ae115109c11a1e96dca20daf469108ba8a4cb50cd41adc88f5df23", + evaluated_epoch: Epoch30, + transactions: [ + ExpectedTransactionOutput( + tx: "SmartContract(name: foo_contract-Epoch3_0-Clarity1, code_body: [..], clarity_version: Some(Clarity1))", + vm_error: "None [NON-CONSENSUS BREAKING]", + return_type: Response(ResponseData( + committed: true, + data: Bool(true), + )), + cost: ExecutionCost( + write_length: 121, + write_count: 2, + read_length: 1, + read_count: 1, + runtime: 11968, + ), + ), + ], + total_block_cost: ExecutionCost( + write_length: 121, + write_count: 2, + read_length: 1, + read_count: 1, + runtime: 11968, + ), + )), + Success(ExpectedBlockOutput( + marf_hash: 
"f881675119756d835bdeb52563df013cba96400650ca46ad4a6b17e3523421a6", + evaluated_epoch: Epoch30, + transactions: [ + ExpectedTransactionOutput( + tx: "SmartContract(name: foo_contract-Epoch3_0-Clarity2, code_body: [..], clarity_version: Some(Clarity2))", + vm_error: "None [NON-CONSENSUS BREAKING]", + return_type: Response(ResponseData( + committed: true, + data: Bool(true), + )), + cost: ExecutionCost( + write_length: 121, + write_count: 2, + read_length: 1, + read_count: 1, + runtime: 11968, + ), + ), + ], + total_block_cost: ExecutionCost( + write_length: 121, + write_count: 2, + read_length: 1, + read_count: 1, + runtime: 11968, + ), + )), + Success(ExpectedBlockOutput( + marf_hash: "96c02c20a470487d4c0840cd0907e431a62025705050517aec6273388aaa15b5", + evaluated_epoch: Epoch30, + transactions: [ + ExpectedTransactionOutput( + tx: "SmartContract(name: foo_contract-Epoch3_0-Clarity3, code_body: [..], clarity_version: Some(Clarity3))", + vm_error: "None [NON-CONSENSUS BREAKING]", + return_type: Response(ResponseData( + committed: true, + data: Bool(true), + )), + cost: ExecutionCost( + write_length: 121, + write_count: 2, + read_length: 1, + read_count: 1, + runtime: 11968, + ), + ), + ], + total_block_cost: ExecutionCost( + write_length: 121, + write_count: 2, + read_length: 1, + read_count: 1, + runtime: 11968, + ), + )), + Success(ExpectedBlockOutput( + marf_hash: "eda6ac6c341148306e527afd259f67f1960eb6b0e3675457b0ebdd39f6331875", + evaluated_epoch: Epoch31, + transactions: [ + ExpectedTransactionOutput( + tx: "SmartContract(name: foo_contract-Epoch3_1-Clarity1, code_body: [..], clarity_version: Some(Clarity1))", + vm_error: "None [NON-CONSENSUS BREAKING]", + return_type: Response(ResponseData( + committed: true, + data: Bool(true), + )), + cost: ExecutionCost( + write_length: 121, + write_count: 2, + read_length: 1, + read_count: 1, + runtime: 11968, + ), + ), + ], + total_block_cost: ExecutionCost( + write_length: 121, + write_count: 2, + read_length: 1, + 
read_count: 1, + runtime: 11968, + ), + )), + Success(ExpectedBlockOutput( + marf_hash: "4246ce7c78c780a20fe493c0907efe97b4a5dfc09e13bd12904678e74784781e", + evaluated_epoch: Epoch31, + transactions: [ + ExpectedTransactionOutput( + tx: "SmartContract(name: foo_contract-Epoch3_1-Clarity2, code_body: [..], clarity_version: Some(Clarity2))", + vm_error: "None [NON-CONSENSUS BREAKING]", + return_type: Response(ResponseData( + committed: true, + data: Bool(true), + )), + cost: ExecutionCost( + write_length: 121, + write_count: 2, + read_length: 1, + read_count: 1, + runtime: 11968, + ), + ), + ], + total_block_cost: ExecutionCost( + write_length: 121, + write_count: 2, + read_length: 1, + read_count: 1, + runtime: 11968, + ), + )), + Success(ExpectedBlockOutput( + marf_hash: "263bdc37c726aa7045bfbacfdb2c28c9b224a3b38455251d474a5e6e80485e68", + evaluated_epoch: Epoch31, + transactions: [ + ExpectedTransactionOutput( + tx: "SmartContract(name: foo_contract-Epoch3_1-Clarity3, code_body: [..], clarity_version: Some(Clarity3))", + vm_error: "None [NON-CONSENSUS BREAKING]", + return_type: Response(ResponseData( + committed: true, + data: Bool(true), + )), + cost: ExecutionCost( + write_length: 121, + write_count: 2, + read_length: 1, + read_count: 1, + runtime: 11968, + ), + ), + ], + total_block_cost: ExecutionCost( + write_length: 121, + write_count: 2, + read_length: 1, + read_count: 1, + runtime: 11968, + ), + )), + Success(ExpectedBlockOutput( + marf_hash: "8f128dbc3ea384d85bea79d9933e5153874215ef37dc4f53f1385b220d64644f", + evaluated_epoch: Epoch32, + transactions: [ + ExpectedTransactionOutput( + tx: "SmartContract(name: foo_contract-Epoch3_2-Clarity1, code_body: [..], clarity_version: Some(Clarity1))", + vm_error: "None [NON-CONSENSUS BREAKING]", + return_type: Response(ResponseData( + committed: true, + data: Bool(true), + )), + cost: ExecutionCost( + write_length: 121, + write_count: 2, + read_length: 1, + read_count: 1, + runtime: 11968, + ), + ), + ], + 
total_block_cost: ExecutionCost( + write_length: 121, + write_count: 2, + read_length: 1, + read_count: 1, + runtime: 11968, + ), + )), + Success(ExpectedBlockOutput( + marf_hash: "b03cb4e4df40f04e9709c57b8fe0016b6e2079b5b52797d227f52c8e8a41cccb", + evaluated_epoch: Epoch32, + transactions: [ + ExpectedTransactionOutput( + tx: "SmartContract(name: foo_contract-Epoch3_2-Clarity2, code_body: [..], clarity_version: Some(Clarity2))", + vm_error: "None [NON-CONSENSUS BREAKING]", + return_type: Response(ResponseData( + committed: true, + data: Bool(true), + )), + cost: ExecutionCost( + write_length: 121, + write_count: 2, + read_length: 1, + read_count: 1, + runtime: 11968, + ), + ), + ], + total_block_cost: ExecutionCost( + write_length: 121, + write_count: 2, + read_length: 1, + read_count: 1, + runtime: 11968, + ), + )), + Success(ExpectedBlockOutput( + marf_hash: "8334f3de48ac9c8994bc89a9b141fd7d3a6553fb3dec2ccf1730dbbd06ccf481", + evaluated_epoch: Epoch32, + transactions: [ + ExpectedTransactionOutput( + tx: "SmartContract(name: foo_contract-Epoch3_2-Clarity3, code_body: [..], clarity_version: Some(Clarity3))", + vm_error: "None [NON-CONSENSUS BREAKING]", + return_type: Response(ResponseData( + committed: true, + data: Bool(true), + )), + cost: ExecutionCost( + write_length: 121, + write_count: 2, + read_length: 1, + read_count: 1, + runtime: 11968, + ), + ), + ], + total_block_cost: ExecutionCost( + write_length: 121, + write_count: 2, + read_length: 1, + read_count: 1, + runtime: 11968, + ), + )), + Success(ExpectedBlockOutput( + marf_hash: "dfd7987ed3594de16a5a049afddcc6b1554d1c872ff62505193d651f39358ba1", + evaluated_epoch: Epoch32, + transactions: [ + ExpectedTransactionOutput( + tx: "ContractCall(address: ST1AW6EKPGT61SQ9FNVDS17RKNWT8ZP582VF9HSCP, contract_name: foo_contract-Epoch2_0-Clarity1, function_name: bar, function_args: [[UInt(1)]])", + vm_error: "None [NON-CONSENSUS BREAKING]", + return_type: Response(ResponseData( + committed: true, + data: UInt(1), 
+ )), + cost: ExecutionCost( + write_length: 0, + write_count: 0, + read_length: 103, + read_count: 3, + runtime: 499, + ), + ), + ], + total_block_cost: ExecutionCost( + write_length: 0, + write_count: 0, + read_length: 103, + read_count: 3, + runtime: 499, + ), + )), + Success(ExpectedBlockOutput( + marf_hash: "eb2610cbcb9bcb2b9ef4ca096ea86ba5c650c0827948e2ea3d7217134f78abbb", + evaluated_epoch: Epoch32, + transactions: [ + ExpectedTransactionOutput( + tx: "ContractCall(address: ST1AW6EKPGT61SQ9FNVDS17RKNWT8ZP582VF9HSCP, contract_name: foo_contract-Epoch2_05-Clarity1, function_name: bar, function_args: [[UInt(1)]])", + vm_error: "None [NON-CONSENSUS BREAKING]", + return_type: Response(ResponseData( + committed: true, + data: UInt(1), + )), + cost: ExecutionCost( + write_length: 0, + write_count: 0, + read_length: 103, + read_count: 3, + runtime: 499, + ), + ), + ], + total_block_cost: ExecutionCost( + write_length: 0, + write_count: 0, + read_length: 103, + read_count: 3, + runtime: 499, + ), + )), + Success(ExpectedBlockOutput( + marf_hash: "aacde9ab4bb9e268861e798a60cebe203ad63eb620557a807c18be6ca4778b36", + evaluated_epoch: Epoch32, + transactions: [ + ExpectedTransactionOutput( + tx: "ContractCall(address: ST1AW6EKPGT61SQ9FNVDS17RKNWT8ZP582VF9HSCP, contract_name: foo_contract-Epoch2_1-Clarity1, function_name: bar, function_args: [[UInt(1)]])", + vm_error: "None [NON-CONSENSUS BREAKING]", + return_type: Response(ResponseData( + committed: true, + data: UInt(1), + )), + cost: ExecutionCost( + write_length: 0, + write_count: 0, + read_length: 103, + read_count: 3, + runtime: 499, + ), + ), + ], + total_block_cost: ExecutionCost( + write_length: 0, + write_count: 0, + read_length: 103, + read_count: 3, + runtime: 499, + ), + )), + Success(ExpectedBlockOutput( + marf_hash: "1dadd6143cdd8858b1be818ec305284d5d7b5ea5c74798a55bbaa65fd4960da4", + evaluated_epoch: Epoch32, + transactions: [ + ExpectedTransactionOutput( + tx: "ContractCall(address: 
ST1AW6EKPGT61SQ9FNVDS17RKNWT8ZP582VF9HSCP, contract_name: foo_contract-Epoch2_1-Clarity2, function_name: bar, function_args: [[UInt(1)]])", + vm_error: "None [NON-CONSENSUS BREAKING]", + return_type: Response(ResponseData( + committed: true, + data: UInt(1), + )), + cost: ExecutionCost( + write_length: 0, + write_count: 0, + read_length: 103, + read_count: 3, + runtime: 499, + ), + ), + ], + total_block_cost: ExecutionCost( + write_length: 0, + write_count: 0, + read_length: 103, + read_count: 3, + runtime: 499, + ), + )), + Success(ExpectedBlockOutput( + marf_hash: "33f654fae321df1a7a8c999ff5bd9f7157838686cc66052713350fbabfec6475", + evaluated_epoch: Epoch32, + transactions: [ + ExpectedTransactionOutput( + tx: "ContractCall(address: ST1AW6EKPGT61SQ9FNVDS17RKNWT8ZP582VF9HSCP, contract_name: foo_contract-Epoch2_2-Clarity1, function_name: bar, function_args: [[UInt(1)]])", + vm_error: "None [NON-CONSENSUS BREAKING]", + return_type: Response(ResponseData( + committed: true, + data: UInt(1), + )), + cost: ExecutionCost( + write_length: 0, + write_count: 0, + read_length: 103, + read_count: 3, + runtime: 499, + ), + ), + ], + total_block_cost: ExecutionCost( + write_length: 0, + write_count: 0, + read_length: 103, + read_count: 3, + runtime: 499, + ), + )), + Success(ExpectedBlockOutput( + marf_hash: "a62d1626724274fde8a5ff7896df804e8d9d9b360fd15e3f5c4163c1b1883fc4", + evaluated_epoch: Epoch32, + transactions: [ + ExpectedTransactionOutput( + tx: "ContractCall(address: ST1AW6EKPGT61SQ9FNVDS17RKNWT8ZP582VF9HSCP, contract_name: foo_contract-Epoch2_2-Clarity2, function_name: bar, function_args: [[UInt(1)]])", + vm_error: "None [NON-CONSENSUS BREAKING]", + return_type: Response(ResponseData( + committed: true, + data: UInt(1), + )), + cost: ExecutionCost( + write_length: 0, + write_count: 0, + read_length: 103, + read_count: 3, + runtime: 499, + ), + ), + ], + total_block_cost: ExecutionCost( + write_length: 0, + write_count: 0, + read_length: 103, + read_count: 3, + 
runtime: 499, + ), + )), + Success(ExpectedBlockOutput( + marf_hash: "03113d14b71423632965e65c09ae3c25e02fcadb50643597fbb422b63fb40046", + evaluated_epoch: Epoch32, + transactions: [ + ExpectedTransactionOutput( + tx: "ContractCall(address: ST1AW6EKPGT61SQ9FNVDS17RKNWT8ZP582VF9HSCP, contract_name: foo_contract-Epoch2_3-Clarity1, function_name: bar, function_args: [[UInt(1)]])", + vm_error: "None [NON-CONSENSUS BREAKING]", + return_type: Response(ResponseData( + committed: true, + data: UInt(1), + )), + cost: ExecutionCost( + write_length: 0, + write_count: 0, + read_length: 103, + read_count: 3, + runtime: 499, + ), + ), + ], + total_block_cost: ExecutionCost( + write_length: 0, + write_count: 0, + read_length: 103, + read_count: 3, + runtime: 499, + ), + )), + Success(ExpectedBlockOutput( + marf_hash: "a98500c481c27c5a9a145be38e97899bff6594094792c2e096554dc1d673ac6c", + evaluated_epoch: Epoch32, + transactions: [ + ExpectedTransactionOutput( + tx: "ContractCall(address: ST1AW6EKPGT61SQ9FNVDS17RKNWT8ZP582VF9HSCP, contract_name: foo_contract-Epoch2_3-Clarity2, function_name: bar, function_args: [[UInt(1)]])", + vm_error: "None [NON-CONSENSUS BREAKING]", + return_type: Response(ResponseData( + committed: true, + data: UInt(1), + )), + cost: ExecutionCost( + write_length: 0, + write_count: 0, + read_length: 103, + read_count: 3, + runtime: 499, + ), + ), + ], + total_block_cost: ExecutionCost( + write_length: 0, + write_count: 0, + read_length: 103, + read_count: 3, + runtime: 499, + ), + )), + Success(ExpectedBlockOutput( + marf_hash: "662d3b6c0b7626514a6b6db933085131eabbed7e511a652821e04b391293f562", + evaluated_epoch: Epoch32, + transactions: [ + ExpectedTransactionOutput( + tx: "ContractCall(address: ST1AW6EKPGT61SQ9FNVDS17RKNWT8ZP582VF9HSCP, contract_name: foo_contract-Epoch2_4-Clarity1, function_name: bar, function_args: [[UInt(1)]])", + vm_error: "None [NON-CONSENSUS BREAKING]", + return_type: Response(ResponseData( + committed: true, + data: UInt(1), + )), + 
cost: ExecutionCost( + write_length: 0, + write_count: 0, + read_length: 103, + read_count: 3, + runtime: 499, + ), + ), + ], + total_block_cost: ExecutionCost( + write_length: 0, + write_count: 0, + read_length: 103, + read_count: 3, + runtime: 499, + ), + )), + Success(ExpectedBlockOutput( + marf_hash: "d1542be1ac31dcad1cd400f4da054ff9483e8288f4eb62c81ad0c738565c1748", + evaluated_epoch: Epoch32, + transactions: [ + ExpectedTransactionOutput( + tx: "ContractCall(address: ST1AW6EKPGT61SQ9FNVDS17RKNWT8ZP582VF9HSCP, contract_name: foo_contract-Epoch2_4-Clarity2, function_name: bar, function_args: [[UInt(1)]])", + vm_error: "None [NON-CONSENSUS BREAKING]", + return_type: Response(ResponseData( + committed: true, + data: UInt(1), + )), + cost: ExecutionCost( + write_length: 0, + write_count: 0, + read_length: 103, + read_count: 3, + runtime: 499, + ), + ), + ], + total_block_cost: ExecutionCost( + write_length: 0, + write_count: 0, + read_length: 103, + read_count: 3, + runtime: 499, + ), + )), + Success(ExpectedBlockOutput( + marf_hash: "de0cc119f7d0fb0c6bee1dbc197f8e00b50a818b7cb71b197271d0d52547ee18", + evaluated_epoch: Epoch32, + transactions: [ + ExpectedTransactionOutput( + tx: "ContractCall(address: ST1AW6EKPGT61SQ9FNVDS17RKNWT8ZP582VF9HSCP, contract_name: foo_contract-Epoch2_5-Clarity1, function_name: bar, function_args: [[UInt(1)]])", + vm_error: "None [NON-CONSENSUS BREAKING]", + return_type: Response(ResponseData( + committed: true, + data: UInt(1), + )), + cost: ExecutionCost( + write_length: 0, + write_count: 0, + read_length: 103, + read_count: 3, + runtime: 499, + ), + ), + ], + total_block_cost: ExecutionCost( + write_length: 0, + write_count: 0, + read_length: 103, + read_count: 3, + runtime: 499, + ), + )), + Success(ExpectedBlockOutput( + marf_hash: "6d65110f96a096b36de9e731d2c05d1e489a60964db8a68192794c3ab188d698", + evaluated_epoch: Epoch32, + transactions: [ + ExpectedTransactionOutput( + tx: "ContractCall(address: 
ST1AW6EKPGT61SQ9FNVDS17RKNWT8ZP582VF9HSCP, contract_name: foo_contract-Epoch2_5-Clarity2, function_name: bar, function_args: [[UInt(1)]])", + vm_error: "None [NON-CONSENSUS BREAKING]", + return_type: Response(ResponseData( + committed: true, + data: UInt(1), + )), + cost: ExecutionCost( + write_length: 0, + write_count: 0, + read_length: 103, + read_count: 3, + runtime: 499, + ), + ), + ], + total_block_cost: ExecutionCost( + write_length: 0, + write_count: 0, + read_length: 103, + read_count: 3, + runtime: 499, + ), + )), + Success(ExpectedBlockOutput( + marf_hash: "4c1bfbe42283cc5e98bedaf29f389b9dbddbcae1fc6a9f3dd58a159f8acaa0d2", + evaluated_epoch: Epoch32, + transactions: [ + ExpectedTransactionOutput( + tx: "ContractCall(address: ST1AW6EKPGT61SQ9FNVDS17RKNWT8ZP582VF9HSCP, contract_name: foo_contract-Epoch3_0-Clarity1, function_name: bar, function_args: [[UInt(1)]])", + vm_error: "None [NON-CONSENSUS BREAKING]", + return_type: Response(ResponseData( + committed: true, + data: UInt(1), + )), + cost: ExecutionCost( + write_length: 0, + write_count: 0, + read_length: 103, + read_count: 3, + runtime: 499, + ), + ), + ], + total_block_cost: ExecutionCost( + write_length: 0, + write_count: 0, + read_length: 103, + read_count: 3, + runtime: 499, + ), + )), + Success(ExpectedBlockOutput( + marf_hash: "7e6068e2124476b0bc68b5ea1b92af144b69b2e5cc99070dafdf7ecbf6ffa3ab", + evaluated_epoch: Epoch32, + transactions: [ + ExpectedTransactionOutput( + tx: "ContractCall(address: ST1AW6EKPGT61SQ9FNVDS17RKNWT8ZP582VF9HSCP, contract_name: foo_contract-Epoch3_0-Clarity2, function_name: bar, function_args: [[UInt(1)]])", + vm_error: "None [NON-CONSENSUS BREAKING]", + return_type: Response(ResponseData( + committed: true, + data: UInt(1), + )), + cost: ExecutionCost( + write_length: 0, + write_count: 0, + read_length: 103, + read_count: 3, + runtime: 499, + ), + ), + ], + total_block_cost: ExecutionCost( + write_length: 0, + write_count: 0, + read_length: 103, + read_count: 3, + 
runtime: 499, + ), + )), + Success(ExpectedBlockOutput( + marf_hash: "5f3f0b515bdf85a87ddf95d0630c988c96e2e8f4072435755d4572a2a19ee13f", + evaluated_epoch: Epoch32, + transactions: [ + ExpectedTransactionOutput( + tx: "ContractCall(address: ST1AW6EKPGT61SQ9FNVDS17RKNWT8ZP582VF9HSCP, contract_name: foo_contract-Epoch3_0-Clarity3, function_name: bar, function_args: [[UInt(1)]])", + vm_error: "None [NON-CONSENSUS BREAKING]", + return_type: Response(ResponseData( + committed: true, + data: UInt(1), + )), + cost: ExecutionCost( + write_length: 0, + write_count: 0, + read_length: 103, + read_count: 3, + runtime: 499, + ), + ), + ], + total_block_cost: ExecutionCost( + write_length: 0, + write_count: 0, + read_length: 103, + read_count: 3, + runtime: 499, + ), + )), + Success(ExpectedBlockOutput( + marf_hash: "38c1fe84e0a3a5b865dc3c04c2834f8fe6ef0ac0ac5bc0976496af7b3d99e9e1", + evaluated_epoch: Epoch32, + transactions: [ + ExpectedTransactionOutput( + tx: "ContractCall(address: ST1AW6EKPGT61SQ9FNVDS17RKNWT8ZP582VF9HSCP, contract_name: foo_contract-Epoch3_1-Clarity1, function_name: bar, function_args: [[UInt(1)]])", + vm_error: "None [NON-CONSENSUS BREAKING]", + return_type: Response(ResponseData( + committed: true, + data: UInt(1), + )), + cost: ExecutionCost( + write_length: 0, + write_count: 0, + read_length: 103, + read_count: 3, + runtime: 499, + ), + ), + ], + total_block_cost: ExecutionCost( + write_length: 0, + write_count: 0, + read_length: 103, + read_count: 3, + runtime: 499, + ), + )), + Success(ExpectedBlockOutput( + marf_hash: "83cb893a388306c229959558bc8840911099641abf724707687cb36d1a49ea8f", + evaluated_epoch: Epoch32, + transactions: [ + ExpectedTransactionOutput( + tx: "ContractCall(address: ST1AW6EKPGT61SQ9FNVDS17RKNWT8ZP582VF9HSCP, contract_name: foo_contract-Epoch3_1-Clarity2, function_name: bar, function_args: [[UInt(1)]])", + vm_error: "None [NON-CONSENSUS BREAKING]", + return_type: Response(ResponseData( + committed: true, + data: UInt(1), + )), + 
cost: ExecutionCost( + write_length: 0, + write_count: 0, + read_length: 103, + read_count: 3, + runtime: 499, + ), + ), + ], + total_block_cost: ExecutionCost( + write_length: 0, + write_count: 0, + read_length: 103, + read_count: 3, + runtime: 499, ), )), Success(ExpectedBlockOutput( - marf_hash: "dc0aabf612da63b0140560522efdb8ce896660315cdd2fa09adbf9369da7abb7", - evaluated_epoch: Epoch30, + marf_hash: "54e129237da03ab5215ec50928b16950a3f1a11a25a7d509e5ca488e30904a70", + evaluated_epoch: Epoch32, transactions: [ ExpectedTransactionOutput( - tx: "SmartContract(name: foo_contract-Epoch3_0-Clarity2, code_body: [..], clarity_version: Some(Clarity2))", + tx: "ContractCall(address: ST1AW6EKPGT61SQ9FNVDS17RKNWT8ZP582VF9HSCP, contract_name: foo_contract-Epoch3_1-Clarity3, function_name: bar, function_args: [[UInt(1)]])", vm_error: "None [NON-CONSENSUS BREAKING]", return_type: Response(ResponseData( committed: true, - data: Bool(true), + data: UInt(1), )), cost: ExecutionCost( - write_length: 121, - write_count: 2, - read_length: 1, - read_count: 1, - runtime: 11968, + write_length: 0, + write_count: 0, + read_length: 103, + read_count: 3, + runtime: 499, ), ), ], total_block_cost: ExecutionCost( - write_length: 121, - write_count: 2, - read_length: 1, - read_count: 1, - runtime: 11968, + write_length: 0, + write_count: 0, + read_length: 103, + read_count: 3, + runtime: 499, ), )), Success(ExpectedBlockOutput( - marf_hash: "82e841d52ca2e9d1c226cf961006eff1e6257ce6c714cbeb198f05536708a7b3", - evaluated_epoch: Epoch30, + marf_hash: "7abd4e113f31e7d9431617303f68757d880cf98badbba442269c79f1397fece0", + evaluated_epoch: Epoch32, transactions: [ ExpectedTransactionOutput( - tx: "SmartContract(name: foo_contract-Epoch3_0-Clarity3, code_body: [..], clarity_version: Some(Clarity3))", + tx: "ContractCall(address: ST1AW6EKPGT61SQ9FNVDS17RKNWT8ZP582VF9HSCP, contract_name: foo_contract-Epoch3_2-Clarity1, function_name: bar, function_args: [[UInt(1)]])", vm_error: "None [NON-CONSENSUS 
BREAKING]", return_type: Response(ResponseData( committed: true, - data: Bool(true), + data: UInt(1), )), cost: ExecutionCost( - write_length: 121, - write_count: 2, - read_length: 1, - read_count: 1, - runtime: 11968, + write_length: 0, + write_count: 0, + read_length: 103, + read_count: 3, + runtime: 499, ), ), ], total_block_cost: ExecutionCost( - write_length: 121, - write_count: 2, - read_length: 1, - read_count: 1, - runtime: 11968, + write_length: 0, + write_count: 0, + read_length: 103, + read_count: 3, + runtime: 499, ), )), Success(ExpectedBlockOutput( - marf_hash: "3c02272c7a6681809ba820cd27eae4cf6a947ffbb94472c19858a54e8f3ae262", - evaluated_epoch: Epoch31, + marf_hash: "0ef2659b0f4a3e3cd79b983c42308a8791d73d9c54f98ce1babad6bce4f450af", + evaluated_epoch: Epoch32, transactions: [ ExpectedTransactionOutput( - tx: "SmartContract(name: foo_contract-Epoch3_1-Clarity1, code_body: [..], clarity_version: Some(Clarity1))", + tx: "ContractCall(address: ST1AW6EKPGT61SQ9FNVDS17RKNWT8ZP582VF9HSCP, contract_name: foo_contract-Epoch3_2-Clarity2, function_name: bar, function_args: [[UInt(1)]])", vm_error: "None [NON-CONSENSUS BREAKING]", return_type: Response(ResponseData( committed: true, - data: Bool(true), + data: UInt(1), )), cost: ExecutionCost( - write_length: 121, - write_count: 2, - read_length: 1, - read_count: 1, - runtime: 11968, + write_length: 0, + write_count: 0, + read_length: 103, + read_count: 3, + runtime: 499, ), ), ], total_block_cost: ExecutionCost( - write_length: 121, - write_count: 2, - read_length: 1, - read_count: 1, - runtime: 11968, + write_length: 0, + write_count: 0, + read_length: 103, + read_count: 3, + runtime: 499, ), )), Success(ExpectedBlockOutput( - marf_hash: "29aad28c9ea29def9bca1065d0aa00e2fd3c55777382f3200f17a4a3d6367a31", - evaluated_epoch: Epoch31, + marf_hash: "6fb786959add35c7f138469509e89a151410346410432724577e1dafe1209fb1", + evaluated_epoch: Epoch32, transactions: [ ExpectedTransactionOutput( - tx: "SmartContract(name: 
foo_contract-Epoch3_1-Clarity2, code_body: [..], clarity_version: Some(Clarity2))", + tx: "ContractCall(address: ST1AW6EKPGT61SQ9FNVDS17RKNWT8ZP582VF9HSCP, contract_name: foo_contract-Epoch3_2-Clarity3, function_name: bar, function_args: [[UInt(1)]])", vm_error: "None [NON-CONSENSUS BREAKING]", return_type: Response(ResponseData( committed: true, - data: Bool(true), + data: UInt(1), )), cost: ExecutionCost( - write_length: 121, - write_count: 2, - read_length: 1, - read_count: 1, - runtime: 11968, + write_length: 0, + write_count: 0, + read_length: 103, + read_count: 3, + runtime: 499, ), ), ], total_block_cost: ExecutionCost( - write_length: 121, - write_count: 2, - read_length: 1, - read_count: 1, - runtime: 11968, + write_length: 0, + write_count: 0, + read_length: 103, + read_count: 3, + runtime: 499, ), )), Success(ExpectedBlockOutput( - marf_hash: "50a8d6594da285e03915111a3d405675069606fbd45f263c0da68b8ed2def295", - evaluated_epoch: Epoch31, + marf_hash: "7c49b5e1ef621f812e098505662ec22466e35dba63790f0207ca975ba140e1bf", + evaluated_epoch: Epoch33, transactions: [ ExpectedTransactionOutput( - tx: "SmartContract(name: foo_contract-Epoch3_1-Clarity3, code_body: [..], clarity_version: Some(Clarity3))", + tx: "SmartContract(name: foo_contract-Epoch3_3-Clarity1, code_body: [..], clarity_version: Some(Clarity1))", vm_error: "None [NON-CONSENSUS BREAKING]", return_type: Response(ResponseData( committed: true, @@ -172,11 +1208,11 @@ expression: result ), )), Success(ExpectedBlockOutput( - marf_hash: "969f0c7e8124e21997bb02259043885d0f0b6825244832b3bd02e15079b02358", - evaluated_epoch: Epoch32, + marf_hash: "1ce3dbfc54ed002976f2ac1b9481002cc5693c6986a0a276aa493204ce5c50cd", + evaluated_epoch: Epoch33, transactions: [ ExpectedTransactionOutput( - tx: "SmartContract(name: foo_contract-Epoch3_2-Clarity1, code_body: [..], clarity_version: Some(Clarity1))", + tx: "SmartContract(name: foo_contract-Epoch3_3-Clarity2, code_body: [..], clarity_version: Some(Clarity2))", 
vm_error: "None [NON-CONSENSUS BREAKING]", return_type: Response(ResponseData( committed: true, @@ -200,11 +1236,11 @@ expression: result ), )), Success(ExpectedBlockOutput( - marf_hash: "2afc4a781e1f87d793e3e876fbb7eb90b00c34d34f88b03ed04adebf7e1d2a34", - evaluated_epoch: Epoch32, + marf_hash: "bc53a4af2876d50e4b1f2fc617662a2c7fe407f399a5e9ed70f1a5d6ef632c96", + evaluated_epoch: Epoch33, transactions: [ ExpectedTransactionOutput( - tx: "SmartContract(name: foo_contract-Epoch3_2-Clarity2, code_body: [..], clarity_version: Some(Clarity2))", + tx: "SmartContract(name: foo_contract-Epoch3_3-Clarity3, code_body: [..], clarity_version: Some(Clarity3))", vm_error: "None [NON-CONSENSUS BREAKING]", return_type: Response(ResponseData( committed: true, @@ -228,11 +1264,11 @@ expression: result ), )), Success(ExpectedBlockOutput( - marf_hash: "e8bbd5ff2cfab0812da17d25cdb468e87da8ca43b3dd6bf0cb20f5ffb9b5c434", - evaluated_epoch: Epoch32, + marf_hash: "1a5f4dcc0061e5f372a7fcea574f14942ac7366f16ef7254adbdcd08b1559a0a", + evaluated_epoch: Epoch33, transactions: [ ExpectedTransactionOutput( - tx: "SmartContract(name: foo_contract-Epoch3_2-Clarity3, code_body: [..], clarity_version: Some(Clarity3))", + tx: "SmartContract(name: foo_contract-Epoch3_3-Clarity4, code_body: [..], clarity_version: Some(Clarity4))", vm_error: "None [NON-CONSENSUS BREAKING]", return_type: Response(ResponseData( committed: true, @@ -256,11 +1292,11 @@ expression: result ), )), Success(ExpectedBlockOutput( - marf_hash: "8e1fa3c731374492b0d95090c1df2a37016685a2e8aa0d8838392f325c363da8", - evaluated_epoch: Epoch32, + marf_hash: "611f8299c20e38188d76c1d8981a8ea5d000c73ede60c854010598d23c75f7f4", + evaluated_epoch: Epoch33, transactions: [ ExpectedTransactionOutput( - tx: "ContractCall(address: ST1AW6EKPGT61SQ9FNVDS17RKNWT8ZP582VF9HSCP, contract_name: foo_contract-Epoch3_0-Clarity1, function_name: bar, function_args: [[UInt(1)]])", + tx: "ContractCall(address: ST1AW6EKPGT61SQ9FNVDS17RKNWT8ZP582VF9HSCP, 
contract_name: foo_contract-Epoch2_0-Clarity1, function_name: bar, function_args: [[UInt(1)]])", vm_error: "None [NON-CONSENSUS BREAKING]", return_type: Response(ResponseData( committed: true, @@ -284,11 +1320,11 @@ expression: result ), )), Success(ExpectedBlockOutput( - marf_hash: "18433deeefcbf8171b64dbe2e954662884dc87276df423111a24bbd0b3ee7f8d", - evaluated_epoch: Epoch32, + marf_hash: "fad2c2400a9ed905609b276a3e0ac12e82666495a187cc9d96c6f15c2ce0f9f2", + evaluated_epoch: Epoch33, transactions: [ ExpectedTransactionOutput( - tx: "ContractCall(address: ST1AW6EKPGT61SQ9FNVDS17RKNWT8ZP582VF9HSCP, contract_name: foo_contract-Epoch3_0-Clarity2, function_name: bar, function_args: [[UInt(1)]])", + tx: "ContractCall(address: ST1AW6EKPGT61SQ9FNVDS17RKNWT8ZP582VF9HSCP, contract_name: foo_contract-Epoch2_05-Clarity1, function_name: bar, function_args: [[UInt(1)]])", vm_error: "None [NON-CONSENSUS BREAKING]", return_type: Response(ResponseData( committed: true, @@ -312,11 +1348,11 @@ expression: result ), )), Success(ExpectedBlockOutput( - marf_hash: "e78662e11643b6bb442bc661e4b5e6e885563144f51c2a77268f67c5db3f3584", - evaluated_epoch: Epoch32, + marf_hash: "ff78af2148d178968917f4dcc3bf0e84e99e94e7e03cce0481e7bb2ed722e64f", + evaluated_epoch: Epoch33, transactions: [ ExpectedTransactionOutput( - tx: "ContractCall(address: ST1AW6EKPGT61SQ9FNVDS17RKNWT8ZP582VF9HSCP, contract_name: foo_contract-Epoch3_0-Clarity3, function_name: bar, function_args: [[UInt(1)]])", + tx: "ContractCall(address: ST1AW6EKPGT61SQ9FNVDS17RKNWT8ZP582VF9HSCP, contract_name: foo_contract-Epoch2_1-Clarity1, function_name: bar, function_args: [[UInt(1)]])", vm_error: "None [NON-CONSENSUS BREAKING]", return_type: Response(ResponseData( committed: true, @@ -340,11 +1376,11 @@ expression: result ), )), Success(ExpectedBlockOutput( - marf_hash: "73f22023951bb8a04e5ccb7c569b979fcaa5a56590c14442a5dcfab5ea5607b5", - evaluated_epoch: Epoch32, + marf_hash: 
"c24417065f0ae7793e7b7048483d88d6d847d06f5dc141a2e82f5d9cfc708036", + evaluated_epoch: Epoch33, transactions: [ ExpectedTransactionOutput( - tx: "ContractCall(address: ST1AW6EKPGT61SQ9FNVDS17RKNWT8ZP582VF9HSCP, contract_name: foo_contract-Epoch3_1-Clarity1, function_name: bar, function_args: [[UInt(1)]])", + tx: "ContractCall(address: ST1AW6EKPGT61SQ9FNVDS17RKNWT8ZP582VF9HSCP, contract_name: foo_contract-Epoch2_1-Clarity2, function_name: bar, function_args: [[UInt(1)]])", vm_error: "None [NON-CONSENSUS BREAKING]", return_type: Response(ResponseData( committed: true, @@ -368,11 +1404,11 @@ expression: result ), )), Success(ExpectedBlockOutput( - marf_hash: "027a3d14203b456780ea26eafecd9ab5261b7601ad711793772c7c6e0fa02e59", - evaluated_epoch: Epoch32, + marf_hash: "0cd887293455e8a6d347eae14d5b9790dcc7cb015294c2a929477ca3e7afd51f", + evaluated_epoch: Epoch33, transactions: [ ExpectedTransactionOutput( - tx: "ContractCall(address: ST1AW6EKPGT61SQ9FNVDS17RKNWT8ZP582VF9HSCP, contract_name: foo_contract-Epoch3_1-Clarity2, function_name: bar, function_args: [[UInt(1)]])", + tx: "ContractCall(address: ST1AW6EKPGT61SQ9FNVDS17RKNWT8ZP582VF9HSCP, contract_name: foo_contract-Epoch2_2-Clarity1, function_name: bar, function_args: [[UInt(1)]])", vm_error: "None [NON-CONSENSUS BREAKING]", return_type: Response(ResponseData( committed: true, @@ -396,11 +1432,11 @@ expression: result ), )), Success(ExpectedBlockOutput( - marf_hash: "57409ac5f17d76a2fb7bb5a96f6df5eda1d3689a23c626f7fbc104addf1cf32f", - evaluated_epoch: Epoch32, + marf_hash: "130b8d2b572b04c64fd7d9b34cd63ec0400a792f5a3bb1eb905a80074f10a68b", + evaluated_epoch: Epoch33, transactions: [ ExpectedTransactionOutput( - tx: "ContractCall(address: ST1AW6EKPGT61SQ9FNVDS17RKNWT8ZP582VF9HSCP, contract_name: foo_contract-Epoch3_1-Clarity3, function_name: bar, function_args: [[UInt(1)]])", + tx: "ContractCall(address: ST1AW6EKPGT61SQ9FNVDS17RKNWT8ZP582VF9HSCP, contract_name: foo_contract-Epoch2_2-Clarity2, function_name: bar, 
function_args: [[UInt(1)]])", vm_error: "None [NON-CONSENSUS BREAKING]", return_type: Response(ResponseData( committed: true, @@ -424,11 +1460,11 @@ expression: result ), )), Success(ExpectedBlockOutput( - marf_hash: "e5a30cda5e2d36eac5711d89e95b1ed54242fbf7ceb4c85c7bf9306766fd9ca0", - evaluated_epoch: Epoch32, + marf_hash: "ed142964c3dafbe32c916591ef45f871fbf31330d770884ec466aa13fdc57790", + evaluated_epoch: Epoch33, transactions: [ ExpectedTransactionOutput( - tx: "ContractCall(address: ST1AW6EKPGT61SQ9FNVDS17RKNWT8ZP582VF9HSCP, contract_name: foo_contract-Epoch3_2-Clarity1, function_name: bar, function_args: [[UInt(1)]])", + tx: "ContractCall(address: ST1AW6EKPGT61SQ9FNVDS17RKNWT8ZP582VF9HSCP, contract_name: foo_contract-Epoch2_3-Clarity1, function_name: bar, function_args: [[UInt(1)]])", vm_error: "None [NON-CONSENSUS BREAKING]", return_type: Response(ResponseData( committed: true, @@ -452,11 +1488,11 @@ expression: result ), )), Success(ExpectedBlockOutput( - marf_hash: "a3bcc31b429da98ee5235387601d803cb305a3db4af5fdb7ba9c3f5bcc0d41b2", - evaluated_epoch: Epoch32, + marf_hash: "7b61d1f32b9ee3c3672aee6fe1c31298c2b5a92f04304df3b2e1c16e51501dfe", + evaluated_epoch: Epoch33, transactions: [ ExpectedTransactionOutput( - tx: "ContractCall(address: ST1AW6EKPGT61SQ9FNVDS17RKNWT8ZP582VF9HSCP, contract_name: foo_contract-Epoch3_2-Clarity2, function_name: bar, function_args: [[UInt(1)]])", + tx: "ContractCall(address: ST1AW6EKPGT61SQ9FNVDS17RKNWT8ZP582VF9HSCP, contract_name: foo_contract-Epoch2_3-Clarity2, function_name: bar, function_args: [[UInt(1)]])", vm_error: "None [NON-CONSENSUS BREAKING]", return_type: Response(ResponseData( committed: true, @@ -480,11 +1516,11 @@ expression: result ), )), Success(ExpectedBlockOutput( - marf_hash: "ef8e8f6580de93248e1d1cc2f87bbdbefb3ddb86d403c2101b4620c5449365c9", - evaluated_epoch: Epoch32, + marf_hash: "3f01fae225c8345bd4d00385351a0e06c368975409fdb5ed550413a8c2752bde", + evaluated_epoch: Epoch33, transactions: [ 
ExpectedTransactionOutput( - tx: "ContractCall(address: ST1AW6EKPGT61SQ9FNVDS17RKNWT8ZP582VF9HSCP, contract_name: foo_contract-Epoch3_2-Clarity3, function_name: bar, function_args: [[UInt(1)]])", + tx: "ContractCall(address: ST1AW6EKPGT61SQ9FNVDS17RKNWT8ZP582VF9HSCP, contract_name: foo_contract-Epoch2_4-Clarity1, function_name: bar, function_args: [[UInt(1)]])", vm_error: "None [NON-CONSENSUS BREAKING]", return_type: Response(ResponseData( committed: true, @@ -508,119 +1544,91 @@ expression: result ), )), Success(ExpectedBlockOutput( - marf_hash: "a4d7a7e6071908e2780d665ce73f1fc248a06419857e1148af6a289b178f7562", + marf_hash: "c19b2af6f7214f293596ed8ee49f4a719d117699655fcf0dfdae6a94c993f318", evaluated_epoch: Epoch33, transactions: [ ExpectedTransactionOutput( - tx: "SmartContract(name: foo_contract-Epoch3_3-Clarity1, code_body: [..], clarity_version: Some(Clarity1))", - vm_error: "None [NON-CONSENSUS BREAKING]", - return_type: Response(ResponseData( - committed: true, - data: Bool(true), - )), - cost: ExecutionCost( - write_length: 121, - write_count: 2, - read_length: 1, - read_count: 1, - runtime: 11968, - ), - ), - ], - total_block_cost: ExecutionCost( - write_length: 121, - write_count: 2, - read_length: 1, - read_count: 1, - runtime: 11968, - ), - )), - Success(ExpectedBlockOutput( - marf_hash: "f93629deb181a073d7f1f7f26c1b4d2eaf844cff08f74c2fd9881fa19c4d8408", - evaluated_epoch: Epoch33, - transactions: [ - ExpectedTransactionOutput( - tx: "SmartContract(name: foo_contract-Epoch3_3-Clarity2, code_body: [..], clarity_version: Some(Clarity2))", + tx: "ContractCall(address: ST1AW6EKPGT61SQ9FNVDS17RKNWT8ZP582VF9HSCP, contract_name: foo_contract-Epoch2_4-Clarity2, function_name: bar, function_args: [[UInt(1)]])", vm_error: "None [NON-CONSENSUS BREAKING]", return_type: Response(ResponseData( committed: true, - data: Bool(true), + data: UInt(1), )), cost: ExecutionCost( - write_length: 121, - write_count: 2, - read_length: 1, - read_count: 1, - runtime: 11968, + 
write_length: 0, + write_count: 0, + read_length: 103, + read_count: 3, + runtime: 499, ), ), ], total_block_cost: ExecutionCost( - write_length: 121, - write_count: 2, - read_length: 1, - read_count: 1, - runtime: 11968, + write_length: 0, + write_count: 0, + read_length: 103, + read_count: 3, + runtime: 499, ), )), Success(ExpectedBlockOutput( - marf_hash: "0a43d8c0d910d9a2ca58b723c4533c63e1c32ca90dce7fab5cfe9216c5e34432", + marf_hash: "6742b63ec373563e0f67b06e09e48dcaeff96501d8b2971af9db4309d353d120", evaluated_epoch: Epoch33, transactions: [ ExpectedTransactionOutput( - tx: "SmartContract(name: foo_contract-Epoch3_3-Clarity3, code_body: [..], clarity_version: Some(Clarity3))", + tx: "ContractCall(address: ST1AW6EKPGT61SQ9FNVDS17RKNWT8ZP582VF9HSCP, contract_name: foo_contract-Epoch2_5-Clarity1, function_name: bar, function_args: [[UInt(1)]])", vm_error: "None [NON-CONSENSUS BREAKING]", return_type: Response(ResponseData( committed: true, - data: Bool(true), + data: UInt(1), )), cost: ExecutionCost( - write_length: 121, - write_count: 2, - read_length: 1, - read_count: 1, - runtime: 11968, + write_length: 0, + write_count: 0, + read_length: 103, + read_count: 3, + runtime: 499, ), ), ], total_block_cost: ExecutionCost( - write_length: 121, - write_count: 2, - read_length: 1, - read_count: 1, - runtime: 11968, + write_length: 0, + write_count: 0, + read_length: 103, + read_count: 3, + runtime: 499, ), )), Success(ExpectedBlockOutput( - marf_hash: "ec3fa2b2fa5766499c2ee43c70c59c09d65ab9d04b04bcecf86117224243ebf7", + marf_hash: "d8649197d538722552412dce20ce4c197035bf92b5c247b2668361d9a4755942", evaluated_epoch: Epoch33, transactions: [ ExpectedTransactionOutput( - tx: "SmartContract(name: foo_contract-Epoch3_3-Clarity4, code_body: [..], clarity_version: Some(Clarity4))", + tx: "ContractCall(address: ST1AW6EKPGT61SQ9FNVDS17RKNWT8ZP582VF9HSCP, contract_name: foo_contract-Epoch2_5-Clarity2, function_name: bar, function_args: [[UInt(1)]])", vm_error: "None 
[NON-CONSENSUS BREAKING]", return_type: Response(ResponseData( committed: true, - data: Bool(true), + data: UInt(1), )), cost: ExecutionCost( - write_length: 121, - write_count: 2, - read_length: 1, - read_count: 1, - runtime: 11968, + write_length: 0, + write_count: 0, + read_length: 103, + read_count: 3, + runtime: 499, ), ), ], total_block_cost: ExecutionCost( - write_length: 121, - write_count: 2, - read_length: 1, - read_count: 1, - runtime: 11968, + write_length: 0, + write_count: 0, + read_length: 103, + read_count: 3, + runtime: 499, ), )), Success(ExpectedBlockOutput( - marf_hash: "d3451f558343a34dc1013cacf3f9c07d04f76bf84e1f5ff7a5738230364c453f", + marf_hash: "6cde15ab15ad480cd95a0f4beb707e5db334c55cff6027f37bc0b1b8cb44ea41", evaluated_epoch: Epoch33, transactions: [ ExpectedTransactionOutput( @@ -648,7 +1656,7 @@ expression: result ), )), Success(ExpectedBlockOutput( - marf_hash: "56f27df16675d8dbe666f8d0793c8f31fe8a7616436f0638041dad6aaf12ffd7", + marf_hash: "d6c085c796f234102b6de6259e0e014aa88e789923d00fb8f7a25857cc9f1a15", evaluated_epoch: Epoch33, transactions: [ ExpectedTransactionOutput( @@ -676,7 +1684,7 @@ expression: result ), )), Success(ExpectedBlockOutput( - marf_hash: "61ed7b44320cc7100664c4732d431d0e3103869bbb8e7b095a2a2cb256b32f41", + marf_hash: "645db2f08fb74c7cd7830cb90d8db8dccc3c4e37b89be42d3c7b2bd214f58556", evaluated_epoch: Epoch33, transactions: [ ExpectedTransactionOutput( @@ -704,7 +1712,7 @@ expression: result ), )), Success(ExpectedBlockOutput( - marf_hash: "3dd0da4b5c81ddb60394adc0078f07951b94be164b0acbb3dc18dd7a824f38b3", + marf_hash: "43e8fb9110329a7a8892e227509f5833b3a0cfe54f4c0c1d1ae58d17622d6e01", evaluated_epoch: Epoch33, transactions: [ ExpectedTransactionOutput( @@ -732,7 +1740,7 @@ expression: result ), )), Success(ExpectedBlockOutput( - marf_hash: "d615a756f0feeda86821fc3df4176ac8ad81403fc4d8cc905cbe23e32eeb97a2", + marf_hash: "6564f3eccdf42fb951cc61b687b493b339c68f3a185328eede8f3bbc2d60435d", evaluated_epoch: Epoch33, 
transactions: [ ExpectedTransactionOutput( @@ -760,7 +1768,7 @@ expression: result ), )), Success(ExpectedBlockOutput( - marf_hash: "13274516ac921e85a216911865d829fde2a03ec6069f184aea8d872cd2d68691", + marf_hash: "1c926315fee79fd91f30ece3612ea499866ff5d8e26ea76947d3147675d2463c", evaluated_epoch: Epoch33, transactions: [ ExpectedTransactionOutput( @@ -788,7 +1796,7 @@ expression: result ), )), Success(ExpectedBlockOutput( - marf_hash: "03f0b38ef9c1689d6b6d94c6dafb3559aeca63b3e4a20e2b3095afe470e228a1", + marf_hash: "6fd0a270a21d6defe7796db517f01351dbf4690a3087b2ad045530f0f10f0d9c", evaluated_epoch: Epoch33, transactions: [ ExpectedTransactionOutput( @@ -816,7 +1824,7 @@ expression: result ), )), Success(ExpectedBlockOutput( - marf_hash: "bddb1e348ec4af76001dd26054a5b15f7ee73734a33a96d1f42b8d070401f3d0", + marf_hash: "12dcc58b837c4780423c76953847dcfa787adfc3a12745319e245cd944aa0c17", evaluated_epoch: Epoch33, transactions: [ ExpectedTransactionOutput( @@ -844,7 +1852,7 @@ expression: result ), )), Success(ExpectedBlockOutput( - marf_hash: "b36b6a71291844658462fd63b1404505dac195ab6d1e0f617ce741b6763bc002", + marf_hash: "9bcf59d22afcbbdd3ea4660e823b385ebc9b0098310c893b0810378041d63d06", evaluated_epoch: Epoch33, transactions: [ ExpectedTransactionOutput( @@ -872,7 +1880,7 @@ expression: result ), )), Success(ExpectedBlockOutput( - marf_hash: "1509e7208cbc57f11f7130e9a723d88a55bd7dd7d1db21bfebde27694f82a3c9", + marf_hash: "157c33212448710e558bf87edcb73a65f7c7d64ea25f11c0d1197e9c3ebe7b85", evaluated_epoch: Epoch33, transactions: [ ExpectedTransactionOutput( @@ -900,7 +1908,7 @@ expression: result ), )), Success(ExpectedBlockOutput( - marf_hash: "b35dd5af7b0489b95ad09fc4a4a0cd824e0722d54dcfefb97331062ccad7d411", + marf_hash: "d36b236e0c9430cf3c6782b8d1d56b777c83328861d428bcc72a6fe497d4c5ee", evaluated_epoch: Epoch33, transactions: [ ExpectedTransactionOutput( @@ -928,7 +1936,7 @@ expression: result ), )), Success(ExpectedBlockOutput( - marf_hash: 
"f93386433ae622b3ccbac5c4f8fca3de479af09ee16ad7935a521b68f5a3713f", + marf_hash: "29c9591ed858e7f921eb0af320f8823ca5c80255a4003d86d444bc5cbb9128ad", evaluated_epoch: Epoch33, transactions: [ ExpectedTransactionOutput( @@ -956,7 +1964,7 @@ expression: result ), )), Success(ExpectedBlockOutput( - marf_hash: "3717b93edcec46ca2e785a0709ffd725ff078b2cdf3ddc8b7fa0eb9a2bb8e4d0", + marf_hash: "76de1f6c346b550f60eca65ae89861c1fd7a73f463ea928791bd2e63ff15b18f", evaluated_epoch: Epoch33, transactions: [ ExpectedTransactionOutput( diff --git a/stackslib/src/config/mod.rs b/stackslib/src/config/mod.rs index 3b79f72ba4c..82d8c0bb24b 100644 --- a/stackslib/src/config/mod.rs +++ b/stackslib/src/config/mod.rs @@ -584,33 +584,19 @@ impl Config { fn check_nakamoto_config(&self, burnchain: &Burnchain) { let epochs = self.burnchain.get_epoch_list(); - let Some(epoch_30) = epochs.get(StacksEpochId::Epoch30) else { - // no Epoch 3.0, so just return + if epochs + .iter() + .all(|epoch| epoch.epoch_id < StacksEpochId::Epoch30) + { return; - }; + } if burnchain.pox_constants.prepare_length < 3 { panic!( "FATAL: Nakamoto rules require a prepare length >= 3. Prepare length set to {}", burnchain.pox_constants.prepare_length ); } - if burnchain.is_in_prepare_phase(epoch_30.start_height) { - panic!( - "FATAL: Epoch 3.0 must start *during* a reward phase, not a prepare phase. Epoch 3.0 start set to: {}. PoX Parameters: {:?}", - epoch_30.start_height, - &burnchain.pox_constants - ); - } - let activation_reward_cycle = burnchain - .block_height_to_reward_cycle(epoch_30.start_height) - .expect("FATAL: Epoch 3.0 starts before the first burnchain block"); - if activation_reward_cycle < 2 { - panic!( - "FATAL: Epoch 3.0 must start at or after the second reward cycle. Epoch 3.0 start set to: {}. 
PoX Parameters: {:?}", - epoch_30.start_height, - &burnchain.pox_constants - ); - } + StacksEpoch::validate_nakamoto_transition_schedule(&epochs, burnchain); } /// Connect to the MempoolDB using the configured cost estimation @@ -686,6 +672,7 @@ impl Config { "FATAL: v1 unlock height is at a reward cycle boundary\nburnchain: {burnchain:?}" ); } + StacksEpoch::validate_nakamoto_transition_schedule(epochs, burnchain); } // TODO: add tests from mutation testing results #4866 diff --git a/stackslib/src/core/mod.rs b/stackslib/src/core/mod.rs index 9ad2d1b950a..8079457ee49 100644 --- a/stackslib/src/core/mod.rs +++ b/stackslib/src/core/mod.rs @@ -29,6 +29,7 @@ use stacks_common::types::{EpochList as GenericEpochList, StacksEpoch as Generic pub use self::mempool::MemPoolDB; use crate::burnchains::bitcoin::indexer::get_bitcoin_stacks_epochs; use crate::burnchains::bitcoin::BitcoinNetworkType; +use crate::burnchains::Burnchain; use crate::chainstate::burn::ConsensusHash; pub mod mempool; pub mod nonce_cache; @@ -883,6 +884,9 @@ pub trait StacksEpochExtension { bitcoin_network: BitcoinNetworkType, configured_epochs: Option<&EpochList>, ) -> EpochList; + /// Validates that Epoch 3.0 activation (if present) satisfies all required safety + /// invariants for Nakamoto transition, using the provided burnchain configuration. + fn validate_nakamoto_transition_schedule(epochs: &[StacksEpoch], burnchain: &Burnchain); } impl StacksEpochExtension for StacksEpoch { @@ -2347,6 +2351,118 @@ impl StacksEpochExtension for StacksEpoch { } assert_eq!(epoch_end_height, STACKS_EPOCH_MAX); + EpochList::new(&epochs) } + + /// Validates that Epoch 3.0 activation (if present) satisfies all required safety + /// invariants for Nakamoto transition, using the provided burnchain configuration. + /// + /// This function is only relevant when **Nakamoto epochs** (Epoch 3.0+) exist in the + /// epoch list. If no post Epoch 2.5 is defined, the function returns early with no checks. 
+ /// + /// ### Required Invariants for Safe Epoch 3.0 Activation + /// + /// 1. **Epoch 2.5 must exist** and start **before** the prepare phase of the reward + /// cycle immediately preceding Epoch 3.0. + /// 2. **Epoch 2.5 must end exactly at the start of Epoch 3.0** — they are contiguous. + /// 3. **Epoch 2.5 and Epoch 3.0 must be in different reward cycles** + /// 4. **Epoch 3.0 must start during a reward phase**, not in a prepare phase. + /// 5. **Epoch 3.0 must not start at a reward cycle boundary** (i.e., block height + /// modulo `reward_cycle_length` must not be 0 or 1). + /// 6. **Epoch 3.0 must activate at or after reward cycle 2** (cycle 0 and 1 are + /// reserved for early network bootstrapping). + /// + /// # Parameters + /// + /// - `epochs`: List of defined Stacks epochs. + /// - `burnchain`: Burnchain configuration, providing PoX reward cycle parameters + /// (`reward_cycle_length`, `prepare_length`) and height-to-cycle utilities. + /// + /// # Panics + /// + /// This function panics if any of the invariants fail. 
+ /// These panics are intended to catch **misconfigured networks** at startup + fn validate_nakamoto_transition_schedule(epochs: &[StacksEpoch], burnchain: &Burnchain) { + // Early return if no Nakamoto epochs are defined + if epochs + .iter() + .all(|epoch| epoch.epoch_id < StacksEpochId::Epoch30) + { + return; + } + let epoch_3_0 = epochs + .iter() + .find(|e| e.epoch_id == StacksEpochId::Epoch30) + .expect("FATAL: Cannot activate Epoch 3.0 without specifying its activation height"); + let epoch_2_5 = epochs + .iter() + .find(|e| e.epoch_id == StacksEpochId::Epoch25) + .expect("FATAL: Epoch 2.5 not found"); + let epoch_3_0_start = epoch_3_0.start_height; + let epoch_2_5_start = epoch_2_5.start_height; + let epoch_2_5_end = epoch_2_5.end_height; + + let reward_cycle_length = u64::from(burnchain.pox_constants.reward_cycle_length); + let prepare_length = u64::from(burnchain.pox_constants.prepare_length); + + assert!( + !burnchain.is_in_prepare_phase(epoch_3_0_start), + "FATAL: Epoch 3.0 must start *during* a reward phase, not prepare phase. \ + Activation height: {epoch_3_0_start}, PoX Parameters: {:?}", + burnchain.pox_constants + ); + + let activation_reward_cycle = burnchain + .block_height_to_reward_cycle(epoch_3_0_start) + .expect("FATAL: Epoch 3.0 cannot start before the first burnchain block"); + assert!( + activation_reward_cycle >= 2, + "FATAL: Epoch 3.0 must start at or after reward cycle 2. 
\ + Activation height: {epoch_3_0_start}, cycle: {activation_reward_cycle}, \ + PoX Parameters: {:?}", + burnchain.pox_constants + ); + + let epoch_2_5_reward_cycle = epoch_2_5_start / reward_cycle_length; + let epoch_3_0_reward_cycle = epoch_3_0_start / reward_cycle_length; + // Start of prepare phase in the cycle before Epoch 3.0 + let prior_cycle = epoch_3_0_reward_cycle.saturating_sub(1); + let epoch_3_0_prepare_phase_start = + prior_cycle * reward_cycle_length + (reward_cycle_length - prepare_length); + assert!( + epoch_2_5_start < epoch_3_0_prepare_phase_start, + "FATAL: Epoch 2.5 must start before the prepare phase of the cycle prior to Epoch 3.0. \ + Epoch 2.5 start: {epoch_2_5_start}, \ + Epoch 3.0 prior cycle prepare phase start: {epoch_3_0_prepare_phase_start}, \ + PoX Parameters: {:?}", + burnchain.pox_constants + ); + + assert_eq!( + epoch_2_5_end, epoch_3_0_start, + "FATAL: Epoch 2.5 end must equal Epoch 3.0 start. \ + End: {epoch_2_5_end}, Start: {epoch_3_0_start}" + ); + + assert_ne!( + epoch_2_5_reward_cycle, epoch_3_0_reward_cycle, + "FATAL: Epoch 2.5 and Epoch 3.0 must not be in the same reward cycle. \ + Epoch 2.5 cycle: {epoch_2_5_reward_cycle}, \ + Epoch 3.0 cycle: {epoch_3_0_reward_cycle}, \ + PoX Parameters: {:?}", + burnchain.pox_constants + ); + + // Epoch 2.5 has some confusing boundary logic for calculating the reward set hence why + // the boundary is viewed as both 0 and 1. + assert!( + epoch_3_0_start % reward_cycle_length > 1, + "FATAL: Epoch 3.0 must not start at a reward cycle boundary (offset 0 or 1). 
\ + Activation height: {epoch_3_0_start}, \ + offset: {}, PoX Parameters: {:?}", + epoch_3_0_start % reward_cycle_length, + burnchain.pox_constants + ); + } } diff --git a/stackslib/src/net/tests/mod.rs b/stackslib/src/net/tests/mod.rs index 94cd916f847..c6dd5d00cf8 100644 --- a/stackslib/src/net/tests/mod.rs +++ b/stackslib/src/net/tests/mod.rs @@ -25,7 +25,7 @@ pub mod relay; use std::collections::{HashMap, HashSet}; use std::net::{IpAddr, Ipv4Addr, SocketAddr}; -use clarity::types::EpochList; +use clarity::types::{EpochList, StacksEpochId}; use clarity::vm::costs::ExecutionCost; use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; use libstackerdb::StackerDBChunkData; @@ -186,6 +186,13 @@ impl NakamotoBootPlan { chainstate_config.test_stackers = Some(self.test_stackers.clone()); chainstate_config.burnchain.pox_constants = self.pox_constants.clone(); + if let Some(epochs) = chainstate_config.epochs.as_ref() { + StacksEpoch::validate_nakamoto_transition_schedule( + epochs, + &chainstate_config.burnchain, + ); + } + chainstate_config } @@ -426,17 +433,19 @@ impl NakamotoBootPlan { } } - /// Make a chainstate and transition it into the Nakamoto epoch. + /// Make a chainstate capable of transitioning into the Nakamoto epoch. /// The node needs to be stacking; otherwise, Nakamoto won't activate. 
- pub fn boot_nakamoto_chainstate( + pub fn to_chainstate( self, observer: Option<&TestEventObserver>, + current_block: Option, ) -> TestChainstate<'_> { - let chainstate_config = self.build_nakamoto_chainstate_config(); + let mut chainstate_config = self.build_nakamoto_chainstate_config(); + if let Some(current_block) = current_block { + chainstate_config.current_block = current_block; + } let mut chain = TestChainstate::new_with_observer(chainstate_config, observer); chain.mine_malleablized_blocks = self.malleablized_blocks; - let mut chain_nonce = 0; - chain.advance_to_nakamoto_epoch(&self.private_key, &mut chain_nonce); chain } @@ -475,18 +484,13 @@ impl NakamotoBootPlan { other_peers.push(other_peer); } - let mut peer_nonce = 0; - let mut other_peer_nonces = vec![0; other_peers.len()]; - // Advance primary peer and other peers to Nakamoto epoch peer.chain - .advance_to_nakamoto_epoch(&self.private_key, &mut peer_nonce); - for (other_peer, other_peer_nonce) in - other_peers.iter_mut().zip(other_peer_nonces.iter_mut()) - { + .advance_to_epoch_boundary(&self.private_key, StacksEpochId::Epoch30); + for other_peer in &mut other_peers { other_peer .chain - .advance_to_nakamoto_epoch(&self.private_key, other_peer_nonce); + .advance_to_epoch_boundary(&self.private_key, StacksEpochId::Epoch30); } (peer, other_peers)