diff --git a/.github/actions/dockerfiles/Dockerfile.debian-source b/.github/actions/dockerfiles/Dockerfile.debian-source index b8da585fe2..80c434e8d5 100644 --- a/.github/actions/dockerfiles/Dockerfile.debian-source +++ b/.github/actions/dockerfiles/Dockerfile.debian-source @@ -24,5 +24,5 @@ RUN --mount=type=tmpfs,target=${BUILD_DIR} cp -R /src/. ${BUILD_DIR}/ \ && cp -R ${BUILD_DIR}/target/${TARGET}/release/. /out FROM --platform=${TARGETPLATFORM} debian:bookworm -COPY --from=build /out/stacks-node /out/stacks-signer /bin/ +COPY --from=build /out/stacks-node /out/stacks-signer /out/stacks-inspect /bin/ CMD ["stacks-node", "mainnet"] diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index ebe9f433a9..6dec7efb11 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -122,10 +122,18 @@ jobs: - tests::signer::v0::signer_set_rollover - tests::signer::v0::signing_in_0th_tenure_of_reward_cycle - tests::signer::v0::continue_after_tenure_extend + - tests::signer::v0::tenure_extend_after_idle_signers + - tests::signer::v0::tenure_extend_after_idle_miner + - tests::signer::v0::tenure_extend_succeeds_after_rejected_attempt + - tests::signer::v0::stx_transfers_dont_effect_idle_timeout + - tests::signer::v0::idle_tenure_extend_active_mining - tests::signer::v0::multiple_miners_with_custom_chain_id - tests::signer::v0::block_commit_delay - tests::signer::v0::continue_after_fast_block_no_sortition - tests::signer::v0::block_validation_response_timeout + - tests::signer::v0::tenure_extend_after_bad_commit + - tests::signer::v0::block_proposal_max_age_rejections + - tests::signer::v0::global_acceptance_depends_on_block_announcement - tests::nakamoto_integrations::burn_ops_integration_test - tests::nakamoto_integrations::check_block_heights - tests::nakamoto_integrations::clarity_burn_state @@ -136,10 +144,13 @@ jobs: - tests::nakamoto_integrations::mock_mining - tests::nakamoto_integrations::multiple_miners 
- tests::nakamoto_integrations::follower_bootup_across_multiple_cycles + - tests::nakamoto_integrations::nakamoto_lockup_events - tests::nakamoto_integrations::utxo_check_on_startup_panic - tests::nakamoto_integrations::utxo_check_on_startup_recover - tests::nakamoto_integrations::v3_signer_api_endpoint + - tests::nakamoto_integrations::test_shadow_recovery - tests::nakamoto_integrations::signer_chainstate + - tests::nakamoto_integrations::sip029_coinbase_change - tests::nakamoto_integrations::clarity_cost_spend_down - tests::nakamoto_integrations::v3_blockbyheight_api_endpoint # TODO: enable these once v1 signer is supported by a new nakamoto epoch diff --git a/.github/workflows/clippy.yml b/.github/workflows/clippy.yml new file mode 100644 index 0000000000..4be5785f3d --- /dev/null +++ b/.github/workflows/clippy.yml @@ -0,0 +1,40 @@ +## Perform Clippy checks - currently set to defaults +## https://github.com/rust-lang/rust-clippy#usage +## https://rust-lang.github.io/rust-clippy/master/index.html +## +name: Clippy Checks + +# Only run when: +# - PRs are (re)opened against develop branch +on: + pull_request: + branches: + - develop + types: + - opened + - reopened + - synchronize + +jobs: + clippy_check: + name: Clippy Check + runs-on: ubuntu-latest + steps: + - name: Checkout the latest code + id: git_checkout + uses: actions/checkout@v3 + - name: Define Rust Toolchain + id: define_rust_toolchain + run: echo "RUST_TOOLCHAIN=$(cat ./rust-toolchain)" >> $GITHUB_ENV + - name: Setup Rust Toolchain + id: setup_rust_toolchain + uses: actions-rust-lang/setup-rust-toolchain@v1 + with: + toolchain: ${{ env.RUST_TOOLCHAIN }} + components: clippy + - name: Clippy + id: clippy + uses: actions-rs/clippy-check@v1 + with: + token: ${{ secrets.GITHUB_TOKEN }} + args: -p libstackerdb -p stacks-signer -p pox-locking -p clarity -p libsigner --no-deps --tests --all-features -- -D warnings \ No newline at end of file diff --git a/.github/workflows/p2p-tests.yml 
b/.github/workflows/p2p-tests.yml index 1c33eca0fb..81790bdc12 100644 --- a/.github/workflows/p2p-tests.yml +++ b/.github/workflows/p2p-tests.yml @@ -43,10 +43,10 @@ jobs: - net::tests::convergence::test_walk_star_15_org_biased - net::tests::convergence::test_walk_inbound_line_15 - net::api::tests::postblock_proposal::test_try_make_response - - net::server::tests::test_http_10_threads_getinfo - - net::server::tests::test_http_10_threads_getblock - - net::server::tests::test_http_too_many_clients - - net::server::tests::test_http_slow_client + - net::server::test::test_http_10_threads_getinfo + - net::server::test::test_http_10_threads_getblock + - net::server::test::test_http_too_many_clients + - net::server::test::test_http_slow_client steps: ## Setup test environment - name: Setup Test Environment diff --git a/.github/workflows/stacks-core-tests.yml b/.github/workflows/stacks-core-tests.yml index 98eb5cf92c..457a2aaefd 100644 --- a/.github/workflows/stacks-core-tests.yml +++ b/.github/workflows/stacks-core-tests.yml @@ -18,55 +18,6 @@ concurrency: cancel-in-progress: ${{ github.event_name == 'pull_request' }} jobs: - # Full genesis test with code coverage - full-genesis: - name: Full Genesis Test - runs-on: ubuntu-latest - strategy: - ## Continue with the test matrix even if we've had a failure - fail-fast: false - ## Run a maximum of 2 concurrent tests from the test matrix - max-parallel: 2 - matrix: - test-name: - - neon_integrations::bitcoind_integration_test - steps: - ## Setup test environment - - name: Setup Test Environment - id: setup_tests - uses: stacks-network/actions/stacks-core/testenv@main - with: - genesis: true - btc-version: "25.0" - - ## Run test matrix using restored cache of archive file - ## - Test will timeout after env.TEST_TIMEOUT minutes - - name: Run Tests - id: run_tests - timeout-minutes: ${{ fromJSON(env.TEST_TIMEOUT) }} - uses: stacks-network/actions/stacks-core/run-tests@main - with: - test-name: ${{ matrix.test-name }} - threads: 1 
- archive-file: ~/genesis_archive.tar.zst - - ## Upload code coverage file - - name: Code Coverage - id: codecov - uses: stacks-network/actions/codecov@main - with: - test-name: large_genesis - filename: ./lcov.info - - - name: Status Output - run: | - echo "run_tests: ${{ steps.run_tests.outputs.status }}" - echo "codecov: ${{ steps.codecov.outputs.status }}" - - - name: Check Failures - if: steps.run_tests.outputs.status == 'failure' || steps.codecov.outputs.status == 'failure' - run: exit 1 - # Unit tests with code coverage unit-tests: name: Unit Tests @@ -186,7 +137,6 @@ jobs: runs-on: ubuntu-latest if: always() needs: - - full-genesis - open-api-validation - core-contracts-clarinet-test steps: diff --git a/CHANGELOG.md b/CHANGELOG.md index 2f9d50c249..d9474493fc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,45 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ## [Unreleased] +### Added +- Add `tenure_timeout_secs` to the miner for determining when a time-based tenure extend should be attempted. + +### Changed + +- Nodes will assume that all PoX anchor blocks exist by default, and stall initial block download indefinitely to await their arrival (#5502) + +## [3.1.0.0.1] + +### Added + +- A miner will now generate a tenure-extend when at least 70% of the signers have confirmed that they are willing to allow one, via the new timestamp included in block responses. This allows the miner to refresh its budget in between Bitcoin blocks. 
([#5476](https://github.com/stacks-network/stacks-core/discussions/5476)) + +### Changed + +## [3.1.0.0.0] + +### Added + +- **SIP-029 consensus rules, activating in epoch 3.1 at block 875,000** (see [SIP-029](https://github.com/stacksgov/sips/blob/main/sips/sip-029/sip-029-halving-alignment.md) for details) +- New RPC endpoints + - `/v2/clarity/marf/:marf_key_hash` + - `/v2/clarity/metadata/:principal/:contract_name/:clarity_metadata_key` +- When a proposed block is validated by a node, the block can be validated even when the block version is different than the node's default ([#5539](https://github.com/stacks-network/stacks-core/pull/5539)) + +### Changed + +## [3.0.0.0.4] + +### Added + +### Changed + +- Use the same burn view loader in both block validation and block processing + +## [3.0.0.0.3] + +### Added + ### Changed - Add index for StacksBlockId to nakamoto block headers table (improves node performance) - Remove the panic for reporting DB deadlocks (just error and continue waiting) @@ -14,6 +53,16 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE - Add `block_commit_delay_ms` to the config file to control the time to wait after seeing a new burn block, before submitting a block commit, to allow time for the first Nakamoto block of the new tenure to be mined, allowing this miner to avoid the need to RBF the block commit. - Add `tenure_cost_limit_per_block_percentage` to the miner config file to control the percentage remaining tenure cost limit to consume per nakamoto block. - Add `/v3/blocks/height/:block_height` rpc endpoint +- If the winning miner of a sortition is committed to the wrong parent tenure, the previous miner can immediately tenure extend and continue mining since the winning miner would never be able to propose a valid block. 
(#5361) + +## [3.0.0.0.2] + +### Added + +### Changed +- Fixes a few bugs in the relayer and networking stack + - detects and deprioritizes unhealthy replicas + - fixes an issue in the p2p stack which was preventing it from caching the reward set. ## [3.0.0.0.1] diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 8d6c3aabba..b8c63abc2c 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -579,7 +579,7 @@ _Do_ document things that are not clear, e.g.: Keep in mind that better variable names can reduce the need for comments, e.g.: - `burnblock_height` instead of `height` may eliminate the need to comment that `height` refers to a burnblock height -- `process_microblocks` instead of `process_blocks` is more correct, and may eliminate the need to to explain that the inputs are microblocks +- `process_microblocks` instead of `process_blocks` is more correct, and may eliminate the need to explain that the inputs are microblocks - `add_transaction_to_microblock` explains more than `handle_transaction`, and reduces the need to even read the comment # Licensing and contributor license agreement diff --git a/Cargo.lock b/Cargo.lock index 8a3769b6a8..47621472bc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -637,7 +637,7 @@ name = "clarity" version = "0.0.1" dependencies = [ "assert-json-diff 1.1.0", - "hashbrown", + "hashbrown 0.14.3", "integer-sqrt", "lazy_static", "mutants", @@ -1411,13 +1411,19 @@ dependencies = [ "serde", ] +[[package]] +name = "hashbrown" +version = "0.15.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289" + [[package]] name = "hashlink" version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6ba4ff7128dee98c7dc9794b6a411377e1404dba1c97deb8d1a55297bd25d8af" dependencies = [ - "hashbrown", + "hashbrown 0.14.3", ] [[package]] @@ -1683,12 +1689,12 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.2.3" +version 
= "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "233cf39063f058ea2caae4091bf4a3ef70a653afbc026f5c4a4135d114e3c177" +checksum = "62f822373a4fe84d4bb149bf54e584a7f4abec90e072ed49cda0edea5b95471f" dependencies = [ "equivalent", - "hashbrown", + "hashbrown 0.15.2", ] [[package]] @@ -1841,7 +1847,7 @@ name = "libsigner" version = "0.0.1" dependencies = [ "clarity", - "hashbrown", + "hashbrown 0.14.3", "lazy_static", "libc", "libstackerdb", @@ -2621,7 +2627,7 @@ checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" name = "relay-server" version = "0.0.1" dependencies = [ - "hashbrown", + "hashbrown 0.14.3", ] [[package]] @@ -3270,7 +3276,7 @@ dependencies = [ "chrono", "curve25519-dalek 2.0.0", "ed25519-dalek", - "hashbrown", + "hashbrown 0.14.3", "lazy_static", "libc", "nix", @@ -3305,7 +3311,7 @@ dependencies = [ "base64 0.12.3", "chrono", "clarity", - "hashbrown", + "hashbrown 0.14.3", "http-types", "lazy_static", "libc", @@ -3332,7 +3338,6 @@ dependencies = [ "tikv-jemallocator", "tiny_http", "tokio", - "toml", "tracing", "tracing-subscriber", "url", @@ -3346,7 +3351,7 @@ dependencies = [ "backoff", "clap 4.5.0", "clarity", - "hashbrown", + "hashbrown 0.14.3", "lazy_static", "libsigner", "libstackerdb", @@ -3385,7 +3390,7 @@ dependencies = [ "criterion", "curve25519-dalek 2.0.0", "ed25519-dalek", - "hashbrown", + "hashbrown 0.14.3", "integer-sqrt", "lazy_static", "libc", @@ -3421,6 +3426,7 @@ dependencies = [ "stx-genesis", "tikv-jemallocator", "time 0.2.27", + "toml", "url", "winapi 0.3.9", ] diff --git a/Cargo.toml b/Cargo.toml index c00c223c47..194e946ef4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,7 +21,8 @@ rand = "0.8" rand_chacha = "0.3.1" tikv-jemallocator = "0.5.4" rusqlite = { version = "0.31.0", features = ["blob", "serde_json", "i128_blob", "bundled", "trace"] } -thiserror = { version = "1.0.65" } +thiserror = "1.0.65" +toml = "0.5.6" # Use a bit more than default optimization for # dev 
builds to speed up test execution diff --git a/clarity/src/vm/analysis/analysis_db.rs b/clarity/src/vm/analysis/analysis_db.rs index 1bef2834a8..36e1f8c970 100644 --- a/clarity/src/vm/analysis/analysis_db.rs +++ b/clarity/src/vm/analysis/analysis_db.rs @@ -50,11 +50,11 @@ impl<'a> AnalysisDatabase<'a> { self.begin(); let result = f(self).or_else(|e| { self.roll_back() - .map_err(|e| CheckErrors::Expects(format!("{e:?}")).into())?; + .map_err(|e| CheckErrors::Expects(format!("{e:?}")))?; Err(e) })?; self.commit() - .map_err(|e| CheckErrors::Expects(format!("{e:?}")).into())?; + .map_err(|e| CheckErrors::Expects(format!("{e:?}")))?; Ok(result) } @@ -130,9 +130,9 @@ impl<'a> AnalysisDatabase<'a> { .map_err(|_| CheckErrors::Expects("Bad data deserialized from DB".into())) }) .transpose()? - .and_then(|mut x| { + .map(|mut x| { x.canonicalize_types(epoch); - Some(x) + x })) } diff --git a/clarity/src/vm/analysis/arithmetic_checker/mod.rs b/clarity/src/vm/analysis/arithmetic_checker/mod.rs index aa69f650f0..429907b4c6 100644 --- a/clarity/src/vm/analysis/arithmetic_checker/mod.rs +++ b/clarity/src/vm/analysis/arithmetic_checker/mod.rs @@ -68,7 +68,7 @@ impl std::fmt::Display for Error { } } -impl<'a> ArithmeticOnlyChecker<'a> { +impl ArithmeticOnlyChecker<'_> { pub fn check_contract_cost_eligible(contract_analysis: &mut ContractAnalysis) { let is_eligible = ArithmeticOnlyChecker::run(contract_analysis).is_ok(); contract_analysis.is_cost_contract_eligible = is_eligible; diff --git a/clarity/src/vm/analysis/contract_interface_builder/mod.rs b/clarity/src/vm/analysis/contract_interface_builder/mod.rs index 6d91f33b1c..4e0aa9a0cb 100644 --- a/clarity/src/vm/analysis/contract_interface_builder/mod.rs +++ b/clarity/src/vm/analysis/contract_interface_builder/mod.rs @@ -276,7 +276,7 @@ impl ContractInterfaceFunction { outputs: ContractInterfaceFunctionOutput { type_f: match function_type { FunctionType::Fixed(FixedFunction { returns, .. 
}) => { - ContractInterfaceAtomType::from_type_signature(&returns) + ContractInterfaceAtomType::from_type_signature(returns) } _ => return Err(CheckErrors::Expects( "Contract functions should only have fixed function return types!" @@ -287,7 +287,7 @@ impl ContractInterfaceFunction { }, args: match function_type { FunctionType::Fixed(FixedFunction { args, .. }) => { - ContractInterfaceFunctionArg::from_function_args(&args) + ContractInterfaceFunctionArg::from_function_args(args) } _ => { return Err(CheckErrors::Expects( diff --git a/clarity/src/vm/analysis/errors.rs b/clarity/src/vm/analysis/errors.rs index f86308f8d9..5c3f68c7f9 100644 --- a/clarity/src/vm/analysis/errors.rs +++ b/clarity/src/vm/analysis/errors.rs @@ -207,10 +207,10 @@ impl CheckErrors { /// Does this check error indicate that the transaction should be /// rejected? pub fn rejectable(&self) -> bool { - match &self { - CheckErrors::SupertypeTooLarge | CheckErrors::Expects(_) => true, - _ => false, - } + matches!( + self, + CheckErrors::SupertypeTooLarge | CheckErrors::Expects(_) + ) } } @@ -323,7 +323,7 @@ pub fn check_arguments_at_most(expected: usize, args: &[T]) -> Result<(), Che } } -fn formatted_expected_types(expected_types: &Vec) -> String { +fn formatted_expected_types(expected_types: &[TypeSignature]) -> String { let mut expected_types_joined = format!("'{}'", expected_types[0]); if expected_types.len() > 2 { diff --git a/clarity/src/vm/analysis/mod.rs b/clarity/src/vm/analysis/mod.rs index 6a8f64f1b2..8dde917df9 100644 --- a/clarity/src/vm/analysis/mod.rs +++ b/clarity/src/vm/analysis/mod.rs @@ -17,7 +17,6 @@ pub mod analysis_db; pub mod arithmetic_checker; pub mod contract_interface_builder; -#[allow(clippy::result_large_err)] pub mod errors; pub mod read_only_checker; pub mod trait_checker; @@ -52,7 +51,7 @@ pub fn mem_type_check( epoch: StacksEpochId, ) -> CheckResult<(Option, ContractAnalysis)> { let contract_identifier = QualifiedContractIdentifier::transient(); - let mut contract = 
build_ast_with_rules( + let contract = build_ast_with_rules( &contract_identifier, snippet, &mut (), @@ -68,7 +67,7 @@ pub fn mem_type_check( let cost_tracker = LimitedCostTracker::new_free(); match run_analysis( &QualifiedContractIdentifier::transient(), - &mut contract, + &contract, &mut analysis_db, false, cost_tracker, @@ -120,6 +119,7 @@ pub fn type_check( .map_err(|(e, _cost_tracker)| e) } +#[allow(clippy::too_many_arguments)] pub fn run_analysis( contract_identifier: &QualifiedContractIdentifier, expressions: &[SymbolicExpression], @@ -148,7 +148,8 @@ pub fn run_analysis( | StacksEpochId::Epoch23 | StacksEpochId::Epoch24 | StacksEpochId::Epoch25 - | StacksEpochId::Epoch30 => { + | StacksEpochId::Epoch30 + | StacksEpochId::Epoch31 => { TypeChecker2_1::run_pass(&epoch, &mut contract_analysis, db, build_type_map) } StacksEpochId::Epoch10 => { diff --git a/clarity/src/vm/analysis/read_only_checker/mod.rs b/clarity/src/vm/analysis/read_only_checker/mod.rs index 006b4f0cfe..f60ce11a44 100644 --- a/clarity/src/vm/analysis/read_only_checker/mod.rs +++ b/clarity/src/vm/analysis/read_only_checker/mod.rs @@ -50,7 +50,7 @@ pub struct ReadOnlyChecker<'a, 'b> { clarity_version: ClarityVersion, } -impl<'a, 'b> AnalysisPass for ReadOnlyChecker<'a, 'b> { +impl AnalysisPass for ReadOnlyChecker<'_, '_> { fn run_pass( epoch: &StacksEpochId, contract_analysis: &mut ContractAnalysis, @@ -250,13 +250,12 @@ impl<'a, 'b> ReadOnlyChecker<'a, 'b> { Ok(result) } - /// Checks the native function application of the function named by the - /// string `function` to `args` to determine whether it is read-only - /// compliant. + /// Checks the native function application of the function named by the string `function` + /// to `args` to determine whether it is read-only compliant. /// /// - Returns `None` if there is no native function named `function`. - /// - If there is such a native function, returns `true` iff this function application is - /// read-only. 
+ /// - If there is such a native function, returns `true` iff this function + /// application is read-only. /// /// # Errors /// - Contract parsing errors @@ -414,15 +413,15 @@ impl<'a, 'b> ReadOnlyChecker<'a, 'b> { } } - /// Checks the native and user-defined function applications implied by `expressions`. The - /// first expression is used as the function name, and the tail expressions are used as the - /// arguments. + /// Checks the native and user-defined function applications implied by `expressions`. + /// + /// The first expression is used as the function name, and the tail expressions are used as the arguments. /// /// Returns `true` iff the function application is read-only. /// /// # Errors /// - `CheckErrors::NonFunctionApplication` if there is no first expression, or if the first - /// expression is not a `ClarityName`. + /// expression is not a `ClarityName`. /// - `CheckErrors::UnknownFunction` if the first expression does not name a known function. fn check_expression_application_is_read_only( &mut self, diff --git a/clarity/src/vm/analysis/type_checker/contexts.rs b/clarity/src/vm/analysis/type_checker/contexts.rs index 936cc47bc4..356ebf5944 100644 --- a/clarity/src/vm/analysis/type_checker/contexts.rs +++ b/clarity/src/vm/analysis/type_checker/contexts.rs @@ -92,7 +92,7 @@ impl TypeMap { } } -impl<'a> TypingContext<'a> { +impl TypingContext<'_> { pub fn new(epoch: StacksEpochId, clarity_version: ClarityVersion) -> TypingContext<'static> { TypingContext { epoch, diff --git a/clarity/src/vm/analysis/type_checker/mod.rs b/clarity/src/vm/analysis/type_checker/mod.rs index 800347d0f0..36aa2519cc 100644 --- a/clarity/src/vm/analysis/type_checker/mod.rs +++ b/clarity/src/vm/analysis/type_checker/mod.rs @@ -52,9 +52,10 @@ impl FunctionType { | StacksEpochId::Epoch23 | StacksEpochId::Epoch24 | StacksEpochId::Epoch25 - | StacksEpochId::Epoch30 => self.check_args_2_1(accounting, args, clarity_version), + | StacksEpochId::Epoch30 + | StacksEpochId::Epoch31 
=> self.check_args_2_1(accounting, args, clarity_version), StacksEpochId::Epoch10 => { - return Err(CheckErrors::Expects("Epoch10 is not supported".into()).into()) + Err(CheckErrors::Expects("Epoch10 is not supported".into()).into()) } } } @@ -75,21 +76,19 @@ impl FunctionType { | StacksEpochId::Epoch23 | StacksEpochId::Epoch24 | StacksEpochId::Epoch25 - | StacksEpochId::Epoch30 => { + | StacksEpochId::Epoch30 + | StacksEpochId::Epoch31 => { self.check_args_by_allowing_trait_cast_2_1(db, clarity_version, func_args) } StacksEpochId::Epoch10 => { - return Err(CheckErrors::Expects("Epoch10 is not supported".into()).into()) + Err(CheckErrors::Expects("Epoch10 is not supported".into()).into()) } } } } fn is_reserved_word_v3(word: &str) -> bool { - match word { - "block-height" => true, - _ => false, - } + word == "block-height" } /// Is this a reserved word that should trigger an analysis error for the given diff --git a/clarity/src/vm/analysis/type_checker/v2_05/mod.rs b/clarity/src/vm/analysis/type_checker/v2_05/mod.rs index 2b913a3ac9..77083b88cf 100644 --- a/clarity/src/vm/analysis/type_checker/v2_05/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_05/mod.rs @@ -239,10 +239,7 @@ impl FunctionType { Ok(TypeSignature::BoolType) } FunctionType::Binary(_, _, _) => { - return Err(CheckErrors::Expects( - "Binary type should not be reached in 2.05".into(), - ) - .into()) + Err(CheckErrors::Expects("Binary type should not be reached in 2.05".into()).into()) } } } @@ -286,8 +283,8 @@ impl FunctionType { )?; } (expected_type, value) => { - if !expected_type.admits(&StacksEpochId::Epoch2_05, &value)? { - let actual_type = TypeSignature::type_of(&value)?; + if !expected_type.admits(&StacksEpochId::Epoch2_05, value)? 
{ + let actual_type = TypeSignature::type_of(value)?; return Err( CheckErrors::TypeError(expected_type.clone(), actual_type).into() ); @@ -438,41 +435,39 @@ impl<'a, 'b> TypeChecker<'a, 'b> { context: &TypingContext, expected_type: &TypeSignature, ) -> TypeResult { - match (&expr.expr, expected_type) { - ( - LiteralValue(Value::Principal(PrincipalData::Contract(ref contract_identifier))), - TypeSignature::TraitReferenceType(trait_identifier), - ) => { - let contract_to_check = self - .db - .load_contract(&contract_identifier, &StacksEpochId::Epoch2_05)? - .ok_or(CheckErrors::NoSuchContract(contract_identifier.to_string()))?; - - let contract_defining_trait = self - .db - .load_contract( - &trait_identifier.contract_identifier, - &StacksEpochId::Epoch2_05, - )? - .ok_or(CheckErrors::NoSuchContract( - trait_identifier.contract_identifier.to_string(), - ))?; - - let trait_definition = contract_defining_trait - .get_defined_trait(&trait_identifier.name) - .ok_or(CheckErrors::NoSuchTrait( - trait_identifier.contract_identifier.to_string(), - trait_identifier.name.to_string(), - ))?; - - contract_to_check.check_trait_compliance( + if let ( + LiteralValue(Value::Principal(PrincipalData::Contract(ref contract_identifier))), + TypeSignature::TraitReferenceType(trait_identifier), + ) = (&expr.expr, expected_type) + { + let contract_to_check = self + .db + .load_contract(contract_identifier, &StacksEpochId::Epoch2_05)? + .ok_or(CheckErrors::NoSuchContract(contract_identifier.to_string()))?; + + let contract_defining_trait = self + .db + .load_contract( + &trait_identifier.contract_identifier, &StacksEpochId::Epoch2_05, - trait_identifier, - trait_definition, - )?; - return Ok(expected_type.clone()); - } - (_, _) => {} + )? 
+ .ok_or(CheckErrors::NoSuchContract( + trait_identifier.contract_identifier.to_string(), + ))?; + + let trait_definition = contract_defining_trait + .get_defined_trait(&trait_identifier.name) + .ok_or(CheckErrors::NoSuchTrait( + trait_identifier.contract_identifier.to_string(), + trait_identifier.name.to_string(), + ))?; + + contract_to_check.check_trait_compliance( + &StacksEpochId::Epoch2_05, + trait_identifier, + trait_definition, + )?; + return Ok(expected_type.clone()); } let actual_type = self.type_check(expr, context)?; diff --git a/clarity/src/vm/analysis/type_checker/v2_05/natives/mod.rs b/clarity/src/vm/analysis/type_checker/v2_05/natives/mod.rs index 201c307986..3c5ab99029 100644 --- a/clarity/src/vm/analysis/type_checker/v2_05/natives/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_05/natives/mod.rs @@ -776,8 +776,7 @@ impl TypedNativeFunction { | ReplaceAt | GetStacksBlockInfo | GetTenureInfo => { return Err(CheckErrors::Expects( "Clarity 2+ keywords should not show up in 2.05".into(), - ) - .into()) + )) } }; diff --git a/clarity/src/vm/analysis/type_checker/v2_1/mod.rs b/clarity/src/vm/analysis/type_checker/v2_1/mod.rs index 7caf775c19..7899b3e27d 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/mod.rs @@ -247,7 +247,7 @@ impl FunctionType { Err(CheckErrors::IncorrectArgumentCount(arg_types.len(), arg_index).into()), ); } - return (None, Ok(None)); + (None, Ok(None)) } // For the following function types, the visitor will just // tell the processor that any results greater than len 1 or 2 @@ -260,7 +260,7 @@ impl FunctionType { Err(CheckErrors::IncorrectArgumentCount(1, arg_index).into()), ); } - return (None, Ok(None)); + (None, Ok(None)) } FunctionType::ArithmeticBinary | FunctionType::ArithmeticComparison @@ -271,7 +271,7 @@ impl FunctionType { Err(CheckErrors::IncorrectArgumentCount(2, arg_index).into()), ); } - return (None, Ok(None)); + (None, Ok(None)) } } } @@ -576,8 +576,8 @@ 
impl FunctionType { )?; } (expected_type, value) => { - if !expected_type.admits(&StacksEpochId::Epoch21, &value)? { - let actual_type = TypeSignature::type_of(&value)?; + if !expected_type.admits(&StacksEpochId::Epoch21, value)? { + let actual_type = TypeSignature::type_of(value)?; return Err( CheckErrors::TypeError(expected_type.clone(), actual_type).into() ); @@ -854,7 +854,7 @@ fn clarity2_inner_type_check_type( TypeSignature::CallableType(CallableSubtype::Trait(expected_trait_id)), ) => { let contract_to_check = match db - .load_contract(&contract_identifier, &StacksEpochId::Epoch21)? + .load_contract(contract_identifier, &StacksEpochId::Epoch21)? { Some(contract) => { runtime_cost( @@ -1014,7 +1014,7 @@ impl<'a, 'b> TypeChecker<'a, 'b> { build_type_map: bool, ) -> TypeChecker<'a, 'b> { Self { - epoch: epoch.clone(), + epoch: *epoch, db, cost_track, contract_context: ContractContext::new(contract_identifier.clone(), *clarity_version), @@ -1240,6 +1240,7 @@ impl<'a, 'b> TypeChecker<'a, 'b> { .cloned() } + #[allow(clippy::unnecessary_lazy_evaluations)] fn type_check_define_function( &mut self, signature: &[SymbolicExpression], @@ -1440,41 +1441,39 @@ impl<'a, 'b> TypeChecker<'a, 'b> { context: &TypingContext, expected_type: &TypeSignature, ) -> TypeResult { - match (&expr.expr, expected_type) { - ( - LiteralValue(Value::Principal(PrincipalData::Contract(ref contract_identifier))), - TypeSignature::CallableType(CallableSubtype::Trait(trait_identifier)), - ) => { - let contract_to_check = self - .db - .load_contract(&contract_identifier, &StacksEpochId::Epoch21)? - .ok_or(CheckErrors::NoSuchContract(contract_identifier.to_string()))?; - - let contract_defining_trait = self - .db - .load_contract( - &trait_identifier.contract_identifier, - &StacksEpochId::Epoch21, - )? 
- .ok_or(CheckErrors::NoSuchContract( - trait_identifier.contract_identifier.to_string(), - ))?; - - let trait_definition = contract_defining_trait - .get_defined_trait(&trait_identifier.name) - .ok_or(CheckErrors::NoSuchTrait( - trait_identifier.contract_identifier.to_string(), - trait_identifier.name.to_string(), - ))?; - - contract_to_check.check_trait_compliance( + if let ( + LiteralValue(Value::Principal(PrincipalData::Contract(ref contract_identifier))), + TypeSignature::CallableType(CallableSubtype::Trait(trait_identifier)), + ) = (&expr.expr, expected_type) + { + let contract_to_check = self + .db + .load_contract(contract_identifier, &StacksEpochId::Epoch21)? + .ok_or(CheckErrors::NoSuchContract(contract_identifier.to_string()))?; + + let contract_defining_trait = self + .db + .load_contract( + &trait_identifier.contract_identifier, &StacksEpochId::Epoch21, - trait_identifier, - &trait_definition, - )?; - return Ok(expected_type.clone()); - } - (_, _) => {} + )? + .ok_or(CheckErrors::NoSuchContract( + trait_identifier.contract_identifier.to_string(), + ))?; + + let trait_definition = contract_defining_trait + .get_defined_trait(&trait_identifier.name) + .ok_or(CheckErrors::NoSuchTrait( + trait_identifier.contract_identifier.to_string(), + trait_identifier.name.to_string(), + ))?; + + contract_to_check.check_trait_compliance( + &StacksEpochId::Epoch21, + trait_identifier, + trait_definition, + )?; + return Ok(expected_type.clone()); } let actual_type = self.type_check(expr, context)?; diff --git a/clarity/src/vm/analysis/type_checker/v2_1/natives/conversions.rs b/clarity/src/vm/analysis/type_checker/v2_1/natives/conversions.rs index 9876062241..95fe6f9bf9 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/natives/conversions.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/natives/conversions.rs @@ -7,9 +7,10 @@ use crate::vm::analysis::CheckError; use crate::vm::types::{BufferLength, SequenceSubtype, TypeSignature}; use crate::vm::SymbolicExpression; 
-/// to-consensus-buff? admits exactly one argument: -/// * the Clarity value to serialize -/// it returns an `(optional (buff x))` where `x` is the maximum possible +/// `to-consensus-buff?` admits exactly one argument: +/// * the Clarity value to serialize +/// +/// It returns an `(optional (buff x))`, where `x` is the maximum possible /// consensus buffer length based on the inferred type of the supplied value. pub fn check_special_to_consensus_buff( checker: &mut TypeChecker, @@ -25,10 +26,11 @@ pub fn check_special_to_consensus_buff( .map_err(CheckError::from) } -/// from-consensus-buff? admits exactly two arguments: -/// * a type signature indicating the expected return type `t1` -/// * a buffer (of up to max length) -/// it returns an `(optional t1)` +/// `from-consensus-buff?` admits exactly two arguments: +/// * a type signature indicating the expected return type `t1` +/// * a buffer (of up to max length) +/// +/// It returns an `(optional t1)` pub fn check_special_from_consensus_buff( checker: &mut TypeChecker, args: &[SymbolicExpression], diff --git a/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs b/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs index b576277a5b..7769652d25 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs @@ -79,7 +79,7 @@ fn check_special_list_cons( }); costs.push(cost); - if let Some(cur_size) = entries_size.clone() { + if let Some(cur_size) = entries_size { entries_size = cur_size.checked_add(checked.size()?); } if let Some(cur_size) = entries_size { @@ -263,6 +263,7 @@ pub fn check_special_tuple_cons( Ok(TypeSignature::TupleType(tuple_signature)) } +#[allow(clippy::unnecessary_lazy_evaluations)] fn check_special_let( checker: &mut TypeChecker, args: &[SymbolicExpression], @@ -1016,7 +1017,7 @@ impl TypedNativeFunction { /// The return type of `principal-destruct` is a Response, in which the success /// and error types are the 
same. fn parse_principal_basic_type() -> Result { - Ok(TupleTypeSignature::try_from(vec![ + TupleTypeSignature::try_from(vec![ ("version".into(), BUFF_1.clone()), ("hash-bytes".into(), BUFF_20.clone()), ( @@ -1032,7 +1033,7 @@ impl TypedNativeFunction { "FAIL: PrincipalDestruct failed to initialize type signature" .into(), ) - })?) + }) } TypeSignature::ResponseType(Box::new(( parse_principal_basic_type()?.into(), diff --git a/clarity/src/vm/analysis/type_checker/v2_1/natives/options.rs b/clarity/src/vm/analysis/type_checker/v2_1/natives/options.rs index 6a097a8cd6..772bdd32a4 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/natives/options.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/natives/options.rs @@ -274,6 +274,7 @@ pub fn check_special_unwrap_err( inner_unwrap_err(input, checker) } +#[allow(clippy::unnecessary_lazy_evaluations)] fn eval_with_new_binding( body: &SymbolicExpression, bind_name: ClarityName, diff --git a/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs b/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs index 12597c88fa..498b52dcb0 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs @@ -3411,7 +3411,7 @@ fn test_trait_args() { }, TraitIdentifier { name: ClarityName::from("trait-bar"), - contract_identifier: contract_identifier, + contract_identifier, }, )]; diff --git a/clarity/src/vm/ast/definition_sorter/mod.rs b/clarity/src/vm/ast/definition_sorter/mod.rs index a5a551298c..bd611851b6 100644 --- a/clarity/src/vm/ast/definition_sorter/mod.rs +++ b/clarity/src/vm/ast/definition_sorter/mod.rs @@ -420,7 +420,7 @@ impl Graph { let list = self .adjacency_list .get_mut(src_expr_index) - .ok_or_else(|| ParseErrors::InterpreterFailure)?; + .ok_or(ParseErrors::InterpreterFailure)?; list.push(dst_expr_index); Ok(()) } @@ -491,7 +491,7 @@ impl GraphWalker { fn get_cycling_dependencies( &mut self, graph: &Graph, - sorted_indexes: &Vec, + sorted_indexes: 
&[usize], ) -> Option> { let mut tainted: HashSet = HashSet::new(); diff --git a/clarity/src/vm/ast/errors.rs b/clarity/src/vm/ast/errors.rs index c1a0914b5f..56f8e40f86 100644 --- a/clarity/src/vm/ast/errors.rs +++ b/clarity/src/vm/ast/errors.rs @@ -113,10 +113,7 @@ impl ParseError { } pub fn rejectable(&self) -> bool { - match self.err { - ParseErrors::InterpreterFailure => true, - _ => false, - } + matches!(self.err, ParseErrors::InterpreterFailure) } pub fn has_pre_expression(&self) -> bool { diff --git a/clarity/src/vm/ast/mod.rs b/clarity/src/vm/ast/mod.rs index 1cff959695..5c615f46fa 100644 --- a/clarity/src/vm/ast/mod.rs +++ b/clarity/src/vm/ast/mod.rs @@ -353,7 +353,7 @@ mod test { ) -> std::result::Result { self.invoked_functions.push((cost_f, input.to_vec())); self.invocation_count += 1; - Ok(ExecutionCost::zero()) + Ok(ExecutionCost::ZERO) } fn add_cost(&mut self, _cost: ExecutionCost) -> std::result::Result<(), CostErrors> { self.cost_addition_count += 1; diff --git a/clarity/src/vm/ast/parser/v1.rs b/clarity/src/vm/ast/parser/v1.rs index 5c2715e9f7..4cef2e5411 100644 --- a/clarity/src/vm/ast/parser/v1.rs +++ b/clarity/src/vm/ast/parser/v1.rs @@ -219,9 +219,7 @@ fn inner_lex(input: &str, max_nesting: u64) -> ParseResult { if !args.is_empty() { self.probe_for_generics( - args[1..].to_vec().into_iter(), + args[1..].iter().copied(), &mut referenced_traits, false, )?; diff --git a/clarity/src/vm/ast/types.rs b/clarity/src/vm/ast/types.rs index aedd31eae3..2071130131 100644 --- a/clarity/src/vm/ast/types.rs +++ b/clarity/src/vm/ast/types.rs @@ -96,6 +96,10 @@ impl PreExpressionsDrain { pub fn len(&self) -> usize { self.len } + + pub fn is_empty(&self) -> bool { + self.len == 0 + } } impl Iterator for PreExpressionsDrain { diff --git a/clarity/src/vm/callables.rs b/clarity/src/vm/callables.rs index 9cd991ec97..4691025a8d 100644 --- a/clarity/src/vm/callables.rs +++ b/clarity/src/vm/callables.rs @@ -37,6 +37,7 @@ use crate::vm::types::{ }; use 
crate::vm::{eval, Environment, LocalContext, Value}; +#[allow(clippy::type_complexity)] pub enum CallableType { UserFunction(DefinedFunction), NativeFunction(&'static str, NativeHandle, ClarityCostFunction), @@ -244,7 +245,11 @@ impl DefinedFunction { ) .into()); } - if let Some(_) = context.variables.insert(name.clone(), value.clone()) { + if context + .variables + .insert(name.clone(), value.clone()) + .is_some() + { return Err(CheckErrors::NameAlreadyUsed(name.to_string()).into()); } } @@ -286,7 +291,7 @@ impl DefinedFunction { } } - if let Some(_) = context.variables.insert(name.clone(), cast_value) { + if context.variables.insert(name.clone(), cast_value).is_some() { return Err(CheckErrors::NameAlreadyUsed(name.to_string()).into()); } } @@ -323,7 +328,7 @@ impl DefinedFunction { self.name.to_string(), ))?; - let args = self.arg_types.iter().map(|a| a.clone()).collect(); + let args = self.arg_types.to_vec(); if !expected_sig.check_args_trait_compliance(epoch, args)? { return Err( CheckErrors::BadTraitImplementation(trait_name, self.name.to_string()).into(), @@ -393,16 +398,12 @@ impl CallableType { impl FunctionIdentifier { fn new_native_function(name: &str) -> FunctionIdentifier { let identifier = format!("_native_:{}", name); - FunctionIdentifier { - identifier: identifier, - } + FunctionIdentifier { identifier } } fn new_user_function(name: &str, context: &str) -> FunctionIdentifier { let identifier = format!("{}:{}", context, name); - FunctionIdentifier { - identifier: identifier, - } + FunctionIdentifier { identifier } } } @@ -636,12 +637,9 @@ mod test { let cast_list = clarity2_implicit_cast(&list_opt_ty, &list_opt_contract).unwrap(); let items = cast_list.expect_list().unwrap(); for item in items { - match item.expect_optional().unwrap() { - Some(cast_opt) => { - let cast_trait = cast_opt.expect_callable().unwrap(); - assert_eq!(&cast_trait.trait_identifier.unwrap(), &trait_identifier); - } - None => (), + if let Some(cast_opt) = 
item.expect_optional().unwrap() { + let cast_trait = cast_opt.expect_callable().unwrap(); + assert_eq!(&cast_trait.trait_identifier.unwrap(), &trait_identifier); } } diff --git a/clarity/src/vm/clarity.rs b/clarity/src/vm/clarity.rs index 11145ab11a..1e503d1425 100644 --- a/clarity/src/vm/clarity.rs +++ b/clarity/src/vm/clarity.rs @@ -113,6 +113,7 @@ pub trait ClarityConnection { self.with_clarity_db_readonly_owned(|mut db| (to_do(&mut db), db)) } + #[allow(clippy::too_many_arguments)] fn with_readonly_clarity_env( &mut self, mainnet: bool, @@ -151,12 +152,15 @@ pub trait ClarityConnection { pub trait TransactionConnection: ClarityConnection { /// Do something with this connection's Clarity environment that can be aborted - /// with `abort_call_back`. + /// with `abort_call_back`. + /// /// This returns the return value of `to_do`: - /// * the generic term `R` - /// * the asset changes during `to_do` in an `AssetMap` - /// * the Stacks events during the transaction - /// and a `bool` value which is `true` if the `abort_call_back` caused the changes to abort + /// * the generic term `R` + /// * the asset changes during `to_do` in an `AssetMap` + /// * the Stacks events during the transaction + /// + /// and a `bool` value which is `true` if the `abort_call_back` caused the changes to abort. + /// /// If `to_do` returns an `Err` variant, then the changes are aborted. 
fn with_abort_callback( &mut self, @@ -197,14 +201,14 @@ pub trait TransactionConnection: ClarityConnection { ast_rules, ); - let mut contract_ast = match ast_result { + let contract_ast = match ast_result { Ok(x) => x, Err(e) => return (cost_track, Err(e.into())), }; let result = analysis::run_analysis( identifier, - &mut contract_ast.expressions, + &contract_ast.expressions, db, false, cost_track, @@ -272,7 +276,7 @@ pub trait TransactionConnection: ClarityConnection { }, |_, _| false, ) - .and_then(|(value, assets, events, _)| Ok((value, assets, events))) + .map(|(value, assets, events, _)| (value, assets, events)) } /// Execute a contract call in the current block. diff --git a/clarity/src/vm/contexts.rs b/clarity/src/vm/contexts.rs index a559ad59fd..b3eb8c9fe5 100644 --- a/clarity/src/vm/contexts.rs +++ b/clarity/src/vm/contexts.rs @@ -180,7 +180,7 @@ impl AssetMap { } } -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Default)] pub struct EventBatch { pub events: Vec, } @@ -243,6 +243,12 @@ pub type StackTrace = Vec; pub const TRANSIENT_CONTRACT_NAME: &str = "__transient"; +impl Default for AssetMap { + fn default() -> Self { + Self::new() + } +} + impl AssetMap { pub fn new() -> AssetMap { AssetMap { @@ -276,11 +282,11 @@ impl AssetMap { asset: &AssetIdentifier, amount: u128, ) -> Result { - let current_amount = match self.token_map.get(principal) { - Some(principal_map) => *principal_map.get(asset).unwrap_or(&0), - None => 0, - }; - + let current_amount = self + .token_map + .get(principal) + .and_then(|x| x.get(asset)) + .unwrap_or(&0); current_amount .checked_add(amount) .ok_or(RuntimeErrorType::ArithmeticOverflow.into()) @@ -393,17 +399,14 @@ impl AssetMap { for (principal, stx_amount) in self.stx_map.drain() { let output_map = map.entry(principal.clone()).or_default(); - output_map.insert( - AssetIdentifier::STX(), - AssetMapEntry::STX(stx_amount as u128), - ); + output_map.insert(AssetIdentifier::STX(), AssetMapEntry::STX(stx_amount)); } for 
(principal, stx_burned_amount) in self.burn_map.drain() { let output_map = map.entry(principal.clone()).or_default(); output_map.insert( AssetIdentifier::STX_burned(), - AssetMapEntry::Burn(stx_burned_amount as u128), + AssetMapEntry::Burn(stx_burned_amount), ); } @@ -414,7 +417,7 @@ impl AssetMap { } } - return map; + map } pub fn get_stx(&self, principal: &PrincipalData) -> Option { @@ -440,13 +443,8 @@ impl AssetMap { principal: &PrincipalData, asset_identifier: &AssetIdentifier, ) -> Option { - match self.token_map.get(principal) { - Some(ref assets) => match assets.get(asset_identifier) { - Some(value) => Some(*value), - None => None, - }, - None => None, - } + let assets = self.token_map.get(principal)?; + assets.get(asset_identifier).copied() } pub fn get_nonfungible_tokens( @@ -454,13 +452,8 @@ impl AssetMap { principal: &PrincipalData, asset_identifier: &AssetIdentifier, ) -> Option<&Vec> { - match self.asset_map.get(principal) { - Some(ref assets) => match assets.get(asset_identifier) { - Some(values) => Some(values), - None => None, - }, - None => None, - } + let assets = self.asset_map.get(principal)?; + assets.get(asset_identifier) } } @@ -469,23 +462,23 @@ impl fmt::Display for AssetMap { write!(f, "[")?; for (principal, principal_map) in self.token_map.iter() { for (asset, amount) in principal_map.iter() { - write!(f, "{} spent {} {}\n", principal, amount, asset)?; + writeln!(f, "{principal} spent {amount} {asset}")?; } } for (principal, principal_map) in self.asset_map.iter() { for (asset, transfer) in principal_map.iter() { - write!(f, "{} transfered [", principal)?; + write!(f, "{principal} transfered [")?; for t in transfer { - write!(f, "{}, ", t)?; + write!(f, "{t}, ")?; } - write!(f, "] {}\n", asset)?; + writeln!(f, "] {asset}")?; } } for (principal, stx_amount) in self.stx_map.iter() { - write!(f, "{} spent {} microSTX\n", principal, stx_amount)?; + writeln!(f, "{principal} spent {stx_amount} microSTX")?; } for (principal, stx_burn_amount) in 
self.burn_map.iter() { - write!(f, "{} burned {} microSTX\n", principal, stx_burn_amount)?; + writeln!(f, "{principal} burned {stx_burn_amount} microSTX")?; } write!(f, "]") } @@ -493,13 +486,13 @@ impl fmt::Display for AssetMap { impl EventBatch { pub fn new() -> EventBatch { - EventBatch { events: vec![] } + EventBatch::default() } } impl<'a, 'hooks> OwnedEnvironment<'a, 'hooks> { #[cfg(any(test, feature = "testing"))] - pub fn new(database: ClarityDatabase<'a>, epoch: StacksEpochId) -> OwnedEnvironment<'a, '_> { + pub fn new(database: ClarityDatabase<'a>, epoch: StacksEpochId) -> OwnedEnvironment<'a, 'a> { OwnedEnvironment { context: GlobalContext::new( false, @@ -513,7 +506,7 @@ impl<'a, 'hooks> OwnedEnvironment<'a, 'hooks> { } #[cfg(any(test, feature = "testing"))] - pub fn new_toplevel(mut database: ClarityDatabase<'a>) -> OwnedEnvironment<'a, '_> { + pub fn new_toplevel(mut database: ClarityDatabase<'a>) -> OwnedEnvironment<'a, 'a> { database.begin(); let epoch = database.get_clarity_epoch_version().unwrap(); let version = ClarityVersion::default_for_epoch(epoch); @@ -540,7 +533,7 @@ impl<'a, 'hooks> OwnedEnvironment<'a, 'hooks> { mut database: ClarityDatabase<'a>, epoch: StacksEpochId, use_mainnet: bool, - ) -> OwnedEnvironment<'a, '_> { + ) -> OwnedEnvironment<'a, 'a> { use crate::vm::tests::test_only_mainnet_to_chain_id; let cost_track = LimitedCostTracker::new_max_limit(&mut database, epoch, use_mainnet) .expect("FAIL: problem instantiating cost tracking"); @@ -557,7 +550,7 @@ impl<'a, 'hooks> OwnedEnvironment<'a, 'hooks> { chain_id: u32, database: ClarityDatabase<'a>, epoch_id: StacksEpochId, - ) -> OwnedEnvironment<'a, '_> { + ) -> OwnedEnvironment<'a, 'a> { OwnedEnvironment { context: GlobalContext::new( mainnet, @@ -576,7 +569,7 @@ impl<'a, 'hooks> OwnedEnvironment<'a, 'hooks> { database: ClarityDatabase<'a>, cost_tracker: LimitedCostTracker, epoch_id: StacksEpochId, - ) -> OwnedEnvironment<'a, '_> { + ) -> OwnedEnvironment<'a, 'a> { OwnedEnvironment 
{ context: GlobalContext::new(mainnet, chain_id, database, cost_tracker, epoch_id), call_stack: CallStack::new(), @@ -614,12 +607,11 @@ impl<'a, 'hooks> OwnedEnvironment<'a, 'hooks> { self.begin(); let result = { - let mut initial_context = initial_context.unwrap_or(ContractContext::new( + let initial_context = initial_context.unwrap_or(ContractContext::new( QualifiedContractIdentifier::transient(), ClarityVersion::Clarity1, )); - let mut exec_env = - self.get_exec_environment(Some(sender), sponsor, &mut initial_context); + let mut exec_env = self.get_exec_environment(Some(sender), sponsor, &initial_context); f(&mut exec_env) }; @@ -737,7 +729,7 @@ impl<'a, 'hooks> OwnedEnvironment<'a, 'hooks> { let mut snapshot = env .global_context .database - .get_stx_balance_snapshot(&recipient) + .get_stx_balance_snapshot(recipient) .unwrap(); snapshot.credit(amount).unwrap(); @@ -949,7 +941,7 @@ impl<'a, 'b, 'hooks> Environment<'a, 'b, 'hooks> { program: &str, rules: ast::ASTRules, ) -> Result { - let clarity_version = self.contract_context.clarity_version.clone(); + let clarity_version = self.contract_context.clarity_version; let parsed = ast::build_ast_with_rules( contract_identifier, @@ -961,7 +953,7 @@ impl<'a, 'b, 'hooks> Environment<'a, 'b, 'hooks> { )? 
.expressions; - if parsed.len() < 1 { + if parsed.is_empty() { return Err(RuntimeErrorType::ParseError( "Expected a program of at least length 1".to_string(), ) @@ -981,7 +973,7 @@ impl<'a, 'b, 'hooks> Environment<'a, 'b, 'hooks> { let result = { let mut nested_env = Environment::new( - &mut self.global_context, + self.global_context, &contract.contract_context, self.call_stack, self.sender.clone(), @@ -1008,7 +1000,7 @@ impl<'a, 'b, 'hooks> Environment<'a, 'b, 'hooks> { pub fn eval_raw_with_rules(&mut self, program: &str, rules: ast::ASTRules) -> Result { let contract_id = QualifiedContractIdentifier::transient(); - let clarity_version = self.contract_context.clarity_version.clone(); + let clarity_version = self.contract_context.clarity_version; let parsed = ast::build_ast_with_rules( &contract_id, @@ -1020,15 +1012,14 @@ impl<'a, 'b, 'hooks> Environment<'a, 'b, 'hooks> { )? .expressions; - if parsed.len() < 1 { + if parsed.is_empty() { return Err(RuntimeErrorType::ParseError( "Expected a program of at least length 1".to_string(), ) .into()); } let local_context = LocalContext::new(); - let result = { eval(&parsed[0], self, &local_context) }; - result + eval(&parsed[0], self, &local_context) } #[cfg(any(test, feature = "testing"))] @@ -1150,7 +1141,7 @@ impl<'a, 'b, 'hooks> Environment<'a, 'b, 'hooks> { Ok(value) => { if let Some(handler) = self.global_context.database.get_cc_special_cases_handler() { handler( - &mut self.global_context, + self.global_context, self.sender.as_ref(), self.sponsor.as_ref(), contract_identifier, @@ -1185,7 +1176,7 @@ impl<'a, 'b, 'hooks> Environment<'a, 'b, 'hooks> { let result = { let mut nested_env = Environment::new( - &mut self.global_context, + self.global_context, next_contract_context, self.call_stack, self.sender.clone(), @@ -1240,7 +1231,7 @@ impl<'a, 'b, 'hooks> Environment<'a, 'b, 'hooks> { contract_content: &str, ast_rules: ASTRules, ) -> Result<()> { - let clarity_version = self.contract_context.clarity_version.clone(); + 
let clarity_version = self.contract_context.clarity_version; let contract_ast = ast::build_ast_with_rules( &contract_identifier, @@ -1254,7 +1245,7 @@ impl<'a, 'b, 'hooks> Environment<'a, 'b, 'hooks> { contract_identifier, clarity_version, &contract_ast, - &contract_content, + contract_content, ) } @@ -1299,7 +1290,7 @@ impl<'a, 'b, 'hooks> Environment<'a, 'b, 'hooks> { contract_identifier.clone(), contract_content, self.sponsor.clone(), - &mut self.global_context, + self.global_context, contract_version, ); self.drop_memory(memory_use)?; @@ -1546,7 +1537,7 @@ impl<'a, 'hooks> GlobalContext<'a, 'hooks> { database: ClarityDatabase<'a>, cost_track: LimitedCostTracker, epoch_id: StacksEpochId, - ) -> GlobalContext { + ) -> GlobalContext<'a, 'hooks> { GlobalContext { database, cost_track, @@ -1561,7 +1552,7 @@ impl<'a, 'hooks> GlobalContext<'a, 'hooks> { } pub fn is_top_level(&self) -> bool { - self.asset_maps.len() == 0 + self.asset_maps.is_empty() } fn get_asset_map(&mut self) -> Result<&mut AssetMap> { @@ -1841,6 +1832,12 @@ impl ContractContext { } } +impl Default for LocalContext<'_> { + fn default() -> Self { + Self::new() + } +} + impl<'a> LocalContext<'a> { pub fn new() -> LocalContext<'a> { LocalContext { @@ -1898,6 +1895,12 @@ impl<'a> LocalContext<'a> { } } +impl Default for CallStack { + fn default() -> Self { + Self::new() + } +} + impl CallStack { pub fn new() -> CallStack { CallStack { @@ -1946,10 +1949,10 @@ impl CallStack { } Ok(()) } else { - return Err(InterpreterError::InterpreterError( + Err(InterpreterError::InterpreterError( "Tried to remove item from empty call stack.".to_string(), ) - .into()); + .into()) } } @@ -2149,8 +2152,8 @@ mod test { // not simply rollback the tx and squelch the error as includable. 
let e = env .stx_transfer( - &PrincipalData::from(u1.clone()), - &PrincipalData::from(u2.clone()), + &PrincipalData::from(u1), + &PrincipalData::from(u2), 1000, &BuffData::empty(), ) diff --git a/clarity/src/vm/costs/mod.rs b/clarity/src/vm/costs/mod.rs index b3ee746fcf..a3c7fa7140 100644 --- a/clarity/src/vm/costs/mod.rs +++ b/clarity/src/vm/costs/mod.rs @@ -46,9 +46,9 @@ type Result = std::result::Result; pub const CLARITY_MEMORY_LIMIT: u64 = 100 * 1000 * 1000; // TODO: factor out into a boot lib? -pub const COSTS_1_NAME: &'static str = "costs"; -pub const COSTS_2_NAME: &'static str = "costs-2"; -pub const COSTS_3_NAME: &'static str = "costs-3"; +pub const COSTS_1_NAME: &str = "costs"; +pub const COSTS_2_NAME: &str = "costs-2"; +pub const COSTS_3_NAME: &str = "costs-3"; lazy_static! { static ref COST_TUPLE_TYPE_SIGNATURE: TypeSignature = { @@ -140,7 +140,7 @@ impl CostTracker for () { _cost_function: ClarityCostFunction, _input: &[u64], ) -> std::result::Result { - Ok(ExecutionCost::zero()) + Ok(ExecutionCost::ZERO) } fn add_cost(&mut self, _cost: ExecutionCost) -> std::result::Result<(), CostErrors> { Ok(()) @@ -254,6 +254,7 @@ pub struct TrackerData { } #[derive(Clone)] +#[allow(clippy::large_enum_variant)] pub enum LimitedCostTracker { Limited(TrackerData), Free, @@ -334,11 +335,7 @@ pub enum CostErrors { impl CostErrors { fn rejectable(&self) -> bool { - match self { - CostErrors::InterpreterFailure => true, - CostErrors::Expect(_) => true, - _ => false, - } + matches!(self, CostErrors::InterpreterFailure | CostErrors::Expect(_)) } } @@ -650,7 +647,7 @@ fn load_cost_functions( continue; } for arg in &cost_func_type.args { - if &arg.signature != &TypeSignature::UIntType { + if arg.signature != TypeSignature::UIntType { warn!("Confirmed cost proposal invalid: contains non uint argument"; "confirmed_proposal_id" => confirmed_proposal, ); @@ -707,7 +704,7 @@ impl LimitedCostTracker { contract_call_circuits: HashMap::new(), limit, memory_limit: 
CLARITY_MEMORY_LIMIT, - total: ExecutionCost::zero(), + total: ExecutionCost::ZERO, memory: 0, epoch, mainnet, @@ -731,7 +728,7 @@ impl LimitedCostTracker { contract_call_circuits: HashMap::new(), limit, memory_limit: CLARITY_MEMORY_LIMIT, - total: ExecutionCost::zero(), + total: ExecutionCost::ZERO, memory: 0, epoch, mainnet, @@ -775,7 +772,8 @@ impl LimitedCostTracker { | StacksEpochId::Epoch23 | StacksEpochId::Epoch24 | StacksEpochId::Epoch25 - | StacksEpochId::Epoch30 => COSTS_3_NAME.to_string(), + | StacksEpochId::Epoch30 + | StacksEpochId::Epoch31 => COSTS_3_NAME.to_string(), }; Ok(result) } @@ -871,7 +869,7 @@ impl TrackerData { .map_err(|e| CostErrors::Expect(e.to_string()))?; } - return Ok(()); + Ok(()) } } @@ -879,11 +877,11 @@ impl LimitedCostTracker { pub fn get_total(&self) -> ExecutionCost { match self { Self::Limited(TrackerData { total, .. }) => total.clone(), - Self::Free => ExecutionCost::zero(), + Self::Free => ExecutionCost::ZERO, } } #[allow(clippy::panic)] - pub fn set_total(&mut self, total: ExecutionCost) -> () { + pub fn set_total(&mut self, total: ExecutionCost) { // used by the miner to "undo" the cost of a transaction when trying to pack a block. match self { Self::Limited(ref mut data) => data.total = total, @@ -981,8 +979,7 @@ fn compute_cost( .cost_contracts .get_mut(&cost_function_reference.contract_id) .ok_or(CostErrors::CostComputationFailed(format!( - "CostFunction not found: {}", - &cost_function_reference + "CostFunction not found: {cost_function_reference}" )))?; let mut program = vec![SymbolicExpression::atom( @@ -1049,7 +1046,7 @@ impl CostTracker for LimitedCostTracker { match self { Self::Free => { // tracker is free, return zero! 
- return Ok(ExecutionCost::zero()); + Ok(ExecutionCost::ZERO) } Self::Limited(ref mut data) => { if cost_function == ClarityCostFunction::Unimplemented { @@ -1061,8 +1058,7 @@ impl CostTracker for LimitedCostTracker { .cost_function_references .get(&cost_function) .ok_or(CostErrors::CostComputationFailed(format!( - "CostFunction not defined: {}", - &cost_function + "CostFunction not defined: {cost_function}" )))? .clone(); @@ -1176,39 +1172,33 @@ pub trait CostOverflowingMath { impl CostOverflowingMath for u64 { fn cost_overflow_mul(self, other: u64) -> Result { - self.checked_mul(other) - .ok_or_else(|| CostErrors::CostOverflow) + self.checked_mul(other).ok_or(CostErrors::CostOverflow) } fn cost_overflow_add(self, other: u64) -> Result { - self.checked_add(other) - .ok_or_else(|| CostErrors::CostOverflow) + self.checked_add(other).ok_or(CostErrors::CostOverflow) } fn cost_overflow_sub(self, other: u64) -> Result { - self.checked_sub(other) - .ok_or_else(|| CostErrors::CostOverflow) + self.checked_sub(other).ok_or(CostErrors::CostOverflow) } fn cost_overflow_div(self, other: u64) -> Result { - self.checked_div(other) - .ok_or_else(|| CostErrors::CostOverflow) + self.checked_div(other).ok_or(CostErrors::CostOverflow) } } impl ExecutionCost { - pub fn zero() -> ExecutionCost { - Self { - runtime: 0, - write_length: 0, - read_count: 0, - write_count: 0, - read_length: 0, - } - } + pub const ZERO: Self = Self { + runtime: 0, + write_length: 0, + read_count: 0, + write_count: 0, + read_length: 0, + }; /// Returns the percentage of self consumed in `numerator`'s largest proportion dimension. 
pub fn proportion_largest_dimension(&self, numerator: &ExecutionCost) -> u64 { // max() should always return because there are > 0 elements #[allow(clippy::expect_used)] - [ + *[ numerator.runtime / cmp::max(1, self.runtime / 100), numerator.write_length / cmp::max(1, self.write_length / 100), numerator.write_count / cmp::max(1, self.write_count / 100), @@ -1218,7 +1208,6 @@ impl ExecutionCost { .iter() .max() .expect("BUG: should find maximum") - .clone() } /// Returns the dot product of this execution cost with `resolution`/block_limit @@ -1327,6 +1316,10 @@ impl ExecutionCost { read_length: first.read_length.max(second.read_length), } } + + pub fn is_zero(&self) -> bool { + *self == Self::ZERO + } } // ONLY WORKS IF INPUT IS u64 diff --git a/clarity/src/vm/coverage.rs b/clarity/src/vm/coverage.rs index be8a647e9c..862c035f98 100644 --- a/clarity/src/vm/coverage.rs +++ b/clarity/src/vm/coverage.rs @@ -26,6 +26,12 @@ struct CoverageFileInfo { coverage: HashMap>, } +impl Default for CoverageReporter { + fn default() -> Self { + Self::new() + } +} + impl CoverageReporter { pub fn new() -> CoverageReporter { CoverageReporter { diff --git a/clarity/src/vm/database/clarity_db.rs b/clarity/src/vm/database/clarity_db.rs index 50715fd98f..cbb8bcb4de 100644 --- a/clarity/src/vm/database/clarity_db.rs +++ b/clarity/src/vm/database/clarity_db.rs @@ -23,7 +23,7 @@ use stacks_common::consts::{ }; use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, ConsensusHash, SortitionId, StacksAddress, StacksBlockId, - VRFSeed, + TrieHash, VRFSeed, }; use stacks_common::types::{Address, StacksEpoch as GenericStacksEpoch, StacksEpochId}; use stacks_common::util::hash::{to_hex, Hash160, Sha256Sum, Sha512Trunc256Sum}; @@ -76,6 +76,68 @@ pub enum StoreType { PoxUnlockHeight = 0x15, } +impl TryFrom<&str> for StoreType { + type Error = String; + + fn try_from(value: &str) -> core::result::Result { + use self::StoreType::*; + + let hex_value = value.parse::().map_err(|e| 
e.to_string())?; + match hex_value { + 0x00 => Ok(DataMap), + 0x01 => Ok(Variable), + 0x02 => Ok(FungibleToken), + 0x03 => Ok(CirculatingSupply), + 0x04 => Ok(NonFungibleToken), + 0x05 => Ok(DataMapMeta), + 0x06 => Ok(VariableMeta), + 0x07 => Ok(FungibleTokenMeta), + 0x08 => Ok(NonFungibleTokenMeta), + 0x09 => Ok(Contract), + 0x10 => Ok(SimmedBlock), + 0x11 => Ok(SimmedBlockHeight), + 0x12 => Ok(Nonce), + 0x13 => Ok(STXBalance), + 0x14 => Ok(PoxSTXLockup), + 0x15 => Ok(PoxUnlockHeight), + _ => Err("Invalid StoreType".into()), + } + } +} + +pub enum ContractDataVarName { + Contract, + ContractSize, + ContractSrc, + ContractDataSize, +} + +impl ContractDataVarName { + pub fn as_str(&self) -> &str { + match self { + Self::Contract => "contract", + Self::ContractSize => "contract-size", + Self::ContractSrc => "contract-src", + Self::ContractDataSize => "contract-data-size", + } + } +} + +impl TryFrom<&str> for ContractDataVarName { + type Error = String; + + fn try_from(value: &str) -> core::result::Result { + use self::ContractDataVarName::*; + match value { + "contract" => Ok(Contract), + "contract-size" => Ok(ContractSize), + "contract-src" => Ok(ContractSrc), + "contract-data-size" => Ok(ContractDataSize), + _ => Err("Invalid ContractDataVarName".into()), + } + } +} + pub struct ClarityDatabase<'a> { pub store: RollbackWrapper<'a>, headers_db: &'a dyn HeadersDB, @@ -444,7 +506,7 @@ impl<'a> ClarityDatabase<'a> { } pub fn put_data(&mut self, key: &str, value: &T) -> Result<()> { - self.store.put_data(&key, &value.serialize()) + self.store.put_data(key, &value.serialize()) } /// Like `put()`, but returns the serialized byte size of the stored value @@ -454,7 +516,7 @@ impl<'a> ClarityDatabase<'a> { value: &T, ) -> Result { let serialized = value.serialize(); - self.store.put_data(&key, &serialized)?; + self.store.put_data(key, &serialized)?; Ok(byte_len_of_serialization(&serialized)) } @@ -465,6 +527,13 @@ impl<'a> ClarityDatabase<'a> { self.store.get_data::(key) } + 
pub fn get_data_by_hash(&mut self, hash: &TrieHash) -> Result> + where + T: ClarityDeserializable, + { + self.store.get_data_by_hash::(hash) + } + pub fn put_value(&mut self, key: &str, value: Value, epoch: &StacksEpochId) -> Result<()> { self.put_value_with_size(key, value, epoch)?; Ok(()) @@ -499,7 +568,7 @@ impl<'a> ClarityDatabase<'a> { let size = serialized.len() as u64; let hex_serialized = to_hex(serialized.as_slice()); - self.store.put_data(&key, &hex_serialized)?; + self.store.put_data(key, &hex_serialized)?; Ok(pre_sanitized_size.unwrap_or(size)) } @@ -522,6 +591,16 @@ impl<'a> ClarityDatabase<'a> { self.store.get_data_with_proof(key) } + pub fn get_data_with_proof_by_hash( + &mut self, + hash: &TrieHash, + ) -> Result)>> + where + T: ClarityDeserializable, + { + self.store.get_data_with_proof_by_hash(hash) + } + pub fn make_key_for_trip( contract_identifier: &QualifiedContractIdentifier, data: StoreType, @@ -559,12 +638,18 @@ impl<'a> ClarityDatabase<'a> { self.store .prepare_for_contract_metadata(contract_identifier, hash)?; // insert contract-size - let key = ClarityDatabase::make_metadata_key(StoreType::Contract, "contract-size"); + let key = ClarityDatabase::make_metadata_key( + StoreType::Contract, + ContractDataVarName::ContractSize.as_str(), + ); self.insert_metadata(contract_identifier, &key, &(contract_content.len() as u64))?; // insert contract-src if STORE_CONTRACT_SRC_INTERFACE { - let key = ClarityDatabase::make_metadata_key(StoreType::Contract, "contract-src"); + let key = ClarityDatabase::make_metadata_key( + StoreType::Contract, + ContractDataVarName::ContractSrc.as_str(), + ); self.insert_metadata(contract_identifier, &key, &contract_content.to_string())?; } Ok(()) @@ -574,7 +659,10 @@ impl<'a> ClarityDatabase<'a> { &mut self, contract_identifier: &QualifiedContractIdentifier, ) -> Option { - let key = ClarityDatabase::make_metadata_key(StoreType::Contract, "contract-src"); + let key = ClarityDatabase::make_metadata_key( + 
StoreType::Contract, + ContractDataVarName::ContractSrc.as_str(), + ); self.fetch_metadata(contract_identifier, &key) .ok() .flatten() @@ -667,23 +755,24 @@ impl<'a> ClarityDatabase<'a> { &mut self, contract_identifier: &QualifiedContractIdentifier, ) -> Result> { - let x_opt = self - .store + self.store .get_metadata(contract_identifier, AnalysisDatabase::storage_key()) // treat NoSuchContract error thrown by get_metadata as an Option::None -- // the analysis will propagate that as a CheckError anyways. - .ok(); - match x_opt.flatten() { - None => Ok(None), - Some(x) => ContractAnalysis::deserialize(&x).map(|out| Some(out)), - } + .ok() + .flatten() + .map(|x| ContractAnalysis::deserialize(&x)) + .transpose() } pub fn get_contract_size( &mut self, contract_identifier: &QualifiedContractIdentifier, ) -> Result { - let key = ClarityDatabase::make_metadata_key(StoreType::Contract, "contract-size"); + let key = ClarityDatabase::make_metadata_key( + StoreType::Contract, + ContractDataVarName::ContractSize.as_str(), + ); let contract_size: u64 = self.fetch_metadata(contract_identifier, &key)? .ok_or_else(|| { @@ -691,7 +780,10 @@ impl<'a> ClarityDatabase<'a> { "Failed to read non-consensus contract metadata, even though contract exists in MARF." .into()) })?; - let key = ClarityDatabase::make_metadata_key(StoreType::Contract, "contract-data-size"); + let key = ClarityDatabase::make_metadata_key( + StoreType::Contract, + ContractDataVarName::ContractDataSize.as_str(), + ); let data_size: u64 = self .fetch_metadata(contract_identifier, &key)? .ok_or_else(|| { @@ -710,7 +802,10 @@ impl<'a> ClarityDatabase<'a> { contract_identifier: &QualifiedContractIdentifier, data_size: u64, ) -> Result<()> { - let key = ClarityDatabase::make_metadata_key(StoreType::Contract, "contract-size"); + let key = ClarityDatabase::make_metadata_key( + StoreType::Contract, + ContractDataVarName::ContractSize.as_str(), + ); let contract_size: u64 = self.fetch_metadata(contract_identifier, &key)? 
.ok_or_else(|| { @@ -720,7 +815,10 @@ impl<'a> ClarityDatabase<'a> { })?; contract_size.cost_overflow_add(data_size)?; - let key = ClarityDatabase::make_metadata_key(StoreType::Contract, "contract-data-size"); + let key = ClarityDatabase::make_metadata_key( + StoreType::Contract, + ContractDataVarName::ContractDataSize.as_str(), + ); self.insert_metadata(contract_identifier, &key, &data_size)?; Ok(()) } @@ -730,13 +828,19 @@ impl<'a> ClarityDatabase<'a> { contract_identifier: &QualifiedContractIdentifier, contract: Contract, ) -> Result<()> { - let key = ClarityDatabase::make_metadata_key(StoreType::Contract, "contract"); + let key = ClarityDatabase::make_metadata_key( + StoreType::Contract, + ContractDataVarName::Contract.as_str(), + ); self.insert_metadata(contract_identifier, &key, &contract)?; Ok(()) } pub fn has_contract(&mut self, contract_identifier: &QualifiedContractIdentifier) -> bool { - let key = ClarityDatabase::make_metadata_key(StoreType::Contract, "contract"); + let key = ClarityDatabase::make_metadata_key( + StoreType::Contract, + ContractDataVarName::Contract.as_str(), + ); self.store.has_metadata_entry(contract_identifier, &key) } @@ -744,7 +848,10 @@ impl<'a> ClarityDatabase<'a> { &mut self, contract_identifier: &QualifiedContractIdentifier, ) -> Result { - let key = ClarityDatabase::make_metadata_key(StoreType::Contract, "contract"); + let key = ClarityDatabase::make_metadata_key( + StoreType::Contract, + ContractDataVarName::Contract.as_str(), + ); let mut data: Contract = self.fetch_metadata(contract_identifier, &key)? .ok_or_else(|| InterpreterError::Expect( "Failed to read non-consensus contract metadata, even though contract exists in MARF." @@ -869,7 +976,7 @@ impl<'a> ClarityDatabase<'a> { // Get block information -impl<'a> ClarityDatabase<'a> { +impl ClarityDatabase<'_> { /// Returns the ID of a *Stacks* block, by a *Stacks* block height. /// /// Fails if `block_height` >= the "currently" under construction Stacks block height. 
@@ -957,7 +1064,7 @@ impl<'a> ClarityDatabase<'a> { let query_tip = self.get_index_block_header_hash(current_height.saturating_sub(1))?; Ok(self .headers_db - .get_stacks_height_for_tenure_height(&query_tip, tenure_height.into())) + .get_stacks_height_for_tenure_height(&query_tip, tenure_height)) } /// Get the last-known burnchain block height. @@ -1049,7 +1156,7 @@ impl<'a> ClarityDatabase<'a> { /// This is the highest Stacks block in this fork whose consensus hash is known. /// 3. Resolve the parent StacksBlockId to its consensus hash /// 4. Resolve the consensus hash to the associated SortitionId - /// In Epoch 3+: + /// In Epoch 3+: /// 1. Get the SortitionId of the current Stacks tip fn get_sortition_id_for_stacks_tip(&mut self) -> Result> { if !self @@ -1167,8 +1274,7 @@ impl<'a> ClarityDatabase<'a> { InterpreterError::Expect( "FATAL: no winning burnchain token spend record for block".into(), ) - })? - .into()) + })?) } pub fn get_miner_spend_total(&mut self, block_height: u32) -> Result { @@ -1185,8 +1291,7 @@ impl<'a> ClarityDatabase<'a> { InterpreterError::Expect( "FATAL: no total burnchain token spend record for block".into(), ) - })? - .into()) + })?) 
} pub fn get_block_reward(&mut self, block_height: u32) -> Result> { @@ -1207,7 +1312,6 @@ impl<'a> ClarityDatabase<'a> { let reward: u128 = self .headers_db .get_tokens_earned_for_block(&id_bhh, &epoch) - .map(|x| x.into()) .ok_or_else(|| { InterpreterError::Expect("FATAL: matured block has no recorded reward".into()) })?; @@ -1228,7 +1332,7 @@ impl<'a> ClarityDatabase<'a> { // poison-microblock -impl<'a> ClarityDatabase<'a> { +impl ClarityDatabase<'_> { pub fn make_microblock_pubkey_height_key(pubkey_hash: &Hash160) -> String { format!("microblock-pubkey-hash::{}", pubkey_hash) } @@ -1251,6 +1355,7 @@ impl<'a> ClarityDatabase<'a> { self.store.get_cc_special_cases_handler() } + #[allow(clippy::unnecessary_fallible_conversions)] pub fn insert_microblock_poison( &mut self, height: u32, @@ -1342,11 +1447,11 @@ impl<'a> ClarityDatabase<'a> { if let PrincipalData::Standard(principal_data) = reporter_principal { Ok((principal_data, seq)) } else { - return Err(InterpreterError::Expect( + Err(InterpreterError::Expect( "BUG: poison-microblock report principal is not a standard principal" .into(), ) - .into()); + .into()) } }) .transpose() @@ -1363,7 +1468,7 @@ fn map_no_contract_as_none(res: Result>) -> Result> { } // Variable Functions... 
-impl<'a> ClarityDatabase<'a> { +impl ClarityDatabase<'_> { pub fn create_variable( &mut self, contract_identifier: &QualifiedContractIdentifier, @@ -1496,7 +1601,7 @@ impl<'a> ClarityDatabase<'a> { } // Data Map Functions -impl<'a> ClarityDatabase<'a> { +impl ClarityDatabase<'_> { pub fn create_map( &mut self, contract_identifier: &QualifiedContractIdentifier, @@ -1842,7 +1947,7 @@ impl<'a> ClarityDatabase<'a> { // Asset Functions -impl<'a> ClarityDatabase<'a> { +impl ClarityDatabase<'_> { pub fn create_fungible_token( &mut self, contract_identifier: &QualifiedContractIdentifier, @@ -2185,19 +2290,13 @@ impl<'a> ClarityDatabase<'a> { let key = ClarityDatabase::make_key_for_account_balance(principal); debug!("Fetching account balance"; "principal" => %principal.to_string()); let result = self.get_data(&key)?; - Ok(match result { - None => STXBalance::zero(), - Some(balance) => balance, - }) + Ok(result.unwrap_or_default()) } pub fn get_account_nonce(&mut self, principal: &PrincipalData) -> Result { let key = ClarityDatabase::make_key_for_account_nonce(principal); let result = self.get_data(&key)?; - Ok(match result { - None => 0, - Some(nonce) => nonce, - }) + Ok(result.unwrap_or_default()) } pub fn set_account_nonce(&mut self, principal: &PrincipalData, nonce: u64) -> Result<()> { @@ -2207,7 +2306,7 @@ impl<'a> ClarityDatabase<'a> { } // access burnchain state -impl<'a> ClarityDatabase<'a> { +impl ClarityDatabase<'_> { pub fn get_burn_block_height(&self, sortition_id: &SortitionId) -> Option { self.burn_state_db.get_burn_block_height(sortition_id) } @@ -2219,7 +2318,7 @@ impl<'a> ClarityDatabase<'a> { } pub fn get_stacks_epoch_for_block(&self, id_bhh: &StacksBlockId) -> Result { - let burn_block = self.get_burnchain_block_height(&id_bhh).ok_or_else(|| { + let burn_block = self.get_burnchain_block_height(id_bhh).ok_or_else(|| { InterpreterError::Expect(format!( "FATAL: no burnchain block height found for Stacks block {}", id_bhh diff --git 
a/clarity/src/vm/database/clarity_store.rs b/clarity/src/vm/database/clarity_store.rs index b6a45ee764..07d48c9504 100644 --- a/clarity/src/vm/database/clarity_store.rs +++ b/clarity/src/vm/database/clarity_store.rs @@ -18,7 +18,7 @@ use std::path::PathBuf; #[cfg(feature = "canonical")] use rusqlite::Connection; -use stacks_common::types::chainstate::{BlockHeaderHash, StacksBlockId, VRFSeed}; +use stacks_common::types::chainstate::{BlockHeaderHash, StacksBlockId, TrieHash, VRFSeed}; use stacks_common::util::hash::{hex_bytes, to_hex, Hash160, Sha512Trunc256Sum}; use crate::vm::analysis::AnalysisDatabase; @@ -64,9 +64,15 @@ pub trait ClarityBackingStore { fn put_all_data(&mut self, items: Vec<(String, String)>) -> Result<()>; /// fetch K-V out of the committed datastore fn get_data(&mut self, key: &str) -> Result>; + /// fetch Hash(K)-V out of the committed datastore + fn get_data_from_path(&mut self, hash: &TrieHash) -> Result>; /// fetch K-V out of the committed datastore, along with the byte representation /// of the Merkle proof for that key-value pair fn get_data_with_proof(&mut self, key: &str) -> Result)>>; + fn get_data_with_proof_from_path( + &mut self, + hash: &TrieHash, + ) -> Result)>>; fn has_entry(&mut self, key: &str) -> Result { Ok(self.get_data(key)?.is_some()) } @@ -209,10 +215,21 @@ impl ClarityBackingStore for NullBackingStore { panic!("NullBackingStore can't retrieve data") } + fn get_data_from_path(&mut self, _hash: &TrieHash) -> Result> { + panic!("NullBackingStore can't retrieve data") + } + fn get_data_with_proof(&mut self, _key: &str) -> Result)>> { panic!("NullBackingStore can't retrieve data") } + fn get_data_with_proof_from_path( + &mut self, + _hash: &TrieHash, + ) -> Result)>> { + panic!("NullBackingStore can't retrieve data") + } + #[cfg(feature = "canonical")] fn get_side_store(&mut self) -> &Connection { panic!("NullBackingStore has no side store") diff --git a/clarity/src/vm/database/key_value_wrapper.rs
b/clarity/src/vm/database/key_value_wrapper.rs index 3fd845f92f..eecbe092ea 100644 --- a/clarity/src/vm/database/key_value_wrapper.rs +++ b/clarity/src/vm/database/key_value_wrapper.rs @@ -17,7 +17,7 @@ use std::hash::Hash; use hashbrown::HashMap; -use stacks_common::types::chainstate::StacksBlockId; +use stacks_common::types::chainstate::{StacksBlockId, TrieHash}; use stacks_common::types::StacksEpochId; use stacks_common::util::hash::Sha512Trunc256Sum; @@ -76,11 +76,11 @@ fn rollback_value_check(value: &String, check: &RollbackValueCheck) { assert_eq!(value, check) } #[cfg(feature = "rollback_value_check")] -fn rollback_edits_push(edits: &mut Vec<(T, RollbackValueCheck)>, key: T, value: &String) +fn rollback_edits_push(edits: &mut Vec<(T, RollbackValueCheck)>, key: T, value: &str) where T: Eq + Hash + Clone, { - edits.push((key, value.clone())); + edits.push((key, value.to_owned())); } // this function is used to check the lookup map when committing at the "bottom" of the // wrapper -- i.e., when committing to the underlying store. 
@@ -88,7 +88,7 @@ where fn rollback_check_pre_bottom_commit( edits: Vec<(T, RollbackValueCheck)>, lookup_map: &mut HashMap>, -) -> Vec<(T, String)> +) -> Result, InterpreterError> where T: Eq + Hash + Clone, { @@ -96,10 +96,10 @@ where edit_history.reverse(); } for (key, value) in edits.iter() { - rollback_lookup_map(key, &value, lookup_map); + let _ = rollback_lookup_map(key, value, lookup_map); } assert!(lookup_map.is_empty()); - edits + Ok(edits) } /// Result structure for fetched values from the @@ -205,7 +205,7 @@ where } impl<'a> RollbackWrapper<'a> { - pub fn new(store: &'a mut dyn ClarityBackingStore) -> RollbackWrapper { + pub fn new(store: &'a mut dyn ClarityBackingStore) -> RollbackWrapper<'a> { RollbackWrapper { store, lookup_map: HashMap::new(), @@ -218,7 +218,7 @@ impl<'a> RollbackWrapper<'a> { pub fn from_persisted_log( store: &'a mut dyn ClarityBackingStore, log: RollbackWrapperPersistedLog, - ) -> RollbackWrapper { + ) -> RollbackWrapper<'a> { RollbackWrapper { store, lookup_map: log.lookup_map, @@ -283,7 +283,7 @@ impl<'a> RollbackWrapper<'a> { // stack is empty, committing to the backing store let all_edits = rollback_check_pre_bottom_commit(last_item.edits, &mut self.lookup_map)?; - if all_edits.len() > 0 { + if !all_edits.is_empty() { self.store.put_all_data(all_edits).map_err(|e| { InterpreterError::Expect(format!( "ERROR: Failed to commit data to sql store: {e:?}" @@ -295,7 +295,7 @@ impl<'a> RollbackWrapper<'a> { last_item.metadata_edits, &mut self.metadata_lookup_map, )?; - if metadata_edits.len() > 0 { + if !metadata_edits.is_empty() { self.store.put_all_metadata(metadata_edits).map_err(|e| { InterpreterError::Expect(format!( "ERROR: Failed to commit data to sql store: {e:?}" @@ -316,12 +316,12 @@ fn inner_put_data( ) where T: Eq + Hash + Clone, { - let key_edit_deque = lookup_map.entry(key.clone()).or_insert_with(|| Vec::new()); + let key_edit_deque = lookup_map.entry(key.clone()).or_default(); rollback_edits_push(edits, key, &value); 
key_edit_deque.push(value); } -impl<'a> RollbackWrapper<'a> { +impl RollbackWrapper<'_> { pub fn put_data(&mut self, key: &str, value: &str) -> InterpreterResult<()> { let current = self.stack.last_mut().ok_or_else(|| { InterpreterError::Expect( @@ -329,12 +329,13 @@ impl<'a> RollbackWrapper<'a> { ) })?; - Ok(inner_put_data( + inner_put_data( &mut self.lookup_map, &mut current.edits, key.to_string(), value.to_string(), - )) + ); + Ok(()) } /// @@ -347,13 +348,12 @@ impl<'a> RollbackWrapper<'a> { bhh: StacksBlockId, query_pending_data: bool, ) -> InterpreterResult { - self.store.set_block_hash(bhh).map(|x| { + self.store.set_block_hash(bhh).inspect(|_| { // use and_then so that query_pending_data is only set once set_block_hash succeeds // this doesn't matter in practice, because a set_block_hash failure always aborts // the transaction with a runtime error (destroying its environment), but it's much // better practice to do this, especially if the abort behavior changes in the future. self.query_pending_data = query_pending_data; - x }) } @@ -369,6 +369,21 @@ impl<'a> RollbackWrapper<'a> { .transpose() } + /// this function will only return commitment proofs for values _already_ materialized + /// in the underlying store. otherwise it returns None. + pub fn get_data_with_proof_by_hash( + &mut self, + hash: &TrieHash, + ) -> InterpreterResult)>> + where + T: ClarityDeserializable, + { + self.store + .get_data_with_proof_from_path(hash)? + .map(|(value, proof)| Ok((T::deserialize(&value)?, proof))) + .transpose() + } + pub fn get_data(&mut self, key: &str) -> InterpreterResult> where T: ClarityDeserializable, @@ -392,6 +407,23 @@ impl<'a> RollbackWrapper<'a> { .transpose() } + /// DO NOT USE IN CONSENSUS CODE. + /// + /// Load data directly from the underlying store, given its trie hash. The lookup map will not + /// be used. + /// + /// This should never be called from within the Clarity VM, or via block-processing. 
It's only + /// meant to be used by the RPC system. + pub fn get_data_by_hash(&mut self, hash: &TrieHash) -> InterpreterResult> + where + T: ClarityDeserializable, + { + self.store + .get_data_from_path(hash)? + .map(|x| T::deserialize(&x)) + .transpose() + } + pub fn deserialize_value( value_hex: &str, expected: &TypeSignature, @@ -469,12 +501,13 @@ impl<'a> RollbackWrapper<'a> { let metadata_key = (contract.clone(), key.to_string()); - Ok(inner_put_data( + inner_put_data( &mut self.metadata_lookup_map, &mut current.metadata_edits, metadata_key, value.to_string(), - )) + ); + Ok(()) } // Throws a NoSuchContract error if contract doesn't exist, diff --git a/clarity/src/vm/database/mod.rs b/clarity/src/vm/database/mod.rs index d16d944d55..a9c2182806 100644 --- a/clarity/src/vm/database/mod.rs +++ b/clarity/src/vm/database/mod.rs @@ -13,7 +13,6 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . - use hashbrown::HashMap; #[cfg(feature = "canonical")] pub use sqlite::MemoryBackingStore; diff --git a/clarity/src/vm/database/sqlite.rs b/clarity/src/vm/database/sqlite.rs index 0e0f0e3f6e..7bc9a7130f 100644 --- a/clarity/src/vm/database/sqlite.rs +++ b/clarity/src/vm/database/sqlite.rs @@ -19,7 +19,7 @@ use rusqlite::{ params, Connection, Error as SqliteError, ErrorCode as SqliteErrorCode, OptionalExtension, Row, Savepoint, }; -use stacks_common::types::chainstate::{BlockHeaderHash, StacksBlockId}; +use stacks_common::types::chainstate::{BlockHeaderHash, StacksBlockId, TrieHash}; use stacks_common::types::sqlite::NO_PARAMS; use stacks_common::util::db::tx_busy_handler; use stacks_common::util::hash::Sha512Trunc256Sum; @@ -330,10 +330,21 @@ impl ClarityBackingStore for MemoryBackingStore { SqliteConnection::get(self.get_side_store(), key) } + fn get_data_from_path(&mut self, hash: &TrieHash) -> Result> { + SqliteConnection::get(self.get_side_store(), hash.to_string().as_str()) + } + fn 
get_data_with_proof(&mut self, key: &str) -> Result)>> { Ok(SqliteConnection::get(self.get_side_store(), key)?.map(|x| (x, vec![]))) } + fn get_data_with_proof_from_path( + &mut self, + hash: &TrieHash, + ) -> Result)>> { + self.get_data_with_proof(&hash.to_string()) + } + fn get_side_store(&mut self) -> &Connection { &self.side_store } diff --git a/clarity/src/vm/database/structures.rs b/clarity/src/vm/database/structures.rs index e4fab929bd..b88420ff6a 100644 --- a/clarity/src/vm/database/structures.rs +++ b/clarity/src/vm/database/structures.rs @@ -257,7 +257,7 @@ impl ClaritySerializable for STXBalance { impl ClarityDeserializable for STXBalance { fn deserialize(input: &str) -> Result { - let bytes = hex_bytes(&input).map_err(|_| { + let bytes = hex_bytes(input).map_err(|_| { InterpreterError::Expect("STXBalance deserialization: failed decoding bytes.".into()) })?; let result = if bytes.len() == STXBalance::unlocked_and_v1_size { @@ -555,7 +555,7 @@ impl<'db, 'conn> STXBalanceSnapshot<'db, 'conn> { ); } - if !(self.balance.amount_locked() <= new_total_locked) { + if self.balance.amount_locked() > new_total_locked { return Err(InterpreterError::Expect( "FATAL: account must lock more after `increase_lock_v2`".into(), ) @@ -623,7 +623,7 @@ impl<'db, 'conn> STXBalanceSnapshot<'db, 'conn> { } // caller needs to have checked this - if !(amount_to_lock > 0) { + if amount_to_lock == 0 { return Err(InterpreterError::Expect("BUG: cannot lock 0 tokens".into()).into()); } @@ -980,6 +980,12 @@ impl<'db, 'conn> STXBalanceSnapshot<'db, 'conn> { } } +impl Default for STXBalance { + fn default() -> Self { + STXBalance::zero() + } +} + // NOTE: do _not_ add mutation methods to this struct. Put them in STXBalanceSnapshot! 
impl STXBalance { pub const unlocked_and_v1_size: usize = 40; diff --git a/clarity/src/vm/diagnostic.rs b/clarity/src/vm/diagnostic.rs index 81939237d7..164875151f 100644 --- a/clarity/src/vm/diagnostic.rs +++ b/clarity/src/vm/diagnostic.rs @@ -66,24 +66,26 @@ impl Diagnostic { impl fmt::Display for Diagnostic { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{:?}", self.level)?; - if self.spans.len() == 1 { - write!( + match self.spans.len().cmp(&1) { + std::cmp::Ordering::Equal => write!( f, " (line {}, column {})", self.spans[0].start_line, self.spans[0].start_column - )?; - } else if self.spans.len() > 1 { - let lines: Vec = self - .spans - .iter() - .map(|s| format!("line: {}", s.start_line)) - .collect(); - write!(f, " ({})", lines.join(", "))?; + )?, + std::cmp::Ordering::Greater => { + let lines: Vec = self + .spans + .iter() + .map(|s| format!("line: {}", s.start_line)) + .collect(); + write!(f, " ({})", lines.join(", "))?; + } + _ => {} } write!(f, ": {}.", &self.message)?; if let Some(suggestion) = &self.suggestion { - write!(f, "\n{}", suggestion)?; + write!(f, "\n{suggestion}")?; } - write!(f, "\n") + writeln!(f) } } diff --git a/clarity/src/vm/docs/mod.rs b/clarity/src/vm/docs/mod.rs index 9075c55e71..8c9a48f006 100644 --- a/clarity/src/vm/docs/mod.rs +++ b/clarity/src/vm/docs/mod.rs @@ -529,7 +529,7 @@ const LOG2_API: SimpleFunctionAPI = SimpleFunctionAPI { snippet: "log2 ${1:expr-1}", signature: "(log2 n)", description: - "Returns the power to which the number 2 must be raised to to obtain the value `n`, rounded + "Returns the power to which the number 2 must be raised to obtain the value `n`, rounded down to the nearest integer. Fails on a negative numbers. 
", example: "(log2 u8) ;; Returns u3 @@ -814,19 +814,19 @@ pub fn get_output_type_string(function_type: &FunctionType) -> String { FunctionType::Binary(left, right, ref out_sig) => match out_sig { FunctionReturnsSignature::Fixed(out_type) => format!("{}", out_type), FunctionReturnsSignature::TypeOfArgAtPosition(pos) => { - let arg_sig: &FunctionArgSignature; - match pos { - 0 => arg_sig = left, - 1 => arg_sig = right, - _ => panic!("Index out of range: TypeOfArgAtPosition for FunctionType::Binary can only handle two arguments, zero-indexed (0 or 1).") - } + let arg_sig = match pos { + 0 => left, + 1 => right, + _ => panic!("Index out of range: TypeOfArgAtPosition for FunctionType::Binary can only handle two arguments, zero-indexed (0 or 1).") + }; + match arg_sig { - FunctionArgSignature::Single(arg_type) => format!("{}", arg_type), - FunctionArgSignature::Union(arg_types) => { - let out_types: Vec = - arg_types.iter().map(|x| format!("{}", x)).collect(); - out_types.join(" | ") - } + FunctionArgSignature::Single(arg_type) => arg_type.to_string(), + FunctionArgSignature::Union(arg_types) => arg_types + .iter() + .map(ToString::to_string) + .collect::>() + .join(" | "), } } }, @@ -835,15 +835,12 @@ pub fn get_output_type_string(function_type: &FunctionType) -> String { pub fn get_signature(function_name: &str, function_type: &FunctionType) -> Option { if let FunctionType::Fixed(FixedFunction { ref args, .. 
}) = function_type { - let in_names: Vec = args - .iter() - .map(|x| format!("{}", x.name.as_str())) - .collect(); + let in_names: Vec = args.iter().map(|x| x.name.to_string()).collect(); let arg_examples = in_names.join(" "); Some(format!( "({}{}{})", function_name, - if arg_examples.len() == 0 { "" } else { " " }, + if arg_examples.is_empty() { "" } else { " " }, arg_examples )) } else { @@ -860,7 +857,7 @@ fn make_for_simple_native( ) -> FunctionAPI { let (input_type, output_type) = { if let TypedNativeFunction::Simple(SimpleNativeFunction(function_type)) = - TypedNativeFunction::type_native_function(&function) + TypedNativeFunction::type_native_function(function) .expect("Failed to type a native function") { let input_type = get_input_type_string(&function_type); @@ -877,8 +874,8 @@ fn make_for_simple_native( FunctionAPI { name: api.name.map_or(name, |x| x.to_string()), snippet: api.snippet.to_string(), - input_type: input_type, - output_type: output_type, + input_type, + output_type, signature: api.signature.to_string(), description: api.description.to_string(), example: api.example.to_string(), @@ -2526,35 +2523,35 @@ pub fn make_api_reference(function: &NativeFunctions) -> FunctionAPI { use crate::vm::functions::NativeFunctions::*; let name = function.get_name(); match function { - Add => make_for_simple_native(&ADD_API, &function, name), - ToUInt => make_for_simple_native(&TO_UINT_API, &function, name), - ToInt => make_for_simple_native(&TO_INT_API, &function, name), - Subtract => make_for_simple_native(&SUB_API, &function, name), - Multiply => make_for_simple_native(&MUL_API, &function, name), - Divide => make_for_simple_native(&DIV_API, &function, name), - BuffToIntLe => make_for_simple_native(&BUFF_TO_INT_LE_API, &function, name), - BuffToUIntLe => make_for_simple_native(&BUFF_TO_UINT_LE_API, &function, name), - BuffToIntBe => make_for_simple_native(&BUFF_TO_INT_BE_API, &function, name), - BuffToUIntBe => make_for_simple_native(&BUFF_TO_UINT_BE_API, 
&function, name), - IsStandard => make_for_simple_native(&IS_STANDARD_API, &function, name), - PrincipalDestruct => make_for_simple_native(&PRINCPIPAL_DESTRUCT_API, &function, name), - PrincipalConstruct => make_for_special(&PRINCIPAL_CONSTRUCT_API, &function), - StringToInt => make_for_simple_native(&STRING_TO_INT_API, &function, name), - StringToUInt => make_for_simple_native(&STRING_TO_UINT_API, &function, name), - IntToAscii => make_for_simple_native(&INT_TO_ASCII_API, &function, name), - IntToUtf8 => make_for_simple_native(&INT_TO_UTF8_API, &function, name), - CmpGeq => make_for_simple_native(&GEQ_API, &function, name), - CmpLeq => make_for_simple_native(&LEQ_API, &function, name), - CmpLess => make_for_simple_native(&LESS_API, &function, name), - CmpGreater => make_for_simple_native(&GREATER_API, &function, name), - Modulo => make_for_simple_native(&MOD_API, &function, name), - Power => make_for_simple_native(&POW_API, &function, name), - Sqrti => make_for_simple_native(&SQRTI_API, &function, name), - Log2 => make_for_simple_native(&LOG2_API, &function, name), - BitwiseXor => make_for_simple_native(&XOR_API, &function, name), - And => make_for_simple_native(&AND_API, &function, name), - Or => make_for_simple_native(&OR_API, &function, name), - Not => make_for_simple_native(&NOT_API, &function, name), + Add => make_for_simple_native(&ADD_API, function, name), + ToUInt => make_for_simple_native(&TO_UINT_API, function, name), + ToInt => make_for_simple_native(&TO_INT_API, function, name), + Subtract => make_for_simple_native(&SUB_API, function, name), + Multiply => make_for_simple_native(&MUL_API, function, name), + Divide => make_for_simple_native(&DIV_API, function, name), + BuffToIntLe => make_for_simple_native(&BUFF_TO_INT_LE_API, function, name), + BuffToUIntLe => make_for_simple_native(&BUFF_TO_UINT_LE_API, function, name), + BuffToIntBe => make_for_simple_native(&BUFF_TO_INT_BE_API, function, name), + BuffToUIntBe => 
make_for_simple_native(&BUFF_TO_UINT_BE_API, function, name), + IsStandard => make_for_simple_native(&IS_STANDARD_API, function, name), + PrincipalDestruct => make_for_simple_native(&PRINCPIPAL_DESTRUCT_API, function, name), + PrincipalConstruct => make_for_special(&PRINCIPAL_CONSTRUCT_API, function), + StringToInt => make_for_simple_native(&STRING_TO_INT_API, function, name), + StringToUInt => make_for_simple_native(&STRING_TO_UINT_API, function, name), + IntToAscii => make_for_simple_native(&INT_TO_ASCII_API, function, name), + IntToUtf8 => make_for_simple_native(&INT_TO_UTF8_API, function, name), + CmpGeq => make_for_simple_native(&GEQ_API, function, name), + CmpLeq => make_for_simple_native(&LEQ_API, function, name), + CmpLess => make_for_simple_native(&LESS_API, function, name), + CmpGreater => make_for_simple_native(&GREATER_API, function, name), + Modulo => make_for_simple_native(&MOD_API, function, name), + Power => make_for_simple_native(&POW_API, function, name), + Sqrti => make_for_simple_native(&SQRTI_API, function, name), + Log2 => make_for_simple_native(&LOG2_API, function, name), + BitwiseXor => make_for_simple_native(&XOR_API, function, name), + And => make_for_simple_native(&AND_API, function, name), + Or => make_for_simple_native(&OR_API, function, name), + Not => make_for_simple_native(&NOT_API, function, name), Equals => make_for_special(&EQUALS_API, function), If => make_for_special(&IF_API, function), Let => make_for_special(&LET_API, function), @@ -2620,20 +2617,20 @@ pub fn make_api_reference(function: &NativeFunctions) -> FunctionAPI { BurnAsset => make_for_special(&BURN_ASSET, function), GetTokenSupply => make_for_special(&GET_TOKEN_SUPPLY, function), AtBlock => make_for_special(&AT_BLOCK, function), - GetStxBalance => make_for_simple_native(&STX_GET_BALANCE, &function, name), - StxGetAccount => make_for_simple_native(&STX_GET_ACCOUNT, &function, name), + GetStxBalance => make_for_simple_native(&STX_GET_BALANCE, function, name), + 
StxGetAccount => make_for_simple_native(&STX_GET_ACCOUNT, function, name), StxTransfer => make_for_special(&STX_TRANSFER, function), StxTransferMemo => make_for_special(&STX_TRANSFER_MEMO, function), - StxBurn => make_for_simple_native(&STX_BURN, &function, name), + StxBurn => make_for_simple_native(&STX_BURN, function, name), ToConsensusBuff => make_for_special(&TO_CONSENSUS_BUFF, function), FromConsensusBuff => make_for_special(&FROM_CONSENSUS_BUFF, function), ReplaceAt => make_for_special(&REPLACE_AT, function), - BitwiseXor2 => make_for_simple_native(&BITWISE_XOR_API, &function, name), - BitwiseAnd => make_for_simple_native(&BITWISE_AND_API, &function, name), - BitwiseOr => make_for_simple_native(&BITWISE_OR_API, &function, name), - BitwiseNot => make_for_simple_native(&BITWISE_NOT_API, &function, name), - BitwiseLShift => make_for_simple_native(&BITWISE_LEFT_SHIFT_API, &function, name), - BitwiseRShift => make_for_simple_native(&BITWISE_RIGHT_SHIFT_API, &function, name), + BitwiseXor2 => make_for_simple_native(&BITWISE_XOR_API, function, name), + BitwiseAnd => make_for_simple_native(&BITWISE_AND_API, function, name), + BitwiseOr => make_for_simple_native(&BITWISE_OR_API, function, name), + BitwiseNot => make_for_simple_native(&BITWISE_NOT_API, function, name), + BitwiseLShift => make_for_simple_native(&BITWISE_LEFT_SHIFT_API, function, name), + BitwiseRShift => make_for_simple_native(&BITWISE_RIGHT_SHIFT_API, function, name), } } @@ -2726,7 +2723,7 @@ fn make_all_api_reference() -> ReferenceAPIs { .filter_map(make_keyword_reference) .collect(); - keywords.sort_by(|x, y| x.name.cmp(&y.name)); + keywords.sort_by_key(|x| x.name); ReferenceAPIs { functions, @@ -2737,10 +2734,9 @@ fn make_all_api_reference() -> ReferenceAPIs { #[allow(clippy::expect_used)] pub fn make_json_api_reference() -> String { let api_out = make_all_api_reference(); - format!( - "{}", - serde_json::to_string(&api_out).expect("Failed to serialize documentation") - ) + 
serde_json::to_string(&api_out) + .expect("Failed to serialize documentation") + .to_string() } #[cfg(test)] @@ -2777,7 +2773,7 @@ mod test { const DOC_HEADER_DB: DocHeadersDB = DocHeadersDB {}; impl MemoryBackingStore { - pub fn as_docs_clarity_db<'a>(&'a mut self) -> ClarityDatabase<'a> { + pub fn as_docs_clarity_db(&mut self) -> ClarityDatabase<'_> { ClarityDatabase::new(self, &DOC_HEADER_DB, &DOC_POX_STATE_DB) } } @@ -3001,13 +2997,13 @@ mod test { let mut current_segment: String = "".into(); for line in program.lines() { current_segment.push_str(line); - current_segment.push_str("\n"); + current_segment.push('\n'); if line.contains(";;") && line.contains("Returns ") { segments.push(current_segment); current_segment = "".into(); } } - if current_segment.len() > 0 { + if !current_segment.is_empty() { segments.push(current_segment); } @@ -3067,7 +3063,7 @@ mod test { .type_map .as_ref() .unwrap() - .get_type_expected(&analysis.expressions.last().unwrap()) + .get_type_expected(analysis.expressions.last().unwrap()) .cloned(), ); } @@ -3162,7 +3158,7 @@ mod test { let mut analysis_db = store.as_analysis_db(); let mut parsed = ast::build_ast( &contract_id, - &token_contract_content, + token_contract_content, &mut (), ClarityVersion::latest(), StacksEpochId::latest(), @@ -3232,7 +3228,7 @@ mod test { env.initialize_contract( contract_id, - &token_contract_content, + token_contract_content, None, ASTRules::PrecheckSize, ) diff --git a/clarity/src/vm/errors.rs b/clarity/src/vm/errors.rs index b3b0ca5fea..911465d4ba 100644 --- a/clarity/src/vm/errors.rs +++ b/clarity/src/vm/errors.rs @@ -37,6 +37,7 @@ pub struct IncomparableError { } #[derive(Debug)] +#[allow(clippy::large_enum_variant)] pub enum Error { /// UncheckedErrors are errors that *should* be caught by the /// TypeChecker and other check passes. 
Test executions may @@ -117,7 +118,7 @@ pub type InterpreterResult = Result; impl PartialEq> for IncomparableError { fn eq(&self, _other: &IncomparableError) -> bool { - return false; + false } } @@ -137,19 +138,16 @@ impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { Error::Runtime(ref err, ref stack) => { - match err { - _ => write!(f, "{}", err), - }?; - + write!(f, "{err}")?; if let Some(ref stack_trace) = stack { - write!(f, "\n Stack Trace: \n")?; + writeln!(f, "\n Stack Trace: ")?; for item in stack_trace.iter() { - write!(f, "{}\n", item)?; + writeln!(f, "{item}")?; } } Ok(()) } - _ => write!(f, "{:?}", self), + _ => write!(f, "{self:?}"), } } } @@ -226,9 +224,9 @@ impl From for () { fn from(err: Error) -> Self {} } -impl Into for ShortReturnType { - fn into(self) -> Value { - match self { +impl From for Value { + fn from(val: ShortReturnType) -> Self { + match val { ShortReturnType::ExpectedValue(v) => v, ShortReturnType::AssertionFailed(v) => v, } diff --git a/clarity/src/vm/functions/assets.rs b/clarity/src/vm/functions/assets.rs index 0d004a846a..3dca730928 100644 --- a/clarity/src/vm/functions/assets.rs +++ b/clarity/src/vm/functions/assets.rs @@ -210,6 +210,7 @@ pub fn special_stx_transfer_memo( } } +#[allow(clippy::unnecessary_fallible_conversions)] pub fn special_stx_account( args: &[SymbolicExpression], env: &mut Environment, @@ -286,10 +287,7 @@ pub fn special_stx_burn( env.add_memory(TypeSignature::PrincipalType.size()? as u64)?; env.add_memory(STXBalance::unlocked_and_v1_size as u64)?; - let mut burner_snapshot = env - .global_context - .database - .get_stx_balance_snapshot(&from)?; + let mut burner_snapshot = env.global_context.database.get_stx_balance_snapshot(from)?; if !burner_snapshot.can_transfer(amount)? 
{ return clarity_ecode!(StxErrorCodes::NOT_ENOUGH_BALANCE); } diff --git a/clarity/src/vm/functions/conversions.rs b/clarity/src/vm/functions/conversions.rs index 090f0d2107..142c1308eb 100644 --- a/clarity/src/vm/functions/conversions.rs +++ b/clarity/src/vm/functions/conversions.rs @@ -57,13 +57,13 @@ pub fn buff_to_int_generic( > BufferLength::try_from(16_u32) .map_err(|_| InterpreterError::Expect("Failed to construct".into()))? { - return Err(CheckErrors::TypeValueError( + Err(CheckErrors::TypeValueError( SequenceType(BufferType(BufferLength::try_from(16_u32).map_err( |_| InterpreterError::Expect("Failed to construct".into()), )?)), value, ) - .into()); + .into()) } else { let mut transfer_buffer = [0u8; 16]; let original_slice = sequence_data.as_slice(); @@ -82,15 +82,13 @@ pub fn buff_to_int_generic( Ok(value) } } - _ => { - return Err(CheckErrors::TypeValueError( - SequenceType(BufferType(BufferLength::try_from(16_u32).map_err( - |_| InterpreterError::Expect("Failed to construct".into()), - )?)), - value, - ) - .into()) - } + _ => Err(CheckErrors::TypeValueError( + SequenceType(BufferType(BufferLength::try_from(16_u32).map_err( + |_| InterpreterError::Expect("Failed to construct".into()), + )?)), + value, + ) + .into()), } } diff --git a/clarity/src/vm/functions/crypto.rs b/clarity/src/vm/functions/crypto.rs index dd55f3a56f..86d92283ca 100644 --- a/clarity/src/vm/functions/crypto.rs +++ b/clarity/src/vm/functions/crypto.rs @@ -126,8 +126,8 @@ pub fn special_principal_of( pubkey_to_address_v1(pub_key)? }; let principal = addr.to_account_principal(); - return Ok(Value::okay(Value::Principal(principal)) - .map_err(|_| InterpreterError::Expect("Failed to construct ok".into()))?); + Ok(Value::okay(Value::Principal(principal)) + .map_err(|_| InterpreterError::Expect("Failed to construct ok".into()))?) 
} else { Ok(Value::err_uint(1)) } @@ -169,17 +169,14 @@ pub fn special_secp256k1_recover( _ => return Err(CheckErrors::TypeValueError(BUFF_65.clone(), param1).into()), }; - match secp256k1_recover(&message, &signature).map_err(|_| CheckErrors::InvalidSecp65k1Signature) - { - Ok(pubkey) => { - return Ok(Value::okay( - Value::buff_from(pubkey.to_vec()) - .map_err(|_| InterpreterError::Expect("Failed to construct buff".into()))?, - ) - .map_err(|_| InterpreterError::Expect("Failed to construct ok".into()))?) - } - _ => return Ok(Value::err_uint(1)), - }; + match secp256k1_recover(message, signature).map_err(|_| CheckErrors::InvalidSecp65k1Signature) { + Ok(pubkey) => Ok(Value::okay( + Value::buff_from(pubkey.to_vec()) + .map_err(|_| InterpreterError::Expect("Failed to construct buff".into()))?, + ) + .map_err(|_| InterpreterError::Expect("Failed to construct ok".into()))?), + _ => Ok(Value::err_uint(1)), + } } pub fn special_secp256k1_verify( diff --git a/clarity/src/vm/functions/database.rs b/clarity/src/vm/functions/database.rs index ff14507ead..12fb1cd3da 100644 --- a/clarity/src/vm/functions/database.rs +++ b/clarity/src/vm/functions/database.rs @@ -730,14 +730,12 @@ pub fn special_delete_entry_v205( /// - `miner-spend-winner` returns the number of satoshis spent by the winning miner for the block at `block-height` /// - `miner-spend-total` returns the total number of satoshis spent by all miners for the block at `block-height` /// - `block-reward` returns the block reward for the block at `block-height` - /// /// # Errors: /// - CheckErrors::IncorrectArgumentCount if there aren't 2 arguments. /// - CheckErrors::GetStacksBlockInfoExpectPropertyName if `args[0]` isn't a ClarityName. /// - CheckErrors::NoSuchStacksBlockInfoProperty if `args[0]` isn't a StacksBlockInfoProperty. /// - CheckErrors::TypeValueError if `args[1]` isn't a `uint`. 
- pub fn special_get_block_info( args: &[SymbolicExpression], env: &mut Environment, diff --git a/clarity/src/vm/functions/mod.rs b/clarity/src/vm/functions/mod.rs index 833ed4baf8..a8971b3fa0 100644 --- a/clarity/src/vm/functions/mod.rs +++ b/clarity/src/vm/functions/mod.rs @@ -63,6 +63,8 @@ macro_rules! switch_on_global_epoch { StacksEpochId::Epoch25 => $Epoch205Version(args, env, context), // Note: We reuse 2.05 for 3.0. StacksEpochId::Epoch30 => $Epoch205Version(args, env, context), + // Note: We reuse 2.05 for 3.1. + StacksEpochId::Epoch31 => $Epoch205Version(args, env, context), } } }; @@ -77,7 +79,6 @@ mod boolean; mod conversions; mod crypto; mod database; -#[allow(clippy::result_large_err)] pub mod define; mod options; pub mod principals; diff --git a/clarity/src/vm/functions/options.rs b/clarity/src/vm/functions/options.rs index 26829618af..e3305395a5 100644 --- a/clarity/src/vm/functions/options.rs +++ b/clarity/src/vm/functions/options.rs @@ -212,7 +212,7 @@ pub fn special_match( match input { Value::Response(data) => special_match_resp(data, &args[1..], env, context), Value::Optional(data) => special_match_opt(data, &args[1..], env, context), - _ => return Err(CheckErrors::BadMatchInput(TypeSignature::type_of(&input)?).into()), + _ => Err(CheckErrors::BadMatchInput(TypeSignature::type_of(&input)?).into()), } } diff --git a/clarity/src/vm/mod.rs b/clarity/src/vm/mod.rs index d64b207522..8680c06224 100644 --- a/clarity/src/vm/mod.rs +++ b/clarity/src/vm/mod.rs @@ -13,6 +13,7 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . +#![allow(clippy::result_large_err)] pub mod diagnostic; pub mod errors; @@ -172,33 +173,31 @@ fn lookup_variable(name: &str, context: &LocalContext, env: &mut Environment) -> name )) .into()) + } else if let Some(value) = variables::lookup_reserved_variable(name, context, env)? 
{ + Ok(value) } else { - if let Some(value) = variables::lookup_reserved_variable(name, context, env)? { + runtime_cost( + ClarityCostFunction::LookupVariableDepth, + env, + context.depth(), + )?; + if let Some(value) = context.lookup_variable(name) { + runtime_cost(ClarityCostFunction::LookupVariableSize, env, value.size()?)?; + Ok(value.clone()) + } else if let Some(value) = env.contract_context.lookup_variable(name).cloned() { + runtime_cost(ClarityCostFunction::LookupVariableSize, env, value.size()?)?; + let (value, _) = + Value::sanitize_value(env.epoch(), &TypeSignature::type_of(&value)?, value) + .ok_or_else(|| CheckErrors::CouldNotDetermineType)?; Ok(value) - } else { - runtime_cost( - ClarityCostFunction::LookupVariableDepth, - env, - context.depth(), - )?; - if let Some(value) = context.lookup_variable(name) { - runtime_cost(ClarityCostFunction::LookupVariableSize, env, value.size()?)?; - Ok(value.clone()) - } else if let Some(value) = env.contract_context.lookup_variable(name).cloned() { - runtime_cost(ClarityCostFunction::LookupVariableSize, env, value.size()?)?; - let (value, _) = - Value::sanitize_value(env.epoch(), &TypeSignature::type_of(&value)?, value) - .ok_or_else(|| CheckErrors::CouldNotDetermineType)?; - Ok(value) - } else if let Some(callable_data) = context.lookup_callable_contract(name) { - if env.contract_context.get_clarity_version() < &ClarityVersion::Clarity2 { - Ok(callable_data.contract_identifier.clone().into()) - } else { - Ok(Value::CallableContract(callable_data.clone())) - } + } else if let Some(callable_data) = context.lookup_callable_contract(name) { + if env.contract_context.get_clarity_version() < &ClarityVersion::Clarity2 { + Ok(callable_data.contract_identifier.clone().into()) } else { - Err(CheckErrors::UndefinedVariable(name.to_string()).into()) + Ok(Value::CallableContract(callable_data.clone())) } + } else { + Err(CheckErrors::UndefinedVariable(name.to_string()).into()) } } } @@ -238,11 +237,7 @@ pub fn apply( // only 
enough to do recursion detection. // do recursion check on user functions. - let track_recursion = match function { - CallableType::UserFunction(_) => true, - _ => false, - }; - + let track_recursion = matches!(function, CallableType::UserFunction(_)); if track_recursion && env.call_stack.contains(&identifier) { return Err(CheckErrors::CircularReference(vec![identifier.to_string()]).into()); } @@ -311,9 +306,9 @@ pub fn apply( } } -pub fn eval<'a>( +pub fn eval( exp: &SymbolicExpression, - env: &'a mut Environment, + env: &mut Environment, context: &LocalContext, ) -> Result { use crate::vm::representations::SymbolicExpressionType::{ @@ -329,7 +324,7 @@ pub fn eval<'a>( let res = match exp.expr { AtomValue(ref value) | LiteralValue(ref value) => Ok(value.clone()), - Atom(ref value) => lookup_variable(&value, context, env), + Atom(ref value) => lookup_variable(value, context, env), List(ref children) => { let (function_variable, rest) = children .split_first() @@ -338,8 +333,8 @@ pub fn eval<'a>( let function_name = function_variable .match_atom() .ok_or(CheckErrors::BadFunctionName)?; - let f = lookup_function(&function_name, env)?; - apply(&f, &rest, env, context) + let f = lookup_function(function_name, env)?; + apply(&f, rest, env, context) } TraitReference(_, _) | Field(_) => { return Err(InterpreterError::BadSymbolicRepresentation( @@ -360,13 +355,8 @@ pub fn eval<'a>( } pub fn is_reserved(name: &str, version: &ClarityVersion) -> bool { - if let Some(_result) = functions::lookup_reserved_functions(name, version) { - true - } else if variables::is_reserved_name(name, version) { - true - } else { - false - } + functions::lookup_reserved_functions(name, version).is_some() + || variables::is_reserved_name(name, version) } /// This function evaluates a list of expressions, sharing a global context. @@ -575,7 +565,7 @@ pub fn execute(program: &str) -> Result> { ) } -/// Execute for test in in Clarity2, Epoch21, testnet. 
+/// Execute for test in Clarity2, Epoch21, testnet. #[cfg(any(test, feature = "testing"))] pub fn execute_v2(program: &str) -> Result> { execute_with_parameters( @@ -629,7 +619,7 @@ mod test { func_body, DefineType::Private, &"do_work".into(), - &"", + "", ); let context = LocalContext::new(); diff --git a/clarity/src/vm/representations.rs b/clarity/src/vm/representations.rs index c80e3c7467..0f779b479f 100644 --- a/clarity/src/vm/representations.rs +++ b/clarity/src/vm/representations.rs @@ -125,8 +125,8 @@ impl StacksMessageCodec for ClarityName { impl StacksMessageCodec for ContractName { fn consensus_serialize(&self, fd: &mut W) -> Result<(), codec_error> { - if self.as_bytes().len() < CONTRACT_MIN_NAME_LENGTH as usize - || self.as_bytes().len() > CONTRACT_MAX_NAME_LENGTH as usize + if self.as_bytes().len() < CONTRACT_MIN_NAME_LENGTH + || self.as_bytes().len() > CONTRACT_MAX_NAME_LENGTH { return Err(codec_error::SerializeError(format!( "Failed to serialize contract name: too short or too long: {}", diff --git a/clarity/src/vm/test_util/mod.rs b/clarity/src/vm/test_util/mod.rs index f2b6d4dd09..861c88ad0a 100644 --- a/clarity/src/vm/test_util/mod.rs +++ b/clarity/src/vm/test_util/mod.rs @@ -52,7 +52,8 @@ pub fn generate_test_burn_state_db(epoch_id: StacksEpochId) -> UnitTestBurnState | StacksEpochId::Epoch23 | StacksEpochId::Epoch24 | StacksEpochId::Epoch25 - | StacksEpochId::Epoch30 => UnitTestBurnStateDB { + | StacksEpochId::Epoch30 + | StacksEpochId::Epoch31 => UnitTestBurnStateDB { epoch_id, ast_rules: ASTRules::PrecheckSize, }, @@ -69,7 +70,7 @@ pub fn execute_on_network(s: &str, use_mainnet: bool) -> Value { pub fn symbols_from_values(vec: Vec) -> Vec { vec.into_iter() - .map(|value| SymbolicExpression::atom_value(value)) + .map(SymbolicExpression::atom_value) .collect() } diff --git a/clarity/src/vm/tests/assets.rs b/clarity/src/vm/tests/assets.rs index e42f2c59da..e332f72d46 100644 --- a/clarity/src/vm/tests/assets.rs +++ 
b/clarity/src/vm/tests/assets.rs @@ -1006,7 +1006,7 @@ fn test_simple_naming_system( _ => panic!(), }; - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); let tokens_contract_id = @@ -1107,7 +1107,7 @@ fn test_simple_naming_system( assert!(is_committed(&result)); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); assert_eq!( env.eval_read_only(&names_contract_id.clone(), "(nft-get-owner? names 1)") .unwrap(), @@ -1378,7 +1378,7 @@ fn test_simple_naming_system( assert_eq!(asset_map.to_table().len(), 0); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); assert_eq!( env.eval_read_only(&names_contract_id.clone(), "(nft-get-owner? names 5)") .unwrap(), diff --git a/clarity/src/vm/tests/contracts.rs b/clarity/src/vm/tests/contracts.rs index 9cb5aea4b1..94433958c4 100644 --- a/clarity/src/vm/tests/contracts.rs +++ b/clarity/src/vm/tests/contracts.rs @@ -119,7 +119,7 @@ fn test_get_block_info_eval( Ok(Value::none()), ]; - let mut placeholder_context = ContractContext::new( + let placeholder_context = ContractContext::new( QualifiedContractIdentifier::transient(), ClarityVersion::Clarity2, ); @@ -138,7 +138,7 @@ fn test_get_block_info_eval( ) .unwrap(); - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); eprintln!("{}", contracts[i]); let eval_result = env.eval_read_only(&contract_identifier, "(test-func)"); match expected[i] { @@ -172,13 +172,13 @@ fn test_contract_caller(epoch: StacksEpochId, mut env_factory: MemoryEnvironment (as-contract (contract-call? 
.contract-a get-caller)))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = ContractContext::new( + let placeholder_context = ContractContext::new( QualifiedContractIdentifier::transient(), ClarityVersion::Clarity2, ); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("contract-a").unwrap(), contract_a, @@ -200,7 +200,7 @@ fn test_contract_caller(epoch: StacksEpochId, mut env_factory: MemoryEnvironment let mut env = owned_env.get_exec_environment( Some(p1.clone().expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( env.execute_contract( @@ -312,7 +312,7 @@ fn test_tx_sponsor(epoch: StacksEpochId, mut env_factory: MemoryEnvironmentGener .expect_principal() .unwrap(); let p2 = execute("'SM2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQVX8X0G"); - let mut placeholder_context = ContractContext::new( + let placeholder_context = ContractContext::new( QualifiedContractIdentifier::transient(), ClarityVersion::Clarity2, ); @@ -324,11 +324,8 @@ fn test_tx_sponsor(epoch: StacksEpochId, mut env_factory: MemoryEnvironmentGener }; { - let mut env = owned_env.get_exec_environment( - Some(p1.clone()), - sponsor.clone(), - &mut placeholder_context, - ); + let mut env = + owned_env.get_exec_environment(Some(p1.clone()), sponsor.clone(), &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("contract-a").unwrap(), contract_a, @@ -345,11 +342,8 @@ fn test_tx_sponsor(epoch: StacksEpochId, mut env_factory: MemoryEnvironmentGener // Sponsor is equal to some(principal) in this code block. 
{ - let mut env = owned_env.get_exec_environment( - Some(p1.clone()), - sponsor.clone(), - &mut placeholder_context, - ); + let mut env = + owned_env.get_exec_environment(Some(p1.clone()), sponsor.clone(), &placeholder_context); tx_sponsor_contract_asserts(&mut env, sponsor); } @@ -357,7 +351,7 @@ fn test_tx_sponsor(epoch: StacksEpochId, mut env_factory: MemoryEnvironmentGener { let sponsor = None; let mut env = - owned_env.get_exec_environment(Some(p1), sponsor.clone(), &mut placeholder_context); + owned_env.get_exec_environment(Some(p1), sponsor.clone(), &placeholder_context); tx_sponsor_contract_asserts(&mut env, sponsor); } } @@ -381,13 +375,13 @@ fn test_fully_qualified_contract_call( (as-contract (contract-call? .contract-a get-caller)))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = ContractContext::new( + let placeholder_context = ContractContext::new( QualifiedContractIdentifier::transient(), ClarityVersion::Clarity2, ); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("contract-a").unwrap(), contract_a, @@ -409,7 +403,7 @@ fn test_fully_qualified_contract_call( let mut env = owned_env.get_exec_environment( Some(p1.clone().expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( env.execute_contract( @@ -520,13 +514,13 @@ fn test_simple_naming_system(epoch: StacksEpochId, mut env_factory: MemoryEnviro let name_hash_expensive_0 = execute("(hash160 1)"); let name_hash_expensive_1 = execute("(hash160 2)"); let name_hash_cheap_0 = execute("(hash160 100001)"); - let mut placeholder_context = ContractContext::new( + let placeholder_context = ContractContext::new( QualifiedContractIdentifier::transient(), ClarityVersion::Clarity2, ); { - let mut env = owned_env.get_exec_environment(None, 
None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); let contract_identifier = QualifiedContractIdentifier::local("tokens").unwrap(); env.initialize_contract(contract_identifier, tokens_contract, ASTRules::PrecheckSize) @@ -541,7 +535,7 @@ fn test_simple_naming_system(epoch: StacksEpochId, mut env_factory: MemoryEnviro let mut env = owned_env.get_exec_environment( Some(p2.clone().expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert!(is_err_code( @@ -560,7 +554,7 @@ fn test_simple_naming_system(epoch: StacksEpochId, mut env_factory: MemoryEnviro let mut env = owned_env.get_exec_environment( Some(p1.clone().expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert!(is_committed( &env.execute_contract( @@ -588,7 +582,7 @@ fn test_simple_naming_system(epoch: StacksEpochId, mut env_factory: MemoryEnviro let mut env = owned_env.get_exec_environment( Some(p2.clone().expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert!(is_err_code( &env.execute_contract( @@ -607,7 +601,7 @@ fn test_simple_naming_system(epoch: StacksEpochId, mut env_factory: MemoryEnviro let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert!(is_committed( &env.execute_contract( @@ -625,7 +619,7 @@ fn test_simple_naming_system(epoch: StacksEpochId, mut env_factory: MemoryEnviro let mut env = owned_env.get_exec_environment( Some(p2.clone().expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert!(is_committed( &env.execute_contract( @@ -690,7 +684,7 @@ fn test_simple_contract_call(epoch: StacksEpochId, mut env_factory: MemoryEnviro (contract-call? 
.factorial-contract compute 8008)) "; - let mut placeholder_context = ContractContext::new( + let placeholder_context = ContractContext::new( QualifiedContractIdentifier::transient(), ClarityVersion::Clarity2, ); @@ -698,7 +692,7 @@ fn test_simple_contract_call(epoch: StacksEpochId, mut env_factory: MemoryEnviro let mut env = owned_env.get_exec_environment( Some(get_principal().expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); let contract_identifier = QualifiedContractIdentifier::local("factorial-contract").unwrap(); @@ -776,12 +770,12 @@ fn test_aborts(epoch: StacksEpochId, mut env_factory: MemoryEnvironmentGenerator (contract-call? .contract-1 modify-data 105 105) (err 1))) "; - let mut placeholder_context = ContractContext::new( + let placeholder_context = ContractContext::new( QualifiedContractIdentifier::transient(), ClarityVersion::Clarity2, ); - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); let contract_identifier = QualifiedContractIdentifier::local("contract-1").unwrap(); env.initialize_contract(contract_identifier, contract_1, ASTRules::PrecheckSize) @@ -890,12 +884,12 @@ fn test_aborts(epoch: StacksEpochId, mut env_factory: MemoryEnvironmentGenerator fn test_factorial_contract(epoch: StacksEpochId, mut env_factory: MemoryEnvironmentGenerator) { let mut owned_env = env_factory.get_env(epoch); - let mut placeholder_context = ContractContext::new( + let placeholder_context = ContractContext::new( QualifiedContractIdentifier::transient(), ClarityVersion::Clarity2, ); - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); let contract_identifier = QualifiedContractIdentifier::local("factorial").unwrap(); env.initialize_contract( @@ -1092,9 +1086,9 @@ fn test_cc_stack_depth( 1) 1) 1) 1) 1) 
1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1)) (bar) "; - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); let contract_identifier = QualifiedContractIdentifier::local("c-foo").unwrap(); env.initialize_contract(contract_identifier, contract_one, ASTRules::PrecheckSize) @@ -1133,9 +1127,9 @@ fn test_cc_trait_stack_depth( (bar .c-foo) "; - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); let contract_identifier = QualifiedContractIdentifier::local("c-foo").unwrap(); env.initialize_contract(contract_identifier, contract_one, ASTRules::PrecheckSize) @@ -1156,7 +1150,7 @@ fn test_eval_with_non_existing_contract( ) { let mut owned_env = env_factory.get_env(epoch); - let mut placeholder_context = ContractContext::new( + let placeholder_context = ContractContext::new( QualifiedContractIdentifier::transient(), ClarityVersion::Clarity2, ); @@ -1164,7 +1158,7 @@ fn test_eval_with_non_existing_contract( let mut env = owned_env.get_exec_environment( Some(get_principal().expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); let result = env.eval_read_only( diff --git a/clarity/src/vm/tests/mod.rs b/clarity/src/vm/tests/mod.rs index 2c6f23ef42..cada7e973b 100644 --- a/clarity/src/vm/tests/mod.rs +++ b/clarity/src/vm/tests/mod.rs @@ -36,7 +36,7 @@ mod traits; mod variables; #[cfg(any(test, feature = "testing"))] -impl<'a, 'hooks> OwnedEnvironment<'a, 'hooks> { +impl OwnedEnvironment<'_, '_> { pub fn 
set_tenure_height(&mut self, tenure_height: u32) { self.context.database.begin(); self.context @@ -122,6 +122,7 @@ epochs_template! { Epoch24, Epoch25, Epoch30, + Epoch31, } clarity_template! { @@ -140,6 +141,9 @@ clarity_template! { (Epoch30, Clarity1), (Epoch30, Clarity2), (Epoch30, Clarity3), + (Epoch31, Clarity1), + (Epoch31, Clarity2), + (Epoch31, Clarity3), } #[cfg(test)] diff --git a/clarity/src/vm/tests/principals.rs b/clarity/src/vm/tests/principals.rs index 44f3447bad..98db149273 100644 --- a/clarity/src/vm/tests/principals.rs +++ b/clarity/src/vm/tests/principals.rs @@ -711,7 +711,7 @@ fn test_principal_construct_good() { data: Box::new(Value::Principal(PrincipalData::Contract( QualifiedContractIdentifier::new( StandardPrincipalData(22, transfer_buffer), - "hello-world".try_into().unwrap() + "hello-world".into() ) ))) }), @@ -735,7 +735,7 @@ fn test_principal_construct_good() { data: Box::new(Value::Principal(PrincipalData::Contract( QualifiedContractIdentifier::new( StandardPrincipalData(20, transfer_buffer), - "hello-world".try_into().unwrap() + "hello-world".into() ) ))) }), @@ -799,7 +799,7 @@ fn test_principal_construct_good() { data: Box::new(Value::Principal(PrincipalData::Contract( QualifiedContractIdentifier::new( StandardPrincipalData(26, transfer_buffer), - "hello-world".try_into().unwrap() + "hello-world".into() ) ))) }), @@ -823,7 +823,7 @@ fn test_principal_construct_good() { data: Box::new(Value::Principal(PrincipalData::Contract( QualifiedContractIdentifier::new( StandardPrincipalData(21, transfer_buffer), - "hello-world".try_into().unwrap() + "hello-world".into() ) ))) }), @@ -854,7 +854,7 @@ fn create_principal_from_strings( // contract principal requested Value::Principal(PrincipalData::Contract(QualifiedContractIdentifier::new( StandardPrincipalData(version_array[0], principal_array), - name.try_into().unwrap(), + name.into(), ))) } else { // standard principal requested diff --git a/clarity/src/vm/tests/simple_apply_eval.rs 
b/clarity/src/vm/tests/simple_apply_eval.rs index d9e52c0222..f6dbd87090 100644 --- a/clarity/src/vm/tests/simple_apply_eval.rs +++ b/clarity/src/vm/tests/simple_apply_eval.rs @@ -73,7 +73,7 @@ fn test_simple_let(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId (+ z y)) x))"; let contract_id = QualifiedContractIdentifier::transient(); - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); if let Ok(parsed_program) = parse(&contract_id, program, version, epoch) { let context = LocalContext::new(); @@ -84,7 +84,7 @@ fn test_simple_let(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId Ok(Value::Int(7)), eval( &parsed_program[0], - &mut env.get_exec_environment(None, None, &mut placeholder_context), + &mut env.get_exec_environment(None, None, &placeholder_context), &context ) ); diff --git a/clarity/src/vm/tests/traits.rs b/clarity/src/vm/tests/traits.rs index 97c4292b0d..d3fcfb7779 100644 --- a/clarity/src/vm/tests/traits.rs +++ b/clarity/src/vm/tests/traits.rs @@ -40,11 +40,11 @@ fn test_dynamic_dispatch_by_defining_trait( let target_contract = "(define-public (get-1 (x uint)) (ok u1))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, @@ -66,7 +66,7 @@ fn test_dynamic_dispatch_by_defining_trait( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( env.execute_contract( @@ -98,11 +98,11 @@ fn 
test_dynamic_dispatch_pass_trait_nested_in_let( let target_contract = "(define-public (get-1 (x uint)) (ok u1))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, @@ -124,7 +124,7 @@ fn test_dynamic_dispatch_pass_trait_nested_in_let( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( env.execute_contract( @@ -155,11 +155,11 @@ fn test_dynamic_dispatch_pass_trait( let target_contract = "(define-public (get-1 (x uint)) (ok u1))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, @@ -181,7 +181,7 @@ fn test_dynamic_dispatch_pass_trait( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( env.execute_contract( @@ -211,11 +211,11 @@ fn test_dynamic_dispatch_intra_contract_call( (define-public (get-1 (x uint)) (ok u1))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); 
{ - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("contract-defining-trait").unwrap(), contract_defining_trait, @@ -237,7 +237,7 @@ fn test_dynamic_dispatch_intra_contract_call( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); let err_result = env .execute_contract( @@ -270,11 +270,11 @@ fn test_dynamic_dispatch_by_implementing_imported_trait( (define-public (get-1 (x uint)) (ok u1))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("contract-defining-trait").unwrap(), contract_defining_trait, @@ -302,7 +302,7 @@ fn test_dynamic_dispatch_by_implementing_imported_trait( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( env.execute_contract( @@ -335,11 +335,11 @@ fn test_dynamic_dispatch_by_implementing_imported_trait_mul_funcs( (define-public (get-2 (x uint)) (ok u2))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( 
QualifiedContractIdentifier::local("contract-defining-trait").unwrap(), contract_defining_trait, @@ -367,7 +367,7 @@ fn test_dynamic_dispatch_by_implementing_imported_trait_mul_funcs( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( env.execute_contract( @@ -397,11 +397,11 @@ fn test_dynamic_dispatch_by_importing_trait( let target_contract = "(define-public (get-1 (x uint)) (ok u1))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("contract-defining-trait").unwrap(), contract_defining_trait, @@ -429,7 +429,7 @@ fn test_dynamic_dispatch_by_importing_trait( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( env.execute_contract( @@ -466,11 +466,11 @@ fn test_dynamic_dispatch_including_nested_trait( let target_nested_contract = "(define-public (get-a (x uint)) (ok u99))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("contract-defining-nested-trait").unwrap(), contract_defining_nested_trait, @@ -513,7 +513,7 @@ fn test_dynamic_dispatch_including_nested_trait( let mut env = owned_env.get_exec_environment( 
Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( env.execute_contract( @@ -542,11 +542,11 @@ fn test_dynamic_dispatch_mismatched_args( let target_contract = "(define-public (get-1 (x int)) (ok u1))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, @@ -568,7 +568,7 @@ fn test_dynamic_dispatch_mismatched_args( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); let err_result = env .execute_contract( @@ -599,11 +599,11 @@ fn test_dynamic_dispatch_mismatched_returned( let target_contract = "(define-public (get-1 (x uint)) (ok 1))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, @@ -625,7 +625,7 @@ fn test_dynamic_dispatch_mismatched_returned( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); let err_result = env .execute_contract( @@ -659,11 +659,11 @@ fn test_reentrant_dynamic_dispatch( "(define-public (get-1 (x uint)) (contract-call? 
.dispatching-contract wrapped-get-1 .target-contract))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, @@ -685,7 +685,7 @@ fn test_reentrant_dynamic_dispatch( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); let err_result = env .execute_contract( @@ -716,11 +716,11 @@ fn test_readwrite_dynamic_dispatch( let target_contract = "(define-read-only (get-1 (x uint)) (ok u1))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, @@ -742,7 +742,7 @@ fn test_readwrite_dynamic_dispatch( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); let err_result = env .execute_contract( @@ -773,11 +773,11 @@ fn test_readwrite_violation_dynamic_dispatch( let target_contract = "(define-public (get-1 (x uint)) (ok u1))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); { - let mut env = 
owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, @@ -799,7 +799,7 @@ fn test_readwrite_violation_dynamic_dispatch( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); let err_result = env .execute_contract( @@ -837,11 +837,11 @@ fn test_bad_call_with_trait( (contract-call? .dispatch wrapped-get-1 contract))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("defun").unwrap(), contract_defining_trait, @@ -872,7 +872,7 @@ fn test_bad_call_with_trait( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( env.execute_contract( @@ -905,11 +905,11 @@ fn test_good_call_with_trait( (contract-call? 
.dispatch wrapped-get-1 .implem))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("defun").unwrap(), contract_defining_trait, @@ -940,7 +940,7 @@ fn test_good_call_with_trait( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( env.execute_contract( @@ -974,11 +974,11 @@ fn test_good_call_2_with_trait( (contract-call? .dispatch wrapped-get-1 contract))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("defun").unwrap(), contract_defining_trait, @@ -1012,7 +1012,7 @@ fn test_good_call_2_with_trait( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( @@ -1045,11 +1045,11 @@ fn test_dynamic_dispatch_pass_literal_principal_as_trait_in_user_defined_functio (define-public (get-1 (x uint)) (ok u1))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = 
owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("contract-defining-trait").unwrap(), contract_defining_trait, @@ -1077,7 +1077,7 @@ fn test_dynamic_dispatch_pass_literal_principal_as_trait_in_user_defined_functio let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( env.execute_contract( @@ -1108,11 +1108,11 @@ fn test_contract_of_value( (define-public (get-1 (x uint)) (ok u99))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("defun").unwrap(), contract_defining_trait, @@ -1141,7 +1141,7 @@ fn test_contract_of_value( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( @@ -1175,11 +1175,11 @@ fn test_contract_of_no_impl( (define-public (get-1 (x uint)) (ok u99))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("defun").unwrap(), contract_defining_trait, @@ -1208,7 +1208,7 @@ fn test_contract_of_no_impl( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + 
&placeholder_context, ); assert_eq!( @@ -1240,11 +1240,11 @@ fn test_return_trait_with_contract_of_wrapped_in_begin( let target_contract = "(define-public (get-1 (x uint)) (ok u1))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, @@ -1266,7 +1266,7 @@ fn test_return_trait_with_contract_of_wrapped_in_begin( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( env.execute_contract( @@ -1297,11 +1297,11 @@ fn test_return_trait_with_contract_of_wrapped_in_let( let target_contract = "(define-public (get-1 (x uint)) (ok u1))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, @@ -1323,7 +1323,7 @@ fn test_return_trait_with_contract_of_wrapped_in_let( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( env.execute_contract( @@ -1352,11 +1352,11 @@ fn test_return_trait_with_contract_of( let target_contract = "(define-public (get-1 (x uint)) (ok u1))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - 
let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, @@ -1378,7 +1378,7 @@ fn test_return_trait_with_contract_of( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( env.execute_contract( @@ -1414,13 +1414,13 @@ fn test_pass_trait_to_subtrait(epoch: StacksEpochId, mut env_factory: MemoryEnvi (define-public (get-2 (a uint)) (ok a))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = ContractContext::new( + let placeholder_context = ContractContext::new( QualifiedContractIdentifier::transient(), ClarityVersion::Clarity2, ); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, @@ -1443,7 +1443,7 @@ fn test_pass_trait_to_subtrait(epoch: StacksEpochId, mut env_factory: MemoryEnvi let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( env.execute_contract( @@ -1476,13 +1476,13 @@ fn test_embedded_trait(epoch: StacksEpochId, mut env_factory: MemoryEnvironmentG let target_contract = "(define-public (echo (a uint)) (ok a))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = ContractContext::new( + let placeholder_context = ContractContext::new( QualifiedContractIdentifier::transient(), 
ClarityVersion::Clarity2, ); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, @@ -1506,7 +1506,7 @@ fn test_embedded_trait(epoch: StacksEpochId, mut env_factory: MemoryEnvironmentG let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( env.execute_contract( @@ -1549,13 +1549,13 @@ fn test_pass_embedded_trait_to_subtrait_optional( (define-public (get-2 (a uint)) (ok a))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = ContractContext::new( + let placeholder_context = ContractContext::new( QualifiedContractIdentifier::transient(), ClarityVersion::Clarity2, ); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, @@ -1578,7 +1578,7 @@ fn test_pass_embedded_trait_to_subtrait_optional( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( env.execute_contract( @@ -1621,13 +1621,13 @@ fn test_pass_embedded_trait_to_subtrait_ok( (define-public (get-2 (a uint)) (ok a))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = ContractContext::new( + let placeholder_context = ContractContext::new( QualifiedContractIdentifier::transient(), ClarityVersion::Clarity2, ); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, 
&placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, @@ -1650,7 +1650,7 @@ fn test_pass_embedded_trait_to_subtrait_ok( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( env.execute_contract( @@ -1693,13 +1693,13 @@ fn test_pass_embedded_trait_to_subtrait_err( (define-public (get-2 (a uint)) (ok a))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = ContractContext::new( + let placeholder_context = ContractContext::new( QualifiedContractIdentifier::transient(), ClarityVersion::Clarity2, ); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, @@ -1722,7 +1722,7 @@ fn test_pass_embedded_trait_to_subtrait_err( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( env.execute_contract( @@ -1765,13 +1765,13 @@ fn test_pass_embedded_trait_to_subtrait_list( (define-public (get-2 (a uint)) (ok a))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = ContractContext::new( + let placeholder_context = ContractContext::new( QualifiedContractIdentifier::transient(), ClarityVersion::Clarity2, ); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, @@ -1794,7 +1794,7 @@ fn test_pass_embedded_trait_to_subtrait_list( let mut env = 
owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( env.execute_contract( @@ -1840,13 +1840,13 @@ fn test_pass_embedded_trait_to_subtrait_list_option( (define-public (get-2 (a uint)) (ok a))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = ContractContext::new( + let placeholder_context = ContractContext::new( QualifiedContractIdentifier::transient(), ClarityVersion::Clarity2, ); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, @@ -1869,7 +1869,7 @@ fn test_pass_embedded_trait_to_subtrait_list_option( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( env.execute_contract( @@ -1915,13 +1915,13 @@ fn test_pass_embedded_trait_to_subtrait_option_list( (define-public (get-2 (a uint)) (ok a))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = ContractContext::new( + let placeholder_context = ContractContext::new( QualifiedContractIdentifier::transient(), ClarityVersion::Clarity2, ); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, @@ -1944,7 +1944,7 @@ fn test_pass_embedded_trait_to_subtrait_option_list( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( env.execute_contract( @@ -1976,13 +1976,13 @@ fn 
test_let_trait(epoch: StacksEpochId, mut env_factory: MemoryEnvironmentGenera let target_contract = "(define-public (echo (a uint)) (ok a))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = ContractContext::new( + let placeholder_context = ContractContext::new( QualifiedContractIdentifier::transient(), ClarityVersion::Clarity2, ); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, @@ -2005,7 +2005,7 @@ fn test_let_trait(epoch: StacksEpochId, mut env_factory: MemoryEnvironmentGenera let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( env.execute_contract( @@ -2041,13 +2041,13 @@ fn test_let3_trait(epoch: StacksEpochId, mut env_factory: MemoryEnvironmentGener let target_contract = "(define-public (echo (a uint)) (ok a))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = ContractContext::new( + let placeholder_context = ContractContext::new( QualifiedContractIdentifier::transient(), ClarityVersion::Clarity2, ); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, @@ -2070,7 +2070,7 @@ fn test_let3_trait(epoch: StacksEpochId, mut env_factory: MemoryEnvironmentGener let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( env.execute_contract( @@ -2102,13 +2102,13 @@ fn test_pass_principal_literal_to_trait( let 
target_contract = "(define-public (get-1 (a uint)) (ok a))"; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); - let mut placeholder_context = ContractContext::new( + let placeholder_context = ContractContext::new( QualifiedContractIdentifier::transient(), ClarityVersion::Clarity2, ); { - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); env.initialize_contract( QualifiedContractIdentifier::local("dispatching-contract").unwrap(), dispatching_contract, @@ -2131,7 +2131,7 @@ fn test_pass_principal_literal_to_trait( let mut env = owned_env.get_exec_environment( Some(p1.expect_principal().unwrap()), None, - &mut placeholder_context, + &placeholder_context, ); assert_eq!( env.execute_contract( diff --git a/clarity/src/vm/tests/variables.rs b/clarity/src/vm/tests/variables.rs index 5b392bb678..e862aeb0df 100644 --- a/clarity/src/vm/tests/variables.rs +++ b/clarity/src/vm/tests/variables.rs @@ -36,13 +36,13 @@ fn test_block_height( ) { let contract = "(define-read-only (test-func) block-height)"; - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); let mut owned_env = tl_env_factory.get_env(epoch); let contract_identifier = QualifiedContractIdentifier::local("test-contract").unwrap(); - let mut exprs = parse(&contract_identifier, &contract, version, epoch).unwrap(); + let mut exprs = parse(&contract_identifier, contract, version, epoch).unwrap(); let mut marf = MemoryBackingStore::new(); let mut db = marf.as_analysis_db(); let analysis = db.execute(|db| { @@ -70,7 +70,7 @@ fn test_block_height( ASTRules::PrecheckSize, ); - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); // Call the function let eval_result = env.eval_read_only(&contract_identifier, 
"(test-func)"); @@ -94,13 +94,13 @@ fn test_stacks_block_height( ) { let contract = "(define-read-only (test-func) stacks-block-height)"; - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); let mut owned_env = tl_env_factory.get_env(epoch); let contract_identifier = QualifiedContractIdentifier::local("test-contract").unwrap(); - let mut exprs = parse(&contract_identifier, &contract, version, epoch).unwrap(); + let mut exprs = parse(&contract_identifier, contract, version, epoch).unwrap(); let mut marf = MemoryBackingStore::new(); let mut db = marf.as_analysis_db(); let analysis = db.execute(|db| { @@ -128,7 +128,7 @@ fn test_stacks_block_height( ASTRules::PrecheckSize, ); - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); // Call the function let eval_result = env.eval_read_only(&contract_identifier, "(test-func)"); @@ -154,13 +154,13 @@ fn test_tenure_height( ) { let contract = "(define-read-only (test-func) tenure-height)"; - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::transient(), version); let mut owned_env = tl_env_factory.get_env(epoch); let contract_identifier = QualifiedContractIdentifier::local("test-contract").unwrap(); - let mut exprs = parse(&contract_identifier, &contract, version, epoch).unwrap(); + let mut exprs = parse(&contract_identifier, contract, version, epoch).unwrap(); let mut marf = MemoryBackingStore::new(); let mut db = marf.as_analysis_db(); let analysis = db.execute(|db| { @@ -188,7 +188,7 @@ fn test_tenure_height( ASTRules::PrecheckSize, ); - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); // Call the function let eval_result = 
env.eval_read_only(&contract_identifier, "(test-func)"); @@ -213,6 +213,7 @@ enum WhenError { } #[cfg(test)] +#[allow(clippy::type_complexity)] fn expect_contract_error( version: ClarityVersion, epoch: StacksEpochId, @@ -226,13 +227,13 @@ fn expect_contract_error( )], expected_success: Value, ) { - let mut placeholder_context = + let placeholder_context = ContractContext::new(QualifiedContractIdentifier::local(name).unwrap(), version); let mut owned_env = tl_env_factory.get_env(epoch); let contract_identifier = QualifiedContractIdentifier::local(name).unwrap(); - let mut exprs = parse(&contract_identifier, &contract, version, epoch).unwrap(); + let mut exprs = parse(&contract_identifier, contract, version, epoch).unwrap(); let mut marf = MemoryBackingStore::new(); let mut db = marf.as_analysis_db(); let analysis = db.execute(|db| { @@ -280,7 +281,7 @@ fn expect_contract_error( } } - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + let mut env = owned_env.get_exec_environment(None, None, &placeholder_context); // Call the function let eval_result = env.eval_read_only(&contract_identifier, "(test-func)"); diff --git a/clarity/src/vm/tooling/mod.rs b/clarity/src/vm/tooling/mod.rs index f218b2ccab..5b89145588 100644 --- a/clarity/src/vm/tooling/mod.rs +++ b/clarity/src/vm/tooling/mod.rs @@ -21,7 +21,7 @@ pub fn mem_type_check( epoch: StacksEpochId, ) -> CheckResult<(Option, ContractAnalysis)> { let contract_identifier = QualifiedContractIdentifier::transient(); - let mut contract = build_ast_with_rules( + let contract = build_ast_with_rules( &contract_identifier, snippet, &mut (), @@ -37,7 +37,7 @@ pub fn mem_type_check( let cost_tracker = LimitedCostTracker::new_free(); match run_analysis( &QualifiedContractIdentifier::transient(), - &mut contract, + &contract, &mut analysis_db, false, cost_tracker, @@ -51,7 +51,7 @@ pub fn mem_type_check( .type_map .as_ref() .unwrap() - .get_type_expected(&x.expressions.last().unwrap()) + 
.get_type_expected(x.expressions.last().unwrap()) .cloned(); Ok((first_type, x)) } diff --git a/clarity/src/vm/types/mod.rs b/clarity/src/vm/types/mod.rs index e1837ee034..ef4b565834 100644 --- a/clarity/src/vm/types/mod.rs +++ b/clarity/src/vm/types/mod.rs @@ -14,9 +14,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -#[allow(clippy::result_large_err)] pub mod serialization; -#[allow(clippy::result_large_err)] pub mod signatures; use std::collections::btree_map::Entry; @@ -279,6 +277,10 @@ impl SequenceData { } } + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + pub fn element_at(self, index: usize) -> Result> { if self.len() <= index { return Ok(None); @@ -613,7 +615,7 @@ pub trait SequencedValue { fn atom_values(&mut self) -> Result> { self.drained_items() .iter() - .map(|item| Ok(SymbolicExpression::atom_value(Self::to_value(&item)?))) + .map(|item| Ok(SymbolicExpression::atom_value(Self::to_value(item)?))) .collect() } } @@ -751,11 +753,11 @@ define_named_enum!(TenureInfoProperty { impl OptionalData { pub fn type_signature(&self) -> std::result::Result { let type_result = match self.data { - Some(ref v) => TypeSignature::new_option(TypeSignature::type_of(&v)?), + Some(ref v) => TypeSignature::new_option(TypeSignature::type_of(v)?), None => TypeSignature::new_option(TypeSignature::NoType), }; type_result.map_err(|_| { - CheckErrors::Expects("Should not have constructed too large of a type.".into()).into() + CheckErrors::Expects("Should not have constructed too large of a type.".into()) }) } } @@ -773,7 +775,7 @@ impl ResponseData { ), }; type_result.map_err(|_| { - CheckErrors::Expects("Should not have constructed too large of a type.".into()).into() + CheckErrors::Expects("Should not have constructed too large of a type.".into()) }) } } @@ -1265,6 +1267,10 @@ impl ListData { .map_err(|_| InterpreterError::Expect("Data length should be valid".into()).into()) } + pub fn is_empty(&self) 
-> bool { + self.data.is_empty() + } + fn append(&mut self, epoch: &StacksEpochId, other_seq: ListData) -> Result<()> { let entry_type_a = self.type_signature.get_list_item_type(); let entry_type_b = other_seq.type_signature.get_list_item_type(); diff --git a/clarity/src/vm/types/serialization.rs b/clarity/src/vm/types/serialization.rs index 7dcda788a8..48030519c8 100644 --- a/clarity/src/vm/types/serialization.rs +++ b/clarity/src/vm/types/serialization.rs @@ -782,14 +782,12 @@ impl Value { expected_type.unwrap(), )); } - } else { - if len as u64 != tuple_type.len() { - // unwrap is safe because of the match condition - #[allow(clippy::unwrap_used)] - return Err(SerializationError::DeserializeExpected( - expected_type.unwrap(), - )); - } + } else if u64::from(len) != tuple_type.len() { + // unwrap is safe because of the match condition + #[allow(clippy::unwrap_used)] + return Err(SerializationError::DeserializeExpected( + expected_type.unwrap(), + )); } Some(tuple_type) } @@ -1344,7 +1342,7 @@ impl ClaritySerializable for u32 { impl ClarityDeserializable for u32 { fn deserialize(input: &str) -> Result { - let bytes = hex_bytes(&input).map_err(|_| { + let bytes = hex_bytes(input).map_err(|_| { InterpreterError::Expect("u32 deserialization: failed decoding bytes.".into()) })?; assert_eq!(bytes.len(), 4); @@ -1419,13 +1417,10 @@ pub mod tests { } fn test_bad_expectation(v: Value, e: TypeSignature) { - assert!( - match Value::try_deserialize_hex(&v.serialize_to_hex().unwrap(), &e, false).unwrap_err() - { - SerializationError::DeserializeExpected(_) => true, - _ => false, - } - ) + assert!(matches!( + Value::try_deserialize_hex(&v.serialize_to_hex().unwrap(), &e, false).unwrap_err(), + SerializationError::DeserializeExpected(_) + )); } #[test] @@ -1704,40 +1699,37 @@ pub mod tests { ); // field number not equal to expectations - assert!(match Value::try_deserialize_hex( - &t_3.serialize_to_hex().unwrap(), - &TypeSignature::type_of(&t_1).unwrap(), - false - ) - 
.unwrap_err() - { - SerializationError::DeserializeExpected(_) => true, - _ => false, - }); + assert!(matches!( + Value::try_deserialize_hex( + &t_3.serialize_to_hex().unwrap(), + &TypeSignature::type_of(&t_1).unwrap(), + false + ) + .unwrap_err(), + SerializationError::DeserializeExpected(_) + )); // field type mismatch - assert!(match Value::try_deserialize_hex( - &t_2.serialize_to_hex().unwrap(), - &TypeSignature::type_of(&t_1).unwrap(), - false - ) - .unwrap_err() - { - SerializationError::DeserializeExpected(_) => true, - _ => false, - }); + assert!(matches!( + Value::try_deserialize_hex( + &t_2.serialize_to_hex().unwrap(), + &TypeSignature::type_of(&t_1).unwrap(), + false + ) + .unwrap_err(), + SerializationError::DeserializeExpected(_) + )); // field not-present in expected - assert!(match Value::try_deserialize_hex( - &t_1.serialize_to_hex().unwrap(), - &TypeSignature::type_of(&t_4).unwrap(), - false - ) - .unwrap_err() - { - SerializationError::DeserializeExpected(_) => true, - _ => false, - }); + assert!(matches!( + Value::try_deserialize_hex( + &t_1.serialize_to_hex().unwrap(), + &TypeSignature::type_of(&t_4).unwrap(), + false + ) + .unwrap_err(), + SerializationError::DeserializeExpected(_) + )); } #[apply(test_clarity_versions)] diff --git a/clarity/src/vm/types/signatures.rs b/clarity/src/vm/types/signatures.rs index 280258e026..a85c56ff3e 100644 --- a/clarity/src/vm/types/signatures.rs +++ b/clarity/src/vm/types/signatures.rs @@ -587,10 +587,9 @@ impl TypeSignature { | StacksEpochId::Epoch23 | StacksEpochId::Epoch24 | StacksEpochId::Epoch25 - | StacksEpochId::Epoch30 => self.admits_type_v2_1(other), - StacksEpochId::Epoch10 => { - return Err(CheckErrors::Expects("epoch 1.0 not supported".into())) - } + | StacksEpochId::Epoch30 + | StacksEpochId::Epoch31 => self.admits_type_v2_1(other), + StacksEpochId::Epoch10 => Err(CheckErrors::Expects("epoch 1.0 not supported".into())), } } @@ -677,16 +676,12 @@ impl TypeSignature { } } NoType => 
Err(CheckErrors::CouldNotDetermineType), - CallableType(_) => { - return Err(CheckErrors::Expects( - "CallableType should not be used in epoch v2.0".into(), - )) - } - ListUnionType(_) => { - return Err(CheckErrors::Expects( - "ListUnionType should not be used in epoch v2.0".into(), - )) - } + CallableType(_) => Err(CheckErrors::Expects( + "CallableType should not be used in epoch v2.0".into(), + )), + ListUnionType(_) => Err(CheckErrors::Expects( + "ListUnionType should not be used in epoch v2.0".into(), + )), _ => Ok(other == self), } } @@ -800,7 +795,8 @@ impl TypeSignature { | StacksEpochId::Epoch23 | StacksEpochId::Epoch24 | StacksEpochId::Epoch25 - | StacksEpochId::Epoch30 => self.canonicalize_v2_1(), + | StacksEpochId::Epoch30 + | StacksEpochId::Epoch31 => self.canonicalize_v2_1(), } } @@ -1158,10 +1154,9 @@ impl TypeSignature { | StacksEpochId::Epoch23 | StacksEpochId::Epoch24 | StacksEpochId::Epoch25 - | StacksEpochId::Epoch30 => Self::least_supertype_v2_1(a, b), - StacksEpochId::Epoch10 => { - return Err(CheckErrors::Expects("epoch 1.0 not supported".into())) - } + | StacksEpochId::Epoch30 + | StacksEpochId::Epoch31 => Self::least_supertype_v2_1(a, b), + StacksEpochId::Epoch10 => Err(CheckErrors::Expects("epoch 1.0 not supported".into())), } } @@ -1452,8 +1447,7 @@ impl TypeSignature { // Checks if resulting type signature is of valid size. pub fn construct_parent_list_type(args: &[Value]) -> Result { - let children_types: Result> = - args.iter().map(|x| TypeSignature::type_of(x)).collect(); + let children_types: Result> = args.iter().map(TypeSignature::type_of).collect(); TypeSignature::parent_list_type(&children_types?) } @@ -1657,7 +1651,7 @@ impl TypeSignature { ) -> Result> { let mut trait_signature: BTreeMap = BTreeMap::new(); let functions_types = type_args - .get(0) + .first() .ok_or_else(|| CheckErrors::InvalidTypeDescription)? 
.match_list() .ok_or(CheckErrors::DefineTraitBadSignature)?; @@ -1679,11 +1673,10 @@ impl TypeSignature { let fn_args_exprs = args[1] .match_list() .ok_or(CheckErrors::DefineTraitBadSignature)?; - let mut fn_args = Vec::with_capacity(fn_args_exprs.len()); - for arg_type in fn_args_exprs.into_iter() { - let arg_t = TypeSignature::parse_type_repr(epoch, arg_type, accounting)?; - fn_args.push(arg_t); - } + let fn_args = fn_args_exprs + .iter() + .map(|arg_type| TypeSignature::parse_type_repr(epoch, arg_type, accounting)) + .collect::>()?; // Extract function's type return - must be a response let fn_return = match TypeSignature::parse_type_repr(epoch, &args[2], accounting) { @@ -1763,7 +1756,6 @@ impl TypeSignature { "FAIL: .size() overflowed on too large of a type. construction should have failed!" .into(), ) - .into() }) } @@ -1882,9 +1874,8 @@ impl TupleTypeSignature { } pub fn size(&self) -> Result { - self.inner_size()?.ok_or_else(|| { - CheckErrors::Expects("size() overflowed on a constructed type.".into()).into() - }) + self.inner_size()? + .ok_or_else(|| CheckErrors::Expects("size() overflowed on a constructed type.".into())) } fn max_depth(&self) -> u8 { diff --git a/clarity/src/vm/version.rs b/clarity/src/vm/version.rs index 4c437d52cc..7050d5dbd9 100644 --- a/clarity/src/vm/version.rs +++ b/clarity/src/vm/version.rs @@ -40,6 +40,7 @@ impl ClarityVersion { StacksEpochId::Epoch24 => ClarityVersion::Clarity2, StacksEpochId::Epoch25 => ClarityVersion::Clarity2, StacksEpochId::Epoch30 => ClarityVersion::Clarity3, + StacksEpochId::Epoch31 => ClarityVersion::Clarity3, } } } diff --git a/docs/rpc-endpoints.md b/docs/rpc-endpoints.md index 94a5479613..e029a8b113 100644 --- a/docs/rpc-endpoints.md +++ b/docs/rpc-endpoints.md @@ -172,6 +172,35 @@ Where data is the hex serialization of the variable value. This endpoint also accepts a querystring parameter `?proof=` which when supplied `0`, will return the JSON object _without_ the `proof` field. 
+### GET /v2/clarity/marf/[Clarity MARF Key] +Attempt to fetch the value of a MARF key. The key is identified with [Clarity MARF Key]. + +Returns JSON data in the form: + +```json +{ + "data": "0x01ce...", + "proof": "0x01ab...", +} +``` + +Where data is the hex serialization of the value. + +### GET /v2/clarity/metadata/[Stacks Address]/[Contract Name]/[Clarity Metadata Key] +Attempt to fetch the metadata of a contract. + The contract is identified with [Stacks Address] and [Contract Name] in the URL path. + The metadata key is identified with [Clarity Metadata Key]. + +Returns JSON data in the form: + +```json +{ + "data": "'{\"contract_identifier\":{...}'", +} +``` + +Where data is the metadata formatted as a JSON string. + ### GET /v2/constant_val/[Stacks Address]/[Contract Name]/[Constant Name] Attempt to fetch a constant from a contract. The contract is identified with [Stacks Address] and [Contract Name] in the URL path. The constant is identified with [Constant Name]. diff --git a/docs/rpc/api/core-node/get-clarity-marf-value.example.json b/docs/rpc/api/core-node/get-clarity-marf-value.example.json new file mode 100644 index 0000000000..d0e233416f --- /dev/null +++ b/docs/rpc/api/core-node/get-clarity-marf-value.example.json @@ -0,0 +1,4 @@ +{ + "data": "0x0a0c000000010a6d6f6e737465722d69640100000000000000000000000000000001", + "proof": "0x123..." 
+} diff --git a/docs/rpc/api/core-node/get-clarity-marf-value.schema.json b/docs/rpc/api/core-node/get-clarity-marf-value.schema.json new file mode 100644 index 0000000000..ea7e7894fb --- /dev/null +++ b/docs/rpc/api/core-node/get-clarity-marf-value.schema.json @@ -0,0 +1,17 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "description": "Response of get Clarity MARF value request", + "title": "ClarityMARFValueResponse", + "type": "object", + "required": ["data"], + "properties": { + "data": { + "type": "string", + "description": "Hex-encoded string" + }, + "proof": { + "type": "string", + "description": "Hex-encoded string of the MARF proof for the data" + } + } +} diff --git a/docs/rpc/api/core-node/get-clarity-metadata.example.json b/docs/rpc/api/core-node/get-clarity-metadata.example.json new file mode 100644 index 0000000000..5bb4bd5c47 --- /dev/null +++ b/docs/rpc/api/core-node/get-clarity-metadata.example.json @@ -0,0 +1,3 @@ +{ + "data": "'{\"contract_identifier\":{...}, \"private_function_types\":{...}'" +} diff --git a/docs/rpc/api/core-node/get-clarity-metadata.schema.json b/docs/rpc/api/core-node/get-clarity-metadata.schema.json new file mode 100644 index 0000000000..3c0104fa41 --- /dev/null +++ b/docs/rpc/api/core-node/get-clarity-metadata.schema.json @@ -0,0 +1,13 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "description": "Response of get clarity metadata request", + "title": "ClarityMetadataResponse", + "type": "object", + "required": ["data"], + "properties": { + "data": { + "type": "string", + "description": "Metadata value formatted as a JSON string" + } + } +} diff --git a/docs/rpc/openapi.yaml b/docs/rpc/openapi.yaml index db36da8bac..d82494ca36 100644 --- a/docs/rpc/openapi.yaml +++ b/docs/rpc/openapi.yaml @@ -486,6 +486,93 @@ paths: If tip == "latest", the query will be run from the latest known tip (includes unconfirmed state). 
If the tip is left unspecified, the stacks chain tip will be selected (only includes confirmed state). + /v2/clarity/marf/{clarity_marf_key}: + post: + summary: Get the MARF value for a given key + tags: + - Smart Contracts + operationId: get_clarity_marf_value + description: | + Attempt to fetch the value of a MARF key. + + In the response, `data` is the hex serialization of the value. + responses: + 200: + description: Success + content: + application/json: + schema: + $ref: ./api/core-node/get-clarity-marf-value.schema.json + example: + $ref: ./api/core-node/get-clarity-marf-value.example.json + 400: + description: Failed to retrieve MARF key + parameters: + - name: clarity_marf_key + in: path + required: true + description: MARF key + schema: + type: string + - name: proof + in: query + description: Returns object without the proof field when set to 0 + schema: + type: integer + - name: tip + in: query + schema: + type: string + description: The Stacks chain tip to query from. If tip == latest, the query will be run from the latest + known tip (includes unconfirmed state). + + /v2/clarity/metadata/{contract_address}/{contract_name}/{clarity_metadata_key}: + post: + summary: Get the contract metadata for the metadata key + tags: + - Smart Contracts + operationId: get_clarity_metadata_key + description: | + Attempt to fetch the metadata of a contract. The contract is identified with [Contract Address] and [Contract Name] in the URL path. The metadata key is identified with [Clarity Metadata Key]. + + In the response, `data` is formatted as JSON. 
+ responses: + 200: + description: Success + content: + application/json: + schema: + $ref: ./api/core-node/get-clarity-metadata.schema.json + example: + $ref: ./api/core-node/get-clarity-metadata.example.json + 400: + description: Failed to retrieve constant value from contract + parameters: + - name: contract_address + in: path + required: true + description: Stacks address + schema: + type: string + - name: contract_name + in: path + required: true + description: Contract name + schema: + type: string + - name: clarity_metadata_key + in: path + required: true + description: Metadata key + schema: + type: string + - name: tip + in: query + schema: + type: string + description: The Stacks chain tip to query from. If tip == latest, the query will be run from the latest + known tip (includes unconfirmed state). + /v2/constant_val/{contract_address}/{contract_name}/{constant_name}: post: summary: Get the value of a constant inside a contract @@ -778,4 +865,4 @@ paths: text/plain: schema: type: integer - example: 7 \ No newline at end of file + example: 7 diff --git a/libsigner/src/events.rs b/libsigner/src/events.rs index 1de0e34f09..52a77e2bb8 100644 --- a/libsigner/src/events.rs +++ b/libsigner/src/events.rs @@ -114,6 +114,13 @@ pub enum SignerEvent { /// the time at which this event was received by the signer's event processor received_time: SystemTime, }, + /// A new processed Stacks block was received from the node with the given block hash + NewBlock { + /// The block header hash for the newly processed stacks block + block_hash: Sha512Trunc256Sum, + /// The block height for the newly processed stacks block + block_height: u64, + }, } /// Trait to implement a stop-signaler for the event receiver thread. 
@@ -298,29 +305,25 @@ impl EventReceiver for SignerEventReceiver { &request.method(), ))); } + debug!("Processing {} event", request.url()); if request.url() == "/stackerdb_chunks" { - process_stackerdb_event(event_receiver.local_addr, request) - .map_err(|e| { - error!("Error processing stackerdb_chunks message"; "err" => ?e); - e - }) + process_event::(request) } else if request.url() == "/proposal_response" { - process_proposal_response(request) + process_event::(request) } else if request.url() == "/new_burn_block" { - process_new_burn_block_event(request) + process_event::(request) } else if request.url() == "/shutdown" { event_receiver.stop_signal.store(true, Ordering::SeqCst); - return Err(EventError::Terminated); + Err(EventError::Terminated) + } else if request.url() == "/new_block" { + process_event::(request) } else { let url = request.url().to_string(); - // `/new_block` is expected, but not specifically handled. do not log. - if &url != "/new_block" { - debug!( - "[{:?}] next_event got request with unexpected url {}, return OK so other side doesn't keep sending this", - event_receiver.local_addr, - url - ); - } + debug!( + "[{:?}] next_event got request with unexpected url {}, return OK so other side doesn't keep sending this", + event_receiver.local_addr, + url + ); ack_dispatcher(request); Err(EventError::UnrecognizedEvent(url)) } @@ -385,12 +388,13 @@ fn ack_dispatcher(request: HttpRequest) { // TODO: add tests from mutation testing results #4835 #[cfg_attr(test, mutants::skip)] -/// Process a stackerdb event from the node -fn process_stackerdb_event( - local_addr: Option, - mut request: HttpRequest, -) -> Result, EventError> { +fn process_event(mut request: HttpRequest) -> Result, EventError> +where + T: SignerEventTrait, + E: serde::de::DeserializeOwned + TryInto, Error = EventError>, +{ let mut body = String::new(); + if let Err(e) = request.as_reader().read_to_string(&mut body) { error!("Failed to read body: {:?}", &e); ack_dispatcher(request); 
@@ -399,27 +403,12 @@ fn process_stackerdb_event( &e ))); } - - debug!("Got stackerdb_chunks event"; "chunks_event_body" => %body); - let event: StackerDBChunksEvent = serde_json::from_slice(body.as_bytes()) + // Regardless of whether we successfully deserialize, we should ack the dispatcher so they don't keep resending it + ack_dispatcher(request); + let json_event: E = serde_json::from_slice(body.as_bytes()) .map_err(|e| EventError::Deserialize(format!("Could not decode body to JSON: {:?}", &e)))?; - let event_contract_id = event.contract_id.clone(); - - let signer_event = match SignerEvent::try_from(event) { - Err(e) => { - info!( - "[{:?}] next_event got event from an unexpected contract id {}, return OK so other side doesn't keep sending this", - local_addr, - event_contract_id - ); - ack_dispatcher(request); - return Err(e); - } - Ok(x) => x, - }; - - ack_dispatcher(request); + let signer_event: SignerEvent = json_event.try_into()?; Ok(signer_event) } @@ -466,78 +455,69 @@ impl TryFrom for SignerEvent { } } -/// Process a proposal response from the node -fn process_proposal_response( - mut request: HttpRequest, -) -> Result, EventError> { - debug!("Got proposal_response event"); - let mut body = String::new(); - if let Err(e) = request.as_reader().read_to_string(&mut body) { - error!("Failed to read body: {:?}", &e); +impl TryFrom for SignerEvent { + type Error = EventError; - if let Err(e) = request.respond(HttpResponse::empty(200u16)) { - error!("Failed to respond to request: {:?}", &e); - } - return Err(EventError::MalformedRequest(format!( - "Failed to read body: {:?}", - &e - ))); + fn try_from(block_validate_response: BlockValidateResponse) -> Result { + Ok(SignerEvent::BlockValidationResponse( + block_validate_response, + )) } +} - let event: BlockValidateResponse = serde_json::from_slice(body.as_bytes()) - .map_err(|e| EventError::Deserialize(format!("Could not decode body to JSON: {:?}", &e)))?; +#[derive(Debug, Deserialize)] +struct BurnBlockEvent { 
+ burn_block_hash: String, + burn_block_height: u64, + reward_recipients: Vec, + reward_slot_holders: Vec, + burn_amount: u64, +} - if let Err(e) = request.respond(HttpResponse::empty(200u16)) { - error!("Failed to respond to request: {:?}", &e); +impl TryFrom for SignerEvent { + type Error = EventError; + + fn try_from(burn_block_event: BurnBlockEvent) -> Result { + let burn_header_hash = burn_block_event + .burn_block_hash + .get(2..) + .ok_or_else(|| EventError::Deserialize("Hex string should be 0x prefixed".into())) + .and_then(|hex| { + BurnchainHeaderHash::from_hex(hex) + .map_err(|e| EventError::Deserialize(format!("Invalid hex string: {e}"))) + })?; + + Ok(SignerEvent::NewBurnBlock { + burn_height: burn_block_event.burn_block_height, + received_time: SystemTime::now(), + burn_header_hash, + }) } +} - Ok(SignerEvent::BlockValidationResponse(event)) +#[derive(Debug, Deserialize)] +struct BlockEvent { + block_hash: String, + block_height: u64, } -/// Process a new burn block event from the node -fn process_new_burn_block_event( - mut request: HttpRequest, -) -> Result, EventError> { - debug!("Got burn_block event"); - let mut body = String::new(); - if let Err(e) = request.as_reader().read_to_string(&mut body) { - error!("Failed to read body: {:?}", &e); +impl TryFrom for SignerEvent { + type Error = EventError; - if let Err(e) = request.respond(HttpResponse::empty(200u16)) { - error!("Failed to respond to request: {:?}", &e); - } - return Err(EventError::MalformedRequest(format!( - "Failed to read body: {:?}", - &e - ))); - } - #[derive(Debug, Deserialize)] - struct TempBurnBlockEvent { - burn_block_hash: String, - burn_block_height: u64, - reward_recipients: Vec, - reward_slot_holders: Vec, - burn_amount: u64, - } - let temp: TempBurnBlockEvent = serde_json::from_slice(body.as_bytes()) - .map_err(|e| EventError::Deserialize(format!("Could not decode body to JSON: {:?}", &e)))?; - let burn_header_hash = temp - .burn_block_hash - .get(2..) 
- .ok_or_else(|| EventError::Deserialize("Hex string should be 0x prefixed".into())) - .and_then(|hex| { - BurnchainHeaderHash::from_hex(hex) - .map_err(|e| EventError::Deserialize(format!("Invalid hex string: {e}"))) - })?; - let event = SignerEvent::NewBurnBlock { - burn_height: temp.burn_block_height, - received_time: SystemTime::now(), - burn_header_hash, - }; - if let Err(e) = request.respond(HttpResponse::empty(200u16)) { - error!("Failed to respond to request: {:?}", &e); + fn try_from(block_event: BlockEvent) -> Result { + let block_hash: Sha512Trunc256Sum = block_event + .block_hash + .get(2..) + .ok_or_else(|| EventError::Deserialize("Hex string should be 0x prefixed".into())) + .and_then(|hex| { + Sha512Trunc256Sum::from_hex(hex) + .map_err(|e| EventError::Deserialize(format!("Invalid hex string: {e}"))) + })?; + Ok(SignerEvent::NewBlock { + block_hash, + block_height: block_event.block_height, + }) } - Ok(event) } pub fn get_signers_db_signer_set_message_id(name: &str) -> Option<(u32, u32)> { diff --git a/libsigner/src/runloop.rs b/libsigner/src/runloop.rs index 0a5ed49a6d..40a097088e 100644 --- a/libsigner/src/runloop.rs +++ b/libsigner/src/runloop.rs @@ -120,9 +120,8 @@ impl, R, T: SignerEventTrait> RunningSigner { pub fn join(self) -> Option { debug!("Try join event loop..."); // wait for event receiver join - let _ = self.event_join.join().map_err(|thread_panic| { + let _ = self.event_join.join().inspect_err(|thread_panic| { error!("Event thread panicked with: '{:?}'", &thread_panic); - thread_panic }); info!("Event receiver thread joined"); @@ -131,9 +130,8 @@ impl, R, T: SignerEventTrait> RunningSigner { let result_opt = self .signer_join .join() - .map_err(|thread_panic| { + .inspect_err(|thread_panic| { error!("Event thread panicked with: '{:?}'", &thread_panic); - thread_panic }) .unwrap_or(None); diff --git a/libsigner/src/v0/messages.rs b/libsigner/src/v0/messages.rs index 618aa20937..087c4ba7a3 100644 --- a/libsigner/src/v0/messages.rs +++ 
b/libsigner/src/v0/messages.rs @@ -73,6 +73,9 @@ use crate::{ SignerMessage as SignerMessageTrait, VERSION_STRING, }; +/// Maximum size of the [BlockResponseData] serialized bytes +pub const BLOCK_RESPONSE_DATA_MAX_SIZE: u32 = 2 * 1024 * 1024; // 2MB + define_u8_enum!( /// Enum representing the stackerdb message identifier: this is /// the contract index in the signers contracts (i.e., X in signers-0-X) @@ -638,11 +641,16 @@ impl std::fmt::Display for BlockResponse { impl BlockResponse { /// Create a new accepted BlockResponse for the provided block signer signature hash and signature - pub fn accepted(hash: Sha512Trunc256Sum, sig: MessageSignature) -> Self { + pub fn accepted( + signer_signature_hash: Sha512Trunc256Sum, + signature: MessageSignature, + tenure_extend_timestamp: u64, + ) -> Self { Self::Accepted(BlockAccepted { - signer_signature_hash: hash, - signature: sig, + signer_signature_hash, + signature, metadata: SignerMessageMetadata::default(), + response_data: BlockResponseData::new(tenure_extend_timestamp), }) } @@ -652,8 +660,31 @@ impl BlockResponse { reject_code: RejectCode, private_key: &StacksPrivateKey, mainnet: bool, + timestamp: u64, ) -> Self { - Self::Rejected(BlockRejection::new(hash, reject_code, private_key, mainnet)) + Self::Rejected(BlockRejection::new( + hash, + reject_code, + private_key, + mainnet, + timestamp, + )) + } + + /// Get the tenure extend timestamp from the block response + pub fn get_tenure_extend_timestamp(&self) -> u64 { + match self { + BlockResponse::Accepted(accepted) => accepted.response_data.tenure_extend_timestamp, + BlockResponse::Rejected(rejection) => rejection.response_data.tenure_extend_timestamp, + } + } + + /// Get the signer signature hash from the block response + pub fn get_signer_signature_hash(&self) -> Sha512Trunc256Sum { + match self { + BlockResponse::Accepted(accepted) => accepted.signer_signature_hash, + BlockResponse::Rejected(rejection) => rejection.signer_signature_hash, + } } } @@ -739,6 
+770,79 @@ impl SignerMessageMetadata { } } +/// The latest version of the block response data +pub const BLOCK_RESPONSE_DATA_VERSION: u8 = 2; + +/// Versioned, backwards-compatible struct for block response data +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct BlockResponseData { + /// The version of the block response data + pub version: u8, + /// The block response data + pub tenure_extend_timestamp: u64, + /// When deserializing future versions, + /// there may be extra bytes that we don't know about + pub unknown_bytes: Vec, +} + +impl BlockResponseData { + /// Create a new BlockResponseData for the provided tenure extend timestamp and unknown bytes + pub fn new(tenure_extend_timestamp: u64) -> Self { + Self { + version: BLOCK_RESPONSE_DATA_VERSION, + tenure_extend_timestamp, + unknown_bytes: vec![], + } + } + + /// Create an empty BlockResponseData + pub fn empty() -> Self { + Self::new(u64::MAX) + } + + /// Serialize the "inner" block response data. Used to determine the bytes length of the serialized block response data + fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { + write_next(fd, &self.tenure_extend_timestamp)?; + // write_next(fd, &self.unknown_bytes)?; + fd.write_all(&self.unknown_bytes) + .map_err(CodecError::WriteError)?; + Ok(()) + } +} + +impl StacksMessageCodec for BlockResponseData { + /// Serialize the block response data. + /// When creating a new version of the block response data, we are only ever + /// appending new bytes to the end of the struct. When serializing, we use + /// `bytes_len` to ensure that older versions of the code can read through the + /// end of the serialized bytes. 
+ fn consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { + write_next(fd, &self.version)?; + let mut inner_bytes = vec![]; + self.inner_consensus_serialize(&mut inner_bytes)?; + write_next(fd, &inner_bytes)?; + Ok(()) + } + + /// Deserialize the block response data in a backwards-compatible manner. + /// When creating a new version of the block response data, we are only ever + /// appending new bytes to the end of the struct. When deserializing, we use + /// `bytes_len` to ensure that we read through the end of the serialized bytes. + fn consensus_deserialize(fd: &mut R) -> Result { + let Ok(version) = read_next(fd) else { + return Ok(Self::empty()); + }; + let inner_bytes: Vec = read_next_at_most(fd, BLOCK_RESPONSE_DATA_MAX_SIZE)?; + let mut inner_reader = inner_bytes.as_slice(); + let tenure_extend_timestamp = read_next(&mut inner_reader)?; + Ok(Self { + version, + tenure_extend_timestamp, + unknown_bytes: inner_reader.to_vec(), + }) + } +} + /// A rejection response from a signer for a proposed block #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct BlockAccepted { @@ -748,6 +852,8 @@ pub struct BlockAccepted { pub signature: MessageSignature, /// Signer message metadata pub metadata: SignerMessageMetadata, + /// Extra versioned block response data + pub response_data: BlockResponseData, } impl StacksMessageCodec for BlockAccepted { @@ -755,6 +861,7 @@ impl StacksMessageCodec for BlockAccepted { write_next(fd, &self.signer_signature_hash)?; write_next(fd, &self.signature)?; write_next(fd, &self.metadata)?; + write_next(fd, &self.response_data)?; Ok(()) } @@ -762,21 +869,28 @@ impl StacksMessageCodec for BlockAccepted { let signer_signature_hash = read_next::(fd)?; let signature = read_next::(fd)?; let metadata = read_next::(fd)?; + let response_data = read_next::(fd)?; Ok(Self { signer_signature_hash, signature, metadata, + response_data, }) } } impl BlockAccepted { /// Create a new BlockAccepted for the provided block signer 
signature hash and signature - pub fn new(signer_signature_hash: Sha512Trunc256Sum, signature: MessageSignature) -> Self { + pub fn new( + signer_signature_hash: Sha512Trunc256Sum, + signature: MessageSignature, + tenure_extend_timestamp: u64, + ) -> Self { Self { signer_signature_hash, signature, metadata: SignerMessageMetadata::default(), + response_data: BlockResponseData::new(tenure_extend_timestamp), } } } @@ -796,6 +910,8 @@ pub struct BlockRejection { pub chain_id: u32, /// Signer message metadata pub metadata: SignerMessageMetadata, + /// Extra versioned block response data + pub response_data: BlockResponseData, } impl BlockRejection { @@ -805,6 +921,7 @@ impl BlockRejection { reason_code: RejectCode, private_key: &StacksPrivateKey, mainnet: bool, + timestamp: u64, ) -> Self { let chain_id = if mainnet { CHAIN_ID_MAINNET @@ -818,6 +935,7 @@ impl BlockRejection { signature: MessageSignature::empty(), chain_id, metadata: SignerMessageMetadata::default(), + response_data: BlockResponseData::new(timestamp), }; rejection .sign(private_key) @@ -830,6 +948,7 @@ impl BlockRejection { reject: BlockValidateReject, private_key: &StacksPrivateKey, mainnet: bool, + timestamp: u64, ) -> Self { let chain_id = if mainnet { CHAIN_ID_MAINNET @@ -843,6 +962,7 @@ impl BlockRejection { chain_id, signature: MessageSignature::empty(), metadata: SignerMessageMetadata::default(), + response_data: BlockResponseData::new(timestamp), }; rejection .sign(private_key) @@ -893,6 +1013,7 @@ impl StacksMessageCodec for BlockRejection { write_next(fd, &self.chain_id)?; write_next(fd, &self.signature)?; write_next(fd, &self.metadata)?; + write_next(fd, &self.response_data)?; Ok(()) } @@ -906,6 +1027,7 @@ impl StacksMessageCodec for BlockRejection { let chain_id = read_next::(fd)?; let signature = read_next::(fd)?; let metadata = read_next::(fd)?; + let response_data = read_next::(fd)?; Ok(Self { reason, reason_code, @@ -913,6 +1035,7 @@ impl StacksMessageCodec for BlockRejection { chain_id, 
signature, metadata, + response_data, }) } } @@ -1046,6 +1169,7 @@ mod test { RejectCode::ValidationFailed(ValidateRejectCode::InvalidBlock), &StacksPrivateKey::new(), thread_rng().gen_bool(0.5), + thread_rng().next_u64(), ); let serialized_rejection = rejection.serialize_to_vec(); let deserialized_rejection = read_next::(&mut &serialized_rejection[..]) @@ -1057,6 +1181,7 @@ mod test { RejectCode::ConnectivityIssues, &StacksPrivateKey::new(), thread_rng().gen_bool(0.5), + thread_rng().next_u64(), ); let serialized_rejection = rejection.serialize_to_vec(); let deserialized_rejection = read_next::(&mut &serialized_rejection[..]) @@ -1070,6 +1195,7 @@ mod test { signer_signature_hash: Sha512Trunc256Sum([0u8; 32]), signature: MessageSignature::empty(), metadata: SignerMessageMetadata::default(), + response_data: BlockResponseData::new(thread_rng().next_u64()), }; let response = BlockResponse::Accepted(accepted); let serialized_response = response.serialize_to_vec(); @@ -1082,6 +1208,7 @@ mod test { RejectCode::ValidationFailed(ValidateRejectCode::InvalidBlock), &StacksPrivateKey::new(), thread_rng().gen_bool(0.5), + thread_rng().next_u64(), )); let serialized_response = response.serialize_to_vec(); let deserialized_response = read_next::(&mut &serialized_response[..]) @@ -1095,6 +1222,7 @@ mod test { signer_signature_hash: Sha512Trunc256Sum([2u8; 32]), signature: MessageSignature::empty(), metadata: SignerMessageMetadata::default(), + response_data: BlockResponseData::new(thread_rng().next_u64()), }; let signer_message = SignerMessage::BlockResponse(BlockResponse::Accepted(accepted)); let serialized_signer_message = signer_message.serialize_to_vec(); @@ -1241,9 +1369,9 @@ mod test { #[test] fn test_backwards_compatibility() { let block_rejected_hex = 
"010100000050426c6f636b206973206e6f7420612074656e7572652d737461727420626c6f636b2c20616e642068617320616e20756e7265636f676e697a65642074656e75726520636f6e73656e7375732068617368000691f95f84b7045f7dce7757052caa986ef042cb58f7df5031a3b5b5d0e3dda63e80000000006fb349212e1a1af1a3c712878d5159b5ec14636adb6f70be00a6da4ad4f88a9934d8a9abb229620dd8e0f225d63401e36c64817fb29e6c05591dcbe95c512df3"; - let block_rejected_bytes = hex_bytes(&block_rejected_hex).unwrap(); + let block_rejected_bytes = hex_bytes(block_rejected_hex).unwrap(); let block_accepted_hex = "010011717149677c2ac97d15ae5954f7a716f10100b9cb81a2bf27551b2f2e54ef19001c694f8134c5c90f2f2bcd330e9f423204884f001b5df0050f36a2c4ff79dd93522bb2ae395ea87de4964886447507c18374b7a46ee2e371e9bf332f0706a3e8"; - let block_accepted_bytes = hex_bytes(&block_accepted_hex).unwrap(); + let block_accepted_bytes = hex_bytes(block_accepted_hex).unwrap(); let block_rejected = read_next::(&mut &block_rejected_bytes[..]) .expect("Failed to deserialize BlockRejection"); let block_accepted = read_next::(&mut &block_accepted_bytes[..]) @@ -1258,6 +1386,7 @@ mod test { chain_id: CHAIN_ID_TESTNET, signature: MessageSignature::from_hex("006fb349212e1a1af1a3c712878d5159b5ec14636adb6f70be00a6da4ad4f88a9934d8a9abb229620dd8e0f225d63401e36c64817fb29e6c05591dcbe95c512df3").unwrap(), metadata: SignerMessageMetadata::empty(), + response_data: BlockResponseData::new(u64::MAX) })) ); @@ -1270,6 +1399,7 @@ mod test { .unwrap(), metadata: SignerMessageMetadata::empty(), signature: MessageSignature::from_hex("001c694f8134c5c90f2f2bcd330e9f423204884f001b5df0050f36a2c4ff79dd93522bb2ae395ea87de4964886447507c18374b7a46ee2e371e9bf332f0706a3e8").unwrap(), + response_data: BlockResponseData::new(u64::MAX) })) ); } @@ -1277,9 +1407,9 @@ mod test { #[test] fn test_block_response_metadata() { let block_rejected_hex = 
"010100000050426c6f636b206973206e6f7420612074656e7572652d737461727420626c6f636b2c20616e642068617320616e20756e7265636f676e697a65642074656e75726520636f6e73656e7375732068617368000691f95f84b7045f7dce7757052caa986ef042cb58f7df5031a3b5b5d0e3dda63e80000000006fb349212e1a1af1a3c712878d5159b5ec14636adb6f70be00a6da4ad4f88a9934d8a9abb229620dd8e0f225d63401e36c64817fb29e6c05591dcbe95c512df30000000b48656c6c6f20776f726c64"; - let block_rejected_bytes = hex_bytes(&block_rejected_hex).unwrap(); + let block_rejected_bytes = hex_bytes(block_rejected_hex).unwrap(); let block_accepted_hex = "010011717149677c2ac97d15ae5954f7a716f10100b9cb81a2bf27551b2f2e54ef19001c694f8134c5c90f2f2bcd330e9f423204884f001b5df0050f36a2c4ff79dd93522bb2ae395ea87de4964886447507c18374b7a46ee2e371e9bf332f0706a3e80000000b48656c6c6f20776f726c64"; - let block_accepted_bytes = hex_bytes(&block_accepted_hex).unwrap(); + let block_accepted_bytes = hex_bytes(block_accepted_hex).unwrap(); let block_rejected = read_next::(&mut &block_rejected_bytes[..]) .expect("Failed to deserialize BlockRejection"); let block_accepted = read_next::(&mut &block_accepted_bytes[..]) @@ -1296,6 +1426,7 @@ mod test { metadata: SignerMessageMetadata { server_version: "Hello world".to_string(), }, + response_data: BlockResponseData::new(u64::MAX), })) ); @@ -1310,6 +1441,7 @@ mod test { server_version: "Hello world".to_string(), }, signature: MessageSignature::from_hex("001c694f8134c5c90f2f2bcd330e9f423204884f001b5df0050f36a2c4ff79dd93522bb2ae395ea87de4964886447507c18374b7a46ee2e371e9bf332f0706a3e8").unwrap(), + response_data: BlockResponseData::empty(), })) ); } @@ -1322,4 +1454,137 @@ mod test { .expect("Failed to deserialize SignerMessageMetadata"); assert_eq!(deserialized_metadata, SignerMessageMetadata::empty()); } + + #[test] + fn block_response_data_serialization() { + let mut response_data = BlockResponseData::new(2); + response_data.unknown_bytes = vec![1, 2, 3, 4]; + let mut bytes = vec![]; + response_data.consensus_serialize(&mut 
bytes).unwrap(); + // 1 byte version + 4 bytes (bytes_len) + 8 bytes tenure_extend_timestamp + 4 bytes unknown_bytes + assert_eq!(bytes.len(), 17); + let deserialized_data = read_next::(&mut &bytes[..]) + .expect("Failed to deserialize BlockResponseData"); + assert_eq!(response_data, deserialized_data); + + let response_data = BlockResponseData::new(2); + let mut bytes = vec![]; + response_data.consensus_serialize(&mut bytes).unwrap(); + // 1 byte version + 4 bytes (bytes_len) + 8 bytes tenure_extend_timestamp + 0 bytes unknown_bytes + assert_eq!(bytes.len(), 13); + let deserialized_data = read_next::(&mut &bytes[..]) + .expect("Failed to deserialize BlockResponseData"); + assert_eq!(response_data, deserialized_data); + } + + /// Mock struct for testing "future proofing" of the block response data + pub struct NewerBlockResponseData { + pub version: u8, + pub tenure_extend_timestamp: u64, + pub some_other_field: u64, + pub yet_another_field: u64, + } + + impl NewerBlockResponseData { + pub fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { + write_next(fd, &self.tenure_extend_timestamp)?; + write_next(fd, &self.some_other_field)?; + write_next(fd, &self.yet_another_field)?; + Ok(()) + } + + pub fn consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { + write_next(fd, &self.version)?; + let mut inner_bytes = vec![]; + self.inner_consensus_serialize(&mut inner_bytes)?; + let bytes_len = inner_bytes.len() as u32; + write_next(fd, &bytes_len)?; + fd.write_all(&inner_bytes).map_err(CodecError::WriteError)?; + Ok(()) + } + } + + #[test] + fn test_newer_block_response_data() { + let new_response_data = NewerBlockResponseData { + version: 11, + tenure_extend_timestamp: 2, + some_other_field: 3, + yet_another_field: 4, + }; + + let mut bytes = vec![]; + new_response_data.consensus_serialize(&mut bytes).unwrap(); + let mut reader = bytes.as_slice(); + let deserialized_data = read_next::(&mut reader) + .expect("Failed to deserialize 
BlockResponseData"); + assert_eq!(reader.len(), 0, "Expected bytes to be fully consumed"); + assert_eq!(deserialized_data.version, 11); + assert_eq!(deserialized_data.tenure_extend_timestamp, 2); + // two extra u64s: + assert_eq!(deserialized_data.unknown_bytes.len(), 16); + + // BlockResponseData with unknown bytes can serialize/deserialize back to itself + let mut bytes = vec![]; + deserialized_data.consensus_serialize(&mut bytes).unwrap(); + let deserialized_data_2 = read_next::(&mut &bytes[..]) + .expect("Failed to deserialize BlockResponseData"); + assert_eq!(deserialized_data, deserialized_data_2); + } + + /// Test using an older version of BlockAccepted to verify that we can deserialize + /// future versions + + #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] + pub struct BlockAcceptedOld { + /// The signer signature hash of the block that was accepted + pub signer_signature_hash: Sha512Trunc256Sum, + /// The signer's signature across the acceptance + pub signature: MessageSignature, + /// Signer message metadata + pub metadata: SignerMessageMetadata, + } + + impl StacksMessageCodec for BlockAcceptedOld { + fn consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { + write_next(fd, &self.signer_signature_hash)?; + write_next(fd, &self.signature)?; + write_next(fd, &self.metadata)?; + Ok(()) + } + + fn consensus_deserialize(fd: &mut R) -> Result { + let signer_signature_hash = read_next::(fd)?; + let signature = read_next::(fd)?; + let metadata = read_next::(fd)?; + Ok(Self { + signer_signature_hash, + signature, + metadata, + }) + } + } + + #[test] + fn block_accepted_old_version_can_deserialize() { + let block_accepted = BlockAccepted { + signer_signature_hash: Sha512Trunc256Sum::from_hex("11717149677c2ac97d15ae5954f7a716f10100b9cb81a2bf27551b2f2e54ef19").unwrap(), + metadata: SignerMessageMetadata::default(), + signature: 
MessageSignature::from_hex("001c694f8134c5c90f2f2bcd330e9f423204884f001b5df0050f36a2c4ff79dd93522bb2ae395ea87de4964886447507c18374b7a46ee2e371e9bf332f0706a3e8").unwrap(), + response_data: BlockResponseData::new(u64::MAX) + }; + + let mut bytes = vec![]; + block_accepted.consensus_serialize(&mut bytes).unwrap(); + + // Ensure the old version can deserialize + let block_accepted_old = read_next::(&mut &bytes[..]) + .expect("Failed to deserialize BlockAcceptedOld"); + assert_eq!( + block_accepted.signer_signature_hash, + block_accepted_old.signer_signature_hash + ); + assert_eq!(block_accepted.signature, block_accepted_old.signature); + assert_eq!(block_accepted.metadata, block_accepted_old.metadata); + } } diff --git a/stacks-common/src/libcommon.rs b/stacks-common/src/libcommon.rs index 1a13aa02ed..04c3acc1ea 100644 --- a/stacks-common/src/libcommon.rs +++ b/stacks-common/src/libcommon.rs @@ -81,10 +81,11 @@ pub mod consts { pub const PEER_VERSION_EPOCH_2_4: u8 = 0x09; pub const PEER_VERSION_EPOCH_2_5: u8 = 0x0a; pub const PEER_VERSION_EPOCH_3_0: u8 = 0x0b; + pub const PEER_VERSION_EPOCH_3_1: u8 = 0x0c; /// this should be updated to the latest network epoch version supported by /// this node. this will be checked by the `validate_epochs()` method. - pub const PEER_NETWORK_EPOCH: u32 = PEER_VERSION_EPOCH_3_0 as u32; + pub const PEER_NETWORK_EPOCH: u32 = PEER_VERSION_EPOCH_3_1 as u32; /// set the fourth byte of the peer version pub const PEER_VERSION_MAINNET: u32 = PEER_VERSION_MAINNET_MAJOR | PEER_NETWORK_EPOCH; @@ -93,6 +94,9 @@ pub mod consts { /// network identifiers pub const NETWORK_ID_MAINNET: u32 = 0x17000000; pub const NETWORK_ID_TESTNET: u32 = 0xff000000; + + /// number of uSTX per STX + pub const MICROSTACKS_PER_STACKS: u32 = 1_000_000; } /// This test asserts that the constant above doesn't change. 
diff --git a/stacks-common/src/types/chainstate.rs b/stacks-common/src/types/chainstate.rs index 47d6c3c499..59347ed36a 100644 --- a/stacks-common/src/types/chainstate.rs +++ b/stacks-common/src/types/chainstate.rs @@ -30,6 +30,68 @@ impl_byte_array_serde!(TrieHash); pub const TRIEHASH_ENCODED_SIZE: usize = 32; +impl TrieHash { + pub fn from_key(k: &str) -> Self { + Self::from_data(k.as_bytes()) + } + + /// TrieHash of zero bytes + pub fn from_empty_data() -> TrieHash { + // sha2-512/256 hash of empty string. + // this is used so frequently it helps performance if we just have a constant for it. + TrieHash([ + 0xc6, 0x72, 0xb8, 0xd1, 0xef, 0x56, 0xed, 0x28, 0xab, 0x87, 0xc3, 0x62, 0x2c, 0x51, + 0x14, 0x06, 0x9b, 0xdd, 0x3a, 0xd7, 0xb8, 0xf9, 0x73, 0x74, 0x98, 0xd0, 0xc0, 0x1e, + 0xce, 0xf0, 0x96, 0x7a, + ]) + } + + /// TrieHash from bytes + pub fn from_data(data: &[u8]) -> TrieHash { + if data.len() == 0 { + return TrieHash::from_empty_data(); + } + + let mut tmp = [0u8; 32]; + + let mut hasher = Sha512_256::new(); + hasher.update(data); + tmp.copy_from_slice(hasher.finalize().as_slice()); + + TrieHash(tmp) + } + + pub fn from_data_array>(data: &[B]) -> TrieHash { + if data.len() == 0 { + return TrieHash::from_empty_data(); + } + + let mut tmp = [0u8; 32]; + + let mut hasher = Sha512_256::new(); + + for item in data.iter() { + hasher.update(item); + } + tmp.copy_from_slice(hasher.finalize().as_slice()); + TrieHash(tmp) + } + + /// Convert to a String that can be used in e.g. 
sqlite + pub fn to_string(&self) -> String { + let s = format!("{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}", + self.0[0], self.0[1], self.0[2], self.0[3], + self.0[4], self.0[5], self.0[6], self.0[7], + self.0[8], self.0[9], self.0[10], self.0[11], + self.0[12], self.0[13], self.0[14], self.0[15], + self.0[16], self.0[17], self.0[18], self.0[19], + self.0[20], self.0[21], self.0[22], self.0[23], + self.0[24], self.0[25], self.0[26], self.0[27], + self.0[28], self.0[29], self.0[30], self.0[31]); + s + } +} + #[derive(Serialize, Deserialize)] pub struct BurnchainHeaderHash(pub [u8; 32]); impl_array_newtype!(BurnchainHeaderHash, u8, 32); diff --git a/stacks-common/src/types/mod.rs b/stacks-common/src/types/mod.rs index 5e13a6d330..49fdfa84fd 100644 --- a/stacks-common/src/types/mod.rs +++ b/stacks-common/src/types/mod.rs @@ -1,3 +1,20 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +use std::cell::LazyCell; use std::cmp::Ordering; use std::fmt; use std::ops::{Deref, DerefMut, Index, IndexMut}; @@ -11,6 +28,7 @@ use crate::address::{ C32_ADDRESS_VERSION_MAINNET_MULTISIG, C32_ADDRESS_VERSION_MAINNET_SINGLESIG, C32_ADDRESS_VERSION_TESTNET_MULTISIG, C32_ADDRESS_VERSION_TESTNET_SINGLESIG, }; +use crate::consts::MICROSTACKS_PER_STACKS; use crate::deps_common::bitcoin::blockdata::transaction::TxOut; use crate::types::chainstate::{StacksAddress, StacksPublicKey}; use crate::util::hash::Hash160; @@ -19,6 +37,9 @@ use crate::util::secp256k1::{MessageSignature, Secp256k1PublicKey}; pub mod chainstate; pub mod net; +#[cfg(test)] +pub mod tests; + /// A container for public keys (compressed secp256k1 public keys) pub struct StacksPublicKeyBuffer(pub [u8; 33]); impl_array_newtype!(StacksPublicKeyBuffer, u8, 33); @@ -81,6 +102,7 @@ pub enum StacksEpochId { Epoch24 = 0x02019, Epoch25 = 0x0201a, Epoch30 = 0x03000, + Epoch31 = 0x03001, } #[derive(Debug)] @@ -89,9 +111,153 @@ pub enum MempoolCollectionBehavior { ByReceiveTime, } +/// Struct describing an interval of time (measured in burnchain blocks) during which a coinbase is +/// allotted. Applies to SIP-029 code paths and later. +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct CoinbaseInterval { + /// amount of uSTX to award + pub coinbase: u128, + /// height of the chain after Stacks chain genesis at which this coinbase interval starts + pub effective_start_height: u64, +} + +/// From SIP-029: +/// +/// | Coinbase Interval | Bitcoin Height | Offset Height | Approx. 
Supply | STX Reward | Annual Inflation | +/// |--------------------|----------------|---------------------|------------------|------------|------------------| +/// | Current | - | - | 1,552,452,847 | 1000 | - | +/// | 1st | 945,000 | 278,950 | 1,627,352,847 | 500 (50%) | 3.23% | +/// | 2nd | 1,050,000 | 383,950 | 1,679,852,847 | 250 (50%) | 1.57% | +/// | 3rd | 1,260,000 | 593,950 | 1,732,352,847 | 125 (50%) | 0.76% | +/// | 4th | 1,470,000 | 803,950 | 1,758,602,847 | 62.5 (50%) | 0.37% | +/// | - | 2,197,560 | 1,531,510 | 1,804,075,347 | 62.5 (0%) | 0.18% | +/// +/// The above is for mainnet, which has a burnchain year of 52596 blocks and starts at burnchain height 666050. +/// The `Offset Height` column is simply the difference between `Bitcoin Height` and 666050. + +/// Mainnet coinbase intervals, as of SIP-029 +pub const COINBASE_INTERVALS_MAINNET: LazyCell<[CoinbaseInterval; 5]> = LazyCell::new(|| { + let emissions_schedule = [ + CoinbaseInterval { + coinbase: 1_000 * u128::from(MICROSTACKS_PER_STACKS), + effective_start_height: 0, + }, + CoinbaseInterval { + coinbase: 500 * u128::from(MICROSTACKS_PER_STACKS), + effective_start_height: 278_950, + }, + CoinbaseInterval { + coinbase: 250 * u128::from(MICROSTACKS_PER_STACKS), + effective_start_height: 383_950, + }, + CoinbaseInterval { + coinbase: 125 * u128::from(MICROSTACKS_PER_STACKS), + effective_start_height: 593_950, + }, + CoinbaseInterval { + coinbase: (625 * u128::from(MICROSTACKS_PER_STACKS)) / 10, + effective_start_height: 803_950, + }, + ]; + assert!(CoinbaseInterval::check_order(&emissions_schedule)); + emissions_schedule +}); + +/// Testnet coinbase intervals, as of SIP-029 +pub const COINBASE_INTERVALS_TESTNET: LazyCell<[CoinbaseInterval; 5]> = LazyCell::new(|| { + let emissions_schedule = [ + CoinbaseInterval { + coinbase: 1_000 * u128::from(MICROSTACKS_PER_STACKS), + effective_start_height: 0, + }, + CoinbaseInterval { + coinbase: 500 * u128::from(MICROSTACKS_PER_STACKS), + 
effective_start_height: 77_777, + }, + CoinbaseInterval { + coinbase: 250 * u128::from(MICROSTACKS_PER_STACKS), + effective_start_height: 77_777 * 7, + }, + CoinbaseInterval { + coinbase: 125 * u128::from(MICROSTACKS_PER_STACKS), + effective_start_height: 77_777 * 14, + }, + CoinbaseInterval { + coinbase: (625 * u128::from(MICROSTACKS_PER_STACKS)) / 10, + effective_start_height: 77_777 * 21, + }, + ]; + assert!(CoinbaseInterval::check_order(&emissions_schedule)); + emissions_schedule +}); + +/// Used for testing to substitute a coinbase schedule +#[cfg(any(test, feature = "testing"))] +pub static COINBASE_INTERVALS_TEST: std::sync::Mutex>> = + std::sync::Mutex::new(None); + +#[cfg(any(test, feature = "testing"))] +pub fn set_test_coinbase_schedule(coinbase_schedule: Option>) { + match COINBASE_INTERVALS_TEST.lock() { + Ok(mut schedule_guard) => { + *schedule_guard = coinbase_schedule; + } + Err(_e) => { + panic!("COINBASE_INTERVALS_TEST mutex poisoned"); + } + } +} + +impl CoinbaseInterval { + /// Look up the value of a coinbase at an effective height. 
+ /// Precondition: `intervals` must be sorted in ascending order by `effective_start_height` + pub fn get_coinbase_at_effective_height( + intervals: &[CoinbaseInterval], + effective_height: u64, + ) -> u128 { + if intervals.is_empty() { + return 0; + } + if intervals.len() == 1 { + if intervals[0].effective_start_height <= effective_height { + return intervals[0].coinbase; + } else { + return 0; + } + } + + for i in 0..(intervals.len() - 1) { + if intervals[i].effective_start_height <= effective_height + && effective_height < intervals[i + 1].effective_start_height + { + return intervals[i].coinbase; + } + } + + // in last interval, which per the above checks is guaranteed to exist + intervals.last().unwrap_or_else(|| unreachable!()).coinbase + } + + /// Verify that a list of intervals is sorted in ascending order by `effective_start_height` + pub fn check_order(intervals: &[CoinbaseInterval]) -> bool { + if intervals.len() < 2 { + return true; + } + + let mut ht = intervals[0].effective_start_height; + for i in 1..intervals.len() { + if intervals[i].effective_start_height < ht { + return false; + } + ht = intervals[i].effective_start_height; + } + true + } +} + impl StacksEpochId { pub fn latest() -> StacksEpochId { - StacksEpochId::Epoch30 + StacksEpochId::Epoch31 } /// In this epoch, how should the mempool perform garbage collection? 
@@ -105,7 +271,9 @@ impl StacksEpochId { | StacksEpochId::Epoch23 | StacksEpochId::Epoch24 | StacksEpochId::Epoch25 => MempoolCollectionBehavior::ByStacksHeight, - StacksEpochId::Epoch30 => MempoolCollectionBehavior::ByReceiveTime, + StacksEpochId::Epoch30 | StacksEpochId::Epoch31 => { + MempoolCollectionBehavior::ByReceiveTime + } } } @@ -120,7 +288,7 @@ impl StacksEpochId { | StacksEpochId::Epoch22 | StacksEpochId::Epoch23 | StacksEpochId::Epoch24 => false, - StacksEpochId::Epoch25 | StacksEpochId::Epoch30 => true, + StacksEpochId::Epoch25 | StacksEpochId::Epoch30 | StacksEpochId::Epoch31 => true, } } @@ -134,7 +302,10 @@ impl StacksEpochId { | StacksEpochId::Epoch21 | StacksEpochId::Epoch22 | StacksEpochId::Epoch23 => false, - StacksEpochId::Epoch24 | StacksEpochId::Epoch25 | StacksEpochId::Epoch30 => true, + StacksEpochId::Epoch24 + | StacksEpochId::Epoch25 + | StacksEpochId::Epoch30 + | StacksEpochId::Epoch31 => true, } } @@ -150,7 +321,7 @@ impl StacksEpochId { | StacksEpochId::Epoch23 | StacksEpochId::Epoch24 | StacksEpochId::Epoch25 => false, - StacksEpochId::Epoch30 => true, + StacksEpochId::Epoch30 | StacksEpochId::Epoch31 => true, } } @@ -166,7 +337,22 @@ impl StacksEpochId { | StacksEpochId::Epoch23 | StacksEpochId::Epoch24 | StacksEpochId::Epoch25 => false, - StacksEpochId::Epoch30 => true, + StacksEpochId::Epoch30 | StacksEpochId::Epoch31 => true, + } + } + + /// Whether or not this epoch supports shadow blocks + pub fn supports_shadow_blocks(&self) -> bool { + match self { + StacksEpochId::Epoch10 + | StacksEpochId::Epoch20 + | StacksEpochId::Epoch2_05 + | StacksEpochId::Epoch21 + | StacksEpochId::Epoch22 + | StacksEpochId::Epoch23 + | StacksEpochId::Epoch24 + | StacksEpochId::Epoch25 => false, + StacksEpochId::Epoch30 | StacksEpochId::Epoch31 => true, } } @@ -197,7 +383,7 @@ impl StacksEpochId { | StacksEpochId::Epoch23 | StacksEpochId::Epoch24 | StacksEpochId::Epoch25 => 0, - StacksEpochId::Epoch30 => MINING_COMMITMENT_FREQUENCY_NAKAMOTO, + 
StacksEpochId::Epoch30 | StacksEpochId::Epoch31 => MINING_COMMITMENT_FREQUENCY_NAKAMOTO, } } @@ -233,7 +419,132 @@ impl StacksEpochId { | StacksEpochId::Epoch23 | StacksEpochId::Epoch24 | StacksEpochId::Epoch25 => false, - StacksEpochId::Epoch30 => cur_reward_cycle > first_epoch30_reward_cycle, + StacksEpochId::Epoch30 | StacksEpochId::Epoch31 => { + cur_reward_cycle > first_epoch30_reward_cycle + } + } + } + + /// What is the coinbase (in uSTX) to award for the given burnchain height? + /// Applies prior to SIP-029 + fn coinbase_reward_pre_sip029( + &self, + first_burnchain_height: u64, + current_burnchain_height: u64, + ) -> u128 { + /* + From https://forum.stacks.org/t/pox-consensus-and-stx-future-supply + + """ + + 1000 STX for years 0-4 + 500 STX for years 4-8 + 250 STX for years 8-12 + 125 STX in perpetuity + + + From the Token Whitepaper: + + We expect that once native mining goes live, approximately 4383 blocks will be pro- + cessed per month, or approximately 52,596 blocks will be processed per year. + + """ + */ + // this is saturating subtraction for the initial reward calculation + // where we are computing the coinbase reward for blocks that occur *before* + // the `first_burn_block_height` + let effective_ht = current_burnchain_height.saturating_sub(first_burnchain_height); + let blocks_per_year = 52596; + let stx_reward = if effective_ht < blocks_per_year * 4 { + 1000 + } else if effective_ht < blocks_per_year * 8 { + 500 + } else if effective_ht < blocks_per_year * 12 { + 250 + } else { + 125 + }; + + stx_reward * (u128::from(MICROSTACKS_PER_STACKS)) + } + + /// Get the coinbase intervals to use. 
+ /// Can be overridden by tests + #[cfg(any(test, feature = "testing"))] + pub(crate) fn get_coinbase_intervals(mainnet: bool) -> Vec<CoinbaseInterval> { + match COINBASE_INTERVALS_TEST.lock() { + Ok(schedule_opt) => { + if let Some(schedule) = (*schedule_opt).as_ref() { + info!("Use overridden coinbase schedule {:?}", &schedule); + return schedule.clone(); + } + } + Err(_e) => { + panic!("COINBASE_INTERVALS_TEST mutex poisoned"); + } + } + + if mainnet { + COINBASE_INTERVALS_MAINNET.to_vec() + } else { + COINBASE_INTERVALS_TESTNET.to_vec() + } + } + + #[cfg(not(any(test, feature = "testing")))] + pub(crate) fn get_coinbase_intervals(mainnet: bool) -> Vec<CoinbaseInterval> { + if mainnet { + COINBASE_INTERVALS_MAINNET.to_vec() + } else { + COINBASE_INTERVALS_TESTNET.to_vec() + } + } + + /// What are the offsets after chain-start when coinbase reductions occur? + /// Applies at and after SIP-029. + /// Uses coinbase intervals defined by COINBASE_INTERVALS_MAINNET, unless overridden by a unit + /// or integration test. + fn coinbase_reward_sip029( + &self, + mainnet: bool, + first_burnchain_height: u64, + current_burnchain_height: u64, + ) -> u128 { + let effective_ht = current_burnchain_height.saturating_sub(first_burnchain_height); + let coinbase_intervals = Self::get_coinbase_intervals(mainnet); + CoinbaseInterval::get_coinbase_at_effective_height(&coinbase_intervals, effective_ht) + } + + /// What is the coinbase to award? 
+ pub fn coinbase_reward( + &self, + mainnet: bool, + first_burnchain_height: u64, + current_burnchain_height: u64, + ) -> u128 { + match self { + StacksEpochId::Epoch10 => { + // Stacks is not active + 0 + } + StacksEpochId::Epoch20 + | StacksEpochId::Epoch2_05 + | StacksEpochId::Epoch21 + | StacksEpochId::Epoch22 + | StacksEpochId::Epoch23 + | StacksEpochId::Epoch24 + | StacksEpochId::Epoch25 + | StacksEpochId::Epoch30 => { + self.coinbase_reward_pre_sip029(first_burnchain_height, current_burnchain_height) + } + StacksEpochId::Epoch31 => { + let cb = self.coinbase_reward_sip029( + mainnet, + first_burnchain_height, + current_burnchain_height, + ); + cb + } } } } @@ -250,6 +561,7 @@ impl std::fmt::Display for StacksEpochId { StacksEpochId::Epoch24 => write!(f, "2.4"), StacksEpochId::Epoch25 => write!(f, "2.5"), StacksEpochId::Epoch30 => write!(f, "3.0"), + StacksEpochId::Epoch31 => write!(f, "3.1"), } } } @@ -268,6 +580,7 @@ impl TryFrom for StacksEpochId { x if x == StacksEpochId::Epoch24 as u32 => Ok(StacksEpochId::Epoch24), x if x == StacksEpochId::Epoch25 as u32 => Ok(StacksEpochId::Epoch25), x if x == StacksEpochId::Epoch30 as u32 => Ok(StacksEpochId::Epoch30), + x if x == StacksEpochId::Epoch31 as u32 => Ok(StacksEpochId::Epoch31), _ => Err("Invalid epoch"), } } diff --git a/stacks-common/src/types/tests.rs b/stacks-common/src/types/tests.rs new file mode 100644 index 0000000000..20676999e7 --- /dev/null +++ b/stacks-common/src/types/tests.rs @@ -0,0 +1,352 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use super::{ + set_test_coinbase_schedule, CoinbaseInterval, StacksEpochId, COINBASE_INTERVALS_MAINNET, + COINBASE_INTERVALS_TESTNET, +}; + +#[test] +fn test_mainnet_coinbase_emissions() { + assert_eq!(COINBASE_INTERVALS_MAINNET.len(), 5); + assert_eq!(COINBASE_INTERVALS_MAINNET[0].coinbase, 1_000_000_000); + assert_eq!(COINBASE_INTERVALS_MAINNET[1].coinbase, 500_000_000); + assert_eq!(COINBASE_INTERVALS_MAINNET[2].coinbase, 250_000_000); + assert_eq!(COINBASE_INTERVALS_MAINNET[3].coinbase, 125_000_000); + assert_eq!(COINBASE_INTERVALS_MAINNET[4].coinbase, 62_500_000); + + // heights from SIP-029 + assert_eq!( + COINBASE_INTERVALS_MAINNET[0].effective_start_height, + 666_050 - 666_050 + ); + assert_eq!( + COINBASE_INTERVALS_MAINNET[1].effective_start_height, + 945_000 - 666_050 + ); + assert_eq!( + COINBASE_INTERVALS_MAINNET[2].effective_start_height, + 1_050_000 - 666_050 + ); + assert_eq!( + COINBASE_INTERVALS_MAINNET[3].effective_start_height, + 1_260_000 - 666_050 + ); + assert_eq!( + COINBASE_INTERVALS_MAINNET[4].effective_start_height, + 1_470_000 - 666_050 + ); +} + +#[test] +fn test_get_coinbase_at_effective_height() { + assert!(CoinbaseInterval::check_order(&*COINBASE_INTERVALS_MAINNET)); + + assert_eq!( + CoinbaseInterval::get_coinbase_at_effective_height( + &*COINBASE_INTERVALS_MAINNET, + 666050 - 666050 + ), + 1_000_000_000 + ); + assert_eq!( + CoinbaseInterval::get_coinbase_at_effective_height( + &*COINBASE_INTERVALS_MAINNET, + 666051 - 666050 + ), + 1_000_000_000 + ); + + assert_eq!( + CoinbaseInterval::get_coinbase_at_effective_height( + &*COINBASE_INTERVALS_MAINNET, + 944_999 - 
666050 + ), + 1_000_000_000 + ); + assert_eq!( + CoinbaseInterval::get_coinbase_at_effective_height( + &*COINBASE_INTERVALS_MAINNET, + 945_000 - 666050 + ), + 500_000_000 + ); + assert_eq!( + CoinbaseInterval::get_coinbase_at_effective_height( + &*COINBASE_INTERVALS_MAINNET, + 945_001 - 666050 + ), + 500_000_000 + ); + + assert_eq!( + CoinbaseInterval::get_coinbase_at_effective_height( + &*COINBASE_INTERVALS_MAINNET, + 1_049_999 - 666050 + ), + 500_000_000 + ); + assert_eq!( + CoinbaseInterval::get_coinbase_at_effective_height( + &*COINBASE_INTERVALS_MAINNET, + 1_050_000 - 666050 + ), + 250_000_000 + ); + assert_eq!( + CoinbaseInterval::get_coinbase_at_effective_height( + &*COINBASE_INTERVALS_MAINNET, + 1_050_001 - 666050 + ), + 250_000_000 + ); + + assert_eq!( + CoinbaseInterval::get_coinbase_at_effective_height( + &*COINBASE_INTERVALS_MAINNET, + 1_259_999 - 666050 + ), + 250_000_000 + ); + assert_eq!( + CoinbaseInterval::get_coinbase_at_effective_height( + &*COINBASE_INTERVALS_MAINNET, + 1_260_000 - 666050 + ), + 125_000_000 + ); + assert_eq!( + CoinbaseInterval::get_coinbase_at_effective_height( + &*COINBASE_INTERVALS_MAINNET, + 1_260_001 - 666050 + ), + 125_000_000 + ); + + assert_eq!( + CoinbaseInterval::get_coinbase_at_effective_height( + &*COINBASE_INTERVALS_MAINNET, + 1_469_999 - 666050 + ), + 125_000_000 + ); + assert_eq!( + CoinbaseInterval::get_coinbase_at_effective_height( + &*COINBASE_INTERVALS_MAINNET, + 1_470_000 - 666050 + ), + 62_500_000 + ); + assert_eq!( + CoinbaseInterval::get_coinbase_at_effective_height( + &*COINBASE_INTERVALS_MAINNET, + 1_470_001 - 666050 + ), + 62_500_000 + ); + + assert_eq!( + CoinbaseInterval::get_coinbase_at_effective_height( + &*COINBASE_INTERVALS_MAINNET, + 2_197_559 - 666050 + ), + 62_500_000 + ); + assert_eq!( + CoinbaseInterval::get_coinbase_at_effective_height( + &*COINBASE_INTERVALS_MAINNET, + 2_197_560 - 666050 + ), + 62_500_000 + ); + assert_eq!( + CoinbaseInterval::get_coinbase_at_effective_height( + 
&*COINBASE_INTERVALS_MAINNET, + 2_197_561 - 666050 + ), + 62_500_000 + ); +} + +#[test] +fn test_epoch_coinbase_reward() { + // new coinbase schedule + assert_eq!( + StacksEpochId::Epoch31.coinbase_reward(true, 666050, 666050), + 1_000_000_000 + ); + assert_eq!( + StacksEpochId::Epoch31.coinbase_reward(true, 666050, 666051), + 1_000_000_000 + ); + + assert_eq!( + StacksEpochId::Epoch31.coinbase_reward(true, 666050, 944_999), + 1_000_000_000 + ); + assert_eq!( + StacksEpochId::Epoch31.coinbase_reward(true, 666050, 945_000), + 500_000_000 + ); + assert_eq!( + StacksEpochId::Epoch31.coinbase_reward(true, 666050, 945_001), + 500_000_000 + ); + + assert_eq!( + StacksEpochId::Epoch31.coinbase_reward(true, 666050, 1_049_999), + 500_000_000 + ); + assert_eq!( + StacksEpochId::Epoch31.coinbase_reward(true, 666050, 1_050_000), + 250_000_000 + ); + assert_eq!( + StacksEpochId::Epoch31.coinbase_reward(true, 666050, 1_050_001), + 250_000_000 + ); + + assert_eq!( + StacksEpochId::Epoch31.coinbase_reward(true, 666050, 1_259_999), + 250_000_000 + ); + assert_eq!( + StacksEpochId::Epoch31.coinbase_reward(true, 666050, 1_260_000), + 125_000_000 + ); + assert_eq!( + StacksEpochId::Epoch31.coinbase_reward(true, 666050, 1_260_001), + 125_000_000 + ); + + assert_eq!( + StacksEpochId::Epoch31.coinbase_reward(true, 666050, 1_469_999), + 125_000_000 + ); + assert_eq!( + StacksEpochId::Epoch31.coinbase_reward(true, 666050, 1_470_000), + 62_500_000 + ); + assert_eq!( + StacksEpochId::Epoch31.coinbase_reward(true, 666050, 1_470_001), + 62_500_000 + ); + + // old coinbase schedule + for epoch in [ + StacksEpochId::Epoch20, + StacksEpochId::Epoch2_05, + StacksEpochId::Epoch21, + StacksEpochId::Epoch22, + StacksEpochId::Epoch23, + StacksEpochId::Epoch24, + StacksEpochId::Epoch25, + ] + .iter() + { + assert_eq!( + epoch.coinbase_reward(true, 666050, 666050 + 52596 * 4 - 1), + 1_000_000_000 + ); + assert_eq!( + epoch.coinbase_reward(true, 666050, 666050 + 52596 * 4), + 500_000_000 + ); + 
assert_eq!( + epoch.coinbase_reward(true, 666050, 666050 + 52596 * 4 + 1), + 500_000_000 + ); + + assert_eq!( + epoch.coinbase_reward(true, 666050, 666050 + 52596 * 8 - 1), + 500_000_000 + ); + assert_eq!( + epoch.coinbase_reward(true, 666050, 666050 + 52596 * 8), + 250_000_000 + ); + assert_eq!( + epoch.coinbase_reward(true, 666050, 666050 + 52596 * 8 + 1), + 250_000_000 + ); + + assert_eq!( + epoch.coinbase_reward(true, 666050, 666050 + 52596 * 12 - 1), + 250_000_000 + ); + assert_eq!( + epoch.coinbase_reward(true, 666050, 666050 + 52596 * 12), + 125_000_000 + ); + assert_eq!( + epoch.coinbase_reward(true, 666050, 666050 + 52596 * 12 + 1), + 125_000_000 + ); + } +} + +/// Verifies that the test facility for setting a coinbase schedule in a unit or integration test +/// actually works. +#[test] +fn test_set_coinbase_intervals() { + let new_sched = vec![ + CoinbaseInterval { + coinbase: 1, + effective_start_height: 0, + }, + CoinbaseInterval { + coinbase: 2, + effective_start_height: 1, + }, + CoinbaseInterval { + coinbase: 3, + effective_start_height: 2, + }, + CoinbaseInterval { + coinbase: 4, + effective_start_height: 3, + }, + CoinbaseInterval { + coinbase: 5, + effective_start_height: 4, + }, + ]; + + assert_eq!( + StacksEpochId::get_coinbase_intervals(true), + *COINBASE_INTERVALS_MAINNET + ); + assert_eq!( + StacksEpochId::get_coinbase_intervals(false), + *COINBASE_INTERVALS_TESTNET + ); + + set_test_coinbase_schedule(Some(new_sched.clone())); + + assert_eq!(StacksEpochId::get_coinbase_intervals(true), new_sched); + assert_eq!(StacksEpochId::get_coinbase_intervals(false), new_sched); + + set_test_coinbase_schedule(None); + + assert_eq!( + StacksEpochId::get_coinbase_intervals(true), + *COINBASE_INTERVALS_MAINNET + ); + assert_eq!( + StacksEpochId::get_coinbase_intervals(false), + *COINBASE_INTERVALS_TESTNET + ); +} diff --git a/stacks-common/src/util/mod.rs b/stacks-common/src/util/mod.rs index a9dfc47806..5f733eddad 100644 --- a/stacks-common/src/util/mod.rs 
+++ b/stacks-common/src/util/mod.rs @@ -35,6 +35,9 @@ use std::path::Path; use std::time::{SystemTime, UNIX_EPOCH}; use std::{error, fmt, thread, time}; +#[cfg(any(test, feature = "testing"))] +pub mod tests; + pub fn get_epoch_time_secs() -> u64 { let start = SystemTime::now(); let since_the_epoch = start diff --git a/stacks-common/src/util/tests.rs b/stacks-common/src/util/tests.rs new file mode 100644 index 0000000000..b87e913718 --- /dev/null +++ b/stacks-common/src/util/tests.rs @@ -0,0 +1,99 @@ +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::sync::{Arc, Mutex}; +/// `TestFlag` is a thread-safe utility designed for managing shared state in testing scenarios. It wraps +/// a value of type `T` inside an `Arc>>`, allowing you to set and retrieve a value +/// across different parts of your codebase while ensuring thread safety. +/// +/// This structure is particularly useful when: +/// - You need a global or static variable in tests. +/// - You want to control the execution of custom test code paths by setting and checking a shared value. +/// +/// # Type Parameter +/// - `T`: The type of the value managed by the `TestFlag`. It must implement the `Default` and `Clone` traits. 
+///
+/// # Examples
+///
+/// ```rust
+/// use stacks_common::util::tests::TestFlag;
+/// use std::sync::{Arc, Mutex};
+///
+/// // Create a TestFlag instance
+/// let test_flag = TestFlag::default();
+///
+/// // Set a value in the test flag
+/// test_flag.set("test_value".to_string());
+///
+/// // Retrieve the value
+/// assert_eq!(test_flag.get(), "test_value".to_string());
+///
+/// // Reset the value to default
+/// test_flag.set("".to_string());
+/// assert_eq!(test_flag.get(), "".to_string());
+/// ```
+#[derive(Clone)]
+pub struct TestFlag<T>(pub Arc<Mutex<Option<T>>>);
+
+impl<T: Default + Clone> Default for TestFlag<T> {
+    fn default() -> Self {
+        Self(Arc::new(Mutex::new(None)))
+    }
+}
+
+impl<T: Default + Clone> TestFlag<T> {
+    /// Sets the value of the test flag.
+    ///
+    /// This method updates the value stored inside the `TestFlag`, replacing any existing value.
+    ///
+    /// # Arguments
+    /// - `value`: The new value to set for the `TestFlag`.
+    ///
+    /// # Examples
+    ///
+    /// ```rust
+    /// let test_flag = TestFlag::default();
+    /// test_flag.set(42);
+    /// assert_eq!(test_flag.get(), 42);
+    /// ```
+    pub fn set(&self, value: T) {
+        *self.0.lock().unwrap() = Some(value);
+    }
+
+    /// Retrieves the current value of the test flag.
+    ///
+    /// If no value has been set, this method returns the default value for the type `T`.
+    ///
+    /// # Returns
+    /// - The current value of the test flag, or the default value of `T` if none has been set.
+ /// + /// # Examples + /// + /// ```rust + /// let test_flag = TestFlag::default(); + /// + /// // Get the default value + /// assert_eq!(test_flag.get(), 0); // For T = i32, default is 0 + /// + /// // Set a value + /// test_flag.set(123); + /// + /// // Get the updated value + /// assert_eq!(test_flag.get(), 123); + /// ``` + pub fn get(&self) -> T { + self.0.lock().unwrap().clone().unwrap_or_default().clone() + } +} diff --git a/stacks-signer/CHANGELOG.md b/stacks-signer/CHANGELOG.md index 3183c0d5c3..78cadc0c05 100644 --- a/stacks-signer/CHANGELOG.md +++ b/stacks-signer/CHANGELOG.md @@ -7,12 +7,58 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ## [Unreleased] +## Added + +- Introduced the `block_proposal_max_age_secs` configuration option for signers, enabling them to automatically ignore block proposals that exceed the specified age in seconds. + +## Changed +- Improvements to the stale signer cleanup logic: deletes the prior signer if it has no remaining unprocessed blocks in its database +- Signers now listen to new block events from the stacks node to determine whether a block has been successfully appended to the chain tip + +## [3.1.0.0.1.0] + +### Added + +### Changed + +- Added tenure extend timestamp to signer block responses +- Added tenure_idle_timeout_secs configuration option for determining when a time-based tenure extend will be accepted + +## [3.1.0.0.0.0] + +### Added + +- **SIP-029 consensus rules, activating in epoch 3.1 at block 875,000** (see [SIP-029](https://github.com/will-corcoran/sips/blob/feat/sip-029-halving-alignment/sips/sip-029/sip-029-halving-alignment.md) for details) + +### Changed + +## [3.0.0.0.4.0] + +### Added + +### Changed + +- Use the same burn view loader in both block validation and block processing + +## [3.0.0.0.3.0] + ### Added ### Changed +- Allow a miner to extend their tenure immediately if the winner of the next tenure has committed to the wrong parent tenure (#5361) + +## 
[3.0.0.0.2.0] + +### Added +- Adds `tenure_last_block_proposal_timeout_secs` option to account for delayed global block acceptance. default to 30s + +### Changed + ## [3.0.0.0.1.0] +### Added + ### Changed - Change block rejection message to generic block response diff --git a/stacks-signer/Cargo.toml b/stacks-signer/Cargo.toml index 139c34fba8..22204daf97 100644 --- a/stacks-signer/Cargo.toml +++ b/stacks-signer/Cargo.toml @@ -36,16 +36,16 @@ serde_stacker = "0.1" slog = { version = "2.5.2", features = [ "max_level_trace" ] } slog-json = { version = "2.3.0", optional = true } slog-term = "2.6.0" -stacks-common = { path = "../stacks-common" } +stacks-common = { path = "../stacks-common", features = ["testing"] } stackslib = { path = "../stackslib" } thiserror = { workspace = true } tiny_http = { version = "0.12", optional = true } -toml = "0.5.6" +toml = { workspace = true } tracing = "0.1.37" tracing-subscriber = { version = "0.3.17", features = ["env-filter"] } rand = { workspace = true } url = "2.1.0" -rusqlite = { workspace = true } +rusqlite = { workspace = true, features = ["functions"] } [dev-dependencies] clarity = { path = "../clarity", features = ["testing"] } diff --git a/stacks-signer/src/chainstate.rs b/stacks-signer/src/chainstate.rs index fa24c8b22e..462f3dc2d2 100644 --- a/stacks-signer/src/chainstate.rs +++ b/stacks-signer/src/chainstate.rs @@ -122,6 +122,8 @@ pub struct ProposalEvalConfig { /// Time to wait for the last block of a tenure to be globally accepted or rejected before considering /// a new miner's block at the same height as valid. 
pub tenure_last_block_proposal_timeout: Duration, + /// How much idle time must pass before allowing a tenure extend + pub tenure_idle_timeout: Duration, } impl From<&SignerConfig> for ProposalEvalConfig { @@ -130,6 +132,7 @@ impl From<&SignerConfig> for ProposalEvalConfig { first_proposal_burn_block_timing: value.first_proposal_burn_block_timing, block_proposal_timeout: value.block_proposal_timeout, tenure_last_block_proposal_timeout: value.tenure_last_block_proposal_timeout, + tenure_idle_timeout: value.tenure_idle_timeout, } } } @@ -173,7 +176,7 @@ enum ProposedBy<'a> { CurrentSortition(&'a SortitionState), } -impl<'a> ProposedBy<'a> { +impl ProposedBy<'_> { pub fn state(&self) -> &SortitionState { match self { ProposedBy::LastSortition(x) => x, @@ -190,7 +193,6 @@ impl SortitionsView { signer_db: &mut SignerDb, block: &NakamotoBlock, block_pk: &StacksPublicKey, - reward_cycle: u64, reset_view_if_wrong_consensus_hash: bool, ) -> Result { if self @@ -203,7 +205,40 @@ impl SortitionsView { "current_sortition_consensus_hash" => ?self.cur_sortition.consensus_hash, ); self.cur_sortition.miner_status = SortitionMinerStatus::InvalidatedBeforeFirstBlock; + } else if let Some(tip) = signer_db.get_canonical_tip()? { + // Check if the current sortition is aligned with the expected tenure: + // - If the tip is in the current tenure, we are in the process of mining this tenure. + // - If the tip is not in the current tenure, then we’re starting a new tenure, + // and the current sortition's parent tenure must match the tenure of the tip. + // - If the tip is not building off of the current sortition's parent tenure, then + // check to see if the tip's parent is within the first proposal burn block timeout, + // which allows for forks when a burn block arrives quickly. + // - Else the miner of the current sortition has committed to an incorrect parent tenure. 
+ let consensus_hash_match = + self.cur_sortition.consensus_hash == tip.block.header.consensus_hash; + let parent_tenure_id_match = + self.cur_sortition.parent_tenure_id == tip.block.header.consensus_hash; + if !consensus_hash_match && !parent_tenure_id_match { + // More expensive check, so do it only if we need to. + let is_valid_parent_tenure = Self::check_parent_tenure_choice( + &self.cur_sortition, + block, + signer_db, + client, + &self.config.first_proposal_burn_block_timing, + )?; + if !is_valid_parent_tenure { + warn!( + "Current sortition does not build off of canonical tip tenure, marking as invalid"; + "current_sortition_parent" => ?self.cur_sortition.parent_tenure_id, + "tip_consensus_hash" => ?tip.block.header.consensus_hash, + ); + self.cur_sortition.miner_status = + SortitionMinerStatus::InvalidatedBeforeFirstBlock; + } + } } + if let Some(last_sortition) = self.last_sortition.as_mut() { if last_sortition.is_timed_out(self.config.block_proposal_timeout, signer_db)? { info!( @@ -251,14 +286,7 @@ impl SortitionsView { "last_sortition_consensus_hash" => ?self.last_sortition.as_ref().map(|x| x.consensus_hash), ); self.reset_view(client)?; - return self.check_proposal( - client, - signer_db, - block, - block_pk, - reward_cycle, - false, - ); + return self.check_proposal(client, signer_db, block, block_pk, false); } warn!( "Miner block proposal has consensus hash that is neither the current or last sortition. Considering invalid."; @@ -304,6 +332,7 @@ impl SortitionsView { "Miner block proposal is from last sortition winner, when the new sortition winner is still valid. Considering proposal invalid."; "proposed_block_consensus_hash" => %block.header.consensus_hash, "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "current_sortition_miner_status" => ?self.cur_sortition.miner_status, ); return Ok(false); } @@ -315,7 +344,6 @@ impl SortitionsView { &proposed_by, tenure_change, block, - reward_cycle, signer_db, client, )? 
{ @@ -334,14 +362,23 @@ impl SortitionsView { // in tenure extends, we need to check: // (1) if this is the most recent sortition, an extend is allowed if it changes the burnchain view // (2) if this is the most recent sortition, an extend is allowed if enough time has passed to refresh the block limit + let sortition_consensus_hash = proposed_by.state().consensus_hash; let changed_burn_view = - tenure_extend.burn_view_consensus_hash != proposed_by.state().consensus_hash; - let enough_time_passed = Self::tenure_time_passed_block_lim()?; + tenure_extend.burn_view_consensus_hash != sortition_consensus_hash; + let extend_timestamp = signer_db.calculate_tenure_extend_timestamp( + self.config.tenure_idle_timeout, + block, + false, + ); + let epoch_time = get_epoch_time_secs(); + let enough_time_passed = epoch_time > extend_timestamp; if !changed_burn_view && !enough_time_passed { warn!( "Miner block proposal contains a tenure extend, but the burnchain view has not changed and enough time has not passed to refresh the block limit. 
Considering proposal invalid."; "proposed_block_consensus_hash" => %block.header.consensus_hash, "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "extend_timestamp" => extend_timestamp, + "epoch_time" => epoch_time, ); return Ok(false); } @@ -439,6 +476,8 @@ impl SortitionsView { "violating_tenure_proposed_time" => local_block_info.proposed_time, "new_tenure_received_time" => sortition_state_received_time, "new_tenure_burn_timestamp" => sortition_state.burn_header_timestamp, + "first_proposal_burn_block_timing_secs" => first_proposal_burn_block_timing.as_secs(), + "proposal_to_sortition" => proposal_to_sortition, ); continue; } @@ -503,7 +542,6 @@ impl SortitionsView { fn check_tenure_change_confirms_parent( tenure_change: &TenureChangePayload, block: &NakamotoBlock, - reward_cycle: u64, signer_db: &mut SignerDb, client: &StacksClient, tenure_last_block_proposal_timeout: Duration, @@ -548,7 +586,7 @@ impl SortitionsView { // If we have seen this block already, make sure its state is updated to globally accepted. // Otherwise, don't worry about it. if let Ok(Some(mut block_info)) = - signer_db.block_lookup(reward_cycle, &nakamoto_tip.signer_signature_hash()) + signer_db.block_lookup(&nakamoto_tip.signer_signature_hash()) { if block_info.state != BlockState::GloballyAccepted { if let Err(e) = block_info.mark_globally_accepted() { @@ -583,7 +621,6 @@ impl SortitionsView { proposed_by: &ProposedBy, tenure_change: &TenureChangePayload, block: &NakamotoBlock, - reward_cycle: u64, signer_db: &mut SignerDb, client: &StacksClient, ) -> Result { @@ -591,7 +628,6 @@ impl SortitionsView { let confirms_expected_parent = Self::check_tenure_change_confirms_parent( tenure_change, block, - reward_cycle, signer_db, client, self.config.tenure_last_block_proposal_timeout, @@ -655,12 +691,6 @@ impl SortitionsView { } } - /// Has the current tenure lasted long enough to extend the block limit? 
- pub fn tenure_time_passed_block_lim() -> Result { - // TODO - Ok(false) - } - /// Fetch a new view of the recent sortitions pub fn fetch_view( config: ProposalEvalConfig, diff --git a/stacks-signer/src/cli.rs b/stacks-signer/src/cli.rs index 4e9067498d..7b666d3762 100644 --- a/stacks-signer/src/cli.rs +++ b/stacks-signer/src/cli.rs @@ -41,11 +41,9 @@ use stacks_common::types::chainstate::StacksPrivateKey; extern crate alloc; -#[derive(Parser, Debug)] -#[command(author, version, about)] -#[command(long_version = VERSION_STRING.as_str())] - /// The CLI arguments for the stacks signer +#[derive(Parser, Debug)] +#[command(author, version, about, long_version = VERSION_STRING.as_str())] pub struct Cli { /// Subcommand action to take #[command(subcommand)] diff --git a/stacks-signer/src/client/mod.rs b/stacks-signer/src/client/mod.rs index 37706368dc..bdaa368567 100644 --- a/stacks-signer/src/client/mod.rs +++ b/stacks-signer/src/client/mod.rs @@ -413,6 +413,8 @@ pub(crate) mod tests { block_proposal_timeout: config.block_proposal_timeout, tenure_last_block_proposal_timeout: config.tenure_last_block_proposal_timeout, block_proposal_validation_timeout: config.block_proposal_validation_timeout, + tenure_idle_timeout: config.tenure_idle_timeout, + block_proposal_max_age_secs: config.block_proposal_max_age_secs, } } diff --git a/stacks-signer/src/client/stackerdb.rs b/stacks-signer/src/client/stackerdb.rs index 117dd4814f..934686d1c2 100644 --- a/stacks-signer/src/client/stackerdb.rs +++ b/stacks-signer/src/client/stackerdb.rs @@ -236,7 +236,8 @@ mod tests { use clarity::util::hash::{MerkleTree, Sha512Trunc256Sum}; use clarity::util::secp256k1::MessageSignature; use libsigner::v0::messages::{ - BlockRejection, BlockResponse, RejectCode, SignerMessage, SignerMessageMetadata, + BlockRejection, BlockResponse, BlockResponseData, RejectCode, SignerMessage, + SignerMessageMetadata, }; use rand::{thread_rng, RngCore}; @@ -286,6 +287,7 @@ mod tests { chain_id: 
thread_rng().next_u32(), signature: MessageSignature::empty(), metadata: SignerMessageMetadata::empty(), + response_data: BlockResponseData::new(thread_rng().next_u64()), }; let signer_message = SignerMessage::BlockResponse(BlockResponse::Rejected(block_reject)); let ack = StackerDBChunkAckData { diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index cae6a210b7..4676738629 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -173,6 +173,9 @@ impl StacksClient { &self, consensus_hash: &ConsensusHash, ) -> Result { + debug!("StacksClient: Getting tenure tip"; + "consensus_hash" => %consensus_hash, + ); let send_request = || { self.stacks_node_client .get(self.tenure_tip_path(consensus_hash)) @@ -192,6 +195,7 @@ impl StacksClient { /// Get the last set reward cycle stored within the stackerdb contract pub fn get_last_set_cycle(&self) -> Result { + debug!("StacksClient: Getting last set cycle"); let signer_stackerdb_contract_id = boot_code_id(SIGNERS_NAME, self.mainnet); let function_name_str = "get-last-set-cycle"; let function_name = ClarityName::from(function_name_str); @@ -210,6 +214,10 @@ impl StacksClient { stackerdb_contract: &QualifiedContractIdentifier, page: u32, ) -> Result, ClientError> { + debug!("StacksClient: Getting signer slots"; + "stackerdb_contract" => %stackerdb_contract, + "page" => page, + ); let function_name_str = "stackerdb-get-signer-slots-page"; let function_name = ClarityName::from(function_name_str); let function_args = &[ClarityValue::UInt(page.into())]; @@ -250,6 +258,9 @@ impl StacksClient { &self, reward_cycle: u64, ) -> Result, ClientError> { + debug!("StacksClient: Getting parsed signer slots"; + "reward_cycle" => reward_cycle, + ); let signer_set = u32::try_from(reward_cycle % 2).expect("FATAL: reward_cycle % 2 exceeds u32::MAX"); let signer_stackerdb_contract_id = boot_code_id(SIGNERS_NAME, self.mainnet); @@ -272,6 +283,7 @@ impl 
StacksClient { /// Determine the stacks node current epoch pub fn get_node_epoch(&self) -> Result { + debug!("StacksClient: Getting node epoch"); let pox_info = self.get_pox_data()?; let burn_block_height = self.get_burn_block_height()?; @@ -302,7 +314,7 @@ impl StacksClient { /// Submit the block proposal to the stacks node. The block will be validated and returned via the HTTP endpoint for Block events. pub fn submit_block_for_validation(&self, block: NakamotoBlock) -> Result<(), ClientError> { - debug!("stacks_node_client: Submitting block for validation..."; + debug!("StacksClient: Submitting block for validation"; "signer_sighash" => %block.header.signer_signature_hash(), "block_id" => %block.header.block_id(), "block_height" => %block.header.chain_length, @@ -337,6 +349,10 @@ impl StacksClient { chosen_parent: &ConsensusHash, last_sortition: &ConsensusHash, ) -> Result, ClientError> { + debug!("StacksClient: Getting tenure forking info"; + "chosen_parent" => %chosen_parent, + "last_sortition" => %last_sortition, + ); let mut tenures: VecDeque = self.get_tenure_forking_info_step(chosen_parent, last_sortition)?; if tenures.is_empty() { @@ -373,7 +389,7 @@ impl StacksClient { chosen_parent: &ConsensusHash, last_sortition: &ConsensusHash, ) -> Result, ClientError> { - debug!("stacks_node_client: Getting tenure forking info..."; + debug!("StacksClient: Getting tenure forking info"; "chosen_parent" => %chosen_parent, "last_sortition" => %last_sortition, ); @@ -402,7 +418,7 @@ impl StacksClient { /// Get the current winning sortition and the last winning sortition pub fn get_current_and_last_sortition(&self) -> Result { - debug!("stacks_node_client: Getting current and prior sortition..."); + debug!("StacksClient: Getting current and prior sortition"); let path = format!("{}/latest_and_last", self.sortition_info_path()); let timer = crate::monitoring::new_rpc_call_timer(&path, &self.http_origin); let send_request = || { @@ -443,7 +459,7 @@ impl StacksClient { /// 
Get the current peer info data from the stacks node pub fn get_peer_info(&self) -> Result { - debug!("stacks_node_client: Getting peer info..."); + debug!("StacksClient: Getting peer info"); let timer = crate::monitoring::new_rpc_call_timer(&self.core_info_path(), &self.http_origin); let send_request = || { @@ -466,7 +482,9 @@ impl StacksClient { &self, reward_cycle: u64, ) -> Result>, ClientError> { - debug!("stacks_node_client: Getting reward set signers for reward cycle {reward_cycle}..."); + debug!("StacksClient: Getting reward set signers"; + "reward_cycle" => reward_cycle, + ); let timer = crate::monitoring::new_rpc_call_timer( &format!("{}/v3/stacker_set/:reward_cycle", self.http_origin), &self.http_origin, @@ -502,7 +520,7 @@ impl StacksClient { /// Retrieve the current pox data from the stacks node pub fn get_pox_data(&self) -> Result { - debug!("stacks_node_client: Getting pox data..."); + debug!("StacksClient: Getting pox data"); let timer = crate::monitoring::new_rpc_call_timer(&self.pox_path(), &self.http_origin); let send_request = || { self.stacks_node_client @@ -521,11 +539,13 @@ impl StacksClient { /// Helper function to retrieve the burn tip height from the stacks node fn get_burn_block_height(&self) -> Result { + debug!("StacksClient: Getting burn block height"); self.get_peer_info().map(|info| info.burn_block_height) } /// Get the current reward cycle info from the stacks node pub fn get_current_reward_cycle_info(&self) -> Result { + debug!("StacksClient: Getting current reward cycle info"); let pox_data = self.get_pox_data()?; let blocks_mined = pox_data .current_burnchain_block_height @@ -548,7 +568,9 @@ impl StacksClient { &self, address: &StacksAddress, ) -> Result { - debug!("stacks_node_client: Getting account info..."); + debug!("StacksClient: Getting account info"; + "address" => %address, + ); let timer_label = format!("{}/v2/accounts/:principal", self.http_origin); let timer = crate::monitoring::new_rpc_call_timer(&timer_label, 
&self.http_origin); let send_request = || { @@ -570,6 +592,11 @@ impl StacksClient { /// /// In tests, this panics if the retry takes longer than 30 seconds. pub fn post_block_until_ok(&self, log_fmt: &F, block: &NakamotoBlock) -> bool { + debug!("StacksClient: Posting block to stacks node"; + "signer_sighash" => %block.header.signer_signature_hash(), + "block_id" => %block.header.block_id(), + "block_height" => %block.header.chain_length, + ); let start_time = Instant::now(); loop { match self.post_block(block) { @@ -595,7 +622,8 @@ impl StacksClient { /// Returns `true` if the block was accepted or `false` if the block /// was rejected. pub fn post_block(&self, block: &NakamotoBlock) -> Result { - debug!("stacks_node_client: Posting block to the stacks node..."; + debug!("StacksClient: Posting block to the stacks node"; + "signer_sighash" => %block.header.signer_signature_hash(), "block_id" => %block.header.block_id(), "block_height" => %block.header.chain_length, ); @@ -630,7 +658,9 @@ impl StacksClient { function_name: &ClarityName, function_args: &[ClarityValue], ) -> Result { - debug!("stacks_node_client: Calling read-only function {function_name} with args {function_args:?}..."); + debug!( + "StacksClient: Calling read-only function {function_name} with args {function_args:?}" + ); let args = function_args .iter() .filter_map(|arg| arg.serialize_to_hex().ok()) diff --git a/stacks-signer/src/config.rs b/stacks-signer/src/config.rs index 57c90ab0eb..d2d526a589 100644 --- a/stacks-signer/src/config.rs +++ b/stacks-signer/src/config.rs @@ -38,6 +38,8 @@ const BLOCK_PROPOSAL_TIMEOUT_MS: u64 = 600_000; const BLOCK_PROPOSAL_VALIDATION_TIMEOUT_MS: u64 = 120_000; const DEFAULT_FIRST_PROPOSAL_BURN_BLOCK_TIMING_SECS: u64 = 60; const DEFAULT_TENURE_LAST_BLOCK_PROPOSAL_TIMEOUT_SECS: u64 = 30; +const TENURE_IDLE_TIMEOUT_SECS: u64 = 300; +const DEFAULT_BLOCK_PROPOSAL_MAX_AGE_SECS: u64 = 600; #[derive(thiserror::Error, Debug)] /// An error occurred parsing the provided 
configuration @@ -135,6 +137,10 @@ pub struct SignerConfig { pub tenure_last_block_proposal_timeout: Duration, /// How much time to wait for a block proposal validation response before marking the block invalid pub block_proposal_validation_timeout: Duration, + /// How much idle time must pass before allowing a tenure extend + pub tenure_idle_timeout: Duration, + /// The maximum age of a block proposal in seconds that will be processed by the signer + pub block_proposal_max_age_secs: u64, } /// The parsed configuration for the signer @@ -171,6 +177,10 @@ pub struct GlobalConfig { /// How long to wait for a response from a block proposal validation response from the node /// before marking that block as invalid and rejecting it pub block_proposal_validation_timeout: Duration, + /// How much idle time must pass before allowing a tenure extend + pub tenure_idle_timeout: Duration, + /// The maximum age of a block proposal that will be processed by the signer + pub block_proposal_max_age_secs: u64, } /// Internal struct for loading up the config file @@ -206,6 +216,10 @@ struct RawConfigFile { /// How long to wait (in millisecs) for a response from a block proposal validation response from the node /// before marking that block as invalid and rejecting it pub block_proposal_validation_timeout_ms: Option, + /// How much idle time (in seconds) must pass before a tenure extend is allowed + pub tenure_idle_timeout_secs: Option, + /// The maximum age of a block proposal (in secs) that will be processed by the signer. 
+ pub block_proposal_max_age_secs: Option, } impl RawConfigFile { @@ -297,6 +311,16 @@ impl TryFrom for GlobalConfig { .unwrap_or(BLOCK_PROPOSAL_VALIDATION_TIMEOUT_MS), ); + let tenure_idle_timeout = Duration::from_secs( + raw_data + .tenure_idle_timeout_secs + .unwrap_or(TENURE_IDLE_TIMEOUT_SECS), + ); + + let block_proposal_max_age_secs = raw_data + .block_proposal_max_age_secs + .unwrap_or(DEFAULT_BLOCK_PROPOSAL_MAX_AGE_SECS); + Ok(Self { node_host: raw_data.node_host, endpoint, @@ -312,6 +336,8 @@ impl TryFrom for GlobalConfig { chain_id: raw_data.chain_id, tenure_last_block_proposal_timeout, block_proposal_validation_timeout, + tenure_idle_timeout, + block_proposal_max_age_secs, }) } } diff --git a/stacks-signer/src/runloop.rs b/stacks-signer/src/runloop.rs index c8f6041478..018d35a4d2 100644 --- a/stacks-signer/src/runloop.rs +++ b/stacks-signer/src/runloop.rs @@ -285,6 +285,8 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo block_proposal_timeout: self.config.block_proposal_timeout, tenure_last_block_proposal_timeout: self.config.tenure_last_block_proposal_timeout, block_proposal_validation_timeout: self.config.block_proposal_validation_timeout, + tenure_idle_timeout: self.config.tenure_idle_timeout, + block_proposal_max_age_secs: self.config.block_proposal_max_age_secs, })) } @@ -422,23 +424,15 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo let mut to_delete = Vec::new(); for (idx, signer) in &mut self.stacks_signers { let reward_cycle = signer.reward_cycle(); - let next_reward_cycle = reward_cycle.wrapping_add(1); - let stale = match next_reward_cycle.cmp(¤t_reward_cycle) { - std::cmp::Ordering::Less => true, // We are more than one reward cycle behind, so we are stale - std::cmp::Ordering::Equal => { - // We are the next reward cycle, so check if we were registered and have any pending blocks to process - match signer { - ConfiguredSigner::RegisteredSigner(signer) => { - !signer.has_unprocessed_blocks() - } - _ => true, - } + if 
reward_cycle >= current_reward_cycle { + // We are either the current or a future reward cycle, so we are not stale. + continue; + } + if let ConfiguredSigner::RegisteredSigner(signer) = signer { + if !signer.has_unprocessed_blocks() { + debug!("{signer}: Signer's tenure has completed."); + to_delete.push(*idx); } - std::cmp::Ordering::Greater => false, // We are the current reward cycle, so we are not stale - }; - if stale { - debug!("{signer}: Signer's tenure has completed."); - to_delete.push(*idx); } } for idx in to_delete { diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index 6f7d91bc6d..4cdc61471a 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -16,15 +16,17 @@ use std::fmt::Display; use std::path::Path; -use std::time::SystemTime; +use std::time::{Duration, SystemTime}; use blockstack_lib::chainstate::nakamoto::NakamotoBlock; +use blockstack_lib::chainstate::stacks::TransactionPayload; use blockstack_lib::util_lib::db::{ query_row, query_rows, sqlite_open, table_exists, tx_begin_immediate, u64_to_sql, Error as DBError, }; use clarity::types::chainstate::{BurnchainHeaderHash, StacksAddress}; use libsigner::BlockProposal; +use rusqlite::functions::FunctionFlags; use rusqlite::{ params, Connection, Error as SqliteError, OpenFlags, OptionalExtension, Transaction, }; @@ -157,6 +159,8 @@ pub struct BlockInfo { pub signed_group: Option, /// The block state relative to the signer's view of the stacks blockchain pub state: BlockState, + /// Consumed processing time in milliseconds to validate this block + pub validation_time_ms: Option, /// Extra data specific to v0, v1, etc. 
pub ext: ExtraBlockInfo, } @@ -175,10 +179,20 @@ impl From for BlockInfo { signed_group: None, ext: ExtraBlockInfo::default(), state: BlockState::Unprocessed, + validation_time_ms: None, } } } impl BlockInfo { + /// Whether the block is a tenure change block or not + pub fn is_tenure_change(&self) -> bool { + self.block + .txs + .first() + .map(|tx| matches!(tx.payload, TransactionPayload::TenureChange(_))) + .unwrap_or(false) + } + /// Mark this block as locally accepted, valid, signed over, and records either the self or group signed timestamp in the block info if it wasn't /// already set. pub fn mark_locally_accepted(&mut self, group_signed: bool) -> Result<(), String> { @@ -230,18 +244,10 @@ impl BlockInfo { } match state { BlockState::Unprocessed => false, - BlockState::LocallyAccepted => { - matches!( - prev_state, - BlockState::Unprocessed | BlockState::LocallyAccepted - ) - } - BlockState::LocallyRejected => { - matches!( - prev_state, - BlockState::Unprocessed | BlockState::LocallyRejected - ) - } + BlockState::LocallyAccepted | BlockState::LocallyRejected => !matches!( + prev_state, + BlockState::GloballyRejected | BlockState::GloballyAccepted + ), BlockState::GloballyAccepted => !matches!(prev_state, BlockState::GloballyRejected), BlockState::GloballyRejected => !matches!(prev_state, BlockState::GloballyAccepted), } @@ -324,6 +330,18 @@ static CREATE_INDEXES_3: &str = r#" CREATE INDEX IF NOT EXISTS block_rejection_signer_addrs_on_block_signature_hash ON block_rejection_signer_addrs(signer_signature_hash); "#; +static CREATE_INDEXES_4: &str = r#" +CREATE INDEX IF NOT EXISTS blocks_state ON blocks ((json_extract(block_info, '$.state'))); +CREATE INDEX IF NOT EXISTS blocks_signed_group ON blocks ((json_extract(block_info, '$.signed_group'))); +"#; + +static CREATE_INDEXES_5: &str = r#" +CREATE INDEX IF NOT EXISTS blocks_signed_over ON blocks (consensus_hash, signed_over); +CREATE INDEX IF NOT EXISTS blocks_consensus_hash_state ON blocks (consensus_hash, 
state); +CREATE INDEX IF NOT EXISTS blocks_state ON blocks (state); +CREATE INDEX IF NOT EXISTS blocks_signed_group ON blocks (signed_group); +"#; + static CREATE_SIGNER_STATE_TABLE: &str = " CREATE TABLE IF NOT EXISTS signer_states ( reward_cycle INTEGER PRIMARY KEY, @@ -368,7 +386,7 @@ CREATE TABLE IF NOT EXISTS block_signatures ( -- the sighash is sufficient to uniquely identify the block across all burnchain, PoX, -- and stacks forks. signer_signature_hash TEXT NOT NULL, - -- signtaure itself + -- signature itself signature TEXT NOT NULL, PRIMARY KEY (signature) ) STRICT;"#; @@ -385,6 +403,69 @@ CREATE TABLE IF NOT EXISTS block_rejection_signer_addrs ( PRIMARY KEY (signer_addr) ) STRICT;"#; +// Migration logic necessary to move blocks from the old blocks table to the new blocks table +static MIGRATE_BLOCKS_TABLE_2_BLOCKS_TABLE_3: &str = r#" +CREATE TABLE IF NOT EXISTS temp_blocks ( + -- The block sighash commits to all of the stacks and burnchain state as of its parent, + -- as well as the tenure itself so there's no need to include the reward cycle. Just + -- the sighash is sufficient to uniquely identify the block across all burnchain, PoX, + -- and stacks forks. 
+ signer_signature_hash TEXT NOT NULL PRIMARY KEY, + reward_cycle INTEGER NOT NULL, + block_info TEXT NOT NULL, + consensus_hash TEXT NOT NULL, + signed_over INTEGER NOT NULL, + broadcasted INTEGER, + stacks_height INTEGER NOT NULL, + burn_block_height INTEGER NOT NULL, + valid INTEGER, + state TEXT NOT NULL, + signed_group INTEGER, + signed_self INTEGER, + proposed_time INTEGER NOT NULL, + validation_time_ms INTEGER, + tenure_change INTEGER NOT NULL +) STRICT; + +INSERT INTO temp_blocks ( + signer_signature_hash, + reward_cycle, + block_info, + consensus_hash, + signed_over, + broadcasted, + stacks_height, + burn_block_height, + valid, + state, + signed_group, + signed_self, + proposed_time, + validation_time_ms, + tenure_change +) +SELECT + signer_signature_hash, + reward_cycle, + block_info, + consensus_hash, + signed_over, + broadcasted, + stacks_height, + burn_block_height, + json_extract(block_info, '$.valid') AS valid, + json_extract(block_info, '$.state') AS state, + json_extract(block_info, '$.signed_group') AS signed_group, + json_extract(block_info, '$.signed_self') AS signed_self, + json_extract(block_info, '$.proposed_time') AS proposed_time, + json_extract(block_info, '$.validation_time_ms') AS validation_time_ms, + is_tenure_change(block_info) AS tenure_change +FROM blocks; + +DROP TABLE blocks; + +ALTER TABLE temp_blocks RENAME TO blocks;"#; + static SCHEMA_1: &[&str] = &[ DROP_SCHEMA_0, CREATE_DB_CONFIG, @@ -421,9 +502,21 @@ static SCHEMA_3: &[&str] = &[ "INSERT INTO db_config (version) VALUES (3);", ]; +static SCHEMA_4: &[&str] = &[ + CREATE_INDEXES_4, + "INSERT OR REPLACE INTO db_config (version) VALUES (4);", +]; + +static SCHEMA_5: &[&str] = &[ + MIGRATE_BLOCKS_TABLE_2_BLOCKS_TABLE_3, + CREATE_INDEXES_5, + "DELETE FROM db_config;", // Be extra careful. Make sure there is only ever one row in the table. + "INSERT INTO db_config (version) VALUES (5);", +]; + impl SignerDb { /// The current schema version used in this build of the signer binary. 
- pub const SCHEMA_VERSION: u32 = 3; + pub const SCHEMA_VERSION: u32 = 5; /// Create a new `SignerState` instance. /// This will create a new SQLite database at the given path @@ -443,7 +536,7 @@ impl SignerDb { return Ok(0); } let result = conn - .query_row("SELECT version FROM db_config LIMIT 1", [], |row| { + .query_row("SELECT MAX(version) FROM db_config LIMIT 1", [], |row| { row.get(0) }) .optional(); @@ -495,10 +588,63 @@ impl SignerDb { Ok(()) } + /// Migrate from schema 3 to schema 4 + fn schema_4_migration(tx: &Transaction) -> Result<(), DBError> { + if Self::get_schema_version(tx)? >= 4 { + // no migration necessary + return Ok(()); + } + + for statement in SCHEMA_4.iter() { + tx.execute_batch(statement)?; + } + + Ok(()) + } + + /// Migrate from schema 4 to schema 5 + fn schema_5_migration(tx: &Transaction) -> Result<(), DBError> { + if Self::get_schema_version(tx)? >= 5 { + // no migration necessary + return Ok(()); + } + + for statement in SCHEMA_5.iter() { + tx.execute_batch(statement)?; + } + + Ok(()) + } + + /// Register custom scalar functions used by the database + fn register_scalar_functions(&self) -> Result<(), DBError> { + // Register helper function for determining if a block is a tenure change transaction + // Required only for data migration from Schema 4 to Schema 5 + self.db.create_scalar_function( + "is_tenure_change", + 1, + FunctionFlags::SQLITE_UTF8 | FunctionFlags::SQLITE_DETERMINISTIC, + |ctx| { + let value = ctx.get::(0)?; + let block_info = serde_json::from_str::(&value) + .map_err(|e| SqliteError::UserFunctionError(e.into()))?; + Ok(block_info.is_tenure_change()) + }, + )?; + Ok(()) + } + + /// Drop registered scalar functions used only for data migrations + fn remove_scalar_functions(&self) -> Result<(), DBError> { + self.db.remove_function("is_tenure_change", 1)?; + Ok(()) + } + /// Either instantiate a new database, or migrate an existing one /// If the detected version of the existing database is 0 (i.e., a pre-migration /// 
logic DB, the DB will be dropped). fn create_or_migrate(&mut self) -> Result<(), DBError> { + self.register_scalar_functions()?; let sql_tx = tx_begin_immediate(&mut self.db)?; loop { let version = Self::get_schema_version(&sql_tx)?; @@ -506,7 +652,9 @@ impl SignerDb { 0 => Self::schema_1_migration(&sql_tx)?, 1 => Self::schema_2_migration(&sql_tx)?, 2 => Self::schema_3_migration(&sql_tx)?, - 3 => break, + 3 => Self::schema_4_migration(&sql_tx)?, + 4 => Self::schema_5_migration(&sql_tx)?, + 5 => break, x => return Err(DBError::Other(format!( "Database schema is newer than supported by this binary. Expected version = {}, Database version = {x}", Self::SCHEMA_VERSION, @@ -514,6 +662,7 @@ impl SignerDb { } } sql_tx.commit()?; + self.remove_scalar_functions()?; Ok(()) } @@ -552,15 +701,11 @@ impl SignerDb { /// Fetch a block from the database using the block's /// `signer_signature_hash` - pub fn block_lookup( - &self, - reward_cycle: u64, - hash: &Sha512Trunc256Sum, - ) -> Result, DBError> { + pub fn block_lookup(&self, hash: &Sha512Trunc256Sum) -> Result, DBError> { let result: Option = query_row( &self.db, - "SELECT block_info FROM blocks WHERE reward_cycle = ? 
AND signer_signature_hash = ?", - params![u64_to_sql(reward_cycle)?, hash.to_string()], + "SELECT block_info FROM blocks WHERE signer_signature_hash = ?", + params![hash.to_string()], )?; try_deserialize(result) @@ -593,7 +738,7 @@ impl SignerDb { &self, tenure: &ConsensusHash, ) -> Result, DBError> { - let query = "SELECT block_info FROM blocks WHERE consensus_hash = ?1 AND json_extract(block_info, '$.state') IN (?2, ?3) ORDER BY stacks_height DESC LIMIT 1"; + let query = "SELECT block_info FROM blocks WHERE consensus_hash = ?1 AND state IN (?2, ?3) ORDER BY stacks_height DESC LIMIT 1"; let args = params![ tenure, &BlockState::GloballyAccepted.to_string(), @@ -609,13 +754,22 @@ impl SignerDb { &self, tenure: &ConsensusHash, ) -> Result, DBError> { - let query = "SELECT block_info FROM blocks WHERE consensus_hash = ?1 AND json_extract(block_info, '$.state') = ?2 ORDER BY stacks_height DESC LIMIT 1"; + let query = "SELECT block_info FROM blocks WHERE consensus_hash = ?1 AND state = ?2 ORDER BY stacks_height DESC LIMIT 1"; let args = params![tenure, &BlockState::GloballyAccepted.to_string()]; let result: Option = query_row(&self.db, query, args)?; try_deserialize(result) } + /// Return the canonical tip -- the last globally accepted block. 
+ pub fn get_canonical_tip(&self) -> Result, DBError> { + let query = "SELECT block_info FROM blocks WHERE state = ?1 ORDER BY stacks_height DESC, signed_group DESC LIMIT 1"; + let args = params![&BlockState::GloballyAccepted.to_string()]; + let result: Option = query_row(&self.db, query, args)?; + + try_deserialize(result) + } + /// Insert or replace a burn block into the database pub fn insert_burn_block( &mut self, @@ -663,12 +817,12 @@ impl SignerDb { serde_json::to_string(&block_info).expect("Unable to serialize block info"); let hash = &block_info.signer_signature_hash(); let block_id = &block_info.block.block_id(); - let signed_over = &block_info.signed_over; + let signed_over = block_info.signed_over; let vote = block_info .vote .as_ref() .map(|v| if v.rejected { "REJECT" } else { "ACCEPT" }); - let broadcasted = self.get_block_broadcasted(block_info.reward_cycle, hash)?; + let broadcasted = self.get_block_broadcasted(hash)?; debug!("Inserting block_info."; "reward_cycle" => %block_info.reward_cycle, "burn_block_height" => %block_info.burn_block_height, @@ -678,24 +832,28 @@ impl SignerDb { "broadcasted" => ?broadcasted, "vote" => vote ); - self.db - .execute( - "INSERT OR REPLACE INTO blocks (reward_cycle, burn_block_height, signer_signature_hash, block_info, signed_over, broadcasted, stacks_height, consensus_hash) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8)", - params![ - u64_to_sql(block_info.reward_cycle)?, u64_to_sql(block_info.burn_block_height)?, hash.to_string(), block_json, - signed_over, - &broadcasted, - u64_to_sql(block_info.block.header.chain_length)?, - block_info.block.header.consensus_hash.to_hex(), - ], - )?; - + self.db.execute("INSERT OR REPLACE INTO blocks (reward_cycle, burn_block_height, signer_signature_hash, block_info, signed_over, broadcasted, stacks_height, consensus_hash, valid, state, signed_group, signed_self, proposed_time, validation_time_ms, tenure_change) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14, 
?15)", params![ + u64_to_sql(block_info.reward_cycle)?, + u64_to_sql(block_info.burn_block_height)?, + hash.to_string(), + block_json, + &block_info.signed_over, + &broadcasted, + u64_to_sql(block_info.block.header.chain_length)?, + block_info.block.header.consensus_hash.to_hex(), + &block_info.valid, &block_info.state.to_string(), + &block_info.signed_group, + &block_info.signed_self, + &block_info.proposed_time, + &block_info.validation_time_ms, + &block_info.is_tenure_change() + ])?; Ok(()) } /// Determine if there are any unprocessed blocks pub fn has_unprocessed_blocks(&self, reward_cycle: u64) -> Result { - let query = "SELECT block_info FROM blocks WHERE reward_cycle = ?1 AND json_extract(block_info, '$.state') = ?2 LIMIT 1"; + let query = "SELECT block_info FROM blocks WHERE reward_cycle = ?1 AND state = ?2 LIMIT 1"; let result: Option = query_row( &self.db, query, @@ -773,17 +931,11 @@ impl SignerDb { /// Mark a block as having been broadcasted and therefore GloballyAccepted pub fn set_block_broadcasted( &self, - reward_cycle: u64, block_sighash: &Sha512Trunc256Sum, ts: u64, ) -> Result<(), DBError> { - let qry = "UPDATE blocks SET broadcasted = ?1, block_info = json_set(block_info, '$.state', ?2) WHERE reward_cycle = ?3 AND signer_signature_hash = ?4"; - let args = params![ - u64_to_sql(ts)?, - BlockState::GloballyAccepted.to_string(), - u64_to_sql(reward_cycle)?, - block_sighash - ]; + let qry = "UPDATE blocks SET broadcasted = ?1 WHERE signer_signature_hash = ?2"; + let args = params![u64_to_sql(ts)?, block_sighash]; debug!("Marking block {} as broadcasted at {}", block_sighash, ts); self.db.execute(qry, args)?; @@ -793,12 +945,11 @@ impl SignerDb { /// Get the timestamp at which the block was broadcasted. 
pub fn get_block_broadcasted( &self, - reward_cycle: u64, block_sighash: &Sha512Trunc256Sum, ) -> Result, DBError> { let qry = - "SELECT IFNULL(broadcasted,0) AS broadcasted FROM blocks WHERE reward_cycle = ?1 AND signer_signature_hash = ?2"; - let args = params![u64_to_sql(reward_cycle)?, block_sighash]; + "SELECT IFNULL(broadcasted,0) AS broadcasted FROM blocks WHERE signer_signature_hash = ?"; + let args = params![block_sighash]; let Some(broadcasted): Option = query_row(&self.db, qry, args)? else { return Ok(None); @@ -812,11 +963,10 @@ impl SignerDb { /// Get the current state of a given block in the database pub fn get_block_state( &self, - reward_cycle: u64, block_sighash: &Sha512Trunc256Sum, ) -> Result, DBError> { - let qry = "SELECT json_extract(block_info, '$.state') FROM blocks WHERE reward_cycle = ?1 AND signer_signature_hash = ?2 LIMIT 1"; - let args = params![&u64_to_sql(reward_cycle)?, block_sighash]; + let qry = "SELECT state FROM blocks WHERE signer_signature_hash = ?1 LIMIT 1"; + let args = params![block_sighash]; let state_opt: Option = query_row(&self.db, qry, args)?; let Some(state) = state_opt else { return Ok(None); @@ -825,6 +975,69 @@ impl SignerDb { BlockState::try_from(state.as_str()).map_err(|_| DBError::Corruption)?, )) } + + /// Return the start time (epoch time in seconds) and the processing time in milliseconds of the tenure (identified by consensus_hash).
+ fn get_tenure_times(&self, tenure: &ConsensusHash) -> Result<(u64, u64), DBError> { + let query = "SELECT tenure_change, proposed_time, validation_time_ms FROM blocks WHERE consensus_hash = ?1 AND state = ?2 ORDER BY stacks_height DESC"; + let args = params![tenure, BlockState::GloballyAccepted.to_string()]; + let mut stmt = self.db.prepare(query)?; + let rows = stmt.query_map(args, |row| { + let tenure_change_block: bool = row.get(0)?; + let proposed_time: u64 = row.get(1)?; + let validation_time_ms: Option = row.get(2)?; + Ok((tenure_change_block, proposed_time, validation_time_ms)) + })?; + let mut tenure_processing_time_ms = 0_u64; + let mut tenure_start_time = None; + let mut nmb_rows = 0; + for (i, row) in rows.enumerate() { + nmb_rows += 1; + let (tenure_change_block, proposed_time, validation_time_ms) = row?; + tenure_processing_time_ms = + tenure_processing_time_ms.saturating_add(validation_time_ms.unwrap_or(0)); + tenure_start_time = Some(proposed_time); + if tenure_change_block { + debug!("Found tenure change block {i} blocks ago in tenure {tenure}"); + break; + } + } + debug!("Calculated tenure extend timestamp from {nmb_rows} blocks in tenure {tenure}"); + Ok(( + tenure_start_time.unwrap_or(get_epoch_time_secs()), + tenure_processing_time_ms, + )) + } + + /// Calculate the tenure extend timestamp. If determining the timestamp for a block rejection, check_tenure_extend should be set to false to avoid recalculating + /// the tenure extend timestamp for a tenure extend block. + pub fn calculate_tenure_extend_timestamp( + &self, + tenure_idle_timeout: Duration, + block: &NakamotoBlock, + check_tenure_extend: bool, + ) -> u64 { + if check_tenure_extend && block.get_tenure_tx_payload().is_some() { + let tenure_extend_timestamp = + get_epoch_time_secs().wrapping_add(tenure_idle_timeout.as_secs()); + debug!("Calculated tenure extend timestamp for a tenure extend block.
Rolling over timestamp: {tenure_extend_timestamp}"); + return tenure_extend_timestamp; + } + let tenure_idle_timeout_secs = tenure_idle_timeout.as_secs(); + let (tenure_start_time, tenure_process_time_ms) = self.get_tenure_times(&block.header.consensus_hash).inspect_err(|e| error!("Error occurred calculating tenure extend timestamp: {e:?}. Defaulting to {tenure_idle_timeout_secs} from now.")).unwrap_or((get_epoch_time_secs(), 0)); + // Plus (ms + 999)/1000 to round up to the nearest second + let tenure_extend_timestamp = tenure_start_time + .saturating_add(tenure_idle_timeout_secs) + .saturating_add(tenure_process_time_ms.saturating_add(999) / 1000); + debug!("Calculated tenure extend timestamp"; + "tenure_extend_timestamp" => tenure_extend_timestamp, + "tenure_start_time" => tenure_start_time, + "tenure_process_time_ms" => tenure_process_time_ms, + "tenure_idle_timeout_secs" => tenure_idle_timeout_secs, + "tenure_extend_in" => tenure_extend_timestamp.saturating_sub(get_epoch_time_secs()), + "consensus_hash" => %block.header.consensus_hash, + ); + tenure_extend_timestamp + } } fn try_deserialize(s: Option) -> Result, DBError> @@ -837,22 +1050,18 @@ where .map_err(DBError::SerializationError) } -#[cfg(test)] -pub fn test_signer_db(db_path: &str) -> SignerDb { - use std::fs; - - if fs::metadata(db_path).is_ok() { - fs::remove_file(db_path).unwrap(); - } - SignerDb::new(db_path).expect("Failed to create signer db") -} - #[cfg(test)] mod tests { use std::fs; use std::path::PathBuf; use blockstack_lib::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader}; + use blockstack_lib::chainstate::stacks::{ + StacksTransaction, TenureChangeCause, TenureChangePayload, TransactionAuth, + TransactionVersion, + }; + use clarity::types::chainstate::{StacksBlockId, StacksPrivateKey, StacksPublicKey}; + use clarity::util::hash::Hash160; use clarity::util::secp256k1::MessageSignature; use libsigner::BlockProposal; @@ -895,39 +1104,42 @@ mod tests { fn 
test_basic_signer_db_with_path(db_path: impl AsRef) { let mut db = SignerDb::new(db_path).expect("Failed to create signer db"); - let (block_info, block_proposal) = create_block(); - let reward_cycle = block_info.reward_cycle; - db.insert_block(&block_info) + let (block_info_1, block_proposal_1) = create_block_override(|b| { + b.block.header.consensus_hash = ConsensusHash([0x01; 20]); + }); + let (block_info_2, block_proposal_2) = create_block_override(|b| { + b.block.header.consensus_hash = ConsensusHash([0x02; 20]); + }); + db.insert_block(&block_info_1) .expect("Unable to insert block into db"); let block_info = db - .block_lookup( - reward_cycle, - &block_proposal.block.header.signer_signature_hash(), - ) + .block_lookup(&block_proposal_1.block.header.signer_signature_hash()) .unwrap() .expect("Unable to get block from db"); - assert_eq!(BlockInfo::from(block_proposal.clone()), block_info); + assert_eq!(BlockInfo::from(block_proposal_1.clone()), block_info); - // Test looking up a block from a different reward cycle + // Test looking up a block with an unknown hash let block_info = db - .block_lookup( - reward_cycle + 1, - &block_proposal.block.header.signer_signature_hash(), - ) + .block_lookup(&block_proposal_2.block.header.signer_signature_hash()) .unwrap(); assert!(block_info.is_none()); + db.insert_block(&block_info_2) + .expect("Unable to insert block into db"); + let block_info = db + .block_lookup(&block_proposal_2.block.header.signer_signature_hash()) + .unwrap() + .expect("Unable to get block from db"); + + assert_eq!(BlockInfo::from(block_proposal_2.clone()), block_info); // test getting the block state let block_state = db - .get_block_state( - reward_cycle, - &block_proposal.block.header.signer_signature_hash(), - ) + .get_block_state(&block_proposal_1.block.header.signer_signature_hash()) .unwrap() .expect("Unable to get block state from db"); - assert_eq!(block_state, BlockInfo::from(block_proposal.clone()).state); + assert_eq!(block_state, 
BlockInfo::from(block_proposal_1.clone()).state); } #[test] @@ -947,15 +1159,11 @@ mod tests { let db_path = tmp_db_path(); let mut db = SignerDb::new(db_path).expect("Failed to create signer db"); let (block_info, block_proposal) = create_block(); - let reward_cycle = block_info.reward_cycle; db.insert_block(&block_info) .expect("Unable to insert block into db"); let block_info = db - .block_lookup( - reward_cycle, - &block_proposal.block.header.signer_signature_hash(), - ) + .block_lookup(&block_proposal.block.header.signer_signature_hash()) .unwrap() .expect("Unable to get block from db"); @@ -981,10 +1189,7 @@ mod tests { .expect("Unable to insert block into db"); let block_info = db - .block_lookup( - reward_cycle, - &block_proposal.block.header.signer_signature_hash(), - ) + .block_lookup(&block_proposal.block.header.signer_signature_hash()) .unwrap() .expect("Unable to get block from db"); @@ -1156,51 +1361,40 @@ mod tests { .expect("Unable to insert block into db"); assert!(db - .get_block_broadcasted( - block_info_1.reward_cycle, - &block_info_1.signer_signature_hash() - ) + .get_block_broadcasted(&block_info_1.signer_signature_hash()) .unwrap() .is_none()); assert_eq!( - db.block_lookup( - block_info_1.reward_cycle, - &block_info_1.signer_signature_hash() - ) - .expect("Unable to get block from db") - .expect("Unable to get block from db") - .state, + db.block_lookup(&block_info_1.signer_signature_hash()) + .expect("Unable to get block from db") + .expect("Unable to get block from db") + .state, BlockState::Unprocessed ); - db.set_block_broadcasted( - block_info_1.reward_cycle, - &block_info_1.signer_signature_hash(), - 12345, - ) - .unwrap(); + assert!(db + .get_last_globally_accepted_block(&block_info_1.block.header.consensus_hash) + .unwrap() + .is_none()); + db.set_block_broadcasted(&block_info_1.signer_signature_hash(), 12345) + .unwrap(); assert_eq!( - db.block_lookup( - block_info_1.reward_cycle, - &block_info_1.signer_signature_hash() - ) - 
.expect("Unable to get block from db") - .expect("Unable to get block from db") - .state, - BlockState::GloballyAccepted + db.block_lookup(&block_info_1.signer_signature_hash()) + .expect("Unable to get block from db") + .expect("Unable to get block from db") + .state, + BlockState::Unprocessed ); db.insert_block(&block_info_1) .expect("Unable to insert block into db a second time"); assert_eq!( - db.get_block_broadcasted( - block_info_1.reward_cycle, - &block_info_1.signer_signature_hash() - ) - .unwrap() - .unwrap(), + db.get_block_broadcasted(&block_info_1.signer_signature_hash()) + .unwrap() + .unwrap(), 12345 ); } + #[test] fn state_machine() { let (mut block, _) = create_block(); @@ -1215,7 +1409,14 @@ mod tests { assert_eq!(block.state, BlockState::LocallyAccepted); assert!(!block.check_state(BlockState::Unprocessed)); assert!(block.check_state(BlockState::LocallyAccepted)); - assert!(!block.check_state(BlockState::LocallyRejected)); + assert!(block.check_state(BlockState::LocallyRejected)); + assert!(block.check_state(BlockState::GloballyAccepted)); + assert!(block.check_state(BlockState::GloballyRejected)); + + block.move_to(BlockState::LocallyRejected).unwrap(); + assert!(!block.check_state(BlockState::Unprocessed)); + assert!(block.check_state(BlockState::LocallyAccepted)); + assert!(block.check_state(BlockState::LocallyRejected)); assert!(block.check_state(BlockState::GloballyAccepted)); assert!(block.check_state(BlockState::GloballyRejected)); @@ -1227,19 +1428,333 @@ mod tests { assert!(block.check_state(BlockState::GloballyAccepted)); assert!(!block.check_state(BlockState::GloballyRejected)); - // Must manually override as will not be able to move from GloballyAccepted to LocallyAccepted - block.state = BlockState::LocallyRejected; - assert!(!block.check_state(BlockState::Unprocessed)); - assert!(!block.check_state(BlockState::LocallyAccepted)); - assert!(block.check_state(BlockState::LocallyRejected)); - 
assert!(block.check_state(BlockState::GloballyAccepted)); - assert!(block.check_state(BlockState::GloballyRejected)); - - block.move_to(BlockState::GloballyRejected).unwrap(); + // Must manually override as will not be able to move from GloballyAccepted to GloballyRejected + block.state = BlockState::GloballyRejected; assert!(!block.check_state(BlockState::Unprocessed)); assert!(!block.check_state(BlockState::LocallyAccepted)); assert!(!block.check_state(BlockState::LocallyRejected)); assert!(!block.check_state(BlockState::GloballyAccepted)); assert!(block.check_state(BlockState::GloballyRejected)); } + + #[test] + fn test_get_canonical_tip() { + let db_path = tmp_db_path(); + let mut db = SignerDb::new(db_path).expect("Failed to create signer db"); + + let (mut block_info_1, _block_proposal_1) = create_block_override(|b| { + b.block.header.miner_signature = MessageSignature([0x01; 65]); + b.block.header.chain_length = 1; + b.burn_height = 1; + }); + + let (mut block_info_2, _block_proposal_2) = create_block_override(|b| { + b.block.header.miner_signature = MessageSignature([0x02; 65]); + b.block.header.chain_length = 2; + b.burn_height = 2; + }); + + db.insert_block(&block_info_1) + .expect("Unable to insert block into db"); + db.insert_block(&block_info_2) + .expect("Unable to insert block into db"); + + assert!(db.get_canonical_tip().unwrap().is_none()); + + block_info_1 + .mark_globally_accepted() + .expect("Failed to mark block as globally accepted"); + db.insert_block(&block_info_1) + .expect("Unable to insert block into db"); + + assert_eq!(db.get_canonical_tip().unwrap().unwrap(), block_info_1); + + block_info_2 + .mark_globally_accepted() + .expect("Failed to mark block as globally accepted"); + db.insert_block(&block_info_2) + .expect("Unable to insert block into db"); + + assert_eq!(db.get_canonical_tip().unwrap().unwrap(), block_info_2); + } + + #[test] + fn get_accepted_blocks() { + let db_path = tmp_db_path(); + let mut db = 
SignerDb::new(db_path).expect("Failed to create signer db"); + let consensus_hash_1 = ConsensusHash([0x01; 20]); + let consensus_hash_2 = ConsensusHash([0x02; 20]); + let consensus_hash_3 = ConsensusHash([0x03; 20]); + let (mut block_info_1, _block_proposal) = create_block_override(|b| { + b.block.header.consensus_hash = consensus_hash_1; + b.block.header.miner_signature = MessageSignature([0x01; 65]); + b.block.header.chain_length = 1; + b.burn_height = 1; + }); + let (mut block_info_2, _block_proposal) = create_block_override(|b| { + b.block.header.consensus_hash = consensus_hash_1; + b.block.header.miner_signature = MessageSignature([0x02; 65]); + b.block.header.chain_length = 2; + b.burn_height = 2; + }); + let (mut block_info_3, _block_proposal) = create_block_override(|b| { + b.block.header.consensus_hash = consensus_hash_1; + b.block.header.miner_signature = MessageSignature([0x03; 65]); + b.block.header.chain_length = 3; + b.burn_height = 3; + }); + let (mut block_info_4, _block_proposal) = create_block_override(|b| { + b.block.header.consensus_hash = consensus_hash_2; + b.block.header.miner_signature = MessageSignature([0x03; 65]); + b.block.header.chain_length = 3; + b.burn_height = 4; + }); + block_info_1.mark_globally_accepted().unwrap(); + block_info_2.mark_locally_accepted(false).unwrap(); + block_info_3.mark_locally_accepted(false).unwrap(); + block_info_4.mark_globally_accepted().unwrap(); + + db.insert_block(&block_info_1).unwrap(); + db.insert_block(&block_info_2).unwrap(); + db.insert_block(&block_info_3).unwrap(); + db.insert_block(&block_info_4).unwrap(); + + // Verify tenure consensus_hash_1 + let block_info = db + .get_last_accepted_block(&consensus_hash_1) + .unwrap() + .unwrap(); + assert_eq!(block_info, block_info_3); + let block_info = db + .get_last_globally_accepted_block(&consensus_hash_1) + .unwrap() + .unwrap(); + assert_eq!(block_info, block_info_1); + + // Verify tenure consensus_hash_2 + let block_info = db + 
.get_last_accepted_block(&consensus_hash_2) + .unwrap() + .unwrap(); + assert_eq!(block_info, block_info_4); + let block_info = db + .get_last_globally_accepted_block(&consensus_hash_2) + .unwrap() + .unwrap(); + assert_eq!(block_info, block_info_4); + + // Verify tenure consensus_hash_3 + assert!(db + .get_last_accepted_block(&consensus_hash_3) + .unwrap() + .is_none()); + assert!(db + .get_last_globally_accepted_block(&consensus_hash_3) + .unwrap() + .is_none()); + } + + fn generate_tenure_blocks() -> Vec { + let tenure_change_payload = TenureChangePayload { + tenure_consensus_hash: ConsensusHash([0x04; 20]), // same as in nakamoto header + prev_tenure_consensus_hash: ConsensusHash([0x01; 20]), + burn_view_consensus_hash: ConsensusHash([0x04; 20]), + previous_tenure_end: StacksBlockId([0x03; 32]), + previous_tenure_blocks: 1, + cause: TenureChangeCause::BlockFound, + pubkey_hash: Hash160::from_node_public_key(&StacksPublicKey::from_private( + &StacksPrivateKey::new(), + )), + }; + let tenure_change_tx_payload = + TransactionPayload::TenureChange(tenure_change_payload.clone()); + let tenure_change_tx = StacksTransaction::new( + TransactionVersion::Testnet, + TransactionAuth::from_p2pkh(&StacksPrivateKey::new()).unwrap(), + tenure_change_tx_payload.clone(), + ); + + let consensus_hash_1 = ConsensusHash([0x01; 20]); + let consensus_hash_2 = ConsensusHash([0x02; 20]); + let (mut block_info_1, _block_proposal) = create_block_override(|b| { + b.block.header.consensus_hash = consensus_hash_1; + b.block.header.miner_signature = MessageSignature([0x01; 65]); + b.block.header.chain_length = 1; + b.burn_height = 1; + }); + block_info_1.state = BlockState::GloballyAccepted; + block_info_1.block.txs.push(tenure_change_tx.clone()); + block_info_1.validation_time_ms = Some(1000); + block_info_1.proposed_time = get_epoch_time_secs() + 500; + + let (mut block_info_2, _block_proposal) = create_block_override(|b| { + b.block.header.consensus_hash = consensus_hash_1; + 
b.block.header.miner_signature = MessageSignature([0x02; 65]); + b.block.header.chain_length = 2; + b.burn_height = 2; + }); + block_info_2.state = BlockState::GloballyAccepted; + block_info_2.validation_time_ms = Some(2000); + block_info_2.proposed_time = block_info_1.proposed_time + 5; + + let (mut block_info_3, _block_proposal) = create_block_override(|b| { + b.block.header.consensus_hash = consensus_hash_1; + b.block.header.miner_signature = MessageSignature([0x03; 65]); + b.block.header.chain_length = 3; + b.burn_height = 2; + }); + block_info_3.state = BlockState::GloballyAccepted; + block_info_3.block.txs.push(tenure_change_tx); + block_info_3.validation_time_ms = Some(5000); + block_info_3.proposed_time = block_info_1.proposed_time + 10; + + // This should have no effect on the time calculations as its not a globally accepted block + let (mut block_info_4, _block_proposal) = create_block_override(|b| { + b.block.header.consensus_hash = consensus_hash_1; + b.block.header.miner_signature = MessageSignature([0x04; 65]); + b.block.header.chain_length = 3; + b.burn_height = 2; + }); + block_info_4.state = BlockState::LocallyAccepted; + block_info_4.validation_time_ms = Some(9000); + block_info_4.proposed_time = block_info_1.proposed_time + 15; + + let (mut block_info_5, _block_proposal) = create_block_override(|b| { + b.block.header.consensus_hash = consensus_hash_2; + b.block.header.miner_signature = MessageSignature([0x05; 65]); + b.block.header.chain_length = 4; + b.burn_height = 3; + }); + block_info_5.state = BlockState::GloballyAccepted; + block_info_5.validation_time_ms = Some(20000); + block_info_5.proposed_time = block_info_1.proposed_time + 20; + + // This should have no effect on the time calculations as its not a globally accepted block + let (mut block_info_6, _block_proposal) = create_block_override(|b| { + b.block.header.consensus_hash = consensus_hash_2; + b.block.header.miner_signature = MessageSignature([0x06; 65]); + 
b.block.header.chain_length = 5; + b.burn_height = 3; + }); + block_info_6.state = BlockState::LocallyAccepted; + block_info_6.validation_time_ms = Some(40000); + block_info_6.proposed_time = block_info_1.proposed_time + 25; + + vec![ + block_info_1, + block_info_2, + block_info_3, + block_info_4, + block_info_5, + block_info_6, + ] + } + + #[test] + fn tenure_times() { + let db_path = tmp_db_path(); + let mut db = SignerDb::new(db_path).expect("Failed to create signer db"); + let block_infos = generate_tenure_blocks(); + let consensus_hash_1 = block_infos[0].block.header.consensus_hash; + let consensus_hash_2 = block_infos.last().unwrap().block.header.consensus_hash; + let consensus_hash_3 = ConsensusHash([0x03; 20]); + + db.insert_block(&block_infos[0]).unwrap(); + db.insert_block(&block_infos[1]).unwrap(); + + // Verify tenure consensus_hash_1 + let (start_time, processing_time) = db.get_tenure_times(&consensus_hash_1).unwrap(); + assert_eq!(start_time, block_infos[0].proposed_time); + assert_eq!(processing_time, 3000); + + db.insert_block(&block_infos[2]).unwrap(); + db.insert_block(&block_infos[3]).unwrap(); + + let (start_time, processing_time) = db.get_tenure_times(&consensus_hash_1).unwrap(); + assert_eq!(start_time, block_infos[2].proposed_time); + assert_eq!(processing_time, 5000); + + db.insert_block(&block_infos[4]).unwrap(); + db.insert_block(&block_infos[5]).unwrap(); + + // Verify tenure consensus_hash_2 + let (start_time, processing_time) = db.get_tenure_times(&consensus_hash_2).unwrap(); + assert_eq!(start_time, block_infos[4].proposed_time); + assert_eq!(processing_time, 20000); + + // Verify tenure consensus_hash_3 (unknown hash) + let (start_time, validation_time) = db.get_tenure_times(&consensus_hash_3).unwrap(); + assert!(start_time < block_infos[0].proposed_time, "Should have been generated from get_epoch_time_secs() making it much older than our artificially late proposal times"); + assert_eq!(validation_time, 0); + } + + #[test] + fn 
tenure_extend_timestamp() { + let db_path = tmp_db_path(); + let mut db = SignerDb::new(db_path).expect("Failed to create signer db"); + + let block_infos = generate_tenure_blocks(); + let mut unknown_block = block_infos[0].block.clone(); + unknown_block.header.consensus_hash = ConsensusHash([0x03; 20]); + + db.insert_block(&block_infos[0]).unwrap(); + db.insert_block(&block_infos[1]).unwrap(); + + let tenure_idle_timeout = Duration::from_secs(10); + // Verify tenure consensus_hash_1 + let timestamp_hash_1_before = + db.calculate_tenure_extend_timestamp(tenure_idle_timeout, &block_infos[0].block, true); + assert_eq!( + timestamp_hash_1_before, + block_infos[0] + .proposed_time + .saturating_add(tenure_idle_timeout.as_secs()) + .saturating_add(3) + ); + + db.insert_block(&block_infos[2]).unwrap(); + db.insert_block(&block_infos[3]).unwrap(); + + let timestamp_hash_1_after = + db.calculate_tenure_extend_timestamp(tenure_idle_timeout, &block_infos[0].block, true); + + assert_eq!( + timestamp_hash_1_after, + block_infos[2] + .proposed_time + .saturating_add(tenure_idle_timeout.as_secs()) + .saturating_add(5) + ); + + db.insert_block(&block_infos[4]).unwrap(); + db.insert_block(&block_infos[5]).unwrap(); + + // Verify tenure consensus_hash_2 + let timestamp_hash_2 = db.calculate_tenure_extend_timestamp( + tenure_idle_timeout, + &block_infos.last().unwrap().block, + true, + ); + assert_eq!( + timestamp_hash_2, + block_infos[4] + .proposed_time + .saturating_add(tenure_idle_timeout.as_secs()) + .saturating_add(20) + ); + + let now = get_epoch_time_secs().saturating_add(tenure_idle_timeout.as_secs()); + let timestamp_hash_2_no_tenure_extend = + db.calculate_tenure_extend_timestamp(tenure_idle_timeout, &block_infos[0].block, false); + assert_ne!(timestamp_hash_2, timestamp_hash_2_no_tenure_extend); + assert!(now < timestamp_hash_2_no_tenure_extend); + + // Verify tenure consensus_hash_3 (unknown hash) + let timestamp_hash_3 = + 
db.calculate_tenure_extend_timestamp(tenure_idle_timeout, &unknown_block, true); + assert!( + timestamp_hash_3.saturating_add(tenure_idle_timeout.as_secs()) + < block_infos[0].proposed_time + ); + } } diff --git a/stacks-signer/src/tests/chainstate.rs b/stacks-signer/src/tests/chainstate.rs index bec9f1258d..2037a25def 100644 --- a/stacks-signer/src/tests/chainstate.rs +++ b/stacks-signer/src/tests/chainstate.rs @@ -90,6 +90,7 @@ fn setup_test_environment( first_proposal_burn_block_timing: Duration::from_secs(30), block_proposal_timeout: Duration::from_secs(5), tenure_last_block_proposal_timeout: Duration::from_secs(30), + tenure_idle_timeout: Duration::from_secs(300), }, }; @@ -132,13 +133,13 @@ fn check_proposal_units() { setup_test_environment("check_proposal_units"); assert!(!view - .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, 1, false) + .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, false) .unwrap()); view.last_sortition = None; assert!(!view - .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, 1, false) + .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, false) .unwrap()); } @@ -154,7 +155,6 @@ fn check_proposal_miner_pkh_mismatch() { &mut signer_db, &block, &different_block_pk, - 1, false, ) .unwrap()); @@ -166,7 +166,6 @@ fn check_proposal_miner_pkh_mismatch() { &mut signer_db, &block, &different_block_pk, - 1, false, ) .unwrap()); @@ -263,7 +262,7 @@ fn reorg_timing_testing( config, } = MockServerClient::new(); let h = std::thread::spawn(move || { - view.check_proposal(&client, &mut signer_db, &block, &block_pk, 1, false) + view.check_proposal(&client, &mut signer_db, &block, &block_pk, false) }); header_clone.chain_length -= 1; let response = crate::client::tests::build_get_tenure_tip_response( @@ -300,16 +299,16 @@ fn check_proposal_invalid_status() { setup_test_environment("invalid_status"); block.header.consensus_hash = view.cur_sortition.consensus_hash; assert!(view - 
.check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, 1, false) + .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, false) .unwrap()); view.cur_sortition.miner_status = SortitionMinerStatus::InvalidatedAfterFirstBlock; assert!(!view - .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, 1, false) + .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, false) .unwrap()); block.header.consensus_hash = view.last_sortition.as_ref().unwrap().consensus_hash; assert!(!view - .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, 1, false) + .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, false) .unwrap()); view.cur_sortition.miner_status = SortitionMinerStatus::InvalidatedBeforeFirstBlock; @@ -320,7 +319,7 @@ fn check_proposal_invalid_status() { // parent blocks have been seen before, while the signer state checks are only reasoning about // stacks blocks seen by the signer, which may be a subset) assert!(view - .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, 1, false) + .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, false) .unwrap()); } @@ -369,7 +368,7 @@ fn check_proposal_tenure_extend_invalid_conditions() { let tx = make_tenure_change_tx(extend_payload); block.txs = vec![tx]; assert!(!view - .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, 1, false) + .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, false) .unwrap()); let mut extend_payload = make_tenure_change_payload(); @@ -379,7 +378,7 @@ fn check_proposal_tenure_extend_invalid_conditions() { let tx = make_tenure_change_tx(extend_payload); block.txs = vec![tx]; assert!(view - .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, 1, false) + .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, false) .unwrap()); } @@ -406,7 +405,6 @@ fn check_block_proposal_timeout() { &mut signer_db, &curr_sortition_block, &block_pk, - 1, false, 
) .unwrap()); @@ -417,7 +415,6 @@ fn check_block_proposal_timeout() { &mut signer_db, &last_sortition_block, &block_pk, - 1, false, ) .unwrap()); @@ -430,7 +427,6 @@ fn check_block_proposal_timeout() { &mut signer_db, &curr_sortition_block, &block_pk, - 1, false, ) .unwrap()); @@ -441,7 +437,6 @@ fn check_block_proposal_timeout() { &mut signer_db, &last_sortition_block, &block_pk, - 1, false, ) .unwrap()); @@ -533,7 +528,7 @@ fn check_proposal_refresh() { setup_test_environment("check_proposal_refresh"); block.header.consensus_hash = view.cur_sortition.consensus_hash; assert!(view - .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, 1, false) + .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, false) .unwrap()); let MockServerClient { @@ -575,7 +570,7 @@ fn check_proposal_refresh() { view.cur_sortition.consensus_hash = ConsensusHash([128; 20]); let h = std::thread::spawn(move || { - view.check_proposal(&client, &mut signer_db, &block, &block_pk, 1, true) + view.check_proposal(&client, &mut signer_db, &block, &block_pk, true) }); crate::client::tests::write_response( server, diff --git a/stacks-signer/src/tests/conf/signer-0.toml b/stacks-signer/src/tests/conf/signer-0.toml index 19002c1914..26a9380dcb 100644 --- a/stacks-signer/src/tests/conf/signer-0.toml +++ b/stacks-signer/src/tests/conf/signer-0.toml @@ -1,6 +1,6 @@ stacks_private_key = "6a1fc1a3183018c6d79a4e11e154d2bdad2d89ac8bc1b0a021de8b4d28774fbb01" node_host = "127.0.0.1:20443" -endpoint = "localhost:30000" +endpoint = "[::1]:30000" network = "testnet" auth_password = "12345" db_path = ":memory:" diff --git a/stacks-signer/src/v0/mod.rs b/stacks-signer/src/v0/mod.rs index 520fb36ca1..34b363311e 100644 --- a/stacks-signer/src/v0/mod.rs +++ b/stacks-signer/src/v0/mod.rs @@ -17,6 +17,10 @@ /// The signer module for processing events pub mod signer; +#[cfg(any(test, feature = "testing"))] +/// Test specific functions for the signer module +pub mod tests; + use 
libsigner::v0::messages::SignerMessage; use crate::v0::signer::Signer; diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index b537cfae8a..5a5128cce4 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -44,29 +44,13 @@ use crate::runloop::SignerResult; use crate::signerdb::{BlockInfo, BlockState, SignerDb}; use crate::Signer as SignerTrait; -#[cfg(any(test, feature = "testing"))] -/// A global variable that can be used to reject all block proposals if the signer's public key is in the provided list -pub static TEST_REJECT_ALL_BLOCK_PROPOSAL: std::sync::Mutex< - Option>, -> = std::sync::Mutex::new(None); - -#[cfg(any(test, feature = "testing"))] -/// A global variable that can be used to ignore block proposals if the signer's public key is in the provided list -pub static TEST_IGNORE_ALL_BLOCK_PROPOSALS: std::sync::Mutex< - Option>, -> = std::sync::Mutex::new(None); - -#[cfg(any(test, feature = "testing"))] -/// Pause the block broadcast -pub static TEST_PAUSE_BLOCK_BROADCAST: std::sync::Mutex> = std::sync::Mutex::new(None); - -#[cfg(any(test, feature = "testing"))] -/// Skip broadcasting the block to the network -pub static TEST_SKIP_BLOCK_BROADCAST: std::sync::Mutex> = std::sync::Mutex::new(None); - /// The stacks signer registered for the reward cycle #[derive(Debug)] pub struct Signer { + /// The private key of the signer + #[cfg(any(test, feature = "testing"))] + pub private_key: StacksPrivateKey, + #[cfg(not(any(test, feature = "testing")))] /// The private key of the signer private_key: StacksPrivateKey, /// The stackerdb client @@ -92,6 +76,8 @@ pub struct Signer { pub block_proposal_validation_timeout: Duration, /// The current submitted block proposal and its submission time pub submitted_block_proposal: Option<(BlockProposal, Instant)>, + /// Maximum age of a block proposal in seconds before it is dropped without processing + pub block_proposal_max_age_secs: u64, } impl std::fmt::Display for 
Signer { @@ -126,6 +112,7 @@ impl SignerTrait for Signer { Some(SignerEvent::BlockValidationResponse(_)) | Some(SignerEvent::MinerMessages(..)) | Some(SignerEvent::NewBurnBlock { .. }) + | Some(SignerEvent::NewBlock { .. }) | Some(SignerEvent::StatusCheck) | None => None, Some(SignerEvent::SignerMessages(msg_parity, ..)) => Some(u64::from(*msg_parity) % 2), @@ -168,21 +155,8 @@ impl SignerTrait for Signer { match message { SignerMessage::BlockProposal(block_proposal) => { #[cfg(any(test, feature = "testing"))] - if let Some(public_keys) = - &*TEST_IGNORE_ALL_BLOCK_PROPOSALS.lock().unwrap() - { - if public_keys.contains( - &stacks_common::types::chainstate::StacksPublicKey::from_private( - &self.private_key, - ), - ) { - warn!("{self}: Ignoring block proposal due to testing directive"; - "block_id" => %block_proposal.block.block_id(), - "height" => block_proposal.block.header.chain_length, - "consensus_hash" => %block_proposal.block.header.consensus_hash - ); - continue; - } + if self.test_ignore_all_block_proposals(block_proposal) { + continue; } self.handle_block_proposal( stacks_client, @@ -246,6 +220,33 @@ impl SignerTrait for Signer { }); *sortition_state = None; } + SignerEvent::NewBlock { + block_hash, + block_height, + } => { + debug!( + "{self}: Received a new block event."; + "block_hash" => %block_hash, + "block_height" => block_height + ); + if let Ok(Some(mut block_info)) = self + .signer_db + .block_lookup(block_hash) + .inspect_err(|e| warn!("{self}: Failed to load block state: {e:?}")) + { + if block_info.state == BlockState::GloballyAccepted { + // We have already globally accepted this block. Do nothing. 
+ return; + } + if let Err(e) = block_info.mark_globally_accepted() { + warn!("{self}: Failed to mark block as globally accepted: {e:?}"); + return; + } + if let Err(e) = self.signer_db.insert_block(&block_info) { + warn!("{self}: Failed to update block state to globally accepted: {e:?}"); + } + } + } } } @@ -284,6 +285,7 @@ impl From for Signer { proposal_config, submitted_block_proposal: None, block_proposal_validation_timeout: signer_config.block_proposal_validation_timeout, + block_proposal_max_age_secs: signer_config.block_proposal_max_age_secs, } } } @@ -292,7 +294,7 @@ impl Signer { /// Determine this signers response to a proposed block /// Returns a BlockResponse if we have already validated the block /// Returns None otherwise - fn determine_response(&self, block_info: &BlockInfo) -> Option { + fn determine_response(&mut self, block_info: &BlockInfo) -> Option { let valid = block_info.valid?; let response = if valid { debug!("{self}: Accepting block {}", block_info.block.block_id()); @@ -300,7 +302,15 @@ impl Signer { .private_key .sign(block_info.signer_signature_hash().bits()) .expect("Failed to sign block"); - BlockResponse::accepted(block_info.signer_signature_hash(), signature) + BlockResponse::accepted( + block_info.signer_signature_hash(), + signature, + self.signer_db.calculate_tenure_extend_timestamp( + self.proposal_config.tenure_idle_timeout, + &block_info.block, + true, + ), + ) } else { debug!("{self}: Rejecting block {}", block_info.block.block_id()); BlockResponse::rejected( @@ -308,6 +318,11 @@ impl Signer { RejectCode::RejectedInPriorRound, &self.private_key, self.mainnet, + self.signer_db.calculate_tenure_extend_timestamp( + self.proposal_config.tenure_idle_timeout, + &block_info.block, + false, + ), ) }; Some(response) @@ -331,12 +346,30 @@ impl Signer { return; } + if block_proposal + .block + .header + .timestamp + .saturating_add(self.block_proposal_max_age_secs) + < get_epoch_time_secs() + { + // Block is too old. 
Drop it with a warning. Don't even bother broadcasting to the node. + warn!("{self}: Received a block proposal that is more than {} secs old. Ignoring...", self.block_proposal_max_age_secs; + "signer_sighash" => %block_proposal.block.header.signer_signature_hash(), + "block_id" => %block_proposal.block.block_id(), + "block_height" => block_proposal.block.header.chain_length, + "burn_height" => block_proposal.burn_height, + "timestamp" => block_proposal.block.header.timestamp, + ); + return; + } + // TODO: should add a check to ignore an old burn block height if we know its outdated. Would require us to store the burn block height we last saw on the side. // the signer needs to be able to determine whether or not the block they're about to sign would conflict with an already-signed Stacks block let signer_signature_hash = block_proposal.block.header.signer_signature_hash(); if let Some(block_info) = self .signer_db - .block_lookup(self.reward_cycle, &signer_signature_hash) + .block_lookup(&signer_signature_hash) .expect("Failed to connect to signer DB") { let Some(block_response) = self.determine_response(&block_info) else { @@ -372,7 +405,10 @@ impl Signer { "burn_height" => block_proposal.burn_height, ); crate::monitoring::increment_block_proposals_received(); + #[cfg(any(test, feature = "testing"))] let mut block_info = BlockInfo::from(block_proposal.clone()); + #[cfg(not(any(test, feature = "testing")))] + let block_info = BlockInfo::from(block_proposal.clone()); // Get sortition view if we don't have it if sortition_state.is_none() { @@ -395,7 +431,6 @@ impl Signer { &mut self.signer_db, &block_proposal.block, miner_pubkey, - self.reward_cycle, true, ) { // Error validating block @@ -410,6 +445,11 @@ impl Signer { RejectCode::ConnectivityIssues, &self.private_key, self.mainnet, + self.signer_db.calculate_tenure_extend_timestamp( + self.proposal_config.tenure_idle_timeout, + &block_proposal.block, + false, + ), )) } // Block proposal is bad @@ -424,6 +464,11 @@ 
impl Signer { RejectCode::SortitionViewMismatch, &self.private_key, self.mainnet, + self.signer_db.calculate_tenure_extend_timestamp( + self.proposal_config.tenure_idle_timeout, + &block_proposal.block, + false, + ), )) } // Block proposal passed check, still don't know if valid @@ -440,6 +485,11 @@ impl Signer { RejectCode::NoSortitionView, &self.private_key, self.mainnet, + self.signer_db.calculate_tenure_extend_timestamp( + self.proposal_config.tenure_idle_timeout, + &block_proposal.block, + false, + ), )) }; @@ -448,10 +498,7 @@ impl Signer { self.test_reject_block_proposal(block_proposal, &mut block_info, block_response); if let Some(block_response) = block_response { - // We know proposal is invalid. Send rejection message, do not do further validation - if let Err(e) = block_info.mark_locally_rejected() { - warn!("{self}: Failed to mark block as locally rejected: {e:?}",); - }; + // We know proposal is invalid. Send rejection message, do not do further validation and do not store it. debug!("{self}: Broadcasting a block response to stacks node: {block_response:?}"); let res = self .stackerdb @@ -535,10 +582,7 @@ impl Signer { self.submitted_block_proposal = None; } // For mutability reasons, we need to take the block_info out of the map and add it back after processing - let mut block_info = match self - .signer_db - .block_lookup(self.reward_cycle, &signer_signature_hash) - { + let mut block_info = match self.signer_db.block_lookup(&signer_signature_hash) { Ok(Some(block_info)) => { if block_info.is_locally_finalized() { debug!("{self}: Received block validation for a block that is already marked as {}. 
Ignoring...", block_info.state); @@ -563,6 +607,13 @@ impl Signer { } block_info.signed_self.get_or_insert(get_epoch_time_secs()); } + // Record the block validation time but do not consider stx transfers or boot contract calls + block_info.validation_time_ms = if block_validate_ok.cost.is_zero() { + Some(0) + } else { + Some(block_validate_ok.validation_time_ms) + }; + let signature = self .private_key .sign(&signer_signature_hash.0) @@ -571,7 +622,15 @@ impl Signer { self.signer_db .insert_block(&block_info) .unwrap_or_else(|e| self.handle_insert_block_error(e)); - let accepted = BlockAccepted::new(block_info.signer_signature_hash(), signature); + let accepted = BlockAccepted::new( + block_info.signer_signature_hash(), + signature, + self.signer_db.calculate_tenure_extend_timestamp( + self.proposal_config.tenure_idle_timeout, + &block_info.block, + true, + ), + ); // have to save the signature _after_ the block info self.handle_block_signature(stacks_client, &accepted); Some(BlockResponse::Accepted(accepted)) @@ -594,10 +653,7 @@ impl Signer { { self.submitted_block_proposal = None; } - let mut block_info = match self - .signer_db - .block_lookup(self.reward_cycle, &signer_signature_hash) - { + let mut block_info = match self.signer_db.block_lookup(&signer_signature_hash) { Ok(Some(block_info)) => { if block_info.is_locally_finalized() { debug!("{self}: Received block validation for a block that is already marked as {}. 
Ignoring...", block_info.state); @@ -625,6 +681,11 @@ impl Signer { block_validate_reject.clone(), &self.private_key, self.mainnet, + self.signer_db.calculate_tenure_extend_timestamp( + self.proposal_config.tenure_idle_timeout, + &block_info.block, + false, + ), ); self.signer_db .insert_block(&block_info) @@ -683,10 +744,7 @@ impl Signer { } let signature_sighash = block_proposal.block.header.signer_signature_hash(); // For mutability reasons, we need to take the block_info out of the map and add it back after processing - let mut block_info = match self - .signer_db - .block_lookup(self.reward_cycle, &signature_sighash) - { + let mut block_info = match self.signer_db.block_lookup(&signature_sighash) { Ok(Some(block_info)) => { if block_info.state == BlockState::GloballyRejected || block_info.state == BlockState::GloballyAccepted @@ -722,6 +780,11 @@ impl Signer { RejectCode::ConnectivityIssues, &self.private_key, self.mainnet, + self.signer_db.calculate_tenure_extend_timestamp( + self.proposal_config.tenure_idle_timeout, + &block_proposal.block, + false, + ), ); if let Err(e) = block_info.mark_locally_rejected() { warn!("{self}: Failed to mark block as locally rejected: {e:?}",); @@ -769,7 +832,7 @@ impl Signer { let block_hash = &rejection.signer_signature_hash; let signature = &rejection.signature; - let mut block_info = match self.signer_db.block_lookup(self.reward_cycle, block_hash) { + let mut block_info = match self.signer_db.block_lookup(block_hash) { Ok(Some(block_info)) => { if block_info.state == BlockState::GloballyRejected || block_info.state == BlockState::GloballyAccepted @@ -842,7 +905,7 @@ impl Signer { // Not enough rejection signatures to make a decision return; } - debug!("{self}: {total_reject_weight}/{total_weight} signers voteed to reject the block {block_hash}"); + debug!("{self}: {total_reject_weight}/{total_weight} signers voted to reject the block {block_hash}"); if let Err(e) = block_info.mark_globally_rejected() { warn!("{self}: Failed 
to mark block as globally rejected: {e:?}",); } @@ -867,6 +930,7 @@ impl Signer { signer_signature_hash: block_hash, signature, metadata, + .. } = accepted; debug!( "{self}: Received a block-accept signature: ({block_hash}, {signature}, {})", @@ -874,10 +938,7 @@ impl Signer { ); // Have we already processed this block? - match self - .signer_db - .get_block_state(self.reward_cycle, block_hash) - { + match self.signer_db.get_block_state(block_hash) { Ok(Some(state)) => { if state == BlockState::GloballyAccepted || state == BlockState::GloballyRejected { debug!("{self}: Received block signature for a block that is already marked as {}. Ignoring...", state); @@ -959,19 +1020,14 @@ impl Signer { } // have enough signatures to broadcast! - let Ok(Some(mut block_info)) = self - .signer_db - .block_lookup(self.reward_cycle, block_hash) - .map_err(|e| { - warn!("{self}: Failed to load block {block_hash}: {e:?})"); - e - }) - else { + let Ok(Some(mut block_info)) = self.signer_db.block_lookup(block_hash).inspect_err(|e| { + warn!("{self}: Failed to load block {block_hash}: {e:?})"); + }) else { warn!("{self}: No such block {block_hash}"); return; }; // move block to LOCALLY accepted state. - // We only mark this GLOBALLY accepted if we manage to broadcast it... + // It is only considered globally accepted IFF we receive a new block event confirming it OR see the chain tip of the node advance to it. if let Err(e) = block_info.mark_locally_accepted(true) { // Do not abort as we should still try to store the block signature threshold warn!("{self}: Failed to mark block as locally accepted: {e:?}"); @@ -984,22 +1040,8 @@ impl Signer { panic!("{self} Failed to write block to signerdb: {e}"); }); #[cfg(any(test, feature = "testing"))] - { - if *TEST_PAUSE_BLOCK_BROADCAST.lock().unwrap() == Some(true) { - // Do an extra check just so we don't log EVERY time. 
- warn!("Block broadcast is stalled due to testing directive."; - "block_id" => %block_info.block.block_id(), - "height" => block_info.block.header.chain_length, - ); - while *TEST_PAUSE_BLOCK_BROADCAST.lock().unwrap() == Some(true) { - std::thread::sleep(std::time::Duration::from_millis(10)); - } - info!("Block validation is no longer stalled due to testing directive."; - "block_id" => %block_info.block.block_id(), - "height" => block_info.block.header.chain_length, - ); - } - } + self.test_pause_block_broadcast(&block_info); + self.broadcast_signed_block(stacks_client, block_info.block, &addrs_to_sigs); if self .submitted_block_proposal @@ -1039,76 +1081,14 @@ impl Signer { ); stacks_client.post_block_until_ok(self, &block); - if let Err(e) = self.signer_db.set_block_broadcasted( - self.reward_cycle, - &block_hash, - get_epoch_time_secs(), - ) { + if let Err(e) = self + .signer_db + .set_block_broadcasted(&block_hash, get_epoch_time_secs()) + { warn!("{self}: Failed to set block broadcasted for {block_hash}: {e:?}"); } } - #[cfg(any(test, feature = "testing"))] - fn test_skip_block_broadcast(&self, block: &NakamotoBlock) -> bool { - if *TEST_SKIP_BLOCK_BROADCAST.lock().unwrap() == Some(true) { - let block_hash = block.header.signer_signature_hash(); - warn!( - "{self}: Skipping block broadcast due to testing directive"; - "block_id" => %block.block_id(), - "height" => block.header.chain_length, - "consensus_hash" => %block.header.consensus_hash - ); - - if let Err(e) = self.signer_db.set_block_broadcasted( - self.reward_cycle, - &block_hash, - get_epoch_time_secs(), - ) { - warn!("{self}: Failed to set block broadcasted for {block_hash}: {e:?}"); - } - return true; - } - false - } - - #[cfg(any(test, feature = "testing"))] - fn test_reject_block_proposal( - &mut self, - block_proposal: &BlockProposal, - block_info: &mut BlockInfo, - block_response: Option, - ) -> Option { - let Some(public_keys) = &*TEST_REJECT_ALL_BLOCK_PROPOSAL.lock().unwrap() else { - return 
block_response; - }; - if public_keys.contains( - &stacks_common::types::chainstate::StacksPublicKey::from_private(&self.private_key), - ) { - warn!("{self}: Rejecting block proposal automatically due to testing directive"; - "block_id" => %block_proposal.block.block_id(), - "height" => block_proposal.block.header.chain_length, - "consensus_hash" => %block_proposal.block.header.consensus_hash - ); - if let Err(e) = block_info.mark_locally_rejected() { - warn!("{self}: Failed to mark block as locally rejected: {e:?}",); - }; - // We must insert the block into the DB to prevent subsequent repeat proposals being accepted (should reject - // as invalid since we rejected in a prior round if this crops up again) - // in case this is the first time we saw this block. Safe to do since this is testing case only. - self.signer_db - .insert_block(block_info) - .unwrap_or_else(|e| self.handle_insert_block_error(e)); - Some(BlockResponse::rejected( - block_proposal.block.header.signer_signature_hash(), - RejectCode::TestingDirective, - &self.private_key, - self.mainnet, - )) - } else { - None - } - } - /// Send a mock signature to stackerdb to prove we are still alive fn mock_sign(&mut self, mock_proposal: MockProposal) { info!("{self}: Mock signing mock proposal: {mock_proposal:?}"); @@ -1123,7 +1103,7 @@ impl Signer { } /// Helper for logging insert_block error - fn handle_insert_block_error(&self, e: DBError) { + pub fn handle_insert_block_error(&self, e: DBError) { error!("{self}: Failed to insert block into signer-db: {e:?}"); panic!("{self} Failed to write block to signerdb: {e}"); } diff --git a/stacks-signer/src/v0/tests.rs b/stacks-signer/src/v0/tests.rs new file mode 100644 index 0000000000..0b9cdcc569 --- /dev/null +++ b/stacks-signer/src/v0/tests.rs @@ -0,0 +1,141 @@ +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as 
published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::sync::LazyLock; + +use blockstack_lib::chainstate::nakamoto::NakamotoBlock; +use libsigner::v0::messages::{BlockResponse, RejectCode}; +use libsigner::BlockProposal; +use slog::{slog_info, slog_warn}; +use stacks_common::types::chainstate::StacksPublicKey; +use stacks_common::util::get_epoch_time_secs; +use stacks_common::util::tests::TestFlag; +use stacks_common::{info, warn}; + +use super::signer::Signer; +use crate::signerdb::BlockInfo; + +/// A global variable that can be used to reject all block proposals if the signer's public key is in the provided list +pub static TEST_REJECT_ALL_BLOCK_PROPOSAL: LazyLock>> = + LazyLock::new(TestFlag::default); + +/// A global variable that can be used to ignore block proposals if the signer's public key is in the provided list +pub static TEST_IGNORE_ALL_BLOCK_PROPOSALS: LazyLock>> = + LazyLock::new(TestFlag::default); + +/// A global variable that can be used to pause broadcasting the block to the network +pub static TEST_PAUSE_BLOCK_BROADCAST: LazyLock> = LazyLock::new(TestFlag::default); + +/// A global variable that can be used to skip broadcasting the block to the network +pub static TEST_SKIP_BLOCK_BROADCAST: LazyLock> = LazyLock::new(TestFlag::default); + +impl Signer { + /// Skip the block broadcast if the TEST_SKIP_BLOCK_BROADCAST flag is set + pub fn test_skip_block_broadcast(&self, block: &NakamotoBlock) -> bool { + if TEST_SKIP_BLOCK_BROADCAST.get() { + let block_hash = block.header.signer_signature_hash(); + warn!( 
+ "{self}: Skipping block broadcast due to testing directive"; + "block_id" => %block.block_id(), + "height" => block.header.chain_length, + "consensus_hash" => %block.header.consensus_hash + ); + + if let Err(e) = self + .signer_db + .set_block_broadcasted(&block_hash, get_epoch_time_secs()) + { + warn!("{self}: Failed to set block broadcasted for {block_hash}: {e:?}"); + } + return true; + } + false + } + + /// Reject block proposals if the TEST_REJECT_ALL_BLOCK_PROPOSAL flag is set for the signer's public key + pub fn test_reject_block_proposal( + &mut self, + block_proposal: &BlockProposal, + block_info: &mut BlockInfo, + block_response: Option, + ) -> Option { + let public_keys = TEST_REJECT_ALL_BLOCK_PROPOSAL.get(); + if public_keys.contains( + &stacks_common::types::chainstate::StacksPublicKey::from_private(&self.private_key), + ) { + warn!("{self}: Rejecting block proposal automatically due to testing directive"; + "block_id" => %block_proposal.block.block_id(), + "height" => block_proposal.block.header.chain_length, + "consensus_hash" => %block_proposal.block.header.consensus_hash + ); + if let Err(e) = block_info.mark_locally_rejected() { + warn!("{self}: Failed to mark block as locally rejected: {e:?}",); + }; + // We must insert the block into the DB to prevent subsequent repeat proposals being accepted (should reject + // as invalid since we rejected in a prior round if this crops up again) + // in case this is the first time we saw this block. Safe to do since this is testing case only. 
+ self.signer_db + .insert_block(block_info) + .unwrap_or_else(|e| self.handle_insert_block_error(e)); + Some(BlockResponse::rejected( + block_proposal.block.header.signer_signature_hash(), + RejectCode::TestingDirective, + &self.private_key, + self.mainnet, + self.signer_db.calculate_tenure_extend_timestamp( + self.proposal_config.tenure_idle_timeout, + &block_proposal.block, + false, + ), + )) + } else { + block_response + } + } + + /// Pause the block broadcast if the TEST_PAUSE_BLOCK_BROADCAST flag is set + pub fn test_pause_block_broadcast(&self, block_info: &BlockInfo) { + if TEST_PAUSE_BLOCK_BROADCAST.get() { + // Do an extra check just so we don't log EVERY time. + warn!("{self}: Block broadcast is stalled due to testing directive."; + "block_id" => %block_info.block.block_id(), + "height" => block_info.block.header.chain_length, + ); + while TEST_PAUSE_BLOCK_BROADCAST.get() { + std::thread::sleep(std::time::Duration::from_millis(10)); + } + info!("{self}: Block validation is no longer stalled due to testing directive."; + "block_id" => %block_info.block.block_id(), + "height" => block_info.block.header.chain_length, + ); + } + } + + /// Ignore block proposals if the TEST_IGNORE_ALL_BLOCK_PROPOSALS flag is set for the signer's public key + pub fn test_ignore_all_block_proposals(&self, block_proposal: &BlockProposal) -> bool { + let public_keys = TEST_IGNORE_ALL_BLOCK_PROPOSALS.get(); + if public_keys.contains( + &stacks_common::types::chainstate::StacksPublicKey::from_private(&self.private_key), + ) { + warn!("{self}: Ignoring block proposal due to testing directive"; + "block_id" => %block_proposal.block.block_id(), + "height" => block_proposal.block.header.chain_length, + "consensus_hash" => %block_proposal.block.header.consensus_hash + ); + return true; + } + false + } +} diff --git a/stackslib/Cargo.toml b/stackslib/Cargo.toml index edd58c6161..cbee2bfc98 100644 --- a/stackslib/Cargo.toml +++ b/stackslib/Cargo.toml @@ -58,6 +58,7 @@ libstackerdb = { 
path = "../libstackerdb" } siphasher = "0.3.7" hashbrown = { workspace = true } rusqlite = { workspace = true } +toml = { workspace = true } [target.'cfg(not(any(target_os = "macos",target_os="windows", target_arch = "arm" )))'.dependencies] tikv-jemallocator = {workspace = true} diff --git a/testnet/stacks-node/conf/mainnet-follower-conf.toml b/stackslib/conf/mainnet-follower-conf.toml similarity index 100% rename from testnet/stacks-node/conf/mainnet-follower-conf.toml rename to stackslib/conf/mainnet-follower-conf.toml diff --git a/testnet/stacks-node/conf/mainnet-miner-conf.toml b/stackslib/conf/mainnet-miner-conf.toml similarity index 100% rename from testnet/stacks-node/conf/mainnet-miner-conf.toml rename to stackslib/conf/mainnet-miner-conf.toml diff --git a/testnet/stacks-node/conf/mainnet-mockminer-conf.toml b/stackslib/conf/mainnet-mockminer-conf.toml similarity index 100% rename from testnet/stacks-node/conf/mainnet-mockminer-conf.toml rename to stackslib/conf/mainnet-mockminer-conf.toml diff --git a/testnet/stacks-node/conf/mainnet-signer.toml b/stackslib/conf/mainnet-signer.toml similarity index 100% rename from testnet/stacks-node/conf/mainnet-signer.toml rename to stackslib/conf/mainnet-signer.toml diff --git a/testnet/stacks-node/conf/testnet-follower-conf.toml b/stackslib/conf/testnet-follower-conf.toml similarity index 96% rename from testnet/stacks-node/conf/testnet-follower-conf.toml rename to stackslib/conf/testnet-follower-conf.toml index 80226c5b89..c294a628b4 100644 --- a/testnet/stacks-node/conf/testnet-follower-conf.toml +++ b/stackslib/conf/testnet-follower-conf.toml @@ -78,3 +78,7 @@ start_height = 6 [[burnchain.epochs]] epoch_name = "3.0" start_height = 56_457 + +[[burnchain.epochs]] +epoch_name = "3.1" +start_height = 77_770 diff --git a/testnet/stacks-node/conf/testnet-miner-conf.toml b/stackslib/conf/testnet-miner-conf.toml similarity index 96% rename from testnet/stacks-node/conf/testnet-miner-conf.toml rename to 
stackslib/conf/testnet-miner-conf.toml index 93455dcee5..65f8cace68 100644 --- a/testnet/stacks-node/conf/testnet-miner-conf.toml +++ b/stackslib/conf/testnet-miner-conf.toml @@ -74,3 +74,7 @@ start_height = 6 [[burnchain.epochs]] epoch_name = "3.0" start_height = 56_457 + +[[burnchain.epochs]] +epoch_name = "3.1" +start_height = 77_770 diff --git a/testnet/stacks-node/conf/testnet-signer.toml b/stackslib/conf/testnet-signer.toml similarity index 100% rename from testnet/stacks-node/conf/testnet-signer.toml rename to stackslib/conf/testnet-signer.toml diff --git a/stackslib/src/burnchains/bitcoin/indexer.rs b/stackslib/src/burnchains/bitcoin/indexer.rs index 83c8903d35..3361301675 100644 --- a/stackslib/src/burnchains/bitcoin/indexer.rs +++ b/stackslib/src/burnchains/bitcoin/indexer.rs @@ -924,7 +924,7 @@ impl BitcoinIndexer { return Ok(()); } warn!( - "Header at height {} is not wihtin 2 hours of now (is at {})", + "Header at height {} is not within 2 hours of now (is at {})", highest_header_height, highest_header.block_header.header.time ); self.drop_headers(highest_header_height.saturating_sub(1))?; diff --git a/stackslib/src/burnchains/burnchain.rs b/stackslib/src/burnchains/burnchain.rs index 84a45eb278..b688097d70 100644 --- a/stackslib/src/burnchains/burnchain.rs +++ b/stackslib/src/burnchains/burnchain.rs @@ -1083,7 +1083,9 @@ impl Burnchain { } /// Hand off the block to the ChainsCoordinator _and_ process the sortition - /// *only* to be used by legacy stacks node interfaces, like the Helium node + /// *only* to be used by legacy stacks node interfaces, like the Helium node. + /// + /// It does not work on mainnet. 
fn process_block_and_sortition_deprecated( db: &mut SortitionDB, burnchain_db: &mut BurnchainDB, @@ -1120,6 +1122,7 @@ impl Burnchain { // method is deprecated and only used in defunct helium nodes db.evaluate_sortition( + false, &header, blockstack_txs, burnchain, diff --git a/stackslib/src/burnchains/mod.rs b/stackslib/src/burnchains/mod.rs index 0bc68897cb..3e153df53b 100644 --- a/stackslib/src/burnchains/mod.rs +++ b/stackslib/src/burnchains/mod.rs @@ -450,6 +450,7 @@ impl PoxConstants { ) } + // NOTE: this is the *old* pre-Nakamoto testnet pub fn testnet_default() -> PoxConstants { PoxConstants::new( POX_REWARD_CYCLE_LENGTH / 2, // 1050 @@ -468,6 +469,10 @@ impl PoxConstants { ) // total liquid supply is 40000000000000000 µSTX } + pub fn nakamoto_testnet_default() -> PoxConstants { + PoxConstants::new(900, 100, 51, 100, 0, u64::MAX, u64::MAX, 242, 243, 246, 244) + } + // TODO: add tests from mutation testing results #4838 #[cfg_attr(test, mutants::skip)] pub fn regtest_default() -> PoxConstants { diff --git a/stackslib/src/burnchains/tests/burnchain.rs b/stackslib/src/burnchains/tests/burnchain.rs index b08d7a097e..8d72d4efa9 100644 --- a/stackslib/src/burnchains/tests/burnchain.rs +++ b/stackslib/src/burnchains/tests/burnchain.rs @@ -44,7 +44,6 @@ use crate::chainstate::burn::{ BlockSnapshot, ConsensusHash, ConsensusHashExtensions, OpsHash, SortitionHash, }; use crate::chainstate::stacks::address::StacksAddressExtensions; -use crate::chainstate::stacks::index::TrieHashExtension; use crate::chainstate::stacks::StacksPublicKey; use crate::util_lib::db::Error as db_error; @@ -478,6 +477,7 @@ fn test_process_block_ops() { let (sn121, _) = tx .process_block_ops( + false, &burnchain, &initial_snapshot, &header, @@ -500,6 +500,7 @@ fn test_process_block_ops() { let (sn122, _) = tx .process_block_ops( + false, &burnchain, &block_121_snapshot, &header, @@ -521,6 +522,7 @@ fn test_process_block_ops() { let mut tx = SortitionHandleTx::begin(&mut db, 
&block_122_snapshot.sortition_id).unwrap(); let (sn123, _) = tx .process_block_ops( + false, &burnchain, &block_122_snapshot, &header, @@ -632,6 +634,7 @@ fn test_process_block_ops() { SortitionHandleTx::begin(&mut db, &block_123_snapshot.sortition_id).unwrap(); let (sn124, _) = tx .process_block_ops( + false, &burnchain, &block_123_snapshot, &header, @@ -873,6 +876,7 @@ fn test_burn_snapshot_sequence() { let mut tx = SortitionHandleTx::begin(&mut db, &prev_snapshot.sortition_id).unwrap(); let (sn, _) = tx .process_block_ops( + false, &burnchain, &prev_snapshot, &header, diff --git a/stackslib/src/burnchains/tests/mod.rs b/stackslib/src/burnchains/tests/mod.rs index e7fa51a89c..c8543b1142 100644 --- a/stackslib/src/burnchains/tests/mod.rs +++ b/stackslib/src/burnchains/tests/mod.rs @@ -351,10 +351,30 @@ impl TestMinerFactory { impl TestBurnchainBlock { pub fn new(parent_snapshot: &BlockSnapshot, fork_id: u64) -> TestBurnchainBlock { + let burn_header_hash = BurnchainHeaderHash::from_test_data( + parent_snapshot.block_height + 1, + &parent_snapshot.index_root, + fork_id, + ); TestBurnchainBlock { parent_snapshot: parent_snapshot.clone(), block_height: parent_snapshot.block_height + 1, - txs: vec![], + txs: vec![ + // make sure that no block-commit gets vtxindex == 0 unless explicitly structured. 
+ // This prestx mocks a burnchain coinbase + BlockstackOperationType::PreStx(PreStxOp { + output: StacksAddress::burn_address(false), + txid: Txid::from_test_data( + parent_snapshot.block_height + 1, + 0, + &burn_header_hash, + 128, + ), + vtxindex: 0, + block_height: parent_snapshot.block_height + 1, + burn_header_hash, + }), + ], fork_id: fork_id, timestamp: get_epoch_time_secs(), } @@ -397,6 +417,7 @@ impl TestBurnchainBlock { parent_block_snapshot: Option<&BlockSnapshot>, new_seed: Option, epoch_marker: u8, + parent_is_shadow: bool, ) -> LeaderBlockCommitOp { let pubks = miner .privks @@ -435,6 +456,13 @@ impl TestBurnchainBlock { ) .expect("FATAL: failed to read block commit"); + if parent_is_shadow { + assert!( + get_commit_res.is_none(), + "FATAL: shadow parent should not have a block-commit" + ); + } + let input = SortitionDB::get_last_block_commit_by_sender(ic.conn(), &apparent_sender) .unwrap() .map(|commit| (commit.txid.clone(), 1 + (commit.commit_outs.len() as u32))) @@ -454,7 +482,8 @@ impl TestBurnchainBlock { block_hash, self.block_height, &new_seed, - &parent, + parent.block_height as u32, + parent.vtxindex as u16, leader_key.block_height as u32, leader_key.vtxindex as u16, burn_fee, @@ -464,16 +493,42 @@ impl TestBurnchainBlock { txop } None => { - // initial - let txop = LeaderBlockCommitOp::initial( - block_hash, - self.block_height, - &new_seed, - leader_key, - burn_fee, - &input, - &apparent_sender, - ); + let txop = if parent_is_shadow { + test_debug!( + "Block-commit for {} (burn height {}) builds on shadow sortition", + block_hash, + self.block_height + ); + + LeaderBlockCommitOp::new( + block_hash, + self.block_height, + &new_seed, + last_snapshot_with_sortition.block_height as u32, + 0, + leader_key.block_height as u32, + leader_key.vtxindex as u16, + burn_fee, + &input, + &apparent_sender, + ) + } else { + // initial + test_debug!( + "Block-commit for {} (burn height {}) builds on genesis", + block_hash, + self.block_height, + ); + 
LeaderBlockCommitOp::initial( + block_hash, + self.block_height, + &new_seed, + leader_key, + burn_fee, + &input, + &apparent_sender, + ) + }; txop } }; @@ -517,6 +572,7 @@ impl TestBurnchainBlock { parent_block_snapshot, None, STACKS_EPOCH_2_4_MARKER, + false, ) } @@ -571,6 +627,7 @@ impl TestBurnchainBlock { let new_snapshot = sortition_db_handle .process_block_txs( + false, &parent_snapshot, &header, burnchain, diff --git a/stackslib/src/chainstate/burn/db/processing.rs b/stackslib/src/chainstate/burn/db/processing.rs index 82318bfe37..0aacd2816a 100644 --- a/stackslib/src/chainstate/burn/db/processing.rs +++ b/stackslib/src/chainstate/burn/db/processing.rs @@ -112,6 +112,7 @@ impl<'a> SortitionHandleTx<'a> { /// * return the snapshot (and sortition results) fn process_checked_block_ops( &mut self, + mainnet: bool, burnchain: &Burnchain, parent_snapshot: &BlockSnapshot, block_header: &BurnchainBlockHeader, @@ -141,6 +142,7 @@ impl<'a> SortitionHandleTx<'a> { // do the cryptographic sortition and pick the next winning block. let mut snapshot = BlockSnapshot::make_snapshot( + mainnet, self, burnchain, &next_sortition_id, @@ -158,6 +160,11 @@ impl<'a> SortitionHandleTx<'a> { BurnchainError::DBError(e) })?; + let snapshot_epoch = SortitionDB::get_stacks_epoch(self, snapshot.block_height)? + .unwrap_or_else(|| { + panic!("FATAL: no epoch defined for snapshot"); + }); + // was this snapshot the first with mining? // compute the initial block rewards. let initialize_bonus = if snapshot.sortition && parent_snapshot.total_burn == 0 { @@ -166,6 +173,8 @@ impl<'a> SortitionHandleTx<'a> { let mut total_reward = 0; for burn_block_height in burnchain.initial_reward_start_block..snapshot.block_height { total_reward += StacksChainState::get_coinbase_reward( + snapshot_epoch.epoch_id, + mainnet, burn_block_height, self.context.first_block_height, ); @@ -227,6 +236,7 @@ impl<'a> SortitionHandleTx<'a> { /// Returns the BlockSnapshot created from this block. 
pub fn process_block_ops( &mut self, + mainnet: bool, burnchain: &Burnchain, parent_snapshot: &BlockSnapshot, block_header: &BurnchainBlockHeader, @@ -279,6 +289,7 @@ impl<'a> SortitionHandleTx<'a> { // process them let res = self .process_checked_block_ops( + mainnet, burnchain, parent_snapshot, block_header, @@ -305,6 +316,7 @@ impl<'a> SortitionHandleTx<'a> { /// list of blockstack transactions. pub fn process_block_txs( &mut self, + mainnet: bool, parent_snapshot: &BlockSnapshot, this_block_header: &BurnchainBlockHeader, burnchain: &Burnchain, @@ -324,6 +336,7 @@ impl<'a> SortitionHandleTx<'a> { ); let new_snapshot = self.process_block_ops( + mainnet, burnchain, &parent_snapshot, &this_block_header, @@ -353,7 +366,6 @@ mod tests { use crate::chainstate::burn::operations::{LeaderBlockCommitOp, LeaderKeyRegisterOp}; use crate::chainstate::burn::*; use crate::chainstate::stacks::address::StacksAddressExtensions; - use crate::chainstate::stacks::index::TrieHashExtension; use crate::chainstate::stacks::StacksPublicKey; use crate::core::MICROSTACKS_PER_STACKS; @@ -432,6 +444,7 @@ mod tests { let processed = ic .process_block_ops( + false, &burnchain, &snapshot, &next_block_header, diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index e84526643c..e399121e07 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -3163,6 +3163,7 @@ impl SortitionDB { StacksEpochId::Epoch24 => version_u32 >= 3, StacksEpochId::Epoch25 => version_u32 >= 3, StacksEpochId::Epoch30 => version_u32 >= 3, + StacksEpochId::Epoch31 => version_u32 >= 3, } } @@ -3690,6 +3691,12 @@ impl SortitionDB { .try_into() .ok() } + + /// Get the Stacks block ID for the canonical tip. 
+ pub fn get_canonical_stacks_tip_block_id(&self) -> StacksBlockId { + let (ch, bh) = SortitionDB::get_canonical_stacks_chain_tip_hash(self.conn()).unwrap(); + StacksBlockId::new(&ch, &bh) + } } impl<'a> SortitionDBTx<'a> { @@ -3980,7 +3987,7 @@ impl<'a> SortitionDBConn<'a> { tip, reward_cycle_id, )?; - info!("Fetching preprocessed reward set"; + debug!("Fetching preprocessed reward set"; "tip_sortition_id" => %tip, "reward_cycle_id" => reward_cycle_id, "prepare_phase_start_sortition_id" => %first_sortition, @@ -4282,6 +4289,7 @@ impl SortitionDB { /// commits its results. This is used to post the calculated reward set to an event observer. pub fn evaluate_sortition) -> ()>( &mut self, + mainnet: bool, burn_header: &BurnchainBlockHeader, ops: Vec, burnchain: &Burnchain, @@ -4359,6 +4367,7 @@ impl SortitionDB { }; let new_snapshot = sortition_db_handle.process_block_txs( + mainnet, &parent_snapshot, burn_header, burnchain, @@ -6595,7 +6604,6 @@ pub mod tests { BlockstackOperationType, LeaderBlockCommitOp, LeaderKeyRegisterOp, }; use crate::chainstate::burn::ConsensusHash; - use crate::chainstate::stacks::index::TrieHashExtension; use crate::chainstate::stacks::StacksPublicKey; use crate::core::{StacksEpochExtension, *}; use crate::util_lib::db::Error as db_error; diff --git a/stackslib/src/chainstate/burn/distribution.rs b/stackslib/src/chainstate/burn/distribution.rs index ed01ae014b..59c335cd58 100644 --- a/stackslib/src/chainstate/burn/distribution.rs +++ b/stackslib/src/chainstate/burn/distribution.rs @@ -450,7 +450,6 @@ mod tests { }; use crate::chainstate::burn::ConsensusHash; use crate::chainstate::stacks::address::StacksAddressExtensions; - use crate::chainstate::stacks::index::TrieHashExtension; use crate::chainstate::stacks::StacksPublicKey; use crate::core::MINING_COMMITMENT_WINDOW; diff --git a/stackslib/src/chainstate/burn/mod.rs b/stackslib/src/chainstate/burn/mod.rs index be92c3088f..4552210f44 100644 --- a/stackslib/src/chainstate/burn/mod.rs +++ 
b/stackslib/src/chainstate/burn/mod.rs @@ -432,7 +432,6 @@ mod tests { use crate::burnchains::bitcoin::address::BitcoinAddress; use crate::burnchains::bitcoin::keys::BitcoinPublicKey; use crate::chainstate::burn::db::sortdb::*; - use crate::chainstate::stacks::index::TrieHashExtension; use crate::util_lib::db::Error as db_error; #[test] diff --git a/stackslib/src/chainstate/burn/operations/leader_block_commit.rs b/stackslib/src/chainstate/burn/operations/leader_block_commit.rs index 910315f082..a752131668 100644 --- a/stackslib/src/chainstate/burn/operations/leader_block_commit.rs +++ b/stackslib/src/chainstate/burn/operations/leader_block_commit.rs @@ -42,7 +42,7 @@ use crate::chainstate::stacks::{StacksPrivateKey, StacksPublicKey}; use crate::core::{ StacksEpoch, StacksEpochId, STACKS_EPOCH_2_05_MARKER, STACKS_EPOCH_2_1_MARKER, STACKS_EPOCH_2_2_MARKER, STACKS_EPOCH_2_3_MARKER, STACKS_EPOCH_2_4_MARKER, - STACKS_EPOCH_2_5_MARKER, STACKS_EPOCH_3_0_MARKER, + STACKS_EPOCH_2_5_MARKER, STACKS_EPOCH_3_0_MARKER, STACKS_EPOCH_3_1_MARKER, }; use crate::net::Error as net_error; @@ -136,7 +136,8 @@ impl LeaderBlockCommitOp { block_header_hash: &BlockHeaderHash, block_height: u64, new_seed: &VRFSeed, - parent: &LeaderBlockCommitOp, + parent_block_height: u32, + parent_vtxindex: u16, key_block_ptr: u32, key_vtxindex: u16, burn_fee: u64, @@ -148,8 +149,8 @@ impl LeaderBlockCommitOp { new_seed: new_seed.clone(), key_block_ptr: key_block_ptr, key_vtxindex: key_vtxindex, - parent_block_ptr: parent.block_height as u32, - parent_vtxindex: parent.vtxindex as u16, + parent_block_ptr: parent_block_height, + parent_vtxindex: parent_vtxindex, memo: vec![], burn_fee: burn_fee, input: input.clone(), @@ -696,8 +697,19 @@ impl LeaderBlockCommitOp { // is descendant let directly_descended_from_anchor = epoch_id.block_commits_to_parent() && self.block_header_hash == reward_set_info.anchor_block; - let descended_from_anchor = directly_descended_from_anchor || tx - 
.descended_from(parent_block_height, &reward_set_info.anchor_block) + + // second, if we're in a nakamoto epoch, and the parent block has vtxindex 0 (i.e. the + // coinbase of the burnchain block), then assume that this block descends from the anchor + // block for the purposes of validating its PoX payouts. The block validation logic will + // check that the parent block is indeed a shadow block, and that `self.parent_block_ptr` + // points to the shadow block's tenure's burnchain block. + let maybe_shadow_parent = epoch_id.supports_shadow_blocks() + && self.parent_block_ptr != 0 + && self.parent_vtxindex == 0; + + let descended_from_anchor = directly_descended_from_anchor + || maybe_shadow_parent + || tx.descended_from(parent_block_height, &reward_set_info.anchor_block) .map_err(|e| { error!("Failed to check whether parent (height={}) is descendent of anchor block={}: {}", parent_block_height, &reward_set_info.anchor_block, e); @@ -869,6 +881,7 @@ impl LeaderBlockCommitOp { StacksEpochId::Epoch24 => self.check_epoch_commit_marker(STACKS_EPOCH_2_4_MARKER), StacksEpochId::Epoch25 => self.check_epoch_commit_marker(STACKS_EPOCH_2_5_MARKER), StacksEpochId::Epoch30 => self.check_epoch_commit_marker(STACKS_EPOCH_3_0_MARKER), + StacksEpochId::Epoch31 => self.check_epoch_commit_marker(STACKS_EPOCH_3_1_MARKER), } } @@ -888,7 +901,8 @@ impl LeaderBlockCommitOp { | StacksEpochId::Epoch23 | StacksEpochId::Epoch24 | StacksEpochId::Epoch25 - | StacksEpochId::Epoch30 => { + | StacksEpochId::Epoch30 + | StacksEpochId::Epoch31 => { // correct behavior -- uses *sortition height* to find the intended sortition ID let sortition_height = self .block_height @@ -1031,10 +1045,12 @@ impl LeaderBlockCommitOp { return Err(op_error::BlockCommitNoParent); } else if self.parent_block_ptr != 0 || self.parent_vtxindex != 0 { // not building off of genesis, so the parent block must exist + // unless the parent is a shadow block let has_parent = tx .get_block_commit_parent(parent_block_height, 
self.parent_vtxindex.into(), &tx_tip)? .is_some(); - if !has_parent { + let maybe_shadow_block = self.parent_vtxindex == 0 && epoch_id.supports_shadow_blocks(); + if !has_parent && !maybe_shadow_block { warn!("Invalid block commit: no parent block in this fork"; "apparent_sender" => %apparent_sender_repr ); @@ -1172,7 +1188,6 @@ mod tests { use crate::chainstate::burn::operations::*; use crate::chainstate::burn::{ConsensusHash, *}; use crate::chainstate::stacks::address::StacksAddressExtensions; - use crate::chainstate::stacks::index::TrieHashExtension; use crate::chainstate::stacks::StacksPublicKey; use crate::core::{ StacksEpoch, StacksEpochExtension, StacksEpochId, PEER_VERSION_EPOCH_1_0, diff --git a/stackslib/src/chainstate/burn/operations/leader_key_register.rs b/stackslib/src/chainstate/burn/operations/leader_key_register.rs index 44402adc0c..5608b6739d 100644 --- a/stackslib/src/chainstate/burn/operations/leader_key_register.rs +++ b/stackslib/src/chainstate/burn/operations/leader_key_register.rs @@ -253,7 +253,6 @@ pub mod tests { }; use crate::chainstate::burn::{BlockSnapshot, ConsensusHash, OpsHash, SortitionHash}; use crate::chainstate::stacks::address::StacksAddressExtensions; - use crate::chainstate::stacks::index::TrieHashExtension; use crate::core::StacksEpochId; pub struct OpFixture { diff --git a/stackslib/src/chainstate/burn/sortition.rs b/stackslib/src/chainstate/burn/sortition.rs index b0221f1439..ff71b0cf10 100644 --- a/stackslib/src/chainstate/burn/sortition.rs +++ b/stackslib/src/chainstate/burn/sortition.rs @@ -40,7 +40,7 @@ use crate::chainstate::burn::{ SortitionHash, }; use crate::chainstate::stacks::db::StacksChainState; -use crate::chainstate::stacks::index::{ClarityMarfTrieId, MarfTrieId, TrieHashExtension}; +use crate::chainstate::stacks::index::{ClarityMarfTrieId, MarfTrieId}; use crate::core::*; use crate::util_lib::db::Error as db_error; @@ -498,6 +498,7 @@ impl BlockSnapshot { /// /// Call this *after* you store all of the 
block's transactions to the burn db. pub fn make_snapshot( + mainnet: bool, sort_tx: &mut SortitionHandleTx, burnchain: &Burnchain, my_sortition_id: &SortitionId, @@ -518,6 +519,7 @@ impl BlockSnapshot { .epoch_id; Self::make_snapshot_in_epoch( + mainnet, sort_tx, burnchain, my_sortition_id, @@ -531,6 +533,7 @@ impl BlockSnapshot { } pub fn make_snapshot_in_epoch( + mainnet: bool, sort_tx: &mut SortitionHandleTx, burnchain: &Burnchain, my_sortition_id: &SortitionId, @@ -561,6 +564,8 @@ impl BlockSnapshot { initial_mining_bonus_ustx } else { let missed_coinbase = StacksChainState::get_coinbase_reward( + epoch_id, + mainnet, parent_snapshot.block_height, first_block_height, ); @@ -788,6 +793,7 @@ mod test { burnchain_state_transition: &BurnchainStateTransition, ) -> Result { BlockSnapshot::make_snapshot( + false, sort_tx, burnchain, my_sortition_id, diff --git a/stackslib/src/chainstate/coordinator/mod.rs b/stackslib/src/chainstate/coordinator/mod.rs index 5b7c7e89b6..209c6b8ef0 100644 --- a/stackslib/src/chainstate/coordinator/mod.rs +++ b/stackslib/src/chainstate/coordinator/mod.rs @@ -198,6 +198,9 @@ pub trait BlockEventDispatcher { } pub struct ChainsCoordinatorConfig { + /// true: assume all anchor blocks are present, and block chain sync until they arrive + /// false: process sortitions in reward cycles without anchor blocks + pub assume_present_anchor_blocks: bool, /// true: use affirmation maps before 2.1 /// false: only use affirmation maps in 2.1 or later pub always_use_affirmation_maps: bool, @@ -209,8 +212,17 @@ pub struct ChainsCoordinatorConfig { impl ChainsCoordinatorConfig { pub fn new() -> ChainsCoordinatorConfig { ChainsCoordinatorConfig { - always_use_affirmation_maps: false, + always_use_affirmation_maps: true, require_affirmed_anchor_blocks: true, + assume_present_anchor_blocks: true, + } + } + + pub fn test_new() -> ChainsCoordinatorConfig { + ChainsCoordinatorConfig { + always_use_affirmation_maps: false, + require_affirmed_anchor_blocks: 
false, + assume_present_anchor_blocks: false, } } } @@ -419,8 +431,8 @@ impl<'a, T: BlockEventDispatcher> OnChainRewardSetProvider<'a, T> { return Ok(RewardSet::empty()); } } - StacksEpochId::Epoch25 | StacksEpochId::Epoch30 => { - // Epoch 2.5 and 3.0 compute reward sets, but *only* if PoX-4 is active + StacksEpochId::Epoch25 | StacksEpochId::Epoch30 | StacksEpochId::Epoch31 => { + // Epoch 2.5, 3.0, and 3.1 compute reward sets, but *only* if PoX-4 is active if burnchain .pox_constants .active_pox_contract(current_burn_height) @@ -700,7 +712,7 @@ impl<'a, T: BlockEventDispatcher, U: RewardSetProvider, B: BurnchainHeaderReader notifier: (), atlas_config, atlas_db: Some(atlas_db), - config: ChainsCoordinatorConfig::new(), + config: ChainsCoordinatorConfig::test_new(), burnchain_indexer, refresh_stacker_db: Arc::new(AtomicBool::new(false)), in_nakamoto_epoch: false, @@ -2336,6 +2348,20 @@ impl< panic!("BUG: no epoch defined at height {}", header.block_height) }); + if self.config.assume_present_anchor_blocks { + // anchor blocks are always assumed to be present in the chain history, + // so report its absence if we don't have it. 
+ if let PoxAnchorBlockStatus::SelectedAndUnknown(missing_anchor_block, _) = + &rc_info.anchor_status + { + info!( + "Currently missing PoX anchor block {}, which is assumed to be present", + &missing_anchor_block + ); + return Ok(Some(missing_anchor_block.clone())); + } + } + if cur_epoch.epoch_id >= StacksEpochId::Epoch21 || self.config.always_use_affirmation_maps { // potentially have an anchor block, but only process the next reward cycle (and // subsequent reward cycles) with it if the prepare-phase block-commits affirm its @@ -2674,6 +2700,7 @@ impl< let (next_snapshot, _) = self .sortition_db .evaluate_sortition( + self.chain_state_db.mainnet, &header, ops, &self.burnchain, diff --git a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs index cb1966d806..1bb5e44192 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs @@ -58,6 +58,9 @@ use crate::monitoring::increment_stx_blocks_processed_counter; use crate::net::Error as NetError; use crate::util_lib::db::Error as DBError; +#[cfg(any(test, feature = "testing"))] +pub static TEST_COORDINATOR_STALL: std::sync::Mutex> = std::sync::Mutex::new(None); + #[cfg(test)] pub mod tests; @@ -484,7 +487,14 @@ pub fn load_nakamoto_reward_set( let Some(anchor_block_header) = prepare_phase_sortitions .into_iter() .find_map(|sn| { - if !sn.sortition { + let shadow_tenure = match chain_state.nakamoto_blocks_db().is_shadow_tenure(&sn.consensus_hash) { + Ok(x) => x, + Err(e) => { + return Some(Err(e)); + } + }; + + if !sn.sortition && !shadow_tenure { return None } @@ -757,6 +767,21 @@ impl< true } + #[cfg(any(test, feature = "testing"))] + fn fault_injection_pause_nakamoto_block_processing() { + if *TEST_COORDINATOR_STALL.lock().unwrap() == Some(true) { + // Do an extra check just so we don't log EVERY time. 
+ warn!("Coordinator is stalled due to testing directive"); + while *TEST_COORDINATOR_STALL.lock().unwrap() == Some(true) { + std::thread::sleep(std::time::Duration::from_millis(10)); + } + warn!("Coordinator is no longer stalled due to testing directive. Continuing..."); + } + } + + #[cfg(not(any(test, feature = "testing")))] + fn fault_injection_pause_nakamoto_block_processing() {} + /// Handle one or more new Nakamoto Stacks blocks. /// If we process a PoX anchor block, then return its block hash. This unblocks processing the /// next reward cycle's burnchain blocks. Subsequent calls to this function will terminate @@ -769,6 +794,8 @@ impl< ); loop { + Self::fault_injection_pause_nakamoto_block_processing(); + // process at most one block per loop pass let mut processed_block_receipt = match NakamotoChainState::process_next_nakamoto_block( &mut self.chain_state_db, @@ -1137,6 +1164,7 @@ impl< let (next_snapshot, _) = self .sortition_db .evaluate_sortition( + self.chain_state_db.mainnet, &header, ops, &self.burnchain, diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index 23bf3313e9..0525717981 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -576,7 +576,7 @@ impl<'a> TestPeer<'a> { coinbase_tx: &StacksTransaction, miner_setup: F, after_block: G, - ) -> NakamotoBlock + ) -> Result where F: FnMut(&mut NakamotoBlockBuilder), G: FnMut(&mut NakamotoBlock) -> bool, @@ -606,7 +606,7 @@ impl<'a> TestPeer<'a> { coinbase_tx: &StacksTransaction, miner_setup: F, after_block: G, - ) -> NakamotoBlock + ) -> Result where F: FnMut(&mut NakamotoBlockBuilder), G: FnMut(&mut NakamotoBlock) -> bool, @@ -631,7 +631,7 @@ impl<'a> TestPeer<'a> { sortdb, &sender_key, sender_acct.nonce, - 100, + 200, 1, &recipient_addr, ); @@ -642,10 +642,10 @@ impl<'a> TestPeer<'a> { } }, after_block, - ); + )?; assert_eq!(blocks_and_sizes.len(), 
1); let block = blocks_and_sizes.pop().unwrap().0; - block + Ok(block) } pub fn mine_tenure(&mut self, block_builder: F) -> Vec<(NakamotoBlock, u64, ExecutionCost)> @@ -707,15 +707,41 @@ impl<'a> TestPeer<'a> { block_builder, |_| true, ) + .unwrap() } pub fn single_block_tenure( &mut self, sender_key: &StacksPrivateKey, miner_setup: S, - mut after_burn_ops: F, + after_burn_ops: F, after_block: G, ) -> (NakamotoBlock, u64, StacksTransaction, StacksTransaction) + where + S: FnMut(&mut NakamotoBlockBuilder), + F: FnMut(&mut Vec), + G: FnMut(&mut NakamotoBlock) -> bool, + { + self.single_block_tenure_fallible(sender_key, miner_setup, after_burn_ops, after_block) + .unwrap() + } + + /// Produce a single-block tenure, containing a stx-transfer sent from `sender_key`. + /// + /// * `after_burn_ops` is called right after `self.begin_nakamoto_tenure` to modify any burn ops + /// for this tenure + /// + /// * `miner_setup` is called right after the Nakamoto block builder is constructed, but before + /// any txs are mined + /// + /// * `after_block` is called right after the block is assembled, but before it is signed. + pub fn single_block_tenure_fallible( + &mut self, + sender_key: &StacksPrivateKey, + miner_setup: S, + mut after_burn_ops: F, + after_block: G, + ) -> Result<(NakamotoBlock, u64, StacksTransaction, StacksTransaction), ChainstateError> where S: FnMut(&mut NakamotoBlockBuilder), F: FnMut(&mut Vec), @@ -770,9 +796,9 @@ impl<'a> TestPeer<'a> { &coinbase_tx, miner_setup, after_block, - ); + )?; - (block, burn_height, tenure_change_tx, coinbase_tx) + Ok((block, burn_height, tenure_change_tx, coinbase_tx)) } } @@ -1422,24 +1448,27 @@ fn pox_treatment() { // set the bitvec to a heterogenous one: either punish or // reward is acceptable, so this block should just process. - let block = peer.mine_single_block_tenure( - &private_key, - &tenure_change_tx, - &coinbase_tx, - |_| {}, - |block| { - // each stacker has 3 entries in the bitvec. 
- // entries are ordered by PoxAddr, so this makes every entry a 1-of-3 - block.header.pox_treatment = BitVec::try_from( - [ - false, false, true, false, false, true, false, false, true, false, false, true, - ] - .as_slice(), - ) - .unwrap(); - true - }, - ); + let block = peer + .mine_single_block_tenure( + &private_key, + &tenure_change_tx, + &coinbase_tx, + |_| {}, + |block| { + // each stacker has 3 entries in the bitvec. + // entries are ordered by PoxAddr, so this makes every entry a 1-of-3 + block.header.pox_treatment = BitVec::try_from( + [ + false, false, true, false, false, true, false, false, true, false, false, + true, + ] + .as_slice(), + ) + .unwrap(); + true + }, + ) + .unwrap(); blocks.push(block); // now we need to test punishment! @@ -1510,23 +1539,26 @@ fn pox_treatment() { // set the bitvec to a heterogenous one: either punish or // reward is acceptable, so this block should just process. - let block = peer.mine_single_block_tenure( - &private_key, - &tenure_change_tx, - &coinbase_tx, - |miner| { - // each stacker has 3 entries in the bitvec. - // entries are ordered by PoxAddr, so this makes every entry a 1-of-3 - miner.header.pox_treatment = BitVec::try_from( - [ - false, false, true, false, false, true, false, false, true, false, false, true, - ] - .as_slice(), - ) - .unwrap(); - }, - |_block| true, - ); + let block = peer + .mine_single_block_tenure( + &private_key, + &tenure_change_tx, + &coinbase_tx, + |miner| { + // each stacker has 3 entries in the bitvec. 
+ // entries are ordered by PoxAddr, so this makes every entry a 1-of-3 + miner.header.pox_treatment = BitVec::try_from( + [ + false, false, true, false, false, true, false, false, true, false, false, + true, + ] + .as_slice(), + ) + .unwrap(); + }, + |_block| true, + ) + .unwrap(); blocks.push(block); let tip = { @@ -3212,7 +3244,7 @@ fn test_stacks_on_burnchain_ops() { // mocked txid: Txid([i as u8; 32]), - vtxindex: 1, + vtxindex: 11, block_height: block_height + 1, burn_header_hash: BurnchainHeaderHash([0x00; 32]), })); @@ -3232,7 +3264,7 @@ fn test_stacks_on_burnchain_ops() { // mocked txid: Txid([(i as u8) | 0x80; 32]), - vtxindex: 2, + vtxindex: 12, block_height: block_height + 1, burn_header_hash: BurnchainHeaderHash([0x00; 32]), })); @@ -3244,7 +3276,7 @@ fn test_stacks_on_burnchain_ops() { // mocked txid: Txid([(i as u8) | 0x40; 32]), - vtxindex: 3, + vtxindex: 13, block_height: block_height + 1, burn_header_hash: BurnchainHeaderHash([0x00; 32]), })); @@ -3263,7 +3295,7 @@ fn test_stacks_on_burnchain_ops() { // mocked txid: Txid([(i as u8) | 0xc0; 32]), - vtxindex: 4, + vtxindex: 14, block_height: block_height + 1, burn_header_hash: BurnchainHeaderHash([0x00; 32]), }, diff --git a/stackslib/src/chainstate/nakamoto/miner.rs b/stackslib/src/chainstate/nakamoto/miner.rs index 0291b1dad2..68cdb2454a 100644 --- a/stackslib/src/chainstate/nakamoto/miner.rs +++ b/stackslib/src/chainstate/nakamoto/miner.rs @@ -28,7 +28,10 @@ use clarity::vm::clarity::TransactionConnection; use clarity::vm::costs::{ExecutionCost, LimitedCostTracker, TrackerData}; use clarity::vm::database::BurnStateDB; use clarity::vm::errors::Error as InterpreterError; -use clarity::vm::types::{QualifiedContractIdentifier, TypeSignature}; +use clarity::vm::types::{ + QualifiedContractIdentifier, StacksAddressExtensions as ClarityStacksAddressExtensions, + TypeSignature, +}; use libstackerdb::StackerDBChunkData; use serde::Deserialize; use stacks_common::codec::{read_next, write_next, Error as 
CodecError, StacksMessageCodec}; @@ -37,8 +40,9 @@ use stacks_common::types::chainstate::{ }; use stacks_common::types::StacksPublicKeyBuffer; use stacks_common::util::get_epoch_time_ms; -use stacks_common::util::hash::{Hash160, MerkleTree, Sha512Trunc256Sum}; +use stacks_common::util::hash::{hex_bytes, Hash160, MerkleTree, Sha512Trunc256Sum}; use stacks_common::util::secp256k1::{MessageSignature, Secp256k1PrivateKey}; +use stacks_common::util::vrf::VRFProof; use crate::burnchains::{PrivateKey, PublicKey}; use crate::chainstate::burn::db::sortdb::{ @@ -58,8 +62,8 @@ use crate::chainstate::stacks::db::transactions::{ handle_clarity_runtime_error, ClarityRuntimeTxError, }; use crate::chainstate::stacks::db::{ - ChainstateTx, ClarityTx, MinerRewardInfo, StacksBlockHeaderTypes, StacksChainState, - StacksHeaderInfo, MINER_REWARD_MATURITY, + ChainstateTx, ClarityTx, MinerRewardInfo, StacksAccount, StacksBlockHeaderTypes, + StacksChainState, StacksHeaderInfo, MINER_REWARD_MATURITY, }; use crate::chainstate::stacks::events::{StacksTransactionEvent, StacksTransactionReceipt}; use crate::chainstate::stacks::miner::{ @@ -117,7 +121,7 @@ pub struct NakamotoBlockBuilder { /// Total burn this block represents total_burn: u64, /// Matured miner rewards to process, if any. 
- matured_miner_rewards_opt: Option, + pub(crate) matured_miner_rewards_opt: Option, /// bytes of space consumed so far pub bytes_so_far: u64, /// transactions selected @@ -143,7 +147,7 @@ pub struct MinerTenureInfo<'a> { pub coinbase_height: u64, pub cause: Option, pub active_reward_set: boot::RewardSet, - pub tenure_block_commit: LeaderBlockCommitOp, + pub tenure_block_commit_opt: Option, } impl NakamotoBlockBuilder { @@ -244,7 +248,21 @@ impl NakamotoBlockBuilder { burn_dbconn: &'a SortitionHandleConn, cause: Option, ) -> Result, Error> { - debug!("Nakamoto miner tenure begin"); + self.inner_load_tenure_info(chainstate, burn_dbconn, cause, false) + } + + /// This function should be called before `tenure_begin`. + /// It creates a MinerTenureInfo struct which owns connections to the chainstate and sortition + /// DBs, so that block-processing is guaranteed to terminate before the lives of these handles + /// expire. + pub(crate) fn inner_load_tenure_info<'a>( + &self, + chainstate: &'a mut StacksChainState, + burn_dbconn: &'a SortitionHandleConn, + cause: Option, + shadow_block: bool, + ) -> Result, Error> { + debug!("Nakamoto miner tenure begin"; "shadow" => shadow_block, "tenure_change" => ?cause); let Some(tenure_election_sn) = SortitionDB::get_block_snapshot_consensus(&burn_dbconn, &self.header.consensus_hash)? @@ -256,19 +274,25 @@ impl NakamotoBlockBuilder { ); return Err(Error::NoSuchBlockError); }; - let Some(tenure_block_commit) = SortitionDB::get_block_commit( - &burn_dbconn, - &tenure_election_sn.winning_block_txid, - &tenure_election_sn.sortition_id, - )? 
- else { - warn!("Could not find winning block commit for burn block that elected the miner"; - "consensus_hash" => %self.header.consensus_hash, - "stacks_block_hash" => %self.header.block_hash(), - "stacks_block_id" => %self.header.block_id(), - "winning_txid" => %tenure_election_sn.winning_block_txid - ); - return Err(Error::NoSuchBlockError); + + let tenure_block_commit_opt = if shadow_block { + None + } else { + let Some(tenure_block_commit) = SortitionDB::get_block_commit( + &burn_dbconn, + &tenure_election_sn.winning_block_txid, + &tenure_election_sn.sortition_id, + )? + else { + warn!("Could not find winning block commit for burn block that elected the miner"; + "consensus_hash" => %self.header.consensus_hash, + "stacks_block_hash" => %self.header.block_hash(), + "stacks_block_id" => %self.header.block_id(), + "winning_txid" => %tenure_election_sn.winning_block_txid + ); + return Err(Error::NoSuchBlockError); + }; + Some(tenure_block_commit) }; let elected_height = tenure_election_sn.block_height; @@ -372,11 +396,11 @@ impl NakamotoBlockBuilder { cause, coinbase_height, active_reward_set, - tenure_block_commit, + tenure_block_commit_opt, }) } - /// Begin/resume mining a tenure's transactions. + /// Begin/resume mining a (normal) tenure's transactions. /// Returns an open ClarityTx for mining the block. /// NOTE: even though we don't yet know the block hash, the Clarity VM ensures that a /// transaction can't query information about the _current_ block (i.e. 
information that is not @@ -386,6 +410,12 @@ impl NakamotoBlockBuilder { burn_dbconn: &'a SortitionHandleConn, info: &'b mut MinerTenureInfo<'a>, ) -> Result, Error> { + let Some(block_commit) = info.tenure_block_commit_opt.as_ref() else { + return Err(Error::InvalidStacksBlock( + "Block-commit is required; cannot mine a shadow block".into(), + )); + }; + let SetupBlockResult { clarity_tx, matured_miner_rewards_opt, @@ -398,7 +428,6 @@ impl NakamotoBlockBuilder { &burn_dbconn.context.pox_constants, info.parent_consensus_hash, info.parent_header_hash, - info.parent_stacks_block_height, info.parent_burn_block_height, info.burn_tip, info.burn_tip_height, @@ -406,7 +435,7 @@ impl NakamotoBlockBuilder { info.coinbase_height, info.cause == Some(TenureChangeCause::Extended), &self.header.pox_treatment, - &info.tenure_block_commit, + block_commit, &info.active_reward_set, )?; self.matured_miner_rewards_opt = matured_miner_rewards_opt; @@ -620,7 +649,7 @@ impl NakamotoBlockBuilder { "parent_block_id" => %block.header.parent_block_id, "block_size" => size, "execution_consumed" => %consumed, - "%-full" => block_limit.proportion_largest_dimension(&consumed), + "percent_full" => block_limit.proportion_largest_dimension(&consumed), "assembly_time_ms" => ts_end.saturating_sub(ts_start), "consensus_hash" => %block.header.consensus_hash ); diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index e5ce7b0637..929d8dfe90 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -73,7 +73,8 @@ use super::stacks::db::{ use super::stacks::events::{StacksTransactionReceipt, TransactionOrigin}; use super::stacks::{ Error as ChainstateError, StacksBlock, StacksBlockHeader, StacksMicroblock, StacksTransaction, - TenureChangeError, TenureChangePayload, TransactionPayload, + TenureChangeError, TenureChangePayload, TokenTransferMemo, TransactionPayload, + TransactionVersion, }; use 
crate::burnchains::{Burnchain, PoxConstants, Txid}; use crate::chainstate::burn::db::sortdb::SortitionDB; @@ -108,8 +109,7 @@ use crate::core::{ }; use crate::net::stackerdb::{StackerDBConfig, MINER_SLOT_COUNT}; use crate::net::Error as net_error; -use crate::util_lib::boot; -use crate::util_lib::boot::boot_code_id; +use crate::util_lib::boot::{self, boot_code_addr, boot_code_id, boot_code_tx_auth}; use crate::util_lib::db::{ query_int, query_row, query_row_columns, query_row_panic, query_rows, sqlite_open, tx_begin_immediate, u64_to_sql, DBConn, Error as DBError, FromRow, @@ -119,6 +119,7 @@ use crate::{chainstate, monitoring}; pub mod coordinator; pub mod keys; pub mod miner; +pub mod shadow; pub mod signer_set; pub mod staging_blocks; pub mod tenure; @@ -840,6 +841,12 @@ impl NakamotoBlockHeader { )); }; + // if this is a shadow block, then its signing weight is as if every signer signed it, even + // though the signature vector is undefined. + if self.is_shadow_block() { + return Ok(self.get_shadow_signer_weight(reward_set)?); + } + let mut total_weight_signed: u32 = 0; // `last_index` is used to prevent out-of-order signatures let mut last_index = None; @@ -1400,6 +1407,7 @@ impl NakamotoBlock { "consensus_hash" => %self.header.consensus_hash, "stacks_block_hash" => %self.header.block_hash(), "stacks_block_id" => %self.block_id(), + "parent_block_id" => %self.header.parent_block_id, "commit_seed" => %block_commit.new_seed, "proof_seed" => %VRFSeed::from_proof(&parent_vrf_proof), "parent_vrf_proof" => %parent_vrf_proof.to_hex(), @@ -1433,10 +1441,15 @@ impl NakamotoBlock { } /// Verify the miner signature over this block. 
+ /// If this is a shadow block, then this is always Ok(()) pub(crate) fn check_miner_signature( &self, miner_pubkey_hash160: &Hash160, ) -> Result<(), ChainstateError> { + if self.is_shadow_block() { + return Ok(()); + } + let recovered_miner_hash160 = self.recover_miner_pubkh()?; if &recovered_miner_hash160 != miner_pubkey_hash160 { warn!( @@ -1501,11 +1514,13 @@ impl NakamotoBlock { /// Verify that if this block has a coinbase, that its VRF proof is consistent with the leader /// public key's VRF key. If there is no coinbase tx, then this is a no-op. - pub(crate) fn check_coinbase_tx( + fn check_normal_coinbase_tx( &self, leader_vrf_key: &VRFPublicKey, sortition_hash: &SortitionHash, ) -> Result<(), ChainstateError> { + assert!(!self.is_shadow_block()); + // If this block has a coinbase, then verify that its VRF proof was generated by this // block's miner. We'll verify that the seed of this block-commit was generated from the // parnet tenure's VRF proof via the `validate_vrf_seed()` method, which requires that we @@ -1514,11 +1529,12 @@ impl NakamotoBlock { let (_, _, vrf_proof_opt) = coinbase_tx .try_as_coinbase() .expect("FATAL: `get_coinbase_tx()` did not return a coinbase"); + let vrf_proof = vrf_proof_opt.ok_or(ChainstateError::InvalidStacksBlock( "Nakamoto coinbase must have a VRF proof".into(), ))?; - // this block's VRF proof must have ben generated from the last sortition's sortition + // this block's VRF proof must have been generated from the last sortition's sortition // hash (which includes the last commit's VRF seed) let valid = match VRF::verify(leader_vrf_key, vrf_proof, sortition_hash.as_bytes()) { Ok(v) => v, @@ -1548,27 +1564,15 @@ impl NakamotoBlock { Ok(()) } - /// Validate this Nakamoto block header against burnchain state. - /// Used to determine whether or not we'll keep a block around (even if we don't yet have its parent). + /// Verify properties of blocks against the burnchain that are common to both normal and shadow + /// blocks. 
/// - /// Arguments - /// -- `tenure_burn_chain_tip` is the BlockSnapshot containing the block-commit for this block's - /// tenure. It is not always the tip of the burnchain. - /// -- `expected_burn` is the total number of burnchain tokens spent, if known. - /// -- `leader_key` is the miner's leader key registration transaction - /// - /// Verifies the following: /// -- (self.header.consensus_hash) that this block falls into this block-commit's tenure /// -- (self.header.burn_spent) that this block's burn total matches `burn_tip`'s total burn - /// -- (self.header.miner_signature) that this miner signed this block - /// -- if this block has a tenure change, then it's consistent with the miner's public key and - /// self.header.consensus_hash - /// -- if this block has a coinbase, then that it's VRF proof was generated by this miner - pub fn validate_against_burnchain( + fn common_validate_against_burnchain( &self, tenure_burn_chain_tip: &BlockSnapshot, expected_burn: Option, - leader_key: &LeaderKeyRegisterOp, ) -> Result<(), ChainstateError> { // this block's consensus hash must match the sortition that selected it if tenure_burn_chain_tip.consensus_hash != self.header.consensus_hash { @@ -1599,24 +1603,37 @@ impl NakamotoBlock { } } - // miner must have signed this block - let miner_pubkey_hash160 = leader_key - .interpret_nakamoto_signing_key() - .ok_or(ChainstateError::NoSuchBlockError) - .map_err(|e| { - warn!( - "Leader key did not contain a hash160 of the miner signing public key"; - "leader_key" => ?leader_key, - ); - e - })?; + Ok(()) + } - self.check_miner_signature(&miner_pubkey_hash160)?; + /// Validate this Nakamoto block header against burnchain state. + /// Used to determine whether or not we'll keep a block around (even if we don't yet have its parent). + /// + /// Arguments + /// -- `mainnet`: whether or not the chain is mainnet + /// -- `tenure_burn_chain_tip` is the BlockSnapshot containing the block-commit for this block's + /// tenure. 
It is not always the tip of the burnchain. + /// -- `expected_burn` is the total number of burnchain tokens spent, if known. + /// -- `leader_key` is the miner's leader key registration transaction + /// + /// Verifies the following: + /// -- (self.header.consensus_hash) that this block falls into this block-commit's tenure + /// -- (self.header.burn_spent) that this block's burn total matches `burn_tip`'s total burn + /// -- (self.header.miner_signature) that this miner signed this block + /// -- if this block has a tenure change, then it's consistent with the miner's public key and + /// self.header.consensus_hash + /// -- if this block has a coinbase, then that it's VRF proof was generated by this miner + fn validate_normal_against_burnchain( + &self, + tenure_burn_chain_tip: &BlockSnapshot, + expected_burn: Option, + miner_pubkey_hash160: &Hash160, + vrf_public_key: &VRFPublicKey, + ) -> Result<(), ChainstateError> { + self.common_validate_against_burnchain(tenure_burn_chain_tip, expected_burn)?; + self.check_miner_signature(miner_pubkey_hash160)?; self.check_tenure_tx()?; - self.check_coinbase_tx( - &leader_key.public_key, - &tenure_burn_chain_tip.sortition_hash, - )?; + self.check_normal_coinbase_tx(vrf_public_key, &tenure_burn_chain_tip.sortition_hash)?; // not verified by this method: // * chain_length (need parent block header) @@ -1771,6 +1788,73 @@ impl NakamotoChainState { } } + /// Get the current burnchain view + /// This is either: + /// (1) set by the tenure change tx if one exists + /// (2) the same as parent block id + pub fn get_block_burn_view( + sort_db: &SortitionDB, + next_ready_block: &NakamotoBlock, + parent_header_info: &StacksHeaderInfo, + ) -> Result { + let burnchain_view = if let Some(tenure_change) = next_ready_block.get_tenure_tx_payload() { + if let Some(ref parent_burn_view) = parent_header_info.burn_view { + // check that the tenure_change's burn view descends from the parent + let parent_burn_view_sn = 
SortitionDB::get_block_snapshot_consensus( + sort_db.conn(), + parent_burn_view, + )? + .ok_or_else(|| { + warn!( + "Cannot process Nakamoto block: could not find parent block's burnchain view"; + "consensus_hash" => %next_ready_block.header.consensus_hash, + "stacks_block_hash" => %next_ready_block.header.block_hash(), + "stacks_block_id" => %next_ready_block.header.block_id(), + "parent_block_id" => %next_ready_block.header.parent_block_id + ); + ChainstateError::InvalidStacksBlock("Failed to load burn view of parent block ID".into()) + })?; + let handle = sort_db.index_handle_at_ch(&tenure_change.burn_view_consensus_hash)?; + let connected_sort_id = get_ancestor_sort_id(&handle, parent_burn_view_sn.block_height, &handle.context.chain_tip)? + .ok_or_else(|| { + warn!( + "Cannot process Nakamoto block: could not find parent block's burnchain view"; + "consensus_hash" => %next_ready_block.header.consensus_hash, + "stacks_block_hash" => %next_ready_block.header.block_hash(), + "stacks_block_id" => %next_ready_block.header.block_id(), + "parent_block_id" => %next_ready_block.header.parent_block_id + ); + ChainstateError::InvalidStacksBlock("Failed to load burn view of parent block ID".into()) + })?; + if connected_sort_id != parent_burn_view_sn.sortition_id { + warn!( + "Cannot process Nakamoto block: parent block's burnchain view does not connect to own burn view"; + "consensus_hash" => %next_ready_block.header.consensus_hash, + "stacks_block_hash" => %next_ready_block.header.block_hash(), + "stacks_block_id" => %next_ready_block.header.block_id(), + "parent_block_id" => %next_ready_block.header.parent_block_id + ); + return Err(ChainstateError::InvalidStacksBlock( + "Does not connect to burn view of parent block ID".into(), + )); + } + } + tenure_change.burn_view_consensus_hash + } else { + parent_header_info.burn_view.clone().ok_or_else(|| { + warn!( + "Cannot process Nakamoto block: parent block does not have a burnchain view and current block has no tenure tx"; 
+ "consensus_hash" => %next_ready_block.header.consensus_hash, + "stacks_block_hash" => %next_ready_block.header.block_hash(), + "stacks_block_id" => %next_ready_block.header.block_id(), + "parent_block_id" => %next_ready_block.header.parent_block_id + ); + ChainstateError::InvalidStacksBlock("Failed to load burn view of parent block ID".into()) + })? + }; + Ok(burnchain_view) + } + /// Process the next ready block. /// If there exists a ready Nakamoto block, then this method returns Ok(Some(..)) with the /// receipt. Otherwise, it returns Ok(None). @@ -1801,23 +1885,36 @@ impl NakamotoChainState { let block_id = next_ready_block.block_id(); // find corresponding snapshot - let next_ready_block_snapshot = SortitionDB::get_block_snapshot_consensus( + let Some(next_ready_block_snapshot) = SortitionDB::get_block_snapshot_consensus( sort_db.conn(), &next_ready_block.header.consensus_hash, )? - .unwrap_or_else(|| { + else { + // might not have snapshot yet, even if the block is burn-attachable, because it could + // be a shadow block + if next_ready_block.is_shadow_block() { + test_debug!( + "Stop processing Nakamoto blocks at shadow block {}", + &next_ready_block.block_id() + ); + return Ok(None); + } + + // but this isn't allowed for non-shadow blocks, which must be marked burn-attachable + // separately panic!( "CORRUPTION: staging Nakamoto block {}/{} does not correspond to a burn block", &next_ready_block.header.consensus_hash, &next_ready_block.header.block_hash() - ) - }); + ); + }; debug!("Process staging Nakamoto block"; "consensus_hash" => %next_ready_block.header.consensus_hash, "stacks_block_hash" => %next_ready_block.header.block_hash(), "stacks_block_id" => %next_ready_block.header.block_id(), - "burn_block_hash" => %next_ready_block_snapshot.burn_header_hash + "burn_block_hash" => %next_ready_block_snapshot.burn_header_hash, + "parent_block_id" => %next_ready_block.header.parent_block_id, ); let elected_height = sort_db @@ -1890,62 +1987,8 @@ impl 
NakamotoChainState { // this is either: // (1) set by the tenure change tx if one exists // (2) the same as parent block id - - let burnchain_view = if let Some(tenure_change) = next_ready_block.get_tenure_tx_payload() { - if let Some(ref parent_burn_view) = parent_header_info.burn_view { - // check that the tenure_change's burn view descends from the parent - let parent_burn_view_sn = SortitionDB::get_block_snapshot_consensus( - sort_db.conn(), - parent_burn_view, - )? - .ok_or_else(|| { - warn!( - "Cannot process Nakamoto block: could not find parent block's burnchain view"; - "consensus_hash" => %next_ready_block.header.consensus_hash, - "stacks_block_hash" => %next_ready_block.header.block_hash(), - "stacks_block_id" => %next_ready_block.header.block_id(), - "parent_block_id" => %next_ready_block.header.parent_block_id - ); - ChainstateError::InvalidStacksBlock("Failed to load burn view of parent block ID".into()) - })?; - let handle = sort_db.index_handle_at_ch(&tenure_change.burn_view_consensus_hash)?; - let connected_sort_id = get_ancestor_sort_id(&handle, parent_burn_view_sn.block_height, &handle.context.chain_tip)? 
- .ok_or_else(|| { - warn!( - "Cannot process Nakamoto block: could not find parent block's burnchain view"; - "consensus_hash" => %next_ready_block.header.consensus_hash, - "stacks_block_hash" => %next_ready_block.header.block_hash(), - "stacks_block_id" => %next_ready_block.header.block_id(), - "parent_block_id" => %next_ready_block.header.parent_block_id - ); - ChainstateError::InvalidStacksBlock("Failed to load burn view of parent block ID".into()) - })?; - if connected_sort_id != parent_burn_view_sn.sortition_id { - warn!( - "Cannot process Nakamoto block: parent block's burnchain view does not connect to own burn view"; - "consensus_hash" => %next_ready_block.header.consensus_hash, - "stacks_block_hash" => %next_ready_block.header.block_hash(), - "stacks_block_id" => %next_ready_block.header.block_id(), - "parent_block_id" => %next_ready_block.header.parent_block_id - ); - return Err(ChainstateError::InvalidStacksBlock( - "Does not connect to burn view of parent block ID".into(), - )); - } - } - tenure_change.burn_view_consensus_hash - } else { - parent_header_info.burn_view.clone().ok_or_else(|| { - warn!( - "Cannot process Nakamoto block: parent block does not have a burnchain view and current block has no tenure tx"; - "consensus_hash" => %next_ready_block.header.consensus_hash, - "stacks_block_hash" => %next_ready_block.header.block_hash(), - "stacks_block_id" => %next_ready_block.header.block_id(), - "parent_block_id" => %next_ready_block.header.parent_block_id - ); - ChainstateError::InvalidStacksBlock("Failed to load burn view of parent block ID".into()) - })? - }; + let burnchain_view = + Self::get_block_burn_view(sort_db, &next_ready_block, &parent_header_info)?; let Some(burnchain_view_sn) = SortitionDB::get_block_snapshot_consensus(sort_db.conn(), &burnchain_view)? 
else { @@ -1972,7 +2015,7 @@ impl NakamotoChainState { )); }; - let (commit_burn, sortition_burn) = if new_tenure { + let (commit_burn, sortition_burn) = if new_tenure && !next_ready_block.is_shadow_block() { // find block-commit to get commit-burn let block_commit = SortitionDB::get_block_commit( sort_db.conn(), @@ -1985,6 +2028,7 @@ impl NakamotoChainState { SortitionDB::get_block_burn_amount(sort_db.conn(), &next_ready_block_snapshot)?; (block_commit.burn_fee, sort_burn) } else { + // non-tenure-change blocks and shadow blocks both have zero additional spends (0, 0) }; @@ -2049,7 +2093,8 @@ impl NakamotoChainState { return Err(e); }; - let (receipt, clarity_commit, reward_set_data) = ok_opt.expect("FATAL: unreachable"); + let (mut receipt, clarity_commit, reward_set_data, phantom_unlock_events) = + ok_opt.expect("FATAL: unreachable"); assert_eq!( receipt.header.anchored_header.block_hash(), @@ -2103,6 +2148,20 @@ impl NakamotoChainState { &receipt.header.anchored_header.block_hash() ); + let tx_receipts = &mut receipt.tx_receipts; + if let Some(unlock_receipt) = + // For the event dispatcher, attach any STXMintEvents that + // could not be included in the block (e.g. because the + // block didn't have a Coinbase transaction). + Self::generate_phantom_unlock_tx( + phantom_unlock_events, + &stacks_chain_state.config(), + next_ready_block.header.chain_length, + ) + { + tx_receipts.push(unlock_receipt); + } + // announce the block, if we're connected to an event dispatcher if let Some(dispatcher) = dispatcher_opt { let block_event = ( @@ -2113,7 +2172,7 @@ impl NakamotoChainState { dispatcher.announce_block( &block_event, &receipt.header.clone(), - &receipt.tx_receipts, + &tx_receipts, &parent_block_id, next_ready_block_snapshot.winning_block_txid, &receipt.matured_rewards, @@ -2171,21 +2230,17 @@ impl NakamotoChainState { Ok(Some(burn_view_sn.total_burn)) } - /// Validate that a Nakamoto block attaches to the burn chain state. 
- /// Called before inserting the block into the staging DB. - /// Wraps `NakamotoBlock::validate_against_burnchain()`, and - /// verifies that all transactions in the block are allowed in this epoch. - pub fn validate_nakamoto_block_burnchain( + /// Verify that the given Nakamoto block attaches to the canonical burnchain fork. + /// Return Ok(snapshot) on success, where `snapshot` is the sortition corresponding to this + /// block's tenure. + /// Return Err(..) otherwise + fn validate_nakamoto_tenure_snapshot( db_handle: &SortitionHandleConn, - expected_burn: Option, block: &NakamotoBlock, - mainnet: bool, - chain_id: u32, - ) -> Result<(), ChainstateError> { + ) -> Result { // find the sortition-winning block commit for this block, as well as the block snapshot // containing the parent block-commit. This is the snapshot that corresponds to when the // miner begain its tenure; it may not be the burnchain tip. - let block_hash = block.header.block_hash(); let consensus_hash = &block.header.consensus_hash; let sort_tip = SortitionDB::get_canonical_burn_chain_tip(db_handle)?; @@ -2194,7 +2249,7 @@ impl NakamotoChainState { let Some(tenure_burn_chain_tip) = SortitionDB::get_block_snapshot_consensus(db_handle, consensus_hash)? else { - warn!("No sortition for {}", &consensus_hash); + warn!("No sortition for {}", consensus_hash); return Err(ChainstateError::InvalidStacksBlock( "No sortition for block's consensus hash".into(), )); @@ -2221,7 +2276,58 @@ impl NakamotoChainState { )); }; - // the block-commit itself + Ok(tenure_burn_chain_tip) + } + + /// Statically validate the block's transactions against the burnchain epoch. + /// Return Ok(()) if they pass all static checks + /// Return Err(..) if not. 
+ fn validate_nakamoto_block_transactions_static( + mainnet: bool, + chain_id: u32, + sortdb_conn: &Connection, + block: &NakamotoBlock, + block_tenure_burn_height: u64, + ) -> Result<(), ChainstateError> { + // check the _next_ block's tenure, since when Nakamoto's miner activates, the current chain tip + // will be in epoch 2.5 (the next block will be epoch 3.0) + let cur_epoch = SortitionDB::get_stacks_epoch(sortdb_conn, block_tenure_burn_height + 1)? + .expect("FATAL: no epoch defined for current Stacks block"); + + // static checks on transactions all pass + let valid = block.validate_transactions_static(mainnet, chain_id, cur_epoch.epoch_id); + if !valid { + warn!( + "Invalid Nakamoto block, transactions failed static checks: {}/{} (epoch {})", + &block.header.consensus_hash, + &block.header.block_hash(), + cur_epoch.epoch_id + ); + return Err(ChainstateError::InvalidStacksBlock( + "Invalid Nakamoto block: failed static transaction checks".into(), + )); + } + + Ok(()) + } + + /// Validate that a normal Nakamoto block attaches to the burn chain state. + /// Called before inserting the block into the staging DB. + /// Wraps `NakamotoBlock::validate_against_burnchain()`, and + /// verifies that all transactions in the block are allowed in this epoch. 
+ pub(crate) fn validate_normal_nakamoto_block_burnchain( + staging_db: NakamotoStagingBlocksConnRef, + db_handle: &SortitionHandleConn, + expected_burn: Option, + block: &NakamotoBlock, + mainnet: bool, + chain_id: u32, + ) -> Result<(), ChainstateError> { + assert!(!block.is_shadow_block()); + + let tenure_burn_chain_tip = Self::validate_nakamoto_tenure_snapshot(db_handle, block)?; + + // block-commit of this sortition let Some(block_commit) = db_handle.get_block_commit_by_txid( &tenure_burn_chain_tip.sortition_id, &tenure_burn_chain_tip.winning_block_txid, @@ -2229,13 +2335,20 @@ impl NakamotoChainState { else { warn!( "No block commit for {} in sortition for {}", - &tenure_burn_chain_tip.winning_block_txid, &consensus_hash + &tenure_burn_chain_tip.winning_block_txid, &block.header.consensus_hash ); return Err(ChainstateError::InvalidStacksBlock( "No block-commit in sortition for block's consensus hash".into(), )); }; + // if the *parent* of this block is a shadow block, then the block-commit's + // parent_vtxindex *MUST* be 0 and the parent_block_ptr *MUST* be the tenure of the + // shadow block. + // + // if the parent is not a shadow block, then this is a no-op. + Self::validate_shadow_parent_burnchain(staging_db, db_handle, block, &block_commit)?; + // key register of the winning miner let leader_key = db_handle .get_leader_key_at( @@ -2244,40 +2357,42 @@ impl NakamotoChainState { )? .expect("FATAL: have block commit but no leader key"); + // miner key hash160. 
+ let miner_pubkey_hash160 = leader_key + .interpret_nakamoto_signing_key() + .ok_or(ChainstateError::NoSuchBlockError) + .map_err(|e| { + warn!( + "Leader key did not contain a hash160 of the miner signing public key"; + "leader_key" => ?leader_key, + ); + e + })?; + // attaches to burn chain - if let Err(e) = - block.validate_against_burnchain(&tenure_burn_chain_tip, expected_burn, &leader_key) - { + if let Err(e) = block.validate_normal_against_burnchain( + &tenure_burn_chain_tip, + expected_burn, + &miner_pubkey_hash160, + &leader_key.public_key, + ) { warn!( "Invalid Nakamoto block, could not validate on burnchain"; - "consensus_hash" => %consensus_hash, - "stacks_block_hash" => %block_hash, + "consensus_hash" => %block.header.consensus_hash, + "stacks_block_hash" => %block.header.block_hash(), "error" => ?e ); return Err(e); } - // check the _next_ block's tenure, since when Nakamoto's miner activates, the current chain tip - // will be in epoch 2.5 (the next block will be epoch 3.0) - let cur_epoch = SortitionDB::get_stacks_epoch( + Self::validate_nakamoto_block_transactions_static( + mainnet, + chain_id, db_handle.deref(), - tenure_burn_chain_tip.block_height + 1, - )? 
- .expect("FATAL: no epoch defined for current Stacks block"); - - // static checks on transactions all pass - let valid = block.validate_transactions_static(mainnet, chain_id, cur_epoch.epoch_id); - if !valid { - warn!( - "Invalid Nakamoto block, transactions failed static checks: {}/{} (epoch {})", - consensus_hash, block_hash, cur_epoch.epoch_id - ); - return Err(ChainstateError::InvalidStacksBlock( - "Invalid Nakamoto block: failed static transaction checks".into(), - )); - } - + block, + tenure_burn_chain_tip.block_height, + )?; Ok(()) } @@ -2365,7 +2480,7 @@ impl NakamotoChainState { db_handle: &mut SortitionHandleConn, staging_db_tx: &NakamotoStagingBlocksTx, headers_conn: &Connection, - reward_set: RewardSet, + reward_set: &RewardSet, obtain_method: NakamotoBlockObtainMethod, ) -> Result { test_debug!("Consider Nakamoto block {}", &block.block_id()); @@ -2397,9 +2512,31 @@ impl NakamotoChainState { // checked on `::append_block()` let expected_burn_opt = Self::get_expected_burns(db_handle, headers_conn, block)?; + if block.is_shadow_block() { + // this block is already present in the staging DB, so just perform some perfunctory + // validation (since they're constructed a priori to be valid) + if let Err(e) = Self::validate_shadow_nakamoto_block_burnchain( + staging_db_tx.conn(), + db_handle, + expected_burn_opt, + block, + config.mainnet, + config.chain_id, + ) { + error!("Unacceptable shadow Nakamoto block"; + "stacks_block_id" => %block.block_id(), + "error" => ?e + ); + panic!("Unacceptable shadow Nakamoto block"); + } + + return Ok(false); + } + + // this block must be consistent with its miner's leader-key and block-commit, and must + // contain only transactions that are valid in this epoch. 
- if let Err(e) = Self::validate_nakamoto_block_burnchain( + if let Err(e) = Self::validate_normal_nakamoto_block_burnchain( + staging_db_tx.conn(), db_handle, expected_burn_opt, block, @@ -2413,7 +2550,7 @@ return Ok(false); }; - let signing_weight = match block.header.verify_signer_signatures(&reward_set) { + let signing_weight = match block.header.verify_signer_signatures(reward_set) { Ok(x) => x, Err(e) => { warn!("Received block, but the signer signatures are invalid"; @@ -2511,6 +2648,24 @@ Ok(None) } + /// Load the block version of a Nakamoto block + pub fn get_nakamoto_block_version( + chainstate_conn: &Connection, + index_block_hash: &StacksBlockId, + ) -> Result, ChainstateError> { + let sql = "SELECT version FROM nakamoto_block_headers WHERE index_block_hash = ?1"; + let args = rusqlite::params![index_block_hash]; + let mut stmt = chainstate_conn.prepare(sql)?; + let result = stmt + .query_row(args, |row| { + let version: u8 = row.get(0)?; + Ok(version) + }) + .optional()?; + + Ok(result) + } + + /// Load the parent block ID of a Nakamoto block pub fn get_nakamoto_parent_block_id( chainstate_conn: &Connection, @@ -2782,6 +2937,12 @@ impl NakamotoChainState { consensus_hash: &ConsensusHash, block_commit_txid: &Txid, ) -> Result { + // is the tip a shadow block (and necessarily a Nakamoto block)? + if let Some(shadow_vrf_proof) = Self::get_shadow_vrf_proof(chainstate_conn, tip_block_id)? { + return Ok(shadow_vrf_proof); + } + + // parent tenure is a normal tenure let sn = SortitionDB::get_block_snapshot_consensus(sortdb_conn, consensus_hash)?.ok_or( ChainstateError::InvalidStacksBlock("No sortition for consensus hash".into()), )?; @@ -2803,7 +2964,10 @@ impl NakamotoChainState { let parent_vrf_proof = Self::get_block_vrf_proof(chainstate_conn, tip_block_id, &parent_sn.consensus_hash)?
- .ok_or(ChainstateError::NoSuchBlockError) + .ok_or_else(|| { + warn!("No VRF proof for {}", &parent_sn.consensus_hash); + ChainstateError::NoSuchBlockError + }) .map_err(|e| { warn!("Could not find parent VRF proof"; "tip_block_id" => %tip_block_id, @@ -2923,6 +3087,11 @@ impl NakamotoChainState { sortdb_conn: &Connection, block: &NakamotoBlock, ) -> Result<(), ChainstateError> { + if block.is_shadow_block() { + // no-op + return Ok(()); + } + // get the block-commit for this block let sn = SortitionDB::get_block_snapshot_consensus(sortdb_conn, &block.header.consensus_hash)? @@ -3526,6 +3695,143 @@ impl NakamotoChainState { )) } + /// Begin block-processing for a normal block and return all of the pre-processed state within a + /// `SetupBlockResult`. Used by the Nakamoto miner, and called by Self::setup_normal_block() + pub fn setup_block<'a, 'b>( + chainstate_tx: &'b mut ChainstateTx, + clarity_instance: &'a mut ClarityInstance, + sortition_dbconn: &'b dyn SortitionDBRef, + first_block_height: u64, + pox_constants: &PoxConstants, + parent_consensus_hash: ConsensusHash, + parent_header_hash: BlockHeaderHash, + parent_burn_height: u32, + burn_header_hash: BurnchainHeaderHash, + burn_header_height: u32, + new_tenure: bool, + coinbase_height: u64, + tenure_extend: bool, + block_bitvec: &BitVec<4000>, + tenure_block_commit: &LeaderBlockCommitOp, + active_reward_set: &RewardSet, + ) -> Result, ChainstateError> { + // this block's bitvec header must match the miner's block commit punishments + Self::check_pox_bitvector(block_bitvec, tenure_block_commit, active_reward_set)?; + Self::inner_setup_block( + chainstate_tx, + clarity_instance, + sortition_dbconn, + first_block_height, + pox_constants, + parent_consensus_hash, + parent_header_hash, + parent_burn_height, + burn_header_hash, + burn_header_height, + new_tenure, + coinbase_height, + tenure_extend, + ) + } + + /// Begin block-processing for a normal block and return all of the pre-processed state within a + /// 
`SetupBlockResult`. + /// + /// Called as part of block processing + fn setup_normal_block_processing<'a, 'b>( + chainstate_tx: &'b mut ChainstateTx, + clarity_instance: &'a mut ClarityInstance, + sortition_dbconn: &'b dyn SortitionDBRef, + first_block_height: u64, + pox_constants: &PoxConstants, + parent_chain_tip: &StacksHeaderInfo, + parent_consensus_hash: ConsensusHash, + parent_header_hash: BlockHeaderHash, + parent_burn_height: u32, + tenure_block_snapshot: BlockSnapshot, + block: &NakamotoBlock, + new_tenure: bool, + coinbase_height: u64, + tenure_extend: bool, + block_bitvec: &BitVec<4000>, + active_reward_set: &RewardSet, + ) -> Result, ChainstateError> { + let burn_header_hash = tenure_block_snapshot.burn_header_hash.clone(); + let burn_header_height = + u32::try_from(tenure_block_snapshot.block_height).map_err(|_| { + ChainstateError::InvalidStacksBlock( + "Could not downcast burn block height to u32".into(), + ) + })?; + let tenure_block_commit = SortitionDB::get_block_commit( + sortition_dbconn.sqlite_conn(), + &tenure_block_snapshot.winning_block_txid, + &tenure_block_snapshot.sortition_id, + )? + .ok_or_else(|| { + warn!("Invalid Nakamoto block: has no block-commit in its sortition"; + "consensus_hash" => %block.header.consensus_hash, + "stacks_block_hash" => %block.header.block_hash(), + "stacks_block_id" => %block.header.block_id(), + "sortition_id" => %tenure_block_snapshot.sortition_id, + "block_commit_txid" => %tenure_block_snapshot.winning_block_txid + ); + ChainstateError::NoSuchBlockError + })?; + + // this block's tenure's block-commit contains the hash of the parent tenure's tenure-start + // block. 
+ // (note that we can't check this earlier, since we need the parent tenure to have been + // processed) + if new_tenure && parent_chain_tip.is_nakamoto_block() && !block.is_first_mined() { + let parent_block_id = StacksBlockId::new(&parent_consensus_hash, &parent_header_hash); + let parent_tenure_start_header = Self::get_nakamoto_tenure_start_block_header( + chainstate_tx.as_tx(), + &parent_block_id, + &parent_consensus_hash, + )? + .ok_or_else(|| { + warn!("Invalid Nakamoto block: no start-tenure block for parent"; + "parent_consensus_hash" => %parent_consensus_hash, + "consensus_hash" => %block.header.consensus_hash, + "stacks_block_hash" => %block.header.block_hash(), + "stacks_block_id" => %block.header.block_id()); + ChainstateError::NoSuchBlockError + })?; + + if parent_tenure_start_header.index_block_hash() != tenure_block_commit.last_tenure_id() + { + warn!("Invalid Nakamoto block: its tenure's block-commit's block ID hash does not match its parent tenure's start block"; + "parent_consensus_hash" => %parent_consensus_hash, + "parent_tenure_start_block_id" => %parent_tenure_start_header.index_block_hash(), + "block_commit.last_tenure_id" => %tenure_block_commit.last_tenure_id(), + "parent_tip" => %parent_block_id, + ); + test_debug!("Faulty commit: {:?}", &tenure_block_commit); + + return Err(ChainstateError::NoSuchBlockError); + } + } + Self::setup_block( + chainstate_tx, + clarity_instance, + sortition_dbconn, + first_block_height, + pox_constants, + parent_consensus_hash, + parent_header_hash, + parent_burn_height, + burn_header_hash, + burn_header_height, + new_tenure, + coinbase_height, + tenure_extend, + block_bitvec, + &tenure_block_commit, + active_reward_set, + ) + } + /// Begin block-processing and return all of the pre-processed state within a /// `SetupBlockResult`. 
/// @@ -3550,15 +3856,12 @@ impl NakamotoChainState { /// * coinbase_height: the number of tenures that this block confirms (including epoch2 blocks) /// (this is equivalent to the number of coinbases) /// * tenure_extend: whether or not to reset the tenure's ongoing execution cost - /// * block_bitvec: the bitvec that will control PoX reward handling for this block - /// * tenure_block_commit: the block commit that elected this miner - /// * active_reward_set: the reward and signer set active during `tenure_block_commit` /// /// Returns clarity_tx, list of receipts, microblock execution cost, /// microblock fees, microblock burns, list of microblock tx receipts, /// miner rewards tuples, the stacks epoch id, and a boolean that /// represents whether the epoch transition has been applied. - pub fn setup_block<'a, 'b>( + fn inner_setup_block<'a, 'b>( chainstate_tx: &'b mut ChainstateTx, clarity_instance: &'a mut ClarityInstance, sortition_dbconn: &'b dyn SortitionDBRef, @@ -3566,19 +3869,13 @@ impl NakamotoChainState { pox_constants: &PoxConstants, parent_consensus_hash: ConsensusHash, parent_header_hash: BlockHeaderHash, - _parent_stacks_height: u64, parent_burn_height: u32, burn_header_hash: BurnchainHeaderHash, burn_header_height: u32, new_tenure: bool, coinbase_height: u64, tenure_extend: bool, - block_bitvec: &BitVec<4000>, - tenure_block_commit: &LeaderBlockCommitOp, - active_reward_set: &RewardSet, ) -> Result, ChainstateError> { - Self::check_pox_bitvector(block_bitvec, tenure_block_commit, active_reward_set)?; - let parent_index_hash = StacksBlockId::new(&parent_consensus_hash, &parent_header_hash); let parent_sortition_id = sortition_dbconn .get_sortition_id_from_consensus_hash(&parent_consensus_hash) @@ -3614,7 +3911,7 @@ impl NakamotoChainState { // Nakamoto must load block cost from parent if this block isn't a tenure change. // If this is a tenure-extend, then the execution cost is reset. 
let initial_cost = if new_tenure || tenure_extend { - ExecutionCost::zero() + ExecutionCost::ZERO } else { let parent_cost_total = Self::get_total_tenure_cost_at(chainstate_tx.as_tx(), &parent_index_hash)? @@ -3814,79 +4111,84 @@ impl NakamotoChainState { Ok(lockup_events) } + /// Verify that the PoX bitvector from the block header is consistent with the block-commit's + /// PoX outputs, as determined by the active reward set and whether or not the 0's in the + /// bitvector correspond to signers' PoX outputs. fn check_pox_bitvector( block_bitvec: &BitVec<4000>, tenure_block_commit: &LeaderBlockCommitOp, active_reward_set: &RewardSet, ) -> Result<(), ChainstateError> { - if !tenure_block_commit.treatment.is_empty() { - let address_to_indeces: HashMap<_, Vec<_>> = active_reward_set - .rewarded_addresses + if tenure_block_commit.treatment.is_empty() { + return Ok(()); + } + + let address_to_indeces: HashMap<_, Vec<_>> = active_reward_set + .rewarded_addresses + .iter() + .enumerate() + .fold(HashMap::new(), |mut map, (ix, addr)| { + map.entry(addr).or_insert_with(Vec::new).push(ix); + map + }); + + // our block commit issued a punishment, check the reward set and bitvector + // to ensure that this was valid. + for treated_addr in tenure_block_commit.treatment.iter() { + if treated_addr.is_burn() { + // Don't need to assert anything about burn addresses. + // If they were in the reward set, "punishing" them is meaningless. + continue; + } + // otherwise, we need to find the indices in the rewarded_addresses + // corresponding to this address. + let empty_vec = vec![]; + let address_indices = address_to_indeces + .get(treated_addr.deref()) + .unwrap_or(&empty_vec); + + // if any of them are 0, punishment is okay. + // if all of them are 1, punishment is not okay. 
+ // if all of them are 0, *must* have punished + let bitvec_values: Result, ChainstateError> = address_indices .iter() - .enumerate() - .fold(HashMap::new(), |mut map, (ix, addr)| { - map.entry(addr).or_insert_with(Vec::new).push(ix); - map - }); - - // our block commit issued a punishment, check the reward set and bitvector - // to ensure that this was valid. - for treated_addr in tenure_block_commit.treatment.iter() { - if treated_addr.is_burn() { - // Don't need to assert anything about burn addresses. - // If they were in the reward set, "punishing" them is meaningless. - continue; - } - // otherwise, we need to find the indices in the rewarded_addresses - // corresponding to this address. - let empty_vec = vec![]; - let address_indices = address_to_indeces - .get(treated_addr.deref()) - .unwrap_or(&empty_vec); - - // if any of them are 0, punishment is okay. - // if all of them are 1, punishment is not okay. - // if all of them are 0, *must* have punished - let bitvec_values: Result, ChainstateError> = address_indices - .iter() - .map( - |ix| { - let ix = u16::try_from(*ix) - .map_err(|_| ChainstateError::InvalidStacksBlock("Reward set index outside of u16".into()))?; - let bitvec_value = block_bitvec.get(ix) - .unwrap_or_else(|| { - warn!("Block header's bitvec is smaller than the reward set, defaulting higher indexes to 1"); - true - }); - Ok(bitvec_value) - } - ) - .collect(); - let bitvec_values = bitvec_values?; - let all_1 = bitvec_values.iter().all(|x| *x); - let all_0 = bitvec_values.iter().all(|x| !x); - if all_1 { - if treated_addr.is_punish() { - warn!( - "Invalid Nakamoto block: punished PoX address when bitvec contained 1s for the address"; - "reward_address" => %treated_addr.deref(), - "bitvec_values" => ?bitvec_values, - ); - return Err(ChainstateError::InvalidStacksBlock( - "Bitvec does not match the block commit's PoX handling".into(), - )); - } - } else if all_0 { - if treated_addr.is_reward() { - warn!( - "Invalid Nakamoto block: rewarded 
PoX address when bitvec contained 0s for the address"; - "reward_address" => %treated_addr.deref(), - "bitvec_values" => ?bitvec_values, - ); - return Err(ChainstateError::InvalidStacksBlock( - "Bitvec does not match the block commit's PoX handling".into(), - )); + .map( + |ix| { + let ix = u16::try_from(*ix) + .map_err(|_| ChainstateError::InvalidStacksBlock("Reward set index outside of u16".into()))?; + let bitvec_value = block_bitvec.get(ix) + .unwrap_or_else(|| { + warn!("Block header's bitvec is smaller than the reward set, defaulting higher indexes to 1"); + true + }); + Ok(bitvec_value) } + ) + .collect(); + let bitvec_values = bitvec_values?; + let all_1 = bitvec_values.iter().all(|x| *x); + let all_0 = bitvec_values.iter().all(|x| !x); + if all_1 { + if treated_addr.is_punish() { + warn!( + "Invalid Nakamoto block: punished PoX address when bitvec contained 1s for the address"; + "reward_address" => %treated_addr.deref(), + "bitvec_values" => ?bitvec_values, + ); + return Err(ChainstateError::InvalidStacksBlock( + "Bitvec does not match the block commit's PoX handling".into(), + )); + } + } else if all_0 { + if treated_addr.is_reward() { + warn!( + "Invalid Nakamoto block: rewarded PoX address when bitvec contained 0s for the address"; + "reward_address" => %treated_addr.deref(), + "bitvec_values" => ?bitvec_values, + ); + return Err(ChainstateError::InvalidStacksBlock( + "Bitvec does not match the block commit's PoX handling".into(), + )); } } } @@ -3906,11 +4208,13 @@ impl NakamotoChainState { applied_epoch_transition: bool, signers_updated: bool, coinbase_height: u64, + phantom_lockup_events: Vec, ) -> Result< ( StacksEpochReceipt, PreCommitClarityBlock<'a>, Option, + Vec, ), ChainstateError, > { @@ -3936,7 +4240,7 @@ impl NakamotoChainState { tx_receipts, matured_rewards, matured_rewards_info: matured_rewards_info_opt, - parent_microblocks_cost: ExecutionCost::zero(), + parent_microblocks_cost: ExecutionCost::ZERO, anchored_block_cost: 
block_execution_cost, parent_burn_block_hash, parent_burn_block_height: u32::try_from(parent_burn_block_height).unwrap_or(0), // shouldn't be fatal @@ -3947,7 +4251,7 @@ impl NakamotoChainState { coinbase_height, }; - return Ok((epoch_receipt, clarity_commit, None)); + return Ok((epoch_receipt, clarity_commit, None, phantom_lockup_events)); } /// Append a Nakamoto Stacks block to the Stacks chain state. @@ -3973,6 +4277,7 @@ impl NakamotoChainState { StacksEpochReceipt, PreCommitClarityBlock<'a>, Option, + Vec, ), ChainstateError, > { @@ -4013,8 +4318,6 @@ impl NakamotoChainState { // It must exist in the same Bitcoin fork as our `burn_dbconn`. let tenure_block_snapshot = Self::check_sortition_exists(burn_dbconn, &block.header.consensus_hash)?; - let burn_header_hash = tenure_block_snapshot.burn_header_hash.clone(); - let burn_header_height = tenure_block_snapshot.block_height; let block_hash = block.header.block_hash(); let new_tenure = block.is_wellformed_tenure_start_block().map_err(|_| { @@ -4091,54 +4394,6 @@ impl NakamotoChainState { )); } - // this block's bitvec header must match the miner's block commit punishments - let tenure_block_commit = SortitionDB::get_block_commit( - burn_dbconn.conn(), - &tenure_block_snapshot.winning_block_txid, - &tenure_block_snapshot.sortition_id, - )? - .ok_or_else(|| { - warn!("Invalid Nakamoto block: has no block-commit in its sortition"; - "consensus_hash" => %block.header.consensus_hash, - "stacks_block_hash" => %block.header.block_hash(), - "stacks_block_id" => %block.header.block_id(), - "sortition_id" => %tenure_block_snapshot.sortition_id, - "block_commit_txid" => %tenure_block_snapshot.winning_block_txid - ); - ChainstateError::NoSuchBlockError - })?; - - // this block's tenure's block-commit contains the hash of the parent tenure's tenure-start - // block. 
- // (note that we can't check this earlier, since we need the parent tenure to have been - // processed) - if new_tenure && parent_chain_tip.is_nakamoto_block() && !block.is_first_mined() { - let parent_tenure_start_header = Self::get_nakamoto_tenure_start_block_header( - chainstate_tx.as_tx(), - &parent_block_id, - &parent_ch, - )? - .ok_or_else(|| { - warn!("Invalid Nakamoto block: no start-tenure block for parent"; - "parent_consensus_hash" => %parent_ch, - "consensus_hash" => %block.header.consensus_hash, - "stacks_block_hash" => %block.header.block_hash(), - "stacks_block_id" => %block.header.block_id()); - ChainstateError::NoSuchBlockError - })?; - - if parent_tenure_start_header.index_block_hash() != tenure_block_commit.last_tenure_id() - { - warn!("Invalid Nakamoto block: its tenure's block-commit's block ID hash does not match its parent tenure's start block"; - "parent_consensus_hash" => %parent_ch, - "parent_tenure_start_block_id" => %parent_tenure_start_header.index_block_hash(), - "block_commit.last_tenure_id" => %tenure_block_commit.last_tenure_id() - ); - - return Err(ChainstateError::NoSuchBlockError); - } - } - // verify VRF proof, if present // only need to do this once per tenure // get the resulting vrf proof bytes @@ -4198,27 +4453,43 @@ impl NakamotoChainState { mut auto_unlock_events, signer_set_calc, burn_vote_for_aggregate_key_ops, - } = Self::setup_block( - chainstate_tx, - clarity_instance, - burn_dbconn, - first_block_height, - pox_constants, - parent_ch, - parent_block_hash, - parent_chain_tip.stacks_block_height, - parent_chain_tip.burn_header_height, - burn_header_hash, - burn_header_height.try_into().map_err(|_| { - ChainstateError::InvalidStacksBlock("Burn block height exceeded u32".into()) - })?, - new_tenure, - coinbase_height, - tenure_extend, - &block.header.pox_treatment, - &tenure_block_commit, - active_reward_set, - )?; + } = if block.is_shadow_block() { + // shadow block + Self::setup_shadow_block_processing( + 
chainstate_tx, + clarity_instance, + burn_dbconn, + first_block_height, + pox_constants, + parent_ch, + parent_block_hash, + parent_chain_tip.burn_header_height, + tenure_block_snapshot, + new_tenure, + coinbase_height, + tenure_extend, + )? + } else { + // normal block + Self::setup_normal_block_processing( + chainstate_tx, + clarity_instance, + burn_dbconn, + first_block_height, + pox_constants, + &parent_chain_tip, + parent_ch, + parent_block_hash, + parent_chain_tip.burn_header_height, + tenure_block_snapshot, + block, + new_tenure, + coinbase_height, + tenure_extend, + &block.header.pox_treatment, + active_reward_set, + )? + }; let starting_cost = clarity_tx.cost_so_far(); @@ -4274,18 +4545,20 @@ impl NakamotoChainState { Ok(lockup_events) => lockup_events, }; - // if any, append lockups events to the coinbase receipt - if lockup_events.len() > 0 { + // If any, append lockups events to the coinbase receipt + if let Some(receipt) = tx_receipts.get_mut(0) { // Receipts are appended in order, so the first receipt should be // the one of the coinbase transaction - if let Some(receipt) = tx_receipts.get_mut(0) { - if receipt.is_coinbase_tx() { - receipt.events.append(&mut lockup_events); - } - } else { - warn!("Unable to attach lockups events, block's first transaction is not a coinbase transaction") + if receipt.is_coinbase_tx() { + receipt.events.append(&mut lockup_events); } } + + // If lockup_events still contains items, it means they weren't attached + if !lockup_events.is_empty() { + info!("Unable to attach lockup events, block's first transaction is not a coinbase transaction. 
Will attach as a phantom tx."); + } + // if any, append auto unlock events to the coinbase receipt if auto_unlock_events.len() > 0 { // Receipts are appended in order, so the first receipt should be @@ -4358,6 +4631,7 @@ impl NakamotoChainState { applied_epoch_transition, signer_set_calc.is_some(), coinbase_height, + lockup_events, ); } @@ -4460,7 +4734,7 @@ impl NakamotoChainState { tx_receipts, matured_rewards, matured_rewards_info: matured_rewards_info_opt, - parent_microblocks_cost: ExecutionCost::zero(), + parent_microblocks_cost: ExecutionCost::ZERO, anchored_block_cost: block_execution_cost, parent_burn_block_hash, parent_burn_block_height: u32::try_from(parent_burn_block_height).unwrap_or(0), // shouldn't be fatal @@ -4471,7 +4745,12 @@ impl NakamotoChainState { coinbase_height, }; - Ok((epoch_receipt, clarity_commit, reward_set_data)) + Ok(( + epoch_receipt, + clarity_commit, + reward_set_data, + lockup_events, + )) } /// Create a StackerDB config for the .miners contract. @@ -4632,6 +4911,53 @@ impl NakamotoChainState { clarity.save_analysis(&contract_id, &analysis).unwrap(); }) } + + /// Generate a "phantom" transaction to include STXMintEvents for + /// lockups that could not be attached to a Coinbase transaction + /// (because the block doesn't have a Coinbase transaction). + fn generate_phantom_unlock_tx( + events: Vec, + config: &ChainstateConfig, + stacks_block_height: u64, + ) -> Option { + if events.is_empty() { + return None; + } + info!("Generating phantom unlock tx"); + let version = if config.mainnet { + TransactionVersion::Mainnet + } else { + TransactionVersion::Testnet + }; + + // Make the txid unique -- the phantom tx payload should include something block-specific otherwise + // they will always have the same txid. In this case we use the block height in the memo. This also + // happens to give some indication of the purpose of this phantom tx, for anyone looking. 
+ let memo = TokenTransferMemo({ + let str = format!("Block {} token unlocks", stacks_block_height); + let mut buf = [0u8; 34]; + buf[..str.len().min(34)].copy_from_slice(&str.as_bytes()[..]); + buf + }); + let boot_code_address = boot_code_addr(config.mainnet); + let boot_code_auth = boot_code_tx_auth(boot_code_address.clone()); + let unlock_tx = StacksTransaction::new( + version, + boot_code_auth, + TransactionPayload::TokenTransfer( + PrincipalData::Standard(boot_code_address.into()), + 0, + memo, + ), + ); + let unlock_receipt = StacksTransactionReceipt::from_stx_transfer( + unlock_tx, + events, + Value::okay_true(), + ExecutionCost::ZERO, + ); + Some(unlock_receipt) + } } impl StacksMessageCodec for NakamotoBlock { diff --git a/stackslib/src/chainstate/nakamoto/shadow.rs b/stackslib/src/chainstate/nakamoto/shadow.rs new file mode 100644 index 0000000000..cdc099e120 --- /dev/null +++ b/stackslib/src/chainstate/nakamoto/shadow.rs @@ -0,0 +1,1008 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +use clarity::vm::costs::ExecutionCost; +use rusqlite::params; +/// Shadow blocks +/// +/// In the event of an emergency chain halt, a SIP will be written to declare that a chain halt has +/// happened, and what transactions and blocks (if any) need to be mined at which burnchain block +/// heights to recover the chain. +/// +/// If this remedy is necessary, these blocks will be mined into one or more _shadow_ blocks and +/// _shadow_ tenures. +/// +/// Shadow blocks are blocks that are inserted directly into the staging blocks DB as part of a +/// schema update. They are neither mined nor relayed. Instead, they are synthesized as part of an +/// emergency node upgrade in order to ensure that the conditions which lead to the chain stall +/// never occur. +/// +/// For example, if a prepare phase is mined without a single block-commit hitting the Bitcoin +/// chain, a pair of shadow block tenures will be synthesized to create a PoX anchor block and +/// restore the chain's liveness. As another example, if insufficiently many STX are locked in PoX +/// to get a healthy set of signers, a shadow block can be synthesized with extra `stack-stx` +/// transactions submitted from healthy stackers in order to create a suitable PoX reward set. +/// +/// This module contains shadow block-specific logic for the Nakamoto block header, Nakamoto block, +/// Nakamoto chainstate, and Nakamoto miner structures. 
+use rusqlite::Connection; +use stacks_common::codec::StacksMessageCodec; +use stacks_common::types::chainstate::{ + BlockHeaderHash, ConsensusHash, StacksAddress, StacksBlockId, StacksPrivateKey, StacksPublicKey, +}; +use stacks_common::util::hash::Hash160; +use stacks_common::util::vrf::VRFProof; + +use crate::burnchains::PoxConstants; +use crate::chainstate::nakamoto::miner::{MinerTenureInfo, NakamotoBlockBuilder}; +use crate::chainstate::nakamoto::{ + BlockSnapshot, ChainstateError, LeaderBlockCommitOp, NakamotoBlock, NakamotoBlockHeader, + NakamotoBlockObtainMethod, NakamotoChainState, NakamotoStagingBlocksConn, + NakamotoStagingBlocksConnRef, NakamotoStagingBlocksTx, SetupBlockResult, SortitionDB, + SortitionHandleConn, StacksDBIndexed, +}; +use crate::chainstate::stacks::boot::RewardSet; +use crate::chainstate::stacks::db::blocks::DummyEventDispatcher; +use crate::chainstate::stacks::db::{ + ChainstateTx, ClarityTx, StacksAccount, StacksChainState, StacksHeaderInfo, +}; +use crate::chainstate::stacks::miner::{ + BlockBuilder, BlockLimitFunction, TransactionError, TransactionProblematic, TransactionResult, + TransactionSkipped, +}; +use crate::chainstate::stacks::{ + CoinbasePayload, Error, StacksTransaction, StacksTransactionSigner, TenureChangeCause, + TenureChangePayload, TransactionAnchorMode, TransactionAuth, TransactionPayload, + TransactionVersion, +}; +use crate::clarity::vm::types::StacksAddressExtensions; +use crate::clarity_vm::clarity::ClarityInstance; +use crate::clarity_vm::database::SortitionDBRef; +use crate::net::Error as NetError; +use crate::util_lib::db::{query_row, u64_to_sql, Error as DBError}; + +impl NakamotoBlockHeader { + /// Is this a shadow block? + /// + /// This is a special kind of block that is directly inserted into the chainstate by means of a + /// consensus rule. It won't be downloaded or broadcasted, but every node will have it. 
They + /// get created as a result of a consensus-level SIP in order to restore the chain to working + /// order. + /// + /// Shadow blocks have the high bit of their version field set. + pub fn is_shadow_block(&self) -> bool { + Self::is_shadow_block_version(self.version) + } + + /// Is a block version a shadow block version? + pub fn is_shadow_block_version(version: u8) -> bool { + version & 0x80 != 0 + } + + /// Get the signing weight of a shadow block + pub fn get_shadow_signer_weight(&self, reward_set: &RewardSet) -> Result { + let Some(signers) = &reward_set.signers else { + return Err(ChainstateError::InvalidStacksBlock( + "No signers in the reward set".to_string(), + )); + }; + let shadow_weight = signers + .iter() + .fold(0u32, |acc, signer| acc.saturating_add(signer.weight)); + + Ok(shadow_weight) + } +} + +impl NakamotoBlock { + /// Is this block a shadow block? + /// Check the header + pub fn is_shadow_block(&self) -> bool { + self.header.is_shadow_block() + } + + /// Verify that if this shadow block has a coinbase, that its VRF proof is consistent with the leader + /// public key's VRF key. If there is no coinbase tx, then this is a no-op. + pub(crate) fn check_shadow_coinbase_tx(&self, mainnet: bool) -> Result<(), ChainstateError> { + if !self.is_shadow_block() { + error!( + "FATAL: tried to validate non-shadow block in a shadow-block-specific validator" + ); + panic!(); + } + + // If this shadow block has a coinbase, then verify that it has a VRF proof (which will be + // verified later) and that its recipient is the burn address. Shadow blocks do not award + // STX. 
+ if let Some(coinbase_tx) = self.get_coinbase_tx() { + let (_, recipient_opt, vrf_proof_opt) = coinbase_tx + .try_as_coinbase() + .expect("FATAL: `get_coinbase_tx()` did not return a coinbase"); + + if vrf_proof_opt.is_none() { + return Err(ChainstateError::InvalidStacksBlock( + "Shadow Nakamoto coinbase must have a VRF proof".into(), + )); + } + + let Some(recipient) = recipient_opt else { + warn!("Invalid shadow block: no recipient"); + return Err(ChainstateError::InvalidStacksBlock( + "Shadow block did not pay to burn address".into(), + )); + }; + + // must be the standard burn address for this network + let burn_addr = StacksAddress::burn_address(mainnet).to_account_principal(); + if burn_addr != *recipient { + warn!("Invalid shadow block: recipient does not burn"); + return Err(ChainstateError::InvalidStacksBlock( + "Shadow block did not pay to burn address".into(), + )); + } + + // can't check the VRF proof because the creator of the shadow block (e.g. the SIP + // process) isn't a miner, so it could be anything. + } + Ok(()) + } + + /// Validate this Nakamoto shadow block header against burnchain state. + /// + /// Arguments + /// -- `mainnet`: whether or not the chain is mainnet + /// -- `tenure_burn_chain_tip` is the BlockSnapshot containing the block-commit for this block's + /// tenure. It is not always the tip of the burnchain. + /// -- `expected_burn` is the total number of burnchain tokens spent, if known. 
+ /// + /// Verifies the following: + /// -- (self.header.consensus_hash) that this block falls into this block-commit's tenure + /// -- (self.header.burn_spent) that this block's burn total matches `burn_tip`'s total burn + /// -- if this block has a tenure change, then it's consistent with the miner's public key and + /// self.header.consensus_hash + /// + /// NOTE: unlike normal blocks, we do not need to verify the VRF proof or miner signature + pub(crate) fn validate_shadow_against_burnchain( + &self, + mainnet: bool, + tenure_burn_chain_tip: &BlockSnapshot, + expected_burn: Option, + ) -> Result<(), ChainstateError> { + if !self.is_shadow_block() { + error!( + "FATAL: tried to validate non-shadow block in a shadow-block-specific validator" + ); + panic!(); + } + self.common_validate_against_burnchain(tenure_burn_chain_tip, expected_burn)?; + self.check_tenure_tx()?; + self.check_shadow_coinbase_tx(mainnet)?; + + // not verified by this method: + // * chain_length (need parent block header) + // * parent_block_id (need parent block header) + // * block-commit seed (need parent block) + // * tx_merkle_root (already verified; validated on deserialization) + // * state_index_root (validated on process_block()) + // * stacker signature (validated on accept_block()) + Ok(()) + } +} + +impl NakamotoChainState { + /// Verify that the shadow parent of a normal block is consistent with the normal block's + /// tenure's block-commit. + /// + /// * the block-commit vtxindex must be 0 (i.e. burnchain coinbase) + /// * the block-commit block ptr must be the shadow parent tenure's sortition + /// + /// Returns Ok(()) if the parent is _not_ a shadow block + /// Returns Ok(()) if the parent is a shadow block, and the above criteria are met + /// Returns Err(ChainstateError::InvalidStacksBlock(..)) if the parent is a shadow block, and + /// some of the criteria above are false + /// Returns Err(..) 
on other (DB-related) errors + pub(crate) fn validate_shadow_parent_burnchain( + staging_db: NakamotoStagingBlocksConnRef, + db_handle: &SortitionHandleConn, + block: &NakamotoBlock, + block_commit: &LeaderBlockCommitOp, + ) -> Result<(), ChainstateError> { + // only applies if the parent is a nakamoto block (since all shadow blocks are nakamoto + // blocks) + let Some(parent_header) = + staging_db.get_nakamoto_block_header(&block.header.parent_block_id)? + else { + return Ok(()); + }; + + if !parent_header.is_shadow_block() { + return Ok(()); + } + + if block_commit.parent_vtxindex != 0 { + warn!("Invalid Nakamoto block: parent {} of {} is a shadow block but block-commit vtxindex is {}", &parent_header.block_id(), &block.block_id(), block_commit.parent_vtxindex); + return Err(ChainstateError::InvalidStacksBlock("Invalid Nakamoto block: invalid block-commit parent vtxindex for parent shadow block".into())); + } + let Some(parent_sn) = + SortitionDB::get_block_snapshot_consensus(db_handle, &parent_header.consensus_hash)? + else { + warn!( + "Invalid Nakamoto block: No sortition for parent shadow block {}", + &block.header.parent_block_id + ); + return Err(ChainstateError::InvalidStacksBlock( + "Invalid Nakamoto block: parent shadow block has no sortition".into(), + )); + }; + if u64::from(block_commit.parent_block_ptr) != parent_sn.block_height { + warn!("Invalid Nakamoto block: parent {} of {} is a shadow block but block-commit parent ptr is {}", &parent_header.block_id(), &block.block_id(), block_commit.parent_block_ptr); + return Err(ChainstateError::InvalidStacksBlock("Invalid Nakamoto block: invalid block-commit parent block ptr for parent shadow block".into())); + } + + Ok(()) + } + + /// Validate a shadow Nakamoto block against burnchain state. + /// Wraps `NakamotoBlock::validate_shadow_against_burnchain()`, and + /// verifies that all transactions in the block are allowed in this epoch. 
+ pub(crate) fn validate_shadow_nakamoto_block_burnchain( + staging_db: NakamotoStagingBlocksConnRef, + db_handle: &SortitionHandleConn, + expected_burn: Option, + block: &NakamotoBlock, + mainnet: bool, + chain_id: u32, + ) -> Result<(), ChainstateError> { + if !block.is_shadow_block() { + error!( + "FATAL: tried to validate non-shadow block in a shadow-block-specific validator" + ); + panic!(); + } + + // this block must already be stored + if !staging_db.has_shadow_nakamoto_block_with_index_hash(&block.block_id())? { + warn!("Invalid shadow Nakamoto block, must already be stored"; + "consensus_hash" => %block.header.consensus_hash, + "stacks_block_hash" => %block.header.block_hash(), + "block_id" => %block.header.block_id() + ); + + return Err(ChainstateError::InvalidStacksBlock( + "Shadow block must already be stored".into(), + )); + } + + let tenure_burn_chain_tip = Self::validate_nakamoto_tenure_snapshot(db_handle, block)?; + if let Err(e) = + block.validate_shadow_against_burnchain(mainnet, &tenure_burn_chain_tip, expected_burn) + { + warn!( + "Invalid shadow Nakamoto block, could not validate on burnchain"; + "consensus_hash" => %block.header.consensus_hash, + "stacks_block_hash" => %block.header.block_hash(), + "block_id" => %block.header.block_id(), + "error" => ?e + ); + + return Err(e); + } + Self::validate_nakamoto_block_transactions_static( + mainnet, + chain_id, + db_handle.conn(), + block, + tenure_burn_chain_tip.block_height, + )?; + Ok(()) + } + + /// Load the stored VRF proof for the given shadow block's tenure. + /// + /// Returns Ok(Some(vrf proof)) on success + /// Returns Ok(None) if the parent tenure isn't a shadow tenure + pub(crate) fn get_shadow_vrf_proof( + chainstate_conn: &mut SDBI, + tip_block_id: &StacksBlockId, + ) -> Result, ChainstateError> { + // is the tip a shadow block (and necessarily a Nakamoto block)? + let Some(parent_version) = + NakamotoChainState::get_nakamoto_block_version(chainstate_conn.sqlite(), tip_block_id)? 
+ else { + return Ok(None); + }; + + if !NakamotoBlockHeader::is_shadow_block_version(parent_version) { + return Ok(None); + } + + // this is a shadow block + let tenure_consensus_hash = NakamotoChainState::get_block_header_nakamoto_tenure_id( + chainstate_conn.sqlite(), + tip_block_id, + )? + .ok_or_else(|| { + warn!("No tenure consensus hash for block {}", tip_block_id); + ChainstateError::NoSuchBlockError + })?; + + // the shadow tenure won't have a block-commit, but we just found its tenure ID anyway + debug!( + "Load VRF proof for shadow tenure {}", + &tenure_consensus_hash + ); + let vrf_proof = + Self::get_block_vrf_proof(chainstate_conn, tip_block_id, &tenure_consensus_hash)? + .ok_or_else(|| { + warn!("No VRF proof for {}", &tenure_consensus_hash); + ChainstateError::NoSuchBlockError + }) + .map_err(|e| { + warn!("Could not find shadow tenure VRF proof"; + "tip_block_id" => %tip_block_id, + "shadow consensus_hash" => %tenure_consensus_hash); + e + })?; + + return Ok(Some(vrf_proof)); + } + + /// Begin block-processing for a shadow block and return all of the pre-processed state within a + /// `SetupBlockResult`. 
+ /// + /// Called to begin processing a shadow block + pub(crate) fn setup_shadow_block_processing<'a, 'b>( + chainstate_tx: &'b mut ChainstateTx, + clarity_instance: &'a mut ClarityInstance, + sortition_dbconn: &'b dyn SortitionDBRef, + first_block_height: u64, + pox_constants: &PoxConstants, + parent_consensus_hash: ConsensusHash, + parent_header_hash: BlockHeaderHash, + parent_burn_height: u32, + tenure_block_snapshot: BlockSnapshot, + new_tenure: bool, + coinbase_height: u64, + tenure_extend: bool, + ) -> Result, ChainstateError> { + let burn_header_hash = &tenure_block_snapshot.burn_header_hash; + let burn_header_height = + u32::try_from(tenure_block_snapshot.block_height).map_err(|_| { + ChainstateError::InvalidStacksBlock( + "Failed to downcast burn block height to u32".into(), + ) + })?; + let block_consensus_hash = &tenure_block_snapshot.consensus_hash; + + let parent_block_id = StacksBlockId::new(&parent_consensus_hash, &parent_header_hash); + + // tenure start header must exist and be processed + let _ = Self::get_nakamoto_tenure_start_block_header( + chainstate_tx.as_tx(), + &parent_block_id, + &parent_consensus_hash, + )? + .ok_or_else(|| { + warn!("Invalid shadow Nakamoto block: no start-tenure block for parent"; + "parent_consensus_hash" => %parent_consensus_hash, + "consensus_hash" => %block_consensus_hash + ); + ChainstateError::NoSuchBlockError + })?; + + Self::inner_setup_block( + chainstate_tx, + clarity_instance, + sortition_dbconn, + first_block_height, + pox_constants, + parent_consensus_hash, + parent_header_hash, + parent_burn_height, + burn_header_hash.clone(), + burn_header_height, + new_tenure, + coinbase_height, + tenure_extend, + ) + } +} + +impl NakamotoBlockBuilder { + /// This function should be called before `tenure_begin`. + /// It creates a MinerTenureInfo struct which owns connections to the chainstate and sortition + /// DBs, so that block-processing is guaranteed to terminate before the lives of these handles + /// expire. 
+ /// + /// It's used to create shadow blocks. + pub(crate) fn shadow_load_tenure_info<'a>( + &self, + chainstate: &'a mut StacksChainState, + burn_dbconn: &'a SortitionHandleConn, + cause: Option, + ) -> Result, Error> { + self.inner_load_tenure_info(chainstate, burn_dbconn, cause, true) + } + + /// Begin/resume mining a shadow tenure's transactions. + /// Returns an open ClarityTx for mining the block. + /// NOTE: even though we don't yet know the block hash, the Clarity VM ensures that a + /// transaction can't query information about the _current_ block (i.e. information that is not + /// yet known). + pub fn shadow_tenure_begin<'a, 'b>( + &mut self, + burn_dbconn: &'a SortitionHandleConn, + info: &'b mut MinerTenureInfo<'a>, + tenure_id_consensus_hash: &ConsensusHash, + ) -> Result, Error> { + let tenure_snapshot = SortitionDB::get_block_snapshot_consensus( + burn_dbconn.conn(), + tenure_id_consensus_hash, + )? + .ok_or_else(|| Error::NoSuchBlockError)?; + + let SetupBlockResult { + clarity_tx, + matured_miner_rewards_opt, + .. + } = NakamotoChainState::setup_shadow_block_processing( + &mut info.chainstate_tx, + info.clarity_instance, + burn_dbconn, + burn_dbconn.context.first_block_height, + &burn_dbconn.context.pox_constants, + info.parent_consensus_hash, + info.parent_header_hash, + info.parent_burn_block_height, + tenure_snapshot, + info.cause == Some(TenureChangeCause::BlockFound), + info.coinbase_height, + info.cause == Some(TenureChangeCause::Extended), + )?; + self.matured_miner_rewards_opt = matured_miner_rewards_opt; + Ok(clarity_tx) + } + + /// Get an address's account + pub fn get_account( + chainstate: &mut StacksChainState, + sortdb: &SortitionDB, + addr: &StacksAddress, + tip: &StacksHeaderInfo, + ) -> Result { + let snapshot = + SortitionDB::get_block_snapshot_consensus(&sortdb.conn(), &tip.consensus_hash)? 
+ .ok_or_else(|| Error::NoSuchBlockError)?; + + let account = chainstate + .with_read_only_clarity_tx( + &sortdb.index_handle(&snapshot.sortition_id), + &tip.index_block_hash(), + |clarity_conn| { + StacksChainState::get_account(clarity_conn, &addr.to_account_principal()) + }, + ) + .ok_or_else(|| Error::NoSuchBlockError)?; + + Ok(account) + } + + /// Make a shadow block from transactions + pub fn make_shadow_block_from_txs( + mut builder: NakamotoBlockBuilder, + chainstate_handle: &StacksChainState, + burn_dbconn: &SortitionHandleConn, + tenure_id_consensus_hash: &ConsensusHash, + mut txs: Vec, + ) -> Result<(NakamotoBlock, u64, ExecutionCost), Error> { + use clarity::vm::ast::ASTRules; + + debug!( + "Build shadow Nakamoto block from {} transactions", + txs.len() + ); + let (mut chainstate, _) = chainstate_handle.reopen()?; + + let mut tenure_cause = None; + for tx in txs.iter() { + let TransactionPayload::TenureChange(payload) = &tx.payload else { + continue; + }; + tenure_cause = Some(payload.cause); + break; + } + + let mut miner_tenure_info = + builder.shadow_load_tenure_info(&mut chainstate, burn_dbconn, tenure_cause)?; + let mut tenure_tx = builder.shadow_tenure_begin( + burn_dbconn, + &mut miner_tenure_info, + tenure_id_consensus_hash, + )?; + for tx in txs.drain(..) { + let tx_len = tx.tx_len(); + match builder.try_mine_tx_with_len( + &mut tenure_tx, + &tx, + tx_len, + &BlockLimitFunction::NO_LIMIT_HIT, + ASTRules::PrecheckSize, + ) { + TransactionResult::Success(..) => { + debug!("Included {}", &tx.txid()); + } + TransactionResult::Skipped(TransactionSkipped { error, .. }) + | TransactionResult::ProcessingError(TransactionError { error, .. }) => { + match error { + Error::BlockTooBigError => { + // done mining -- our execution budget is exceeded. 
+ // Make the block from the transactions we did manage to get + debug!("Block budget exceeded on tx {}", &tx.txid()); + } + Error::InvalidStacksTransaction(_emsg, true) => { + // if we have an invalid transaction that was quietly ignored, don't warn here either + test_debug!( + "Failed to apply tx {}: InvalidStacksTransaction '{:?}'", + &tx.txid(), + &_emsg + ); + continue; + } + Error::ProblematicTransaction(txid) => { + test_debug!("Encountered problematic transaction. Aborting"); + return Err(Error::ProblematicTransaction(txid)); + } + e => { + warn!("Failed to apply tx {}: {:?}", &tx.txid(), &e); + continue; + } + } + } + TransactionResult::Problematic(TransactionProblematic { tx, .. }) => { + // drop from the mempool + debug!("Encountered problematic transaction {}", &tx.txid()); + return Err(Error::ProblematicTransaction(tx.txid())); + } + } + } + let block = builder.mine_nakamoto_block(&mut tenure_tx); + let size = builder.bytes_so_far; + let cost = builder.tenure_finish(tenure_tx)?; + Ok((block, size, cost)) + } + + /// Produce a single-block shadow tenure. + /// Used by tooling to synthesize shadow blocks in case of an emergency. + /// The details and circumstances will be recorded in an accompanying SIP. + /// + /// `naka_tip_id` is the Stacks chain tip on top of which the shadow block will be built. + /// `tenure_id_consensus_hash` is the sortition in which the shadow block will be built. 
+ /// `txs` are transactions to include, beyond a coinbase and tenure-change + pub fn make_shadow_tenure( + chainstate: &mut StacksChainState, + sortdb: &SortitionDB, + naka_tip_id: StacksBlockId, + tenure_id_consensus_hash: ConsensusHash, + mut txs: Vec, + ) -> Result { + let mainnet = chainstate.config().mainnet; + let chain_id = chainstate.config().chain_id; + + let recipient = StacksAddress::burn_address(mainnet).to_account_principal(); + let vrf_proof_bytes = vec![ + 0x92, 0x75, 0xdf, 0x67, 0xa6, 0x8c, 0x87, 0x45, 0xc0, 0xff, 0x97, 0xb4, 0x82, 0x01, + 0xee, 0x6d, 0xb4, 0x47, 0xf7, 0xc9, 0x3b, 0x23, 0xae, 0x24, 0xcd, 0xc2, 0x40, 0x0f, + 0x52, 0xfd, 0xb0, 0x8a, 0x1a, 0x6a, 0xc7, 0xec, 0x71, 0xbf, 0x9c, 0x9c, 0x76, 0xe9, + 0x6e, 0xe4, 0x67, 0x5e, 0xbf, 0xf6, 0x06, 0x25, 0xaf, 0x28, 0x71, 0x85, 0x01, 0x04, + 0x7b, 0xfd, 0x87, 0xb8, 0x10, 0xc2, 0xd2, 0x13, 0x9b, 0x73, 0xc2, 0x3b, 0xd6, 0x9d, + 0xe6, 0x63, 0x60, 0x95, 0x3a, 0x64, 0x2c, 0x2a, 0x33, 0x0a, + ]; + + // safety -- we know it's a good proof + let vrf_proof = VRFProof::from_bytes(vrf_proof_bytes.as_slice()).unwrap(); + + let naka_tip_header = NakamotoChainState::get_block_header(chainstate.db(), &naka_tip_id)? + .ok_or_else(|| { + warn!("No such Nakamoto tip: {:?}", &naka_tip_id); + Error::NoSuchBlockError + })?; + + let naka_tip_tenure_start_header = NakamotoChainState::get_tenure_start_block_header( + &mut chainstate.index_conn(), + &naka_tip_id, + &naka_tip_header.consensus_hash, + )? 
+ .ok_or_else(|| { + Error::InvalidStacksBlock(format!( + "No tenure-start block header for tenure {}", + &naka_tip_header.consensus_hash + )) + })?; + + if naka_tip_header.anchored_header.height() + 1 + <= naka_tip_tenure_start_header.anchored_header.height() + { + return Err(Error::InvalidStacksBlock( + "Nakamoto tip is lower than its tenure-start block".into(), + )); + } + + let coinbase_payload = CoinbasePayload(naka_tip_tenure_start_header.index_block_hash().0); + + // the miner key is irrelevant + let miner_key = StacksPrivateKey::new(); + let miner_addr = StacksAddress::p2pkh(mainnet, &StacksPublicKey::from_private(&miner_key)); + let miner_tx_auth = TransactionAuth::from_p2pkh(&miner_key).ok_or_else(|| { + Error::InvalidStacksBlock( + "Unable to construct transaction auth from transient private key".into(), + ) + })?; + + let tx_version = if mainnet { + TransactionVersion::Mainnet + } else { + TransactionVersion::Testnet + }; + let miner_account = Self::get_account(chainstate, sortdb, &miner_addr, &naka_tip_header)?; + + // tenure change payload (BlockFound) + let tenure_change_payload = TenureChangePayload { + tenure_consensus_hash: tenure_id_consensus_hash.clone(), + prev_tenure_consensus_hash: naka_tip_header.consensus_hash, + burn_view_consensus_hash: tenure_id_consensus_hash.clone(), + previous_tenure_end: naka_tip_id, + previous_tenure_blocks: (naka_tip_header.anchored_header.height() + 1 + - naka_tip_tenure_start_header.anchored_header.height()) + as u32, + cause: TenureChangeCause::BlockFound, + pubkey_hash: Hash160::from_node_public_key(&StacksPublicKey::from_private(&miner_key)), + }; + + // tenure-change tx + let tenure_change_tx = { + let mut tx_tenure_change = StacksTransaction::new( + tx_version.clone(), + miner_tx_auth.clone(), + TransactionPayload::TenureChange(tenure_change_payload), + ); + tx_tenure_change.chain_id = chain_id; + tx_tenure_change.anchor_mode = TransactionAnchorMode::OnChainOnly; + 
tx_tenure_change.auth.set_origin_nonce(miner_account.nonce); + + let mut tx_signer = StacksTransactionSigner::new(&tx_tenure_change); + tx_signer.sign_origin(&miner_key)?; + let tx_tenure_change_signed = tx_signer + .get_tx() + .ok_or_else(|| Error::InvalidStacksBlock("Failed to sign tenure change".into()))?; + tx_tenure_change_signed + }; + + // coinbase tx + let coinbase_tx = { + let mut tx_coinbase = StacksTransaction::new( + tx_version.clone(), + miner_tx_auth.clone(), + TransactionPayload::Coinbase(coinbase_payload, Some(recipient), Some(vrf_proof)), + ); + tx_coinbase.chain_id = chain_id; + tx_coinbase.anchor_mode = TransactionAnchorMode::OnChainOnly; + tx_coinbase.auth.set_origin_nonce(miner_account.nonce + 1); + + let mut tx_signer = StacksTransactionSigner::new(&tx_coinbase); + tx_signer.sign_origin(&miner_key)?; + let tx_coinbase_signed = tx_signer + .get_tx() + .ok_or_else(|| Error::InvalidStacksBlock("Failed to sign coinbase".into()))?; + tx_coinbase_signed + }; + + // `burn_tip` corresponds to the burn view consensus hash of the tenure. + let burn_tip = + SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &tenure_id_consensus_hash)? 
+ .ok_or_else(|| Error::InvalidStacksBlock("No such tenure ID".into()))?; + + debug!( + "Build Nakamoto shadow block in tenure {} sortition {} parent_tip {}", + &tenure_id_consensus_hash, &burn_tip.consensus_hash, &naka_tip_id + ); + + // make a block + let builder = NakamotoBlockBuilder::new( + &naka_tip_header, + &tenure_id_consensus_hash, + burn_tip.total_burn, + Some(&tenure_change_tx), + Some(&coinbase_tx), + 1, + None, + )?; + + let mut block_txs = vec![tenure_change_tx, coinbase_tx]; + block_txs.append(&mut txs); + let (mut shadow_block, _size, _cost) = Self::make_shadow_block_from_txs( + builder, + &chainstate, + &sortdb.index_handle(&burn_tip.sortition_id), + &tenure_id_consensus_hash, + block_txs, + )?; + + shadow_block.header.version |= 0x80; + + // no need to sign with the signer set; just the miner is sufficient + // (and it can be any miner) + shadow_block.header.sign_miner(&miner_key)?; + + Ok(shadow_block) + } +} + +impl<'a> NakamotoStagingBlocksConnRef<'a> { + /// Determine if we have a particular block with the given index hash. + /// Returns Ok(true) if so + /// Returns Ok(false) if not + /// Returns Err(..) on DB error + pub fn has_shadow_nakamoto_block_with_index_hash( + &self, + index_block_hash: &StacksBlockId, + ) -> Result { + let qry = "SELECT 1 FROM nakamoto_staging_blocks WHERE index_block_hash = ?1 AND obtain_method = ?2"; + let args = params![ + index_block_hash, + &NakamotoBlockObtainMethod::Shadow.to_string() + ]; + let res: Option = query_row(self, qry, args)?; + Ok(res.is_some()) + } + + /// Is this a shadow tenure? + /// If any block is a shadow block in the tenure, they must all be. + /// + /// Returns true if the tenure has at least one shadow block. 
+ pub fn is_shadow_tenure( + &self, + consensus_hash: &ConsensusHash, + ) -> Result { + let qry = "SELECT 1 FROM nakamoto_staging_blocks WHERE consensus_hash = ?1 AND obtain_method = ?2"; + let args = rusqlite::params![ + consensus_hash, + NakamotoBlockObtainMethod::Shadow.to_string() + ]; + let present: Option = query_row(self, qry, args)?; + Ok(present.is_some()) + } + + /// Shadow blocks, unlike Stacks blocks, have a unique place in the chain history. + /// They are inserted post-hoc, so they and their underlying burnchain blocks don't get + /// invalidated via a fork. A consensus hash can identify (1) no tenures, (2) a single + /// shadow tenure, or (3) one or more non-shadow tenures. + /// + /// This is important when downloading a tenure that is ended by a shadow block, since it won't + /// be processed beforehand and its hash isn't learned from the burnchain (so we must be able + /// to infer that if this is a shadow tenure, none of the blocks in it have siblings). + pub fn get_shadow_tenure_start_block( + &self, + ch: &ConsensusHash, + ) -> Result, ChainstateError> { + let qry = "SELECT data FROM nakamoto_staging_blocks WHERE consensus_hash = ?1 AND obtain_method = ?2 ORDER BY height DESC LIMIT 1"; + let args = params![ch, &NakamotoBlockObtainMethod::Shadow.to_string()]; + let res: Option> = query_row(self, qry, args)?; + let Some(block_bytes) = res else { + return Ok(None); + }; + let block = NakamotoBlock::consensus_deserialize(&mut block_bytes.as_slice())?; + if !block.is_shadow_block() { + error!("Staging DB corruption: expected shadow block from {}", ch); + return Err(DBError::Corruption.into()); + } + Ok(Some(block)) + } +} + +impl<'a> NakamotoStagingBlocksTx<'a> { + /// Add a shadow block. + /// Fails if there are any non-shadow blocks present in the tenure. 
+ pub fn add_shadow_block(&self, shadow_block: &NakamotoBlock) -> Result<(), ChainstateError> { + if !shadow_block.is_shadow_block() { + return Err(ChainstateError::InvalidStacksBlock( + "Not a shadow block".into(), + )); + } + let block_id = shadow_block.block_id(); + + // is this block stored already? + let qry = "SELECT 1 FROM nakamoto_staging_blocks WHERE index_block_hash = ?1"; + let args = params![block_id]; + let present: Option = query_row(self, qry, args)?; + if present.is_some() { + return Ok(()); + } + + // this tenure must be empty, or it must be a shadow tenure + let qry = "SELECT 1 FROM nakamoto_staging_blocks WHERE consensus_hash = ?1"; + let args = rusqlite::params![&shadow_block.header.consensus_hash]; + let present: Option = query_row(self, qry, args)?; + if present.is_some() + && !self + .conn() + .is_shadow_tenure(&shadow_block.header.consensus_hash)? + { + return Err(ChainstateError::InvalidStacksBlock( + "Shadow block cannot be inserted into non-empty non-shadow tenure".into(), + )); + } + + // there must not be a block at this height in this tenure + let qry = "SELECT 1 FROM nakamoto_staging_blocks WHERE consensus_hash = ?1 AND height = ?2"; + let args = rusqlite::params![ + &shadow_block.header.consensus_hash, + u64_to_sql(shadow_block.header.chain_length)? + ]; + let present: Option = query_row(self, qry, args)?; + if present.is_some() { + return Err(ChainstateError::InvalidStacksBlock(format!( + "Conflicting block at height {} in tenure {}", + shadow_block.header.chain_length, &shadow_block.header.consensus_hash + ))); + } + + // the shadow block is crafted post-hoc, so we know the consensus hash exists. + // thus, it's always burn-attachable + let burn_attachable = true; + + // shadow blocks cannot be replaced + let signing_weight = u32::MAX; + + self.store_block( + shadow_block, + burn_attachable, + signing_weight, + NakamotoBlockObtainMethod::Shadow, + )?; + Ok(()) + } +} + +/// DO NOT RUN ON A RUNNING NODE (unless you're testing). 
+///
+/// Insert and process a shadow block into the Stacks chainstate.
+pub fn process_shadow_block(
+    chain_state: &mut StacksChainState,
+    sort_db: &mut SortitionDB,
+    shadow_block: NakamotoBlock,
+) -> Result<(), ChainstateError> {
+    let tx = chain_state.staging_db_tx_begin()?;
+    tx.add_shadow_block(&shadow_block)?;
+    tx.commit()?;
+
+    let no_dispatch: Option<DummyEventDispatcher> = None;
+    loop {
+        let sort_tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn())?;
+
+        // process at most one block per loop pass
+        let processed_block_receipt = match NakamotoChainState::process_next_nakamoto_block(
+            chain_state,
+            sort_db,
+            &sort_tip.sortition_id,
+            no_dispatch.as_ref(),
+        ) {
+            Ok(receipt_opt) => receipt_opt,
+            Err(ChainstateError::InvalidStacksBlock(msg)) => {
+                warn!("Encountered invalid block: {}", &msg);
+                continue;
+            }
+            Err(ChainstateError::NetError(NetError::DeserializeError(msg))) => {
+                // happens if we load a zero-sized block (i.e. an invalid block)
+                warn!("Encountered invalid block (codec error): {}", &msg);
+                continue;
+            }
+            Err(e) => {
+                // something else happened
+                return Err(e.into());
+            }
+        };
+
+        if processed_block_receipt.is_none() {
+            // out of blocks
+            info!("No more blocks to process (no receipts)");
+            break;
+        };
+
+        let Some((_, processed, orphaned, _)) = chain_state
+            .nakamoto_blocks_db()
+            .get_block_processed_and_signed_weight(
+                &shadow_block.header.consensus_hash,
+                &shadow_block.header.block_hash(),
+            )?
+        else {
+            return Err(ChainstateError::InvalidStacksBlock(format!(
+                "Shadow block {} for tenure {} not stored",
+                &shadow_block.block_id(),
+                &shadow_block.header.consensus_hash
+            )));
+        };
+
+        if orphaned {
+            return Err(ChainstateError::InvalidStacksBlock(format!(
+                "Shadow block {} for tenure {} was orphaned",
+                &shadow_block.block_id(),
+                &shadow_block.header.consensus_hash
+            )));
+        }
+
+        if processed {
+            break;
+        }
+    }
+    Ok(())
+}
+
+/// DO NOT RUN ON A RUNNING NODE (unless you're testing).
+///
+/// Automatically repair a node that has been stalled due to an empty prepare phase.
+/// Works by synthesizing, inserting, and processing shadow tenures in-between the last sortition
+/// with a winner and the burnchain tip.
+///
+/// This is meant to be accessed by the tooling. Once the blocks are synthesized, they would be
+/// added into other broken nodes' chainstates by the same tooling. Ultimately, a patched node
+/// would be released with these shadow blocks added in as part of the chainstate schema.
+///
+/// Returns the synthesized shadow blocks on success.
+/// Returns error on failure.
+pub fn shadow_chainstate_repair(
+    chain_state: &mut StacksChainState,
+    sort_db: &mut SortitionDB,
+) -> Result<Vec<NakamotoBlock>, ChainstateError> {
+    let sort_tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn())?;
+
+    let header = NakamotoChainState::get_canonical_block_header(chain_state.db(), &sort_db)?
+        .ok_or_else(|| ChainstateError::NoSuchBlockError)?;
+
+    let header_sn =
+        SortitionDB::get_block_snapshot_consensus(sort_db.conn(), &header.consensus_hash)?
+            .ok_or_else(|| {
+                ChainstateError::InvalidStacksBlock(
+                    "Canonical stacks header does not have a sortition".into(),
+                )
+            })?;
+
+    let mut shadow_blocks = vec![];
+    for burn_height in (header_sn.block_height + 1)..sort_tip.block_height {
+        let sort_tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn())?;
+        let sort_handle = sort_db.index_handle(&sort_tip.sortition_id);
+        let sn = sort_handle
+            .get_block_snapshot_by_height(burn_height)?
+            .ok_or_else(|| ChainstateError::InvalidStacksBlock("No sortition at height".into()))?;
+
+        let header = NakamotoChainState::get_canonical_block_header(chain_state.db(), &sort_db)?
+ .ok_or_else(|| ChainstateError::NoSuchBlockError)?; + + let chain_tip = header.index_block_hash(); + let shadow_block = NakamotoBlockBuilder::make_shadow_tenure( + chain_state, + sort_db, + chain_tip.clone(), + sn.consensus_hash, + vec![], + )?; + + shadow_blocks.push(shadow_block.clone()); + + process_shadow_block(chain_state, sort_db, shadow_block)?; + } + + Ok(shadow_blocks) +} diff --git a/stackslib/src/chainstate/nakamoto/staging_blocks.rs b/stackslib/src/chainstate/nakamoto/staging_blocks.rs index 382c708850..c3e8432878 100644 --- a/stackslib/src/chainstate/nakamoto/staging_blocks.rs +++ b/stackslib/src/chainstate/nakamoto/staging_blocks.rs @@ -28,7 +28,7 @@ use stacks_common::util::{get_epoch_time_secs, sleep_ms}; use crate::chainstate::burn::db::sortdb::{SortitionDB, SortitionHandle}; use crate::chainstate::burn::BlockSnapshot; -use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState}; +use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; use crate::chainstate::stacks::db::StacksChainState; use crate::chainstate::stacks::index::marf::MarfConnection; use crate::chainstate::stacks::{Error as ChainstateError, StacksBlock, StacksBlockHeader}; @@ -41,10 +41,16 @@ use crate::util_lib::db::{ /// The means by which a block is obtained. 
#[derive(Debug, PartialEq, Clone, Copy)] pub enum NakamotoBlockObtainMethod { + /// The block was fetched by te block downloader Downloaded, + /// The block was uploaded to us via p2p Pushed, + /// This node mined the block Mined, + /// The block was uploaded to us via HTTP Uploaded, + /// This is a shadow block -- it was created by a SIP to fix a consensus bug + Shadow, } impl fmt::Display for NakamotoBlockObtainMethod { @@ -149,7 +155,12 @@ pub const NAKAMOTO_STAGING_DB_SCHEMA_2: &'static [&'static str] = &[ r#"INSERT INTO db_version (version) VALUES (2)"#, ]; -pub const NAKAMOTO_STAGING_DB_SCHEMA_LATEST: u32 = 2; +pub const NAKAMOTO_STAGING_DB_SCHEMA_3: &'static [&'static str] = &[ + r#"CREATE INDEX nakamoto_staging_blocks_by_obtain_method ON nakamoto_staging_blocks(consensus_hash,obtain_method);"#, + r#"UPDATE db_version SET version = 3"#, +]; + +pub const NAKAMOTO_STAGING_DB_SCHEMA_LATEST: u32 = 3; pub struct NakamotoStagingBlocksConn(rusqlite::Connection); @@ -211,6 +222,21 @@ impl<'a> DerefMut for NakamotoStagingBlocksTx<'a> { &mut self.0 } } +/// Open a Blob handle to a Nakamoto block +fn inner_open_nakamoto_block<'a>( + conn: &'a Connection, + rowid: i64, + readwrite: bool, +) -> Result, ChainstateError> { + let blob = conn.blob_open( + rusqlite::DatabaseName::Main, + "nakamoto_staging_blocks", + "data", + rowid, + !readwrite, + )?; + Ok(blob) +} impl NakamotoStagingBlocksConn { /// Open a Blob handle to a Nakamoto block @@ -219,18 +245,20 @@ impl NakamotoStagingBlocksConn { rowid: i64, readwrite: bool, ) -> Result, ChainstateError> { - let blob = self.blob_open( - rusqlite::DatabaseName::Main, - "nakamoto_staging_blocks", - "data", - rowid, - !readwrite, - )?; - Ok(blob) + inner_open_nakamoto_block(self.deref(), rowid, readwrite) } } impl<'a> NakamotoStagingBlocksConnRef<'a> { + /// Open a Blob handle to a Nakamoto block + pub fn open_nakamoto_block( + &'a self, + rowid: i64, + readwrite: bool, + ) -> Result, ChainstateError> { + 
inner_open_nakamoto_block(self.deref(), rowid, readwrite) + } + /// Determine if we have a particular block with the given index hash. /// Returns Ok(true) if so /// Returns Ok(false) if not @@ -250,7 +278,7 @@ impl<'a> NakamotoStagingBlocksConnRef<'a> { /// There will be at most one such block. /// /// NOTE: for Nakamoto blocks, the sighash is the same as the block hash. - pub(crate) fn get_block_processed_and_signed_weight( + pub fn get_block_processed_and_signed_weight( &self, consensus_hash: &ConsensusHash, block_hash: &BlockHeaderHash, @@ -332,6 +360,32 @@ impl<'a> NakamotoStagingBlocksConnRef<'a> { ))) } + /// Get a Nakamoto block header by index block hash. + /// Verifies its integrity + /// Returns Ok(Some(header)) if the block was present + /// Returns Ok(None) if there was no such block + /// Returns Err(..) on DB error, including corruption + pub fn get_nakamoto_block_header( + &self, + index_block_hash: &StacksBlockId, + ) -> Result, ChainstateError> { + let Some(rowid) = self.get_nakamoto_block_rowid(index_block_hash)? 
else { + return Ok(None); + }; + + let mut fd = self.open_nakamoto_block(rowid, false)?; + let block_header = NakamotoBlockHeader::consensus_deserialize(&mut fd)?; + if &block_header.block_id() != index_block_hash { + error!( + "Staging DB corruption: expected {}, got {}", + index_block_hash, + &block_header.block_id() + ); + return Err(DBError::Corruption.into()); + } + Ok(Some(block_header)) + } + /// Get the size of a Nakamoto block, given its index block hash /// Returns Ok(Some(size)) if the block was present /// Returns Ok(None) if there was no such block @@ -443,14 +497,6 @@ impl<'a> NakamotoStagingBlocksConnRef<'a> { }) } - /// Given a block ID, determine if it has children that have been processed and accepted - pub fn has_children(&self, index_block_hash: &StacksBlockId) -> Result { - let qry = "SELECT 1 FROM nakamoto_staging_blocks WHERE parent_block_id = ?1 AND processed = 1 AND orphaned = 0 LIMIT 1"; - let args = rusqlite::params![index_block_hash]; - let children_flags: Option = query_row(self, qry, args)?; - Ok(children_flags.is_some()) - } - /// Given a consensus hash, determine if the burn block has been processed. /// Because this is stored in a denormalized way, we'll want to do this whenever we store a /// block (so we can set `burn_attachable` accordingly) @@ -534,6 +580,19 @@ impl<'a> NakamotoStagingBlocksTx<'a> { .is_burn_block_processed(&block.header.consensus_hash)? }; + let obtain_method = if block.is_shadow_block() { + // override + NakamotoBlockObtainMethod::Shadow + } else { + obtain_method + }; + + if self.conn().is_shadow_tenure(&block.header.consensus_hash)? 
&& !block.is_shadow_block() { + return Err(ChainstateError::InvalidStacksBlock( + "Tried to insert a non-shadow block into a shadow tenure".into(), + )); + } + self.execute( "INSERT INTO nakamoto_staging_blocks ( block_hash, @@ -715,15 +774,37 @@ impl StacksChainState { /// Perform migrations pub fn migrate_nakamoto_staging_blocks(conn: &Connection) -> Result<(), ChainstateError> { - let mut version = Self::get_nakamoto_staging_blocks_db_version(conn)?; - if version < 2 { - debug!("Migrate Nakamoto staging blocks DB to schema 2"); - for cmd in NAKAMOTO_STAGING_DB_SCHEMA_2.iter() { - conn.execute(cmd, NO_PARAMS)?; + loop { + let version = Self::get_nakamoto_staging_blocks_db_version(conn)?; + if version == NAKAMOTO_STAGING_DB_SCHEMA_LATEST { + return Ok(()); + } + match version { + 1 => { + debug!("Migrate Nakamoto staging blocks DB to schema 2"); + for cmd in NAKAMOTO_STAGING_DB_SCHEMA_2.iter() { + conn.execute(cmd, NO_PARAMS)?; + } + let version = Self::get_nakamoto_staging_blocks_db_version(conn)?; + assert_eq!(version, 2, "Nakamoto staging DB migration failure"); + debug!("Migrated Nakamoto staging blocks DB to schema 2"); + } + 2 => { + debug!("Migrate Nakamoto staging blocks DB to schema 3"); + for cmd in NAKAMOTO_STAGING_DB_SCHEMA_3.iter() { + conn.execute(cmd, NO_PARAMS)?; + } + let version = Self::get_nakamoto_staging_blocks_db_version(conn)?; + assert_eq!(version, 3, "Nakamoto staging DB migration failure"); + debug!("Migrated Nakamoto staging blocks DB to schema 3"); + } + NAKAMOTO_STAGING_DB_SCHEMA_LATEST => { + break; + } + _ => { + panic!("Unusable staging DB: Unknown schema version {}", version); + } } - version = Self::get_nakamoto_staging_blocks_db_version(conn)?; - assert_eq!(version, 2, "Nakamoto staging DB migration failure"); - debug!("Migrated Nakamoto staging blocks DB to schema 2"); } Ok(()) } diff --git a/stackslib/src/chainstate/nakamoto/tenure.rs b/stackslib/src/chainstate/nakamoto/tenure.rs index 5c729d845d..b72bbdda14 100644 --- 
a/stackslib/src/chainstate/nakamoto/tenure.rs +++ b/stackslib/src/chainstate/nakamoto/tenure.rs @@ -749,8 +749,18 @@ impl NakamotoChainState { warn!("Invalid tenure-change: parent snapshot comes after current tip"; "burn_view_consensus_hash" => %tenure_payload.burn_view_consensus_hash, "prev_tenure_consensus_hash" => %tenure_payload.prev_tenure_consensus_hash); return Ok(None); } - if !prev_sn.sortition { - // parent wasn't a sortition-induced tenure change + + // is the parent a shadow block? + // Only possible if the parent is also a nakamoto block + let is_parent_shadow_block = NakamotoChainState::get_nakamoto_block_version( + headers_conn.sqlite(), + &block_header.parent_block_id, + )? + .map(|parent_version| NakamotoBlockHeader::is_shadow_block_version(parent_version)) + .unwrap_or(false); + + if !is_parent_shadow_block && !prev_sn.sortition { + // parent wasn't a shadow block (we expect a sortition), but this wasn't a sortition-induced tenure change warn!("Invalid tenure-change: no block found"; "prev_tenure_consensus_hash" => %tenure_payload.prev_tenure_consensus_hash ); @@ -758,8 +768,8 @@ impl NakamotoChainState { } } - // the tenure must correspond to sortitions - if !tenure_sn.sortition { + // if this isn't a shadow block, then the tenure must correspond to sortitions + if !block_header.is_shadow_block() && !tenure_sn.sortition { warn!("Invalid tenure-change: no block found"; "tenure_consensus_hash" => %tenure_payload.tenure_consensus_hash ); @@ -961,6 +971,8 @@ impl NakamotoChainState { .accumulated_coinbase_ustx; let coinbase_at_block = StacksChainState::get_coinbase_reward( + evaluated_epoch, + chainstate_tx.config.mainnet, chain_tip_burn_header_height, burn_dbconn.context.first_block_height, ); @@ -1056,6 +1068,15 @@ impl NakamotoChainState { ChainstateError::NoSuchBlockError })?; + if snapshot.consensus_hash != *block_consensus_hash { + // should be unreachable, but check defensively + warn!( + "Snapshot for {} is not the same as the one for {}", + 
&burn_header_hash, block_consensus_hash + ); + return Err(ChainstateError::NoSuchBlockError); + } + Ok(snapshot) } } diff --git a/stackslib/src/chainstate/nakamoto/tests/mod.rs b/stackslib/src/chainstate/nakamoto/tests/mod.rs index ea163730ec..94ef81c077 100644 --- a/stackslib/src/chainstate/nakamoto/tests/mod.rs +++ b/stackslib/src/chainstate/nakamoto/tests/mod.rs @@ -1663,7 +1663,9 @@ pub fn test_load_store_update_nakamoto_blocks() { /// Tests: /// * NakamotoBlockHeader::check_miner_signature /// * NakamotoBlockHeader::check_tenure_tx -/// * NakamotoBlockHeader::check_coinbase_tx +/// * NakamotoBlockHeader::is_shadow_block +/// * NakamotoBlockHeader::check_normal_coinbase_tx +/// * NakamotoBlockHeader::check_shadow_coinbase_tx #[test] fn test_nakamoto_block_static_verification() { let private_key = StacksPrivateKey::new(); @@ -1674,9 +1676,25 @@ fn test_nakamoto_block_static_verification() { let sortition_hash = SortitionHash([0x01; 32]); let vrf_proof = VRF::prove(&vrf_privkey, sortition_hash.as_bytes()); + let burn_recipient = StacksAddress::burn_address(false).to_account_principal(); + let alt_recipient = StacksAddress::p2pkh(false, &StacksPublicKey::from_private(&private_key_2)) + .to_account_principal(); + let coinbase_payload = TransactionPayload::Coinbase(CoinbasePayload([0x12; 32]), None, Some(vrf_proof.clone())); + let coinbase_recipient_payload = TransactionPayload::Coinbase( + CoinbasePayload([0x12; 32]), + Some(alt_recipient), + Some(vrf_proof.clone()), + ); + + let coinbase_shadow_recipient_payload = TransactionPayload::Coinbase( + CoinbasePayload([0x12; 32]), + Some(burn_recipient), + Some(vrf_proof.clone()), + ); + let mut coinbase_tx = StacksTransaction::new( TransactionVersion::Testnet, TransactionAuth::from_p2pkh(&private_key).unwrap(), @@ -1685,6 +1703,22 @@ fn test_nakamoto_block_static_verification() { coinbase_tx.chain_id = 0x80000000; coinbase_tx.anchor_mode = TransactionAnchorMode::OnChainOnly; + let mut coinbase_recipient_tx = 
StacksTransaction::new( + TransactionVersion::Testnet, + TransactionAuth::from_p2pkh(&private_key).unwrap(), + coinbase_recipient_payload.clone(), + ); + coinbase_recipient_tx.chain_id = 0x80000000; + coinbase_recipient_tx.anchor_mode = TransactionAnchorMode::OnChainOnly; + + let mut coinbase_shadow_recipient_tx = StacksTransaction::new( + TransactionVersion::Testnet, + TransactionAuth::from_p2pkh(&private_key).unwrap(), + coinbase_shadow_recipient_payload.clone(), + ); + coinbase_shadow_recipient_tx.chain_id = 0x80000000; + coinbase_shadow_recipient_tx.anchor_mode = TransactionAnchorMode::OnChainOnly; + let tenure_change_payload = TenureChangePayload { tenure_consensus_hash: ConsensusHash([0x04; 20]), // same as in nakamoto header prev_tenure_consensus_hash: ConsensusHash([0x01; 20]), @@ -1754,6 +1788,29 @@ fn test_nakamoto_block_static_verification() { MerkleTree::::new(&txid_vecs).root() }; + let nakamoto_recipient_txs = vec![tenure_change_tx.clone(), coinbase_recipient_tx.clone()]; + let nakamoto_recipient_tx_merkle_root = { + let txid_vecs = nakamoto_recipient_txs + .iter() + .map(|tx| tx.txid().as_bytes().to_vec()) + .collect(); + + MerkleTree::::new(&txid_vecs).root() + }; + + let nakamoto_shadow_recipient_txs = vec![ + tenure_change_tx.clone(), + coinbase_shadow_recipient_tx.clone(), + ]; + let nakamoto_shadow_recipient_tx_merkle_root = { + let txid_vecs = nakamoto_shadow_recipient_txs + .iter() + .map(|tx| tx.txid().as_bytes().to_vec()) + .collect(); + + MerkleTree::::new(&txid_vecs).root() + }; + let nakamoto_txs_bad_ch = vec![tenure_change_tx_bad_ch.clone(), coinbase_tx.clone()]; let nakamoto_tx_merkle_root_bad_ch = { let txid_vecs = nakamoto_txs_bad_ch @@ -1837,6 +1894,48 @@ fn test_nakamoto_block_static_verification() { txs: nakamoto_txs_bad_miner_sig, }; + let mut nakamoto_recipient_header = NakamotoBlockHeader { + version: 1, + chain_length: 457, + burn_spent: 126, + consensus_hash: tenure_change_payload.tenure_consensus_hash.clone(), + 
parent_block_id: StacksBlockId([0x03; 32]), + tx_merkle_root: nakamoto_recipient_tx_merkle_root, + state_index_root: TrieHash([0x07; 32]), + timestamp: 8, + miner_signature: MessageSignature::empty(), + signer_signature: vec![], + pox_treatment: BitVec::zeros(1).unwrap(), + }; + nakamoto_recipient_header.sign_miner(&private_key).unwrap(); + + let nakamoto_recipient_block = NakamotoBlock { + header: nakamoto_recipient_header.clone(), + txs: nakamoto_recipient_txs, + }; + + let mut nakamoto_shadow_recipient_header = NakamotoBlockHeader { + version: 1, + chain_length: 457, + burn_spent: 126, + consensus_hash: tenure_change_payload.tenure_consensus_hash.clone(), + parent_block_id: StacksBlockId([0x03; 32]), + tx_merkle_root: nakamoto_shadow_recipient_tx_merkle_root, + state_index_root: TrieHash([0x07; 32]), + timestamp: 8, + miner_signature: MessageSignature::empty(), + signer_signature: vec![], + pox_treatment: BitVec::zeros(1).unwrap(), + }; + nakamoto_shadow_recipient_header + .sign_miner(&private_key) + .unwrap(); + + let nakamoto_shadow_recipient_block = NakamotoBlock { + header: nakamoto_shadow_recipient_header.clone(), + txs: nakamoto_shadow_recipient_txs, + }; + assert_eq!( nakamoto_block.header.recover_miner_pk().unwrap(), StacksPublicKey::from_private(&private_key) @@ -1863,13 +1962,78 @@ fn test_nakamoto_block_static_verification() { let vrf_alt_pubkey = VRFPublicKey::from_private(&vrf_alt_privkey); assert!(nakamoto_block - .check_coinbase_tx(&vrf_pubkey, &sortition_hash) + .check_normal_coinbase_tx(&vrf_pubkey, &sortition_hash) .is_ok()); assert!(nakamoto_block - .check_coinbase_tx(&vrf_pubkey, &SortitionHash([0x02; 32])) + .check_normal_coinbase_tx(&vrf_pubkey, &SortitionHash([0x02; 32])) .is_err()); assert!(nakamoto_block - .check_coinbase_tx(&vrf_alt_pubkey, &sortition_hash) + .check_normal_coinbase_tx(&vrf_alt_pubkey, &sortition_hash) + .is_err()); + + let mut shadow_block = nakamoto_shadow_recipient_block.clone(); + shadow_block.header.version |= 0x80; 
+ + + assert!(!nakamoto_shadow_recipient_block.is_shadow_block()); + assert!(shadow_block.is_shadow_block()); + + // miner key not checked for shadow blocks + assert!(shadow_block + .check_miner_signature(&Hash160::from_node_public_key( + &StacksPublicKey::from_private(&private_key_2) + )) + .is_ok()); + + // shadow block VRF is not checked + assert!(shadow_block.check_shadow_coinbase_tx(false).is_ok()); + + // shadow blocks need burn recipients for coinbases + let mut shadow_block_no_recipient = nakamoto_block.clone(); + shadow_block_no_recipient.header.version |= 0x80; + + assert!(shadow_block_no_recipient.is_shadow_block()); + assert!(shadow_block_no_recipient + .check_shadow_coinbase_tx(false) + .is_err()); + + let mut shadow_block_alt_recipient = nakamoto_block.clone(); + shadow_block_alt_recipient.header.version |= 0x80; + + assert!(shadow_block_alt_recipient.is_shadow_block()); + assert!(shadow_block_alt_recipient + .check_shadow_coinbase_tx(false) + .is_err()); + + // tenure tx requirements still hold for shadow blocks + let mut shadow_nakamoto_block = nakamoto_block.clone(); + let mut shadow_nakamoto_block_bad_ch = nakamoto_block_bad_ch.clone(); + let mut shadow_nakamoto_block_bad_miner_sig = nakamoto_block_bad_miner_sig.clone(); + + shadow_nakamoto_block.header.version |= 0x80; + shadow_nakamoto_block_bad_ch.header.version |= 0x80; + shadow_nakamoto_block_bad_miner_sig.header.version |= 0x80; + + shadow_nakamoto_block + .header + .sign_miner(&private_key) + .unwrap(); + shadow_nakamoto_block_bad_ch + .header + .sign_miner(&private_key) + .unwrap(); + shadow_nakamoto_block_bad_miner_sig + .header + .sign_miner(&private_key) + .unwrap(); + + assert!(shadow_nakamoto_block.is_shadow_block()); + assert!(shadow_nakamoto_block_bad_ch.is_shadow_block()); + assert!(shadow_nakamoto_block_bad_miner_sig.is_shadow_block()); + + assert!(shadow_nakamoto_block.check_tenure_tx().is_ok()); + assert!(shadow_nakamoto_block_bad_ch.check_tenure_tx().is_err()); + 
assert!(shadow_nakamoto_block_bad_miner_sig + .check_tenure_tx() .is_err()); } diff --git a/stackslib/src/chainstate/nakamoto/tests/node.rs b/stackslib/src/chainstate/nakamoto/tests/node.rs index 0645ecd15b..9a488d6a09 100644 --- a/stackslib/src/chainstate/nakamoto/tests/node.rs +++ b/stackslib/src/chainstate/nakamoto/tests/node.rs @@ -26,12 +26,13 @@ use hashbrown::HashMap; use rand::seq::SliceRandom; use rand::{CryptoRng, RngCore, SeedableRng}; use rand_chacha::ChaCha20Rng; +use rusqlite::params; use stacks_common::address::*; use stacks_common::consts::{FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH}; use stacks_common::types::chainstate::{ BlockHeaderHash, SortitionId, StacksAddress, StacksBlockId, VRFSeed, }; -use stacks_common::util::hash::Hash160; +use stacks_common::util::hash::{hex_bytes, Hash160}; use stacks_common::util::secp256k1::Secp256k1PrivateKey; use stacks_common::util::sleep_ms; use stacks_common::util::vrf::{VRFProof, VRFPublicKey}; @@ -51,11 +52,15 @@ use crate::chainstate::coordinator::{ use crate::chainstate::nakamoto::coordinator::{ get_nakamoto_next_recipients, load_nakamoto_reward_set, }; -use crate::chainstate::nakamoto::miner::NakamotoBlockBuilder; -use crate::chainstate::nakamoto::staging_blocks::NakamotoBlockObtainMethod; +use crate::chainstate::nakamoto::miner::{MinerTenureInfo, NakamotoBlockBuilder}; +use crate::chainstate::nakamoto::staging_blocks::{ + NakamotoBlockObtainMethod, NakamotoStagingBlocksConnRef, +}; use crate::chainstate::nakamoto::test_signers::TestSigners; use crate::chainstate::nakamoto::tests::get_account; -use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; +use crate::chainstate::nakamoto::{ + NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, StacksDBIndexed, +}; use crate::chainstate::stacks::address::PoxAddress; use crate::chainstate::stacks::db::blocks::test::store_staging_block; use crate::chainstate::stacks::db::test::*; @@ -71,7 +76,7 @@ use 
crate::cost_estimates::UnitEstimator; use crate::net::relay::{BlockAcceptResponse, Relayer}; use crate::net::test::{TestPeer, TestPeerConfig, *}; use crate::util_lib::boot::boot_code_addr; -use crate::util_lib::db::Error as db_error; +use crate::util_lib::db::{query_row, Error as db_error}; #[derive(Debug, Clone)] pub struct TestStacker { @@ -182,6 +187,7 @@ impl TestBurnchainBlock { fork_snapshot: Option<&BlockSnapshot>, parent_block_snapshot: Option<&BlockSnapshot>, vrf_seed: VRFSeed, + parent_is_shadow_block: bool, ) -> LeaderBlockCommitOp { let tenure_id_as_block_hash = BlockHeaderHash(last_tenure_id.0.clone()); self.inner_add_block_commit( @@ -194,6 +200,7 @@ impl TestBurnchainBlock { parent_block_snapshot, Some(vrf_seed), STACKS_EPOCH_3_0_MARKER, + parent_is_shadow_block, ) } } @@ -221,15 +228,26 @@ impl TestMiner { recipient: Option, vrf_proof: VRFProof, nonce: u64, + ) -> StacksTransaction { + self.make_nakamoto_coinbase_with_nonce_and_payload( + recipient, + vrf_proof, + nonce, + CoinbasePayload([(self.nonce % 256) as u8; 32]), + ) + } + + pub fn make_nakamoto_coinbase_with_nonce_and_payload( + &mut self, + recipient: Option, + vrf_proof: VRFProof, + nonce: u64, + payload: CoinbasePayload, ) -> StacksTransaction { let mut tx_coinbase = StacksTransaction::new( TransactionVersion::Testnet, self.as_transaction_auth().unwrap(), - TransactionPayload::Coinbase( - CoinbasePayload([(self.nonce % 256) as u8; 32]), - recipient, - Some(vrf_proof), - ), + TransactionPayload::Coinbase(payload, recipient, Some(vrf_proof)), ); tx_coinbase.chain_id = self.chain_id; tx_coinbase.anchor_mode = TransactionAnchorMode::OnChainOnly; @@ -273,6 +291,15 @@ impl TestMiner { } } +impl<'a> NakamotoStagingBlocksConnRef<'a> { + pub fn get_any_normal_tenure(&self) -> Result, ChainstateError> { + let qry = "SELECT consensus_hash FROM nakamoto_staging_blocks WHERE obtain_method != ?1 ORDER BY RANDOM() LIMIT 1"; + let args = params![&NakamotoBlockObtainMethod::Shadow.to_string()]; + let 
res: Option = query_row(self, qry, args)?; + Ok(res) + } +} + impl TestStacksNode { pub fn add_nakamoto_tenure_commit( sortdb: &SortitionDB, @@ -283,6 +310,7 @@ impl TestStacksNode { key_op: &LeaderKeyRegisterOp, parent_block_snapshot: Option<&BlockSnapshot>, vrf_seed: VRFSeed, + parent_is_shadow_block: bool, ) -> LeaderBlockCommitOp { let block_commit_op = { let ic = sortdb.index_conn(); @@ -296,6 +324,7 @@ impl TestStacksNode { Some(&parent_snapshot), parent_block_snapshot, vrf_seed, + parent_is_shadow_block, ) }; block_commit_op @@ -350,6 +379,7 @@ impl TestStacksNode { miner_key: &LeaderKeyRegisterOp, parent_block_snapshot_opt: Option<&BlockSnapshot>, expect_success: bool, + parent_is_shadow_block: bool, ) -> LeaderBlockCommitOp { info!( "Miner {}: Commit to Nakamoto tenure starting at {}", @@ -385,6 +415,7 @@ impl TestStacksNode { miner_key, parent_block_snapshot_opt, vrf_seed, + parent_is_shadow_block, ); test_debug!( @@ -453,71 +484,125 @@ impl TestStacksNode { ) -> (LeaderBlockCommitOp, TenureChangePayload) { // this is the tenure that the block-commit confirms. // It's not the last-ever tenure; it's the one just before it. 
- let (last_tenure_id, parent_block_snapshot) = - if let Some(parent_blocks) = parent_nakamoto_tenure { - // parent is an epoch 3 nakamoto block - let first_parent = parent_blocks.first().unwrap(); - let last_parent = parent_blocks.last().unwrap(); - let parent_tenure_id = StacksBlockId::new( - &first_parent.header.consensus_hash, - &first_parent.header.block_hash(), - ); - let parent_sortition = SortitionDB::get_block_snapshot_consensus( - &sortdb.conn(), - &first_parent.header.consensus_hash, + let (last_tenure_id, parent_block_snapshot, parent_is_shadow) = if let Some(parent_blocks) = + parent_nakamoto_tenure + { + // parent is an epoch 3 nakamoto block + let first_parent = parent_blocks.first().unwrap(); + let last_parent = parent_blocks.last().unwrap(); + let parent_tenure_id = StacksBlockId::new( + &first_parent.header.consensus_hash, + &first_parent.header.block_hash(), + ); + + let parent_sortition = if last_parent.is_shadow_block() { + // load up sortition that the shadow block replaces + SortitionDB::get_block_snapshot_consensus( + sortdb.conn(), + &last_parent.header.consensus_hash, ) .unwrap() - .unwrap(); + .unwrap() + } else { + // parent sortition must be the last sortition _with a winner_. + // This is not guaranteed with shadow blocks, so we have to search back if + // necessary. + let mut cursor = first_parent.header.consensus_hash; + let parent_sortition = loop { + let parent_sortition = + SortitionDB::get_block_snapshot_consensus(&sortdb.conn(), &cursor) + .unwrap() + .unwrap(); - test_debug!( - "Work in {} {} for Nakamoto parent: {},{}. 
Last tenure ID is {}", - burn_block.block_height, - burn_block.parent_snapshot.burn_header_hash, - parent_sortition.total_burn, - last_parent.header.chain_length + 1, - &parent_tenure_id, - ); + if parent_sortition.sortition { + break parent_sortition; + } - (parent_tenure_id, parent_sortition) - } else if let Some(parent_stacks_block) = parent_stacks_block { - // building off an existing stacks block - let parent_stacks_block_snapshot = { - let ic = sortdb.index_conn(); - let parent_stacks_block_snapshot = - SortitionDB::get_block_snapshot_for_winning_stacks_block( - &ic, - &burn_block.parent_snapshot.sortition_id, - &parent_stacks_block.block_hash(), + // last tenure was a shadow tenure? + let Ok(Some(tenure_start_header)) = + NakamotoChainState::get_tenure_start_block_header( + &mut self.chainstate.index_conn(), + &parent_tenure_id, + &cursor, + ) + else { + panic!("No tenure-start block header for tenure {}", &cursor); + }; + + let version = tenure_start_header + .anchored_header + .as_stacks_nakamoto() + .unwrap() + .version; + + assert!(NakamotoBlockHeader::is_shadow_block_version(version)); + cursor = self + .chainstate + .index_conn() + .get_parent_tenure_consensus_hash( + &tenure_start_header.index_block_hash(), + &cursor, ) .unwrap() .unwrap(); - parent_stacks_block_snapshot }; + parent_sortition + }; - let parent_chain_tip = StacksChainState::get_anchored_block_header_info( - self.chainstate.db(), - &parent_stacks_block_snapshot.consensus_hash, - &parent_stacks_block.header.block_hash(), - ) - .unwrap() - .unwrap(); - - let parent_tenure_id = parent_chain_tip.index_block_hash(); - - test_debug!( - "Work in {} {} for Stacks 2.x parent: {},{}. Last tenure ID is {}", + test_debug!( + "Work in {} {} for Nakamoto parent: {},{}. Last tenure ID is {}. 
Parent sortition is {}", burn_block.block_height, burn_block.parent_snapshot.burn_header_hash, - parent_stacks_block_snapshot.total_burn, - parent_chain_tip.anchored_header.height(), + parent_sortition.total_burn, + last_parent.header.chain_length + 1, &parent_tenure_id, + &parent_sortition.consensus_hash ); - (parent_tenure_id, parent_stacks_block_snapshot) - } else { - panic!("Neither Nakamoto nor epoch2 parent found"); + ( + parent_tenure_id, + parent_sortition, + last_parent.is_shadow_block(), + ) + } else if let Some(parent_stacks_block) = parent_stacks_block { + // building off an existing stacks block + let parent_stacks_block_snapshot = { + let ic = sortdb.index_conn(); + let parent_stacks_block_snapshot = + SortitionDB::get_block_snapshot_for_winning_stacks_block( + &ic, + &burn_block.parent_snapshot.sortition_id, + &parent_stacks_block.block_hash(), + ) + .unwrap() + .unwrap(); + parent_stacks_block_snapshot }; + let parent_chain_tip = StacksChainState::get_anchored_block_header_info( + self.chainstate.db(), + &parent_stacks_block_snapshot.consensus_hash, + &parent_stacks_block.header.block_hash(), + ) + .unwrap() + .unwrap(); + + let parent_tenure_id = parent_chain_tip.index_block_hash(); + + test_debug!( + "Work in {} {} for Stacks 2.x parent: {},{}. 
Last tenure ID is {}", + burn_block.block_height, + burn_block.parent_snapshot.burn_header_hash, + parent_stacks_block_snapshot.total_burn, + parent_chain_tip.anchored_header.height(), + &parent_tenure_id, + ); + + (parent_tenure_id, parent_stacks_block_snapshot, false) + } else { + panic!("Neither Nakamoto nor epoch2 parent found"); + }; + // the tenure-change contains a pointer to the end of the last tenure, which is currently // the canonical tip unless overridden let (previous_tenure_end, previous_tenure_consensus_hash, previous_tenure_blocks) = @@ -551,7 +636,9 @@ impl TestStacksNode { ); (hdr.index_block_hash(), hdr.consensus_hash, tenure_len) } else { - // building atop epoch2 + // building atop epoch2 (so the parent block can't be a shadow block, meaning + // that parent_block_snapshot is _guaranteed_ to be the snapshot that chose + // last_tenure_id). debug!( "Tenure length of epoch2 tenure {} is {}; tipped at {}", &parent_block_snapshot.consensus_hash, 1, &last_tenure_id @@ -585,6 +672,7 @@ impl TestStacksNode { miner_key, Some(&parent_block_snapshot), tenure_change_cause == TenureChangeCause::BlockFound, + parent_is_shadow, ); (block_commit_op, tenure_change_payload) @@ -599,6 +687,10 @@ impl TestStacksNode { /// The first block will contain a coinbase, if coinbase is Some(..) /// Process the blocks via the chains coordinator as we produce them. /// + /// If malleablize is true, then malleablized blocks will be created by varying the number of + /// signatures. Each malleablized block will be processed and stored if its signatures clear + /// the signing threshold. 
+ /// /// Returns a list of /// * the block /// * its size @@ -626,7 +718,7 @@ impl TestStacksNode { mut after_block: G, malleablize: bool, mined_canonical: bool, - ) -> Vec<(NakamotoBlock, u64, ExecutionCost, Vec)> + ) -> Result)>, ChainstateError> where S: FnMut(&mut NakamotoBlockBuilder), F: FnMut( @@ -665,7 +757,7 @@ impl TestStacksNode { let parent_tip_opt = if let Some(parent_id) = parent_id_opt { if let Some(nakamoto_parent) = - NakamotoChainState::get_block_header(chainstate.db(), &parent_id).unwrap() + NakamotoChainState::get_block_header(chainstate.db(), &parent_id)? { debug!( "Use parent tip identified by produced TenureChange ({})", @@ -674,8 +766,7 @@ impl TestStacksNode { Some(nakamoto_parent) } else { warn!("Produced Tenure change transaction does not point to a real block"); - NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) - .unwrap() + NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb)? } } else if let Some(tenure_change) = tenure_change.as_ref() { // make sure parent tip is consistent with a tenure change @@ -683,9 +774,7 @@ impl TestStacksNode { if let Some(nakamoto_parent) = NakamotoChainState::get_block_header( chainstate.db(), &payload.previous_tenure_end, - ) - .unwrap() - { + )? { debug!( "Use parent tip identified by given TenureChange ({})", &payload.previous_tenure_end @@ -693,17 +782,16 @@ impl TestStacksNode { Some(nakamoto_parent) } else { debug!("Use parent tip identified by canonical tip pointer (no parent block {})", &payload.previous_tenure_end); - NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) - .unwrap() + NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb)? } } else { panic!("Tenure change transaction does not have a TenureChange payload"); } } else { - NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb).unwrap() + NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb)? 
}; - let burn_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + let burn_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn())?; debug!( "Build Nakamoto block in tenure {} sortition {} parent_tip {:?}", @@ -730,8 +818,7 @@ impl TestStacksNode { }, 1, None, - ) - .unwrap() + )? } else { NakamotoBlockBuilder::new_first_block( &tenure_change.clone().unwrap(), @@ -748,22 +835,21 @@ impl TestStacksNode { chainstate, &sortdb.index_handle_at_tip(), txs, - ) - .unwrap(); + )?; let try_to_process = after_block(&mut nakamoto_block); miner.sign_nakamoto_block(&mut nakamoto_block); let tenure_sn = - SortitionDB::get_block_snapshot_consensus(sortdb.conn(), tenure_id_consensus_hash) - .unwrap() - .unwrap(); + SortitionDB::get_block_snapshot_consensus(sortdb.conn(), tenure_id_consensus_hash)? + .ok_or_else(|| ChainstateError::NoSuchBlockError)?; + let cycle = sortdb .pox_constants .block_height_to_reward_cycle(sortdb.first_block_height, tenure_sn.block_height) .unwrap(); // Get the reward set - let sort_tip_sn = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + let sort_tip_sn = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn())?; let reward_set = load_nakamoto_reward_set( miner .burnchain @@ -804,9 +890,11 @@ impl TestStacksNode { &block_id, &nakamoto_block.txs ); - let sort_tip = SortitionDB::get_canonical_sortition_tip(sortdb.conn()).unwrap(); + let sort_tip = SortitionDB::get_canonical_sortition_tip(sortdb.conn())?; let mut sort_handle = sortdb.index_handle(&sort_tip); - let stacks_tip = sort_handle.get_nakamoto_tip_block_id().unwrap().unwrap(); + let stacks_tip = sort_handle + .get_nakamoto_tip_block_id()? + .ok_or_else(|| ChainstateError::NoSuchBlockError)?; let mut block_to_store = nakamoto_block.clone(); let mut processed_blocks = vec![]; @@ -865,9 +953,8 @@ impl TestStacksNode { let stacks_chain_tip = NakamotoChainState::get_canonical_block_header( chainstate.db(), &sortdb, - ) - .unwrap() - .unwrap(); + )? 
+ .ok_or_else(|| ChainstateError::NoSuchBlockError)?; let nakamoto_chain_tip = stacks_chain_tip .anchored_header .as_stacks_nakamoto() @@ -918,11 +1005,11 @@ impl TestStacksNode { all_malleablized_blocks.push(malleablized_blocks); block_count += 1; } - blocks + Ok(blocks .into_iter() .zip(all_malleablized_blocks.into_iter()) .map(|((blk, sz, cost), mals)| (blk, sz, cost, mals)) - .collect() + .collect()) } pub fn make_nakamoto_block_from_txs( @@ -1107,33 +1194,74 @@ impl<'a> TestPeer<'a> { // find the VRF leader key register tx to use. // it's the one pointed to by the parent tenure - let parent_consensus_hash_opt = if let Some(parent_tenure) = parent_tenure_opt.as_ref() { - let tenure_start_block = parent_tenure.first().unwrap(); - Some(tenure_start_block.header.consensus_hash) - } else if let Some(parent_block) = parent_block_opt.as_ref() { - let parent_header_info = - StacksChainState::get_stacks_block_header_info_by_index_block_hash( - stacks_node.chainstate.db(), - &last_tenure_id, + let parent_consensus_hash_and_tenure_start_id_opt = + if let Some(parent_tenure) = parent_tenure_opt.as_ref() { + let tenure_start_block = parent_tenure.first().unwrap(); + Some(( + tenure_start_block.header.consensus_hash, + tenure_start_block.block_id(), + )) + } else if let Some(parent_block) = parent_block_opt.as_ref() { + let parent_header_info = + StacksChainState::get_stacks_block_header_info_by_index_block_hash( + stacks_node.chainstate.db(), + &last_tenure_id, + ) + .unwrap() + .unwrap(); + Some(( + parent_header_info.consensus_hash, + parent_header_info.index_block_hash(), + )) + } else { + None + }; + + let last_key = if let Some((ch, parent_tenure_start_block_id)) = + parent_consensus_hash_and_tenure_start_id_opt.clone() + { + // it's possible that the parent was a shadow block. 
+ // if so, find the highest non-shadow ancestor's block-commit, so we can use its leader key + let mut cursor = ch; + let (tenure_sn, tenure_block_commit) = loop { + let tenure_sn = SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &cursor) + .unwrap() + .unwrap(); + + let Some(tenure_block_commit) = get_block_commit_by_txid( + sortdb.conn(), + &tenure_sn.sortition_id, + &tenure_sn.winning_block_txid, ) - .unwrap() - .unwrap(); - Some(parent_header_info.consensus_hash) - } else { - None - }; + .unwrap() else { + // parent must be a shadow block + let header = NakamotoChainState::get_block_header_nakamoto( + stacks_node.chainstate.db(), + &parent_tenure_start_block_id, + ) + .unwrap() + .unwrap() + .anchored_header + .as_stacks_nakamoto() + .cloned() + .unwrap(); + + if !header.is_shadow_block() { + panic!("Parent tenure start block ID {} has no block-commit and is not a shadow block", &parent_tenure_start_block_id); + } + + cursor = stacks_node + .chainstate + .index_conn() + .get_parent_tenure_consensus_hash(&parent_tenure_start_block_id, &cursor) + .unwrap() + .unwrap(); + + continue; + }; + break (tenure_sn, tenure_block_commit); + }; - let last_key = if let Some(ch) = parent_consensus_hash_opt.clone() { - let tenure_sn = SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &ch) - .unwrap() - .unwrap(); - let tenure_block_commit = get_block_commit_by_txid( - sortdb.conn(), - &tenure_sn.sortition_id, - &tenure_sn.winning_block_txid, - ) - .unwrap() - .unwrap(); let tenure_leader_key = SortitionDB::get_leader_key_at( &sortdb.index_conn(), tenure_block_commit.key_block_ptr.into(), @@ -1318,6 +1446,7 @@ impl<'a> TestPeer<'a> { block_builder, |_| true, ) + .unwrap() } /// Produce and process a Nakamoto tenure, after processing the block-commit from @@ -1333,7 +1462,7 @@ impl<'a> TestPeer<'a> { miner_setup: S, block_builder: F, after_block: G, - ) -> Vec<(NakamotoBlock, u64, ExecutionCost)> + ) -> Result, ChainstateError> where S: FnMut(&mut NakamotoBlockBuilder), F: FnMut( 
@@ -1345,59 +1474,55 @@ impl<'a> TestPeer<'a> { G: FnMut(&mut NakamotoBlock) -> bool, { let cycle = self.get_reward_cycle(); - let mut stacks_node = self.stacks_node.take().unwrap(); - let mut sortdb = self.sortdb.take().unwrap(); + self.with_dbs(|peer, sortdb, stacks_node, mempool| { + // Ensure the signers are setup for the current cycle + signers.generate_aggregate_key(cycle); - // Ensure the signers are setup for the current cycle - signers.generate_aggregate_key(cycle); - - let blocks = TestStacksNode::make_nakamoto_tenure_blocks( - &mut stacks_node.chainstate, - &mut sortdb, - &mut self.miner, - signers, - &tenure_change - .try_as_tenure_change() - .unwrap() - .tenure_consensus_hash - .clone(), - Some(tenure_change), - Some(coinbase), - &mut self.coord, - miner_setup, - block_builder, - after_block, - self.mine_malleablized_blocks, - self.nakamoto_parent_tenure_opt.is_none(), - ); - - let just_blocks = blocks - .clone() - .into_iter() - .map(|(block, _, _, _)| block) - .collect(); + let blocks = TestStacksNode::make_nakamoto_tenure_blocks( + &mut stacks_node.chainstate, + sortdb, + &mut peer.miner, + signers, + &tenure_change + .try_as_tenure_change() + .unwrap() + .tenure_consensus_hash + .clone(), + Some(tenure_change), + Some(coinbase), + &mut peer.coord, + miner_setup, + block_builder, + after_block, + peer.mine_malleablized_blocks, + peer.nakamoto_parent_tenure_opt.is_none(), + )?; + + let just_blocks = blocks + .clone() + .into_iter() + .map(|(block, _, _, _)| block) + .collect(); - stacks_node.add_nakamoto_tenure_blocks(just_blocks); + stacks_node.add_nakamoto_tenure_blocks(just_blocks); - let mut malleablized_blocks: Vec = blocks - .clone() - .into_iter() - .map(|(_, _, _, malleablized)| malleablized) - .flatten() - .collect(); + let mut malleablized_blocks: Vec = blocks + .clone() + .into_iter() + .map(|(_, _, _, malleablized)| malleablized) + .flatten() + .collect(); - self.malleablized_blocks.append(&mut malleablized_blocks); + 
peer.malleablized_blocks.append(&mut malleablized_blocks); - let block_data = blocks - .clone() - .into_iter() - .map(|(blk, sz, cost, _)| (blk, sz, cost)) - .collect(); - - self.stacks_node = Some(stacks_node); - self.sortdb = Some(sortdb); + let block_data = blocks + .clone() + .into_iter() + .map(|(blk, sz, cost, _)| (blk, sz, cost)) + .collect(); - block_data + Ok(block_data) + }) } /// Produce and process a Nakamoto tenure extension. @@ -1461,7 +1586,8 @@ impl<'a> TestPeer<'a> { |_| true, self.mine_malleablized_blocks, self.nakamoto_parent_tenure_opt.is_none(), - ); + ) + .unwrap(); let just_blocks = blocks .clone() @@ -1833,7 +1959,7 @@ impl<'a> TestPeer<'a> { ); let parent_vrf_proof = NakamotoChainState::get_parent_vrf_proof( &mut chainstate.index_conn(), - &block.block_id(), + &block.header.parent_block_id, &sortdb.conn(), &block.header.consensus_hash, &tenure_block_commit.txid, @@ -2197,5 +2323,287 @@ impl<'a> TestPeer<'a> { ) .unwrap()); } + + // validate_shadow_parent_burnchain + // should always succeed + NakamotoChainState::validate_shadow_parent_burnchain( + chainstate.nakamoto_blocks_db(), + &sortdb.index_handle_at_tip(), + block, + &tenure_block_commit, + ) + .unwrap(); + + if parent_block_header + .anchored_header + .as_stacks_nakamoto() + .map(|hdr| hdr.is_shadow_block()) + .unwrap_or(false) + { + // test error cases + let mut bad_tenure_block_commit_vtxindex = tenure_block_commit.clone(); + bad_tenure_block_commit_vtxindex.parent_vtxindex = 1; + + let mut bad_tenure_block_commit_block_ptr = tenure_block_commit.clone(); + bad_tenure_block_commit_block_ptr.parent_block_ptr += 1; + + let mut bad_block_no_parent = block.clone(); + bad_block_no_parent.header.parent_block_id = StacksBlockId([0x11; 32]); + + // not a problem if there's no (nakamoto) parent, since the parent can be a + // (non-shadow) epoch2 block not present in the staging chainstate + NakamotoChainState::validate_shadow_parent_burnchain( + chainstate.nakamoto_blocks_db(), + 
&sortdb.index_handle_at_tip(), + &bad_block_no_parent, + &tenure_block_commit, + ) + .unwrap(); + + // should fail because vtxindex must be 0 + let ChainstateError::InvalidStacksBlock(_) = + NakamotoChainState::validate_shadow_parent_burnchain( + chainstate.nakamoto_blocks_db(), + &sortdb.index_handle_at_tip(), + block, + &bad_tenure_block_commit_vtxindex, + ) + .unwrap_err() + else { + panic!("validate_shadow_parent_burnchain did not fail as expected"); + }; + + // should fail because it doesn't point to shadow tenure + let ChainstateError::InvalidStacksBlock(_) = + NakamotoChainState::validate_shadow_parent_burnchain( + chainstate.nakamoto_blocks_db(), + &sortdb.index_handle_at_tip(), + block, + &bad_tenure_block_commit_block_ptr, + ) + .unwrap_err() + else { + panic!("validate_shadow_parent_burnchain did not fail as expected"); + }; + } + + if block.is_shadow_block() { + // block is stored + assert!(chainstate + .nakamoto_blocks_db() + .has_shadow_nakamoto_block_with_index_hash(&block.block_id()) + .unwrap()); + + // block is in a shadow tenure + assert!(chainstate + .nakamoto_blocks_db() + .is_shadow_tenure(&block.header.consensus_hash) + .unwrap()); + + // shadow tenure has a start block + assert!(chainstate + .nakamoto_blocks_db() + .get_shadow_tenure_start_block(&block.header.consensus_hash) + .unwrap() + .is_some()); + + // succeeds without burn + NakamotoChainState::validate_shadow_nakamoto_block_burnchain( + chainstate.nakamoto_blocks_db(), + &sortdb.index_handle_at_tip(), + None, + &block, + false, + 0x80000000, + ) + .unwrap(); + + // succeeds with expected burn + NakamotoChainState::validate_shadow_nakamoto_block_burnchain( + chainstate.nakamoto_blocks_db(), + &sortdb.index_handle_at_tip(), + Some(block.header.burn_spent), + &block, + false, + 0x80000000, + ) + .unwrap(); + + // fails with invalid burn + let ChainstateError::InvalidStacksBlock(_) = + NakamotoChainState::validate_shadow_nakamoto_block_burnchain( + chainstate.nakamoto_blocks_db(), + 
&sortdb.index_handle_at_tip(), + Some(block.header.burn_spent + 1), + &block, + false, + 0x80000000, + ) + .unwrap_err() + else { + panic!("validate_shadow_nakamoto_block_burnchain succeeded when it shouldn't have"); + }; + + // block must be stored already + let mut bad_block = block.clone(); + bad_block.header.version += 1; + + // fails because block_id() isn't present + let ChainstateError::InvalidStacksBlock(_) = + NakamotoChainState::validate_shadow_nakamoto_block_burnchain( + chainstate.nakamoto_blocks_db(), + &sortdb.index_handle_at_tip(), + None, + &bad_block, + false, + 0x80000000, + ) + .unwrap_err() + else { + panic!("validate_shadow_nakamoto_block_burnchain succeeded when it shouldn't have"); + }; + + // VRF proof must be present + assert!(NakamotoChainState::get_shadow_vrf_proof( + &mut chainstate.index_conn(), + &block.block_id() + ) + .unwrap() + .is_some()); + } else { + // not a shadow block + assert!(!chainstate + .nakamoto_blocks_db() + .has_shadow_nakamoto_block_with_index_hash(&block.block_id()) + .unwrap()); + assert!(!chainstate + .nakamoto_blocks_db() + .is_shadow_tenure(&block.header.consensus_hash) + .unwrap()); + assert!(chainstate + .nakamoto_blocks_db() + .get_shadow_tenure_start_block(&block.header.consensus_hash) + .unwrap() + .is_none()); + assert!(NakamotoChainState::get_shadow_vrf_proof( + &mut chainstate.index_conn(), + &block.block_id() + ) + .unwrap() + .is_none()); + } + } + + /// Add a shadow tenure on a given tip. 
+ /// * Advance the burnchain and create an empty sortition (so we have a new consensus hash) + /// * Generate a shadow block for the empty sortition + /// * Store the shadow block to the staging DB + /// * Process it + /// + /// Tests: + /// * NakamotoBlockHeader::get_shadow_signer_weight() + pub fn make_shadow_tenure(&mut self, tip: Option) -> NakamotoBlock { + let naka_tip_id = tip.unwrap_or(self.network.stacks_tip.block_id()); + let (_, _, tenure_id_consensus_hash) = self.next_burnchain_block(vec![]); + + test_debug!( + "\n\nMake shadow tenure for tenure {} off of tip {}\n\n", + &tenure_id_consensus_hash, + &naka_tip_id + ); + + let mut stacks_node = self.stacks_node.take().unwrap(); + let sortdb = self.sortdb.take().unwrap(); + + let shadow_block = NakamotoBlockBuilder::make_shadow_tenure( + &mut stacks_node.chainstate, + &sortdb, + naka_tip_id, + tenure_id_consensus_hash, + vec![], + ) + .unwrap(); + + // Get the reward set + let sort_tip_sn = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + let reward_set = load_nakamoto_reward_set( + self.miner + .burnchain + .block_height_to_reward_cycle(sort_tip_sn.block_height) + .expect("FATAL: no reward cycle for sortition"), + &sort_tip_sn.sortition_id, + &self.miner.burnchain, + &mut stacks_node.chainstate, + &shadow_block.header.parent_block_id, + &sortdb, + &OnChainRewardSetProvider::new(), + ) + .expect("Failed to load reward set") + .expect("Expected a reward set") + .0 + .known_selected_anchor_block_owned() + .expect("Unknown reward set"); + + // check signer weight + let mut max_signing_weight = 0; + for signer in reward_set.signers.as_ref().unwrap().iter() { + max_signing_weight += signer.weight; + } + + assert_eq!( + shadow_block + .header + .get_shadow_signer_weight(&reward_set) + .unwrap(), + max_signing_weight + ); + + // put it into Stacks staging DB + let tx = stacks_node.chainstate.staging_db_tx_begin().unwrap(); + tx.add_shadow_block(&shadow_block).unwrap(); + + // inserts of the 
same block are idempotent + tx.add_shadow_block(&shadow_block).unwrap(); + + tx.commit().unwrap(); + + let rollback_tx = stacks_node.chainstate.staging_db_tx_begin().unwrap(); + + if let Some(normal_tenure) = rollback_tx.conn().get_any_normal_tenure().unwrap() { + // can't insert into a non-shadow tenure + let mut bad_shadow_block_tenure = shadow_block.clone(); + bad_shadow_block_tenure.header.consensus_hash = normal_tenure; + + let ChainstateError::InvalidStacksBlock(_) = rollback_tx + .add_shadow_block(&bad_shadow_block_tenure) + .unwrap_err() + else { + panic!("add_shadow_block succeeded when it should have failed"); + }; + } + + // can't insert into the same height twice with different blocks + let mut bad_shadow_block_height = shadow_block.clone(); + bad_shadow_block_height.header.version += 1; + let ChainstateError::InvalidStacksBlock(_) = rollback_tx + .add_shadow_block(&bad_shadow_block_height) + .unwrap_err() + else { + panic!("add_shadow_block succeeded when it should have failed"); + }; + + drop(rollback_tx); + + self.stacks_node = Some(stacks_node); + self.sortdb = Some(sortdb); + + // process it + self.coord.handle_new_nakamoto_stacks_block().unwrap(); + + // verify that it processed + self.refresh_burnchain_view(); + assert_eq!(self.network.stacks_tip.block_id(), shadow_block.block_id()); + + shadow_block } } diff --git a/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs index 7ae25d00f6..64782c67d6 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs @@ -1039,7 +1039,7 @@ fn test_simple_pox_lockup_transition_pox_2() { bob_txs.insert(t.auth.get_origin_nonce(), r); } else if addr == key_to_stacks_addr(&charlie) { assert!( - r.execution_cost != ExecutionCost::zero(), + r.execution_cost != ExecutionCost::ZERO, "Execution cost is not zero!" 
); charlie_txs.insert(t.auth.get_origin_nonce(), r); @@ -1385,7 +1385,7 @@ fn test_simple_pox_2_auto_unlock(alice_first: bool) { bob_txs.insert(t.auth.get_origin_nonce(), r); } else if addr == charlie_address { assert!( - r.execution_cost != ExecutionCost::zero(), + r.execution_cost != ExecutionCost::ZERO, "Execution cost is not zero!" ); charlie_txs.insert(t.auth.get_origin_nonce(), r); diff --git a/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs index dc65db0324..8a173c6adc 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs @@ -459,7 +459,7 @@ fn simple_pox_lockup_transition_pox_2() { bob_txs.insert(t.auth.get_origin_nonce(), r); } else if addr == key_to_stacks_addr(&charlie) { assert!( - r.execution_cost != ExecutionCost::zero(), + r.execution_cost != ExecutionCost::ZERO, "Execution cost is not zero!" ); charlie_txs.insert(t.auth.get_origin_nonce(), r); diff --git a/stackslib/src/chainstate/stacks/db/accounts.rs b/stackslib/src/chainstate/stacks/db/accounts.rs index 7c81410e87..bf84cc1362 100644 --- a/stackslib/src/chainstate/stacks/db/accounts.rs +++ b/stackslib/src/chainstate/stacks/db/accounts.rs @@ -856,6 +856,14 @@ impl StacksChainState { burn_total ); + // in the case of shadow blocks, there will be zero burns. + // the coinbase is still generated, but it's rendered unspendable + let (this_burn_total, burn_total) = if burn_total == 0 { + (1, 1) + } else { + (this_burn_total, burn_total) + }; + // each participant gets a share of the coinbase proportional to the fraction it burned out // of all participants' burns. 
let coinbase_reward = participant @@ -1184,7 +1192,7 @@ mod test { new_tip.microblock_tail.clone(), &block_reward, None, - &ExecutionCost::zero(), + &ExecutionCost::ZERO, 123, false, vec![], diff --git a/stackslib/src/chainstate/stacks/db/blocks.rs b/stackslib/src/chainstate/stacks/db/blocks.rs index 04f772da02..233a9d5978 100644 --- a/stackslib/src/chainstate/stacks/db/blocks.rs +++ b/stackslib/src/chainstate/stacks/db/blocks.rs @@ -3612,41 +3612,13 @@ impl StacksChainState { } /// Get the coinbase at this burn block height, in microSTX - pub fn get_coinbase_reward(burn_block_height: u64, first_burn_block_height: u64) -> u128 { - /* - From https://forum.stacks.org/t/pox-consensus-and-stx-future-supply - - """ - - 1000 STX for years 0-4 - 500 STX for years 4-8 - 250 STX for years 8-12 - 125 STX in perpetuity - - - From the Token Whitepaper: - - We expect that once native mining goes live, approximately 4383 blocks will be pro- - cessed per month, or approximately 52,596 blocks will be processed per year. - - """ - */ - // this is saturating subtraction for the initial reward calculation - // where we are computing the coinbase reward for blocks that occur *before* - // the `first_burn_block_height` - let effective_ht = burn_block_height.saturating_sub(first_burn_block_height); - let blocks_per_year = 52596; - let stx_reward = if effective_ht < blocks_per_year * 4 { - 1000 - } else if effective_ht < blocks_per_year * 8 { - 500 - } else if effective_ht < blocks_per_year * 12 { - 250 - } else { - 125 - }; - - stx_reward * (u128::from(MICROSTACKS_PER_STACKS)) + pub fn get_coinbase_reward( + epoch: StacksEpochId, + mainnet: bool, + burn_block_height: u64, + first_burn_block_height: u64, + ) -> u128 { + epoch.coinbase_reward(mainnet, first_burn_block_height, burn_block_height) } /// Create the block reward. 
@@ -4132,7 +4104,12 @@ impl StacksChainState { current_epoch = StacksEpochId::Epoch30; } StacksEpochId::Epoch30 => { - panic!("No defined transition from Epoch30 forward") + // no special initialization is needed, since only the coinbase emission + // schedule is changing. + current_epoch = StacksEpochId::Epoch31; + } + StacksEpochId::Epoch31 => { + panic!("No defined transition from Epoch31 forward") } } } @@ -4327,7 +4304,7 @@ impl StacksChainState { post_condition_aborted: false, stx_burned: 0, contract_analysis: None, - execution_cost: ExecutionCost::zero(), + execution_cost: ExecutionCost::ZERO, microblock_header: None, tx_index: 0, vm_error: None, @@ -4942,8 +4919,7 @@ impl StacksChainState { )?; Ok((stack_ops, transfer_ops, delegate_ops, vec![])) } - StacksEpochId::Epoch25 | StacksEpochId::Epoch30 => { - // TODO: sbtc ops in epoch 3.0 + StacksEpochId::Epoch25 | StacksEpochId::Epoch30 | StacksEpochId::Epoch31 => { StacksChainState::get_stacking_and_transfer_and_delegate_burn_ops_v210( chainstate_tx, parent_index_hash, @@ -5033,7 +5009,7 @@ impl StacksChainState { pox_reward_cycle, pox_start_cycle_info, ), - StacksEpochId::Epoch25 | StacksEpochId::Epoch30 => { + StacksEpochId::Epoch25 | StacksEpochId::Epoch30 | StacksEpochId::Epoch31 => { Self::handle_pox_cycle_start_pox_4( clarity_tx, pox_reward_cycle, @@ -5131,7 +5107,7 @@ impl StacksChainState { ); cost } else { - ExecutionCost::zero() + ExecutionCost::ZERO }; let mut clarity_tx = StacksChainState::chainstate_block_begin( @@ -5218,7 +5194,7 @@ impl StacksChainState { // if we get here, then we need to reset the block-cost back to 0 since this begins the // epoch defined by this miner. - clarity_tx.reset_cost(ExecutionCost::zero()); + clarity_tx.reset_cost(ExecutionCost::ZERO); // is this stacks block the first of a new epoch? 
let (applied_epoch_transition, mut tx_receipts) = @@ -5758,6 +5734,8 @@ impl StacksChainState { .accumulated_coinbase_ustx; let coinbase_at_block = StacksChainState::get_coinbase_reward( + evaluated_epoch, + mainnet, u64::from(chain_tip_burn_header_height), burn_dbconn.context.first_block_height, ); diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index 6b6f523f88..ffdea5a7dd 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -307,6 +307,7 @@ impl DBConfig { StacksEpochId::Epoch24 => version_u32 >= 3 && version_u32 <= 8, StacksEpochId::Epoch25 => version_u32 >= 3 && version_u32 <= 8, StacksEpochId::Epoch30 => version_u32 >= 3 && version_u32 <= 8, + StacksEpochId::Epoch31 => version_u32 >= 3 && version_u32 <= 8, } } } @@ -1623,7 +1624,7 @@ impl StacksChainState { allocations_tx, allocation_events, Value::okay_true(), - ExecutionCost::zero(), + ExecutionCost::ZERO, ); receipts.push(allocations_receipt); @@ -1724,7 +1725,7 @@ impl StacksChainState { &mut tx, &parent_hash, &first_tip_info, - &ExecutionCost::zero(), + &ExecutionCost::ZERO, 0, )?; tx.commit()?; @@ -2902,7 +2903,7 @@ pub mod test { // Just update the expected value assert_eq!( genesis_root_hash.to_string(), - "c771616ff6acb710051238c9f4a3c48020a6d70cda637d34b89f2311a7e27886" + "0eb3076f0635ccdfcdc048afb8dea9048c5180a2e2b2952874af1d18f06321e8" ); } diff --git a/stackslib/src/chainstate/stacks/db/transactions.rs b/stackslib/src/chainstate/stacks/db/transactions.rs index e9de9139a2..3df99ea886 100644 --- a/stackslib/src/chainstate/stacks/db/transactions.rs +++ b/stackslib/src/chainstate/stacks/db/transactions.rs @@ -192,7 +192,7 @@ impl StacksTransactionReceipt { result: Value::okay_true(), stx_burned: 0, contract_analysis: None, - execution_cost: ExecutionCost::zero(), + execution_cost: ExecutionCost::ZERO, microblock_header: None, tx_index: 0, vm_error: None, @@ -307,7 +307,7 @@ impl 
StacksTransactionReceipt { result: Value::okay_true(), stx_burned: 0, contract_analysis: None, - execution_cost: ExecutionCost::zero(), + execution_cost: ExecutionCost::ZERO, microblock_header: None, tx_index: 0, vm_error: None, @@ -8725,6 +8725,7 @@ pub mod test { StacksEpochId::Epoch24 => self.get_stacks_epoch(5), StacksEpochId::Epoch25 => self.get_stacks_epoch(6), StacksEpochId::Epoch30 => self.get_stacks_epoch(7), + StacksEpochId::Epoch31 => self.get_stacks_epoch(8), } } fn get_pox_payout_addrs( diff --git a/stackslib/src/chainstate/stacks/db/unconfirmed.rs b/stackslib/src/chainstate/stacks/db/unconfirmed.rs index 7da2ff1599..6f7a9fe9ea 100644 --- a/stackslib/src/chainstate/stacks/db/unconfirmed.rs +++ b/stackslib/src/chainstate/stacks/db/unconfirmed.rs @@ -254,7 +254,7 @@ impl UnconfirmedState { let mut total_burns = 0; let mut all_receipts = vec![]; let mut mined_txs = UnconfirmedTxMap::new(); - let mut new_cost = ExecutionCost::zero(); + let mut new_cost = ExecutionCost::ZERO; let mut new_bytes = 0; let mut num_new_mblocks = 0; let mut have_state = self.have_state; @@ -351,7 +351,7 @@ impl UnconfirmedState { // apply injected faults if self.disable_cost_check { warn!("Fault injection: disabling microblock miner's cost tracking"); - self.cost_so_far = ExecutionCost::zero(); + self.cost_so_far = ExecutionCost::ZERO; } if self.disable_bytes_check { warn!("Fault injection: disabling microblock miner's size tracking"); @@ -709,7 +709,7 @@ mod test { } let mut anchor_size = 0; - let mut anchor_cost = ExecutionCost::zero(); + let mut anchor_cost = ExecutionCost::ZERO; let (burn_ops, stacks_block, _) = peer.make_tenure( |ref mut miner, @@ -946,7 +946,7 @@ mod test { } let mut anchor_size = 0; - let mut anchor_cost = ExecutionCost::zero(); + let mut anchor_cost = ExecutionCost::ZERO; let (burn_ops, stacks_block, _) = peer.make_tenure( |ref mut miner, @@ -1205,7 +1205,7 @@ mod test { } let mut anchor_size = 0; - let mut anchor_cost = ExecutionCost::zero(); + let mut 
anchor_cost = ExecutionCost::ZERO; let (burn_ops, stacks_block, _) = peer.make_tenure( |ref mut miner, diff --git a/stackslib/src/chainstate/stacks/index/bits.rs b/stackslib/src/chainstate/stacks/index/bits.rs index e212b03299..6397cee3a3 100644 --- a/stackslib/src/chainstate/stacks/index/bits.rs +++ b/stackslib/src/chainstate/stacks/index/bits.rs @@ -29,7 +29,7 @@ use stacks_common::util::macros::is_trace; use crate::chainstate::stacks::index::node::{ clear_backptr, ConsensusSerializable, TrieNode, TrieNode16, TrieNode256, TrieNode4, TrieNode48, - TrieNodeID, TrieNodeType, TriePtr, TRIEPATH_MAX_LEN, TRIEPTR_SIZE, + TrieNodeID, TrieNodeType, TriePtr, TRIEPTR_SIZE, }; use crate::chainstate::stacks::index::storage::{TrieFileStorage, TrieStorageConnection}; use crate::chainstate::stacks::index::{BlockMap, Error, MarfTrieId, TrieLeaf}; @@ -55,15 +55,15 @@ pub fn path_from_bytes(r: &mut R) -> Result, Error> { } })?; - if lenbuf[0] as usize > TRIEPATH_MAX_LEN { + if lenbuf[0] as usize > TRIEHASH_ENCODED_SIZE { trace!( "Path length is {} (expected <= {})", lenbuf[0], - TRIEPATH_MAX_LEN + TRIEHASH_ENCODED_SIZE ); return Err(Error::CorruptionError(format!( "Node path is longer than {} bytes (got {})", - TRIEPATH_MAX_LEN, lenbuf[0] + TRIEHASH_ENCODED_SIZE, lenbuf[0] ))); } @@ -326,7 +326,7 @@ pub fn read_nodetype_at_head_nohash( /// node hash id ptrs & ptr data path /// /// X is fixed and determined by the TrieNodeType variant. -/// Y is variable, but no more than TriePath::len(). +/// Y is variable, but no more than TrieHash::len(). /// /// If `read_hash` is false, then the contents of the node hash are undefined. 
fn inner_read_nodetype_at_head( diff --git a/stackslib/src/chainstate/stacks/index/cache.rs b/stackslib/src/chainstate/stacks/index/cache.rs index 7f92efdd8b..7547fd6d80 100644 --- a/stackslib/src/chainstate/stacks/index/cache.rs +++ b/stackslib/src/chainstate/stacks/index/cache.rs @@ -40,7 +40,7 @@ use crate::chainstate::stacks::index::bits::{ }; use crate::chainstate::stacks::index::node::{ clear_backptr, is_backptr, set_backptr, TrieNode, TrieNode16, TrieNode256, TrieNode4, - TrieNode48, TrieNodeID, TrieNodeType, TriePath, TriePtr, + TrieNode48, TrieNodeID, TrieNodeType, TriePtr, }; use crate::chainstate::stacks::index::{trie_sql, ClarityMarfTrieId, Error, MarfTrieId, TrieLeaf}; use crate::util_lib::db::{ @@ -420,7 +420,7 @@ pub mod test { } } else { for (key, value) in block_data.iter() { - let path = TriePath::from_key(key); + let path = TrieHash::from_key(key); let leaf = TrieLeaf::from_value(&vec![], value.clone()); marf.insert_raw(path, leaf).unwrap(); } @@ -443,7 +443,7 @@ pub mod test { for (i, block_data) in data.iter().enumerate() { test_debug!("Read block {}", i); for (key, value) in block_data.iter() { - let path = TriePath::from_key(key); + let path = TrieHash::from_key(key); let marf_leaf = TrieLeaf::from_value(&vec![], value.clone()); let read_time = SystemTime::now(); diff --git a/stackslib/src/chainstate/stacks/index/file.rs b/stackslib/src/chainstate/stacks/index/file.rs index 4123b1310a..5a7da69e52 100644 --- a/stackslib/src/chainstate/stacks/index/file.rs +++ b/stackslib/src/chainstate/stacks/index/file.rs @@ -42,7 +42,7 @@ use crate::chainstate::stacks::index::bits::{ }; use crate::chainstate::stacks::index::node::{ clear_backptr, is_backptr, set_backptr, TrieNode, TrieNode16, TrieNode256, TrieNode4, - TrieNode48, TrieNodeID, TrieNodeType, TriePath, TriePtr, + TrieNode48, TrieNodeID, TrieNodeType, TriePtr, }; use crate::chainstate::stacks::index::storage::{NodeHashReader, TrieStorageConnection}; use 
crate::chainstate::stacks::index::{trie_sql, ClarityMarfTrieId, Error, MarfTrieId, TrieLeaf}; diff --git a/stackslib/src/chainstate/stacks/index/marf.rs b/stackslib/src/chainstate/stacks/index/marf.rs index d5dd77c51f..a4082627fd 100644 --- a/stackslib/src/chainstate/stacks/index/marf.rs +++ b/stackslib/src/chainstate/stacks/index/marf.rs @@ -28,14 +28,14 @@ use stacks_common::util::log; use crate::chainstate::stacks::index::bits::{get_leaf_hash, get_node_hash, read_root_hash}; use crate::chainstate::stacks::index::node::{ clear_backptr, is_backptr, set_backptr, CursorError, TrieCursor, TrieNode, TrieNode16, - TrieNode256, TrieNode4, TrieNode48, TrieNodeID, TrieNodeType, TriePath, TriePtr, TRIEPTR_SIZE, + TrieNode256, TrieNode4, TrieNode48, TrieNodeID, TrieNodeType, TriePtr, TRIEPTR_SIZE, }; use crate::chainstate::stacks::index::storage::{ TrieFileStorage, TrieHashCalculationMode, TrieStorageConnection, TrieStorageTransaction, }; use crate::chainstate::stacks::index::trie::Trie; use crate::chainstate::stacks::index::{ - ClarityMarfTrieId, Error, MARFValue, MarfTrieId, TrieHashExtension, TrieLeaf, TrieMerkleProof, + ClarityMarfTrieId, Error, MARFValue, MarfTrieId, TrieLeaf, TrieMerkleProof, }; use crate::util_lib::db::Error as db_error; @@ -122,11 +122,39 @@ pub trait MarfConnection { fn sqlite_conn(&self) -> &Connection; + /// Get and check a value against get_from_hash + /// (test only) + #[cfg(test)] + fn get_and_check_with_hash(&mut self, block_hash: &T, key: &str) { + let res = self.with_conn(|c| MARF::get_by_key(c, block_hash, key)); + let res_with_hash = + self.with_conn(|c| MARF::get_by_hash(c, block_hash, &TrieHash::from_key(key))); + match (res, res_with_hash) { + (Ok(Some(x)), Ok(Some(y))) => { + assert_eq!(x, y); + } + (Ok(None), Ok(None)) => {} + (Err(_), Err(_)) => {} + (x, y) => { + panic!("Inconsistency: {x:?} != {y:?}"); + } + } + } + + #[cfg(not(test))] + fn get_and_check_with_hash(&mut self, _block_hash: &T, _key: &str) {} + /// Resolve a key from 
the MARF to a MARFValue with respect to the given block height. fn get(&mut self, block_hash: &T, key: &str) -> Result, Error> { + self.get_and_check_with_hash(block_hash, key); self.with_conn(|c| MARF::get_by_key(c, block_hash, key)) } + /// Resolve a TrieHash from the MARF to a MARFValue with respect to the given block height. + fn get_from_hash(&mut self, block_hash: &T, th: &TrieHash) -> Result, Error> { + self.with_conn(|c| MARF::get_by_hash(c, block_hash, th)) + } + fn get_with_proof( &mut self, block_hash: &T, @@ -142,6 +170,21 @@ pub trait MarfConnection { }) } + fn get_with_proof_from_hash( + &mut self, + block_hash: &T, + hash: &TrieHash, + ) -> Result)>, Error> { + self.with_conn(|conn| { + let marf_value = match MARF::get_by_path(conn, block_hash, hash)? { + None => return Ok(None), + Some(x) => x, + }; + let proof = TrieMerkleProof::from_path(conn, hash, &marf_value, block_hash)?; + Ok(Some((marf_value, proof))) + }) + } + fn get_block_at_height(&mut self, height: u32, tip: &T) -> Result, Error> { self.with_conn(|c| MARF::get_block_at_height(c, height, tip)) } @@ -781,7 +824,7 @@ impl MARF { fn walk_cow( storage: &mut TrieStorageTransaction, block_hash: &T, - path: &TriePath, + path: &TrieHash, ) -> Result, Error> { let block_id = storage.get_block_identifier(block_hash); MARF::extend_trie(storage, block_hash)?; @@ -886,7 +929,7 @@ impl MARF { fn walk( storage: &mut TrieStorageConnection, block_hash: &T, - path: &TriePath, + path: &TrieHash, ) -> Result<(TrieCursor, TrieNodeType), Error> { storage.open_block(block_hash)?; @@ -994,7 +1037,7 @@ impl MARF { pub fn get_path( storage: &mut TrieStorageConnection, block_hash: &T, - path: &TriePath, + path: &TrieHash, ) -> Result, Error> { trace!("MARF::get_path({:?}) {:?}", block_hash, path); @@ -1045,7 +1088,7 @@ impl MARF { fn do_insert_leaf( storage: &mut TrieStorageTransaction, block_hash: &T, - path: &TriePath, + path: &TrieHash, leaf_value: &TrieLeaf, update_skiplist: bool, ) -> Result<(), Error> { @@ 
-1076,7 +1119,7 @@ impl MARF { pub fn insert_leaf( storage: &mut TrieStorageTransaction, block_hash: &T, - path: &TriePath, + path: &TrieHash, value: &TrieLeaf, ) -> Result<(), Error> { if storage.readonly() { @@ -1089,7 +1132,7 @@ impl MARF { pub fn insert_leaf_in_batch( storage: &mut TrieStorageTransaction, block_hash: &T, - path: &TriePath, + path: &TrieHash, value: &TrieLeaf, ) -> Result<(), Error> { if storage.readonly() { @@ -1123,6 +1166,35 @@ impl MARF { Ok(MARF::from_storage(file_storage)) } + pub fn get_by_path( + storage: &mut TrieStorageConnection, + block_hash: &T, + path: &TrieHash, + ) -> Result, Error> { + let (cur_block_hash, cur_block_id) = storage.get_cur_block_and_id(); + + let result = MARF::get_path(storage, block_hash, &path).or_else(|e| match e { + Error::NotFoundError => Ok(None), + _ => Err(e), + }); + + // restore + storage + .open_block_maybe_id(&cur_block_hash, cur_block_id) + .map_err(|e| { + warn!( + "Failed to re-open {} {:?}: {:?}", + &cur_block_hash, cur_block_id, &e + ); + warn!("Result of failed path lookup '{}': {:?}", path, &result); + e + })?; + + result.map(|option_result| option_result.map(|leaf| leaf.data)) + } + + /// Load up a MARF value by key, given a handle to the storage connection and a tip to work off + /// of. pub fn get_by_key( storage: &mut TrieStorageConnection, block_hash: &T, @@ -1130,7 +1202,7 @@ impl MARF { ) -> Result, Error> { let (cur_block_hash, cur_block_id) = storage.get_cur_block_and_id(); - let path = TriePath::from_key(key); + let path = TrieHash::from_key(key); let result = MARF::get_path(storage, block_hash, &path).or_else(|e| match e { Error::NotFoundError => Ok(None), @@ -1152,6 +1224,35 @@ impl MARF { result.map(|option_result| option_result.map(|leaf| leaf.data)) } + /// Load up a MARF value by TrieHash, given a handle to the storage connection and a tip to + /// work off of. 
+ pub fn get_by_hash( + storage: &mut TrieStorageConnection, + block_hash: &T, + path: &TrieHash, + ) -> Result, Error> { + let (cur_block_hash, cur_block_id) = storage.get_cur_block_and_id(); + + let result = MARF::get_path(storage, block_hash, &path).or_else(|e| match e { + Error::NotFoundError => Ok(None), + _ => Err(e), + }); + + // restore + storage + .open_block_maybe_id(&cur_block_hash, cur_block_id) + .map_err(|e| { + warn!( + "Failed to re-open {} {:?}: {:?}", + &cur_block_hash, cur_block_id, &e + ); + warn!("Result of failed hash lookup '{}': {:?}", path, &result); + e + })?; + + result.map(|option_result| option_result.map(|leaf| leaf.data)) + } + pub fn get_block_height_miner_tip( storage: &mut TrieStorageConnection, block_hash: &T, @@ -1262,7 +1363,7 @@ impl MARF { .zip(values[0..last].iter()) .try_for_each(|((index, key), value)| { let marf_leaf = TrieLeaf::from_value(&[], value.clone()); - let path = TriePath::from_key(key); + let path = TrieHash::from_key(key); if eta_enabled { let updated_progress = 100 * index / last; @@ -1280,7 +1381,7 @@ impl MARF { if result.is_ok() { // last insert updates the root with the skiplist hash let marf_leaf = TrieLeaf::from_value(&[], values[last].clone()); - let path = TriePath::from_key(&keys[last]); + let path = TrieHash::from_key(&keys[last]); result = MARF::insert_leaf(conn, block_hash, &path, &marf_leaf); } @@ -1320,6 +1421,20 @@ impl MARF { Ok(Some((marf_value, proof))) } + pub fn get_with_proof_from_hash( + &mut self, + block_hash: &T, + path: &TrieHash, + ) -> Result)>, Error> { + let mut conn = self.storage.connection(); + let marf_value = match MARF::get_by_path(&mut conn, block_hash, &path)? 
{ + None => return Ok(None), + Some(x) => x, + }; + let proof = TrieMerkleProof::from_path(&mut conn, &path, &marf_value, block_hash)?; + Ok(Some((marf_value, proof))) + } + pub fn get_bhh_at_height(&mut self, block_hash: &T, height: u32) -> Result, Error> { MARF::get_block_at_height(&mut self.storage.connection(), height, block_hash) } @@ -1356,14 +1471,14 @@ impl MARF { return Err(Error::ReadOnlyError); } let marf_leaf = TrieLeaf::from_value(&[], value); - let path = TriePath::from_key(key); + let path = TrieHash::from_key(key); self.insert_raw(path, marf_leaf) } /// Insert the given (key, value) pair into the MARF. Inserting the same key twice silently /// overwrites the existing key. Succeeds if there are no storage errors. /// Must be called after a call to .begin() (will fail otherwise) - pub fn insert_raw(&mut self, path: TriePath, marf_leaf: TrieLeaf) -> Result<(), Error> { + pub fn insert_raw(&mut self, path: TrieHash, marf_leaf: TrieLeaf) -> Result<(), Error> { if self.storage.readonly() { return Err(Error::ReadOnlyError); } diff --git a/stackslib/src/chainstate/stacks/index/mod.rs b/stackslib/src/chainstate/stacks/index/mod.rs index eb082747c5..9fee7ab2d6 100644 --- a/stackslib/src/chainstate/stacks/index/mod.rs +++ b/stackslib/src/chainstate/stacks/index/mod.rs @@ -151,71 +151,6 @@ impl MarfTrieId for BurnchainHeaderHash {} #[cfg(test)] impl MarfTrieId for BlockHeaderHash {} -pub trait TrieHashExtension { - fn from_empty_data() -> TrieHash; - fn from_data(data: &[u8]) -> TrieHash; - fn from_data_array>(data: &[B]) -> TrieHash; - fn to_string(&self) -> String; -} - -impl TrieHashExtension for TrieHash { - /// TrieHash of zero bytes - fn from_empty_data() -> TrieHash { - // sha2-512/256 hash of empty string. - // this is used so frequently it helps performance if we just have a constant for it. 
- TrieHash([ - 0xc6, 0x72, 0xb8, 0xd1, 0xef, 0x56, 0xed, 0x28, 0xab, 0x87, 0xc3, 0x62, 0x2c, 0x51, - 0x14, 0x06, 0x9b, 0xdd, 0x3a, 0xd7, 0xb8, 0xf9, 0x73, 0x74, 0x98, 0xd0, 0xc0, 0x1e, - 0xce, 0xf0, 0x96, 0x7a, - ]) - } - - /// TrieHash from bytes - fn from_data(data: &[u8]) -> TrieHash { - if data.len() == 0 { - return TrieHash::from_empty_data(); - } - - let mut tmp = [0u8; 32]; - - let mut hasher = TrieHasher::new(); - hasher.update(data); - tmp.copy_from_slice(hasher.finalize().as_slice()); - - TrieHash(tmp) - } - - fn from_data_array>(data: &[B]) -> TrieHash { - if data.len() == 0 { - return TrieHash::from_empty_data(); - } - - let mut tmp = [0u8; 32]; - - let mut hasher = TrieHasher::new(); - - for item in data.iter() { - hasher.update(item); - } - tmp.copy_from_slice(hasher.finalize().as_slice()); - TrieHash(tmp) - } - - /// Convert to a String that can be used in e.g. sqlite - fn to_string(&self) -> String { - let s = format!("{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}", - self.0[0], self.0[1], self.0[2], self.0[3], - self.0[4], self.0[5], self.0[6], self.0[7], - self.0[8], self.0[9], self.0[10], self.0[11], - self.0[12], self.0[13], self.0[14], self.0[15], - self.0[16], self.0[17], self.0[18], self.0[19], - self.0[20], self.0[21], self.0[22], self.0[23], - self.0[24], self.0[25], self.0[26], self.0[27], - self.0[28], self.0[29], self.0[30], self.0[31]); - s - } -} - /// Structure that holds the actual data in a MARF leaf node. /// It only stores the hash of some value string, but we add 8 extra bytes for future extensions. /// If not used (the rule today), then they should all be 0. 
diff --git a/stackslib/src/chainstate/stacks/index/node.rs b/stackslib/src/chainstate/stacks/index/node.rs index 19e8aa327f..da9fc8bbd2 100644 --- a/stackslib/src/chainstate/stacks/index/node.rs +++ b/stackslib/src/chainstate/stacks/index/node.rs @@ -32,8 +32,8 @@ use crate::chainstate::stacks::index::bits::{ get_path_byte_len, get_ptrs_byte_len, path_from_bytes, ptrs_from_bytes, write_path_to_bytes, }; use crate::chainstate::stacks::index::{ - BlockMap, ClarityMarfTrieId, Error, MARFValue, MarfTrieId, TrieHashExtension, TrieHasher, - TrieLeaf, MARF_VALUE_ENCODED_SIZE, + BlockMap, ClarityMarfTrieId, Error, MARFValue, MarfTrieId, TrieHasher, TrieLeaf, + MARF_VALUE_ENCODED_SIZE, }; #[derive(Debug, Clone, PartialEq)] @@ -106,23 +106,6 @@ fn ptrs_consensus_hash( Ok(()) } -/// A path in the Trie is the SHA2-512/256 hash of its key. -pub struct TriePath([u8; 32]); -impl_array_newtype!(TriePath, u8, 32); -impl_array_hexstring_fmt!(TriePath); -impl_byte_array_newtype!(TriePath, u8, 32); - -pub const TRIEPATH_MAX_LEN: usize = 32; - -impl TriePath { - pub fn from_key(k: &str) -> TriePath { - let h = TrieHash::from_data(k.as_bytes()); - let mut hb = [0u8; TRIEPATH_MAX_LEN]; - hb.copy_from_slice(h.as_bytes()); - TriePath(hb) - } -} - /// All Trie nodes implement the following methods: pub trait TrieNode { /// Node ID for encoding/decoding @@ -339,7 +322,7 @@ impl TriePtr { /// nodes to visit when updating the root node hash. 
#[derive(Debug, Clone, PartialEq)] pub struct TrieCursor { - pub path: TriePath, // the path to walk + pub path: TrieHash, // the path to walk pub index: usize, // index into the path pub node_path_index: usize, // index into the currently-visited node's compressed path pub nodes: Vec, // list of nodes this cursor visits @@ -349,7 +332,7 @@ pub struct TrieCursor { } impl TrieCursor { - pub fn new(path: &TriePath, root_ptr: TriePtr) -> TrieCursor { + pub fn new(path: &TrieHash, root_ptr: TriePtr) -> TrieCursor { TrieCursor { path: path.clone(), index: 0, diff --git a/stackslib/src/chainstate/stacks/index/proofs.rs b/stackslib/src/chainstate/stacks/index/proofs.rs index 815def9c91..85e91ebefb 100644 --- a/stackslib/src/chainstate/stacks/index/proofs.rs +++ b/stackslib/src/chainstate/stacks/index/proofs.rs @@ -35,14 +35,13 @@ use crate::chainstate::stacks::index::bits::{ use crate::chainstate::stacks::index::marf::MARF; use crate::chainstate::stacks::index::node::{ clear_backptr, is_backptr, set_backptr, ConsensusSerializable, CursorError, TrieCursor, - TrieNode, TrieNode16, TrieNode256, TrieNode4, TrieNode48, TrieNodeID, TrieNodeType, TriePath, - TriePtr, + TrieNode, TrieNode16, TrieNode256, TrieNode4, TrieNode48, TrieNodeID, TrieNodeType, TriePtr, }; use crate::chainstate::stacks::index::storage::{TrieFileStorage, TrieStorageConnection}; use crate::chainstate::stacks::index::trie::Trie; use crate::chainstate::stacks::index::{ BlockMap, ClarityMarfTrieId, Error, MARFValue, MarfTrieId, ProofTrieNode, ProofTriePtr, - TrieHashExtension, TrieLeaf, TrieMerkleProof, TrieMerkleProofType, + TrieLeaf, TrieMerkleProof, TrieMerkleProofType, }; impl ConsensusSerializable<()> for ProofTrieNode { @@ -1004,7 +1003,7 @@ impl TrieMerkleProof { /// * segment proof i+1 must be a prefix of segment proof i /// * segment proof 0 must end in a leaf /// * all segment proofs must end in a Node256 (a root) - fn is_proof_well_formed(proof: &Vec>, expected_path: &TriePath) -> bool { + fn 
is_proof_well_formed(proof: &Vec>, expected_path: &TrieHash) -> bool { if proof.len() == 0 { trace!("Proof is empty"); return false; @@ -1048,7 +1047,7 @@ impl TrieMerkleProof { } }; - // first path bytes must be the expected TriePath + // first path bytes must be the expected TrieHash if expected_path.as_bytes().to_vec() != path_bytes { trace!( "Invalid proof -- path bytes {:?} differs from the expected path {:?}", @@ -1121,7 +1120,7 @@ impl TrieMerkleProof { /// NOTE: Trie root hashes are globally unique by design, even if they represent the same contents, so the root_to_block map is bijective with high probability. pub fn verify_proof( proof: &Vec>, - path: &TriePath, + path: &TrieHash, value: &MARFValue, root_hash: &TrieHash, root_to_block: &HashMap, @@ -1351,7 +1350,7 @@ impl TrieMerkleProof { /// Verify this proof pub fn verify( &self, - path: &TriePath, + path: &TrieHash, marf_value: &MARFValue, root_hash: &TrieHash, root_to_block: &HashMap, @@ -1362,7 +1361,7 @@ impl TrieMerkleProof { /// Walk down the trie pointed to by s until we reach a backptr or a leaf fn walk_to_leaf_or_backptr( storage: &mut TrieStorageConnection, - path: &TriePath, + path: &TrieHash, ) -> Result<(TrieCursor, TrieNodeType, TriePtr), Error> { trace!( "Walk path {:?} from {:?} to the first backptr", @@ -1438,7 +1437,7 @@ impl TrieMerkleProof { /// If the path doesn't resolve, return an error (NotFoundError) pub fn from_path( storage: &mut TrieStorageConnection, - path: &TriePath, + path: &TrieHash, expected_value: &MARFValue, root_block_header: &T, ) -> Result, Error> { @@ -1562,7 +1561,7 @@ impl TrieMerkleProof { root_block_header: &T, ) -> Result, Error> { let marf_value = MARFValue::from_value(value); - let path = TriePath::from_key(key); + let path = TrieHash::from_key(key); TrieMerkleProof::from_path(storage, &path, &marf_value, root_block_header) } @@ -1572,7 +1571,7 @@ impl TrieMerkleProof { value: &MARFValue, root_block_header: &T, ) -> Result, Error> { - let path = 
TriePath::from_key(key); + let path = TrieHash::from_key(key); TrieMerkleProof::from_path(storage, &path, value, root_block_header) } } diff --git a/stackslib/src/chainstate/stacks/index/storage.rs b/stackslib/src/chainstate/stacks/index/storage.rs index 6994c7ad05..6e7ca815c9 100644 --- a/stackslib/src/chainstate/stacks/index/storage.rs +++ b/stackslib/src/chainstate/stacks/index/storage.rs @@ -46,13 +46,12 @@ use crate::chainstate::stacks::index::file::{TrieFile, TrieFileNodeHashReader}; use crate::chainstate::stacks::index::marf::MARFOpenOpts; use crate::chainstate::stacks::index::node::{ clear_backptr, is_backptr, set_backptr, TrieNode, TrieNode16, TrieNode256, TrieNode4, - TrieNode48, TrieNodeID, TrieNodeType, TriePath, TriePtr, + TrieNode48, TrieNodeID, TrieNodeType, TriePtr, }; use crate::chainstate::stacks::index::profile::TrieBenchmark; use crate::chainstate::stacks::index::trie::Trie; use crate::chainstate::stacks::index::{ - trie_sql, BlockMap, ClarityMarfTrieId, Error, MarfTrieId, TrieHashExtension, TrieHasher, - TrieLeaf, + trie_sql, BlockMap, ClarityMarfTrieId, Error, MarfTrieId, TrieHasher, TrieLeaf, }; use crate::util_lib::db::{ sql_pragma, sqlite_open, tx_begin_immediate, tx_busy_handler, Error as db_error, diff --git a/stackslib/src/chainstate/stacks/index/test/cache.rs b/stackslib/src/chainstate/stacks/index/test/cache.rs index 5a0bc41d00..1abd0e741a 100644 --- a/stackslib/src/chainstate/stacks/index/test/cache.rs +++ b/stackslib/src/chainstate/stacks/index/test/cache.rs @@ -105,7 +105,7 @@ fn test_marf_with_cache( } } else { for (key, value) in block_data.iter() { - let path = TriePath::from_key(key); + let path = TrieHash::from_key(key); let leaf = TrieLeaf::from_value(&vec![], value.clone()); marf.insert_raw(path, leaf).unwrap(); } @@ -128,7 +128,7 @@ fn test_marf_with_cache( for (i, block_data) in data.iter().enumerate() { test_debug!("Read block {}", i); for (key, value) in block_data.iter() { - let path = TriePath::from_key(key); + let path 
= TrieHash::from_key(key); let marf_leaf = TrieLeaf::from_value(&vec![], value.clone()); let read_time = SystemTime::now(); diff --git a/stackslib/src/chainstate/stacks/index/test/file.rs b/stackslib/src/chainstate/stacks/index/test/file.rs index 499198aca5..19ac5e60e4 100644 --- a/stackslib/src/chainstate/stacks/index/test/file.rs +++ b/stackslib/src/chainstate/stacks/index/test/file.rs @@ -106,7 +106,7 @@ fn test_migrate_existing_trie_blobs() { marf.begin(&last_block_header, &block_header).unwrap(); for (key, value) in block_data.iter() { - let path = TriePath::from_key(key); + let path = TrieHash::from_key(key); let leaf = TrieLeaf::from_value(&vec![], value.clone()); marf.insert_raw(path, leaf).unwrap(); } @@ -147,7 +147,7 @@ fn test_migrate_existing_trie_blobs() { // verify that we can read everything from the blobs for (i, block_data) in data.iter().enumerate() { for (key, value) in block_data.iter() { - let path = TriePath::from_key(key); + let path = TrieHash::from_key(key); let marf_leaf = TrieLeaf::from_value(&vec![], value.clone()); let leaf = MARF::get_path( diff --git a/stackslib/src/chainstate/stacks/index/test/marf.rs b/stackslib/src/chainstate/stacks/index/test/marf.rs index b66fc4dd8a..e7535e9553 100644 --- a/stackslib/src/chainstate/stacks/index/test/marf.rs +++ b/stackslib/src/chainstate/stacks/index/test/marf.rs @@ -32,9 +32,7 @@ use crate::chainstate::stacks::index::proofs::*; use crate::chainstate::stacks::index::storage::*; use crate::chainstate::stacks::index::test::*; use crate::chainstate::stacks::index::trie::*; -use crate::chainstate::stacks::index::{ - ClarityMarfTrieId, Error, MARFValue, TrieHashExtension, TrieLeaf, -}; +use crate::chainstate::stacks::index::{ClarityMarfTrieId, Error, MARFValue, TrieLeaf}; #[test] fn marf_insert_different_leaf_same_block_100() { @@ -52,7 +50,7 @@ fn marf_insert_different_leaf_same_block_100() { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 
30, 31, ]; - let path = TriePath::from_bytes(&path_bytes).unwrap(); + let path = TrieHash::from_bytes(&path_bytes).unwrap(); for i in 0..100 { let value = TrieLeaf::new(&vec![], &[i as u8; 40].to_vec()); @@ -117,7 +115,7 @@ fn marf_insert_different_leaf_different_path_different_block_100() { marf.commit().unwrap(); marf.begin(&BlockHeaderHash::sentinel(), &block_header) .unwrap(); - let path = TriePath::from_bytes(&path_bytes).unwrap(); + let path = TrieHash::from_bytes(&path_bytes).unwrap(); let value = TrieLeaf::new(&vec![], &[i as u8; 40].to_vec()); marf.insert_raw(path, value).unwrap(); } @@ -140,7 +138,7 @@ fn marf_insert_different_leaf_different_path_different_block_100() { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, i as u8, ]; - let path = TriePath::from_bytes(&path_bytes).unwrap(); + let path = TrieHash::from_bytes(&path_bytes).unwrap(); let value = TrieLeaf::new(&vec![], &[i as u8; 40].to_vec()); let leaf = MARF::get_path(&mut marf.borrow_storage_backend(), &block_header, &path) @@ -189,7 +187,7 @@ fn marf_insert_same_leaf_different_block_100() { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, ]; - let path = TriePath::from_bytes(&path_bytes).unwrap(); + let path = TrieHash::from_bytes(&path_bytes).unwrap(); for i in 0..100 { let next_block_header = BlockHeaderHash::from_bytes(&[i + 1 as u8; 32]).unwrap(); @@ -197,7 +195,7 @@ fn marf_insert_same_leaf_different_block_100() { marf.commit().unwrap(); marf.begin(&BlockHeaderHash::sentinel(), &next_block_header) .unwrap(); - let path = TriePath::from_bytes(&path_bytes).unwrap(); + let path = TrieHash::from_bytes(&path_bytes).unwrap(); let value = TrieLeaf::new(&vec![], &[i as u8; 40].to_vec()); marf.insert_raw(path, value).unwrap(); } @@ -271,7 +269,7 @@ fn marf_insert_leaf_sequence_2() { i as u8, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 
22, 23, 24, 25, 26, 27, 28, 29, 30, 31, ]; - let path = TriePath::from_bytes(&path_bytes).unwrap(); + let path = TrieHash::from_bytes(&path_bytes).unwrap(); let prior_block_header = BlockHeaderHash::from_bytes(&[i as u8; 32]).unwrap(); let next_block_header = BlockHeaderHash::from_bytes(&[i + 1 as u8; 32]).unwrap(); marf.commit().unwrap(); @@ -294,7 +292,7 @@ fn marf_insert_leaf_sequence_2() { i as u8, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, ]; - let path = TriePath::from_bytes(&path_bytes).unwrap(); + let path = TrieHash::from_bytes(&path_bytes).unwrap(); let value = TrieLeaf::new(&vec![], &[i as u8; 40].to_vec()); let leaf = MARF::get_path( @@ -348,7 +346,7 @@ fn marf_insert_leaf_sequence_100() { i as u8, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, ]; - let path = TriePath::from_bytes(&path_bytes).unwrap(); + let path = TrieHash::from_bytes(&path_bytes).unwrap(); marf.commit().unwrap(); let next_block_header = BlockHeaderHash::from_bytes(&[i as u8; 32]).unwrap(); @@ -372,7 +370,7 @@ fn marf_insert_leaf_sequence_100() { i as u8, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, ]; - let path = TriePath::from_bytes(&path_bytes).unwrap(); + let path = TrieHash::from_bytes(&path_bytes).unwrap(); let value = TrieLeaf::new(&vec![], &[i as u8; 40].to_vec()); eprintln!("Finding value inserted at {}", &next_block_header); @@ -567,7 +565,7 @@ where let next_path = path_gen(i, path.clone()); - let triepath = TriePath::from_bytes(&next_path[..]).unwrap(); + let triepath = TrieHash::from_bytes(&next_path[..]).unwrap(); let value = TrieLeaf::new(&vec![], &[i as u8; 40].to_vec()); debug!("----------------"); @@ -582,7 +580,7 @@ where let read_value = MARF::get_path( &mut marf.borrow_storage_backend(), &next_block_header, - &TriePath::from_bytes(&next_path[..]).unwrap(), 
+ &TrieHash::from_bytes(&next_path[..]).unwrap(), ) .unwrap() .unwrap(); @@ -603,7 +601,7 @@ where let read_value = MARF::get_path( &mut marf.borrow_storage_backend(), &next_block_header, - &TriePath::from_bytes(&prev_path[..]).unwrap(), + &TrieHash::from_bytes(&prev_path[..]).unwrap(), ) .unwrap() .unwrap(); @@ -675,7 +673,7 @@ where // add a leaf at the end of the path let next_path = path_gen(i, path.clone()); - let triepath = TriePath::from_bytes(&next_path[..]).unwrap(); + let triepath = TrieHash::from_bytes(&next_path[..]).unwrap(); let value = MARFValue([i as u8; 40]); assert_eq!( @@ -847,7 +845,7 @@ fn marf_merkle_verify_backptrs() { marf.commit().unwrap(); marf.begin(&block_header_1, &block_header_2).unwrap(); marf.insert_raw( - TriePath::from_bytes(&path_2[..]).unwrap(), + TrieHash::from_bytes(&path_2[..]).unwrap(), TrieLeaf::new(&vec![], &[20 as u8; 40].to_vec()), ) .unwrap(); @@ -865,7 +863,7 @@ fn marf_merkle_verify_backptrs() { marf.commit().unwrap(); marf.begin(&block_header_2, &block_header_3).unwrap(); marf.insert_raw( - TriePath::from_bytes(&path_3[..]).unwrap(), + TrieHash::from_bytes(&path_3[..]).unwrap(), TrieLeaf::new(&vec![], &[21 as u8; 40].to_vec()), ) .unwrap(); @@ -922,7 +920,7 @@ where let (path, next_block_header) = path_gen(i); - let triepath = TriePath::from_bytes(&path[..]).unwrap(); + let triepath = TrieHash::from_bytes(&path[..]).unwrap(); let value = TrieLeaf::new( &vec![], &[ @@ -944,7 +942,7 @@ where let read_value = MARF::get_path( &mut marf.borrow_storage_backend(), &block_header, - &TriePath::from_bytes(&path[..]).unwrap(), + &TrieHash::from_bytes(&path[..]).unwrap(), ) .unwrap() .unwrap(); @@ -998,7 +996,7 @@ where let i1 = i % 256; let (path, _next_block_header) = path_gen(i); - let triepath = TriePath::from_bytes(&path[..]).unwrap(); + let triepath = TrieHash::from_bytes(&path[..]).unwrap(); let value = TrieLeaf::new( &vec![], &[ @@ -1011,7 +1009,7 @@ where let read_value = MARF::get_path( &mut 
marf.borrow_storage_backend(), &block_header, - &TriePath::from_bytes(&path[..]).unwrap(), + &TrieHash::from_bytes(&path[..]).unwrap(), ) .unwrap() .unwrap(); @@ -1139,7 +1137,7 @@ fn marf_split_leaf_path() { .unwrap(); let path = [0u8; 32]; - let triepath = TriePath::from_bytes(&path[..]).unwrap(); + let triepath = TrieHash::from_bytes(&path[..]).unwrap(); let value = TrieLeaf::new(&vec![], &[0u8; 40].to_vec()); debug!("----------------"); @@ -1161,7 +1159,7 @@ fn marf_split_leaf_path() { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, ]; - let triepath_2 = TriePath::from_bytes(&path_2[..]).unwrap(); + let triepath_2 = TrieHash::from_bytes(&path_2[..]).unwrap(); let value_2 = TrieLeaf::new(&vec![], &[1u8; 40].to_vec()); debug!("----------------"); @@ -1602,7 +1600,7 @@ fn marf_read_random_1048576_4096_file_storage() { let path = TrieHash::from_data(&seed[..]).as_bytes()[0..32].to_vec(); seed = path.clone(); - let triepath = TriePath::from_bytes(&path[..]).unwrap(); + let triepath = TrieHash::from_bytes(&path[..]).unwrap(); let value = TrieLeaf::new( &vec![], &[ @@ -1615,7 +1613,7 @@ fn marf_read_random_1048576_4096_file_storage() { let read_value = MARF::get_path( &mut f, &block_header, - &TriePath::from_bytes(&path[..]).unwrap(), + &TrieHash::from_bytes(&path[..]).unwrap(), ) .unwrap() .unwrap(); @@ -1896,7 +1894,7 @@ fn marf_insert_flush_to_different_block() { None }; - let triepath = TriePath::from_bytes(&path[..]).unwrap(); + let triepath = TrieHash::from_bytes(&path[..]).unwrap(); let value = TrieLeaf::new( &vec![], &[ @@ -1919,7 +1917,7 @@ fn marf_insert_flush_to_different_block() { let read_value = MARF::get_path( &mut marf.borrow_storage_backend(), &target_block, - &TriePath::from_bytes(&path[..]).unwrap(), + &TrieHash::from_bytes(&path[..]).unwrap(), ) .unwrap() .unwrap(); @@ -2017,7 +2015,7 @@ fn marf_insert_flush_to_different_block() { 24, 25, 26, 27, 28, 29, i0 as u8, i1 as u8, ]; - let triepath = 
TriePath::from_bytes(&path[..]).unwrap(); + let triepath = TrieHash::from_bytes(&path[..]).unwrap(); let value = TrieLeaf::new( &vec![], &[ @@ -2037,7 +2035,7 @@ fn marf_insert_flush_to_different_block() { let read_value = MARF::get_path( &mut marf.borrow_storage_backend(), &read_from_block, - &TriePath::from_bytes(&path[..]).unwrap(), + &TrieHash::from_bytes(&path[..]).unwrap(), ) .unwrap() .unwrap(); @@ -2074,7 +2072,7 @@ fn test_marf_read_only() { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, ]; - let triepath = TriePath::from_bytes(&path[..]).unwrap(); + let triepath = TrieHash::from_bytes(&path[..]).unwrap(); let leaf = TrieLeaf::new( &vec![], &[ @@ -2138,13 +2136,13 @@ fn test_marf_begin_from_sentinel_twice() { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, ]; - let triepath_1 = TriePath::from_bytes(&path_1[..]).unwrap(); + let triepath_1 = TrieHash::from_bytes(&path_1[..]).unwrap(); let path_2 = [ 1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, ]; - let triepath_2 = TriePath::from_bytes(&path_2[..]).unwrap(); + let triepath_2 = TrieHash::from_bytes(&path_2[..]).unwrap(); let value_1 = TrieLeaf::new(&vec![], &vec![1u8; 40]); let value_2 = TrieLeaf::new(&vec![], &vec![2u8; 40]); @@ -2210,14 +2208,14 @@ fn test_marf_unconfirmed() { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, ]; - let triepath_1 = TriePath::from_bytes(&path_1[..]).unwrap(); + let triepath_1 = TrieHash::from_bytes(&path_1[..]).unwrap(); let value_1 = TrieLeaf::new(&vec![], &vec![1u8; 40]); let path_2 = [ 1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, ]; - let triepath_2 = TriePath::from_bytes(&path_2[..]).unwrap(); + let triepath_2 = 
TrieHash::from_bytes(&path_2[..]).unwrap(); let value_2 = TrieLeaf::new(&vec![], &vec![2u8; 40]); let block_header = StacksBlockId([0x33u8; 32]); diff --git a/stackslib/src/chainstate/stacks/index/test/mod.rs b/stackslib/src/chainstate/stacks/index/test/mod.rs index 2c3b04698c..0ccdffa78b 100644 --- a/stackslib/src/chainstate/stacks/index/test/mod.rs +++ b/stackslib/src/chainstate/stacks/index/test/mod.rs @@ -31,9 +31,7 @@ use crate::chainstate::stacks::index::node::*; use crate::chainstate::stacks::index::proofs::*; use crate::chainstate::stacks::index::storage::*; use crate::chainstate::stacks::index::trie::*; -use crate::chainstate::stacks::index::{ - MARFValue, MarfTrieId, TrieHashExtension, TrieLeaf, TrieMerkleProof, -}; +use crate::chainstate::stacks::index::{MARFValue, MarfTrieId, TrieLeaf, TrieMerkleProof}; use crate::chainstate::stacks::{BlockHeaderHash, TrieHash}; pub mod cache; @@ -108,7 +106,7 @@ pub fn merkle_test( value: &Vec, ) -> () { let (_, root_hash) = Trie::read_root(s).unwrap(); - let triepath = TriePath::from_bytes(&path[..]).unwrap(); + let triepath = TrieHash::from_bytes(&path[..]).unwrap(); let block_header = BlockHeaderHash([0u8; 32]); s.open_block(&block_header).unwrap(); @@ -147,7 +145,7 @@ pub fn merkle_test_marf( s.open_block(header).unwrap(); let (_, root_hash) = Trie::read_root(s).unwrap(); - let triepath = TriePath::from_bytes(&path[..]).unwrap(); + let triepath = TrieHash::from_bytes(&path[..]).unwrap(); let mut marf_value = [0u8; 40]; marf_value.copy_from_slice(&value[0..40]); @@ -199,7 +197,7 @@ pub fn merkle_test_marf_key_value( test_debug!("---------"); let root_to_block = root_to_block.unwrap_or_else(|| s.read_root_to_block_table().unwrap()); - let triepath = TriePath::from_key(key); + let triepath = TrieHash::from_key(key); let marf_value = MARFValue::from_value(value); assert!(proof.verify(&triepath, &marf_value, &root_hash, &root_to_block)); diff --git a/stackslib/src/chainstate/stacks/index/test/node.rs 
b/stackslib/src/chainstate/stacks/index/test/node.rs index a98491595d..227adda439 100644 --- a/stackslib/src/chainstate/stacks/index/test/node.rs +++ b/stackslib/src/chainstate/stacks/index/test/node.rs @@ -4215,7 +4215,7 @@ fn trie_cursor_walk_full() { // walk down the trie let mut c = TrieCursor::new( - &TriePath::from_bytes(&path).unwrap(), + &TrieHash::from_bytes(&path).unwrap(), trie_io.root_trieptr(), ); let mut walk_point = nodes[0].clone(); @@ -4313,7 +4313,7 @@ fn trie_cursor_walk_1() { // walk down the trie let mut c = TrieCursor::new( - &TriePath::from_bytes(&path).unwrap(), + &TrieHash::from_bytes(&path).unwrap(), trie_io.root_trieptr(), ); let mut walk_point = nodes[0].clone(); @@ -4406,7 +4406,7 @@ fn trie_cursor_walk_2() { // walk down the trie let mut c = TrieCursor::new( - &TriePath::from_bytes(&path).unwrap(), + &TrieHash::from_bytes(&path).unwrap(), trie_io.root_trieptr(), ); let mut walk_point = nodes[0].clone(); @@ -4496,7 +4496,7 @@ fn trie_cursor_walk_3() { // walk down the trie let mut c = TrieCursor::new( - &TriePath::from_bytes(&path).unwrap(), + &TrieHash::from_bytes(&path).unwrap(), trie_io.root_trieptr(), ); let mut walk_point = nodes[0].clone(); @@ -4585,7 +4585,7 @@ fn trie_cursor_walk_4() { // walk down the trie let mut c = TrieCursor::new( - &TriePath::from_bytes(&path).unwrap(), + &TrieHash::from_bytes(&path).unwrap(), trie_io.root_trieptr(), ); let mut walk_point = nodes[0].clone(); @@ -4673,7 +4673,7 @@ fn trie_cursor_walk_5() { // walk down the trie let mut c = TrieCursor::new( - &TriePath::from_bytes(&path).unwrap(), + &TrieHash::from_bytes(&path).unwrap(), trie_io.root_trieptr(), ); let mut walk_point = nodes[0].clone(); @@ -4760,7 +4760,7 @@ fn trie_cursor_walk_6() { // walk down the trie let mut c = TrieCursor::new( - &TriePath::from_bytes(&path).unwrap(), + &TrieHash::from_bytes(&path).unwrap(), trie_io.root_trieptr(), ); let mut walk_point = nodes[0].clone(); @@ -4845,7 +4845,7 @@ fn trie_cursor_walk_10() { // walk down 
the trie let mut c = TrieCursor::new( - &TriePath::from_bytes(&path).unwrap(), + &TrieHash::from_bytes(&path).unwrap(), trie_io.root_trieptr(), ); let mut walk_point = nodes[0].clone(); @@ -4937,7 +4937,7 @@ fn trie_cursor_walk_20() { // walk down the trie let mut c = TrieCursor::new( - &TriePath::from_bytes(&path).unwrap(), + &TrieHash::from_bytes(&path).unwrap(), trie_io.root_trieptr(), ); let mut walk_point = nodes[0].clone(); @@ -5028,7 +5028,7 @@ fn trie_cursor_walk_32() { // walk down the trie let mut c = TrieCursor::new( - &TriePath::from_bytes(&path).unwrap(), + &TrieHash::from_bytes(&path).unwrap(), trie_io.root_trieptr(), ); let walk_point = nodes[0].clone(); diff --git a/stackslib/src/chainstate/stacks/index/test/proofs.rs b/stackslib/src/chainstate/stacks/index/test/proofs.rs index 9642bfcdc5..9bd24af548 100644 --- a/stackslib/src/chainstate/stacks/index/test/proofs.rs +++ b/stackslib/src/chainstate/stacks/index/test/proofs.rs @@ -59,7 +59,7 @@ fn verifier_catches_stale_proof() { let new_value = m.get(&block_2, &k1).unwrap().unwrap(); test_debug!("NEW: {:?}", new_value); - let path = TriePath::from_key(&k1); + let path = TrieHash::from_key(&k1); merkle_test_marf_key_value(&mut m.borrow_storage_backend(), &block_2, &k1, &new_v, None); @@ -75,7 +75,7 @@ fn verifier_catches_stale_proof() { .unwrap(); // the verifier should not allow a proof from k1 to old_v from block_2 - let triepath_2 = TriePath::from_key(&k1); + let triepath_2 = TrieHash::from_key(&k1); let marf_value_2 = MARFValue::from_value(&old_v); assert!(!proof_2.verify(&triepath_2, &marf_value_2, &root_hash_2, &root_to_block)); @@ -86,7 +86,7 @@ fn verifier_catches_stale_proof() { .unwrap(); // the verifier should allow a proof from k1 to old_v from block_1 - let triepath_1 = TriePath::from_key(&k1); + let triepath_1 = TrieHash::from_key(&k1); let marf_value_1 = MARFValue::from_value(&old_v); assert!(proof_1.verify(&triepath_1, &marf_value_1, &root_hash_1, &root_to_block)); } @@ -169,7 +169,7 @@ 
fn ncc_verifier_catches_stale_proof() { TrieMerkleProof::from_entry(&mut m.borrow_storage_backend(), &k1, &another_v, &block_5) .unwrap(); - let triepath_4 = TriePath::from_key(&k1); + let triepath_4 = TrieHash::from_key(&k1); let marf_value_4 = MARFValue::from_value(&another_v); let root_to_block = { m.borrow_storage_backend() @@ -186,7 +186,7 @@ fn ncc_verifier_catches_stale_proof() { TrieMerkleProof::from_entry(&mut m.borrow_storage_backend(), &k1, &old_v, &block_2) .unwrap(); - let triepath_4 = TriePath::from_key(&k1); + let triepath_4 = TrieHash::from_key(&k1); let marf_value_4 = MARFValue::from_value(&old_v); let root_to_block = { m.borrow_storage_backend() diff --git a/stackslib/src/chainstate/stacks/index/test/storage.rs b/stackslib/src/chainstate/stacks/index/test/storage.rs index a996bc7186..fdd3e30191 100644 --- a/stackslib/src/chainstate/stacks/index/test/storage.rs +++ b/stackslib/src/chainstate/stacks/index/test/storage.rs @@ -164,7 +164,7 @@ fn load_store_trie_m_n_same(m: u64, n: u64, same: bool) { ]; path_bytes[24..32].copy_from_slice(&i.to_be_bytes()); - let path = TriePath::from_bytes(&path_bytes).unwrap(); + let path = TrieHash::from_bytes(&path_bytes).unwrap(); let value = TrieLeaf::new(&vec![], &[i as u8; 40].to_vec()); confirmed_marf.insert_raw(path.clone(), value).unwrap(); } @@ -213,7 +213,7 @@ fn load_store_trie_m_n_same(m: u64, n: u64, same: bool) { ]; path_bytes[24..32].copy_from_slice(&i.to_be_bytes()); - let path = TriePath::from_bytes(&path_bytes).unwrap(); + let path = TrieHash::from_bytes(&path_bytes).unwrap(); // NOTE: may have been overwritten; just check for presence assert!( @@ -235,7 +235,7 @@ fn load_store_trie_m_n_same(m: u64, n: u64, same: bool) { path_bytes[16..24].copy_from_slice(&j.to_be_bytes()); } - let path = TriePath::from_bytes(&path_bytes).unwrap(); + let path = TrieHash::from_bytes(&path_bytes).unwrap(); let value = TrieLeaf::new(&vec![], &[(i + 128) as u8; 40].to_vec()); new_inserted.push((path.clone(), 
value.clone())); diff --git a/stackslib/src/chainstate/stacks/index/test/trie.rs b/stackslib/src/chainstate/stacks/index/test/trie.rs index ca2c0ced65..9bac45508c 100644 --- a/stackslib/src/chainstate/stacks/index/test/trie.rs +++ b/stackslib/src/chainstate/stacks/index/test/trie.rs @@ -137,7 +137,7 @@ fn trie_cursor_try_attach_leaf() { path[i] = 32; let mut c = - TrieCursor::new(&TriePath::from_bytes(&path[..]).unwrap(), f.root_trieptr()); + TrieCursor::new(&TrieHash::from_bytes(&path[..]).unwrap(), f.root_trieptr()); let (nodeptr, mut node, node_hash) = walk_to_insertion_point(&mut f, &mut c); // end of path -- cursor points to the insertion point. @@ -164,7 +164,7 @@ fn trie_cursor_try_attach_leaf() { let leaf_opt_res = MARF::get_path( &mut f, &block_header, - &TriePath::from_bytes(&path[..]).unwrap(), + &TrieHash::from_bytes(&path[..]).unwrap(), ); assert!(leaf_opt_res.is_ok()); @@ -194,7 +194,7 @@ fn trie_cursor_try_attach_leaf() { let leaf_opt_res = MARF::get_path( &mut f, &block_header, - &TriePath::from_bytes(&path[..]).unwrap(), + &TrieHash::from_bytes(&path[..]).unwrap(), ); assert!(leaf_opt_res.is_ok()); @@ -250,7 +250,7 @@ fn trie_cursor_promote_leaf_to_node4() { // add a single leaf let mut c = TrieCursor::new( - &TriePath::from_bytes(&[ + &TrieHash::from_bytes(&[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, ]) @@ -275,7 +275,7 @@ fn trie_cursor_promote_leaf_to_node4() { MARF::get_path( &mut f, &block_header, - &TriePath::from_bytes(&[ + &TrieHash::from_bytes(&[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31 ]) @@ -317,7 +317,7 @@ fn trie_cursor_promote_leaf_to_node4() { path[i] = 32; let mut c = - TrieCursor::new(&TriePath::from_bytes(&path[..]).unwrap(), f.root_trieptr()); + TrieCursor::new(&TrieHash::from_bytes(&path[..]).unwrap(), f.root_trieptr()); let (nodeptr, node, node_hash) = 
walk_to_insertion_point(&mut f, &mut c); // end of path -- cursor points to the insertion point @@ -342,7 +342,7 @@ fn trie_cursor_promote_leaf_to_node4() { let leaf_opt_res = MARF::get_path( &mut f, &block_header, - &TriePath::from_bytes(&path[..]).unwrap(), + &TrieHash::from_bytes(&path[..]).unwrap(), ); assert!(leaf_opt_res.is_ok()); @@ -372,7 +372,7 @@ fn trie_cursor_promote_leaf_to_node4() { let leaf_opt_res = MARF::get_path( &mut f, &block_header, - &TriePath::from_bytes(&path[..]).unwrap(), + &TrieHash::from_bytes(&path[..]).unwrap(), ); assert!(leaf_opt_res.is_ok()); @@ -467,7 +467,7 @@ fn trie_cursor_promote_node4_to_node16() { path[k] = j + 32; let mut c = - TrieCursor::new(&TriePath::from_bytes(&path[..]).unwrap(), f.root_trieptr()); + TrieCursor::new(&TrieHash::from_bytes(&path[..]).unwrap(), f.root_trieptr()); let (nodeptr, mut node, node_hash) = walk_to_insertion_point(&mut f, &mut c); f.open_block(&block_header).unwrap(); @@ -486,7 +486,7 @@ fn trie_cursor_promote_node4_to_node16() { MARF::get_path( &mut f, &block_header, - &TriePath::from_bytes(&path[..]).unwrap() + &TrieHash::from_bytes(&path[..]).unwrap() ) .unwrap() .unwrap(), @@ -515,7 +515,7 @@ fn trie_cursor_promote_node4_to_node16() { path[k] = 128; let mut c = - TrieCursor::new(&TriePath::from_bytes(&path[..]).unwrap(), f.root_trieptr()); + TrieCursor::new(&TrieHash::from_bytes(&path[..]).unwrap(), f.root_trieptr()); let (nodeptr, mut node, node_hash) = walk_to_insertion_point(&mut f, &mut c); @@ -536,7 +536,7 @@ fn trie_cursor_promote_node4_to_node16() { MARF::get_path( &mut f, &block_header, - &TriePath::from_bytes(&path[..]).unwrap() + &TrieHash::from_bytes(&path[..]).unwrap() ) .unwrap() .unwrap(), @@ -627,7 +627,7 @@ fn trie_cursor_promote_node16_to_node48() { path[k] = j + 32; let mut c = - TrieCursor::new(&TriePath::from_bytes(&path[..]).unwrap(), f.root_trieptr()); + TrieCursor::new(&TrieHash::from_bytes(&path[..]).unwrap(), f.root_trieptr()); let (nodeptr, mut node, node_hash) = 
walk_to_insertion_point(&mut f, &mut c); @@ -648,7 +648,7 @@ fn trie_cursor_promote_node16_to_node48() { MARF::get_path( &mut f, &block_header, - &TriePath::from_bytes(&path[..]).unwrap() + &TrieHash::from_bytes(&path[..]).unwrap() ) .unwrap() .unwrap(), @@ -677,7 +677,7 @@ fn trie_cursor_promote_node16_to_node48() { path[k] = 128; let mut c = - TrieCursor::new(&TriePath::from_bytes(&path[..]).unwrap(), f.root_trieptr()); + TrieCursor::new(&TrieHash::from_bytes(&path[..]).unwrap(), f.root_trieptr()); let (nodeptr, mut node, node_hash) = walk_to_insertion_point(&mut f, &mut c); @@ -698,7 +698,7 @@ fn trie_cursor_promote_node16_to_node48() { MARF::get_path( &mut f, &block_header, - &TriePath::from_bytes(&path[..]).unwrap() + &TrieHash::from_bytes(&path[..]).unwrap() ) .unwrap() .unwrap(), @@ -734,7 +734,7 @@ fn trie_cursor_promote_node16_to_node48() { path[k] = j + 40; let mut c = - TrieCursor::new(&TriePath::from_bytes(&path[..]).unwrap(), f.root_trieptr()); + TrieCursor::new(&TrieHash::from_bytes(&path[..]).unwrap(), f.root_trieptr()); let (nodeptr, mut node, node_hash) = walk_to_insertion_point(&mut f, &mut c); @@ -755,7 +755,7 @@ fn trie_cursor_promote_node16_to_node48() { MARF::get_path( &mut f, &block_header, - &TriePath::from_bytes(&path[..]).unwrap() + &TrieHash::from_bytes(&path[..]).unwrap() ) .unwrap() .unwrap(), @@ -784,7 +784,7 @@ fn trie_cursor_promote_node16_to_node48() { path[k] = 129; let mut c = - TrieCursor::new(&TriePath::from_bytes(&path[..]).unwrap(), f.root_trieptr()); + TrieCursor::new(&TrieHash::from_bytes(&path[..]).unwrap(), f.root_trieptr()); let (nodeptr, mut node, node_hash) = walk_to_insertion_point(&mut f, &mut c); @@ -806,7 +806,7 @@ fn trie_cursor_promote_node16_to_node48() { MARF::get_path( &mut f, &block_header, - &TriePath::from_bytes(&path[..]).unwrap() + &TrieHash::from_bytes(&path[..]).unwrap() ) .unwrap() .unwrap(), @@ -897,7 +897,7 @@ fn trie_cursor_promote_node48_to_node256() { path[k] = j + 32; let mut c = - 
TrieCursor::new(&TriePath::from_bytes(&path[..]).unwrap(), f.root_trieptr()); + TrieCursor::new(&TrieHash::from_bytes(&path[..]).unwrap(), f.root_trieptr()); let (nodeptr, mut node, node_hash) = walk_to_insertion_point(&mut f, &mut c); @@ -918,7 +918,7 @@ fn trie_cursor_promote_node48_to_node256() { MARF::get_path( &mut f, &block_header, - &TriePath::from_bytes(&path[..]).unwrap() + &TrieHash::from_bytes(&path[..]).unwrap() ) .unwrap() .unwrap(), @@ -947,7 +947,7 @@ fn trie_cursor_promote_node48_to_node256() { path[k] = 128; let mut c = - TrieCursor::new(&TriePath::from_bytes(&path[..]).unwrap(), f.root_trieptr()); + TrieCursor::new(&TrieHash::from_bytes(&path[..]).unwrap(), f.root_trieptr()); let (nodeptr, mut node, node_hash) = walk_to_insertion_point(&mut f, &mut c); @@ -968,7 +968,7 @@ fn trie_cursor_promote_node48_to_node256() { MARF::get_path( &mut f, &block_header, - &TriePath::from_bytes(&path[..]).unwrap() + &TrieHash::from_bytes(&path[..]).unwrap() ) .unwrap() .unwrap(), @@ -1004,7 +1004,7 @@ fn trie_cursor_promote_node48_to_node256() { path[k] = j + 40; let mut c = - TrieCursor::new(&TriePath::from_bytes(&path[..]).unwrap(), f.root_trieptr()); + TrieCursor::new(&TrieHash::from_bytes(&path[..]).unwrap(), f.root_trieptr()); let (nodeptr, mut node, node_hash) = walk_to_insertion_point(&mut f, &mut c); @@ -1024,7 +1024,7 @@ fn trie_cursor_promote_node48_to_node256() { MARF::get_path( &mut f, &block_header, - &TriePath::from_bytes(&path[..]).unwrap() + &TrieHash::from_bytes(&path[..]).unwrap() ) .unwrap() .unwrap(), @@ -1053,7 +1053,7 @@ fn trie_cursor_promote_node48_to_node256() { path[k] = 129; let mut c = - TrieCursor::new(&TriePath::from_bytes(&path[..]).unwrap(), f.root_trieptr()); + TrieCursor::new(&TrieHash::from_bytes(&path[..]).unwrap(), f.root_trieptr()); let (nodeptr, mut node, node_hash) = walk_to_insertion_point(&mut f, &mut c); @@ -1074,7 +1074,7 @@ fn trie_cursor_promote_node48_to_node256() { MARF::get_path( &mut f, &block_header, - 
&TriePath::from_bytes(&path[..]).unwrap() + &TrieHash::from_bytes(&path[..]).unwrap() ) .unwrap() .unwrap(), @@ -1110,7 +1110,7 @@ fn trie_cursor_promote_node48_to_node256() { path[k] = j + 90; let mut c = - TrieCursor::new(&TriePath::from_bytes(&path[..]).unwrap(), f.root_trieptr()); + TrieCursor::new(&TrieHash::from_bytes(&path[..]).unwrap(), f.root_trieptr()); let (nodeptr, mut node, node_hash) = walk_to_insertion_point(&mut f, &mut c); @@ -1131,7 +1131,7 @@ fn trie_cursor_promote_node48_to_node256() { MARF::get_path( &mut f, &block_header, - &TriePath::from_bytes(&path[..]).unwrap() + &TrieHash::from_bytes(&path[..]).unwrap() ) .unwrap() .unwrap(), @@ -1160,7 +1160,7 @@ fn trie_cursor_promote_node48_to_node256() { path[k] = 130; let mut c = - TrieCursor::new(&TriePath::from_bytes(&path[..]).unwrap(), f.root_trieptr()); + TrieCursor::new(&TrieHash::from_bytes(&path[..]).unwrap(), f.root_trieptr()); let (nodeptr, mut node, node_hash) = walk_to_insertion_point(&mut f, &mut c); @@ -1181,7 +1181,7 @@ fn trie_cursor_promote_node48_to_node256() { MARF::get_path( &mut f, &block_header, - &TriePath::from_bytes(&path[..]).unwrap() + &TrieHash::from_bytes(&path[..]).unwrap() ) .unwrap() .unwrap(), @@ -1256,7 +1256,7 @@ fn trie_cursor_splice_leaf_4() { path[5 * k + 2] = 32; let mut c = - TrieCursor::new(&TriePath::from_bytes(&path[..]).unwrap(), f.root_trieptr()); + TrieCursor::new(&TrieHash::from_bytes(&path[..]).unwrap(), f.root_trieptr()); test_debug!("Start splice-insert at {:?}", &c); let (nodeptr, mut node, node_hash) = walk_to_insertion_point(&mut f, &mut c); @@ -1283,7 +1283,7 @@ fn trie_cursor_splice_leaf_4() { MARF::get_path( &mut f, &block_header, - &TriePath::from_bytes(&path[..]).unwrap() + &TrieHash::from_bytes(&path[..]).unwrap() ) .unwrap() .unwrap(), @@ -1349,7 +1349,7 @@ fn trie_cursor_splice_leaf_2() { path[3 * k + 1] = 32; let mut c = - TrieCursor::new(&TriePath::from_bytes(&path[..]).unwrap(), f.root_trieptr()); + 
TrieCursor::new(&TrieHash::from_bytes(&path[..]).unwrap(), f.root_trieptr()); test_debug!("Start splice-insert at {:?}", &c); let (nodeptr, mut node, node_hash) = walk_to_insertion_point(&mut f, &mut c); @@ -1372,7 +1372,7 @@ fn trie_cursor_splice_leaf_2() { MARF::get_path( &mut f, &block_header, - &TriePath::from_bytes(&path[..]).unwrap() + &TrieHash::from_bytes(&path[..]).unwrap() ) .unwrap() .unwrap(), @@ -1413,7 +1413,7 @@ where for i in 0..count { eprintln!("{}", i); let path = path_gen(i); - let triepath = TriePath::from_bytes(&path).unwrap(); + let triepath = TrieHash::from_bytes(&path).unwrap(); let value = TrieLeaf::new( &vec![], &[ @@ -1519,7 +1519,7 @@ where for i in 0..count { let path = path_gen(i); - let triepath = TriePath::from_bytes(&path).unwrap(); + let triepath = TrieHash::from_bytes(&path).unwrap(); let value = MARF::get_path(&mut marf.borrow_storage_backend(), &block_header, &triepath) .unwrap() diff --git a/stackslib/src/chainstate/stacks/index/trie.rs b/stackslib/src/chainstate/stacks/index/trie.rs index 6c7cc7a08a..65e41cf3ed 100644 --- a/stackslib/src/chainstate/stacks/index/trie.rs +++ b/stackslib/src/chainstate/stacks/index/trie.rs @@ -39,9 +39,7 @@ use crate::chainstate::stacks::index::node::{ use crate::chainstate::stacks::index::storage::{ TrieFileStorage, TrieHashCalculationMode, TrieStorageConnection, }; -use crate::chainstate::stacks::index::{ - Error, MarfTrieId, TrieHashExtension, TrieHasher, TrieLeaf, -}; +use crate::chainstate::stacks::index::{Error, MarfTrieId, TrieHasher, TrieLeaf}; /// We don't actually instantiate a Trie, but we still need to pass a type parameter for the /// storage implementation. 
diff --git a/stackslib/src/chainstate/stacks/index/trie_sql.rs b/stackslib/src/chainstate/stacks/index/trie_sql.rs index c9d3b40dce..8134db9d44 100644 --- a/stackslib/src/chainstate/stacks/index/trie_sql.rs +++ b/stackslib/src/chainstate/stacks/index/trie_sql.rs @@ -45,7 +45,7 @@ use crate::chainstate::stacks::index::bits::{ use crate::chainstate::stacks::index::file::TrieFile; use crate::chainstate::stacks::index::node::{ clear_backptr, is_backptr, set_backptr, TrieNode, TrieNode16, TrieNode256, TrieNode4, - TrieNode48, TrieNodeID, TrieNodeType, TriePath, TriePtr, + TrieNode48, TrieNodeID, TrieNodeType, TriePtr, }; use crate::chainstate::stacks::index::storage::{TrieFileStorage, TrieStorageConnection}; use crate::chainstate::stacks::index::{trie_sql, BlockMap, Error, MarfTrieId, TrieLeaf}; diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index 7a72cc1652..082e9c374c 100644 --- a/stackslib/src/chainstate/stacks/miner.rs +++ b/stackslib/src/chainstate/stacks/miner.rs @@ -1190,7 +1190,7 @@ impl<'a> StacksMicroblockBuilder<'a> { } if self.runtime.disable_cost_check { warn!("Fault injection: disabling miner limit on microblock runtime cost"); - clarity_tx.reset_cost(ExecutionCost::zero()); + clarity_tx.reset_cost(ExecutionCost::ZERO); } self.runtime.bytes_so_far = bytes_so_far; @@ -1418,7 +1418,7 @@ impl<'a> StacksMicroblockBuilder<'a> { } if self.runtime.disable_cost_check { warn!("Fault injection: disabling miner limit on microblock runtime cost"); - clarity_tx.reset_cost(ExecutionCost::zero()); + clarity_tx.reset_cost(ExecutionCost::ZERO); } self.runtime.bytes_so_far = bytes_so_far; diff --git a/stackslib/src/chainstate/stacks/tests/block_construction.rs b/stackslib/src/chainstate/stacks/tests/block_construction.rs index 7b7720b996..90fc7f1705 100644 --- a/stackslib/src/chainstate/stacks/tests/block_construction.rs +++ b/stackslib/src/chainstate/stacks/tests/block_construction.rs @@ -5064,7 +5064,7 @@ fn 
paramaterized_mempool_walk_test( available_tx.tx.tx.clone(), vec![], Value::okay(Value::Bool(true)).unwrap(), - ExecutionCost::zero(), + ExecutionCost::ZERO, ), ) .convert_to_event(), diff --git a/stackslib/src/clarity_vm/clarity.rs b/stackslib/src/clarity_vm/clarity.rs index c89679f414..a5497cea24 100644 --- a/stackslib/src/clarity_vm/clarity.rs +++ b/stackslib/src/clarity_vm/clarity.rs @@ -215,7 +215,7 @@ impl<'a, 'b> ClarityBlockConnection<'a, 'b> { pub fn cost_so_far(&self) -> ExecutionCost { match self.cost_track { Some(ref track) => track.get_total(), - None => ExecutionCost::zero(), + None => ExecutionCost::ZERO, } } @@ -1765,7 +1765,7 @@ impl<'a, 'b> ClarityTransactionConnection<'a, 'b> { pub fn cost_so_far(&self) -> ExecutionCost { match self.cost_track { Some(ref track) => track.get_total(), - None => ExecutionCost::zero(), + None => ExecutionCost::ZERO, } } diff --git a/stackslib/src/clarity_vm/database/marf.rs b/stackslib/src/clarity_vm/database/marf.rs index fed0e70e95..3a8636b3b5 100644 --- a/stackslib/src/clarity_vm/database/marf.rs +++ b/stackslib/src/clarity_vm/database/marf.rs @@ -422,6 +422,31 @@ impl<'a> ClarityBackingStore for ReadOnlyMarfStore<'a> { .transpose() } + fn get_data_with_proof_from_path( + &mut self, + hash: &TrieHash, + ) -> InterpreterResult)>> { + self.marf + .get_with_proof_from_hash(&self.chain_tip, hash) + .or_else(|e| match e { + Error::NotFoundError => Ok(None), + _ => Err(e), + }) + .map_err(|_| InterpreterError::Expect("ERROR: Unexpected MARF Failure on GET".into()))? 
+ .map(|(marf_value, proof)| { + let side_key = marf_value.to_hex(); + let data = + SqliteConnection::get(self.get_side_store(), &side_key)?.ok_or_else(|| { + InterpreterError::Expect(format!( + "ERROR: MARF contained value_hash not found in side storage: {}", + side_key + )) + })?; + Ok((data, proof.serialize_to_vec())) + }) + .transpose() + } + fn get_data(&mut self, key: &str) -> InterpreterResult> { trace!("MarfedKV get: {:?} tip={}", key, &self.chain_tip); self.marf @@ -452,6 +477,36 @@ impl<'a> ClarityBackingStore for ReadOnlyMarfStore<'a> { .transpose() } + fn get_data_from_path(&mut self, hash: &TrieHash) -> InterpreterResult> { + trace!("MarfedKV get_from_hash: {:?} tip={}", hash, &self.chain_tip); + self.marf + .get_from_hash(&self.chain_tip, hash) + .or_else(|e| match e { + Error::NotFoundError => { + trace!( + "MarfedKV get {:?} off of {:?}: not found", + hash, + &self.chain_tip + ); + Ok(None) + } + _ => Err(e), + }) + .map_err(|_| InterpreterError::Expect("ERROR: Unexpected MARF Failure on GET".into()))? 
+ .map(|marf_value| { + let side_key = marf_value.to_hex(); + trace!("MarfedKV get side-key for {:?}: {:?}", hash, &side_key); + SqliteConnection::get(self.get_side_store(), &side_key)?.ok_or_else(|| { + InterpreterError::Expect(format!( + "ERROR: MARF contained value_hash not found in side storage: {}", + side_key + )) + .into() + }) + }) + .transpose() + } + fn put_all_data(&mut self, _items: Vec<(String, String)>) -> InterpreterResult<()> { error!("Attempted to commit changes to read-only MARF"); panic!("BUG: attempted commit to read-only MARF"); @@ -631,6 +686,36 @@ impl<'a> ClarityBackingStore for WritableMarfStore<'a> { .transpose() } + fn get_data_from_path(&mut self, hash: &TrieHash) -> InterpreterResult> { + trace!("MarfedKV get_from_hash: {:?} tip={}", hash, &self.chain_tip); + self.marf + .get_from_hash(&self.chain_tip, hash) + .or_else(|e| match e { + Error::NotFoundError => { + trace!( + "MarfedKV get {:?} off of {:?}: not found", + hash, + &self.chain_tip + ); + Ok(None) + } + _ => Err(e), + }) + .map_err(|_| InterpreterError::Expect("ERROR: Unexpected MARF Failure on GET".into()))? 
+ .map(|marf_value| { + let side_key = marf_value.to_hex(); + trace!("MarfedKV get side-key for {:?}: {:?}", hash, &side_key); + SqliteConnection::get(self.marf.sqlite_tx(), &side_key)?.ok_or_else(|| { + InterpreterError::Expect(format!( + "ERROR: MARF contained value_hash not found in side storage: {}", + side_key + )) + .into() + }) + }) + .transpose() + } + fn get_data_with_proof(&mut self, key: &str) -> InterpreterResult)>> { self.marf .get_with_proof(&self.chain_tip, key) @@ -653,6 +738,31 @@ impl<'a> ClarityBackingStore for WritableMarfStore<'a> { .transpose() } + fn get_data_with_proof_from_path( + &mut self, + hash: &TrieHash, + ) -> InterpreterResult)>> { + self.marf + .get_with_proof_from_hash(&self.chain_tip, hash) + .or_else(|e| match e { + Error::NotFoundError => Ok(None), + _ => Err(e), + }) + .map_err(|_| InterpreterError::Expect("ERROR: Unexpected MARF Failure on GET".into()))? + .map(|(marf_value, proof)| { + let side_key = marf_value.to_hex(); + let data = + SqliteConnection::get(self.marf.sqlite_tx(), &side_key)?.ok_or_else(|| { + InterpreterError::Expect(format!( + "ERROR: MARF contained value_hash not found in side storage: {}", + side_key + )) + })?; + Ok((data, proof.serialize_to_vec())) + }) + .transpose() + } + fn get_side_store(&mut self) -> &Connection { self.marf.sqlite_tx() } diff --git a/stackslib/src/clarity_vm/database/mod.rs b/stackslib/src/clarity_vm/database/mod.rs index 44eeaa2e07..0bce54dcfb 100644 --- a/stackslib/src/clarity_vm/database/mod.rs +++ b/stackslib/src/clarity_vm/database/mod.rs @@ -1,5 +1,6 @@ use std::ops::{Deref, DerefMut}; +use clarity::types::chainstate::TrieHash; use clarity::util::hash::Sha512Trunc256Sum; use clarity::vm::analysis::AnalysisDatabase; use clarity::vm::database::sqlite::{ @@ -1232,10 +1233,24 @@ impl ClarityBackingStore for MemoryBackingStore { SqliteConnection::get(self.get_side_store(), key) } + fn get_data_from_path(&mut self, hash: &TrieHash) -> InterpreterResult> { + 
SqliteConnection::get(self.get_side_store(), hash.to_string().as_str()) + } + fn get_data_with_proof(&mut self, key: &str) -> InterpreterResult)>> { Ok(SqliteConnection::get(self.get_side_store(), key)?.map(|x| (x, vec![]))) } + fn get_data_with_proof_from_path( + &mut self, + key: &TrieHash, + ) -> InterpreterResult)>> { + Ok( + SqliteConnection::get(self.get_side_store(), key.to_string().as_str())? + .map(|x| (x, vec![])), + ) + } + fn get_side_store(&mut self) -> &Connection { &self.side_store } diff --git a/stackslib/src/clarity_vm/tests/large_contract.rs b/stackslib/src/clarity_vm/tests/large_contract.rs index 8db6b3043a..e7d8faff0c 100644 --- a/stackslib/src/clarity_vm/tests/large_contract.rs +++ b/stackslib/src/clarity_vm/tests/large_contract.rs @@ -168,7 +168,8 @@ fn test_simple_token_system(#[case] version: ClarityVersion, #[case] epoch: Stac | StacksEpochId::Epoch23 | StacksEpochId::Epoch24 | StacksEpochId::Epoch25 - | StacksEpochId::Epoch30 => { + | StacksEpochId::Epoch30 + | StacksEpochId::Epoch31 => { let (ast, _analysis) = tx .analyze_smart_contract( &boot_code_id("costs-3", false), diff --git a/stackslib/src/cli.rs b/stackslib/src/cli.rs index f703f8a367..1f43a34d40 100644 --- a/stackslib/src/cli.rs +++ b/stackslib/src/cli.rs @@ -16,8 +16,9 @@ //! 
Subcommands used by `stacks-inspect` binary +use std::any::type_name; use std::cell::LazyCell; -use std::path::PathBuf; +use std::path::{Path, PathBuf}; use std::time::Instant; use std::{env, fs, io, process, thread}; @@ -28,9 +29,12 @@ use regex::Regex; use rusqlite::{Connection, OpenFlags}; use stacks_common::types::chainstate::{BlockHeaderHash, BurnchainHeaderHash, StacksBlockId}; use stacks_common::types::sqlite::NO_PARAMS; +use stacks_common::util::get_epoch_time_ms; +use stacks_common::util::hash::Hash160; +use stacks_common::util::vrf::VRFProof; use crate::burnchains::db::BurnchainDB; -use crate::burnchains::PoxConstants; +use crate::burnchains::{Burnchain, PoxConstants}; use crate::chainstate::burn::db::sortdb::{ get_ancestor_sort_id, SortitionDB, SortitionHandle, SortitionHandleContext, }; @@ -42,82 +46,83 @@ use crate::chainstate::stacks::db::{StacksBlockHeaderTypes, StacksChainState, St use crate::chainstate::stacks::miner::*; use crate::chainstate::stacks::{Error as ChainstateError, *}; use crate::clarity_vm::clarity::ClarityInstance; +use crate::config::{Config, ConfigFile, DEFAULT_MAINNET_CONFIG}; use crate::core::*; +use crate::cost_estimates::metrics::UnitMetric; +use crate::cost_estimates::UnitEstimator; use crate::util_lib::db::IndexDBTx; -/// Can be used with CLI commands to support non-mainnet chainstate -/// Allows integration testing of these functions -#[derive(Deserialize)] -pub struct StacksChainConfig { - pub chain_id: u32, - pub first_block_height: u64, - pub first_burn_header_hash: BurnchainHeaderHash, - pub first_burn_header_timestamp: u64, - pub pox_constants: PoxConstants, - pub epochs: EpochList, +/// Options common to many `stacks-inspect` subcommands +/// Returned by `drain_common_opts()` +#[derive(Debug, Default)] +pub struct CommonOpts { + pub config: Option, } -impl StacksChainConfig { - pub fn default_mainnet() -> Self { - Self { - chain_id: CHAIN_ID_MAINNET, - first_block_height: BITCOIN_MAINNET_FIRST_BLOCK_HEIGHT, - 
first_burn_header_hash: BurnchainHeaderHash::from_hex(BITCOIN_MAINNET_FIRST_BLOCK_HASH) - .unwrap(), - first_burn_header_timestamp: BITCOIN_MAINNET_FIRST_BLOCK_TIMESTAMP.into(), - pox_constants: PoxConstants::mainnet_default(), - epochs: (*STACKS_EPOCHS_MAINNET).clone(), +/// Process arguments common to many `stacks-inspect` subcommands and drain them from `argv` +/// +/// Args: +/// - `argv`: Full CLI args `Vec` +/// - `start_at`: Position in args vec where to look for common options. +/// For example, if `start_at` is `1`, then look for these options **before** the subcommand: +/// ```console +/// stacks-inspect --config testnet.toml replay-block path/to/chainstate +/// ``` +pub fn drain_common_opts(argv: &mut Vec, start_at: usize) -> CommonOpts { + let mut i = start_at; + let mut opts = CommonOpts::default(); + while let Some(arg) = argv.get(i) { + let (prefix, opt) = arg.split_at(2); + if prefix != "--" { + // No args left to take + break; } - } - - pub fn default_testnet() -> Self { - let mut pox_constants = PoxConstants::regtest_default(); - pox_constants.prepare_length = 100; - pox_constants.reward_cycle_length = 900; - pox_constants.v1_unlock_height = 3; - pox_constants.v2_unlock_height = 5; - pox_constants.pox_3_activation_height = 5; - pox_constants.pox_4_activation_height = 6; - pox_constants.v3_unlock_height = 7; - let mut epochs = EpochList::new(&*STACKS_EPOCHS_REGTEST); - epochs[StacksEpochId::Epoch10].start_height = 0; - epochs[StacksEpochId::Epoch10].end_height = 0; - epochs[StacksEpochId::Epoch20].start_height = 0; - epochs[StacksEpochId::Epoch20].end_height = 1; - epochs[StacksEpochId::Epoch2_05].start_height = 1; - epochs[StacksEpochId::Epoch2_05].end_height = 2; - epochs[StacksEpochId::Epoch21].start_height = 2; - epochs[StacksEpochId::Epoch21].end_height = 3; - epochs[StacksEpochId::Epoch22].start_height = 3; - epochs[StacksEpochId::Epoch22].end_height = 4; - epochs[StacksEpochId::Epoch23].start_height = 4; - 
epochs[StacksEpochId::Epoch23].end_height = 5; - epochs[StacksEpochId::Epoch24].start_height = 5; - epochs[StacksEpochId::Epoch24].end_height = 6; - epochs[StacksEpochId::Epoch25].start_height = 6; - epochs[StacksEpochId::Epoch25].end_height = 56_457; - epochs[StacksEpochId::Epoch30].start_height = 56_457; - Self { - chain_id: CHAIN_ID_TESTNET, - first_block_height: 0, - first_burn_header_hash: BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH) - .unwrap(), - first_burn_header_timestamp: BITCOIN_REGTEST_FIRST_BLOCK_TIMESTAMP.into(), - pox_constants, - epochs, + // "Take" arg + i += 1; + match opt { + "config" => { + let path = &argv[i]; + i += 1; + let config_file = ConfigFile::from_path(&path).unwrap_or_else(|e| { + panic!("Failed to read '{path}' as stacks-node config: {e}") + }); + let config = Config::from_config_file(config_file, false).unwrap_or_else(|e| { + panic!("Failed to convert config file into node config: {e}") + }); + opts.config.replace(config); + } + "network" => { + let network = &argv[i]; + i += 1; + let config_file = match network.to_lowercase().as_str() { + "helium" => ConfigFile::helium(), + "mainnet" => ConfigFile::mainnet(), + "mocknet" => ConfigFile::mocknet(), + "xenon" => ConfigFile::xenon(), + other => { + eprintln!("Unknown network choice `{other}`"); + process::exit(1); + } + }; + let config = Config::from_config_file(config_file, false).unwrap_or_else(|e| { + panic!("Failed to convert config file into node config: {e}") + }); + opts.config.replace(config); + } + _ => panic!("Unrecognized option: {opt}"), } } + // Remove options processed + argv.drain(start_at..i); + opts } -const STACKS_CHAIN_CONFIG_DEFAULT_MAINNET: LazyCell = - LazyCell::new(StacksChainConfig::default_mainnet); - /// Replay blocks from chainstate database /// Terminates on error using `process::exit()` /// /// Arguments: /// - `argv`: Args in CLI format: ` [args...]` -pub fn command_replay_block(argv: &[String], conf: Option<&StacksChainConfig>) { +pub 
fn command_replay_block(argv: &[String], conf: Option<&Config>) { let print_help_and_exit = || -> ! { let n = &argv[0]; eprintln!("Usage:"); @@ -195,7 +200,7 @@ pub fn command_replay_block(argv: &[String], conf: Option<&StacksChainConfig>) { /// /// Arguments: /// - `argv`: Args in CLI format: ` [args...]` -pub fn command_replay_block_nakamoto(argv: &[String], conf: Option<&StacksChainConfig>) { +pub fn command_replay_block_nakamoto(argv: &[String], conf: Option<&Config>) { let print_help_and_exit = || -> ! { let n = &argv[0]; eprintln!("Usage:"); @@ -212,12 +217,15 @@ pub fn command_replay_block_nakamoto(argv: &[String], conf: Option<&StacksChainC let chain_state_path = format!("{db_path}/chainstate/"); - let default_conf = STACKS_CHAIN_CONFIG_DEFAULT_MAINNET; - let conf = conf.unwrap_or(&default_conf); + let conf = conf.unwrap_or(&DEFAULT_MAINNET_CONFIG); - let mainnet = conf.chain_id == CHAIN_ID_MAINNET; - let (chainstate, _) = - StacksChainState::open(mainnet, conf.chain_id, &chain_state_path, None).unwrap(); + let (chainstate, _) = StacksChainState::open( + conf.is_mainnet(), + conf.burnchain.chain_id, + &chain_state_path, + None, + ) + .unwrap(); let conn = chainstate.nakamoto_blocks_db(); @@ -281,7 +289,7 @@ pub fn command_replay_block_nakamoto(argv: &[String], conf: Option<&StacksChainC /// Arguments: /// - `argv`: Args in CLI format: ` [args...]` /// - `conf`: Optional config for running on non-mainnet chainstate -pub fn command_replay_mock_mining(argv: &[String], conf: Option<&StacksChainConfig>) { +pub fn command_replay_mock_mining(argv: &[String], conf: Option<&Config>) { let print_help_and_exit = || -> ! 
{ let n = &argv[0]; eprintln!("Usage:"); @@ -369,32 +377,184 @@ pub fn command_replay_mock_mining(argv: &[String], conf: Option<&StacksChainConf } } +/// Try to ''mine'' an anchored block off the current chain state, simulating the miner's block assembly without broadcasting a block-commit +/// Terminates on error using `process::exit()` +/// +/// Arguments: +/// - `argv`: Args in CLI format: ` [args...]` +/// - `conf`: Optional config for running on non-mainnet chainstate +pub fn command_try_mine(argv: &[String], conf: Option<&Config>) { + let print_help_and_exit = || { + let n = &argv[0]; + eprintln!("Usage: {n} [min-fee [max-time]]"); + eprintln!(""); + eprintln!("Given a , try to ''mine'' an anchored block. This invokes the miner block"); + eprintln!("assembly, but does not attempt to broadcast a block commit. This is useful for determining"); + eprintln!("what transactions a given chain state would include in an anchor block,"); + eprintln!("or otherwise simulating a miner."); + process::exit(1); + }; + + // Parse subcommand-specific args + let db_path = argv.get(1).unwrap_or_else(print_help_and_exit); + let min_fee = argv + .get(2) + .map(|arg| arg.parse().expect("Could not parse min_fee")) + .unwrap_or(u64::MAX); + let max_time = argv + .get(3) + .map(|arg| arg.parse().expect("Could not parse max_time")) + .unwrap_or(u64::MAX); + + let start = get_epoch_time_ms(); + + let conf = conf.unwrap_or(&DEFAULT_MAINNET_CONFIG); + + let burnchain_path = format!("{db_path}/burnchain"); + let sort_db_path = format!("{db_path}/burnchain/sortition"); + let chain_state_path = format!("{db_path}/chainstate/"); + + let burnchain = conf.get_burnchain(); + let sort_db = SortitionDB::open(&sort_db_path, false, burnchain.pox_constants.clone()) + .unwrap_or_else(|e| panic!("Failed to open {sort_db_path}: {e}")); + let (chain_state, _) = StacksChainState::open( + conf.is_mainnet(), + conf.burnchain.chain_id, + &chain_state_path, + None, + ) + .unwrap_or_else(|e| panic!("Failed to open stacks chain state: {e}")); + let chain_tip = 
SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()) + .unwrap_or_else(|e| panic!("Failed to get sortition chain tip: {e}")); + + let estimator = Box::new(UnitEstimator); + let metric = Box::new(UnitMetric); + + let mut mempool_db = MemPoolDB::open( + conf.is_mainnet(), + conf.burnchain.chain_id, + &chain_state_path, + estimator, + metric, + ) + .unwrap_or_else(|e| panic!("Failed to open mempool db: {e}")); + + let tip_header = NakamotoChainState::get_canonical_block_header(chain_state.db(), &sort_db) + .unwrap_or_else(|e| panic!("Error looking up chain tip: {e}")) + .expect("No chain tip found"); + + // Fail if Nakamoto chainstate detected. `try-mine` cannot mine Nakamoto blocks yet + // TODO: Add Nakamoto block support + if matches!( + &tip_header.anchored_header, + StacksBlockHeaderTypes::Nakamoto(..) + ) { + panic!("Attempting to mine Nakamoto block. Nakamoto blocks not supported yet!"); + }; + + let sk = StacksPrivateKey::new(); + let mut tx_auth = TransactionAuth::from_p2pkh(&sk).unwrap(); + tx_auth.set_origin_nonce(0); + + let mut coinbase_tx = StacksTransaction::new( + TransactionVersion::Mainnet, + tx_auth, + TransactionPayload::Coinbase(CoinbasePayload([0u8; 32]), None, None), + ); + + coinbase_tx.chain_id = conf.burnchain.chain_id; + coinbase_tx.anchor_mode = TransactionAnchorMode::OnChainOnly; + let mut tx_signer = StacksTransactionSigner::new(&coinbase_tx); + tx_signer.sign_origin(&sk).unwrap(); + let coinbase_tx = tx_signer.get_tx().unwrap(); + + let mut settings = BlockBuilderSettings::limited(); + settings.max_miner_time_ms = max_time; + + let result = StacksBlockBuilder::build_anchored_block( + &chain_state, + &sort_db.index_handle(&chain_tip.sortition_id), + &mut mempool_db, + &tip_header, + chain_tip.total_burn, + VRFProof::empty(), + Hash160([0; 20]), + &coinbase_tx, + settings, + None, + &Burnchain::new( + &burnchain_path, + &burnchain.chain_name, + &burnchain.network_name, + ) + .unwrap(), + ); + + let stop = get_epoch_time_ms(); + + 
println!( + "{} mined block @ height = {} off of {} ({}/{}) in {}ms. Min-fee: {}, Max-time: {}", + if result.is_ok() { + "Successfully" + } else { + "Failed to" + }, + tip_header.stacks_block_height + 1, + StacksBlockHeader::make_index_block_hash( + &tip_header.consensus_hash, + &tip_header.anchored_header.block_hash() + ), + &tip_header.consensus_hash, + &tip_header.anchored_header.block_hash(), + stop.saturating_sub(start), + min_fee, + max_time + ); + + if let Ok((block, execution_cost, size)) = result { + let mut total_fees = 0; + for tx in block.txs.iter() { + total_fees += tx.get_tx_fee(); + } + println!( + "Block {}: {} uSTX, {} bytes, cost {:?}", + block.block_hash(), + total_fees, + size, + &execution_cost + ); + } + + process::exit(0); +} + /// Fetch and process a `StagingBlock` from database and call `replay_block()` to validate -fn replay_staging_block( - db_path: &str, - index_block_hash_hex: &str, - conf: Option<&StacksChainConfig>, -) { +fn replay_staging_block(db_path: &str, index_block_hash_hex: &str, conf: Option<&Config>) { let block_id = StacksBlockId::from_hex(index_block_hash_hex).unwrap(); let chain_state_path = format!("{db_path}/chainstate/"); let sort_db_path = format!("{db_path}/burnchain/sortition"); let burn_db_path = format!("{db_path}/burnchain/burnchain.sqlite"); let burnchain_blocks_db = BurnchainDB::open(&burn_db_path, false).unwrap(); - let default_conf = STACKS_CHAIN_CONFIG_DEFAULT_MAINNET; - let conf = conf.unwrap_or(&default_conf); + let conf = conf.unwrap_or(&DEFAULT_MAINNET_CONFIG); - let mainnet = conf.chain_id == CHAIN_ID_MAINNET; - let (mut chainstate, _) = - StacksChainState::open(mainnet, conf.chain_id, &chain_state_path, None).unwrap(); + let (mut chainstate, _) = StacksChainState::open( + conf.is_mainnet(), + conf.burnchain.chain_id, + &chain_state_path, + None, + ) + .unwrap(); + let burnchain = conf.get_burnchain(); + let epochs = conf.burnchain.get_epoch_list(); let mut sortdb = SortitionDB::connect( &sort_db_path, 
- conf.first_block_height, - &conf.first_burn_header_hash, - conf.first_burn_header_timestamp, - &conf.epochs, - conf.pox_constants.clone(), + burnchain.first_block_height, + &burnchain.first_block_hash, + u64::from(burnchain.first_block_timestamp), + &epochs, + burnchain.pox_constants.clone(), None, true, ) @@ -448,30 +608,31 @@ fn replay_staging_block( } /// Process a mock mined block and call `replay_block()` to validate -fn replay_mock_mined_block( - db_path: &str, - block: AssembledAnchorBlock, - conf: Option<&StacksChainConfig>, -) { +fn replay_mock_mined_block(db_path: &str, block: AssembledAnchorBlock, conf: Option<&Config>) { let chain_state_path = format!("{db_path}/chainstate/"); let sort_db_path = format!("{db_path}/burnchain/sortition"); let burn_db_path = format!("{db_path}/burnchain/burnchain.sqlite"); let burnchain_blocks_db = BurnchainDB::open(&burn_db_path, false).unwrap(); - let default_conf = STACKS_CHAIN_CONFIG_DEFAULT_MAINNET; - let conf = conf.unwrap_or(&default_conf); + let conf = conf.unwrap_or(&DEFAULT_MAINNET_CONFIG); - let mainnet = conf.chain_id == CHAIN_ID_MAINNET; - let (mut chainstate, _) = - StacksChainState::open(mainnet, conf.chain_id, &chain_state_path, None).unwrap(); + let (mut chainstate, _) = StacksChainState::open( + conf.is_mainnet(), + conf.burnchain.chain_id, + &chain_state_path, + None, + ) + .unwrap(); + let burnchain = conf.get_burnchain(); + let epochs = conf.burnchain.get_epoch_list(); let mut sortdb = SortitionDB::connect( &sort_db_path, - conf.first_block_height, - &conf.first_burn_header_hash, - conf.first_burn_header_timestamp, - &conf.epochs, - conf.pox_constants.clone(), + burnchain.first_block_height, + &burnchain.first_block_hash, + u64::from(burnchain.first_block_timestamp), + &epochs, + burnchain.pox_constants.clone(), None, true, ) @@ -650,22 +811,28 @@ fn replay_block( } /// Fetch and process a NakamotoBlock from database and call `replay_block_nakamoto()` to validate -fn 
replay_naka_staging_block(db_path: &str, index_block_hash_hex: &str, conf: &StacksChainConfig) { +fn replay_naka_staging_block(db_path: &str, index_block_hash_hex: &str, conf: &Config) { let block_id = StacksBlockId::from_hex(index_block_hash_hex).unwrap(); let chain_state_path = format!("{db_path}/chainstate/"); let sort_db_path = format!("{db_path}/burnchain/sortition"); - let mainnet = conf.chain_id == CHAIN_ID_MAINNET; - let (mut chainstate, _) = - StacksChainState::open(mainnet, conf.chain_id, &chain_state_path, None).unwrap(); + let (mut chainstate, _) = StacksChainState::open( + conf.is_mainnet(), + conf.burnchain.chain_id, + &chain_state_path, + None, + ) + .unwrap(); + let burnchain = conf.get_burnchain(); + let epochs = conf.burnchain.get_epoch_list(); let mut sortdb = SortitionDB::connect( &sort_db_path, - conf.first_block_height, - &conf.first_burn_header_hash, - conf.first_burn_header_timestamp, - &conf.epochs, - conf.pox_constants.clone(), + burnchain.first_block_height, + &burnchain.first_block_hash, + u64::from(burnchain.first_block_timestamp), + &epochs, + burnchain.pox_constants.clone(), None, true, ) @@ -696,7 +863,7 @@ fn replay_block_nakamoto( ) }); - debug!("Process staging Nakamoto block"; + info!("Process staging Nakamoto block"; "consensus_hash" => %block.header.consensus_hash, "stacks_block_hash" => %block.header.block_hash(), "stacks_block_id" => %block.header.block_id(), @@ -940,3 +1107,36 @@ fn replay_block_nakamoto( Ok(()) } + +#[cfg(test)] +pub mod test { + use super::*; + + fn parse_cli_command(s: &str) -> Vec { + s.split(' ').map(String::from).collect() + } + + #[test] + pub fn test_drain_common_opts() { + // Should find/remove no options + let mut argv = parse_cli_command( + "stacks-inspect try-mine --config my_config.toml /tmp/chainstate/mainnet", + ); + let argv_init = argv.clone(); + let opts = drain_common_opts(&mut argv, 0); + let opts = drain_common_opts(&mut argv, 1); + + assert_eq!(argv, argv_init); + 
assert!(opts.config.is_none()); + + // Should find config opts and remove from vec + let mut argv = parse_cli_command( + "stacks-inspect --network mocknet --network mainnet try-mine /tmp/chainstate/mainnet", + ); + let opts = drain_common_opts(&mut argv, 1); + let argv_expected = parse_cli_command("stacks-inspect try-mine /tmp/chainstate/mainnet"); + + assert_eq!(argv, argv_expected); + assert!(opts.config.is_some()); + } +} diff --git a/testnet/stacks-node/src/chain_data.rs b/stackslib/src/config/chain_data.rs similarity index 97% rename from testnet/stacks-node/src/chain_data.rs rename to stackslib/src/config/chain_data.rs index cc60f964a3..e4c3899511 100644 --- a/testnet/stacks-node/src/chain_data.rs +++ b/stackslib/src/config/chain_data.rs @@ -17,21 +17,22 @@ use std::collections::HashMap; use std::process::{Command, Stdio}; -use stacks::burnchains::bitcoin::address::BitcoinAddress; -use stacks::burnchains::bitcoin::{BitcoinNetworkType, BitcoinTxOutput}; -use stacks::burnchains::{Burnchain, BurnchainSigner, Error as BurnchainError, Txid}; -use stacks::chainstate::burn::db::sortdb::{SortitionDB, SortitionHandle}; -use stacks::chainstate::burn::distribution::BurnSamplePoint; -use stacks::chainstate::burn::operations::leader_block_commit::{ - MissedBlockCommit, BURN_BLOCK_MINED_AT_MODULUS, -}; -use stacks::chainstate::burn::operations::LeaderBlockCommitOp; -use stacks::chainstate::stacks::address::PoxAddress; -use stacks::core::MINING_COMMITMENT_WINDOW; -use stacks::util_lib::db::Error as DBError; use stacks_common::types::chainstate::{BlockHeaderHash, BurnchainHeaderHash, VRFSeed}; use stacks_common::util::hash::hex_bytes; +use crate::burnchains::bitcoin::address::BitcoinAddress; +use crate::burnchains::bitcoin::{BitcoinNetworkType, BitcoinTxOutput}; +use crate::burnchains::{Burnchain, BurnchainSigner, Error as BurnchainError, Txid}; +use crate::chainstate::burn::db::sortdb::{SortitionDB, SortitionHandle}; +use 
crate::chainstate::burn::distribution::BurnSamplePoint; +use crate::chainstate::burn::operations::leader_block_commit::{ + MissedBlockCommit, BURN_BLOCK_MINED_AT_MODULUS, +}; +use crate::chainstate::burn::operations::LeaderBlockCommitOp; +use crate::chainstate::stacks::address::PoxAddress; +use crate::core::MINING_COMMITMENT_WINDOW; +use crate::util_lib::db::Error as DBError; + pub struct MinerStats { pub unconfirmed_commits_helper: String, } @@ -526,11 +527,6 @@ pub mod tests { use std::fs; use std::io::Write; - use stacks::burnchains::{BurnchainSigner, Txid}; - use stacks::chainstate::burn::distribution::BurnSamplePoint; - use stacks::chainstate::burn::operations::leader_block_commit::BURN_BLOCK_MINED_AT_MODULUS; - use stacks::chainstate::burn::operations::LeaderBlockCommitOp; - use stacks::chainstate::stacks::address::{PoxAddress, PoxAddressType20}; use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, StacksAddress, StacksPublicKey, VRFSeed, }; @@ -538,6 +534,11 @@ pub mod tests { use stacks_common::util::uint::{BitArray, Uint256}; use super::MinerStats; + use crate::burnchains::{BurnchainSigner, Txid}; + use crate::chainstate::burn::distribution::BurnSamplePoint; + use crate::chainstate::burn::operations::leader_block_commit::BURN_BLOCK_MINED_AT_MODULUS; + use crate::chainstate::burn::operations::LeaderBlockCommitOp; + use crate::chainstate::stacks::address::{PoxAddress, PoxAddressType20}; #[test] fn test_burn_dist_to_prob_dist() { diff --git a/testnet/stacks-node/src/config.rs b/stackslib/src/config/mod.rs similarity index 95% rename from testnet/stacks-node/src/config.rs rename to stackslib/src/config/mod.rs index 785ce057e5..42663372f6 100644 --- a/testnet/stacks-node/src/config.rs +++ b/stackslib/src/config/mod.rs @@ -14,47 +14,20 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
+pub mod chain_data; + use std::collections::{HashMap, HashSet}; use std::net::{Ipv4Addr, SocketAddr, ToSocketAddrs}; use std::path::PathBuf; use std::str::FromStr; -use std::sync::{Arc, Mutex}; +use std::sync::{Arc, LazyLock, Mutex}; use std::time::Duration; use std::{cmp, fs, thread}; use clarity::vm::costs::ExecutionCost; use clarity::vm::types::{AssetIdentifier, PrincipalData, QualifiedContractIdentifier}; -use lazy_static::lazy_static; use rand::RngCore; use serde::Deserialize; -use stacks::burnchains::affirmation::AffirmationMap; -use stacks::burnchains::bitcoin::BitcoinNetworkType; -use stacks::burnchains::{Burnchain, MagicBytes, PoxConstants, BLOCKSTACK_MAGIC_MAINNET}; -use stacks::chainstate::nakamoto::signer_set::NakamotoSigners; -use stacks::chainstate::stacks::boot::MINERS_NAME; -use stacks::chainstate::stacks::index::marf::MARFOpenOpts; -use stacks::chainstate::stacks::index::storage::TrieHashCalculationMode; -use stacks::chainstate::stacks::miner::{BlockBuilderSettings, MinerStatus}; -use stacks::chainstate::stacks::MAX_BLOCK_LEN; -use stacks::core::mempool::{MemPoolWalkSettings, MemPoolWalkTxTypes}; -use stacks::core::{ - MemPoolDB, StacksEpoch, StacksEpochExtension, StacksEpochId, - BITCOIN_TESTNET_FIRST_BLOCK_HEIGHT, BITCOIN_TESTNET_STACKS_25_BURN_HEIGHT, - BITCOIN_TESTNET_STACKS_25_REORGED_HEIGHT, CHAIN_ID_MAINNET, CHAIN_ID_TESTNET, - PEER_VERSION_MAINNET, PEER_VERSION_TESTNET, -}; -use stacks::cost_estimates::fee_medians::WeightedMedianFeeRateEstimator; -use stacks::cost_estimates::fee_rate_fuzzer::FeeRateFuzzer; -use stacks::cost_estimates::fee_scalar::ScalarFeeRateEstimator; -use stacks::cost_estimates::metrics::{CostMetric, ProportionalDotProduct, UnitMetric}; -use stacks::cost_estimates::{CostEstimator, FeeEstimator, PessimisticEstimator, UnitEstimator}; -use stacks::net::atlas::AtlasConfig; -use stacks::net::connection::ConnectionOptions; -use stacks::net::{Neighbor, NeighborKey}; -use stacks::types::chainstate::BurnchainHeaderHash; -use 
stacks::types::EpochList; -use stacks::util_lib::boot::boot_code_id; -use stacks::util_lib::db::Error as DBError; use stacks_common::consts::SIGNER_SLOTS_PER_USER; use stacks_common::types::chainstate::StacksAddress; use stacks_common::types::net::PeerAddress; @@ -63,7 +36,36 @@ use stacks_common::util::get_epoch_time_ms; use stacks_common::util::hash::hex_bytes; use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; -use crate::chain_data::MinerStats; +use crate::burnchains::affirmation::AffirmationMap; +use crate::burnchains::bitcoin::BitcoinNetworkType; +use crate::burnchains::{Burnchain, MagicBytes, PoxConstants, BLOCKSTACK_MAGIC_MAINNET}; +use crate::chainstate::nakamoto::signer_set::NakamotoSigners; +use crate::chainstate::stacks::boot::MINERS_NAME; +use crate::chainstate::stacks::index::marf::MARFOpenOpts; +use crate::chainstate::stacks::index::storage::TrieHashCalculationMode; +use crate::chainstate::stacks::miner::{BlockBuilderSettings, MinerStatus}; +use crate::chainstate::stacks::MAX_BLOCK_LEN; +use crate::config::chain_data::MinerStats; +use crate::core::mempool::{MemPoolWalkSettings, MemPoolWalkTxTypes}; +use crate::core::{ + MemPoolDB, StacksEpoch, StacksEpochExtension, StacksEpochId, + BITCOIN_TESTNET_FIRST_BLOCK_HEIGHT, BITCOIN_TESTNET_STACKS_25_BURN_HEIGHT, + BITCOIN_TESTNET_STACKS_25_REORGED_HEIGHT, CHAIN_ID_MAINNET, CHAIN_ID_TESTNET, + PEER_VERSION_MAINNET, PEER_VERSION_TESTNET, STACKS_EPOCHS_REGTEST, STACKS_EPOCHS_TESTNET, +}; +use crate::cost_estimates::fee_medians::WeightedMedianFeeRateEstimator; +use crate::cost_estimates::fee_rate_fuzzer::FeeRateFuzzer; +use crate::cost_estimates::fee_scalar::ScalarFeeRateEstimator; +use crate::cost_estimates::metrics::{CostMetric, ProportionalDotProduct, UnitMetric}; +use crate::cost_estimates::{CostEstimator, FeeEstimator, PessimisticEstimator, UnitEstimator}; +use crate::net::atlas::AtlasConfig; +use crate::net::connection::ConnectionOptions; +use crate::net::{Neighbor, 
NeighborAddress, NeighborKey}; +use crate::types::chainstate::BurnchainHeaderHash; +use crate::types::EpochList; +use crate::util::hash::to_hex; +use crate::util_lib::boot::boot_code_id; +use crate::util_lib::db::Error as DBError; pub const DEFAULT_SATS_PER_VB: u64 = 50; pub const OP_TX_BLOCK_COMMIT_ESTIM_SIZE: u64 = 380; @@ -92,6 +94,45 @@ const DEFAULT_FIRST_REJECTION_PAUSE_MS: u64 = 5_000; const DEFAULT_SUBSEQUENT_REJECTION_PAUSE_MS: u64 = 10_000; const DEFAULT_BLOCK_COMMIT_DELAY_MS: u64 = 20_000; const DEFAULT_TENURE_COST_LIMIT_PER_BLOCK_PERCENTAGE: u8 = 25; +// This should be greater than the signers' timeout. This is used for issuing fallback tenure extends +const DEFAULT_TENURE_TIMEOUT_SECS: u64 = 420; + +static HELIUM_DEFAULT_CONNECTION_OPTIONS: LazyLock = + LazyLock::new(|| ConnectionOptions { + inbox_maxlen: 100, + outbox_maxlen: 100, + timeout: 15, + idle_timeout: 15, // how long a HTTP connection can be idle before it's closed + heartbeat: 3600, + // can't use u64::max, because sqlite stores as i64. 
+ private_key_lifetime: 9223372036854775807, + num_neighbors: 32, // number of neighbors whose inventories we track + num_clients: 750, // number of inbound p2p connections + soft_num_neighbors: 16, // soft-limit on the number of neighbors whose inventories we track + soft_num_clients: 750, // soft limit on the number of inbound p2p connections + max_neighbors_per_host: 1, // maximum number of neighbors per host we permit + max_clients_per_host: 4, // maximum number of inbound p2p connections per host we permit + soft_max_neighbors_per_host: 1, // soft limit on the number of neighbors per host we permit + soft_max_neighbors_per_org: 32, // soft limit on the number of neighbors per AS we permit (TODO: for now it must be greater than num_neighbors) + soft_max_clients_per_host: 4, // soft limit on how many inbound p2p connections per host we permit + max_http_clients: 1000, // maximum number of HTTP connections + max_neighbors_of_neighbor: 10, // maximum number of neighbors we'll handshake with when doing a neighbor walk (I/O for this can be expensive, so keep small-ish) + walk_interval: 60, // how often, in seconds, we do a neighbor walk + walk_seed_probability: 0.1, // 10% of the time when not in IBD, walk to a non-seed node even if we aren't connected to a seed node + log_neighbors_freq: 60_000, // every minute, log all peer connections + inv_sync_interval: 45, // how often, in seconds, we refresh block inventories + inv_reward_cycles: 3, // how many reward cycles to look back on, for mainnet + download_interval: 10, // how often, in seconds, we do a block download scan (should be less than inv_sync_interval) + dns_timeout: 15_000, + max_inflight_blocks: 6, + max_inflight_attachments: 6, + ..std::default::Default::default() + }); + +pub static DEFAULT_MAINNET_CONFIG: LazyLock = LazyLock::new(|| { + Config::from_config_file(ConfigFile::mainnet(), false) + .expect("Failed to create default mainnet config") +}); #[derive(Clone, Deserialize, Default, Debug)] 
#[serde(deny_unknown_fields)] @@ -140,7 +181,7 @@ impl ConfigFile { mode: Some("xenon".to_string()), rpc_port: Some(18332), peer_port: Some(18333), - peer_host: Some("bitcoind.testnet.stacks.co".to_string()), + peer_host: Some("0.0.0.0".to_string()), magic_bytes: Some("T2".into()), ..BurnchainConfigFile::default() }; @@ -186,9 +227,9 @@ impl ConfigFile { mode: Some("mainnet".to_string()), rpc_port: Some(8332), peer_port: Some(8333), - peer_host: Some("bitcoin.blockstack.com".to_string()), - username: Some("blockstack".to_string()), - password: Some("blockstacksystem".to_string()), + peer_host: Some("0.0.0.0".to_string()), + username: Some("bitcoin".to_string()), + password: Some("bitcoin".to_string()), magic_bytes: Some("X2".to_string()), ..BurnchainConfigFile::default() }; @@ -310,39 +351,6 @@ pub struct Config { pub atlas: AtlasConfig, } -lazy_static! { - static ref HELIUM_DEFAULT_CONNECTION_OPTIONS: ConnectionOptions = ConnectionOptions { - inbox_maxlen: 100, - outbox_maxlen: 100, - timeout: 15, - idle_timeout: 15, // how long a HTTP connection can be idle before it's closed - heartbeat: 3600, - // can't use u64::max, because sqlite stores as i64. 
- private_key_lifetime: 9223372036854775807, - num_neighbors: 32, // number of neighbors whose inventories we track - num_clients: 750, // number of inbound p2p connections - soft_num_neighbors: 16, // soft-limit on the number of neighbors whose inventories we track - soft_num_clients: 750, // soft limit on the number of inbound p2p connections - max_neighbors_per_host: 1, // maximum number of neighbors per host we permit - max_clients_per_host: 4, // maximum number of inbound p2p connections per host we permit - soft_max_neighbors_per_host: 1, // soft limit on the number of neighbors per host we permit - soft_max_neighbors_per_org: 32, // soft limit on the number of neighbors per AS we permit (TODO: for now it must be greater than num_neighbors) - soft_max_clients_per_host: 4, // soft limit on how many inbound p2p connections per host we permit - max_http_clients: 1000, // maximum number of HTTP connections - max_neighbors_of_neighbor: 10, // maximum number of neighbors we'll handshake with when doing a neighbor walk (I/O for this can be expensive, so keep small-ish) - walk_interval: 60, // how often, in seconds, we do a neighbor walk - walk_seed_probability: 0.1, // 10% of the time when not in IBD, walk to a non-seed node even if we aren't connected to a seed node - log_neighbors_freq: 60_000, // every minute, log all peer connections - inv_sync_interval: 45, // how often, in seconds, we refresh block inventories - inv_reward_cycles: 3, // how many reward cycles to look back on, for mainnet - download_interval: 10, // how often, in seconds, we do a block download scan (should be less than inv_sync_interval) - dns_timeout: 15_000, - max_inflight_blocks: 6, - max_inflight_attachments: 6, - .. std::default::Default::default() - }; -} - impl Config { /// get the up-to-date burnchain options from the config. 
/// If the config file can't be loaded, then return the existing config @@ -515,10 +523,7 @@ impl Config { } fn check_nakamoto_config(&self, burnchain: &Burnchain) { - let epochs = StacksEpoch::get_epochs( - self.burnchain.get_bitcoin_network().1, - self.burnchain.epochs.as_ref(), - ); + let epochs = self.burnchain.get_epoch_list(); let Some(epoch_30) = epochs.get(StacksEpochId::Epoch30) else { // no Epoch 3.0, so just return return; @@ -635,8 +640,8 @@ impl Config { BitcoinNetworkType::Mainnet => { Err("Cannot configure epochs in mainnet mode".to_string()) } - BitcoinNetworkType::Testnet => Ok(stacks::core::STACKS_EPOCHS_TESTNET.to_vec()), - BitcoinNetworkType::Regtest => Ok(stacks::core::STACKS_EPOCHS_REGTEST.to_vec()), + BitcoinNetworkType::Testnet => Ok(STACKS_EPOCHS_TESTNET.to_vec()), + BitcoinNetworkType::Regtest => Ok(STACKS_EPOCHS_REGTEST.to_vec()), }?; let mut matched_epochs = vec![]; for configured_epoch in conf_epochs.iter() { @@ -659,6 +664,8 @@ impl Config { Ok(StacksEpochId::Epoch25) } else if epoch_name == EPOCH_CONFIG_3_0_0 { Ok(StacksEpochId::Epoch30) + } else if epoch_name == EPOCH_CONFIG_3_1_0 { + Ok(StacksEpochId::Epoch31) } else { Err(format!("Unknown epoch name specified: {epoch_name}")) }?; @@ -685,6 +692,7 @@ impl Config { StacksEpochId::Epoch24, StacksEpochId::Epoch25, StacksEpochId::Epoch30, + StacksEpochId::Epoch31, ]; for (expected_epoch, configured_epoch) in expected_list .iter() @@ -833,7 +841,12 @@ impl Config { } let miner = match config_file.miner { - Some(miner) => miner.into_config_default(miner_default_config)?, + Some(mut miner) => { + if miner.mining_key.is_none() && !node.seed.is_empty() { + miner.mining_key = Some(to_hex(&node.seed)); + } + miner.into_config_default(miner_default_config)? 
+ } None => miner_default_config, }; @@ -1274,6 +1287,10 @@ impl BurnchainConfig { other => panic!("Invalid stacks-node mode: {other}"), } } + + pub fn get_epoch_list(&self) -> EpochList { + StacksEpoch::get_epochs(self.get_bitcoin_network().1, self.epochs.as_ref()) + } } #[derive(Clone, Deserialize, Default, Debug)] @@ -1291,6 +1308,7 @@ pub const EPOCH_CONFIG_2_3_0: &str = "2.3"; pub const EPOCH_CONFIG_2_4_0: &str = "2.4"; pub const EPOCH_CONFIG_2_5_0: &str = "2.5"; pub const EPOCH_CONFIG_3_0_0: &str = "3.0"; +pub const EPOCH_CONFIG_3_1_0: &str = "3.1"; #[derive(Clone, Deserialize, Default, Debug)] pub struct AffirmationOverride { @@ -1638,6 +1656,7 @@ pub struct NodeConfig { pub use_test_genesis_chainstate: Option, pub always_use_affirmation_maps: bool, pub require_affirmed_anchor_blocks: bool, + pub assume_present_anchor_blocks: bool, /// Fault injection for failing to push blocks pub fault_injection_block_push_fail_probability: Option, // fault injection for hiding blocks. @@ -1921,6 +1940,7 @@ impl Default for NodeConfig { use_test_genesis_chainstate: None, always_use_affirmation_maps: true, require_affirmed_anchor_blocks: true, + assume_present_anchor_blocks: true, fault_injection_block_push_fail_probability: None, fault_injection_hide_blocks: false, chain_liveness_poll_time_secs: 300, @@ -2135,6 +2155,8 @@ pub struct MinerConfig { pub block_commit_delay: Duration, /// The percentage of the remaining tenure cost limit to consume each block. 
pub tenure_cost_limit_per_block_percentage: Option, + /// Duration to wait before attempting to issue a tenure extend + pub tenure_timeout: Duration, } impl Default for MinerConfig { @@ -2171,6 +2193,7 @@ impl Default for MinerConfig { tenure_cost_limit_per_block_percentage: Some( DEFAULT_TENURE_COST_LIMIT_PER_BLOCK_PERCENTAGE, ), + tenure_timeout: Duration::from_secs(DEFAULT_TENURE_TIMEOUT_SECS), } } } @@ -2223,6 +2246,7 @@ pub struct ConnectionOptionsFile { pub auth_token: Option, pub antientropy_retry: Option, pub reject_blocks_pushed: Option, + pub stackerdb_hint_replicas: Option, } impl ConnectionOptionsFile { @@ -2352,12 +2376,25 @@ impl ConnectionOptionsFile { handshake_timeout: self.handshake_timeout.unwrap_or(5), max_sockets: self.max_sockets.unwrap_or(800) as usize, antientropy_public: self.antientropy_public.unwrap_or(true), - private_neighbors: self.private_neighbors.unwrap_or(true), + private_neighbors: self.private_neighbors.unwrap_or(false), auth_token: self.auth_token, antientropy_retry: self.antientropy_retry.unwrap_or(default.antientropy_retry), reject_blocks_pushed: self .reject_blocks_pushed .unwrap_or(default.reject_blocks_pushed), + stackerdb_hint_replicas: self + .stackerdb_hint_replicas + .map(|stackerdb_hint_replicas_json| { + let hint_replicas_res: Result< + Vec<(QualifiedContractIdentifier, Vec)>, + String, + > = serde_json::from_str(&stackerdb_hint_replicas_json) + .map_err(|e| format!("Failed to decode `stackerdb_hint_replicas`: {e:?}")); + hint_replicas_res + }) + .transpose()? + .map(HashMap::from_iter) + .unwrap_or(default.stackerdb_hint_replicas), ..default }) } @@ -2393,6 +2430,7 @@ pub struct NodeConfigFile { pub use_test_genesis_chainstate: Option, pub always_use_affirmation_maps: Option, pub require_affirmed_anchor_blocks: Option, + pub assume_present_anchor_blocks: Option, /// At most, how often should the chain-liveness thread /// wake up the chains-coordinator. Defaults to 300s (5 min). 
pub chain_liveness_poll_time_secs: Option, @@ -2474,6 +2512,10 @@ impl NodeConfigFile { // miners should always try to mine, even if they don't have the anchored // blocks in the canonical affirmation map. Followers, however, can stall. require_affirmed_anchor_blocks: self.require_affirmed_anchor_blocks.unwrap_or(!miner), + // as of epoch 3.0, all prepare phases have anchor blocks. + // at the start of epoch 3.0, the chain stalls without anchor blocks. + // only set this to false if you're doing some very extreme testing. + assume_present_anchor_blocks: true, // chainstate fault_injection activation for hide_blocks. // you can't set this in the config file. fault_injection_hide_blocks: false, @@ -2542,10 +2584,18 @@ pub struct MinerConfigFile { pub subsequent_rejection_pause_ms: Option, pub block_commit_delay_ms: Option, pub tenure_cost_limit_per_block_percentage: Option, + pub tenure_timeout_secs: Option, } impl MinerConfigFile { fn into_config_default(self, miner_default_config: MinerConfig) -> Result { + match &self.mining_key { + Some(_) => {} + None => { + panic!("mining key not set"); + } + } + let mining_key = self .mining_key .as_ref() @@ -2675,6 +2725,7 @@ impl MinerConfigFile { subsequent_rejection_pause_ms: self.subsequent_rejection_pause_ms.unwrap_or(miner_default_config.subsequent_rejection_pause_ms), block_commit_delay: self.block_commit_delay_ms.map(Duration::from_millis).unwrap_or(miner_default_config.block_commit_delay), tenure_cost_limit_per_block_percentage, + tenure_timeout: self.tenure_timeout_secs.map(Duration::from_secs).unwrap_or(miner_default_config.tenure_timeout), }) } } diff --git a/stackslib/src/core/mod.rs b/stackslib/src/core/mod.rs index bb850a784c..ba4dbf14d2 100644 --- a/stackslib/src/core/mod.rs +++ b/stackslib/src/core/mod.rs @@ -18,6 +18,7 @@ use std::collections::HashSet; use clarity::vm::costs::ExecutionCost; use lazy_static::lazy_static; +pub use stacks_common::consts::MICROSTACKS_PER_STACKS; use 
stacks_common::types::chainstate::{BlockHeaderHash, BurnchainHeaderHash, StacksBlockId}; pub use stacks_common::types::StacksEpochId; use stacks_common::types::{EpochList as GenericEpochList, StacksEpoch as GenericStacksEpoch}; @@ -46,7 +47,7 @@ pub use stacks_common::consts::{ NETWORK_ID_TESTNET, PEER_NETWORK_EPOCH, PEER_VERSION_EPOCH_1_0, PEER_VERSION_EPOCH_2_0, PEER_VERSION_EPOCH_2_05, PEER_VERSION_EPOCH_2_1, PEER_VERSION_EPOCH_2_2, PEER_VERSION_EPOCH_2_3, PEER_VERSION_EPOCH_2_4, PEER_VERSION_EPOCH_2_5, PEER_VERSION_EPOCH_3_0, - PEER_VERSION_MAINNET, PEER_VERSION_MAINNET_MAJOR, PEER_VERSION_TESTNET, + PEER_VERSION_EPOCH_3_1, PEER_VERSION_MAINNET, PEER_VERSION_MAINNET_MAJOR, PEER_VERSION_TESTNET, PEER_VERSION_TESTNET_MAJOR, STACKS_EPOCH_MAX, }; @@ -99,7 +100,11 @@ pub const BITCOIN_MAINNET_STACKS_24_BURN_HEIGHT: u64 = 791_551; pub const BITCOIN_MAINNET_STACKS_25_BURN_HEIGHT: u64 = 840_360; /// This is Epoch-3.0, activation height proposed in SIP-021 pub const BITCOIN_MAINNET_STACKS_30_BURN_HEIGHT: u64 = 867_867; +/// This is Epoch-3.1, activation height proposed in SIP-029 +pub const BITCOIN_MAINNET_STACKS_31_BURN_HEIGHT: u64 = 875_000; +/// Bitcoin mainline testnet3 activation heights. +/// TODO: No longer used since testnet3 is dead, so remove. pub const BITCOIN_TESTNET_FIRST_BLOCK_HEIGHT: u64 = 2000000; pub const BITCOIN_TESTNET_FIRST_BLOCK_TIMESTAMP: u32 = 1622691840; pub const BITCOIN_TESTNET_FIRST_BLOCK_HASH: &str = @@ -111,6 +116,7 @@ pub const BITCOIN_TESTNET_STACKS_23_BURN_HEIGHT: u64 = 2_431_633; pub const BITCOIN_TESTNET_STACKS_24_BURN_HEIGHT: u64 = 2_432_545; pub const BITCOIN_TESTNET_STACKS_25_BURN_HEIGHT: u64 = 2_583_893; pub const BITCOIN_TESTNET_STACKS_30_BURN_HEIGHT: u64 = 30_000_000; +pub const BITCOIN_TESTNET_STACKS_31_BURN_HEIGHT: u64 = 30_000_001; /// This constant sets the approximate testnet bitcoin height at which 2.5 Xenon /// was reorged back to 2.5 instantiation. This is only used to calculate the @@ -133,8 +139,6 @@ lazy_static! 
{ pub const BOOT_BLOCK_HASH: BlockHeaderHash = BlockHeaderHash([0xff; 32]); pub const BURNCHAIN_BOOT_CONSENSUS_HASH: ConsensusHash = ConsensusHash([0xff; 20]); -pub const MICROSTACKS_PER_STACKS: u32 = 1_000_000; - pub const POX_SUNSET_START: u64 = 100_000; pub const POX_SUNSET_END: u64 = POX_SUNSET_START + 400_000; @@ -298,10 +302,17 @@ lazy_static! { StacksEpoch { epoch_id: StacksEpochId::Epoch30, start_height: BITCOIN_MAINNET_STACKS_30_BURN_HEIGHT, - end_height: STACKS_EPOCH_MAX, + end_height: BITCOIN_MAINNET_STACKS_31_BURN_HEIGHT, block_limit: BLOCK_LIMIT_MAINNET_21.clone(), network_epoch: PEER_VERSION_EPOCH_3_0 }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch31, + start_height: BITCOIN_MAINNET_STACKS_31_BURN_HEIGHT, + end_height: STACKS_EPOCH_MAX, + block_limit: BLOCK_LIMIT_MAINNET_21.clone(), + network_epoch: PEER_VERSION_EPOCH_3_1 + }, ]); } @@ -366,10 +377,17 @@ lazy_static! { StacksEpoch { epoch_id: StacksEpochId::Epoch30, start_height: BITCOIN_TESTNET_STACKS_30_BURN_HEIGHT, - end_height: STACKS_EPOCH_MAX, + end_height: BITCOIN_TESTNET_STACKS_31_BURN_HEIGHT, block_limit: BLOCK_LIMIT_MAINNET_21.clone(), network_epoch: PEER_VERSION_EPOCH_3_0 }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch31, + start_height: BITCOIN_TESTNET_STACKS_31_BURN_HEIGHT, + end_height: STACKS_EPOCH_MAX, + block_limit: BLOCK_LIMIT_MAINNET_21.clone(), + network_epoch: PEER_VERSION_EPOCH_3_1 + }, ]); } @@ -434,10 +452,17 @@ lazy_static! { StacksEpoch { epoch_id: StacksEpochId::Epoch30, start_height: 7001, - end_height: STACKS_EPOCH_MAX, + end_height: 8001, block_limit: BLOCK_LIMIT_MAINNET_21.clone(), network_epoch: PEER_VERSION_EPOCH_3_0 }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch31, + start_height: 8001, + end_height: STACKS_EPOCH_MAX, + block_limit: BLOCK_LIMIT_MAINNET_21.clone(), + network_epoch: PEER_VERSION_EPOCH_3_1 + }, ]); } @@ -469,6 +494,10 @@ pub static STACKS_EPOCH_2_5_MARKER: u8 = 0x0a; /// *or greater*. 
pub static STACKS_EPOCH_3_0_MARKER: u8 = 0x0b; +/// Stacks 3.1 epoch marker. All block-commits in 3.1 must have a memo bitfield with this value +/// *or greater*. +pub static STACKS_EPOCH_3_1_MARKER: u8 = 0x0c; + #[test] fn test_ord_for_stacks_epoch() { let epochs = &*STACKS_EPOCHS_MAINNET; @@ -648,6 +677,42 @@ fn test_ord_for_stacks_epoch() { epochs[StacksEpochId::Epoch30].cmp(&epochs[StacksEpochId::Epoch25]), Ordering::Greater ); + assert_eq!( + epochs[StacksEpochId::Epoch31].cmp(&epochs[StacksEpochId::Epoch10]), + Ordering::Greater + ); + assert_eq!( + epochs[StacksEpochId::Epoch31].cmp(&epochs[StacksEpochId::Epoch20]), + Ordering::Greater + ); + assert_eq!( + epochs[StacksEpochId::Epoch31].cmp(&epochs[StacksEpochId::Epoch2_05]), + Ordering::Greater + ); + assert_eq!( + epochs[StacksEpochId::Epoch31].cmp(&epochs[StacksEpochId::Epoch21]), + Ordering::Greater + ); + assert_eq!( + epochs[StacksEpochId::Epoch31].cmp(&epochs[StacksEpochId::Epoch22]), + Ordering::Greater + ); + assert_eq!( + epochs[StacksEpochId::Epoch31].cmp(&epochs[StacksEpochId::Epoch23]), + Ordering::Greater + ); + assert_eq!( + epochs[StacksEpochId::Epoch31].cmp(&epochs[StacksEpochId::Epoch24]), + Ordering::Greater + ); + assert_eq!( + epochs[StacksEpochId::Epoch31].cmp(&epochs[StacksEpochId::Epoch25]), + Ordering::Greater + ); + assert_eq!( + epochs[StacksEpochId::Epoch31].cmp(&epochs[StacksEpochId::Epoch30]), + Ordering::Greater + ); } #[test] @@ -711,6 +776,8 @@ pub trait StacksEpochExtension { #[cfg(test)] fn unit_test_3_0(epoch_2_0_block_height: u64) -> EpochList; #[cfg(test)] + fn unit_test_3_1(epoch_2_0_block_height: u64) -> EpochList; + #[cfg(test)] fn unit_test_2_1_only(epoch_2_0_block_height: u64) -> EpochList; #[cfg(test)] fn unit_test_3_0_only(first_burnchain_height: u64) -> EpochList; @@ -1350,6 +1417,135 @@ impl StacksEpochExtension for StacksEpoch { ]) } + #[cfg(test)] + fn unit_test_3_1(first_burnchain_height: u64) -> EpochList { + info!( + "StacksEpoch unit_test_3_1 
first_burn_height = {}", + first_burnchain_height + ); + + EpochList::new(&[ + StacksEpoch { + epoch_id: StacksEpochId::Epoch10, + start_height: 0, + end_height: first_burnchain_height, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_1_0, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch20, + start_height: first_burnchain_height, + end_height: first_burnchain_height + 4, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_0, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch2_05, + start_height: first_burnchain_height + 4, + end_height: first_burnchain_height + 8, + block_limit: ExecutionCost { + write_length: 205205, + write_count: 205205, + read_length: 205205, + read_count: 205205, + runtime: 205205, + }, + network_epoch: PEER_VERSION_EPOCH_2_05, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch21, + start_height: first_burnchain_height + 8, + end_height: first_burnchain_height + 12, + block_limit: ExecutionCost { + write_length: 210210, + write_count: 210210, + read_length: 210210, + read_count: 210210, + runtime: 210210, + }, + network_epoch: PEER_VERSION_EPOCH_2_1, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch22, + start_height: first_burnchain_height + 12, + end_height: first_burnchain_height + 16, + block_limit: ExecutionCost { + write_length: 210210, + write_count: 210210, + read_length: 210210, + read_count: 210210, + runtime: 210210, + }, + network_epoch: PEER_VERSION_EPOCH_2_2, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch23, + start_height: first_burnchain_height + 16, + end_height: first_burnchain_height + 20, + block_limit: ExecutionCost { + write_length: 210210, + write_count: 210210, + read_length: 210210, + read_count: 210210, + runtime: 210210, + }, + network_epoch: PEER_VERSION_EPOCH_2_3, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch24, + start_height: first_burnchain_height + 20, + end_height: first_burnchain_height + 24, + block_limit: ExecutionCost 
{ + write_length: 210210, + write_count: 210210, + read_length: 210210, + read_count: 210210, + runtime: 210210, + }, + network_epoch: PEER_VERSION_EPOCH_2_4, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch25, + start_height: first_burnchain_height + 24, + end_height: first_burnchain_height + 28, + block_limit: ExecutionCost { + write_length: 210210, + write_count: 210210, + read_length: 210210, + read_count: 210210, + runtime: 210210, + }, + network_epoch: PEER_VERSION_EPOCH_2_5, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch30, + start_height: first_burnchain_height + 28, + end_height: first_burnchain_height + 32, + block_limit: ExecutionCost { + write_length: 210210, + write_count: 210210, + read_length: 210210, + read_count: 210210, + runtime: 210210, + }, + network_epoch: PEER_VERSION_EPOCH_3_0, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch31, + start_height: first_burnchain_height + 32, + end_height: STACKS_EPOCH_MAX, + block_limit: ExecutionCost { + write_length: 210210, + write_count: 210210, + read_length: 210210, + read_count: 210210, + runtime: 210210, + }, + network_epoch: PEER_VERSION_EPOCH_3_1, + }, + ]) + } + #[cfg(test)] fn unit_test_2_1_only(first_burnchain_height: u64) -> EpochList { info!( @@ -1488,6 +1684,7 @@ impl StacksEpochExtension for StacksEpoch { StacksEpochId::Epoch24 => StacksEpoch::unit_test_2_4(first_burnchain_height), StacksEpochId::Epoch25 => StacksEpoch::unit_test_2_5(first_burnchain_height), StacksEpochId::Epoch30 => StacksEpoch::unit_test_3_0(first_burnchain_height), + StacksEpochId::Epoch31 => StacksEpoch::unit_test_3_1(first_burnchain_height), } } @@ -1542,8 +1739,8 @@ impl StacksEpochExtension for StacksEpoch { .iter() .max() .expect("FATAL: expect at least one epoch"); - if max_epoch.epoch_id == StacksEpochId::Epoch30 { - assert!(PEER_NETWORK_EPOCH >= u32::from(PEER_VERSION_EPOCH_2_5)); + if max_epoch.epoch_id == StacksEpochId::Epoch31 { + assert!(PEER_NETWORK_EPOCH >= u32::from(PEER_VERSION_EPOCH_3_0)); 
} else { assert!( max_epoch.network_epoch as u32 <= PEER_NETWORK_EPOCH, diff --git a/stackslib/src/core/tests/mod.rs b/stackslib/src/core/tests/mod.rs index 01fcac9e89..a209ef0677 100644 --- a/stackslib/src/core/tests/mod.rs +++ b/stackslib/src/core/tests/mod.rs @@ -48,7 +48,7 @@ use crate::chainstate::stacks::db::test::{ }; use crate::chainstate::stacks::db::{StacksChainState, StacksHeaderInfo}; use crate::chainstate::stacks::events::StacksTransactionReceipt; -use crate::chainstate::stacks::index::{MarfTrieId, TrieHashExtension}; +use crate::chainstate::stacks::index::MarfTrieId; use crate::chainstate::stacks::miner::TransactionResult; use crate::chainstate::stacks::test::codec_all_transactions; use crate::chainstate::stacks::{ @@ -151,7 +151,7 @@ pub fn make_block( &mut chainstate_tx, &new_index_hash, &new_tip_info, - &ExecutionCost::zero(), + &ExecutionCost::ZERO, block_height, ) .unwrap(); @@ -288,7 +288,7 @@ fn mempool_walk_over_fork() { available_tx.tx.tx.clone(), vec![], Value::okay(Value::Bool(true)).unwrap(), - ExecutionCost::zero(), + ExecutionCost::ZERO, ), ) .convert_to_event(), @@ -326,7 +326,7 @@ fn mempool_walk_over_fork() { available_tx.tx.tx.clone(), vec![], Value::okay(Value::Bool(true)).unwrap(), - ExecutionCost::zero(), + ExecutionCost::ZERO, ), ) .convert_to_event(), @@ -363,7 +363,7 @@ fn mempool_walk_over_fork() { available_tx.tx.tx.clone(), vec![], Value::okay(Value::Bool(true)).unwrap(), - ExecutionCost::zero(), + ExecutionCost::ZERO, ), ) .convert_to_event(), @@ -405,7 +405,7 @@ fn mempool_walk_over_fork() { available_tx.tx.tx.clone(), vec![], Value::okay(Value::Bool(true)).unwrap(), - ExecutionCost::zero(), + ExecutionCost::ZERO, ), ) .convert_to_event(), @@ -445,7 +445,7 @@ fn mempool_walk_over_fork() { available_tx.tx.tx.clone(), vec![], Value::okay(Value::Bool(true)).unwrap(), - ExecutionCost::zero(), + ExecutionCost::ZERO, ), ) .convert_to_event(), @@ -683,7 +683,7 @@ fn test_iterate_candidates_consider_no_estimate_tx_prob() { 
available_tx.tx.tx.clone(), vec![], Value::okay(Value::Bool(true)).unwrap(), - ExecutionCost::zero(), + ExecutionCost::ZERO, ), ) .convert_to_event(), @@ -720,7 +720,7 @@ fn test_iterate_candidates_consider_no_estimate_tx_prob() { available_tx.tx.tx.clone(), vec![], Value::okay(Value::Bool(true)).unwrap(), - ExecutionCost::zero(), + ExecutionCost::ZERO, ), ) .convert_to_event(), @@ -757,7 +757,7 @@ fn test_iterate_candidates_consider_no_estimate_tx_prob() { available_tx.tx.tx.clone(), vec![], Value::okay(Value::Bool(true)).unwrap(), - ExecutionCost::zero(), + ExecutionCost::ZERO, ), ) .convert_to_event(), @@ -866,7 +866,7 @@ fn test_iterate_candidates_skipped_transaction() { available_tx.tx.tx.clone(), vec![], Value::okay(Value::Bool(true)).unwrap(), - ExecutionCost::zero(), + ExecutionCost::ZERO, ), ) .convert_to_event() @@ -981,7 +981,7 @@ fn test_iterate_candidates_processing_error_transaction() { available_tx.tx.tx.clone(), vec![], Value::okay(Value::Bool(true)).unwrap(), - ExecutionCost::zero(), + ExecutionCost::ZERO, ), ) .convert_to_event() @@ -1096,7 +1096,7 @@ fn test_iterate_candidates_problematic_transaction() { available_tx.tx.tx.clone(), vec![], Value::okay(Value::Bool(true)).unwrap(), - ExecutionCost::zero(), + ExecutionCost::ZERO, ), ) .convert_to_event() @@ -1248,7 +1248,7 @@ fn test_iterate_candidates_concurrent_write_lock() { available_tx.tx.tx.clone(), vec![], Value::okay(Value::Bool(true)).unwrap(), - ExecutionCost::zero(), + ExecutionCost::ZERO, ), ) .convert_to_event(), @@ -2792,7 +2792,7 @@ fn test_filter_txs_by_type() { available_tx.tx.tx.clone(), vec![], Value::okay(Value::Bool(true)).unwrap(), - ExecutionCost::zero(), + ExecutionCost::ZERO, ), ) .convert_to_event(), @@ -2827,7 +2827,7 @@ fn test_filter_txs_by_type() { available_tx.tx.tx.clone(), vec![], Value::okay(Value::Bool(true)).unwrap(), - ExecutionCost::zero(), + ExecutionCost::ZERO, ), ) .convert_to_event(), diff --git a/stackslib/src/cost_estimates/pessimistic.rs 
b/stackslib/src/cost_estimates/pessimistic.rs index bb1cf48f38..cdb3ceb7da 100644 --- a/stackslib/src/cost_estimates/pessimistic.rs +++ b/stackslib/src/cost_estimates/pessimistic.rs @@ -230,6 +230,8 @@ impl PessimisticEstimator { StacksEpochId::Epoch25 => ":2.1", // reuse cost estimates in Epoch30 StacksEpochId::Epoch30 => ":2.1", + // reuse cost estimates in Epoch31 + StacksEpochId::Epoch31 => ":2.1", }; format!( "cc{}:{}:{}.{}", diff --git a/stackslib/src/cost_estimates/tests/common.rs b/stackslib/src/cost_estimates/tests/common.rs index 01f6c32ec7..4ce9ea48cc 100644 --- a/stackslib/src/cost_estimates/tests/common.rs +++ b/stackslib/src/cost_estimates/tests/common.rs @@ -44,8 +44,8 @@ pub fn make_block_receipt(tx_receipts: Vec) -> StacksE tx_receipts, matured_rewards: vec![], matured_rewards_info: None, - parent_microblocks_cost: ExecutionCost::zero(), - anchored_block_cost: ExecutionCost::zero(), + parent_microblocks_cost: ExecutionCost::ZERO, + anchored_block_cost: ExecutionCost::ZERO, parent_burn_block_hash: BurnchainHeaderHash([0; 32]), parent_burn_block_height: 1, parent_burn_block_timestamp: 1, diff --git a/stackslib/src/cost_estimates/tests/cost_estimators.rs b/stackslib/src/cost_estimates/tests/cost_estimators.rs index eabbb4a148..1ed6b034e5 100644 --- a/stackslib/src/cost_estimates/tests/cost_estimators.rs +++ b/stackslib/src/cost_estimates/tests/cost_estimators.rs @@ -102,7 +102,7 @@ fn make_dummy_transfer_tx() -> StacksTransactionReceipt { tx, vec![], Value::okay(Value::Bool(true)).unwrap(), - ExecutionCost::zero(), + ExecutionCost::ZERO, ) } diff --git a/stackslib/src/cost_estimates/tests/fee_scalar.rs b/stackslib/src/cost_estimates/tests/fee_scalar.rs index 685fc6430a..3bfc4b966a 100644 --- a/stackslib/src/cost_estimates/tests/fee_scalar.rs +++ b/stackslib/src/cost_estimates/tests/fee_scalar.rs @@ -94,7 +94,7 @@ fn make_dummy_transfer_tx(fee: u64) -> StacksTransactionReceipt { tx, vec![], Value::okay(Value::Bool(true)).unwrap(), - 
ExecutionCost::zero(), + ExecutionCost::ZERO, ) } @@ -115,7 +115,7 @@ fn make_dummy_cc_tx(fee: u64) -> StacksTransactionReceipt { vec![], Value::okay(Value::Bool(true)).unwrap(), 0, - ExecutionCost::zero(), + ExecutionCost::ZERO, ) } diff --git a/stackslib/src/lib.rs b/stackslib/src/lib.rs index 31f97628a6..190ef8a1f0 100644 --- a/stackslib/src/lib.rs +++ b/stackslib/src/lib.rs @@ -63,6 +63,7 @@ pub mod clarity_cli; /// A high level library for interacting with the Clarity vm pub mod clarity_vm; pub mod cli; +pub mod config; pub mod core; pub mod cost_estimates; pub mod deps; diff --git a/stackslib/src/main.rs b/stackslib/src/main.rs index bcb7dfc964..76a9d63c21 100644 --- a/stackslib/src/main.rs +++ b/stackslib/src/main.rs @@ -49,6 +49,10 @@ use blockstack_lib::chainstate::burn::db::sortdb::{ use blockstack_lib::chainstate::burn::operations::BlockstackOperationType; use blockstack_lib::chainstate::burn::{BlockSnapshot, ConsensusHash}; use blockstack_lib::chainstate::coordinator::{get_reward_cycle_info, OnChainRewardSetProvider}; +use blockstack_lib::chainstate::nakamoto::miner::NakamotoBlockBuilder; +use blockstack_lib::chainstate::nakamoto::shadow::{ + process_shadow_block, shadow_chainstate_repair, +}; use blockstack_lib::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState}; use blockstack_lib::chainstate::stacks::db::blocks::{DummyEventDispatcher, StagingBlock}; use blockstack_lib::chainstate::stacks::db::{ @@ -247,6 +251,56 @@ impl P2PSession { } } +fn open_nakamoto_chainstate_dbs( + chainstate_dir: &str, + network: &str, +) -> (SortitionDB, StacksChainState) { + let (mainnet, chain_id, pox_constants, dirname) = match network { + "mainnet" => ( + true, + CHAIN_ID_MAINNET, + PoxConstants::mainnet_default(), + network, + ), + "krypton" => ( + false, + 0x80000100, + PoxConstants::nakamoto_testnet_default(), + network, + ), + "naka3" => ( + false, + 0x80000000, + PoxConstants::new(20, 5, 3, 100, 0, u64::MAX, u64::MAX, 104, 105, 106, 107), + "nakamoto-neon", + 
), + _ => { + panic!("Unrecognized network name '{}'", network); + } + }; + + let chain_state_path = format!("{}/{}/chainstate/", chainstate_dir, dirname); + let sort_db_path = format!("{}/{}/burnchain/sortition/", chainstate_dir, dirname); + + let sort_db = SortitionDB::open(&sort_db_path, true, pox_constants) + .unwrap_or_else(|_| panic!("Failed to open {sort_db_path}")); + + let (chain_state, _) = StacksChainState::open(mainnet, chain_id, &chain_state_path, None) + .expect("Failed to open stacks chain state"); + + (sort_db, chain_state) +} + +fn check_shadow_network(network: &str) { + if network != "mainnet" && network != "krypton" && network != "naka3" { + eprintln!( + "Unknown network '{}': only support 'mainnet', 'krypton', or 'naka3'", + &network + ); + process::exit(1); + } +} + #[cfg_attr(test, mutants::skip)] fn main() { let mut argv: Vec = env::args().collect(); @@ -255,6 +309,8 @@ fn main() { process::exit(1); } + let common_opts = cli::drain_common_opts(&mut argv, 1); + if argv[1] == "--version" { println!( "{}", @@ -742,128 +798,7 @@ check if the associated microblocks can be downloaded } if argv[1] == "try-mine" { - if argv.len() < 3 { - eprintln!( - "Usage: {} try-mine [min-fee [max-time]] - -Given a , try to ''mine'' an anchored block. This invokes the miner block -assembly, but does not attempt to broadcast a block commit. This is useful for determining -what transactions a given chain state would include in an anchor block, or otherwise -simulating a miner. 
-", - argv[0] - ); - process::exit(1); - } - - let start = get_epoch_time_ms(); - let burnchain_path = format!("{}/mainnet/burnchain", &argv[2]); - let sort_db_path = format!("{}/mainnet/burnchain/sortition", &argv[2]); - let chain_state_path = format!("{}/mainnet/chainstate/", &argv[2]); - - let mut min_fee = u64::MAX; - let mut max_time = u64::MAX; - - if argv.len() >= 4 { - min_fee = argv[3].parse().expect("Could not parse min_fee"); - } - if argv.len() >= 5 { - max_time = argv[4].parse().expect("Could not parse max_time"); - } - - let sort_db = SortitionDB::open(&sort_db_path, false, PoxConstants::mainnet_default()) - .unwrap_or_else(|_| panic!("Failed to open {sort_db_path}")); - let chain_id = CHAIN_ID_MAINNET; - let (chain_state, _) = StacksChainState::open(true, chain_id, &chain_state_path, None) - .expect("Failed to open stacks chain state"); - let chain_tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()) - .expect("Failed to get sortition chain tip"); - - let estimator = Box::new(UnitEstimator); - let metric = Box::new(UnitMetric); - - let mut mempool_db = MemPoolDB::open(true, chain_id, &chain_state_path, estimator, metric) - .expect("Failed to open mempool db"); - - let header_tip = NakamotoChainState::get_canonical_block_header(chain_state.db(), &sort_db) - .unwrap() - .unwrap(); - let parent_header = StacksChainState::get_anchored_block_header_info( - chain_state.db(), - &header_tip.consensus_hash, - &header_tip.anchored_header.block_hash(), - ) - .expect("Failed to load chain tip header info") - .expect("Failed to load chain tip header info"); - - let sk = StacksPrivateKey::new(); - let mut tx_auth = TransactionAuth::from_p2pkh(&sk).unwrap(); - tx_auth.set_origin_nonce(0); - - let mut coinbase_tx = StacksTransaction::new( - TransactionVersion::Mainnet, - tx_auth, - TransactionPayload::Coinbase(CoinbasePayload([0u8; 32]), None, None), - ); - - coinbase_tx.chain_id = chain_id; - coinbase_tx.anchor_mode = TransactionAnchorMode::OnChainOnly; 
- let mut tx_signer = StacksTransactionSigner::new(&coinbase_tx); - tx_signer.sign_origin(&sk).unwrap(); - let coinbase_tx = tx_signer.get_tx().unwrap(); - - let mut settings = BlockBuilderSettings::limited(); - settings.max_miner_time_ms = max_time; - - let result = StacksBlockBuilder::build_anchored_block( - &chain_state, - &sort_db.index_handle(&chain_tip.sortition_id), - &mut mempool_db, - &parent_header, - chain_tip.total_burn, - VRFProof::empty(), - Hash160([0; 20]), - &coinbase_tx, - settings, - None, - &Burnchain::new(&burnchain_path, "bitcoin", "main").unwrap(), - ); - - let stop = get_epoch_time_ms(); - - println!( - "{} mined block @ height = {} off of {} ({}/{}) in {}ms. Min-fee: {}, Max-time: {}", - if result.is_ok() { - "Successfully" - } else { - "Failed to" - }, - parent_header.stacks_block_height + 1, - StacksBlockHeader::make_index_block_hash( - &parent_header.consensus_hash, - &parent_header.anchored_header.block_hash() - ), - &parent_header.consensus_hash, - &parent_header.anchored_header.block_hash(), - stop.saturating_sub(start), - min_fee, - max_time - ); - - if let Ok((block, execution_cost, size)) = result { - let mut total_fees = 0; - for tx in block.txs.iter() { - total_fees += tx.get_tx_fee(); - } - println!( - "Block {}: {} uSTX, {} bytes, cost {:?}", - block.block_hash(), - total_fees, - size, - &execution_cost - ); - } - + cli::command_try_mine(&argv[1..], common_opts.config.as_ref()); process::exit(0); } @@ -1166,6 +1101,204 @@ simulating a miner. 
println!("{:?}", inv); } + if argv[1] == "get-nakamoto-tip" { + if argv.len() < 4 { + eprintln!( + "Usage: {} get-nakamoto-tip CHAINSTATE_DIR NETWORK", + &argv[0] + ); + process::exit(1); + } + + let chainstate_dir = argv[2].as_str(); + let network = argv[3].as_str(); + + check_shadow_network(network); + let (sort_db, chain_state) = open_nakamoto_chainstate_dbs(chainstate_dir, network); + + let header = NakamotoChainState::get_canonical_block_header(chain_state.db(), &sort_db) + .unwrap() + .unwrap(); + println!("{}", &header.index_block_hash()); + process::exit(0); + } + + if argv[1] == "get-account" { + if argv.len() < 5 { + eprintln!( + "Usage: {} get-account CHAINSTATE_DIR mainnet|krypton ADDRESS [CHAIN_TIP]", + &argv[0] + ); + process::exit(1); + } + + let chainstate_dir = argv[2].as_str(); + let network = argv[3].as_str(); + let addr = StacksAddress::from_string(&argv[4]).unwrap(); + let chain_tip: Option = + argv.get(5).map(|tip| StacksBlockId::from_hex(tip).unwrap()); + + check_shadow_network(network); + let (sort_db, mut chain_state) = open_nakamoto_chainstate_dbs(chainstate_dir, network); + + let chain_tip_header = chain_tip + .map(|tip| { + let header = NakamotoChainState::get_block_header_nakamoto(chain_state.db(), &tip) + .unwrap() + .unwrap(); + header + }) + .unwrap_or_else(|| { + let header = + NakamotoChainState::get_canonical_block_header(chain_state.db(), &sort_db) + .unwrap() + .unwrap(); + header + }); + + let account = + NakamotoBlockBuilder::get_account(&mut chain_state, &sort_db, &addr, &chain_tip_header) + .unwrap(); + println!("{:#?}", &account); + process::exit(0); + } + + if argv[1] == "make-shadow-block" { + if argv.len() < 5 { + eprintln!( + "Usage: {} make-shadow-block CHAINSTATE_DIR NETWORK CHAIN_TIP_HASH [TX...]", + &argv[0] + ); + process::exit(1); + } + let chainstate_dir = argv[2].as_str(); + let network = argv[3].as_str(); + let chain_tip = StacksBlockId::from_hex(argv[4].as_str()).unwrap(); + let txs = argv[5..] 
+ .iter() + .map(|tx_str| { + let tx_bytes = hex_bytes(&tx_str).unwrap(); + let tx = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); + tx + }) + .collect(); + + check_shadow_network(network); + let (sort_db, mut chain_state) = open_nakamoto_chainstate_dbs(chainstate_dir, network); + let header = NakamotoChainState::get_block_header(chain_state.db(), &chain_tip) + .unwrap() + .unwrap(); + + let shadow_block = NakamotoBlockBuilder::make_shadow_tenure( + &mut chain_state, + &sort_db, + chain_tip, + header.consensus_hash, + txs, + ) + .unwrap(); + + println!("{}", to_hex(&shadow_block.serialize_to_vec())); + process::exit(0); + } + + // Generates the shadow blocks needed to restore this node to working order. + // Automatically inserts and processes them as well. + // Prints out the generated shadow blocks (as JSON) + if argv[1] == "shadow-chainstate-repair" { + if argv.len() < 4 { + eprintln!( + "Usage: {} shadow-chainstate-repair CHAINSTATE_DIR NETWORK", + &argv[0] + ); + process::exit(1); + } + + let chainstate_dir = argv[2].as_str(); + let network = argv[3].as_str(); + + check_shadow_network(network); + + let (mut sort_db, mut chain_state) = open_nakamoto_chainstate_dbs(chainstate_dir, network); + let shadow_blocks = shadow_chainstate_repair(&mut chain_state, &mut sort_db).unwrap(); + + let shadow_blocks_hex: Vec<_> = shadow_blocks + .into_iter() + .map(|blk| to_hex(&blk.serialize_to_vec())) + .collect(); + + println!("{}", serde_json::to_string(&shadow_blocks_hex).unwrap()); + process::exit(0); + } + + // Inserts and processes shadow blocks generated from `shadow-chainstate-repair` + if argv[1] == "shadow-chainstate-patch" { + if argv.len() < 5 { + eprintln!( + "Usage: {} shadow-chainstate-patch CHAINSTATE_DIR NETWORK SHADOW_BLOCKS_PATH.JSON", + &argv[0] + ); + process::exit(1); + } + + let chainstate_dir = argv[2].as_str(); + let network = argv[3].as_str(); + let shadow_blocks_json_path = argv[4].as_str(); + + let shadow_blocks_hex = { + 
let mut blocks_json_file = + File::open(shadow_blocks_json_path).expect("Unable to open file"); + let mut buffer = vec![]; + blocks_json_file.read_to_end(&mut buffer).unwrap(); + let shadow_blocks_hex: Vec = serde_json::from_slice(&buffer).unwrap(); + shadow_blocks_hex + }; + + let shadow_blocks: Vec<_> = shadow_blocks_hex + .into_iter() + .map(|blk_hex| { + NakamotoBlock::consensus_deserialize(&mut hex_bytes(&blk_hex).unwrap().as_slice()) + .unwrap() + }) + .collect(); + + check_shadow_network(network); + + let (mut sort_db, mut chain_state) = open_nakamoto_chainstate_dbs(chainstate_dir, network); + for shadow_block in shadow_blocks.into_iter() { + process_shadow_block(&mut chain_state, &mut sort_db, shadow_block).unwrap(); + } + + process::exit(0); + } + + if argv[1] == "add-shadow-block" { + if argv.len() < 5 { + eprintln!( + "Usage: {} add-shadow-block CHAINSTATE_DIR NETWORK SHADOW_BLOCK_HEX", + &argv[0] + ); + process::exit(1); + } + let chainstate_dir = argv[2].as_str(); + let network = argv[3].as_str(); + let block_hex = argv[4].as_str(); + let shadow_block = + NakamotoBlock::consensus_deserialize(&mut hex_bytes(block_hex).unwrap().as_slice()) + .unwrap(); + + assert!(shadow_block.is_shadow_block()); + + check_shadow_network(network); + let (_, mut chain_state) = open_nakamoto_chainstate_dbs(chainstate_dir, network); + + let tx = chain_state.staging_db_tx_begin().unwrap(); + tx.add_shadow_block(&shadow_block).unwrap(); + tx.commit().unwrap(); + + process::exit(0); + } + if argv[1] == "replay-chainstate" { if argv.len() < 7 { eprintln!("Usage: {} OLD_CHAINSTATE_PATH OLD_SORTITION_DB_PATH OLD_BURNCHAIN_DB_PATH NEW_CHAINSTATE_PATH NEW_BURNCHAIN_DB_PATH", &argv[0]); @@ -1345,6 +1478,7 @@ simulating a miner. SortitionDB::get_canonical_burn_chain_tip(new_sortition_db.conn()).unwrap(); new_sortition_db .evaluate_sortition( + false, &burn_block_header, blockstack_txs, &burnchain, @@ -1466,41 +1600,17 @@ simulating a miner. 
} if argv[1] == "replay-block" { - cli::command_replay_block(&argv[1..], None); + cli::command_replay_block(&argv[1..], common_opts.config.as_ref()); process::exit(0); } if argv[1] == "replay-naka-block" { - let chain_config = - if let Some(network_flag_ix) = argv.iter().position(|arg| arg == "--network") { - let Some(network_choice) = argv.get(network_flag_ix + 1) else { - eprintln!("Must supply network choice after `--network` option"); - process::exit(1); - }; - - let network_config = match network_choice.to_lowercase().as_str() { - "testnet" => cli::StacksChainConfig::default_testnet(), - "mainnet" => cli::StacksChainConfig::default_mainnet(), - other => { - eprintln!("Unknown network choice `{other}`"); - process::exit(1); - } - }; - - argv.remove(network_flag_ix + 1); - argv.remove(network_flag_ix); - - Some(network_config) - } else { - None - }; - - cli::command_replay_block_nakamoto(&argv[1..], chain_config.as_ref()); + cli::command_replay_block_nakamoto(&argv[1..], common_opts.config.as_ref()); process::exit(0); } if argv[1] == "replay-mock-mining" { - cli::command_replay_mock_mining(&argv[1..], None); + cli::command_replay_mock_mining(&argv[1..], common_opts.config.as_ref()); process::exit(0); } @@ -1842,6 +1952,7 @@ fn analyze_sortition_mev(argv: Vec) { debug!("Re-evaluate sortition at height {}", height); let (next_sn, state_transition) = sortdb .evaluate_sortition( + true, &burn_block.header, burn_block.ops.clone(), &burnchain, @@ -1857,6 +1968,7 @@ fn analyze_sortition_mev(argv: Vec) { let mut sort_tx = sortdb.tx_begin_at_tip(); let tip_pox_id = sort_tx.get_pox_id().unwrap(); let next_sn_nakamoto = BlockSnapshot::make_snapshot_in_epoch( + true, &mut sort_tx, &burnchain, &ancestor_sn.sortition_id, diff --git a/stackslib/src/net/api/get_tenures_fork_info.rs b/stackslib/src/net/api/get_tenures_fork_info.rs index 8bcf32ce1d..2cb2847290 100644 --- a/stackslib/src/net/api/get_tenures_fork_info.rs +++ b/stackslib/src/net/api/get_tenures_fork_info.rs @@ 
-231,21 +231,31 @@ impl RPCRequestHandler for GetTenuresForkInfo { chainstate, &network.stacks_tip.block_id(), )?); - let handle = sortdb.index_handle(&cursor.sortition_id); let mut depth = 0; while depth < DEPTH_LIMIT && cursor.consensus_hash != recurse_end { - depth += 1; if height_bound >= cursor.block_height { return Err(ChainError::NotInSameFork); } - cursor = handle - .get_last_snapshot_with_sortition(cursor.block_height.saturating_sub(1))?; - results.push(TenureForkingInfo::from_snapshot( - &cursor, - sortdb, - chainstate, - &network.stacks_tip.block_id(), - )?); + cursor = + SortitionDB::get_block_snapshot(sortdb.conn(), &cursor.parent_sortition_id)? + .ok_or_else(|| ChainError::NoSuchBlockError)?; + if cursor.sortition + || chainstate + .nakamoto_blocks_db() + .is_shadow_tenure(&cursor.consensus_hash)? + { + results.push(TenureForkingInfo::from_snapshot( + &cursor, + sortdb, + chainstate, + &network.stacks_tip.block_id(), + )?); + } + if cursor.sortition { + // don't count shadow blocks towards the depth, since there can be a large + // swath of them. + depth += 1; + } } Ok(results) diff --git a/stackslib/src/net/api/getclaritymarfvalue.rs b/stackslib/src/net/api/getclaritymarfvalue.rs new file mode 100644 index 0000000000..678d4fa46b --- /dev/null +++ b/stackslib/src/net/api/getclaritymarfvalue.rs @@ -0,0 +1,219 @@ +// Copyright (C) 2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use clarity::vm::clarity::ClarityConnection; +use clarity::vm::representations::CONTRACT_PRINCIPAL_REGEX_STRING; +use lazy_static::lazy_static; +use regex::{Captures, Regex}; +use stacks_common::types::chainstate::TrieHash; +use stacks_common::types::net::PeerHost; +use stacks_common::util::hash::to_hex; + +use crate::net::http::{ + parse_json, Error, HttpNotFound, HttpRequest, HttpRequestContents, HttpRequestPreamble, + HttpResponse, HttpResponseContents, HttpResponsePayload, HttpResponsePreamble, +}; +use crate::net::httpcore::{ + request, HttpPreambleExtensions, HttpRequestContentsExtensions, RPCRequestHandler, + StacksHttpRequest, StacksHttpResponse, +}; +use crate::net::{Error as NetError, StacksNodeState, TipRequest}; + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct ClarityMarfResponse { + pub data: String, + #[serde(rename = "proof")] + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub marf_proof: Option, +} + +#[derive(Clone)] +pub struct RPCGetClarityMarfRequestHandler { + pub marf_key_hash: Option, +} +impl RPCGetClarityMarfRequestHandler { + pub fn new() -> Self { + Self { + marf_key_hash: None, + } + } +} + +/// Decode the HTTP request +impl HttpRequest for RPCGetClarityMarfRequestHandler { + fn verb(&self) -> &'static str { + "GET" + } + + fn path_regex(&self) -> Regex { + Regex::new(r#"^/v2/clarity/marf/(?P[0-9a-f]{64})$"#).unwrap() + } + + fn metrics_identifier(&self) -> &str { + "/v2/clarity/marf/:marf_key_hash" + } + + /// Try to decode this request. + /// There's nothing to load here, so just make sure the request is well-formed. 
+ fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + captures: &Captures, + query: Option<&str>, + _body: &[u8], + ) -> Result { + if preamble.get_content_length() != 0 { + return Err(Error::DecodeError( + "Invalid Http request: expected 0-length body".to_string(), + )); + } + + let marf_key = if let Some(key_str) = captures.name("marf_key_hash") { + TrieHash::from_hex(key_str.as_str()) + .map_err(|e| Error::Http(400, format!("Invalid hash string: {e:?}")))? + } else { + return Err(Error::Http(404, "Missing `marf_key_hash`".to_string())); + }; + + self.marf_key_hash = Some(marf_key); + + let contents = HttpRequestContents::new().query_string(query); + Ok(contents) + } +} + +/// Handle the HTTP request +impl RPCRequestHandler for RPCGetClarityMarfRequestHandler { + /// Reset internal state + fn restart(&mut self) { + self.marf_key_hash = None; + } + + /// Make the response + fn try_handle_request( + &mut self, + preamble: HttpRequestPreamble, + contents: HttpRequestContents, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + let marf_key_hash = self + .marf_key_hash + .take() + .ok_or(NetError::SendError("`marf_key_hash` not set".to_string()))?; + + let tip = match node.load_stacks_chain_tip(&preamble, &contents) { + Ok(tip) => tip, + Err(error_resp) => { + return error_resp.try_into_contents().map_err(NetError::from); + } + }; + + let with_proof = contents.get_with_proof(); + + let data_opt = node.with_node_state(|_network, sortdb, chainstate, _mempool, _rpc_args| { + chainstate.maybe_read_only_clarity_tx( + &sortdb.index_handle_at_block(chainstate, &tip)?, + &tip, + |clarity_tx| { + clarity_tx.with_clarity_db_readonly(|clarity_db| { + let (value_hex, marf_proof): (String, _) = if with_proof { + clarity_db + .get_data_with_proof_by_hash(&marf_key_hash) + .ok() + .flatten() + .map(|(a, b)| (a, Some(format!("0x{}", to_hex(&b)))))? 
+ } else { + clarity_db + .get_data_by_hash(&marf_key_hash) + .ok() + .flatten() + .map(|a| (a, None))? + }; + + let data = format!("0x{}", value_hex); + Some(ClarityMarfResponse { data, marf_proof }) + }) + }, + ) + }); + + let data_resp = match data_opt { + Ok(Some(Some(data))) => data, + Ok(Some(None)) => { + return StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new("Marf key hash not found".to_string()), + ) + .try_into_contents() + .map_err(NetError::from); + } + Ok(None) | Err(_) => { + return StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new("Chain tip not found".to_string()), + ) + .try_into_contents() + .map_err(NetError::from); + } + }; + + let mut preamble = HttpResponsePreamble::ok_json(&preamble); + preamble.set_canonical_stacks_tip_height(Some(node.canonical_stacks_tip_height())); + let body = HttpResponseContents::try_from_json(&data_resp)?; + Ok((preamble, body)) + } +} + +/// Decode the HTTP response +impl HttpResponse for RPCGetClarityMarfRequestHandler { + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + let marf_value: ClarityMarfResponse = parse_json(preamble, body)?; + Ok(HttpResponsePayload::try_from_json(marf_value)?) 
+ } +} + +impl StacksHttpRequest { + pub fn new_getclaritymarf( + host: PeerHost, + marf_key_hash: TrieHash, + tip_req: TipRequest, + with_proof: bool, + ) -> StacksHttpRequest { + StacksHttpRequest::new_for_peer( + host, + "GET".into(), + format!("/v2/clarity/marf/{}", &marf_key_hash), + HttpRequestContents::new() + .for_tip(tip_req) + .query_arg("proof".into(), if with_proof { "1" } else { "0" }.into()), + ) + .expect("FATAL: failed to construct request from infallible data") + } +} + +impl StacksHttpResponse { + pub fn decode_clarity_marf_response(self) -> Result { + let contents = self.get_http_payload_ok()?; + let contents_json: serde_json::Value = contents.try_into()?; + let resp: ClarityMarfResponse = serde_json::from_value(contents_json) + .map_err(|_e| NetError::DeserializeError("Failed to load from JSON".to_string()))?; + Ok(resp) + } +} diff --git a/stackslib/src/net/api/getclaritymetadata.rs b/stackslib/src/net/api/getclaritymetadata.rs new file mode 100644 index 0000000000..ee6ec96567 --- /dev/null +++ b/stackslib/src/net/api/getclaritymetadata.rs @@ -0,0 +1,272 @@ +// Copyright (C) 2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +use clarity::vm::clarity::ClarityConnection; +use clarity::vm::database::clarity_db::ContractDataVarName; +use clarity::vm::database::StoreType; +use clarity::vm::representations::{ + CONTRACT_NAME_REGEX_STRING, MAX_STRING_LEN, STANDARD_PRINCIPAL_REGEX_STRING, +}; +use clarity::vm::types::QualifiedContractIdentifier; +use clarity::vm::ContractName; +use lazy_static::lazy_static; +use regex::{Captures, Regex}; +use stacks_common::types::chainstate::StacksAddress; +use stacks_common::types::net::PeerHost; + +use crate::net::http::{ + parse_json, Error, HttpNotFound, HttpRequest, HttpRequestContents, HttpRequestPreamble, + HttpResponse, HttpResponseContents, HttpResponsePayload, HttpResponsePreamble, +}; +use crate::net::httpcore::{ + request, HttpPreambleExtensions, HttpRequestContentsExtensions, RPCRequestHandler, + StacksHttpRequest, StacksHttpResponse, +}; +use crate::net::{Error as NetError, StacksNodeState, TipRequest}; + +lazy_static! { + static ref CLARITY_NAME_NO_BOUNDARIES_REGEX_STRING: String = format!( + "([a-zA-Z]([a-zA-Z0-9]|[-_!?+<>=/*])*|[-+=/*]|[<>]=?){{1,{}}}", + MAX_STRING_LEN + ); + static ref METADATA_KEY_REGEX_STRING: String = format!( + r"vm-metadata::(?P(\d{{1,2}}))::(?P(contract|contract-size|contract-src|contract-data-size|{}))", + *CLARITY_NAME_NO_BOUNDARIES_REGEX_STRING, + ); +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct ClarityMetadataResponse { + pub data: String, +} + +#[derive(Clone)] +pub struct RPCGetClarityMetadataRequestHandler { + pub clarity_metadata_key: Option, + pub contract_identifier: Option, +} +impl RPCGetClarityMetadataRequestHandler { + pub fn new() -> Self { + Self { + clarity_metadata_key: None, + contract_identifier: None, + } + } +} + +/// Decode the HTTP request +impl HttpRequest for RPCGetClarityMetadataRequestHandler { + fn verb(&self) -> &'static str { + "GET" + } + + fn path_regex(&self) -> Regex { + Regex::new(&format!( + r"^/v2/clarity/metadata/(?P
{})/(?P{})/(?P(analysis)|({}))$", + *STANDARD_PRINCIPAL_REGEX_STRING, + *CONTRACT_NAME_REGEX_STRING, + *METADATA_KEY_REGEX_STRING + )) + .unwrap() + } + + fn metrics_identifier(&self) -> &str { + "/v2/clarity/metadata/:principal/:contract_name/:clarity_metadata_key" + } + + fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + captures: &Captures, + query: Option<&str>, + _body: &[u8], + ) -> Result { + if preamble.get_content_length() != 0 { + return Err(Error::DecodeError( + "Invalid Http request: expected 0-length body".to_string(), + )); + } + + let contract_identifier = request::get_contract_address(captures, "address", "contract")?; + + let metadata_key = match captures.name("clarity_metadata_key") { + Some(key_str) => key_str.as_str().to_string(), + None => { + return Err(Error::DecodeError( + "Missing `clarity_metadata_key`".to_string(), + )); + } + }; + + if metadata_key != "analysis" { + // Validate that the metadata key is well-formed. It must be of data type: + // DataMapMeta (5) | VariableMeta (6) | FungibleTokenMeta (7) | NonFungibleTokenMeta (8) + // or Contract (9) followed by a valid contract metadata name + match captures + .name("data_type") + .and_then(|data_type| StoreType::try_from(data_type.as_str()).ok()) + { + Some(data_type) => match data_type { + StoreType::DataMapMeta + | StoreType::VariableMeta + | StoreType::FungibleTokenMeta + | StoreType::NonFungibleTokenMeta => {} + StoreType::Contract => { + if captures + .name("var_name") + .and_then(|var_name| { + ContractDataVarName::try_from(var_name.as_str()).ok() + }) + .is_none() + { + return Err(Error::DecodeError( + "Invalid metadata var name".to_string(), + )); + } + } + _ => { + return Err(Error::DecodeError("Invalid metadata type".to_string())); + } + }, + None => { + return Err(Error::DecodeError("Invalid metadata type".to_string())); + } + } + } + + self.contract_identifier = Some(contract_identifier); + self.clarity_metadata_key = Some(metadata_key); + + let contents 
= HttpRequestContents::new().query_string(query); + Ok(contents) + } +} + +/// Handle the HTTP request +impl RPCRequestHandler for RPCGetClarityMetadataRequestHandler { + /// Reset internal state + fn restart(&mut self) { + self.contract_identifier = None; + self.clarity_metadata_key = None; + } + + /// Make the response + fn try_handle_request( + &mut self, + preamble: HttpRequestPreamble, + contents: HttpRequestContents, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + let contract_identifier = self.contract_identifier.take().ok_or(NetError::SendError( + "`contract_identifier` not set".to_string(), + ))?; + let clarity_metadata_key = self.clarity_metadata_key.take().ok_or(NetError::SendError( + "`clarity_metadata_key` not set".to_string(), + ))?; + + let tip = match node.load_stacks_chain_tip(&preamble, &contents) { + Ok(tip) => tip, + Err(error_resp) => { + return error_resp.try_into_contents().map_err(NetError::from); + } + }; + + let data_opt = node.with_node_state(|_network, sortdb, chainstate, _mempool, _rpc_args| { + chainstate.maybe_read_only_clarity_tx( + &sortdb.index_handle_at_block(chainstate, &tip)?, + &tip, + |clarity_tx| { + clarity_tx.with_clarity_db_readonly(|clarity_db| { + let data = clarity_db + .store + .get_metadata(&contract_identifier, &clarity_metadata_key) + .ok() + .flatten()?; + + Some(ClarityMetadataResponse { data }) + }) + }, + ) + }); + + let data_resp = match data_opt { + Ok(Some(Some(data))) => data, + Ok(Some(None)) => { + return StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new("Metadata not found".to_string()), + ) + .try_into_contents() + .map_err(NetError::from); + } + Ok(None) | Err(_) => { + return StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new("Chain tip not found".to_string()), + ) + .try_into_contents() + .map_err(NetError::from); + } + }; + + let mut preamble = HttpResponsePreamble::ok_json(&preamble); + 
preamble.set_canonical_stacks_tip_height(Some(node.canonical_stacks_tip_height())); + let body = HttpResponseContents::try_from_json(&data_resp)?; + Ok((preamble, body)) + } +} + +/// Decode the HTTP response +impl HttpResponse for RPCGetClarityMetadataRequestHandler { + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + let metadata: ClarityMetadataResponse = parse_json(preamble, body)?; + Ok(HttpResponsePayload::try_from_json(metadata)?) + } +} + +impl StacksHttpRequest { + pub fn new_getclaritymetadata( + host: PeerHost, + contract_addr: StacksAddress, + contract_name: ContractName, + clarity_metadata_key: String, + tip_req: TipRequest, + ) -> StacksHttpRequest { + StacksHttpRequest::new_for_peer( + host, + "GET".into(), + format!( + "/v2/clarity/metadata/{}/{}/{}", + &contract_addr, &contract_name, &clarity_metadata_key + ), + HttpRequestContents::new().for_tip(tip_req), + ) + .expect("FATAL: failed to construct request from infallible data") + } +} + +impl StacksHttpResponse { + pub fn decode_clarity_metadata_response(self) -> Result { + let contents = self.get_http_payload_ok()?; + let contents_json: serde_json::Value = contents.try_into()?; + let resp: ClarityMetadataResponse = serde_json::from_value(contents_json) + .map_err(|_e| NetError::DeserializeError("Failed to load from JSON".to_string()))?; + Ok(resp) + } +} diff --git a/stackslib/src/net/api/getsortition.rs b/stackslib/src/net/api/getsortition.rs index 9b22d8b82f..b41e516cbf 100644 --- a/stackslib/src/net/api/getsortition.rs +++ b/stackslib/src/net/api/getsortition.rs @@ -29,7 +29,9 @@ use {serde, serde_json}; use crate::chainstate::burn::db::sortdb::SortitionDB; use crate::chainstate::burn::BlockSnapshot; -use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState, NakamotoStagingBlocksConn}; +use crate::chainstate::nakamoto::{ + NakamotoBlock, NakamotoChainState, NakamotoStagingBlocksConn, StacksDBIndexed, +}; use 
crate::chainstate::stacks::db::StacksChainState; use crate::chainstate::stacks::Error as ChainError; use crate::net::api::getblock_v3::NakamotoBlockStream; @@ -85,6 +87,11 @@ pub struct SortitionInfo { pub consensus_hash: ConsensusHash, /// Boolean indicating whether or not there was a succesful sortition (i.e. a winning /// block or miner was chosen). + /// + /// This will *also* be true if this sortition corresponds to a shadow block. This is because + /// the signer does not distinguish between shadow blocks and blocks with sortitions, so until + /// we can update the signer and this interface, we'll have to report the presence of a shadow + /// block tenure in a way that the signer currently understands. pub was_sortition: bool, /// If sortition occurred, and the miner's VRF key registration /// associated a nakamoto mining pubkey with their commit, this @@ -150,13 +157,41 @@ impl GetSortitionHandler { fn get_sortition_info( sortition_sn: BlockSnapshot, sortdb: &SortitionDB, + chainstate: &mut StacksChainState, + tip: &StacksBlockId, ) -> Result { + let is_shadow = chainstate + .nakamoto_blocks_db() + .is_shadow_tenure(&sortition_sn.consensus_hash)?; let (miner_pk_hash160, stacks_parent_ch, committed_block_hash, last_sortition_ch) = - if !sortition_sn.sortition { + if !sortition_sn.sortition && !is_shadow { let handle = sortdb.index_handle(&sortition_sn.sortition_id); let last_sortition = handle.get_last_snapshot_with_sortition(sortition_sn.block_height)?; (None, None, None, Some(last_sortition.consensus_hash)) + } else if !sortition_sn.sortition && is_shadow { + // this is a shadow tenure. + let parent_tenure_ch = chainstate + .index_conn() + .get_parent_tenure_consensus_hash(tip, &sortition_sn.consensus_hash)? + .ok_or_else(|| DBError::NotFoundError)?; + + let parent_tenure_start_header = + NakamotoChainState::get_nakamoto_tenure_start_block_header( + &mut chainstate.index_conn(), + tip, + &parent_tenure_ch, + )? 
+ .ok_or_else(|| DBError::NotFoundError)?; + + ( + Some(Hash160([0x00; 20])), + Some(parent_tenure_ch.clone()), + Some(BlockHeaderHash( + parent_tenure_start_header.index_block_hash().0, + )), + Some(parent_tenure_ch), + ) } else { let block_commit = SortitionDB::get_block_commit(sortdb.conn(), &sortition_sn.winning_block_txid, &sortition_sn.sortition_id)? .ok_or_else(|| { @@ -211,7 +246,7 @@ impl GetSortitionHandler { sortition_id: sortition_sn.sortition_id, parent_sortition_id: sortition_sn.parent_sortition_id, consensus_hash: sortition_sn.consensus_hash, - was_sortition: sortition_sn.sortition, + was_sortition: sortition_sn.sortition || is_shadow, miner_pk_hash160, stacks_parent_ch, last_sortition_ch, @@ -277,7 +312,7 @@ impl RPCRequestHandler for GetSortitionHandler { _contents: HttpRequestContents, node: &mut StacksNodeState, ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { - let result = node.with_node_state(|network, sortdb, _chainstate, _mempool, _rpc_args| { + let result = node.with_node_state(|network, sortdb, chainstate, _mempool, _rpc_args| { let query_result = match self.query { QuerySpecifier::Latest => Ok(Some(network.burnchain_tip.clone())), QuerySpecifier::ConsensusHash(ref consensus_hash) => { @@ -306,7 +341,12 @@ impl RPCRequestHandler for GetSortitionHandler { } }; let sortition_sn = query_result?.ok_or_else(|| ChainError::NoSuchBlockError)?; - Self::get_sortition_info(sortition_sn, sortdb) + Self::get_sortition_info( + sortition_sn, + sortdb, + chainstate, + &network.stacks_tip.block_id(), + ) }); let block = match result { @@ -334,13 +374,18 @@ impl RPCRequestHandler for GetSortitionHandler { if self.query == QuerySpecifier::LatestAndLast { // if latest **and** last are requested, lookup the sortition info for last_sortition_ch if let Some(last_sortition_ch) = last_sortition_ch { - let result = node.with_node_state(|_, sortdb, _, _, _| { + let result = node.with_node_state(|network, sortdb, chainstate, _, _| { let 
last_sortition_sn = SortitionDB::get_block_snapshot_consensus( sortdb.conn(), &last_sortition_ch, )? .ok_or_else(|| ChainError::NoSuchBlockError)?; - Self::get_sortition_info(last_sortition_sn, sortdb) + Self::get_sortition_info( + last_sortition_sn, + sortdb, + chainstate, + &network.stacks_tip.block_id(), + ) }); let last_block = match result { Ok(block) => block, diff --git a/stackslib/src/net/api/getstxtransfercost.rs b/stackslib/src/net/api/getstxtransfercost.rs index b8801e7d7c..78e6e66851 100644 --- a/stackslib/src/net/api/getstxtransfercost.rs +++ b/stackslib/src/net/api/getstxtransfercost.rs @@ -108,7 +108,7 @@ impl RPCRequestHandler for RPCGetStxTransferCostRequestHandler { if let Some((_, fee_estimator, metric)) = rpc_args.get_estimators_ref() { // STX transfer transactions have zero runtime cost - let estimated_cost = ExecutionCost::zero(); + let estimated_cost = ExecutionCost::ZERO; let estimations = RPCPostFeeRateRequestHandler::estimate_tx_fee_from_cost_and_length( &preamble, diff --git a/stackslib/src/net/api/mod.rs b/stackslib/src/net/api/mod.rs index 8fc8ee33ba..8d32308d9d 100644 --- a/stackslib/src/net/api/mod.rs +++ b/stackslib/src/net/api/mod.rs @@ -43,6 +43,8 @@ pub mod getattachmentsinv; pub mod getblock; pub mod getblock_v3; pub mod getblockbyheight; +pub mod getclaritymarfvalue; +pub mod getclaritymetadata; pub mod getconstantval; pub mod getcontractabi; pub mod getcontractsrc; @@ -94,6 +96,8 @@ impl StacksHttp { self.register_rpc_endpoint(getblock::RPCBlocksRequestHandler::new()); self.register_rpc_endpoint(getblock_v3::RPCNakamotoBlockRequestHandler::new()); self.register_rpc_endpoint(getblockbyheight::RPCNakamotoBlockByHeightRequestHandler::new()); + self.register_rpc_endpoint(getclaritymarfvalue::RPCGetClarityMarfRequestHandler::new()); + self.register_rpc_endpoint(getclaritymetadata::RPCGetClarityMetadataRequestHandler::new()); self.register_rpc_endpoint(getconstantval::RPCGetConstantValRequestHandler::new()); 
self.register_rpc_endpoint(getcontractabi::RPCGetContractAbiRequestHandler::new()); self.register_rpc_endpoint(getcontractsrc::RPCGetContractSrcRequestHandler::new()); diff --git a/stackslib/src/net/api/postblock_proposal.rs b/stackslib/src/net/api/postblock_proposal.rs index daa8aaae3b..c832695103 100644 --- a/stackslib/src/net/api/postblock_proposal.rs +++ b/stackslib/src/net/api/postblock_proposal.rs @@ -16,6 +16,9 @@ use std::io::{Read, Write}; use std::thread::{self, JoinHandle, Thread}; +#[cfg(any(test, feature = "testing"))] +use std::time::Duration; +use std::time::Instant; use clarity::vm::ast::ASTRules; use clarity::vm::costs::ExecutionCost; @@ -38,7 +41,7 @@ use crate::burnchains::affirmation::AffirmationMap; use crate::burnchains::Txid; use crate::chainstate::burn::db::sortdb::{SortitionDB, SortitionHandleConn}; use crate::chainstate::nakamoto::miner::NakamotoBlockBuilder; -use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState}; +use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState, NAKAMOTO_BLOCK_VERSION}; use crate::chainstate::stacks::db::blocks::MINIMUM_TX_FEE_RATE_PER_BYTE; use crate::chainstate::stacks::db::{StacksBlockHeaderTypes, StacksChainState}; use crate::chainstate::stacks::miner::{BlockBuilder, BlockLimitFunction, TransactionResult}; @@ -65,6 +68,10 @@ use crate::util_lib::db::Error as DBError; #[cfg(any(test, feature = "testing"))] pub static TEST_VALIDATE_STALL: std::sync::Mutex> = std::sync::Mutex::new(None); +#[cfg(any(test, feature = "testing"))] +/// Artificial delay to add to block validation. +pub static TEST_VALIDATE_DELAY_DURATION_SECS: std::sync::Mutex> = + std::sync::Mutex::new(None); // This enum is used to supply a `reason_code` for validation // rejection responses. 
This is serialized as an enum with string @@ -145,6 +152,7 @@ pub struct BlockValidateOk { pub signer_signature_hash: Sha512Trunc256Sum, pub cost: ExecutionCost, pub size: u64, + pub validation_time_ms: u64, } /// This enum is used for serializing the response to block @@ -354,9 +362,15 @@ impl NakamotoBlockProposal { info!("Block validation is no longer stalled due to testing directive."); } } - let ts_start = get_epoch_time_ms(); - // Measure time from start of function - let time_elapsed = || get_epoch_time_ms().saturating_sub(ts_start); + let start = Instant::now(); + + #[cfg(any(test, feature = "testing"))] + { + if let Some(delay) = *TEST_VALIDATE_DELAY_DURATION_SECS.lock().unwrap() { + warn!("Sleeping for {} seconds to simulate slow processing", delay); + thread::sleep(Duration::from_secs(delay)); + } + } let mainnet = self.chain_id == CHAIN_ID_MAINNET; if self.chain_id != chainstate.chain_id || mainnet != chainstate.mainnet { @@ -374,9 +388,39 @@ impl NakamotoBlockProposal { }); } - let sort_tip = SortitionDB::get_canonical_sortition_tip(sortdb.conn())?; - let burn_dbconn: SortitionHandleConn = sortdb.index_handle(&sort_tip); - let mut db_handle = sortdb.index_handle(&sort_tip); + // Check block version. If it's less than the compiled-in version, just emit a warning + // because there's a new version of the node / signer binary available that really ought to + // be used (hint, hint) + if self.block.header.version != NAKAMOTO_BLOCK_VERSION { + warn!("Proposed block has unexpected version. Upgrade your node and/or signer ASAP."; + "block.header.version" => %self.block.header.version, + "expected" => %NAKAMOTO_BLOCK_VERSION); + } + + // open sortition view to the current burn view. + // If the block has a TenureChange with an Extend cause, then the burn view is whatever is + // indicated in the TenureChange. + // Otherwise, it's the same as the block's parent's burn view. 
+ let parent_stacks_header = NakamotoChainState::get_block_header( + chainstate.db(), + &self.block.header.parent_block_id, + )? + .ok_or_else(|| BlockValidateRejectReason { + reason_code: ValidateRejectCode::InvalidBlock, + reason: "Invalid parent block".into(), + })?; + + let burn_view_consensus_hash = + NakamotoChainState::get_block_burn_view(sortdb, &self.block, &parent_stacks_header)?; + let sort_tip = + SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &burn_view_consensus_hash)? + .ok_or_else(|| BlockValidateRejectReason { + reason_code: ValidateRejectCode::NoSuchTenure, + reason: "Failed to find sortition for block tenure".to_string(), + })?; + + let burn_dbconn: SortitionHandleConn = sortdb.index_handle(&sort_tip.sortition_id); + let mut db_handle = sortdb.index_handle(&sort_tip.sortition_id); // (For the signer) // Verify that the block's tenure is on the canonical sortition history @@ -403,7 +447,8 @@ impl NakamotoBlockProposal { }; // Static validation checks - NakamotoChainState::validate_nakamoto_block_burnchain( + NakamotoChainState::validate_normal_nakamoto_block_burnchain( + chainstate.nakamoto_blocks_db(), &db_handle, expected_burn_opt, &self.block, @@ -412,14 +457,6 @@ impl NakamotoBlockProposal { )?; // Validate txs against chainstate - let parent_stacks_header = NakamotoChainState::get_block_header( - chainstate.db(), - &self.block.header.parent_block_id, - )? - .ok_or_else(|| BlockValidateRejectReason { - reason_code: ValidateRejectCode::InvalidBlock, - reason: "Invalid parent block".into(), - })?; // Validate the block's timestamp. It must be: // - Greater than the parent block's timestamp @@ -515,6 +552,10 @@ impl NakamotoBlockProposal { } let mut block = builder.mine_nakamoto_block(&mut tenure_tx); + // Override the block version with the one from the proposal. This must be + // done before computing the block hash, because the block hash includes the + // version in its computation. 
+ block.header.version = self.block.header.version; let size = builder.get_bytes_so_far(); let cost = builder.tenure_finish(tenure_tx)?; @@ -545,6 +586,8 @@ impl NakamotoBlockProposal { }); } + let validation_time_ms = u64::try_from(start.elapsed().as_millis()).unwrap_or(u64::MAX); + info!( "Participant: validated anchored block"; "block_header_hash" => %computed_block_header_hash, @@ -553,7 +596,7 @@ impl NakamotoBlockProposal { "parent_stacks_block_id" => %block.header.parent_block_id, "block_size" => size, "execution_cost" => %cost, - "validation_time_ms" => time_elapsed(), + "validation_time_ms" => validation_time_ms, "tx_fees_microstacks" => block.txs.iter().fold(0, |agg: u64, tx| { agg.saturating_add(tx.get_tx_fee()) }) @@ -563,6 +606,7 @@ impl NakamotoBlockProposal { signer_signature_hash: block.header.signer_signature_hash(), cost, size, + validation_time_ms, }) } } @@ -647,6 +691,12 @@ impl HttpRequest for RPCBlockProposalRequestHandler { } }; + if block_proposal.block.is_shadow_block() { + return Err(Error::DecodeError( + "Shadow blocks cannot be submitted for validation".to_string(), + )); + } + self.block_proposal = Some(block_proposal); Ok(HttpRequestContents::new().query_string(query)) } diff --git a/stackslib/src/net/api/postblock_v3.rs b/stackslib/src/net/api/postblock_v3.rs index 9bd174d322..aff20d962f 100644 --- a/stackslib/src/net/api/postblock_v3.rs +++ b/stackslib/src/net/api/postblock_v3.rs @@ -70,7 +70,7 @@ impl HttpRequest for RPCPostBlockRequestHandler { } fn path_regex(&self) -> Regex { - Regex::new(&format!("^{PATH}$")).unwrap() + Regex::new(&format!("^{}(/)?$", PATH.trim_end_matches('/'))).unwrap() } fn metrics_identifier(&self) -> &str { diff --git a/stackslib/src/net/api/tests/getclaritymarfvalue.rs b/stackslib/src/net/api/tests/getclaritymarfvalue.rs new file mode 100644 index 0000000000..7255d1ee99 --- /dev/null +++ b/stackslib/src/net/api/tests/getclaritymarfvalue.rs @@ -0,0 +1,205 @@ +// Copyright (C) 2024 Stacks Open Internet 
Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use clarity::vm::database::{ClarityDeserializable, STXBalance}; +use clarity::vm::types::{QualifiedContractIdentifier, StacksAddressExtensions, TypeSignature}; +use clarity::vm::{ClarityName, ContractName, Value}; +use stacks_common::codec::StacksMessageCodec; +use stacks_common::types::chainstate::{StacksAddress, TrieHash}; +use stacks_common::types::net::PeerHost; +use stacks_common::types::Address; + +use super::test_rpc; +use crate::net::api::*; +use crate::net::connection::ConnectionOptions; +use crate::net::httpcore::{ + HttpPreambleExtensions, HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, + StacksHttpRequest, +}; +use crate::net::{ProtocolFamily, TipRequest}; + +#[test] +fn test_try_parse_request() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + let vm_key_epoch = TrieHash::from_key("vm-epoch::epoch-version"); + let vm_key_trip = + TrieHash::from_key("vm::ST1SJ3DTE5DN7X54YDH5D64R3BCB6A2AG2ZQ8YPD5.counter::1::count"); + let vm_key_quad = + TrieHash::from_key("vm::ST1SJ3DTE5DN7X54YDH5D64R3BCB6A2AG2ZQ8YPD5.counter::0::data::1234"); + let valid_keys = [vm_key_epoch, vm_key_trip, vm_key_quad]; + + for key in valid_keys { + let request = 
StacksHttpRequest::new_getclaritymarf( + addr.into(), + key, + TipRequest::SpecificTip(StacksBlockId([0x22; 32])), + true, + ); + assert_eq!( + request.contents().tip_request(), + TipRequest::SpecificTip(StacksBlockId([0x22; 32])) + ); + assert_eq!(request.contents().get_with_proof(), true); + + let bytes = request.try_serialize().unwrap(); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = getclaritymarfvalue::RPCGetClarityMarfRequestHandler::new(); + let mut parsed_request = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap(); + + // parsed request consumes headers that would not be in a constructed request + parsed_request.clear_headers(); + let (preamble, contents) = parsed_request.destruct(); + + // consumed path args + assert_eq!(handler.marf_key_hash, Some(key.clone())); + + assert_eq!(&preamble, request.preamble()); + + handler.restart(); + assert!(handler.marf_key_hash.is_none()); + } +} + +#[test] +fn test_try_make_response() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + + let mut requests = vec![]; + + // query existing marf value + let request = StacksHttpRequest::new_getclaritymarf( + addr.into(), + TrieHash::from_key("vm::ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world::1::bar"), + TipRequest::UseLatestAnchoredTip, + true, + ); + requests.push(request); + + // query existing unconfirmed + let request = StacksHttpRequest::new_getclaritymarf( + addr.into(), + TrieHash::from_key("vm::ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world-unconfirmed::1::bar-unconfirmed"), + TipRequest::UseLatestUnconfirmedTip, + true, + ); + requests.push(request); + + // query non-existant var + let request = StacksHttpRequest::new_getclaritymarf( + addr.into(), + TrieHash::from_key( + "vm::ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world::1::does-not-exist", + ), + TipRequest::UseLatestAnchoredTip, + true, + ); + 
requests.push(request); + + // query non-existant contract + let request = StacksHttpRequest::new_getclaritymarf( + addr.into(), + TrieHash::from_key("vm::ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.does-not-exist::1::bar"), + TipRequest::UseLatestAnchoredTip, + true, + ); + requests.push(request); + + // query vm-account balance + let request = StacksHttpRequest::new_getclaritymarf( + addr.into(), + TrieHash::from_key("vm-account::ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R::19"), + TipRequest::UseLatestAnchoredTip, + true, + ); + requests.push(request); + + let mut responses = test_rpc(function_name!(), requests); + + // existing data + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + assert_eq!( + response.preamble().get_canonical_stacks_tip_height(), + Some(1) + ); + + let resp = response.decode_clarity_marf_response().unwrap(); + assert_eq!(resp.data, "0x0000000000000000000000000000000000"); + assert!(resp.marf_proof.is_some()); + + // unconfirmed data + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + assert_eq!( + response.preamble().get_canonical_stacks_tip_height(), + Some(1) + ); + + let resp = response.decode_clarity_marf_response().unwrap(); + assert_eq!(resp.data, "0x0100000000000000000000000000000001"); + assert!(resp.marf_proof.is_some()); + + // no such var + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let (preamble, body) = response.destruct(); + assert_eq!(preamble.status_code, 404); + + // no such contract + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let (preamble, body) = response.destruct(); + assert_eq!(preamble.status_code, 404); + + // vm-account balance + let 
response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let resp = response.decode_clarity_marf_response().unwrap(); + let balance = STXBalance::deserialize(&resp.data[2..]).unwrap(); + + assert_eq!(balance.amount_unlocked(), 1_000_000_000); + assert_eq!(balance.amount_locked(), 0); +} diff --git a/stackslib/src/net/api/tests/getclaritymetadata.rs b/stackslib/src/net/api/tests/getclaritymetadata.rs new file mode 100644 index 0000000000..495bbb514f --- /dev/null +++ b/stackslib/src/net/api/tests/getclaritymetadata.rs @@ -0,0 +1,373 @@ +// Copyright (C) 2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use clarity::vm::database::{ClaritySerializable, DataMapMetadata, DataVariableMetadata}; +use clarity::vm::types::{QualifiedContractIdentifier, StacksAddressExtensions, TypeSignature}; +use clarity::vm::{ClarityName, ContractName}; +use serde_json::json; +use stacks_common::codec::StacksMessageCodec; +use stacks_common::types::chainstate::StacksAddress; +use stacks_common::types::net::PeerHost; +use stacks_common::types::Address; + +use super::test_rpc; +use crate::net::api::*; +use crate::net::connection::ConnectionOptions; +use crate::net::http::Error as HttpError; +use crate::net::httpcore::{ + HttpPreambleExtensions, HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, + StacksHttpRequest, +}; +use crate::net::{Error as NetError, ProtocolFamily, TipRequest}; + +#[test] +fn test_try_parse_request() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + let request = StacksHttpRequest::new_getclaritymetadata( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world".try_into().unwrap(), + "vm-metadata::9::contract-size".to_string(), + TipRequest::SpecificTip(StacksBlockId([0x22; 32])), + ); + assert_eq!( + request.contents().tip_request(), + TipRequest::SpecificTip(StacksBlockId([0x22; 32])) + ); + let bytes = request.try_serialize().unwrap(); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = getclaritymetadata::RPCGetClarityMetadataRequestHandler::new(); + let mut parsed_request = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap(); + + // parsed request consumes headers that would not be in a constructed request + parsed_request.clear_headers(); + let (preamble, contents) = parsed_request.destruct(); + + // consumed path args + 
assert_eq!( + handler.clarity_metadata_key, + Some("vm-metadata::9::contract-size".to_string()) + ); + assert_eq!( + handler.contract_identifier, + Some( + QualifiedContractIdentifier::parse( + "ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world" + ) + .unwrap() + ) + ); + + assert_eq!(&preamble, request.preamble()); + + handler.restart(); + assert!(handler.clarity_metadata_key.is_none()); +} + +#[test] +fn test_try_parse_invalid_store_type() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + let request = StacksHttpRequest::new_getclaritymetadata( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world".try_into().unwrap(), + "vm-metadata::2::contract-size".to_string(), + TipRequest::SpecificTip(StacksBlockId([0x22; 32])), + ); + assert_eq!( + request.contents().tip_request(), + TipRequest::SpecificTip(StacksBlockId([0x22; 32])) + ); + let bytes = request.try_serialize().unwrap(); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = getclaritymetadata::RPCGetClarityMetadataRequestHandler::new(); + let parsed_request_err = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap_err(); + + assert_eq!( + parsed_request_err, + HttpError::DecodeError("Invalid metadata type".to_string()).into() + ); + handler.restart(); +} + +#[test] +fn test_try_parse_invalid_contract_metadata_var_name() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + let request = StacksHttpRequest::new_getclaritymetadata( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world".try_into().unwrap(), + "vm-metadata::9::contract-invalid-key".to_string(), + 
TipRequest::SpecificTip(StacksBlockId([0x22; 32])), + ); + assert_eq!( + request.contents().tip_request(), + TipRequest::SpecificTip(StacksBlockId([0x22; 32])) + ); + let bytes = request.try_serialize().unwrap(); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = getclaritymetadata::RPCGetClarityMetadataRequestHandler::new(); + let parsed_request_err = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap_err(); + + assert_eq!( + parsed_request_err, + HttpError::DecodeError("Invalid metadata var name".to_string()).into() + ); + handler.restart(); +} + +#[test] +fn test_try_parse_request_for_analysis() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + let request = StacksHttpRequest::new_getclaritymetadata( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world".try_into().unwrap(), + "analysis".to_string(), + TipRequest::SpecificTip(StacksBlockId([0x22; 32])), + ); + assert_eq!( + request.contents().tip_request(), + TipRequest::SpecificTip(StacksBlockId([0x22; 32])) + ); + let bytes = request.try_serialize().unwrap(); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = getclaritymetadata::RPCGetClarityMetadataRequestHandler::new(); + let mut parsed_request = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap(); + + // parsed request consumes headers that would not be in a constructed request + parsed_request.clear_headers(); + let (preamble, contents) = parsed_request.destruct(); + + // consumed path args + assert_eq!(handler.clarity_metadata_key, Some("analysis".to_string())); + assert_eq!( + handler.contract_identifier, + Some( + QualifiedContractIdentifier::parse( + 
"ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R.hello-world" + ) + .unwrap() + ) + ); + + assert_eq!(&preamble, request.preamble()); + + handler.restart(); + assert!(handler.clarity_metadata_key.is_none()); +} + +#[test] +fn test_try_make_response() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + + let mut requests = vec![]; + + // query invalid metadata key (wrong store type) + let request = StacksHttpRequest::new_getclaritymetadata( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world".try_into().unwrap(), + "vm-metadata::2::bar".to_string(), + TipRequest::UseLatestAnchoredTip, + ); + requests.push(request); + + // query existing contract size metadata + let request = StacksHttpRequest::new_getclaritymetadata( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world".try_into().unwrap(), + "vm-metadata::9::contract-size".to_string(), + TipRequest::UseLatestAnchoredTip, + ); + requests.push(request); + + // query existing data map metadata + let request = StacksHttpRequest::new_getclaritymetadata( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world".try_into().unwrap(), + "vm-metadata::5::test-map".to_string(), + TipRequest::UseLatestAnchoredTip, + ); + requests.push(request); + + // query existing data var metadata + let request = StacksHttpRequest::new_getclaritymetadata( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world".try_into().unwrap(), + "vm-metadata::6::bar".to_string(), + TipRequest::UseLatestAnchoredTip, + ); + requests.push(request); + + // query existing data var metadata + let request = StacksHttpRequest::new_getclaritymetadata( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world".try_into().unwrap(), + 
"vm-metadata::6::bar".to_string(), + TipRequest::UseLatestAnchoredTip, + ); + requests.push(request); + + // query existing data var metadata + let request = StacksHttpRequest::new_getclaritymetadata( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world".try_into().unwrap(), + "vm-metadata::6::bar".to_string(), + TipRequest::UseLatestAnchoredTip, + ); + requests.push(request); + + // query undeclared var metadata + let request = StacksHttpRequest::new_getclaritymetadata( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world".try_into().unwrap(), + "vm-metadata::6::non-existing-var".to_string(), + TipRequest::UseLatestAnchoredTip, + ); + requests.push(request); + + // query existing contract size metadata + let request = StacksHttpRequest::new_getclaritymetadata( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world".try_into().unwrap(), + "vm-metadata::9::contract-size".to_string(), + TipRequest::UseLatestAnchoredTip, + ); + requests.push(request); + + // query invalid metadata key (wrong store type) + let request = StacksHttpRequest::new_getclaritymetadata( + addr.into(), + StacksAddress::from_string("ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R").unwrap(), + "hello-world".try_into().unwrap(), + "vm-metadata::2::bar".to_string(), + TipRequest::UseLatestAnchoredTip, + ); + requests.push(request); + + let mut responses = test_rpc(function_name!(), requests); + + // unknwnon data var + let response = responses.remove(0); + let (preamble, body) = response.destruct(); + assert_eq!(preamble.status_code, 400); + + // contract size metadata + let response = responses.remove(0); + assert_eq!( + response.preamble().get_canonical_stacks_tip_height(), + Some(1) + ); + let resp = response.decode_clarity_metadata_response().unwrap(); + assert_eq!(resp.data, "1432"); + + // data map metadata + let 
response = responses.remove(0); + let resp = response.decode_clarity_metadata_response().unwrap(); + let expected = DataMapMetadata { + key_type: TypeSignature::UIntType, + value_type: TypeSignature::UIntType, + }; + assert_eq!(resp.data, expected.serialize()); + + // data var metadata + let response = responses.remove(0); + let resp = response.decode_clarity_metadata_response().unwrap(); + let expected = DataVariableMetadata { + value_type: TypeSignature::IntType, + }; + assert_eq!(resp.data, expected.serialize()); + + // data var metadata + let response = responses.remove(0); + let resp = response.decode_clarity_metadata_response().unwrap(); + let expected = DataVariableMetadata { + value_type: TypeSignature::IntType, + }; + assert_eq!(resp.data, expected.serialize()); + + // data var metadata + let response = responses.remove(0); + let resp = response.decode_clarity_metadata_response().unwrap(); + let expected = DataVariableMetadata { + value_type: TypeSignature::IntType, + }; + assert_eq!(resp.data, expected.serialize()); + + // invalid metadata key + let response = responses.remove(0); + let (preamble, body) = response.destruct(); + assert_eq!(preamble.status_code, 404); + + // contract size metadata + let response = responses.remove(0); + assert_eq!( + response.preamble().get_canonical_stacks_tip_height(), + Some(1) + ); + let resp = response.decode_clarity_metadata_response().unwrap(); + assert_eq!(resp.data, "1432"); + + // unknwnon data var + let response = responses.remove(0); + let (preamble, body) = response.destruct(); + assert_eq!(preamble.status_code, 400); +} diff --git a/stackslib/src/net/api/tests/getsigner.rs b/stackslib/src/net/api/tests/getsigner.rs index ffaa486f27..a3b112d0e3 100644 --- a/stackslib/src/net/api/tests/getsigner.rs +++ b/stackslib/src/net/api/tests/getsigner.rs @@ -139,7 +139,7 @@ fn test_try_make_response() { let response = responses.remove(0); info!("response: {:?}", &response); let signer_response = 
response.decode_signer().unwrap(); - assert_eq!(signer_response.blocks_signed, 40); + assert_eq!(signer_response.blocks_signed, 20); // Signer doesn't exist so it should not have signed anything let response = responses.remove(0); diff --git a/stackslib/src/net/api/tests/mod.rs b/stackslib/src/net/api/tests/mod.rs index 40d329686d..c6c62dd1fe 100644 --- a/stackslib/src/net/api/tests/mod.rs +++ b/stackslib/src/net/api/tests/mod.rs @@ -47,7 +47,7 @@ use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse}; use crate::net::relay::Relayer; use crate::net::rpc::ConversationHttp; use crate::net::test::{TestEventObserver, TestPeer, TestPeerConfig}; -use crate::net::tests::inv::nakamoto::make_nakamoto_peers_from_invs; +use crate::net::tests::inv::nakamoto::make_nakamoto_peers_from_invs_ext; use crate::net::{ Attachment, AttachmentInstance, MemPoolEventDispatcher, RPCHandlerArgs, StackerDBConfig, StacksNodeState, UrlString, @@ -61,6 +61,8 @@ mod getattachmentsinv; mod getblock; mod getblock_v3; mod getblockbyheight; +mod getclaritymarfvalue; +mod getclaritymetadata; mod getconstantval; mod getcontractabi; mod getcontractsrc; @@ -120,7 +122,7 @@ const TEST_CONTRACT: &'static str = " (ok 1))) (begin (map-set unit-map { account: 'ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R } { units: 123 })) - + (define-read-only (ro-confirmed) u1) (define-public (do-test) (ok u0)) @@ -429,7 +431,7 @@ impl<'a> TestRPC<'a> { let tip = SortitionDB::get_canonical_burn_chain_tip(&peer_1.sortdb.as_ref().unwrap().conn()) .unwrap(); - let mut anchor_cost = ExecutionCost::zero(); + let mut anchor_cost = ExecutionCost::ZERO; let mut anchor_size = 0; // make a block @@ -849,8 +851,18 @@ impl<'a> TestRPC<'a> { true, true, true, true, true, true, true, true, true, true, ]]; - let (mut peer, mut other_peers) = - make_nakamoto_peers_from_invs(function_name!(), observer, 10, 3, bitvecs.clone(), 1); + let (mut peer, mut other_peers) = make_nakamoto_peers_from_invs_ext( + function_name!(), + observer, + 
bitvecs.clone(), + |boot_plan| { + boot_plan + .with_pox_constants(10, 3) + .with_extra_peers(1) + .with_initial_balances(vec![]) + .with_malleablized_blocks(false) + }, + ); let mut other_peer = other_peers.pop().unwrap(); let peer_1_indexer = BitcoinIndexer::new_unit_test(&peer.config.burnchain.working_dir); @@ -1034,7 +1046,7 @@ impl<'a> TestRPC<'a> { peer_2.sortdb = Some(peer_2_sortdb); peer_2.stacks_node = Some(peer_2_stacks_node); - let mut peer_1_mempool = peer_1.mempool.take().unwrap(); + peer_2.mempool = Some(peer_2_mempool); convo_send_recv(&mut convo_2, &mut convo_1); @@ -1043,8 +1055,6 @@ impl<'a> TestRPC<'a> { // hack around the borrow-checker convo_send_recv(&mut convo_1, &mut convo_2); - peer_2.mempool = Some(peer_2_mempool); - let peer_1_sortdb = peer_1.sortdb.take().unwrap(); let mut peer_1_stacks_node = peer_1.stacks_node.take().unwrap(); @@ -1066,27 +1076,45 @@ impl<'a> TestRPC<'a> { .unwrap(); } - { - let rpc_args = RPCHandlerArgs::default(); - let mut node_state = StacksNodeState::new( - &mut peer_1.network, - &peer_1_sortdb, - &mut peer_1_stacks_node.chainstate, - &mut peer_1_mempool, - &rpc_args, - false, - ); - convo_1.chat(&mut node_state).unwrap(); - } - - convo_1.try_flush().unwrap(); - peer_1.sortdb = Some(peer_1_sortdb); peer_1.stacks_node = Some(peer_1_stacks_node); - peer_1.mempool = Some(peer_1_mempool); - // should have gotten a reply - let resp_opt = convo_1.try_get_response(); + let resp_opt = loop { + debug!("Peer 1 try get response"); + convo_send_recv(&mut convo_1, &mut convo_2); + { + let peer_1_sortdb = peer_1.sortdb.take().unwrap(); + let mut peer_1_stacks_node = peer_1.stacks_node.take().unwrap(); + let mut peer_1_mempool = peer_1.mempool.take().unwrap(); + + let rpc_args = RPCHandlerArgs::default(); + let mut node_state = StacksNodeState::new( + &mut peer_1.network, + &peer_1_sortdb, + &mut peer_1_stacks_node.chainstate, + &mut peer_1_mempool, + &rpc_args, + false, + ); + + convo_1.chat(&mut node_state).unwrap(); + + 
peer_1.sortdb = Some(peer_1_sortdb); + peer_1.stacks_node = Some(peer_1_stacks_node); + peer_1.mempool = Some(peer_1_mempool); + } + + convo_1.try_flush().unwrap(); + + info!("Try get response from request {:?}", &request); + + // should have gotten a reply + let resp_opt = convo_1.try_get_response(); + if resp_opt.is_some() { + break resp_opt; + } + }; + assert!(resp_opt.is_some()); let resp = resp_opt.unwrap(); diff --git a/stackslib/src/net/api/tests/postblock_proposal.rs b/stackslib/src/net/api/tests/postblock_proposal.rs index a8087bf36a..481d0b2047 100644 --- a/stackslib/src/net/api/tests/postblock_proposal.rs +++ b/stackslib/src/net/api/tests/postblock_proposal.rs @@ -239,7 +239,7 @@ fn test_try_make_response() { let miner_privk = &rpc_test.peer_1.miner.nakamoto_miner_key(); - let mut block = { + let mut good_block = { let chainstate = rpc_test.peer_1.chainstate(); let parent_stacks_header = NakamotoChainState::get_block_header(chainstate.db(), &stacks_tip) @@ -315,12 +315,12 @@ fn test_try_make_response() { }; // Increment the timestamp by 1 to ensure it is different from the previous block - block.header.timestamp += 1; - rpc_test.peer_1.miner.sign_nakamoto_block(&mut block); + good_block.header.timestamp += 1; + rpc_test.peer_1.miner.sign_nakamoto_block(&mut good_block); // post the valid block proposal let proposal = NakamotoBlockProposal { - block: block.clone(), + block: good_block.clone(), chain_id: 0x80000000, }; @@ -335,12 +335,16 @@ fn test_try_make_response() { requests.push(request); // Set the timestamp to a value in the past - block.header.timestamp -= 10000; - rpc_test.peer_1.miner.sign_nakamoto_block(&mut block); + let mut early_time_block = good_block.clone(); + early_time_block.header.timestamp -= 10000; + rpc_test + .peer_1 + .miner + .sign_nakamoto_block(&mut early_time_block); // post the invalid block proposal let proposal = NakamotoBlockProposal { - block: block.clone(), + block: early_time_block, chain_id: 0x80000000, }; @@ -355,12 
+359,16 @@ fn test_try_make_response() { requests.push(request); // Set the timestamp to a value in the future - block.header.timestamp += 20000; - rpc_test.peer_1.miner.sign_nakamoto_block(&mut block); + let mut late_time_block = good_block.clone(); + late_time_block.header.timestamp += 20000; + rpc_test + .peer_1 + .miner + .sign_nakamoto_block(&mut late_time_block); // post the invalid block proposal let proposal = NakamotoBlockProposal { - block: block.clone(), + block: late_time_block, chain_id: 0x80000000, }; @@ -378,12 +386,14 @@ fn test_try_make_response() { let observer = ProposalTestObserver::new(); let proposal_observer = Arc::clone(&observer.proposal_observer); + info!("Run requests with observer"); let mut responses = rpc_test.run_with_observer(requests, Some(&observer)); let response = responses.remove(0); - // Wait for the results to be non-empty + // Wait for the results of all 3 requests loop { + info!("Wait for results to be non-empty"); if proposal_observer .lock() .unwrap() @@ -403,7 +413,23 @@ fn test_try_make_response() { let mut results = observer.results.lock().unwrap(); let result = results.remove(0); - assert!(result.is_ok()); + match result { + Ok(postblock_proposal::BlockValidateOk { + signer_signature_hash, + cost, + size, + validation_time_ms, + }) => { + assert_eq!( + signer_signature_hash, + good_block.header.signer_signature_hash() + ); + assert_eq!(cost, ExecutionCost::ZERO); + assert_eq!(size, 180); + assert!(validation_time_ms > 0 && validation_time_ms < 60000); + } + _ => panic!("expected ok"), + } let result = results.remove(0); match result { diff --git a/stackslib/src/net/api/tests/postblock_v3.rs b/stackslib/src/net/api/tests/postblock_v3.rs index 5cc652fc83..0b0a95f3a4 100644 --- a/stackslib/src/net/api/tests/postblock_v3.rs +++ b/stackslib/src/net/api/tests/postblock_v3.rs @@ -214,6 +214,66 @@ fn handle_req_accepted() { assert_eq!(resp.stacks_block_id, next_block_id); } +#[test] +fn handle_req_without_trailing_accepted() { 
+ let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let path_without_slash: &str = "/v3/blocks/upload"; + let observer = TestEventObserver::new(); + let mut rpc_test = TestRPC::setup_nakamoto(function_name!(), &observer); + let (next_block, ..) = rpc_test.peer_1.single_block_tenure( + &rpc_test.privk1, + |_| {}, + |burn_ops| { + rpc_test.peer_2.next_burnchain_block(burn_ops.clone()); + }, + |_| true, + ); + let next_block_id = next_block.block_id(); + let mut requests = vec![]; + + // post the block + requests.push( + StacksHttpRequest::new_for_peer( + addr.into(), + "POST".into(), + path_without_slash.into(), + HttpRequestContents::new().payload_stacks(&next_block), + ) + .unwrap(), + ); + + // idempotent + requests.push( + StacksHttpRequest::new_for_peer( + addr.into(), + "POST".into(), + path_without_slash.into(), + HttpRequestContents::new().payload_stacks(&next_block), + ) + .unwrap(), + ); + let mut responses = rpc_test.run(requests); + + let response = responses.remove(0); + info!( + "Response for the request that has the path without the last '/': {}", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let resp = response.decode_stacks_block_accepted().unwrap(); + assert_eq!(resp.accepted, true); + assert_eq!(resp.stacks_block_id, next_block_id); + + let response = responses.remove(0); + info!( + "Response for the request that has the path without the last '/': {}", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + let resp = response.decode_stacks_block_accepted().unwrap(); + assert_eq!(resp.accepted, false); + assert_eq!(resp.stacks_block_id, next_block_id); +} + #[test] fn handle_req_unknown_burn_block() { let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); diff --git a/stackslib/src/net/chat.rs b/stackslib/src/net/chat.rs index 7d45b39769..1d8e5d10d2 100644 --- a/stackslib/src/net/chat.rs +++ b/stackslib/src/net/chat.rs @@ -6375,6 +6375,8 @@ mod test { } 
} + // TODO: test for has_acceptable_epoch() + #[test] fn convo_process_relayers() { let conn_opts = ConnectionOptions::default(); diff --git a/stackslib/src/net/connection.rs b/stackslib/src/net/connection.rs index 4eeec0daaf..0e58adb36e 100644 --- a/stackslib/src/net/connection.rs +++ b/stackslib/src/net/connection.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use std::collections::VecDeque; +use std::collections::{HashMap, VecDeque}; use std::io::{Read, Write}; use std::ops::{Deref, DerefMut}; use std::sync::mpsc::{ @@ -24,7 +24,7 @@ use std::time::Duration; use std::{io, net}; use clarity::vm::costs::ExecutionCost; -use clarity::vm::types::BOUND_VALUE_SERIALIZATION_HEX; +use clarity::vm::types::{QualifiedContractIdentifier, BOUND_VALUE_SERIALIZATION_HEX}; use stacks_common::codec::{StacksMessageCodec, MAX_MESSAGE_LEN}; use stacks_common::types::net::PeerAddress; use stacks_common::util::hash::to_hex; @@ -44,7 +44,8 @@ use crate::net::neighbors::{ WALK_SEED_PROBABILITY, WALK_STATE_TIMEOUT, }; use crate::net::{ - Error as net_error, MessageSequence, Preamble, ProtocolFamily, RelayData, StacksHttp, StacksP2P, + Error as net_error, MessageSequence, NeighborAddress, Preamble, ProtocolFamily, RelayData, + StacksHttp, StacksP2P, }; /// Receiver notification handle. 
@@ -433,6 +434,8 @@ pub struct ConnectionOptions { pub nakamoto_unconfirmed_downloader_interval_ms: u128, /// The authorization token to enable privileged RPC endpoints pub auth_token: Option, + /// StackerDB replicas to talk to for a particular smart contract + pub stackerdb_hint_replicas: HashMap>, // fault injection /// Disable neighbor walk and discovery @@ -565,6 +568,7 @@ impl std::default::Default for ConnectionOptions { nakamoto_inv_sync_burst_interval_ms: 1_000, // wait 1 second after a sortition before running inventory sync nakamoto_unconfirmed_downloader_interval_ms: 5_000, // run unconfirmed downloader once every 5 seconds auth_token: None, + stackerdb_hint_replicas: HashMap::new(), // no faults on by default disable_neighbor_walk: false, diff --git a/stackslib/src/net/download/nakamoto/download_state_machine.rs b/stackslib/src/net/download/nakamoto/download_state_machine.rs index 42d228aca1..4c509ed5c1 100644 --- a/stackslib/src/net/download/nakamoto/download_state_machine.rs +++ b/stackslib/src/net/download/nakamoto/download_state_machine.rs @@ -1184,6 +1184,16 @@ impl NakamotoDownloadStateMachine { continue; } + let _ = downloader + .try_advance_from_chainstate(chainstate) + .map_err(|e| { + warn!( + "Failed to advance downloader in state {} for {}: {:?}", + &downloader.state, &downloader.naddr, &e + ); + e + }); + debug!( "Send request to {} for tenure {:?} (state {})", &naddr, @@ -1301,13 +1311,16 @@ impl NakamotoDownloadStateMachine { fn download_confirmed_tenures( &mut self, network: &mut PeerNetwork, + chainstate: &mut StacksChainState, max_count: usize, ) -> HashMap> { // queue up more downloaders self.update_tenure_downloaders(max_count, &network.current_reward_sets); // run all downloaders - let new_blocks = self.tenure_downloads.run(network, &mut self.neighbor_rpc); + let new_blocks = self + .tenure_downloads + .run(network, &mut self.neighbor_rpc, chainstate); new_blocks } @@ -1318,7 +1331,7 @@ impl NakamotoDownloadStateMachine { &mut 
self, network: &mut PeerNetwork, sortdb: &SortitionDB, - chainstate: &StacksChainState, + chainstate: &mut StacksChainState, highest_processed_block_id: Option, ) -> HashMap> { // queue up more downloaders @@ -1340,7 +1353,7 @@ impl NakamotoDownloadStateMachine { // already downloaded all confirmed tenures), so there's no risk of clobberring any other // in-flight requests. let new_confirmed_blocks = if self.tenure_downloads.inflight() > 0 { - self.download_confirmed_tenures(network, 0) + self.download_confirmed_tenures(network, chainstate, 0) } else { HashMap::new() }; @@ -1415,7 +1428,7 @@ impl NakamotoDownloadStateMachine { burnchain_height: u64, network: &mut PeerNetwork, sortdb: &SortitionDB, - chainstate: &StacksChainState, + chainstate: &mut StacksChainState, ibd: bool, ) -> HashMap> { debug!( @@ -1462,6 +1475,7 @@ impl NakamotoDownloadStateMachine { NakamotoDownloadState::Confirmed => { let new_blocks = self.download_confirmed_tenures( network, + chainstate, usize::try_from(network.get_connection_opts().max_inflight_blocks) .expect("FATAL: max_inflight_blocks exceeds usize::MAX"), ); diff --git a/stackslib/src/net/download/nakamoto/tenure.rs b/stackslib/src/net/download/nakamoto/tenure.rs index ba1ac81033..0f4e3d53cb 100644 --- a/stackslib/src/net/download/nakamoto/tenure.rs +++ b/stackslib/src/net/download/nakamoto/tenure.rs @@ -98,6 +98,10 @@ impl WantedTenure { pub struct TenureStartEnd { /// Consensus hash that identifies the start of the tenure pub tenure_id_consensus_hash: ConsensusHash, + /// Consensus hash that identifies the snapshot with the start block ID + pub start_block_snapshot_consensus_hash: ConsensusHash, + /// Consensus hash that identifies the snapshot with the end block ID + pub end_block_snapshot_consensus_hash: ConsensusHash, /// Burnchain block height of tenure ID consensus hash pub tenure_id_burn_block_height: u64, /// Tenure-start block ID @@ -122,7 +126,9 @@ impl TenureStartEnd { pub fn new( tenure_id_consensus_hash: 
ConsensusHash, tenure_id_burn_block_height: u64, + start_block_snapshot_consensus_hash: ConsensusHash, start_block_id: StacksBlockId, + end_block_snapshot_consensus_hash: ConsensusHash, end_block_id: StacksBlockId, start_reward_cycle: u64, end_reward_cycle: u64, @@ -131,7 +137,9 @@ impl TenureStartEnd { Self { tenure_id_consensus_hash, tenure_id_burn_block_height, + start_block_snapshot_consensus_hash, start_block_id, + end_block_snapshot_consensus_hash, end_block_id, start_reward_cycle, end_reward_cycle, @@ -219,7 +227,9 @@ impl TenureStartEnd { let tenure_start_end = TenureStartEnd::new( wt.tenure_id_consensus_hash.clone(), wt.burn_height, + wt_start.tenure_id_consensus_hash.clone(), wt_start.winning_block_id.clone(), + wt_end.tenure_id_consensus_hash.clone(), wt_end.winning_block_id.clone(), rc, rc, @@ -328,7 +338,9 @@ impl TenureStartEnd { let mut tenure_start_end = TenureStartEnd::new( wt.tenure_id_consensus_hash.clone(), wt.burn_height, + wt_start.tenure_id_consensus_hash.clone(), wt_start.winning_block_id.clone(), + wt_end.tenure_id_consensus_hash.clone(), wt_end.winning_block_id.clone(), rc, pox_constants diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader.rs b/stackslib/src/net/download/nakamoto/tenure_downloader.rs index 4c5efaccdd..e2716e8252 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader.rs @@ -43,7 +43,7 @@ use crate::chainstate::nakamoto::{ use crate::chainstate::stacks::boot::RewardSet; use crate::chainstate::stacks::db::StacksChainState; use crate::chainstate::stacks::{ - Error as chainstate_error, StacksBlockHeader, TenureChangePayload, + Error as chainstate_error, StacksBlockHeader, TenureChangePayload, TransactionPayload, }; use crate::core::{ EMPTY_MICROBLOCK_PARENT_HASH, FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, @@ -119,9 +119,13 @@ impl fmt::Display for NakamotoTenureDownloadState { pub struct NakamotoTenureDownloader { /// 
Consensus hash that identifies this tenure pub tenure_id_consensus_hash: ConsensusHash, + /// Consensus hash that identifies the snapshot from whence we obtained tenure_start_block_id + pub start_block_snapshot_consensus_hash: ConsensusHash, /// Stacks block ID of the tenure-start block. Learned from the inventory state machine and /// sortition DB. pub tenure_start_block_id: StacksBlockId, + /// Consensus hash that identifies the snapshot from whence we obtained tenure_end_block_id + pub end_block_snapshot_consensus_hash: ConsensusHash, /// Stacks block ID of the last block in this tenure (this will be the tenure-start block ID /// for some other tenure). Learned from the inventory state machine and sortition DB. pub tenure_end_block_id: StacksBlockId, @@ -150,19 +154,27 @@ pub struct NakamotoTenureDownloader { impl NakamotoTenureDownloader { pub fn new( tenure_id_consensus_hash: ConsensusHash, + start_block_snapshot_consensus_hash: ConsensusHash, tenure_start_block_id: StacksBlockId, + end_block_snapshot_consensus_hash: ConsensusHash, tenure_end_block_id: StacksBlockId, naddr: NeighborAddress, start_signer_keys: RewardSet, end_signer_keys: RewardSet, ) -> Self { debug!( - "Instantiate downloader to {} for tenure {}: {}-{}", - &naddr, &tenure_id_consensus_hash, &tenure_start_block_id, &tenure_end_block_id, + "Instantiate downloader to {}-{} for tenure {}: {}-{}", + &naddr, + &tenure_id_consensus_hash, + &start_block_snapshot_consensus_hash, + &tenure_start_block_id, + &tenure_end_block_id, ); Self { tenure_id_consensus_hash, + start_block_snapshot_consensus_hash, tenure_start_block_id, + end_block_snapshot_consensus_hash, tenure_end_block_id, naddr, start_signer_keys, @@ -270,7 +282,9 @@ impl NakamotoTenureDownloader { return Err(NetError::InvalidState); }; - if self.tenure_end_block_id != tenure_end_block.header.block_id() { + if self.tenure_end_block_id != tenure_end_block.header.block_id() + && self.tenure_end_block_id != StacksBlockId([0x00; 32]) + { // not 
the block we asked for warn!("Invalid tenure-end block: unexpected"; "tenure_id" => %self.tenure_id_consensus_hash, @@ -541,6 +555,177 @@ impl NakamotoTenureDownloader { Ok(Some(request)) } + /// Advance the state of the downloader from chainstate, if possible. + /// For example, a tenure-start or tenure-end block may have been pushed to us already (or they + /// may be shadow blocks) + pub fn try_advance_from_chainstate( + &mut self, + chainstate: &mut StacksChainState, + ) -> Result<(), NetError> { + loop { + match self.state { + NakamotoTenureDownloadState::GetTenureStartBlock( + start_block_id, + start_request_time, + ) => { + if chainstate + .nakamoto_blocks_db() + .is_shadow_tenure(&self.start_block_snapshot_consensus_hash)? + { + debug!( + "Tenure {} start-block confirmed by shadow tenure {}", + &self.tenure_id_consensus_hash, + &self.start_block_snapshot_consensus_hash + ); + let Some(shadow_block) = chainstate + .nakamoto_blocks_db() + .get_shadow_tenure_start_block( + &self.start_block_snapshot_consensus_hash, + )? + else { + warn!( + "No tenure-start block for shadow tenure {}", + &self.start_block_snapshot_consensus_hash + ); + break; + }; + + // the coinbase of a tenure-start block of a shadow tenure contains the + // block-id of the parent tenure's start block (i.e. the information that + // would have been gleaned from a block-commit, if there was one). + let Some(shadow_coinbase) = shadow_block.get_coinbase_tx() else { + warn!("Shadow block {} has no coinbase", &shadow_block.block_id()); + break; + }; + + let TransactionPayload::Coinbase(coinbase_payload, ..) 
= + &shadow_coinbase.payload + else { + warn!( + "Shadow block {} coinbase tx is not a Coinbase", + &shadow_block.block_id() + ); + break; + }; + + let tenure_start_block_id = StacksBlockId(coinbase_payload.0.clone()); + + info!( + "Tenure {} starts at shadow tenure-start {}, not {}", + &self.tenure_id_consensus_hash, &tenure_start_block_id, &start_block_id + ); + self.tenure_start_block_id = tenure_start_block_id.clone(); + self.state = NakamotoTenureDownloadState::GetTenureStartBlock( + tenure_start_block_id, + start_request_time, + ); + if let Some((tenure_start_block, _sz)) = chainstate + .nakamoto_blocks_db() + .get_nakamoto_block(&self.tenure_start_block_id)? + { + // normal block on disk + self.try_accept_tenure_start_block(tenure_start_block)?; + } + } else if let Some((tenure_start_block, _sz)) = chainstate + .nakamoto_blocks_db() + .get_nakamoto_block(&start_block_id)? + { + // we have downloaded this block already + self.try_accept_tenure_start_block(tenure_start_block)?; + } else { + break; + } + if let NakamotoTenureDownloadState::GetTenureStartBlock(..) = &self.state { + break; + } + } + NakamotoTenureDownloadState::GetTenureEndBlock( + end_block_id, + start_request_time, + ) => { + if chainstate + .nakamoto_blocks_db() + .is_shadow_tenure(&self.end_block_snapshot_consensus_hash)? + { + debug!( + "Tenure {} end-block confirmed by shadow tenure {}", + &self.tenure_id_consensus_hash, &self.end_block_snapshot_consensus_hash + ); + let Some(shadow_block) = chainstate + .nakamoto_blocks_db() + .get_shadow_tenure_start_block( + &self.end_block_snapshot_consensus_hash, + )? + else { + warn!( + "No tenure-start block for shadow tenure {}", + &self.end_block_snapshot_consensus_hash + ); + break; + }; + + // the coinbase of a tenure-start block of a shadow tenure contains the + // block-id of the parent tenure's start block (i.e. the information that + // would have been gleaned from a block-commit, if there was one). 
+ let Some(shadow_coinbase) = shadow_block.get_coinbase_tx() else { + warn!("Shadow block {} has no coinbase", &shadow_block.block_id()); + break; + }; + + let TransactionPayload::Coinbase(coinbase_payload, ..) = + &shadow_coinbase.payload + else { + warn!( + "Shadow block {} coinbase tx is not a Coinbase", + &shadow_block.block_id() + ); + break; + }; + + let tenure_end_block_id = StacksBlockId(coinbase_payload.0.clone()); + + info!( + "Tenure {} ends at shadow tenure-start {}, not {}", + &self.tenure_id_consensus_hash, &tenure_end_block_id, &end_block_id + ); + self.tenure_end_block_id = tenure_end_block_id.clone(); + self.state = NakamotoTenureDownloadState::GetTenureEndBlock( + tenure_end_block_id, + start_request_time, + ); + if let Some((tenure_end_block, _sz)) = chainstate + .nakamoto_blocks_db() + .get_nakamoto_block(&self.tenure_end_block_id)? + { + // normal block on disk + self.try_accept_tenure_end_block(&tenure_end_block)?; + } + } else if let Some((tenure_end_block, _sz)) = chainstate + .nakamoto_blocks_db() + .get_nakamoto_block(&end_block_id)? + { + // normal block on disk + self.try_accept_tenure_end_block(&tenure_end_block)?; + } else { + break; + }; + if let NakamotoTenureDownloadState::GetTenureEndBlock(..) = &self.state { + break; + } + } + NakamotoTenureDownloadState::GetTenureBlocks(..) => { + // TODO: look at the chainstate and find out what we don't have to download + // TODO: skip shadow tenures + break; + } + NakamotoTenureDownloadState::Done => { + break; + } + } + } + Ok(()) + } + /// Begin the next download request for this state machine. The request will be sent to the /// data URL corresponding to self.naddr. /// Returns Ok(true) if we sent the request, or there's already an in-flight request. 
The diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs index b5514558b8..08714f5cbf 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs @@ -513,7 +513,9 @@ impl NakamotoTenureDownloaderSet { let tenure_download = NakamotoTenureDownloader::new( ch.clone(), + tenure_info.start_block_snapshot_consensus_hash.clone(), tenure_info.start_block_id.clone(), + tenure_info.end_block_snapshot_consensus_hash.clone(), tenure_info.end_block_id.clone(), naddr.clone(), start_reward_set.clone(), @@ -540,6 +542,7 @@ impl NakamotoTenureDownloaderSet { &mut self, network: &mut PeerNetwork, neighbor_rpc: &mut NeighborRPC, + chainstate: &mut StacksChainState, ) -> HashMap> { let addrs: Vec<_> = self.peers.keys().cloned().collect(); let mut finished = vec![]; @@ -565,6 +568,17 @@ impl NakamotoTenureDownloaderSet { finished_tenures.push(CompletedTenure::from(downloader)); continue; } + + let _ = downloader + .try_advance_from_chainstate(chainstate) + .map_err(|e| { + warn!( + "Failed to advance downloader in state {} for {}: {:?}", + &downloader.state, &downloader.naddr, &e + ); + e + }); + debug!( "Send request to {naddr} for tenure {} (state {})", &downloader.tenure_id_consensus_hash, &downloader.state diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed.rs index ddfd35fa97..9a9ee51b07 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed.rs @@ -735,7 +735,9 @@ impl NakamotoUnconfirmedTenureDownloader { ); let ntd = NakamotoTenureDownloader::new( tenure_tip.parent_consensus_hash.clone(), + tenure_tip.consensus_hash.clone(), tenure_tip.parent_tenure_start_block_id.clone(), + tenure_tip.consensus_hash.clone(), 
tenure_tip.tenure_start_block_id.clone(), self.naddr.clone(), confirmed_signer_keys.clone(), @@ -777,6 +779,44 @@ impl NakamotoUnconfirmedTenureDownloader { } } + /// Advance the state of the downloader from chainstate, if possible. + /// For example, a tenure-start block may have been pushed to us already (or it + /// may be a shadow block) + pub fn try_advance_from_chainstate( + &mut self, + chainstate: &StacksChainState, + ) -> Result<(), NetError> { + loop { + match self.state { + NakamotoUnconfirmedDownloadState::GetTenureInfo => { + // gotta send that request + break; + } + NakamotoUnconfirmedDownloadState::GetTenureStartBlock(start_block_id) => { + // if we have this, then load it up + let Some((tenure_start_block, _sz)) = chainstate + .nakamoto_blocks_db() + .get_nakamoto_block(&start_block_id)? + else { + break; + }; + self.try_accept_unconfirmed_tenure_start_block(tenure_start_block)?; + if let NakamotoUnconfirmedDownloadState::GetTenureStartBlock(..) = &self.state { + break; + } + } + NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(..) => { + // TODO: look at the chainstate and find out what we don't have to download + break; + } + NakamotoUnconfirmedDownloadState::Done => { + break; + } + } + } + Ok(()) + } + /// Begin the next download request for this state machine. /// Returns Ok(()) if we sent the request, or there's already an in-flight request. The /// caller should try this again until it gets one of the other possible return values. 
It's diff --git a/stackslib/src/net/httpcore.rs b/stackslib/src/net/httpcore.rs index c58355a6a9..9b2dd1e106 100644 --- a/stackslib/src/net/httpcore.rs +++ b/stackslib/src/net/httpcore.rs @@ -1197,8 +1197,9 @@ impl StacksHttp { let (response_preamble, response_contents) = match request_result { Ok((rp, rc)) => (rp, rc), Err(NetError::Http(e)) => { + debug!("RPC handler for {} failed: {:?}", decoded_path, &e); return StacksHttpResponse::new_error(&request_preamble, &*e.into_http_error()) - .try_into_contents() + .try_into_contents(); } Err(e) => { warn!("Irrecoverable error when handling request"; "path" => %request_preamble.path_and_query_str, "error" => %e); diff --git a/stackslib/src/net/inv/nakamoto.rs b/stackslib/src/net/inv/nakamoto.rs index d5b08f56d2..e832b70184 100644 --- a/stackslib/src/net/inv/nakamoto.rs +++ b/stackslib/src/net/inv/nakamoto.rs @@ -660,10 +660,12 @@ impl NakamotoTenureInv { match reply.payload { StacksMessageType::NakamotoInv(inv_data) => { debug!( - "{:?}: got NakamotoInv: {:?}", + "{:?}: got NakamotoInv from {:?}: {:?}", network.get_local_peer(), + &self.neighbor_address, &inv_data ); + let ret = self.merge_tenure_inv(inv_data.tenures, self.reward_cycle()); self.next_reward_cycle(); return Ok(ret); diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index 389d565af5..4af4d2a397 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -3141,7 +3141,7 @@ pub mod test { &mut stacks_node.chainstate, &sortdb, old_stackerdb_configs, - config.connection_opts.num_neighbors, + &config.connection_opts, ) .expect("Failed to refresh stackerdb configs"); @@ -4061,6 +4061,22 @@ pub mod test { self.sortdb.as_ref().unwrap() } + pub fn with_dbs(&mut self, f: F) -> R + where + F: FnOnce(&mut TestPeer, &mut SortitionDB, &mut TestStacksNode, &mut MemPoolDB) -> R, + { + let mut sortdb = self.sortdb.take().unwrap(); + let mut stacks_node = self.stacks_node.take().unwrap(); + let mut mempool = self.mempool.take().unwrap(); + + let 
res = f(self, &mut sortdb, &mut stacks_node, &mut mempool); + + self.stacks_node = Some(stacks_node); + self.sortdb = Some(sortdb); + self.mempool = Some(mempool); + res + } + pub fn with_db_state(&mut self, f: F) -> Result where F: FnOnce( @@ -4726,6 +4742,9 @@ pub mod test { all_blocks: Vec, expected_siblings: usize, ) { + if !self.mine_malleablized_blocks { + return; + } for block in all_blocks.iter() { let sighash = block.header.signer_signature_hash(); let siblings = self diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index 71ca82f8bf..13f7ad7fac 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -841,6 +841,9 @@ impl PeerNetwork { ) -> usize { let mut count = 0; for (_, convo) in self.peers.iter() { + if !convo.is_authenticated() { + continue; + } if !convo.is_outbound() { continue; } @@ -4158,7 +4161,7 @@ impl PeerNetwork { chainstate, sortdb, stacker_db_configs, - self.connection_opts.num_neighbors, + &self.connection_opts, )?; Ok(()) } diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index b5fbf76cf4..b93171916c 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -933,6 +933,11 @@ impl Relayer { &obtained_method; "block_id" => %block.header.block_id(), ); + if block.is_shadow_block() { + // drop, since we can get these from ourselves when downloading a tenure that ends in + // a shadow block. 
+ return Ok(BlockAcceptResponse::AlreadyStored); + } if fault_injection::ignore_block(block.header.chain_length, &burnchain.working_dir) { return Ok(BlockAcceptResponse::Rejected( @@ -1072,7 +1077,7 @@ impl Relayer { sort_handle, &staging_db_tx, headers_conn, - reward_set, + &reward_set, obtained_method, )?; staging_db_tx.commit()?; diff --git a/stackslib/src/net/stackerdb/config.rs b/stackslib/src/net/stackerdb/config.rs index 97f8214913..fbc1f28245 100644 --- a/stackslib/src/net/stackerdb/config.rs +++ b/stackslib/src/net/stackerdb/config.rs @@ -285,6 +285,94 @@ impl StackerDBConfig { Ok(ret) } + /// Evaluate contract-given hint-replicas + fn eval_hint_replicas( + contract_id: &QualifiedContractIdentifier, + hint_replicas_list: Vec, + ) -> Result, NetError> { + let mut hint_replicas = vec![]; + for hint_replica_value in hint_replicas_list.into_iter() { + let hint_replica_data = hint_replica_value.expect_tuple()?; + + let addr_byte_list = hint_replica_data + .get("addr") + .expect("FATAL: missing 'addr'") + .clone() + .expect_list()?; + let port = hint_replica_data + .get("port") + .expect("FATAL: missing 'port'") + .clone() + .expect_u128()?; + let pubkey_hash_bytes = hint_replica_data + .get("public-key-hash") + .expect("FATAL: missing 'public-key-hash") + .clone() + .expect_buff_padded(20, 0)?; + + let mut addr_bytes = vec![]; + for byte_val in addr_byte_list.into_iter() { + let byte = byte_val.expect_u128()?; + if byte > (u8::MAX as u128) { + let reason = format!( + "Contract {} stipulates an addr byte above u8::MAX", + contract_id + ); + warn!("{}", &reason); + return Err(NetError::InvalidStackerDBContract( + contract_id.clone(), + reason, + )); + } + addr_bytes.push(byte as u8); + } + if addr_bytes.len() != 16 { + let reason = format!( + "Contract {} did not stipulate a full 16-octet IP address", + contract_id + ); + warn!("{}", &reason); + return Err(NetError::InvalidStackerDBContract( + contract_id.clone(), + reason, + )); + } + + if port < 1024 || port > 
u128::from(u16::MAX - 1) { + let reason = format!( + "Contract {} stipulates a port lower than 1024 or above u16::MAX - 1", + contract_id + ); + warn!("{}", &reason); + return Err(NetError::InvalidStackerDBContract( + contract_id.clone(), + reason, + )); + } + // NOTE: port is now known to be in range [1024, 65535] + + let mut pubkey_hash_slice = [0u8; 20]; + pubkey_hash_slice.copy_from_slice(&pubkey_hash_bytes[0..20]); + + let peer_addr = PeerAddress::from_slice(&addr_bytes).expect("FATAL: not 16 bytes"); + if peer_addr.is_in_private_range() { + debug!( + "Ignoring private IP address '{}' in hint-replicas", + &peer_addr.to_socketaddr(port as u16) + ); + continue; + } + + let naddr = NeighborAddress { + addrbytes: peer_addr, + port: port as u16, + public_key_hash: Hash160(pubkey_hash_slice), + }; + hint_replicas.push(naddr); + } + Ok(hint_replicas) + } + /// Evaluate the contract to get its config fn eval_config( chainstate: &mut StacksChainState, @@ -293,6 +381,7 @@ impl StackerDBConfig { tip: &StacksBlockId, signers: Vec<(StacksAddress, u32)>, local_max_neighbors: u64, + local_hint_replicas: Option>, ) -> Result { let value = chainstate.eval_read_only(burn_dbconn, tip, contract_id, "(stackerdb-get-config)")?; @@ -394,91 +483,17 @@ impl StackerDBConfig { max_neighbors = u128::from(local_max_neighbors); } - let hint_replicas_list = config_tuple - .get("hint-replicas") - .expect("FATAL: missing 'hint-replicas'") - .clone() - .expect_list()?; - let mut hint_replicas = vec![]; - for hint_replica_value in hint_replicas_list.into_iter() { - let hint_replica_data = hint_replica_value.expect_tuple()?; - - let addr_byte_list = hint_replica_data - .get("addr") - .expect("FATAL: missing 'addr'") + let hint_replicas = if let Some(replicas) = local_hint_replicas { + replicas.clone() + } else { + let hint_replicas_list = config_tuple + .get("hint-replicas") + .expect("FATAL: missing 'hint-replicas'") .clone() .expect_list()?; - let port = hint_replica_data - .get("port") - 
.expect("FATAL: missing 'port'") - .clone() - .expect_u128()?; - let pubkey_hash_bytes = hint_replica_data - .get("public-key-hash") - .expect("FATAL: missing 'public-key-hash") - .clone() - .expect_buff_padded(20, 0)?; - let mut addr_bytes = vec![]; - for byte_val in addr_byte_list.into_iter() { - let byte = byte_val.expect_u128()?; - if byte > (u8::MAX as u128) { - let reason = format!( - "Contract {} stipulates an addr byte above u8::MAX", - contract_id - ); - warn!("{}", &reason); - return Err(NetError::InvalidStackerDBContract( - contract_id.clone(), - reason, - )); - } - addr_bytes.push(byte as u8); - } - if addr_bytes.len() != 16 { - let reason = format!( - "Contract {} did not stipulate a full 16-octet IP address", - contract_id - ); - warn!("{}", &reason); - return Err(NetError::InvalidStackerDBContract( - contract_id.clone(), - reason, - )); - } - - if port < 1024 || port > u128::from(u16::MAX - 1) { - let reason = format!( - "Contract {} stipulates a port lower than 1024 or above u16::MAX - 1", - contract_id - ); - warn!("{}", &reason); - return Err(NetError::InvalidStackerDBContract( - contract_id.clone(), - reason, - )); - } - // NOTE: port is now known to be in range [1024, 65535] - - let mut pubkey_hash_slice = [0u8; 20]; - pubkey_hash_slice.copy_from_slice(&pubkey_hash_bytes[0..20]); - - let peer_addr = PeerAddress::from_slice(&addr_bytes).expect("FATAL: not 16 bytes"); - if peer_addr.is_in_private_range() { - debug!( - "Ignoring private IP address '{}' in hint-replicas", - &peer_addr.to_socketaddr(port as u16) - ); - continue; - } - - let naddr = NeighborAddress { - addrbytes: peer_addr, - port: port as u16, - public_key_hash: Hash160(pubkey_hash_slice), - }; - hint_replicas.push(naddr); - } + Self::eval_hint_replicas(contract_id, hint_replicas_list)? 
+ }; Ok(StackerDBConfig { chunk_size: chunk_size as u64, @@ -497,6 +512,7 @@ impl StackerDBConfig { sortition_db: &SortitionDB, contract_id: &QualifiedContractIdentifier, max_neighbors: u64, + local_hint_replicas: Option>, ) -> Result { let chain_tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), sortition_db)? @@ -578,6 +594,7 @@ impl StackerDBConfig { &chain_tip_hash, signers, max_neighbors, + local_hint_replicas, )?; Ok(config) } diff --git a/stackslib/src/net/stackerdb/mod.rs b/stackslib/src/net/stackerdb/mod.rs index bbbec21290..9d1b25af51 100644 --- a/stackslib/src/net/stackerdb/mod.rs +++ b/stackslib/src/net/stackerdb/mod.rs @@ -133,6 +133,7 @@ use crate::chainstate::burn::db::sortdb::SortitionDB; use crate::chainstate::nakamoto::NakamotoChainState; use crate::chainstate::stacks::boot::MINERS_NAME; use crate::chainstate::stacks::db::StacksChainState; +use crate::net::connection::ConnectionOptions; use crate::net::neighbors::NeighborComms; use crate::net::p2p::PeerNetwork; use crate::net::{ @@ -285,8 +286,9 @@ impl StackerDBs { chainstate: &mut StacksChainState, sortdb: &SortitionDB, stacker_db_configs: HashMap, - num_neighbors: u64, + connection_opts: &ConnectionOptions, ) -> Result, net_error> { + let num_neighbors = connection_opts.num_neighbors; let existing_contract_ids = self.get_stackerdb_contract_ids()?; let mut new_stackerdb_configs = HashMap::new(); let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn())?; @@ -314,6 +316,10 @@ impl StackerDBs { &sortdb, &stackerdb_contract_id, num_neighbors, + connection_opts + .stackerdb_hint_replicas + .get(&stackerdb_contract_id) + .cloned(), ) .unwrap_or_else(|e| { if matches!(e, net_error::NoSuchStackerDB(_)) && stackerdb_contract_id.is_boot() diff --git a/stackslib/src/net/stackerdb/tests/config.rs b/stackslib/src/net/stackerdb/tests/config.rs index a075d7b974..cff4ca1059 100644 --- a/stackslib/src/net/stackerdb/tests/config.rs +++ b/stackslib/src/net/stackerdb/tests/config.rs @@ 
-528,7 +528,7 @@ fn test_valid_and_invalid_stackerdb_configs() { ContractName::try_from(format!("test-{}", i)).unwrap(), ); peer.with_db_state(|sortdb, chainstate, _, _| { - match StackerDBConfig::from_smart_contract(chainstate, sortdb, &contract_id, 32) { + match StackerDBConfig::from_smart_contract(chainstate, sortdb, &contract_id, 32, None) { Ok(config) => { let expected = result .clone() @@ -551,3 +551,122 @@ fn test_valid_and_invalid_stackerdb_configs() { .unwrap(); } } + +#[test] +fn test_hint_replicas_override() { + let AUTO_UNLOCK_HEIGHT = 12; + let EXPECTED_FIRST_V2_CYCLE = 8; + // the sim environment produces 25 empty sortitions before + // tenures start being tracked. + let EMPTY_SORTITIONS = 25; + + let mut burnchain = Burnchain::default_unittest( + 0, + &BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH).unwrap(), + ); + burnchain.pox_constants.reward_cycle_length = 5; + burnchain.pox_constants.prepare_length = 2; + burnchain.pox_constants.anchor_threshold = 1; + burnchain.pox_constants.v1_unlock_height = AUTO_UNLOCK_HEIGHT + EMPTY_SORTITIONS; + + let first_v2_cycle = burnchain + .block_height_to_reward_cycle(burnchain.pox_constants.v1_unlock_height as u64) + .unwrap() + + 1; + + assert_eq!(first_v2_cycle, EXPECTED_FIRST_V2_CYCLE); + + let epochs = StacksEpoch::all(0, 0, EMPTY_SORTITIONS as u64 + 10); + + let observer = TestEventObserver::new(); + + let (mut peer, mut keys) = instantiate_pox_peer_with_epoch( + &burnchain, + "test_valid_and_invalid_stackerdb_configs", + Some(epochs.clone()), + Some(&observer), + ); + + let contract_owner = keys.pop().unwrap(); + let contract_id = QualifiedContractIdentifier::new( + StacksAddress::from_public_keys( + 26, + &AddressHashMode::SerializeP2PKH, + 1, + &vec![StacksPublicKey::from_private(&contract_owner)], + ) + .unwrap() + .into(), + ContractName::try_from("test-0").unwrap(), + ); + + peer.config.check_pox_invariants = + Some((EXPECTED_FIRST_V2_CYCLE, EXPECTED_FIRST_V2_CYCLE + 10)); + + let 
override_replica = NeighborAddress { + addrbytes: PeerAddress([2u8; 16]), + port: 123, + public_key_hash: Hash160([3u8; 20]), + }; + + let mut coinbase_nonce = 0; + let mut txs = vec![]; + + let config_contract = r#" + (define-public (stackerdb-get-signer-slots) + (ok (list { signer: 'ST2TFVBMRPS5SSNP98DQKQ5JNB2B6NZM91C4K3P7B, num-slots: u3 }))) + + (define-public (stackerdb-get-config) + (ok { + chunk-size: u123, + write-freq: u4, + max-writes: u56, + max-neighbors: u7, + hint-replicas: (list + { + addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u142 u150 u80 u100), + port: u8901, + public-key-hash: 0x0123456789abcdef0123456789abcdef01234567 + }) + })) + "#; + + let expected_config = StackerDBConfig { + chunk_size: 123, + signers: vec![( + StacksAddress { + version: 26, + bytes: Hash160::from_hex("b4fdae98b64b9cd6c9436f3b965558966afe890b").unwrap(), + }, + 3, + )], + write_freq: 4, + max_writes: 56, + hint_replicas: vec![override_replica.clone()], + max_neighbors: 7, + }; + + let tx = make_smart_contract("test-0", &config_contract, &contract_owner, 0, 10000); + txs.push(tx); + + peer.tenure_with_txs(&txs, &mut coinbase_nonce); + + peer.with_db_state(|sortdb, chainstate, _, _| { + match StackerDBConfig::from_smart_contract( + chainstate, + sortdb, + &contract_id, + 32, + Some(vec![override_replica.clone()]), + ) { + Ok(config) => { + assert_eq!(config, expected_config); + } + Err(e) => { + panic!("Unexpected error: {:?}", &e); + } + } + Ok(()) + }) + .unwrap(); +} diff --git a/stackslib/src/net/tests/download/nakamoto.rs b/stackslib/src/net/tests/download/nakamoto.rs index e2bea6fd50..a479dad07a 100644 --- a/stackslib/src/net/tests/download/nakamoto.rs +++ b/stackslib/src/net/tests/download/nakamoto.rs @@ -33,18 +33,23 @@ use crate::burnchains::PoxConstants; use crate::chainstate::burn::db::sortdb::SortitionHandle; use crate::chainstate::burn::BlockSnapshot; use crate::chainstate::nakamoto::test_signers::TestSigners; -use 
crate::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; +use crate::chainstate::nakamoto::{ + NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConnRef, +}; use crate::chainstate::stacks::db::{StacksChainState, StacksHeaderInfo}; use crate::chainstate::stacks::{ - CoinbasePayload, StacksTransaction, TenureChangeCause, TenureChangePayload, TokenTransferMemo, - TransactionAnchorMode, TransactionAuth, TransactionPayload, TransactionVersion, + CoinbasePayload, Error as ChainstateError, StacksTransaction, TenureChangeCause, + TenureChangePayload, TokenTransferMemo, TransactionAnchorMode, TransactionAuth, + TransactionPayload, TransactionVersion, }; use crate::clarity::vm::types::StacksAddressExtensions; use crate::net::api::gettenureinfo::RPCGetTenureInfo; use crate::net::download::nakamoto::{TenureStartEnd, WantedTenure, *}; use crate::net::inv::nakamoto::NakamotoTenureInv; -use crate::net::test::{dns_thread_start, TestEventObserver}; -use crate::net::tests::inv::nakamoto::{make_nakamoto_peer_from_invs, peer_get_nakamoto_invs}; +use crate::net::test::{dns_thread_start, to_addr, TestEventObserver}; +use crate::net::tests::inv::nakamoto::{ + make_nakamoto_peer_from_invs, make_nakamoto_peers_from_invs_ext, peer_get_nakamoto_invs, +}; use crate::net::tests::{NakamotoBootPlan, TestPeer}; use crate::net::{Error as NetError, Hash160, NeighborAddress, SortitionDB}; use crate::stacks_common::types::Address; @@ -97,6 +102,45 @@ impl NakamotoDownloadStateMachine { } } +impl<'a> NakamotoStagingBlocksConnRef<'a> { + pub fn load_nakamoto_tenure( + &self, + tip: &StacksBlockId, + ) -> Result>, ChainstateError> { + let Some((block, ..)) = self.get_nakamoto_block(tip)? else { + return Ok(None); + }; + if block.is_wellformed_tenure_start_block().map_err(|_| { + ChainstateError::InvalidStacksBlock("Malformed tenure-start block".into()) + })? 
{ + // we're done + return Ok(Some(vec![block])); + } + + // this is an intermediate block + let mut tenure = vec![]; + let mut cursor = block.header.parent_block_id.clone(); + tenure.push(block); + loop { + let Some((block, _)) = self.get_nakamoto_block(&cursor)? else { + return Ok(None); + }; + + let is_tenure_start = block.is_wellformed_tenure_start_block().map_err(|e| { + ChainstateError::InvalidStacksBlock("Malformed tenure-start block".into()) + })?; + cursor = block.header.parent_block_id.clone(); + tenure.push(block); + + if is_tenure_start { + break; + } + } + tenure.reverse(); + Ok(Some(tenure)) + } +} + #[test] fn test_nakamoto_tenure_downloader() { let ch = ConsensusHash([0x11; 20]); @@ -240,8 +284,10 @@ fn test_nakamoto_tenure_downloader() { }; let mut td = NakamotoTenureDownloader::new( + tenure_start_block.header.consensus_hash.clone(), tenure_start_block.header.consensus_hash.clone(), tenure_start_block.header.block_id(), + next_tenure_start_block.header.consensus_hash.clone(), next_tenure_start_block.header.block_id(), naddr.clone(), reward_set.clone(), @@ -361,6 +407,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { ); let (mut peer, reward_cycle_invs) = peer_get_nakamoto_invs(peer, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]); + peer.mine_malleablized_blocks = false; let nakamoto_start = NakamotoBootPlan::nakamoto_first_tenure_height(&peer.config.burnchain.pox_constants); @@ -2161,7 +2208,9 @@ fn test_nakamoto_download_run_2_peers() { "Booting peer's stacks tip is now {:?}", &boot_peer.network.stacks_tip ); - if stacks_tip_ch == canonical_stacks_tip_ch { + if stacks_tip_ch == canonical_stacks_tip_ch + && stacks_tip_bhh == canonical_stacks_tip_bhh + { break; } } @@ -2249,6 +2298,793 @@ fn test_nakamoto_unconfirmed_download_run_2_peers() { let (mut boot_dns_client, boot_dns_thread_handle) = dns_thread_start(100); + // start running that peer so we can boot off of it + let (term_sx, term_rx) = sync_channel(1); + thread::scope(|s| { + 
s.spawn(move || { + let (mut last_stacks_tip_ch, mut last_stacks_tip_bhh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(boot_peer.sortdb().conn()) + .unwrap(); + loop { + boot_peer + .run_with_ibd(true, Some(&mut boot_dns_client)) + .unwrap(); + + let (stacks_tip_ch, stacks_tip_bhh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(boot_peer.sortdb().conn()) + .unwrap(); + + last_stacks_tip_ch = stacks_tip_ch; + last_stacks_tip_bhh = stacks_tip_bhh; + + debug!( + "Booting peer's stacks tip is now {:?}", + &boot_peer.network.stacks_tip + ); + if stacks_tip_ch == canonical_stacks_tip_ch + && stacks_tip_bhh == canonical_stacks_tip_bhh + { + break; + } + } + + term_sx.send(()).unwrap(); + }); + + loop { + if term_rx.try_recv().is_ok() { + break; + } + peer.step_with_ibd(false).unwrap(); + } + }); + + boot_dns_thread_handle.join().unwrap(); +} + +/// Test the case where one or more blocks from tenure _T_ get orphaned by a tenure-start block in +/// tenure _T + 1_. The unconfirmed downloader should be able to handle this case. 
+#[test] +fn test_nakamoto_microfork_download_run_2_peers() { + let sender_key = StacksPrivateKey::new(); + let sender_addr = to_addr(&sender_key); + let initial_balances = vec![(sender_addr.to_account_principal(), 1000000000)]; + + let observer = TestEventObserver::new(); + let bitvecs = vec![ + // full rc + vec![true, true, true, true, true, true, true, true, true, true], + ]; + + let rc_len = 10u64; + + let (mut peer, _) = make_nakamoto_peers_from_invs_ext( + function_name!(), + &observer, + bitvecs.clone(), + |boot_plan| { + boot_plan + .with_pox_constants(rc_len as u32, 5) + .with_extra_peers(0) + .with_initial_balances(initial_balances) + .with_malleablized_blocks(false) + }, + ); + peer.refresh_burnchain_view(); + + let nakamoto_start = + NakamotoBootPlan::nakamoto_first_tenure_height(&peer.config.burnchain.pox_constants); + + // create a microfork + let naka_tip_ch = peer.network.stacks_tip.consensus_hash.clone(); + let naka_tip_bh = peer.network.stacks_tip.block_hash.clone(); + let naka_tip = peer.network.stacks_tip.block_id(); + + let sortdb = peer.sortdb_ref().reopen().unwrap(); + let (chainstate, _) = peer.chainstate_ref().reopen().unwrap(); + + let naka_tip_header = NakamotoChainState::get_block_header_nakamoto(chainstate.db(), &naka_tip) + .unwrap() + .unwrap(); + + // load the full tenure for this tip + let mut naka_tip_tenure = chainstate + .nakamoto_blocks_db() + .load_nakamoto_tenure(&naka_tip) + .unwrap() + .unwrap(); + + assert!(naka_tip_tenure.len() > 1); + + // make a microfork -- orphan naka_tip_tenure.last() + naka_tip_tenure.pop(); + + debug!("test: mine off of tenure"); + debug!( + "test: first {}: {:?}", + &naka_tip_tenure.first().as_ref().unwrap().block_id(), + &naka_tip_tenure.first().as_ref().unwrap() + ); + debug!( + "test: last {}: {:?}", + &naka_tip_tenure.last().as_ref().unwrap().block_id(), + &naka_tip_tenure.last().as_ref().unwrap() + ); + + peer.mine_nakamoto_on(naka_tip_tenure); + let (fork_naka_block, ..) 
= peer.single_block_tenure(&sender_key, |_| {}, |_| {}, |_| true); + debug!( + "test: produced fork {}: {:?}", + &fork_naka_block.block_id(), + &fork_naka_block + ); + + peer.refresh_burnchain_view(); + + peer.mine_nakamoto_on(vec![fork_naka_block.clone()]); + let (fork_naka_block_2, ..) = peer.single_block_tenure(&sender_key, |_| {}, |_| {}, |_| true); + debug!( + "test: confirmed fork with {}: {:?}", + &fork_naka_block_2.block_id(), + &fork_naka_block_2 + ); + + peer.refresh_burnchain_view(); + + // get reward cycle data + let (mut peer, reward_cycle_invs) = + peer_get_nakamoto_invs(peer, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]); + + // make a neighbor from this peer + let boot_observer = TestEventObserver::new(); + let privk = StacksPrivateKey::from_seed(&[0, 1, 2, 3, 4]); + let mut boot_peer = peer.neighbor_with_observer(privk, Some(&boot_observer)); + + let (canonical_stacks_tip_ch, canonical_stacks_tip_bhh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(peer.sortdb().conn()).unwrap(); + + let all_sortitions = peer.sortdb().get_all_snapshots().unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb().conn()).unwrap(); + let nakamoto_tip = peer + .sortdb() + .index_handle(&tip.sortition_id) + .get_nakamoto_tip_block_id() + .unwrap() + .unwrap(); + + assert_eq!(tip.block_height, 53); + + // boot up the boot peer's burnchain + for height in 25..tip.block_height { + let ops = peer + .get_burnchain_block_ops_at_height(height + 1) + .unwrap_or(vec![]); + let sn = { + let ih = peer.sortdb().index_handle(&tip.sortition_id); + let sn = ih.get_block_snapshot_by_height(height).unwrap().unwrap(); + sn + }; + test_debug!( + "boot_peer tip height={} hash={}", + sn.block_height, + &sn.burn_header_hash + ); + test_debug!("ops = {:?}", &ops); + let block_header = TestPeer::make_next_burnchain_block( + &boot_peer.config.burnchain, + sn.block_height, + &sn.burn_header_hash, + ops.len() as u64, + false, + ); + 
TestPeer::add_burnchain_block(&boot_peer.config.burnchain, &block_header, ops.clone()); + } + + let (mut boot_dns_client, boot_dns_thread_handle) = dns_thread_start(100); + + // start running that peer so we can boot off of it + let (term_sx, term_rx) = sync_channel(1); + thread::scope(|s| { + s.spawn(move || { + let (mut last_stacks_tip_ch, mut last_stacks_tip_bhh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(boot_peer.sortdb().conn()) + .unwrap(); + loop { + boot_peer + .run_with_ibd(true, Some(&mut boot_dns_client)) + .unwrap(); + + let (stacks_tip_ch, stacks_tip_bhh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(boot_peer.sortdb().conn()) + .unwrap(); + + last_stacks_tip_ch = stacks_tip_ch; + last_stacks_tip_bhh = stacks_tip_bhh; + + debug!( + "Booting peer's stacks tip is now {:?}", + &boot_peer.network.stacks_tip + ); + if stacks_tip_ch == canonical_stacks_tip_ch + && stacks_tip_bhh == canonical_stacks_tip_bhh + { + break; + } + } + + term_sx.send(()).unwrap(); + }); + + loop { + if term_rx.try_recv().is_ok() { + break; + } + peer.step_with_ibd(false).unwrap(); + } + }); + + boot_dns_thread_handle.join().unwrap(); +} + +/// Test booting up a node where there is one shadow block in the prepare phase, as well as some +/// blocks that mine atop it. 
+#[test] +fn test_nakamoto_download_run_2_peers_with_one_shadow_block() { + let observer = TestEventObserver::new(); + let sender_key = StacksPrivateKey::new(); + let sender_addr = to_addr(&sender_key); + let initial_balances = vec![(sender_addr.to_account_principal(), 1000000000)]; + let bitvecs = vec![vec![true, true, false, false]]; + + let rc_len = 10u64; + let (mut peer, _) = make_nakamoto_peers_from_invs_ext( + function_name!(), + &observer, + bitvecs.clone(), + |boot_plan| { + boot_plan + .with_pox_constants(rc_len as u32, 5) + .with_extra_peers(0) + .with_initial_balances(initial_balances) + .with_malleablized_blocks(false) + }, + ); + peer.refresh_burnchain_view(); + let (mut peer, reward_cycle_invs) = + peer_get_nakamoto_invs(peer, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]); + + let nakamoto_start = + NakamotoBootPlan::nakamoto_first_tenure_height(&peer.config.burnchain.pox_constants); + + // create a shadow block + let naka_tip_ch = peer.network.stacks_tip.consensus_hash.clone(); + let naka_tip_bh = peer.network.stacks_tip.block_hash.clone(); + let naka_tip = peer.network.stacks_tip.block_id(); + + let sortdb = peer.sortdb_ref().reopen().unwrap(); + let (chainstate, _) = peer.chainstate_ref().reopen().unwrap(); + + let naka_tip_header = NakamotoChainState::get_block_header_nakamoto(chainstate.db(), &naka_tip) + .unwrap() + .unwrap(); + + let naka_tip_tenure = chainstate + .nakamoto_blocks_db() + .load_nakamoto_tenure(&naka_tip) + .unwrap() + .unwrap(); + + assert!(naka_tip_tenure.len() > 1); + + peer.mine_nakamoto_on(naka_tip_tenure); + let shadow_block = peer.make_shadow_tenure(None); + debug!( + "test: produced shadow block {}: {:?}", + &shadow_block.block_id(), + &shadow_block + ); + + peer.refresh_burnchain_view(); + + peer.mine_nakamoto_on(vec![shadow_block.clone()]); + let (next_block, ..) 
= peer.single_block_tenure(&sender_key, |_| {}, |_| {}, |_| true); + debug!( + "test: confirmed shadow block with {}: {:?}", + &next_block.block_id(), + &next_block + ); + + peer.refresh_burnchain_view(); + peer.mine_nakamoto_on(vec![next_block.clone()]); + + for _ in 0..9 { + let (next_block, ..) = peer.single_block_tenure(&sender_key, |_| {}, |_| {}, |_| true); + debug!( + "test: confirmed shadow block with {}: {:?}", + &next_block.block_id(), + &next_block + ); + + peer.refresh_burnchain_view(); + peer.mine_nakamoto_on(vec![next_block.clone()]); + } + + let all_sortitions = peer.sortdb().get_all_snapshots().unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb().conn()).unwrap(); + let nakamoto_tip = peer + .sortdb() + .index_handle(&tip.sortition_id) + .get_nakamoto_tip_block_id() + .unwrap() + .unwrap(); + + /* + assert_eq!( + tip.block_height, + 56 + ); + */ + + // make a neighbor from this peer + let boot_observer = TestEventObserver::new(); + let privk = StacksPrivateKey::from_seed(&[0, 1, 2, 3, 4]); + let mut boot_peer = peer.neighbor_with_observer(privk, Some(&boot_observer)); + + let (canonical_stacks_tip_ch, canonical_stacks_tip_bhh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(peer.sortdb().conn()).unwrap(); + + // boot up the boot peer's burnchain + for height in 25..tip.block_height { + let ops = peer + .get_burnchain_block_ops_at_height(height + 1) + .unwrap_or(vec![]); + let sn = { + let ih = peer.sortdb().index_handle(&tip.sortition_id); + let sn = ih.get_block_snapshot_by_height(height).unwrap().unwrap(); + sn + }; + test_debug!( + "boot_peer tip height={} hash={}", + sn.block_height, + &sn.burn_header_hash + ); + test_debug!("ops = {:?}", &ops); + let block_header = TestPeer::make_next_burnchain_block( + &boot_peer.config.burnchain, + sn.block_height, + &sn.burn_header_hash, + ops.len() as u64, + false, + ); + TestPeer::add_burnchain_block(&boot_peer.config.burnchain, &block_header, ops.clone()); + } + + { + let mut 
node = boot_peer.stacks_node.take().unwrap(); + let tx = node.chainstate.staging_db_tx_begin().unwrap(); + tx.add_shadow_block(&shadow_block).unwrap(); + tx.commit().unwrap(); + boot_peer.stacks_node = Some(node); + } + + let (mut boot_dns_client, boot_dns_thread_handle) = dns_thread_start(100); + + // start running that peer so we can boot off of it + let (term_sx, term_rx) = sync_channel(1); + thread::scope(|s| { + s.spawn(move || { + let (mut last_stacks_tip_ch, mut last_stacks_tip_bhh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(boot_peer.sortdb().conn()) + .unwrap(); + loop { + boot_peer + .run_with_ibd(true, Some(&mut boot_dns_client)) + .unwrap(); + + let (stacks_tip_ch, stacks_tip_bhh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(boot_peer.sortdb().conn()) + .unwrap(); + + last_stacks_tip_ch = stacks_tip_ch; + last_stacks_tip_bhh = stacks_tip_bhh; + + debug!( + "Booting peer's stacks tip is now {:?}", + &boot_peer.network.stacks_tip + ); + if stacks_tip_ch == canonical_stacks_tip_ch { + break; + } + } + + term_sx.send(()).unwrap(); + }); + + loop { + if term_rx.try_recv().is_ok() { + break; + } + peer.step_with_ibd(false).unwrap(); + } + }); + + boot_dns_thread_handle.join().unwrap(); +} + +/// Test booting up a node where the whole prepare phase is shadow blocks +#[test] +fn test_nakamoto_download_run_2_peers_shadow_prepare_phase() { + let observer = TestEventObserver::new(); + let sender_key = StacksPrivateKey::new(); + let sender_addr = to_addr(&sender_key); + let initial_balances = vec![(sender_addr.to_account_principal(), 1000000000)]; + let bitvecs = vec![vec![true, true]]; + + let rc_len = 10u64; + let (mut peer, _) = make_nakamoto_peers_from_invs_ext( + function_name!(), + &observer, + bitvecs.clone(), + |boot_plan| { + boot_plan + .with_pox_constants(rc_len as u32, 5) + .with_extra_peers(0) + .with_initial_balances(initial_balances) + .with_malleablized_blocks(false) + }, + ); + peer.refresh_burnchain_view(); + let (mut peer, 
reward_cycle_invs) = + peer_get_nakamoto_invs(peer, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]); + + let nakamoto_start = + NakamotoBootPlan::nakamoto_first_tenure_height(&peer.config.burnchain.pox_constants); + + // create a shadow block + let naka_tip_ch = peer.network.stacks_tip.consensus_hash.clone(); + let naka_tip_bh = peer.network.stacks_tip.block_hash.clone(); + let naka_tip = peer.network.stacks_tip.block_id(); + + let sortdb = peer.sortdb_ref().reopen().unwrap(); + let (chainstate, _) = peer.chainstate_ref().reopen().unwrap(); + + let naka_tip_header = NakamotoChainState::get_block_header_nakamoto(chainstate.db(), &naka_tip) + .unwrap() + .unwrap(); + + let naka_tip_tenure = chainstate + .nakamoto_blocks_db() + .load_nakamoto_tenure(&naka_tip) + .unwrap() + .unwrap(); + + assert!(naka_tip_tenure.len() > 1); + + peer.mine_nakamoto_on(naka_tip_tenure); + + let mut shadow_blocks = vec![]; + for _ in 0..10 { + let shadow_block = peer.make_shadow_tenure(None); + debug!( + "test: produced shadow block {}: {:?}", + &shadow_block.block_id(), + &shadow_block + ); + shadow_blocks.push(shadow_block.clone()); + peer.refresh_burnchain_view(); + + peer.mine_nakamoto_on(vec![shadow_block.clone()]); + } + + match peer.single_block_tenure_fallible(&sender_key, |_| {}, |_| {}, |_| true) { + Ok((next_block, ..)) => { + debug!( + "test: confirmed shadow block with {}: {:?}", + &next_block.block_id(), + &next_block + ); + + peer.refresh_burnchain_view(); + peer.mine_nakamoto_on(vec![next_block.clone()]); + } + Err(ChainstateError::NoSuchBlockError) => { + // tried to mine but our commit was invalid (e.g. because we haven't mined often + // enough) + peer.refresh_burnchain_view(); + } + Err(e) => { + panic!("FATAL: {:?}", &e); + } + }; + + for _ in 0..10 { + let (next_block, ..) = + match peer.single_block_tenure_fallible(&sender_key, |_| {}, |_| {}, |_| true) { + Ok(x) => x, + Err(ChainstateError::NoSuchBlockError) => { + // tried to mine but our commit was invalid (e.g. 
because we haven't mined often + // enough) + peer.refresh_burnchain_view(); + continue; + } + Err(e) => { + panic!("FATAL: {:?}", &e); + } + }; + + debug!( + "test: confirmed shadow block with {}: {:?}", + &next_block.block_id(), + &next_block + ); + + peer.refresh_burnchain_view(); + peer.mine_nakamoto_on(vec![next_block.clone()]); + } + + let all_sortitions = peer.sortdb().get_all_snapshots().unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb().conn()).unwrap(); + let nakamoto_tip = peer + .sortdb() + .index_handle(&tip.sortition_id) + .get_nakamoto_tip_block_id() + .unwrap() + .unwrap(); + + // make a neighbor from this peer + let boot_observer = TestEventObserver::new(); + let privk = StacksPrivateKey::from_seed(&[0, 1, 2, 3, 4]); + let mut boot_peer = peer.neighbor_with_observer(privk, Some(&boot_observer)); + + let (canonical_stacks_tip_ch, canonical_stacks_tip_bhh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(peer.sortdb().conn()).unwrap(); + + // boot up the boot peer's burnchain + for height in 25..tip.block_height { + let ops = peer + .get_burnchain_block_ops_at_height(height + 1) + .unwrap_or(vec![]); + let sn = { + let ih = peer.sortdb().index_handle(&tip.sortition_id); + let sn = ih.get_block_snapshot_by_height(height).unwrap().unwrap(); + sn + }; + test_debug!( + "boot_peer tip height={} hash={}", + sn.block_height, + &sn.burn_header_hash + ); + test_debug!("ops = {:?}", &ops); + let block_header = TestPeer::make_next_burnchain_block( + &boot_peer.config.burnchain, + sn.block_height, + &sn.burn_header_hash, + ops.len() as u64, + false, + ); + TestPeer::add_burnchain_block(&boot_peer.config.burnchain, &block_header, ops.clone()); + } + { + let mut node = boot_peer.stacks_node.take().unwrap(); + let tx = node.chainstate.staging_db_tx_begin().unwrap(); + for shadow_block in shadow_blocks.into_iter() { + tx.add_shadow_block(&shadow_block).unwrap(); + } + tx.commit().unwrap(); + boot_peer.stacks_node = Some(node); + } + + 
let (mut boot_dns_client, boot_dns_thread_handle) = dns_thread_start(100); + + // start running that peer so we can boot off of it + let (term_sx, term_rx) = sync_channel(1); + thread::scope(|s| { + s.spawn(move || { + let (mut last_stacks_tip_ch, mut last_stacks_tip_bhh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(boot_peer.sortdb().conn()) + .unwrap(); + loop { + boot_peer + .run_with_ibd(true, Some(&mut boot_dns_client)) + .unwrap(); + + let (stacks_tip_ch, stacks_tip_bhh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(boot_peer.sortdb().conn()) + .unwrap(); + + last_stacks_tip_ch = stacks_tip_ch; + last_stacks_tip_bhh = stacks_tip_bhh; + + debug!( + "Booting peer's stacks tip is now {:?}", + &boot_peer.network.stacks_tip + ); + if stacks_tip_ch == canonical_stacks_tip_ch { + break; + } + } + + term_sx.send(()).unwrap(); + }); + + loop { + if term_rx.try_recv().is_ok() { + break; + } + peer.step_with_ibd(false).unwrap(); + } + }); + + boot_dns_thread_handle.join().unwrap(); +} + +/// Test booting up a node where multiple reward cycles are shadow blocks +#[test] +fn test_nakamoto_download_run_2_peers_shadow_reward_cycles() { + let observer = TestEventObserver::new(); + let sender_key = StacksPrivateKey::new(); + let sender_addr = to_addr(&sender_key); + let initial_balances = vec![(sender_addr.to_account_principal(), 1000000000)]; + let bitvecs = vec![vec![true, true]]; + + let rc_len = 10u64; + let (mut peer, _) = make_nakamoto_peers_from_invs_ext( + function_name!(), + &observer, + bitvecs.clone(), + |boot_plan| { + boot_plan + .with_pox_constants(rc_len as u32, 5) + .with_extra_peers(0) + .with_initial_balances(initial_balances) + .with_malleablized_blocks(false) + }, + ); + peer.refresh_burnchain_view(); + let (mut peer, reward_cycle_invs) = + peer_get_nakamoto_invs(peer, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]); + + let nakamoto_start = + NakamotoBootPlan::nakamoto_first_tenure_height(&peer.config.burnchain.pox_constants); + + // create a 
shadow block + let naka_tip_ch = peer.network.stacks_tip.consensus_hash.clone(); + let naka_tip_bh = peer.network.stacks_tip.block_hash.clone(); + let naka_tip = peer.network.stacks_tip.block_id(); + + let sortdb = peer.sortdb_ref().reopen().unwrap(); + let (chainstate, _) = peer.chainstate_ref().reopen().unwrap(); + + let naka_tip_header = NakamotoChainState::get_block_header_nakamoto(chainstate.db(), &naka_tip) + .unwrap() + .unwrap(); + + let naka_tip_tenure = chainstate + .nakamoto_blocks_db() + .load_nakamoto_tenure(&naka_tip) + .unwrap() + .unwrap(); + + assert!(naka_tip_tenure.len() > 1); + + peer.mine_nakamoto_on(naka_tip_tenure); + + let mut shadow_blocks = vec![]; + for _ in 0..30 { + let shadow_block = peer.make_shadow_tenure(None); + debug!( + "test: produced shadow block {}: {:?}", + &shadow_block.block_id(), + &shadow_block + ); + shadow_blocks.push(shadow_block.clone()); + peer.refresh_burnchain_view(); + + peer.mine_nakamoto_on(vec![shadow_block.clone()]); + } + + match peer.single_block_tenure_fallible(&sender_key, |_| {}, |_| {}, |_| true) { + Ok((next_block, ..)) => { + debug!( + "test: confirmed shadow block with {}: {:?}", + &next_block.block_id(), + &next_block + ); + + peer.refresh_burnchain_view(); + peer.mine_nakamoto_on(vec![next_block.clone()]); + } + Err(ChainstateError::NoSuchBlockError) => { + // tried to mine but our commit was invalid (e.g. because we haven't mined often + // enough) + peer.refresh_burnchain_view(); + } + Err(e) => { + panic!("FATAL: {:?}", &e); + } + }; + + for _ in 0..10 { + let (next_block, ..) = + match peer.single_block_tenure_fallible(&sender_key, |_| {}, |_| {}, |_| true) { + Ok(x) => x, + Err(ChainstateError::NoSuchBlockError) => { + // tried to mine but our commit was invalid (e.g. 
because we haven't mined often + // enough) + peer.refresh_burnchain_view(); + continue; + } + Err(e) => { + panic!("FATAL: {:?}", &e); + } + }; + + debug!( + "test: confirmed shadow block with {}: {:?}", + &next_block.block_id(), + &next_block + ); + + peer.refresh_burnchain_view(); + peer.mine_nakamoto_on(vec![next_block.clone()]); + } + + let all_sortitions = peer.sortdb().get_all_snapshots().unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb().conn()).unwrap(); + let nakamoto_tip = peer + .sortdb() + .index_handle(&tip.sortition_id) + .get_nakamoto_tip_block_id() + .unwrap() + .unwrap(); + + assert_eq!(tip.block_height, 84); + + // make a neighbor from this peer + let boot_observer = TestEventObserver::new(); + let privk = StacksPrivateKey::from_seed(&[0, 1, 2, 3, 4]); + let mut boot_peer = peer.neighbor_with_observer(privk, Some(&boot_observer)); + + let (canonical_stacks_tip_ch, canonical_stacks_tip_bhh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(peer.sortdb().conn()).unwrap(); + + // boot up the boot peer's burnchain + for height in 25..tip.block_height { + let ops = peer + .get_burnchain_block_ops_at_height(height + 1) + .unwrap_or(vec![]); + let sn = { + let ih = peer.sortdb().index_handle(&tip.sortition_id); + let sn = ih.get_block_snapshot_by_height(height).unwrap().unwrap(); + sn + }; + test_debug!( + "boot_peer tip height={} hash={}", + sn.block_height, + &sn.burn_header_hash + ); + test_debug!("ops = {:?}", &ops); + let block_header = TestPeer::make_next_burnchain_block( + &boot_peer.config.burnchain, + sn.block_height, + &sn.burn_header_hash, + ops.len() as u64, + false, + ); + TestPeer::add_burnchain_block(&boot_peer.config.burnchain, &block_header, ops.clone()); + } + { + let mut node = boot_peer.stacks_node.take().unwrap(); + let tx = node.chainstate.staging_db_tx_begin().unwrap(); + for shadow_block in shadow_blocks.into_iter() { + tx.add_shadow_block(&shadow_block).unwrap(); + } + tx.commit().unwrap(); + 
boot_peer.stacks_node = Some(node); + } + + let (mut boot_dns_client, boot_dns_thread_handle) = dns_thread_start(100); + // start running that peer so we can boot off of it let (term_sx, term_rx) = sync_channel(1); thread::scope(|s| { diff --git a/stackslib/src/net/tests/inv/nakamoto.rs b/stackslib/src/net/tests/inv/nakamoto.rs index fac9623d3f..5f889cde3e 100644 --- a/stackslib/src/net/tests/inv/nakamoto.rs +++ b/stackslib/src/net/tests/inv/nakamoto.rs @@ -404,15 +404,12 @@ pub fn make_nakamoto_peers_from_invs<'a>( bitvecs: Vec>, num_peers: usize, ) -> (TestPeer<'a>, Vec>) { - inner_make_nakamoto_peers_from_invs( - test_name, - observer, - rc_len, - prepare_len, - bitvecs, - num_peers, - vec![], - ) + make_nakamoto_peers_from_invs_ext(test_name, observer, bitvecs, |boot_plan| { + boot_plan + .with_pox_constants(rc_len, prepare_len) + .with_extra_peers(num_peers) + .with_initial_balances(vec![]) + }) } /// NOTE: The second return value does _not_ need `<'a>`, since `observer` is never installed into @@ -426,31 +423,26 @@ pub fn make_nakamoto_peers_from_invs_and_balances<'a>( num_peers: usize, initial_balances: Vec<(PrincipalData, u64)>, ) -> (TestPeer<'a>, Vec>) { - inner_make_nakamoto_peers_from_invs( - test_name, - observer, - rc_len, - prepare_len, - bitvecs, - num_peers, - initial_balances, - ) + make_nakamoto_peers_from_invs_ext(test_name, observer, bitvecs, |boot_plan| { + boot_plan + .with_pox_constants(rc_len, prepare_len) + .with_extra_peers(num_peers) + .with_initial_balances(initial_balances) + }) } /// Make peers from inventories and balances -fn inner_make_nakamoto_peers_from_invs<'a>( +/// NOTE: The second return value does _not_ need `<'a>`, since `observer` is never installed into +/// the peers here. However, it appears unavoidable to the borrow-checker. 
+pub fn make_nakamoto_peers_from_invs_ext<'a, F>( test_name: &str, observer: &'a TestEventObserver, - rc_len: u32, - prepare_len: u32, bitvecs: Vec>, - num_peers: usize, - mut initial_balances: Vec<(PrincipalData, u64)>, -) -> (TestPeer<'a>, Vec>) { - for bitvec in bitvecs.iter() { - assert_eq!(bitvec.len() as u32, rc_len); - } - + boot_config: F, +) -> (TestPeer<'a>, Vec>) +where + F: FnOnce(NakamotoBootPlan) -> NakamotoBootPlan, +{ let private_key = StacksPrivateKey::from_seed(&[2]); let addr = StacksAddress::from_public_keys( C32_ADDRESS_VERSION_TESTNET_SINGLESIG, @@ -461,6 +453,7 @@ fn inner_make_nakamoto_peers_from_invs<'a>( .unwrap(); let recipient_addr = StacksAddress::from_string("ST2YM3J4KQK09V670TD6ZZ1XYNYCNGCWCVTASN5VM").unwrap(); + let mut initial_balances = vec![(addr.to_account_principal(), 1_000_000)]; let mut sender_nonce = 0; @@ -525,14 +518,13 @@ fn inner_make_nakamoto_peers_from_invs<'a>( 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, ]); - initial_balances.push((addr.into(), 1_000_000)); - let plan = NakamotoBootPlan::new(test_name) - .with_private_key(private_key) - .with_pox_constants(rc_len, prepare_len) - .with_initial_balances(initial_balances) - .with_extra_peers(num_peers) - .with_test_signers(test_signers) - .with_test_stackers(test_stackers); + let mut plan = boot_config( + NakamotoBootPlan::new(test_name) + .with_private_key(private_key) + .with_test_signers(test_signers) + .with_test_stackers(test_stackers), + ); + plan.initial_balances.append(&mut initial_balances); let (peer, other_peers) = plan.boot_into_nakamoto_peers(boot_tenures, Some(observer)); (peer, other_peers) @@ -2382,3 +2374,87 @@ fn test_nakamoto_make_tenure_inv_from_old_tips() { assert_eq!(bits, expected_bits[0..bit_len]); } } + +#[test] +fn test_nakamoto_invs_shadow_blocks() { + let observer = TestEventObserver::new(); + let sender_key = StacksPrivateKey::new(); + let sender_addr = to_addr(&sender_key); + let initial_balances = 
vec![(sender_addr.to_account_principal(), 1000000000)]; + let mut bitvecs = vec![vec![ + true, true, true, true, true, true, true, true, true, true, + ]]; + + let (mut peer, _) = make_nakamoto_peers_from_invs_and_balances( + function_name!(), + &observer, + 10, + 3, + bitvecs.clone(), + 0, + initial_balances, + ); + let nakamoto_start = + NakamotoBootPlan::nakamoto_first_tenure_height(&peer.config.burnchain.pox_constants); + + let mut expected_ids = vec![]; + + // construct and add shadow blocks to this peer's chainstate + peer.refresh_burnchain_view(); + let shadow_block = peer.make_shadow_tenure(None); + expected_ids.push(shadow_block.block_id()); + peer.mine_nakamoto_on(vec![shadow_block]); + + peer.refresh_burnchain_view(); + let (naka_block, ..) = peer.single_block_tenure(&sender_key, |_| {}, |_| {}, |_| true); + expected_ids.push(naka_block.block_id()); + peer.mine_nakamoto_on(vec![naka_block]); + + peer.refresh_burnchain_view(); + let shadow_block = peer.make_shadow_tenure(None); + expected_ids.push(shadow_block.block_id()); + peer.mine_nakamoto_on(vec![shadow_block]); + + peer.refresh_burnchain_view(); + let (naka_block, ..) = peer.single_block_tenure(&sender_key, |_| {}, |_| {}, |_| true); + expected_ids.push(naka_block.block_id()); + peer.mine_nakamoto_on(vec![naka_block]); + + peer.refresh_burnchain_view(); + let shadow_block = peer.make_shadow_tenure(None); + expected_ids.push(shadow_block.block_id()); + peer.mine_nakamoto_on(vec![shadow_block]); + + peer.refresh_burnchain_view(); + let (naka_block, ..) 
= peer.single_block_tenure(&sender_key, |_| {}, |_| {}, |_| true); + expected_ids.push(naka_block.block_id()); + peer.mine_nakamoto_on(vec![naka_block]); + + let (mut peer, reward_cycle_invs) = + peer_get_nakamoto_invs(peer, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]); + + // the inv should show `true` for each shadow tenure + bitvecs.push(vec![true, true, true, true, true, true]); + check_inv_messages(bitvecs, 10, nakamoto_start, reward_cycle_invs); + + // shadow blocks are part of the history + peer.refresh_burnchain_view(); + let tip = peer.network.stacks_tip.block_id(); + + let mut stored_block_ids = vec![]; + let mut cursor = tip; + for _ in 0..expected_ids.len() { + let block = peer + .chainstate() + .nakamoto_blocks_db() + .get_nakamoto_block(&cursor) + .unwrap() + .unwrap() + .0; + stored_block_ids.push(block.block_id()); + cursor = block.header.parent_block_id; + } + + stored_block_ids.reverse(); + assert_eq!(stored_block_ids, expected_ids); +} diff --git a/stackslib/src/net/tests/mempool/mod.rs b/stackslib/src/net/tests/mempool/mod.rs index 7a44a56788..d3f30aca19 100644 --- a/stackslib/src/net/tests/mempool/mod.rs +++ b/stackslib/src/net/tests/mempool/mod.rs @@ -973,63 +973,68 @@ pub fn test_mempool_storage_nakamoto() { StacksAddress::from_string("ST2YM3J4KQK09V670TD6ZZ1XYNYCNGCWCVTASN5VM").unwrap(); let mempool_txs = RefCell::new(vec![]); - let blocks_and_sizes = peer.make_nakamoto_tenure_and( - tenure_change_tx, - coinbase_tx, - &mut test_signers, - |_| {}, - |miner, chainstate, sortdb, blocks_so_far| { - let mut txs = vec![]; - if blocks_so_far.len() < num_blocks { - let account = get_account(chainstate, sortdb, &addr); - - let stx_transfer = make_token_transfer( - chainstate, - sortdb, - &private_key, - account.nonce, - 200, - 200, - &recipient_addr, - ); - txs.push(stx_transfer.clone()); - (*mempool_txs.borrow_mut()).push(stx_transfer.clone()); - all_txs.push(stx_transfer.clone()); - } - txs - }, - |_| { - let tip = 
NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + let blocks_and_sizes = peer + .make_nakamoto_tenure_and( + tenure_change_tx, + coinbase_tx, + &mut test_signers, + |_| {}, + |miner, chainstate, sortdb, blocks_so_far| { + let mut txs = vec![]; + if blocks_so_far.len() < num_blocks { + let account = get_account(chainstate, sortdb, &addr); + + let stx_transfer = make_token_transfer( + chainstate, + sortdb, + &private_key, + account.nonce, + 200, + 200, + &recipient_addr, + ); + txs.push(stx_transfer.clone()); + (*mempool_txs.borrow_mut()).push(stx_transfer.clone()); + all_txs.push(stx_transfer.clone()); + } + txs + }, + |_| { + let tip = + NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + let sort_tip = SortitionDB::get_block_snapshot_consensus( + sortdb.conn(), + &tip.consensus_hash, + ) .unwrap() .unwrap(); - let sort_tip = - SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &tip.consensus_hash) + let epoch = SortitionDB::get_stacks_epoch(sortdb.conn(), sort_tip.block_height) .unwrap() .unwrap(); - let epoch = SortitionDB::get_stacks_epoch(sortdb.conn(), sort_tip.block_height) - .unwrap() - .unwrap(); - - // submit each transaction to the mempool - for mempool_tx in (*mempool_txs.borrow()).as_slice() { - mempool - .submit( - &mut chainstate, - &sortdb, - &tip.consensus_hash, - &tip.anchored_header.block_hash(), - &mempool_tx, - None, - &epoch.block_limit, - &epoch.epoch_id, - ) - .unwrap(); - } - (*mempool_txs.borrow_mut()).clear(); - true - }, - ); + // submit each transaction to the mempool + for mempool_tx in (*mempool_txs.borrow()).as_slice() { + mempool + .submit( + &mut chainstate, + &sortdb, + &tip.consensus_hash, + &tip.anchored_header.block_hash(), + &mempool_tx, + None, + &epoch.block_limit, + &epoch.epoch_id, + ) + .unwrap(); + } + + (*mempool_txs.borrow_mut()).clear(); + true + }, + ) + .unwrap(); total_blocks += num_blocks; } diff --git a/stackslib/src/net/tests/mod.rs 
b/stackslib/src/net/tests/mod.rs index d9c7402bf8..53d6ec9fa1 100644 --- a/stackslib/src/net/tests/mod.rs +++ b/stackslib/src/net/tests/mod.rs @@ -105,6 +105,8 @@ pub struct NakamotoBootPlan { pub num_peers: usize, /// Whether to add an initial balance for `private_key`'s account pub add_default_balance: bool, + /// Whether or not to produce malleablized blocks + pub malleablized_blocks: bool, pub network_id: u32, } @@ -121,6 +123,7 @@ impl NakamotoBootPlan { observer: Some(TestEventObserver::new()), num_peers: 0, add_default_balance: true, + malleablized_blocks: true, network_id: TestPeerConfig::default().network_id, } } @@ -177,6 +180,11 @@ impl NakamotoBootPlan { self } + pub fn with_malleablized_blocks(mut self, malleablized_blocks: bool) -> Self { + self.malleablized_blocks = malleablized_blocks; + self + } + /// This is the first tenure in which nakamoto blocks will be built. /// However, it is also the last sortition for an epoch 2.x block. pub fn nakamoto_start_burn_height(pox_consts: &PoxConstants) -> u64 { @@ -347,7 +355,7 @@ impl NakamotoBootPlan { fn boot_nakamoto_peers<'a>( mut self, observer: Option<&'a TestEventObserver>, - ) -> (TestPeer<'a>, Vec) { + ) -> (TestPeer<'a>, Vec>) { let mut peer_config = TestPeerConfig::new(&self.test_name, 0, 0); peer_config.network_id = self.network_id; peer_config.private_key = self.private_key.clone(); @@ -406,6 +414,8 @@ impl NakamotoBootPlan { peer_config.burnchain.pox_constants = self.pox_constants.clone(); let mut peer = TestPeer::new_with_observer(peer_config.clone(), observer); + peer.mine_malleablized_blocks = self.malleablized_blocks; + let mut other_peers = vec![]; for i in 0..self.num_peers { let mut other_config = peer_config.clone(); @@ -416,7 +426,11 @@ impl NakamotoBootPlan { other_config.private_key = StacksPrivateKey::from_seed(&(i as u128).to_be_bytes()); other_config.add_neighbor(&peer.to_neighbor()); - other_peers.push(TestPeer::new_with_observer(other_config, None)); + + let mut other_peer = 
TestPeer::new_with_observer(other_config, None); + other_peer.mine_malleablized_blocks = self.malleablized_blocks; + + other_peers.push(other_peer); } self.advance_to_nakamoto(&mut peer, &mut other_peers); @@ -652,7 +666,7 @@ impl NakamotoBootPlan { self, boot_plan: Vec, observer: Option<&'a TestEventObserver>, - ) -> (TestPeer<'a>, Vec) { + ) -> (TestPeer<'a>, Vec>) { let test_signers = self.test_signers.clone(); let pox_constants = self.pox_constants.clone(); let test_stackers = self.test_stackers.clone(); diff --git a/stx-genesis/chainstate-test.txt b/stx-genesis/chainstate-test.txt index 614cf3d9f4..6eedf241d1 100644 --- a/stx-genesis/chainstate-test.txt +++ b/stx-genesis/chainstate-test.txt @@ -69,4 +69,5 @@ SM1ZH700J7CEDSEHM5AJ4C4MKKWNESTS35DD3SZM5,13888889,2267 SM260QHD6ZM2KKPBKZB8PFE5XWP0MHSKTD1B7BHYR,208333333,45467 SM260QHD6ZM2KKPBKZB8PFE5XWP0MHSKTD1B7BHYR,208333333,6587 SM260QHD6ZM2KKPBKZB8PFE5XWP0MHSKTD1B7BHYR,208333333,2267 +SP2CTPPV8BHBVSQR727A3MK00ZD85RNY903KAG9F3,12345678,35 -----END STX VESTING----- \ No newline at end of file diff --git a/stx-genesis/chainstate-test.txt.sha256 b/stx-genesis/chainstate-test.txt.sha256 index 56782ae494..69ac95c254 100644 --- a/stx-genesis/chainstate-test.txt.sha256 +++ b/stx-genesis/chainstate-test.txt.sha256 @@ -1 +1 @@ -014402b47d53b0716402c172fa746adf308b03a826ebea91944a5eb6a304a823 \ No newline at end of file +088c3caea982a8f6f74dda48ec5f06f51f7605def9760a971b1acd763ee6b7cf \ No newline at end of file diff --git a/testnet/stacks-node/Cargo.toml b/testnet/stacks-node/Cargo.toml index 0c68d22ee7..e902140428 100644 --- a/testnet/stacks-node/Cargo.toml +++ b/testnet/stacks-node/Cargo.toml @@ -14,7 +14,6 @@ serde_derive = "1" serde_json = { version = "1.0", features = ["arbitrary_precision", "raw_value"] } stacks = { package = "stackslib", path = "../../stackslib" } stx-genesis = { path = "../../stx-genesis"} -toml = "0.5.6" base64 = "0.12.0" backtrace = "0.3.50" libc = "0.2.151" diff --git a/testnet/stacks-node/conf 
b/testnet/stacks-node/conf new file mode 120000 index 0000000000..94edd3b5d4 --- /dev/null +++ b/testnet/stacks-node/conf @@ -0,0 +1 @@ +../../stackslib/conf/ \ No newline at end of file diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index 727483886e..f3aaa95ab5 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -48,6 +48,12 @@ use stacks::chainstate::burn::Opcodes; use stacks::chainstate::coordinator::comm::CoordinatorChannels; #[cfg(test)] use stacks::chainstate::stacks::address::PoxAddress; +use stacks::config::BurnchainConfig; +#[cfg(test)] +use stacks::config::{ + OP_TX_ANY_ESTIM_SIZE, OP_TX_DELEGATE_STACKS_ESTIM_SIZE, OP_TX_PRE_STACKS_ESTIM_SIZE, + OP_TX_STACK_STX_ESTIM_SIZE, OP_TX_TRANSFER_STACKS_ESTIM_SIZE, OP_TX_VOTE_AGG_ESTIM_SIZE, +}; use stacks::core::{EpochList, StacksEpochId}; use stacks::monitoring::{increment_btc_blocks_received_counter, increment_btc_ops_sent_counter}; use stacks::net::http::{HttpRequestContents, HttpResponsePayload}; @@ -74,12 +80,6 @@ use url::Url; use super::super::operations::BurnchainOpSigner; use super::super::Config; use super::{BurnchainController, BurnchainTip, Error as BurnchainControllerError}; -use crate::config::BurnchainConfig; -#[cfg(test)] -use crate::config::{ - OP_TX_ANY_ESTIM_SIZE, OP_TX_DELEGATE_STACKS_ESTIM_SIZE, OP_TX_PRE_STACKS_ESTIM_SIZE, - OP_TX_STACK_STX_ESTIM_SIZE, OP_TX_TRANSFER_STACKS_ESTIM_SIZE, OP_TX_VOTE_AGG_ESTIM_SIZE, -}; /// The number of bitcoin blocks that can have /// passed since the UTXO cache was last refreshed before @@ -2806,13 +2806,13 @@ mod tests { use std::io::Write; use stacks::burnchains::BurnchainSigner; + use stacks::config::DEFAULT_SATS_PER_VB; use stacks_common::deps_common::bitcoin::blockdata::script::Builder; use stacks_common::types::chainstate::{BlockHeaderHash, StacksAddress, 
VRFSeed}; use stacks_common::util::hash::to_hex; use stacks_common::util::secp256k1::Secp256k1PrivateKey; use super::*; - use crate::config::DEFAULT_SATS_PER_VB; #[test] fn test_get_satoshis_per_byte() { diff --git a/testnet/stacks-node/src/burnchains/mocknet_controller.rs b/testnet/stacks-node/src/burnchains/mocknet_controller.rs index 15adebef95..9a2811d1b5 100644 --- a/testnet/stacks-node/src/burnchains/mocknet_controller.rs +++ b/testnet/stacks-node/src/burnchains/mocknet_controller.rs @@ -274,6 +274,7 @@ impl BurnchainController for MocknetController { .unwrap(); let new_chain_tip = burn_tx .process_block_ops( + false, &self.burnchain, &chain_tip.block_snapshot, &next_block_header, diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index 88bfc8dae7..2f71838adb 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -18,7 +18,7 @@ use std::collections::hash_map::Entry; use std::collections::{HashMap, HashSet}; use std::path::PathBuf; use std::sync::mpsc::{channel, Receiver, Sender}; -use std::sync::Mutex; +use std::sync::{Arc, Mutex}; use std::thread::sleep; use std::time::Duration; @@ -26,6 +26,8 @@ use clarity::vm::analysis::contract_interface_builder::build_contract_interface; use clarity::vm::costs::ExecutionCost; use clarity::vm::events::{FTEventType, NFTEventType, STXEventType}; use clarity::vm::types::{AssetIdentifier, QualifiedContractIdentifier, Value}; +#[cfg(any(test, feature = "testing"))] +use lazy_static::lazy_static; use rand::Rng; use rusqlite::{params, Connection}; use serde_json::json; @@ -49,6 +51,7 @@ use stacks::chainstate::stacks::miner::TransactionEvent; use stacks::chainstate::stacks::{ StacksBlock, StacksMicroblock, StacksTransaction, TransactionPayload, }; +use stacks::config::{EventKeyType, EventObserverConfig}; use stacks::core::mempool::{MemPoolDropReason, MemPoolEventDispatcher, ProposalCallbackReceiver}; use 
stacks::libstackerdb::StackerDBChunkData; use stacks::net::api::postblock_proposal::{ @@ -59,6 +62,8 @@ use stacks::net::http::HttpRequestContents; use stacks::net::httpcore::{send_http_request, StacksHttpRequest}; use stacks::net::stackerdb::StackerDBEventDispatcher; use stacks::util::hash::to_hex; +#[cfg(any(test, feature = "testing"))] +use stacks::util::tests::TestFlag; use stacks::util_lib::db::Error as db_error; use stacks_common::bitvec::BitVec; use stacks_common::codec::StacksMessageCodec; @@ -68,7 +73,11 @@ use stacks_common::util::hash::{bytes_to_hex, Sha512Trunc256Sum}; use stacks_common::util::secp256k1::MessageSignature; use url::Url; -use super::config::{EventKeyType, EventObserverConfig}; +#[cfg(any(test, feature = "testing"))] +lazy_static! { + /// Do not announce a signed/mined block to the network when set to true. + pub static ref TEST_SKIP_BLOCK_ANNOUNCEMENT: TestFlag = TestFlag::default(); +} #[derive(Debug, Clone)] struct EventObserver { @@ -107,17 +116,8 @@ pub const PATH_BLOCK_PROCESSED: &str = "new_block"; pub const PATH_ATTACHMENT_PROCESSED: &str = "attachments/new"; pub const PATH_PROPOSAL_RESPONSE: &str = "proposal_response"; -pub static STACKER_DB_CHANNEL: StackerDBChannel = StackerDBChannel::new(); - /// This struct receives StackerDB event callbacks without registering -/// over the JSON/RPC interface. To ensure that any event observer -/// uses the same channel, we use a lazy_static global for the channel (this -/// implements a singleton using STACKER_DB_CHANNEL). -/// -/// This is in place because a Nakamoto miner needs to receive -/// StackerDB events. It could either poll the database (seems like a -/// bad idea) or listen for events. Registering for RPC callbacks -/// seems bad. So instead, it uses a singleton sync channel. +/// over the JSON/RPC interface. 
pub struct StackerDBChannel { sender_info: Mutex>, } @@ -923,6 +923,8 @@ pub struct EventDispatcher { /// Index into `registered_observers` that will receive block proposal events (Nakamoto and /// later) block_proposal_observers_lookup: HashSet, + /// Channel for sending StackerDB events to the miner coordinator + pub stackerdb_channel: Arc>, } /// This struct is used specifically for receiving proposal responses. @@ -1115,6 +1117,7 @@ impl Default for EventDispatcher { impl EventDispatcher { pub fn new() -> EventDispatcher { EventDispatcher { + stackerdb_channel: Arc::new(Mutex::new(StackerDBChannel::new())), registered_observers: vec![], contract_events_observers_lookup: HashMap::new(), assets_observers_lookup: HashMap::new(), @@ -1305,6 +1308,11 @@ impl EventDispatcher { let mature_rewards = serde_json::Value::Array(mature_rewards_vec); + #[cfg(any(test, feature = "testing"))] + if test_skip_block_announcement(&block) { + return; + } + for (observer_id, filtered_events_ids) in dispatch_matrix.iter().enumerate() { let filtered_events: Vec<_> = filtered_events_ids .iter() @@ -1544,7 +1552,11 @@ impl EventDispatcher { let interested_observers = self.filter_observers(&self.stackerdb_observers_lookup, false); - let interested_receiver = STACKER_DB_CHANNEL.is_active(&contract_id); + let stackerdb_channel = self + .stackerdb_channel + .lock() + .expect("FATAL: failed to lock StackerDB channel mutex"); + let interested_receiver = stackerdb_channel.is_active(&contract_id); if interested_observers.is_empty() && interested_receiver.is_none() { return; } @@ -1697,6 +1709,18 @@ impl EventDispatcher { } } +#[cfg(any(test, feature = "testing"))] +fn test_skip_block_announcement(block: &StacksBlockEventData) -> bool { + if TEST_SKIP_BLOCK_ANNOUNCEMENT.get() { + warn!( + "Skipping new block announcement due to testing directive"; + "block_hash" => %block.block_hash + ); + return true; + } + false +} + #[cfg(test)] mod test { use std::net::TcpListener; @@ -1732,8 +1756,8 @@ mod 
test { let parent_burn_block_hash = BurnchainHeaderHash([0; 32]); let parent_burn_block_height = 0; let parent_burn_block_timestamp = 0; - let anchored_consumed = ExecutionCost::zero(); - let mblock_confirmed_consumed = ExecutionCost::zero(); + let anchored_consumed = ExecutionCost::ZERO; + let mblock_confirmed_consumed = ExecutionCost::ZERO; let pox_constants = PoxConstants::testnet_default(); let signer_bitvec = BitVec::zeros(2).expect("Failed to create BitVec with length 2"); let block_timestamp = Some(123456); @@ -1802,8 +1826,8 @@ mod test { let parent_burn_block_hash = BurnchainHeaderHash([0; 32]); let parent_burn_block_height = 0; let parent_burn_block_timestamp = 0; - let anchored_consumed = ExecutionCost::zero(); - let mblock_confirmed_consumed = ExecutionCost::zero(); + let anchored_consumed = ExecutionCost::ZERO; + let mblock_confirmed_consumed = ExecutionCost::ZERO; let pox_constants = PoxConstants::testnet_default(); let signer_bitvec = BitVec::zeros(2).expect("Failed to create BitVec with length 2"); let block_timestamp = Some(123456); diff --git a/testnet/stacks-node/src/globals.rs b/testnet/stacks-node/src/globals.rs index c285c6a168..2a9a601723 100644 --- a/testnet/stacks-node/src/globals.rs +++ b/testnet/stacks-node/src/globals.rs @@ -10,10 +10,10 @@ use stacks::chainstate::coordinator::comm::CoordinatorChannels; use stacks::chainstate::stacks::db::unconfirmed::UnconfirmedTxMap; use stacks::chainstate::stacks::db::StacksChainState; use stacks::chainstate::stacks::miner::MinerStatus; +use stacks::config::MinerConfig; use stacks::net::NetworkResult; use stacks_common::types::chainstate::{BlockHeaderHash, BurnchainHeaderHash, ConsensusHash}; -use crate::config::MinerConfig; use crate::neon::Counters; use crate::neon_node::LeaderKeyRegistrationState; use crate::run_loop::RegisteredKey; diff --git a/testnet/stacks-node/src/main.rs b/testnet/stacks-node/src/main.rs index 4fa1c5e5a7..7916de9d00 100644 --- a/testnet/stacks-node/src/main.rs +++ 
b/testnet/stacks-node/src/main.rs @@ -16,8 +16,6 @@ use stacks_common::util::hash::hex_bytes; pub mod monitoring; pub mod burnchains; -pub mod chain_data; -pub mod config; pub mod event_dispatcher; pub mod genesis_data; pub mod globals; @@ -41,19 +39,19 @@ use stacks::chainstate::coordinator::{get_next_recipients, OnChainRewardSetProvi use stacks::chainstate::stacks::address::PoxAddress; use stacks::chainstate::stacks::db::blocks::DummyEventDispatcher; use stacks::chainstate::stacks::db::StacksChainState; +use stacks::config::chain_data::MinerStats; +pub use stacks::config::{Config, ConfigFile}; #[cfg(not(any(target_os = "macos", target_os = "windows", target_arch = "arm")))] use tikv_jemallocator::Jemalloc; pub use self::burnchains::{ BitcoinRegtestController, BurnchainController, BurnchainTip, MocknetController, }; -pub use self::config::{Config, ConfigFile}; pub use self::event_dispatcher::EventDispatcher; pub use self::keychain::Keychain; pub use self::node::{ChainTip, Node}; pub use self::run_loop::{helium, neon}; pub use self::tenure::Tenure; -use crate::chain_data::MinerStats; use crate::neon_node::{BlockMinerThread, TipCandidate}; use crate::run_loop::boot_nakamoto; diff --git a/testnet/stacks-node/src/nakamoto_node.rs b/testnet/stacks-node/src/nakamoto_node.rs index edaf12e98b..09f8c7285f 100644 --- a/testnet/stacks-node/src/nakamoto_node.rs +++ b/testnet/stacks-node/src/nakamoto_node.rs @@ -28,6 +28,8 @@ use stacks::monitoring::update_active_miners_count_gauge; use stacks::net::atlas::AtlasConfig; use stacks::net::relay::Relayer; use stacks::net::stackerdb::StackerDBs; +use stacks::net::Error as NetError; +use stacks::util_lib::db::Error as DBError; use stacks_common::types::chainstate::SortitionId; use stacks_common::types::StacksEpochId; @@ -42,7 +44,8 @@ use crate::run_loop::RegisteredKey; pub mod miner; pub mod peer; pub mod relayer; -pub mod sign_coordinator; +pub mod signer_coordinator; +pub mod stackerdb_listener; use self::peer::PeerThread; use 
self::relayer::{RelayerDirective, RelayerThread}; @@ -71,46 +74,71 @@ pub struct StacksNode { } /// Types of errors that can arise during Nakamoto StacksNode operation -#[derive(Debug)] +#[derive(thiserror::Error, Debug)] pub enum Error { /// Can't find the block sortition snapshot for the chain tip + #[error("Can't find the block sortition snapshot for the chain tip")] SnapshotNotFoundForChainTip, /// The burnchain tip changed while this operation was in progress + #[error("The burnchain tip changed while this operation was in progress")] BurnchainTipChanged, /// The Stacks tip changed while this operation was in progress + #[error("The Stacks tip changed while this operation was in progress")] StacksTipChanged, /// Signers rejected a block + #[error("Signers rejected a block")] SignersRejected, /// Error while spawning a subordinate thread + #[error("Error while spawning a subordinate thread: {0}")] SpawnError(std::io::Error), /// Injected testing errors + #[error("Injected testing errors")] FaultInjection, /// This miner was elected, but another sortition occurred before mining started + #[error("This miner was elected, but another sortition occurred before mining started")] MissedMiningOpportunity, /// Attempted to mine while there was no active VRF key + #[error("Attempted to mine while there was no active VRF key")] NoVRFKeyActive, /// The parent block or tenure could not be found + #[error("The parent block or tenure could not be found")] ParentNotFound, /// Something unexpected happened (e.g., hash mismatches) + #[error("Something unexpected happened (e.g., hash mismatches)")] UnexpectedChainState, /// A burnchain operation failed when submitting it to the burnchain + #[error("A burnchain operation failed when submitting it to the burnchain: {0}")] BurnchainSubmissionFailed(BurnchainsError), /// A new parent has been discovered since mining started + #[error("A new parent has been discovered since mining started")] NewParentDiscovered, /// A failure 
occurred while constructing a VRF Proof + #[error("A failure occurred while constructing a VRF Proof")] BadVrfConstruction, - CannotSelfSign, - MiningFailure(ChainstateError), + #[error("A failure occurred while mining: {0}")] + MiningFailure(#[from] ChainstateError), /// The miner didn't accept their own block + #[error("The miner didn't accept their own block: {0}")] AcceptFailure(ChainstateError), + #[error("A failure occurred while signing a miner's block: {0}")] MinerSignatureError(&'static str), + #[error("A failure occurred while signing a signer's block: {0}")] SignerSignatureError(String), /// A failure occurred while configuring the miner thread + #[error("A failure occurred while configuring the miner thread: {0}")] MinerConfigurationFailed(&'static str), /// An error occurred while operating as the signing coordinator + #[error("An error occurred while operating as the signing coordinator: {0}")] SigningCoordinatorFailure(String), // The thread that we tried to send to has closed + #[error("The thread that we tried to send to has closed")] ChannelClosed, + /// DBError wrapper + #[error("DBError: {0}")] + DBError(#[from] DBError), + /// NetError wrapper + #[error("NetError: {0}")] + NetError(#[from] NetError), } impl StacksNode { diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 411e4f3be8..d9edf97e90 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -47,8 +47,8 @@ use stacks_common::types::{PrivateKey, StacksEpochId}; use stacks_common::util::vrf::VRFProof; use super::relayer::RelayerThread; -use super::sign_coordinator::SignCoordinator; use super::{Config, Error as NakamotoNodeError, EventDispatcher, Keychain}; +use crate::nakamoto_node::signer_coordinator::SignerCoordinator; use crate::nakamoto_node::VRF_MOCK_MINER_KEY; use crate::neon_node; use crate::run_loop::nakamoto::Globals; @@ -139,11 +139,17 @@ pub struct 
BlockMinerThread { burnchain: Burnchain, /// Last block mined last_block_mined: Option, + /// Number of blocks mined since a tenure change/extend was attempted + mined_blocks: u64, /// Copy of the node's registered VRF key registered_key: RegisteredKey, /// Burnchain block snapshot which elected this miner burn_election_block: BlockSnapshot, - /// Current burnchain tip + /// Current burnchain tip as of the last TenureChange + /// * if the last tenure-change was a BlockFound, then this is the same as the + /// `burn_election_block`. + /// * otherwise, if the last tenure-change is an Extend, then this is the sortition of the burn + /// view consensus hash in the TenureChange burn_block: BlockSnapshot, /// The start of the parent tenure for this tenure parent_tenure_id: StacksBlockId, @@ -154,6 +160,8 @@ pub struct BlockMinerThread { /// Handle to the p2p thread for block broadcast p2p_handle: NetworkHandle, signer_set_cache: Option, + /// The time at which tenure change/extend was attempted + tenure_change_time: Instant, } impl BlockMinerThread { @@ -172,6 +180,7 @@ impl BlockMinerThread { keychain: rt.keychain.clone(), burnchain: rt.burnchain.clone(), last_block_mined: None, + mined_blocks: 0, registered_key, burn_election_block, burn_block, @@ -180,6 +189,7 @@ impl BlockMinerThread { reason, p2p_handle: rt.get_p2p_handle(), signer_set_cache: None, + tenure_change_time: Instant::now(), } } @@ -256,7 +266,7 @@ impl BlockMinerThread { globals.block_miner(); let prior_miner_result = prior_miner .join() - .map_err(|_| NakamotoNodeError::MiningFailure(ChainstateError::MinerAborted))?; + .map_err(|_| ChainstateError::MinerAborted)?; if let Err(e) = prior_miner_result { // it's okay if the prior miner thread exited with an error. 
// in many cases this is expected (i.e., a burnchain block occurred) @@ -285,178 +295,260 @@ impl BlockMinerThread { if let Some(prior_miner) = prior_miner { Self::stop_miner(&self.globals, prior_miner)?; } - let mut stackerdbs = StackerDBs::connect(&self.config.get_stacker_db_file_path(), true) - .map_err(|e| NakamotoNodeError::MiningFailure(ChainstateError::NetError(e)))?; + let mut stackerdbs = StackerDBs::connect(&self.config.get_stacker_db_file_path(), true)?; let mut last_block_rejected = false; + let reward_set = self.load_signer_set()?; + let Some(miner_privkey) = self.config.miner.mining_key else { + return Err(NakamotoNodeError::MinerConfigurationFailed( + "No mining key configured, cannot mine", + )); + }; + let sortdb = SortitionDB::open( + &self.config.get_burn_db_file_path(), + true, + self.burnchain.pox_constants.clone(), + ) + .expect("FATAL: could not open sortition DB"); + let burn_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) + .expect("FATAL: failed to query sortition DB for canonical burn chain tip"); + + // Start the signer coordinator + let mut coordinator = SignerCoordinator::new( + self.event_dispatcher.stackerdb_channel.clone(), + self.globals.should_keep_running.clone(), + &reward_set, + &burn_tip, + &self.burnchain, + miner_privkey, + &self.config, + ) + .map_err(|e| { + NakamotoNodeError::SigningCoordinatorFailure(format!( + "Failed to initialize the signing coordinator. Cannot mine! {e:?}" + )) + })?; + // now, actually run this tenure loop { - #[cfg(test)] - if *TEST_MINE_STALL.lock().unwrap() == Some(true) { - // Do an extra check just so we don't log EVERY time. - warn!("Mining is stalled due to testing directive"); - while *TEST_MINE_STALL.lock().unwrap() == Some(true) { - std::thread::sleep(std::time::Duration::from_millis(10)); - } - warn!("Mining is no longer stalled due to testing directive. 
Continuing..."); + if let Err(e) = self.miner_main_loop( + &mut coordinator, + &sortdb, + &mut stackerdbs, + &mut last_block_rejected, + &reward_set, + ) { + // Before stopping this miner, shutdown the coordinator thread. + coordinator.shutdown(); + return Err(e); } - let new_block = loop { - // If we're mock mining, we may not have processed the block that the - // actual tenure winner committed to yet. So, before attempting to - // mock mine, check if the parent is processed. - if self.config.get_node_config(false).mock_mining { - let burn_db_path = self.config.get_burn_db_file_path(); - let mut burn_db = SortitionDB::open( - &burn_db_path, - true, - self.burnchain.pox_constants.clone(), - ) - .expect("FATAL: could not open sortition DB"); - let burn_tip_changed = self.check_burn_tip_changed(&burn_db); - let mut chain_state = neon_node::open_chainstate_with_faults(&self.config) - .expect("FATAL: could not open chainstate DB"); - match burn_tip_changed - .and_then(|_| self.load_block_parent_info(&mut burn_db, &mut chain_state)) - { - Ok(..) => {} - Err(NakamotoNodeError::ParentNotFound) => { - info!("Mock miner has not processed parent block yet, sleeping and trying again"); - thread::sleep(Duration::from_millis(ABORT_TRY_AGAIN_MS)); - continue; - } - Err(e) => { - warn!("Mock miner failed to load parent info: {e:?}"); - return Err(e); - } - } - } + } + } - match self.mine_block() { - Ok(x) => { - if !self.validate_timestamp(&x)? { - info!("Block mined too quickly. Will try again."; - "block_timestamp" => x.header.timestamp, - ); - continue; - } - break Some(x); - } - Err(NakamotoNodeError::MiningFailure(ChainstateError::MinerAborted)) => { - info!("Miner interrupted while mining, will try again"); - // sleep, and try again. if the miner was interrupted because the burnchain - // view changed, the next `mine_block()` invocation will error + /// The main loop for the miner thread. 
This is where the miner will mine + /// blocks and then attempt to sign and broadcast them. + fn miner_main_loop( + &mut self, + coordinator: &mut SignerCoordinator, + sortdb: &SortitionDB, + stackerdbs: &mut StackerDBs, + last_block_rejected: &mut bool, + reward_set: &RewardSet, + ) -> Result<(), NakamotoNodeError> { + #[cfg(test)] + if *TEST_MINE_STALL.lock().unwrap() == Some(true) { + // Do an extra check just so we don't log EVERY time. + warn!("Mining is stalled due to testing directive"); + while *TEST_MINE_STALL.lock().unwrap() == Some(true) { + std::thread::sleep(std::time::Duration::from_millis(10)); + } + warn!("Mining is no longer stalled due to testing directive. Continuing..."); + } + let new_block = loop { + // If we're mock mining, we may not have processed the block that the + // actual tenure winner committed to yet. So, before attempting to + // mock mine, check if the parent is processed. + if self.config.get_node_config(false).mock_mining { + let burn_db_path = self.config.get_burn_db_file_path(); + let mut burn_db = + SortitionDB::open(&burn_db_path, true, self.burnchain.pox_constants.clone()) + .expect("FATAL: could not open sortition DB"); + let burn_tip_changed = self.check_burn_tip_changed(&burn_db); + let mut chain_state = neon_node::open_chainstate_with_faults(&self.config) + .expect("FATAL: could not open chainstate DB"); + match burn_tip_changed + .and_then(|_| self.load_block_parent_info(&mut burn_db, &mut chain_state)) + { + Ok(..) 
=> {} + Err(NakamotoNodeError::ParentNotFound) => { + info!("Mock miner has not processed parent block yet, sleeping and trying again"); thread::sleep(Duration::from_millis(ABORT_TRY_AGAIN_MS)); continue; } - Err(NakamotoNodeError::MiningFailure( - ChainstateError::NoTransactionsToMine, - )) => { - debug!("Miner did not find any transactions to mine"); - break None; - } Err(e) => { - warn!("Failed to mine block: {e:?}"); - - // try again, in case a new sortition is pending - self.globals - .raise_initiative(format!("MiningFailure: {e:?}")); - return Err(NakamotoNodeError::MiningFailure( - ChainstateError::MinerAborted, - )); + warn!("Mock miner failed to load parent info: {e:?}"); + return Err(e); } } - }; - - if let Some(mut new_block) = new_block { - Self::fault_injection_block_broadcast_stall(&new_block); - let (reward_set, signer_signature) = match self - .gather_signatures(&mut new_block, &mut stackerdbs) - { - Ok(x) => x, - Err(e) => match e { - NakamotoNodeError::StacksTipChanged => { - info!("Stacks tip changed while waiting for signatures"; - "signer_sighash" => %new_block.header.signer_signature_hash(), - "block_height" => new_block.header.chain_length, - "consensus_hash" => %new_block.header.consensus_hash, - ); - return Err(e); - } - NakamotoNodeError::BurnchainTipChanged => { - info!("Burnchain tip changed while waiting for signatures"; - "signer_sighash" => %new_block.header.signer_signature_hash(), - "block_height" => new_block.header.chain_length, - "consensus_hash" => %new_block.header.consensus_hash, - ); - return Err(e); - } - _ => { - // Sleep for a bit to allow signers to catch up - let pause_ms = if last_block_rejected { - self.config.miner.subsequent_rejection_pause_ms - } else { - self.config.miner.first_rejection_pause_ms - }; - - error!("Error while gathering signatures: {e:?}. 
Will try mining again in {pause_ms}."; - "signer_sighash" => %new_block.header.signer_signature_hash(), - "block_height" => new_block.header.chain_length, - "consensus_hash" => %new_block.header.consensus_hash, - ); - thread::sleep(Duration::from_millis(pause_ms)); - last_block_rejected = true; - continue; - } - }, - }; - last_block_rejected = false; + } - new_block.header.signer_signature = signer_signature; - if let Err(e) = self.broadcast(new_block.clone(), reward_set, &stackerdbs) { - warn!("Error accepting own block: {e:?}. Will try mining again."); + match self.mine_block(coordinator) { + Ok(x) => { + if !self.validate_timestamp(&x)? { + info!("Block mined too quickly. Will try again."; + "block_timestamp" => x.header.timestamp, + ); + continue; + } + break Some(x); + } + Err(NakamotoNodeError::MiningFailure(ChainstateError::MinerAborted)) => { + info!("Miner interrupted while mining, will try again"); + // sleep, and try again. if the miner was interrupted because the burnchain + // view changed, the next `mine_block()` invocation will error + thread::sleep(Duration::from_millis(ABORT_TRY_AGAIN_MS)); continue; - } else { - info!( - "Miner: Block signed by signer set and broadcasted"; - "signer_sighash" => %new_block.header.signer_signature_hash(), - "stacks_block_hash" => %new_block.header.block_hash(), - "stacks_block_id" => %new_block.header.block_id(), - "block_height" => new_block.header.chain_length, - "consensus_hash" => %new_block.header.consensus_hash, - ); } + Err(NakamotoNodeError::MiningFailure(ChainstateError::NoTransactionsToMine)) => { + debug!("Miner did not find any transactions to mine"); + break None; + } + Err(e) => { + warn!("Failed to mine block: {e:?}"); - // update mined-block counters and mined-tenure counters - self.globals.counters.bump_naka_mined_blocks(); - if self.last_block_mined.is_some() { - // this is the first block of the tenure, bump tenure counter - self.globals.counters.bump_naka_mined_tenures(); + // try again, in case 
a new sortition is pending + self.globals + .raise_initiative(format!("MiningFailure: {e:?}")); + return Err(ChainstateError::MinerAborted.into()); } + } + }; - // wake up chains coordinator - Self::fault_injection_block_announce_stall(&new_block); - self.globals.coord().announce_new_stacks_block(); + if let Some(mut new_block) = new_block { + Self::fault_injection_block_broadcast_stall(&new_block); + let signer_signature = match self.propose_block( + coordinator, + &mut new_block, + sortdb, + stackerdbs, + ) { + Ok(x) => x, + Err(e) => match e { + NakamotoNodeError::StacksTipChanged => { + info!("Stacks tip changed while waiting for signatures"; + "signer_sighash" => %new_block.header.signer_signature_hash(), + "block_height" => new_block.header.chain_length, + "consensus_hash" => %new_block.header.consensus_hash, + ); + return Err(e); + } + NakamotoNodeError::BurnchainTipChanged => { + info!("Burnchain tip changed while waiting for signatures"; + "signer_sighash" => %new_block.header.signer_signature_hash(), + "block_height" => new_block.header.chain_length, + "consensus_hash" => %new_block.header.consensus_hash, + ); + return Err(e); + } + _ => { + // Sleep for a bit to allow signers to catch up + let pause_ms = if *last_block_rejected { + self.config.miner.subsequent_rejection_pause_ms + } else { + self.config.miner.first_rejection_pause_ms + }; + + error!("Error while gathering signatures: {e:?}. 
Will try mining again in {pause_ms}."; + "signer_sighash" => %new_block.header.signer_signature_hash(), + "block_height" => new_block.header.chain_length, + "consensus_hash" => %new_block.header.consensus_hash, + ); + thread::sleep(Duration::from_millis(pause_ms)); + *last_block_rejected = true; + return Ok(()); + } + }, + }; + *last_block_rejected = false; - self.last_block_mined = Some(new_block); + new_block.header.signer_signature = signer_signature; + if let Err(e) = self.broadcast(new_block.clone(), reward_set, &stackerdbs) { + warn!("Error accepting own block: {e:?}. Will try mining again."); + return Ok(()); + } else { + info!( + "Miner: Block signed by signer set and broadcasted"; + "signer_sighash" => %new_block.header.signer_signature_hash(), + "stacks_block_hash" => %new_block.header.block_hash(), + "stacks_block_id" => %new_block.header.block_id(), + "block_height" => new_block.header.chain_length, + "consensus_hash" => %new_block.header.consensus_hash, + ); } - let Ok(sort_db) = SortitionDB::open( - &self.config.get_burn_db_file_path(), - true, - self.burnchain.pox_constants.clone(), - ) else { - error!("Failed to open sortition DB. 
Will try mining again."); - continue; - }; + // update mined-block counters and mined-tenure counters + self.globals.counters.bump_naka_mined_blocks(); + if self.last_block_mined.is_some() { + // this is the first block of the tenure, bump tenure counter + self.globals.counters.bump_naka_mined_tenures(); + } - let wait_start = Instant::now(); - while wait_start.elapsed() < self.config.miner.wait_on_interim_blocks { - thread::sleep(Duration::from_millis(ABORT_TRY_AGAIN_MS)); - if self.check_burn_tip_changed(&sort_db).is_err() { - return Err(NakamotoNodeError::BurnchainTipChanged); - } + // wake up chains coordinator + Self::fault_injection_block_announce_stall(&new_block); + self.globals.coord().announce_new_stacks_block(); + + self.last_block_mined = Some(new_block); + self.mined_blocks += 1; + } + + let Ok(sort_db) = SortitionDB::open( + &self.config.get_burn_db_file_path(), + true, + self.burnchain.pox_constants.clone(), + ) else { + error!("Failed to open sortition DB. Will try mining again."); + return Ok(()); + }; + + let wait_start = Instant::now(); + while wait_start.elapsed() < self.config.miner.wait_on_interim_blocks { + thread::sleep(Duration::from_millis(ABORT_TRY_AGAIN_MS)); + if self.check_burn_tip_changed(&sort_db).is_err() { + return Err(NakamotoNodeError::BurnchainTipChanged); } } + + Ok(()) + } + + fn propose_block( + &self, + coordinator: &mut SignerCoordinator, + new_block: &mut NakamotoBlock, + sortdb: &SortitionDB, + stackerdbs: &mut StackerDBs, + ) -> Result, NakamotoNodeError> { + if self.config.get_node_config(false).mock_mining { + // If we're mock mining, we don't actually propose the block. + return Ok(Vec::new()); + } + + let mut chain_state = + neon_node::open_chainstate_with_faults(&self.config).map_err(|e| { + NakamotoNodeError::SigningCoordinatorFailure(format!( + "Failed to open chainstate DB. Cannot mine! 
{e:?}" + )) + })?; + coordinator.propose_block( + new_block, + &self.burn_block, + &self.burnchain, + sortdb, + &mut chain_state, + stackerdbs, + &self.globals.counters, + &self.burn_election_block.consensus_hash, + ) } /// Load the signer set active for this miner's blocks. This is the @@ -524,67 +616,6 @@ impl BlockMinerThread { Ok(reward_set) } - /// Gather a list of signatures from the signers for the block - fn gather_signatures( - &mut self, - new_block: &mut NakamotoBlock, - stackerdbs: &mut StackerDBs, - ) -> Result<(RewardSet, Vec), NakamotoNodeError> { - let Some(miner_privkey) = self.config.miner.mining_key else { - return Err(NakamotoNodeError::MinerConfigurationFailed( - "No mining key configured, cannot mine", - )); - }; - let sort_db = SortitionDB::open( - &self.config.get_burn_db_file_path(), - true, - self.burnchain.pox_constants.clone(), - ) - .map_err(|e| { - NakamotoNodeError::SigningCoordinatorFailure(format!( - "Failed to open sortition DB. Cannot mine! {e:?}" - )) - })?; - - let reward_set = self.load_signer_set()?; - - if self.config.get_node_config(false).mock_mining { - return Ok((reward_set, Vec::new())); - } - - let mut coordinator = SignCoordinator::new( - &reward_set, - miner_privkey, - &self.config, - self.globals.should_keep_running.clone(), - ) - .map_err(|e| { - NakamotoNodeError::SigningCoordinatorFailure(format!( - "Failed to initialize the signing coordinator. Cannot mine! {e:?}" - )) - })?; - - let mut chain_state = - neon_node::open_chainstate_with_faults(&self.config).map_err(|e| { - NakamotoNodeError::SigningCoordinatorFailure(format!( - "Failed to open chainstate DB. Cannot mine! 
{e:?}" - )) - })?; - - let signature = coordinator.run_sign_v0( - new_block, - &self.burn_block, - &self.burnchain, - &sort_db, - &mut chain_state, - stackerdbs, - &self.globals.counters, - &self.burn_election_block.consensus_hash, - )?; - - Ok((reward_set, signature)) - } - /// Fault injection -- possibly fail to broadcast /// Return true to drop the block fn fault_injection_broadcast_fail(&self) -> bool { @@ -609,7 +640,7 @@ impl BlockMinerThread { sort_db: &SortitionDB, chain_state: &mut StacksChainState, block: &NakamotoBlock, - reward_set: RewardSet, + reward_set: &RewardSet, ) -> Result<(), ChainstateError> { if Self::fault_injection_skip_block_broadcast() { warn!( @@ -666,9 +697,20 @@ impl BlockMinerThread { fn broadcast( &mut self, block: NakamotoBlock, - reward_set: RewardSet, + reward_set: &RewardSet, stackerdbs: &StackerDBs, ) -> Result<(), NakamotoNodeError> { + if self.config.get_node_config(false).mock_mining { + // If we're mock mining, we don't actually broadcast the block. 
+ return Ok(()); + } + + if self.config.miner.mining_key.is_none() { + return Err(NakamotoNodeError::MinerConfigurationFailed( + "No mining key configured, cannot mine", + )); + }; + let mut chain_state = neon_node::open_chainstate_with_faults(&self.config) .expect("FATAL: could not open chainstate DB"); let sort_db = SortitionDB::open( @@ -678,12 +720,6 @@ impl BlockMinerThread { ) .expect("FATAL: could not open sortition DB"); - if self.config.miner.mining_key.is_none() { - return Err(NakamotoNodeError::MinerConfigurationFailed( - "No mining key configured, cannot mine", - )); - }; - // push block via p2p block push self.broadcast_p2p(&sort_db, &mut chain_state, &block, reward_set) .map_err(NakamotoNodeError::AcceptFailure)?; @@ -703,7 +739,7 @@ impl BlockMinerThread { let miners_contract_id = boot_code_id(MINERS_NAME, chain_state.mainnet); let mut miners_session = StackerDBSession::new(&rpc_socket.to_string(), miners_contract_id); - SignCoordinator::send_miners_message( + SignerCoordinator::send_miners_message( miner_privkey, &sort_db, &self.burn_block, @@ -995,8 +1031,12 @@ impl BlockMinerThread { #[cfg_attr(test, mutants::skip)] /// Try to mine a Stacks block by assembling one from mempool transactions and sending a /// burnchain block-commit transaction. If we succeed, then return the assembled block. 
- fn mine_block(&mut self) -> Result { + fn mine_block( + &mut self, + coordinator: &mut SignerCoordinator, + ) -> Result { debug!("block miner thread ID is {:?}", thread::current().id()); + info!("Miner: Mining block"); let burn_db_path = self.config.get_burn_db_file_path(); let reward_set = self.load_signer_set()?; @@ -1039,6 +1079,7 @@ impl BlockMinerThread { &parent_block_info, vrf_proof, target_epoch_id, + coordinator, )?; parent_block_info.stacks_parent_header.microblock_tail = None; @@ -1051,9 +1092,7 @@ impl BlockMinerThread { ) { // treat a too-soon-to-mine block as an interrupt: this will let the caller sleep and then re-evaluate // all the pre-mining checks (burnchain tip changes, signal interrupts, etc.) - return Err(NakamotoNodeError::MiningFailure( - ChainstateError::MinerAborted, - )); + return Err(ChainstateError::MinerAborted.into()); } // build the block itself @@ -1081,13 +1120,11 @@ impl BlockMinerThread { ) { error!("Relayer: Failure mining anchored block: {e}"); } - NakamotoNodeError::MiningFailure(e) + e })?; if block.txs.is_empty() { - return Err(NakamotoNodeError::MiningFailure( - ChainstateError::NoTransactionsToMine, - )); + return Err(ChainstateError::NoTransactionsToMine.into()); } let mining_key = self.keychain.get_nakamoto_sk(); let miner_signature = mining_key @@ -1124,24 +1161,49 @@ impl BlockMinerThread { #[cfg_attr(test, mutants::skip)] /// Create the tenure start info for the block we're going to build fn make_tenure_start_info( - &self, + &mut self, chainstate: &StacksChainState, parent_block_info: &ParentStacksBlockInfo, vrf_proof: VRFProof, target_epoch_id: StacksEpochId, + coordinator: &mut SignerCoordinator, ) -> Result { let current_miner_nonce = parent_block_info.coinbase_nonce; - let Some(parent_tenure_info) = &parent_block_info.parent_tenure else { - return Ok(NakamotoTenureInfo { - coinbase_tx: None, - tenure_change_tx: None, - }); + let parent_tenure_info = match &parent_block_info.parent_tenure { + Some(info) => 
info.clone(), + None => { + // We may be able to extend the current tenure + if self.last_block_mined.is_none() { + debug!("Miner: No parent tenure and no last block mined"); + return Ok(NakamotoTenureInfo { + coinbase_tx: None, + tenure_change_tx: None, + }); + } + ParentTenureInfo { + parent_tenure_blocks: self.mined_blocks, + parent_tenure_consensus_hash: self.burn_election_block.consensus_hash, + } + } }; if self.last_block_mined.is_some() { - return Ok(NakamotoTenureInfo { - coinbase_tx: None, - tenure_change_tx: None, - }); + // Check if we can extend the current tenure + let tenure_extend_timestamp = coordinator.get_tenure_extend_timestamp(); + if get_epoch_time_secs() <= tenure_extend_timestamp + && self.tenure_change_time.elapsed() <= self.config.miner.tenure_timeout + { + return Ok(NakamotoTenureInfo { + coinbase_tx: None, + tenure_change_tx: None, + }); + } + info!("Miner: Time-based tenure extend"; + "current_timestamp" => get_epoch_time_secs(), + "tenure_extend_timestamp" => tenure_extend_timestamp, + "tenure_change_time_elapsed" => self.tenure_change_time.elapsed().as_secs(), + "tenure_timeout_secs" => self.config.miner.tenure_timeout.as_secs(), + ); + self.tenure_extend_reset(); } let parent_block_id = parent_block_info.stacks_parent_header.index_block_hash(); @@ -1208,6 +1270,14 @@ impl BlockMinerThread { Ok(()) } } + + fn tenure_extend_reset(&mut self) { + self.tenure_change_time = Instant::now(); + self.reason = MinerReason::Extended { + burn_view_consensus_hash: self.burn_block.consensus_hash, + }; + self.mined_blocks = 0; + } } impl ParentStacksBlockInfo { diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index 7c8dc6f2c5..8cc1293acd 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -31,14 +31,14 @@ use stacks::chainstate::burn::operations::{ }; use stacks::chainstate::burn::{BlockSnapshot, ConsensusHash}; use 
stacks::chainstate::nakamoto::coordinator::get_nakamoto_next_recipients; -use stacks::chainstate::nakamoto::NakamotoChainState; +use stacks::chainstate::nakamoto::{NakamotoBlockHeader, NakamotoChainState}; use stacks::chainstate::stacks::address::PoxAddress; use stacks::chainstate::stacks::db::StacksChainState; use stacks::chainstate::stacks::miner::{ get_mining_spend_amount, signal_mining_blocked, signal_mining_ready, }; use stacks::core::mempool::MemPoolDB; -use stacks::core::STACKS_EPOCH_3_0_MARKER; +use stacks::core::STACKS_EPOCH_3_1_MARKER; use stacks::monitoring::increment_stx_blocks_mined_counter; use stacks::net::db::LocalPeer; use stacks::net::p2p::NetworkHandle; @@ -382,20 +382,50 @@ impl RelayerThread { /// parent block could be an epoch 2 block. In this case, the right thing to do is to wait for /// the next block-commit. pub(crate) fn choose_miner_directive( - config: &Config, - sortdb: &SortitionDB, + &self, sn: BlockSnapshot, won_sortition: bool, committed_index_hash: StacksBlockId, ) -> Option { + let (cur_stacks_tip_ch, cur_stacks_tip_bh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortdb.conn()) + .expect("FATAL: failed to query sortition DB for stacks tip"); + + let stacks_tip = StacksBlockId::new(&cur_stacks_tip_ch, &cur_stacks_tip_bh); + let highest_tenure_start_block_header = NakamotoChainState::get_tenure_start_block_header( + &mut self.chainstate.index_conn(), + &stacks_tip, + &cur_stacks_tip_ch, + ) + .expect( + "Relayer: Failed to get tenure-start block header for stacks tip {stacks_tip}: {e:?}", + ) + .expect("Relayer: Failed to find tenure-start block header for stacks tip {stacks_tip}"); + let directive = if sn.sortition { Some( - if won_sortition || config.get_node_config(false).mock_mining { + if won_sortition || self.config.get_node_config(false).mock_mining { + info!("Relayer: Won sortition; begin tenure."); MinerDirective::BeginTenure { parent_tenure_start: committed_index_hash, burnchain_tip: sn, } + } else if 
committed_index_hash + != highest_tenure_start_block_header.index_block_hash() + { + info!( + "Relayer: Winner of sortition {} did not commit to the correct parent tenure. Attempt to continue tenure.", + &sn.consensus_hash + ); + // We didn't win the sortition, but the miner that did win + // did not commit to the correct parent tenure. This means + // it will be unable to produce a valid block, so we should + // continue our tenure. + MinerDirective::ContinueTenure { + new_burn_view: sn.consensus_hash, + } } else { + info!("Relayer: Stop tenure"); MinerDirective::StopTenure }, ) @@ -404,16 +434,16 @@ impl RelayerThread { // If it's in epoch 2.x, then we must always begin a new tenure, but we can't do so // right now since this sortition has no winner. let (cur_stacks_tip_ch, _cur_stacks_tip_bh) = - SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn()) + SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortdb.conn()) .expect("FATAL: failed to query sortition DB for stacks tip"); let stacks_tip_sn = - SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &cur_stacks_tip_ch) + SortitionDB::get_block_snapshot_consensus(self.sortdb.conn(), &cur_stacks_tip_ch) .expect("FATAL: failed to query sortiiton DB for epoch") .expect("FATAL: no sortition for canonical stacks tip"); let cur_epoch = - SortitionDB::get_stacks_epoch(sortdb.conn(), stacks_tip_sn.block_height) + SortitionDB::get_stacks_epoch(self.sortdb.conn(), stacks_tip_sn.block_height) .expect("FATAL: failed to query sortition DB for epoch") .expect("FATAL: no epoch defined for existing sortition"); @@ -424,6 +454,7 @@ impl RelayerThread { ); None } else { + info!("Relayer: No sortition; continue tenure."); Some(MinerDirective::ContinueTenure { new_burn_view: sn.consensus_hash, }) @@ -480,13 +511,7 @@ impl RelayerThread { return Ok(None); } - let directive_opt = Self::choose_miner_directive( - &self.config, - &self.sortdb, - sn, - won_sortition, - committed_index_hash, - ); + let directive_opt = 
self.choose_miner_directive(sn, won_sortition, committed_index_hash); Ok(directive_opt) } @@ -557,6 +582,7 @@ impl RelayerThread { tip_block_ch: &ConsensusHash, tip_block_bh: &BlockHeaderHash, ) -> Result { + let tip_block_id = StacksBlockId::new(&tip_block_ch, &tip_block_bh); let sort_tip = SortitionDB::get_canonical_burn_chain_tip(self.sortdb.conn()) .map_err(|_| NakamotoNodeError::SnapshotNotFoundForChainTip)?; @@ -630,18 +656,41 @@ impl RelayerThread { return Err(NakamotoNodeError::ParentNotFound); }; - // find the parent block-commit of this commit + // find the parent block-commit of this commit, so we can find the parent vtxindex + // if the parent is a shadow block, then the vtxindex would be 0. let commit_parent_block_burn_height = tip_tenure_sortition.block_height; - let Ok(Some(parent_winning_tx)) = SortitionDB::get_block_commit( - self.sortdb.conn(), - &tip_tenure_sortition.winning_block_txid, - &tip_tenure_sortition.sortition_id, - ) else { - error!("Relayer: Failed to lookup the block commit of parent tenure ID"; "tenure_consensus_hash" => %tip_block_ch); - return Err(NakamotoNodeError::SnapshotNotFoundForChainTip); - }; + let commit_parent_winning_vtxindex = if let Ok(Some(parent_winning_tx)) = + SortitionDB::get_block_commit( + self.sortdb.conn(), + &tip_tenure_sortition.winning_block_txid, + &tip_tenure_sortition.sortition_id, + ) { + parent_winning_tx.vtxindex + } else { + debug!( + "{}/{} ({}) must be a shadow block, since it has no block-commit", + &tip_block_bh, &tip_block_ch, &tip_block_id + ); + let Ok(Some(parent_version)) = + NakamotoChainState::get_nakamoto_block_version(self.chainstate.db(), &tip_block_id) + else { + error!( + "Relayer: Failed to lookup block version of {}", + &tip_block_id + ); + return Err(NakamotoNodeError::ParentNotFound); + }; - let commit_parent_winning_vtxindex = parent_winning_tx.vtxindex; + if !NakamotoBlockHeader::is_shadow_block_version(parent_version) { + error!( + "Relayer: parent block-commit of {} not 
found, and it is not a shadow block", + &tip_block_id + ); + return Err(NakamotoNodeError::ParentNotFound); + } + + 0 + }; // epoch in which this commit will be sent (affects how the burnchain client processes it) let Ok(Some(target_epoch)) = @@ -689,7 +738,7 @@ impl RelayerThread { key_block_ptr: u32::try_from(key.block_height) .expect("FATAL: burn block height exceeded u32"), key_vtxindex: u16::try_from(key.op_vtxindex).expect("FATAL: vtxindex exceeded u16"), - memo: vec![STACKS_EPOCH_3_0_MARKER], + memo: vec![STACKS_EPOCH_3_1_MARKER], new_seed: VRFSeed::from_proof(&tip_vrf_proof), parent_block_ptr: u32::try_from(commit_parent_block_burn_height) .expect("FATAL: burn block height exceeded u32"), @@ -852,104 +901,187 @@ impl RelayerThread { Ok(()) } + /// Get the public key hash for the mining key. + fn get_mining_key_pkh(&self) -> Option { + let Some(ref mining_key) = self.config.miner.mining_key else { + return None; + }; + Some(Hash160::from_node_public_key( + &StacksPublicKey::from_private(mining_key), + )) + } + + /// Get the tenure-start block header hash of a given consensus hash. + /// For Nakamoto blocks, this is the first block in the tenure identified by the consensus + /// hash. + /// For epoch2 blocks, this is simply the block whose winning sortition happened in the + /// sortition identified by the consensus hash. + /// + /// `tip_block_id` is the chain tip from which to perform the query. + fn get_tenure_bhh( + &self, + tip_block_id: &StacksBlockId, + ch: &ConsensusHash, + ) -> Result { + let highest_tenure_start_block_header = NakamotoChainState::get_tenure_start_block_header( + &mut self.chainstate.index_conn(), + tip_block_id, + &ch, + )? 
+ .ok_or_else(|| { + error!( + "Relayer: Failed to find tenure-start block header for stacks tip {tip_block_id}" + ); + NakamotoNodeError::ParentNotFound + })?; + Ok(BlockHeaderHash( + highest_tenure_start_block_header.index_block_hash().0, + )) + } + + /// Determine the type of tenure change to issue based on whether this + /// miner was the last successful miner (miner of the canonical tip). + fn determine_tenure_type( + &self, + canonical_snapshot: BlockSnapshot, + last_snapshot: BlockSnapshot, + new_burn_view: ConsensusHash, + mining_pkh: Hash160, + ) -> (StacksBlockId, BlockSnapshot, MinerReason) { + if canonical_snapshot.miner_pk_hash != Some(mining_pkh) { + debug!("Relayer: Miner was not the last successful miner. Issue a new tenure change payload."); + ( + StacksBlockId(last_snapshot.winning_stacks_block_hash.0), + last_snapshot, + MinerReason::EmptyTenure, + ) + } else { + debug!("Relayer: Miner was the last successful miner. Issue a tenure extend from the chain tip."); + ( + self.sortdb.get_canonical_stacks_tip_block_id(), + canonical_snapshot, + MinerReason::Extended { + burn_view_consensus_hash: new_burn_view, + }, + ) + } + } + + /// Get the block snapshot of the most recent sortition that committed to + /// the canonical tip. If the latest sortition did not commit to the + /// canonical tip, then the tip's tenure is the last good sortition. 
+ fn get_last_good_block_snapshot( + &self, + burn_tip: &BlockSnapshot, + highest_tenure_bhh: &BlockHeaderHash, + canonical_stacks_tip_ch: &ConsensusHash, + ) -> Result { + let ih = self.sortdb.index_handle(&burn_tip.sortition_id); + let sn = ih + .get_last_snapshot_with_sortition(burn_tip.block_height) + .map_err(|e| { + error!("Relayer: failed to get last snapshot with sortition: {e:?}"); + NakamotoNodeError::SnapshotNotFoundForChainTip + })?; + if &sn.winning_stacks_block_hash != highest_tenure_bhh { + info!( + "Relayer: Sortition winner is not committed to the canonical tip; allowing last miner to extend"; + "burn_block_height" => burn_tip.block_height, + "consensus_hash" => %burn_tip.consensus_hash, + ); + + SortitionDB::get_block_snapshot_consensus(self.sortdb.conn(), canonical_stacks_tip_ch) + .map_err(|e| { + error!("Relayer: failed to get block snapshot for canonical tip: {e:?}"); + NakamotoNodeError::SnapshotNotFoundForChainTip + })? + .ok_or_else(|| { + error!("Relayer: failed to get block snapshot for canonical tip"); + NakamotoNodeError::SnapshotNotFoundForChainTip + }) + } else { + Ok(sn) + } + } + + /// Attempt to continue a miner's tenure into the next burn block. 
+ /// This is allowed if the miner won the last good sortition and one of the + /// following conditions is met: + /// - There was no sortition in the latest burn block + /// - The winner of the latest sortition did not commit to the canonical tip + /// - The winner of the latest sortition did not mine any blocks within the + /// timeout period (not yet implemented) fn continue_tenure(&mut self, new_burn_view: ConsensusHash) -> Result<(), NakamotoNodeError> { if let Err(e) = self.stop_tenure() { error!("Relayer: Failed to stop tenure: {e:?}"); return Ok(()); } debug!("Relayer: successfully stopped tenure."); - // Check if we should undergo a tenure change to switch to the new burn view + + // Get the necessary snapshots and state let burn_tip = - SortitionDB::get_block_snapshot_consensus(self.sortdb.conn(), &new_burn_view) - .map_err(|e| { - error!("Relayer: failed to get block snapshot for new burn view: {e:?}"); - NakamotoNodeError::SnapshotNotFoundForChainTip - })? + SortitionDB::get_block_snapshot_consensus(self.sortdb.conn(), &new_burn_view)? .ok_or_else(|| { error!("Relayer: failed to get block snapshot for new burn view"); NakamotoNodeError::SnapshotNotFoundForChainTip })?; - let (canonical_stacks_tip_ch, canonical_stacks_tip_bh) = SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortdb.conn()).unwrap(); let canonical_stacks_tip = StacksBlockId::new(&canonical_stacks_tip_ch, &canonical_stacks_tip_bh); - - let Some(ref mining_key) = self.config.miner.mining_key else { + let Some(mining_pkh) = self.get_mining_key_pkh() else { return Ok(()); }; - let mining_pkh = Hash160::from_node_public_key(&StacksPublicKey::from_private(mining_key)); - - // If we won the last sortition, then we should start a new tenure off of it. 
- let last_block_election_snapshot = { - let ih = self.sortdb.index_handle(&burn_tip.sortition_id); - ih.get_last_snapshot_with_sortition(burn_tip.block_height) - .map_err(|e| { - error!("Relayer: failed to get last snapshot with sortition: {e:?}"); - NakamotoNodeError::SnapshotNotFoundForChainTip - })? - }; + let highest_tenure_bhh = + self.get_tenure_bhh(&canonical_stacks_tip, &canonical_stacks_tip_ch)?; + let last_good_block_election_snapshot = self.get_last_good_block_snapshot( + &burn_tip, + &highest_tenure_bhh, + &canonical_stacks_tip_ch, + )?; - let won_last_sortition = last_block_election_snapshot.miner_pk_hash == Some(mining_pkh); - debug!( - "Relayer: Current burn block had no sortition. Checking for tenure continuation."; + let won_last_sortition = + last_good_block_election_snapshot.miner_pk_hash == Some(mining_pkh); + info!( + "Relayer: Current burn block had no sortition or a bad sortition. Checking for tenure continuation."; "won_last_sortition" => won_last_sortition, "current_mining_pkh" => %mining_pkh, - "last_block_election_snapshot.consensus_hash" => %last_block_election_snapshot.consensus_hash, - "last_block_election_snapshot.miner_pk_hash" => ?last_block_election_snapshot.miner_pk_hash, + "last_good_block_election_snapshot.consensus_hash" => %last_good_block_election_snapshot.consensus_hash, + "last_good_block_election_snapshot.miner_pk_hash" => ?last_good_block_election_snapshot.miner_pk_hash, "canonical_stacks_tip_id" => %canonical_stacks_tip, "canonical_stacks_tip_ch" => %canonical_stacks_tip_ch, "burn_view_ch" => %new_burn_view, ); if !won_last_sortition { + info!("Relayer: Did not win the last sortition. Cannot continue tenure."); return Ok(()); } - let canonical_block_snapshot = - SortitionDB::get_block_snapshot_consensus(self.sortdb.conn(), &canonical_stacks_tip_ch) - .map_err(|e| { - error!("Relayer: failed to get block snapshot for canonical tip: {e:?}"); - NakamotoNodeError::SnapshotNotFoundForChainTip - })? 
- .ok_or_else(|| { - error!("Relayer: failed to get block snapshot for canonical tip"); - NakamotoNodeError::SnapshotNotFoundForChainTip - })?; - - let won_canonical_block_snapshot = - canonical_block_snapshot.miner_pk_hash == Some(mining_pkh); + let canonical_snapshot = SortitionDB::get_block_snapshot_consensus( + self.sortdb.conn(), + &canonical_stacks_tip_ch, + )? + .ok_or_else(|| { + error!("Relayer: failed to get block snapshot for canonical tip"); + NakamotoNodeError::SnapshotNotFoundForChainTip + })?; + let (parent_tenure_start, block_election_snapshot, reason) = self.determine_tenure_type( + canonical_snapshot, + last_good_block_election_snapshot, + new_burn_view, + mining_pkh, + ); - let (parent_tenure_start, block_election_snapshot, reason) = - if !won_canonical_block_snapshot { - debug!("Relayer: Failed to issue a tenure change payload in our last tenure. Issue a new tenure change payload."); - ( - StacksBlockId(last_block_election_snapshot.winning_stacks_block_hash.0), - last_block_election_snapshot, - MinerReason::EmptyTenure, - ) - } else { - debug!("Relayer: Successfully issued a tenure change payload in its tenure. 
Issue a continue extend from the chain tip."); - ( - canonical_stacks_tip, //For tenure extend, we should be extending off the canonical tip - canonical_block_snapshot, - MinerReason::Extended { - burn_view_consensus_hash: new_burn_view, - }, - ) - }; - match self.start_new_tenure( + if let Err(e) = self.start_new_tenure( parent_tenure_start, block_election_snapshot, burn_tip, reason, ) { - Ok(()) => { - debug!("Relayer: successfully started new tenure."); - } - Err(e) => { - error!("Relayer: Failed to start new tenure: {e:?}"); - } + error!("Relayer: Failed to start new tenure: {e:?}"); + } else { + debug!("Relayer: successfully started new tenure."); } Ok(()) } @@ -1029,11 +1161,11 @@ impl RelayerThread { tip_block_ch: ConsensusHash, tip_block_bh: BlockHeaderHash, ) -> Result<(), NakamotoNodeError> { - let mut last_committed = self.make_block_commit(&tip_block_ch, &tip_block_bh)?; if self.fault_injection_skip_block_commit() { warn!("Relayer: not submitting block-commit to bitcoin network due to test directive."); return Ok(()); } + let mut last_committed = self.make_block_commit(&tip_block_ch, &tip_block_bh)?; // last chance -- is this still the stacks tip? let (cur_stacks_tip_ch, cur_stacks_tip_bh) = diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs deleted file mode 100644 index 14eeef20b9..0000000000 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ /dev/null @@ -1,601 +0,0 @@ -// Copyright (C) 2024 Stacks Open Internet Foundation -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
-// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -use std::collections::BTreeMap; -use std::sync::atomic::{AtomicBool, Ordering}; -use std::sync::mpsc::Receiver; -use std::sync::Arc; -use std::time::Duration; - -use hashbrown::{HashMap, HashSet}; -use libsigner::v0::messages::{ - BlockAccepted, BlockResponse, MinerSlotID, SignerMessage as SignerMessageV0, -}; -use libsigner::{BlockProposal, SignerEntries, SignerEvent, SignerSession, StackerDBSession}; -use stacks::burnchains::Burnchain; -use stacks::chainstate::burn::db::sortdb::SortitionDB; -use stacks::chainstate::burn::{BlockSnapshot, ConsensusHash}; -use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; -use stacks::chainstate::stacks::boot::{NakamotoSignerEntry, RewardSet, MINERS_NAME, SIGNERS_NAME}; -use stacks::chainstate::stacks::db::StacksChainState; -use stacks::chainstate::stacks::events::StackerDBChunksEvent; -use stacks::chainstate::stacks::Error as ChainstateError; -use stacks::libstackerdb::StackerDBChunkData; -use stacks::net::stackerdb::StackerDBs; -use stacks::types::PublicKey; -use stacks::util::hash::MerkleHashFunc; -use stacks::util::secp256k1::MessageSignature; -use stacks::util_lib::boot::boot_code_id; -use stacks_common::bitvec::BitVec; -use stacks_common::codec::StacksMessageCodec; -use stacks_common::types::chainstate::{StacksPrivateKey, StacksPublicKey}; - -use super::Error as NakamotoNodeError; -use crate::event_dispatcher::STACKER_DB_CHANNEL; -use crate::neon::Counters; -use crate::Config; - -/// Fault injection flag to prevent the miner from seeing enough signer signatures. 
-/// Used to test that the signers will broadcast a block if it gets enough signatures -#[cfg(test)] -pub static TEST_IGNORE_SIGNERS: std::sync::Mutex> = std::sync::Mutex::new(None); - -/// How long should the coordinator poll on the event receiver before -/// waking up to check timeouts? -static EVENT_RECEIVER_POLL: Duration = Duration::from_millis(500); - -/// The `SignCoordinator` struct sole function is to serve as the coordinator for Nakamoto block signing. -/// This struct is used by Nakamoto miners to act as the coordinator for the blocks they produce. -pub struct SignCoordinator { - receiver: Option>, - message_key: StacksPrivateKey, - is_mainnet: bool, - miners_session: StackerDBSession, - signer_entries: HashMap, - weight_threshold: u32, - total_weight: u32, - keep_running: Arc, - pub next_signer_bitvec: BitVec<4000>, -} - -impl Drop for SignCoordinator { - fn drop(&mut self) { - STACKER_DB_CHANNEL.replace_receiver(self.receiver.take().expect( - "FATAL: lost possession of the StackerDB channel before dropping SignCoordinator", - )); - } -} - -impl SignCoordinator { - /// * `reward_set` - the active reward set data, used to construct the signer - /// set parameters. 
- /// * `aggregate_public_key` - the active aggregate key for this cycle - pub fn new( - reward_set: &RewardSet, - message_key: StacksPrivateKey, - config: &Config, - keep_running: Arc, - ) -> Result { - let is_mainnet = config.is_mainnet(); - let Some(ref reward_set_signers) = reward_set.signers else { - error!("Could not initialize signing coordinator for reward set without signer"); - debug!("reward set: {reward_set:?}"); - return Err(ChainstateError::NoRegisteredSigners(0)); - }; - - let signer_entries = SignerEntries::parse(is_mainnet, reward_set_signers).map_err(|e| { - ChainstateError::InvalidStacksBlock(format!( - "Failed to parse NakamotoSignerEntries: {e:?}" - )) - })?; - let rpc_socket = config - .node - .get_rpc_loopback() - .ok_or_else(|| ChainstateError::MinerAborted)?; - let miners_contract_id = boot_code_id(MINERS_NAME, is_mainnet); - let miners_session = StackerDBSession::new(&rpc_socket.to_string(), miners_contract_id); - - let next_signer_bitvec: BitVec<4000> = BitVec::zeros( - reward_set_signers - .clone() - .len() - .try_into() - .expect("FATAL: signer set length greater than u16"), - ) - .expect("FATAL: unable to construct initial bitvec for signer set"); - - debug!( - "Initializing miner/coordinator"; - "num_signers" => signer_entries.signer_pks.len(), - "signer_public_keys" => ?signer_entries.signer_pks, - ); - - let total_weight = reward_set.total_signing_weight().map_err(|e| { - warn!("Failed to calculate total weight for the reward set: {e:?}"); - ChainstateError::NoRegisteredSigners(0) - })?; - - let threshold = NakamotoBlockHeader::compute_voting_weight_threshold(total_weight)?; - - let signer_public_keys = reward_set_signers - .iter() - .cloned() - .enumerate() - .map(|(idx, signer)| { - let Ok(slot_id) = u32::try_from(idx) else { - return Err(ChainstateError::InvalidStacksBlock( - "Signer index exceeds u32".into(), - )); - }; - Ok((slot_id, signer)) - }) - .collect::, ChainstateError>>()?; - #[cfg(test)] - { - // In test mode, 
short-circuit spinning up the SignCoordinator if the TEST_SIGNING - // channel has been created. This allows integration tests for the stacks-node - // independent of the stacks-signer. - use crate::tests::nakamoto_integrations::TEST_SIGNING; - if TEST_SIGNING.lock().unwrap().is_some() { - debug!("Short-circuiting spinning up coordinator from signer commitments. Using test signers channel."); - let (receiver, replaced_other) = STACKER_DB_CHANNEL.register_miner_coordinator(); - if replaced_other { - warn!("Replaced the miner/coordinator receiver of a prior thread. Prior thread may have crashed."); - } - let sign_coordinator = Self { - message_key, - receiver: Some(receiver), - is_mainnet, - miners_session, - next_signer_bitvec, - signer_entries: signer_public_keys, - weight_threshold: threshold, - total_weight, - keep_running, - }; - return Ok(sign_coordinator); - } - } - - let (receiver, replaced_other) = STACKER_DB_CHANNEL.register_miner_coordinator(); - if replaced_other { - warn!("Replaced the miner/coordinator receiver of a prior thread. Prior thread may have crashed."); - } - - Ok(Self { - receiver: Some(receiver), - message_key, - is_mainnet, - miners_session, - next_signer_bitvec, - signer_entries: signer_public_keys, - weight_threshold: threshold, - total_weight, - keep_running, - }) - } - - /// Send a message over the miners contract using a `StacksPrivateKey` - #[allow(clippy::too_many_arguments)] - pub fn send_miners_message( - miner_sk: &StacksPrivateKey, - sortdb: &SortitionDB, - tip: &BlockSnapshot, - stackerdbs: &StackerDBs, - message: M, - miner_slot_id: MinerSlotID, - is_mainnet: bool, - miners_session: &mut StackerDBSession, - election_sortition: &ConsensusHash, - ) -> Result<(), String> { - let Some(slot_range) = NakamotoChainState::get_miner_slot(sortdb, tip, election_sortition) - .map_err(|e| format!("Failed to read miner slot information: {e:?}"))? 
- else { - return Err("No slot for miner".into()); - }; - - let slot_id = slot_range - .start - .saturating_add(miner_slot_id.to_u8().into()); - if !slot_range.contains(&slot_id) { - return Err("Not enough slots for miner messages".into()); - } - // Get the LAST slot version number written to the DB. If not found, use 0. - // Add 1 to get the NEXT version number - // Note: we already check above for the slot's existence - let miners_contract_id = boot_code_id(MINERS_NAME, is_mainnet); - let slot_version = stackerdbs - .get_slot_version(&miners_contract_id, slot_id) - .map_err(|e| format!("Failed to read slot version: {e:?}"))? - .unwrap_or(0) - .saturating_add(1); - let mut chunk = StackerDBChunkData::new(slot_id, slot_version, message.serialize_to_vec()); - chunk - .sign(miner_sk) - .map_err(|_| "Failed to sign StackerDB chunk")?; - - match miners_session.put_chunk(&chunk) { - Ok(ack) => { - if ack.accepted { - debug!("Wrote message to stackerdb: {ack:?}"); - Ok(()) - } else { - Err(format!("{ack:?}")) - } - } - Err(e) => Err(format!("{e:?}")), - } - } - - /// Do we ignore signer signatures? - #[cfg(test)] - fn fault_injection_ignore_signatures() -> bool { - if *TEST_IGNORE_SIGNERS.lock().unwrap() == Some(true) { - return true; - } - false - } - - #[cfg(not(test))] - fn fault_injection_ignore_signatures() -> bool { - false - } - - /// Check if the tenure needs to change - fn check_burn_tip_changed(sortdb: &SortitionDB, burn_block: &BlockSnapshot) -> bool { - let cur_burn_chain_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) - .expect("FATAL: failed to query sortition DB for canonical burn chain tip"); - - if cur_burn_chain_tip.consensus_hash != burn_block.consensus_hash { - info!("SignCoordinator: Cancel signature aggregation; burnchain tip has changed"); - true - } else { - false - } - } - - /// Start gathering signatures for a Nakamoto block. 
- /// This function begins by sending a `BlockProposal` message - /// to the signers, and then waits for the signers to respond - /// with their signatures. It does so in two ways, concurrently: - /// * It waits for signer StackerDB messages with signatures. If enough signatures can be - /// found, then the block can be broadcast. - /// * It waits for the chainstate to contain the relayed block. If so, then its signatures are - /// loaded and returned. This can happen if the node receives the block via a signer who - /// fetched all signatures and assembled the signature vector, all before we could. - // Mutants skip here: this function is covered via integration tests, - // which the mutation testing does not see. - #[cfg_attr(test, mutants::skip)] - #[allow(clippy::too_many_arguments)] - pub fn run_sign_v0( - &mut self, - block: &NakamotoBlock, - burn_tip: &BlockSnapshot, - burnchain: &Burnchain, - sortdb: &SortitionDB, - chain_state: &mut StacksChainState, - stackerdbs: &StackerDBs, - counters: &Counters, - election_sortition: &ConsensusHash, - ) -> Result, NakamotoNodeError> { - let reward_cycle_id = burnchain - .block_height_to_reward_cycle(burn_tip.block_height) - .expect("FATAL: tried to initialize coordinator before first burn block height"); - - let block_proposal = BlockProposal { - block: block.clone(), - burn_height: burn_tip.block_height, - reward_cycle: reward_cycle_id, - }; - - let block_proposal_message = SignerMessageV0::BlockProposal(block_proposal); - debug!("Sending block proposal message to signers"; - "signer_signature_hash" => %block.header.signer_signature_hash(), - ); - Self::send_miners_message::( - &self.message_key, - sortdb, - burn_tip, - stackerdbs, - block_proposal_message, - MinerSlotID::BlockProposal, - self.is_mainnet, - &mut self.miners_session, - election_sortition, - ) - .map_err(NakamotoNodeError::SigningCoordinatorFailure)?; - counters.bump_naka_proposed_blocks(); - - #[cfg(test)] - { - info!( - "SignCoordinator: sent block 
proposal to .miners, waiting for test signing channel" - ); - // In test mode, short-circuit waiting for the signers if the TEST_SIGNING - // channel has been created. This allows integration tests for the stacks-node - // independent of the stacks-signer. - if let Some(signatures) = - crate::tests::nakamoto_integrations::TestSigningChannel::get_signature() - { - debug!("Short-circuiting waiting for signers, using test signature"); - return Ok(signatures); - } - } - - let Some(ref mut receiver) = self.receiver else { - return Err(NakamotoNodeError::SigningCoordinatorFailure( - "Failed to obtain the StackerDB event receiver".into(), - )); - }; - - let mut total_weight_signed: u32 = 0; - let mut total_reject_weight: u32 = 0; - let mut responded_signers = HashSet::new(); - let mut gathered_signatures = BTreeMap::new(); - - info!("SignCoordinator: beginning to watch for block signatures OR posted blocks."; - "threshold" => self.weight_threshold, - ); - - loop { - // look in the nakamoto staging db -- a block can only get stored there if it has - // enough signing weight to clear the threshold - if let Ok(Some((stored_block, _sz))) = chain_state - .nakamoto_blocks_db() - .get_nakamoto_block(&block.block_id()) - .map_err(|e| { - warn!( - "Failed to query chainstate for block {}: {e:?}", - &block.block_id() - ); - e - }) - { - debug!("SignCoordinator: Found signatures in relayed block"); - counters.bump_naka_signer_pushed_blocks(); - return Ok(stored_block.header.signer_signature); - } - - if Self::check_burn_tip_changed(sortdb, burn_tip) { - debug!("SignCoordinator: Exiting due to new burnchain tip"); - return Err(NakamotoNodeError::BurnchainTipChanged); - } - - // one of two things can happen: - // * we get enough signatures from stackerdb from the signers, OR - // * we see our block get processed in our chainstate (meaning, the signers broadcasted - // the block and our node got it and processed it) - let event = match receiver.recv_timeout(EVENT_RECEIVER_POLL) { - 
Ok(event) => event, - Err(std::sync::mpsc::RecvTimeoutError::Timeout) => { - continue; - } - Err(std::sync::mpsc::RecvTimeoutError::Disconnected) => { - return Err(NakamotoNodeError::SigningCoordinatorFailure( - "StackerDB event receiver disconnected".into(), - )) - } - }; - - // was the node asked to stop? - if !self.keep_running.load(Ordering::SeqCst) { - info!("SignerCoordinator: received node exit request. Aborting"); - return Err(NakamotoNodeError::ChannelClosed); - } - - // check to see if this event we got is a signer event - let is_signer_event = - event.contract_id.name.starts_with(SIGNERS_NAME) && event.contract_id.is_boot(); - - if !is_signer_event { - debug!("Ignoring StackerDB event for non-signer contract"; "contract" => %event.contract_id); - continue; - } - - let modified_slots = &event.modified_slots.clone(); - - let Ok(signer_event) = SignerEvent::::try_from(event).map_err(|e| { - warn!("Failure parsing StackerDB event into signer event. Ignoring message."; "err" => ?e); - }) else { - continue; - }; - let SignerEvent::SignerMessages(signer_set, messages) = signer_event else { - debug!("Received signer event other than a signer message. Ignoring."); - continue; - }; - if signer_set != u32::try_from(reward_cycle_id % 2).unwrap() { - debug!("Received signer event for other reward cycle. 
Ignoring."); - continue; - }; - let slot_ids = modified_slots - .iter() - .map(|chunk| chunk.slot_id) - .collect::>(); - - debug!("SignCoordinator: Received messages from signers"; - "count" => messages.len(), - "slot_ids" => ?slot_ids, - "threshold" => self.weight_threshold - ); - - for (message, slot_id) in messages.into_iter().zip(slot_ids) { - let Some(signer_entry) = &self.signer_entries.get(&slot_id) else { - return Err(NakamotoNodeError::SignerSignatureError( - "Signer entry not found".into(), - )); - }; - let Ok(signer_pubkey) = StacksPublicKey::from_slice(&signer_entry.signing_key) - else { - return Err(NakamotoNodeError::SignerSignatureError( - "Failed to parse signer public key".into(), - )); - }; - - if responded_signers.contains(&signer_pubkey) { - debug!( - "Signer {slot_id} already responded for block {}. Ignoring {message:?}.", block.header.signer_signature_hash(); - "stacks_block_hash" => %block.header.block_hash(), - "stacks_block_id" => %block.header.block_id() - ); - continue; - } - - match message { - SignerMessageV0::BlockResponse(BlockResponse::Accepted(accepted)) => { - let BlockAccepted { - signer_signature_hash: response_hash, - signature, - metadata, - } = accepted; - let block_sighash = block.header.signer_signature_hash(); - if block_sighash != response_hash { - warn!( - "Processed signature for a different block. Will try to continue."; - "signature" => %signature, - "block_signer_signature_hash" => %block_sighash, - "response_hash" => %response_hash, - "slot_id" => slot_id, - "reward_cycle_id" => reward_cycle_id, - "response_hash" => %response_hash, - "server_version" => %metadata.server_version - ); - continue; - } - debug!("SignCoordinator: Received valid signature from signer"; "slot_id" => slot_id, "signature" => %signature); - let Ok(valid_sig) = signer_pubkey.verify(block_sighash.bits(), &signature) - else { - warn!("Got invalid signature from a signer. 
Ignoring."); - continue; - }; - if !valid_sig { - warn!( - "Processed signature but didn't validate over the expected block. Ignoring"; - "signature" => %signature, - "block_signer_signature_hash" => %block_sighash, - "slot_id" => slot_id, - ); - continue; - } - - if Self::fault_injection_ignore_signatures() { - warn!("SignCoordinator: fault injection: ignoring well-formed signature for block"; - "block_signer_sighash" => %block_sighash, - "signer_pubkey" => signer_pubkey.to_hex(), - "signer_slot_id" => slot_id, - "signature" => %signature, - "signer_weight" => signer_entry.weight, - "total_weight_signed" => total_weight_signed, - "stacks_block_hash" => %block.header.block_hash(), - "stacks_block_id" => %block.header.block_id() - ); - continue; - } - - if !gathered_signatures.contains_key(&slot_id) { - total_weight_signed = total_weight_signed - .checked_add(signer_entry.weight) - .expect("FATAL: total weight signed exceeds u32::MAX"); - } - - info!("SignCoordinator: Signature Added to block"; - "block_signer_sighash" => %block_sighash, - "signer_pubkey" => signer_pubkey.to_hex(), - "signer_slot_id" => slot_id, - "signature" => %signature, - "signer_weight" => signer_entry.weight, - "total_weight_signed" => total_weight_signed, - "stacks_block_hash" => %block.header.block_hash(), - "stacks_block_id" => %block.header.block_id(), - "server_version" => metadata.server_version, - ); - gathered_signatures.insert(slot_id, signature); - responded_signers.insert(signer_pubkey); - } - SignerMessageV0::BlockResponse(BlockResponse::Rejected(rejected_data)) => { - let block_sighash = block.header.signer_signature_hash(); - if block_sighash != rejected_data.signer_signature_hash { - warn!( - "Processed rejection for a different block. 
Will try to continue."; - "block_signer_signature_hash" => %block_sighash, - "rejected_data.signer_signature_hash" => %rejected_data.signer_signature_hash, - "slot_id" => slot_id, - "reward_cycle_id" => reward_cycle_id, - ); - continue; - } - let rejected_pubkey = match rejected_data.recover_public_key() { - Ok(rejected_pubkey) => { - if rejected_pubkey != signer_pubkey { - warn!("Recovered public key from rejected data does not match signer's public key. Ignoring."); - continue; - } - rejected_pubkey - } - Err(e) => { - warn!("Failed to recover public key from rejected data: {e:?}. Ignoring."); - continue; - } - }; - responded_signers.insert(rejected_pubkey); - debug!( - "Signer {slot_id} rejected our block {}/{}", - &block.header.consensus_hash, - &block.header.block_hash() - ); - total_reject_weight = total_reject_weight - .checked_add(signer_entry.weight) - .expect("FATAL: total weight rejected exceeds u32::MAX"); - - if total_reject_weight.saturating_add(self.weight_threshold) - > self.total_weight - { - debug!( - "{total_reject_weight}/{} signers vote to reject our block {}/{}", - self.total_weight, - &block.header.consensus_hash, - &block.header.block_hash() - ); - counters.bump_naka_rejected_blocks(); - return Err(NakamotoNodeError::SignersRejected); - } - continue; - } - SignerMessageV0::BlockProposal(_) => { - debug!("Received block proposal message. Ignoring."); - continue; - } - SignerMessageV0::BlockPushed(_) => { - debug!("Received block pushed message. Ignoring."); - continue; - } - SignerMessageV0::MockSignature(_) - | SignerMessageV0::MockProposal(_) - | SignerMessageV0::MockBlock(_) => { - debug!("Received mock message. Ignoring."); - continue; - } - }; - } - // After gathering all signatures, return them if we've hit the threshold - if total_weight_signed >= self.weight_threshold { - info!("SignCoordinator: Received enough signatures. 
Continuing."; - "stacks_block_hash" => %block.header.block_hash(), - "stacks_block_id" => %block.header.block_id() - ); - return Ok(gathered_signatures.values().cloned().collect()); - } - } - } -} diff --git a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs new file mode 100644 index 0000000000..70c9aab190 --- /dev/null +++ b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs @@ -0,0 +1,376 @@ +// Copyright (C) 2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +use std::sync::atomic::AtomicBool; +use std::sync::{Arc, Mutex}; +use std::thread::JoinHandle; + +use libsigner::v0::messages::{MinerSlotID, SignerMessage as SignerMessageV0}; +use libsigner::{BlockProposal, SignerSession, StackerDBSession}; +use stacks::burnchains::Burnchain; +use stacks::chainstate::burn::db::sortdb::SortitionDB; +use stacks::chainstate::burn::{BlockSnapshot, ConsensusHash}; +use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState}; +use stacks::chainstate::stacks::boot::{RewardSet, MINERS_NAME}; +use stacks::chainstate::stacks::db::StacksChainState; +use stacks::chainstate::stacks::Error as ChainstateError; +use stacks::codec::StacksMessageCodec; +use stacks::libstackerdb::StackerDBChunkData; +use stacks::net::stackerdb::StackerDBs; +use stacks::types::chainstate::{StacksBlockId, StacksPrivateKey}; +use stacks::util::hash::Sha512Trunc256Sum; +use stacks::util::secp256k1::MessageSignature; +use stacks::util_lib::boot::boot_code_id; + +use super::stackerdb_listener::StackerDBListenerComms; +use super::Error as NakamotoNodeError; +use crate::event_dispatcher::StackerDBChannel; +use crate::nakamoto_node::stackerdb_listener::{StackerDBListener, EVENT_RECEIVER_POLL}; +use crate::neon::Counters; +use crate::Config; + +/// The state of the signer database listener, used by the miner thread to +/// interact with the signer listener. +pub struct SignerCoordinator { + /// The private key used to sign messages from the miner + message_key: StacksPrivateKey, + /// Is this mainnet? 
+ is_mainnet: bool, + /// The session for writing to the miners contract in the stackerdb + miners_session: StackerDBSession, + /// The total weight of all signers + total_weight: u32, + /// The weight threshold for block approval + weight_threshold: u32, + /// Interface to the StackerDB listener thread's data + stackerdb_comms: StackerDBListenerComms, + /// Keep running flag for the signer DB listener thread + keep_running: Arc, + /// Handle for the signer DB listener thread + listener_thread: Option>, +} + +impl SignerCoordinator { + /// Create a new `SignerCoordinator` instance. + /// This will spawn a new thread to listen for messages from the signer DB. + pub fn new( + stackerdb_channel: Arc>, + node_keep_running: Arc, + reward_set: &RewardSet, + burn_tip: &BlockSnapshot, + burnchain: &Burnchain, + message_key: StacksPrivateKey, + config: &Config, + ) -> Result { + info!("SignerCoordinator: starting up"); + let keep_running = Arc::new(AtomicBool::new(true)); + + // Create the stacker DB listener + let mut listener = StackerDBListener::new( + stackerdb_channel, + node_keep_running.clone(), + keep_running.clone(), + reward_set, + burn_tip, + burnchain, + )?; + let is_mainnet = config.is_mainnet(); + let rpc_socket = config + .node + .get_rpc_loopback() + .ok_or_else(|| ChainstateError::MinerAborted)?; + let miners_contract_id = boot_code_id(MINERS_NAME, is_mainnet); + let miners_session = StackerDBSession::new(&rpc_socket.to_string(), miners_contract_id); + + let mut sc = Self { + message_key, + is_mainnet, + miners_session, + total_weight: listener.total_weight, + weight_threshold: listener.weight_threshold, + stackerdb_comms: listener.get_comms(), + keep_running, + listener_thread: None, + }; + + // Spawn the signer DB listener thread + let listener_thread = std::thread::Builder::new() + .name("stackerdb_listener".to_string()) + .spawn(move || { + if let Err(e) = listener.run() { + error!("StackerDBListener: exited with error: {e:?}"); + } + }) + .map_err(|e| 
{ + error!("Failed to spawn stackerdb_listener thread: {e:?}"); + ChainstateError::MinerAborted + })?; + + sc.listener_thread = Some(listener_thread); + + Ok(sc) + } + + /// Send a message over the miners contract using a `StacksPrivateKey` + #[allow(clippy::too_many_arguments)] + pub fn send_miners_message( + miner_sk: &StacksPrivateKey, + sortdb: &SortitionDB, + tip: &BlockSnapshot, + stackerdbs: &StackerDBs, + message: M, + miner_slot_id: MinerSlotID, + is_mainnet: bool, + miners_session: &mut StackerDBSession, + election_sortition: &ConsensusHash, + ) -> Result<(), String> { + let Some(slot_range) = NakamotoChainState::get_miner_slot(sortdb, tip, election_sortition) + .map_err(|e| format!("Failed to read miner slot information: {e:?}"))? + else { + return Err("No slot for miner".into()); + }; + + let slot_id = slot_range + .start + .saturating_add(miner_slot_id.to_u8().into()); + if !slot_range.contains(&slot_id) { + return Err("Not enough slots for miner messages".into()); + } + // Get the LAST slot version number written to the DB. If not found, use 0. + // Add 1 to get the NEXT version number + // Note: we already check above for the slot's existence + let miners_contract_id = boot_code_id(MINERS_NAME, is_mainnet); + let slot_version = stackerdbs + .get_slot_version(&miners_contract_id, slot_id) + .map_err(|e| format!("Failed to read slot version: {e:?}"))? + .unwrap_or(0) + .saturating_add(1); + let mut chunk = StackerDBChunkData::new(slot_id, slot_version, message.serialize_to_vec()); + chunk + .sign(miner_sk) + .map_err(|_| "Failed to sign StackerDB chunk")?; + + match miners_session.put_chunk(&chunk) { + Ok(ack) => { + if ack.accepted { + debug!("Wrote message to stackerdb: {ack:?}"); + Ok(()) + } else { + Err(format!("{ack:?}")) + } + } + Err(e) => Err(format!("{e:?}")), + } + } + + /// Propose a Nakamoto block and gather signatures for it. 
+ /// This function begins by sending a `BlockProposal` message to the + /// signers, and then it waits for the signers to respond with their + /// signatures. It does so in two ways, concurrently: + /// * It waits for the signer DB listener to collect enough signatures to + /// accept or reject the block + /// * It waits for the chainstate to contain the relayed block. If so, then its signatures are + /// loaded and returned. This can happen if the node receives the block via a signer who + /// fetched all signatures and assembled the signature vector, all before we could. + // Mutants skip here: this function is covered via integration tests, + // which the mutation testing does not see. + #[cfg_attr(test, mutants::skip)] + #[allow(clippy::too_many_arguments)] + pub fn propose_block( + &mut self, + block: &NakamotoBlock, + burn_tip: &BlockSnapshot, + burnchain: &Burnchain, + sortdb: &SortitionDB, + chain_state: &mut StacksChainState, + stackerdbs: &StackerDBs, + counters: &Counters, + election_sortition: &ConsensusHash, + ) -> Result, NakamotoNodeError> { + // Add this block to the block status map. 
+ self.stackerdb_comms.insert_block(&block.header); + + let reward_cycle_id = burnchain + .block_height_to_reward_cycle(burn_tip.block_height) + .expect("FATAL: tried to initialize coordinator before first burn block height"); + + let block_proposal = BlockProposal { + block: block.clone(), + burn_height: burn_tip.block_height, + reward_cycle: reward_cycle_id, + }; + + let block_proposal_message = SignerMessageV0::BlockProposal(block_proposal); + debug!("Sending block proposal message to signers"; + "signer_signature_hash" => %block.header.signer_signature_hash(), + ); + Self::send_miners_message::( + &self.message_key, + sortdb, + burn_tip, + stackerdbs, + block_proposal_message, + MinerSlotID::BlockProposal, + self.is_mainnet, + &mut self.miners_session, + election_sortition, + ) + .map_err(NakamotoNodeError::SigningCoordinatorFailure)?; + counters.bump_naka_proposed_blocks(); + + #[cfg(test)] + { + info!( + "SignerCoordinator: sent block proposal to .miners, waiting for test signing channel" + ); + // In test mode, short-circuit waiting for the signers if the TEST_SIGNING + // channel has been created. This allows integration tests for the stacks-node + // independent of the stacks-signer. + if let Some(signatures) = + crate::tests::nakamoto_integrations::TestSigningChannel::get_signature() + { + debug!("Short-circuiting waiting for signers, using test signature"); + return Ok(signatures); + } + } + + self.get_block_status( + &block.header.signer_signature_hash(), + &block.block_id(), + chain_state, + sortdb, + burn_tip, + counters, + ) + } + + /// Get the block status for a given block hash. + /// If we have not yet received enough signatures for this block, this + /// method will block until we do. If this block shows up in the staging DB + /// before we have enough signatures, we will return the signatures from + /// there. If a new burnchain tip is detected, we will return an error. 
+ fn get_block_status( + &self, + block_signer_sighash: &Sha512Trunc256Sum, + block_id: &StacksBlockId, + chain_state: &mut StacksChainState, + sortdb: &SortitionDB, + burn_tip: &BlockSnapshot, + counters: &Counters, + ) -> Result, NakamotoNodeError> { + loop { + let block_status = match self.stackerdb_comms.wait_for_block_status( + block_signer_sighash, + EVENT_RECEIVER_POLL, + |status| { + status.total_weight_signed < self.weight_threshold + && status + .total_reject_weight + .saturating_add(self.weight_threshold) + <= self.total_weight + }, + )? { + Some(status) => status, + None => { + // If we just received a timeout, we should check if the burnchain + // tip has changed or if we received this signed block already in + // the staging db. + debug!("SignerCoordinator: Timeout waiting for block signatures"); + + // Look in the nakamoto staging db -- a block can only get stored there + // if it has enough signing weight to clear the threshold. + if let Ok(Some((stored_block, _sz))) = chain_state + .nakamoto_blocks_db() + .get_nakamoto_block(block_id) + .map_err(|e| { + warn!( + "Failed to query chainstate for block: {e:?}"; + "block_id" => %block_id, + "block_signer_sighash" => %block_signer_sighash, + ); + e + }) + { + debug!("SignCoordinator: Found signatures in relayed block"); + counters.bump_naka_signer_pushed_blocks(); + return Ok(stored_block.header.signer_signature); + } + + if Self::check_burn_tip_changed(sortdb, burn_tip) { + debug!("SignCoordinator: Exiting due to new burnchain tip"); + return Err(NakamotoNodeError::BurnchainTipChanged); + } + + continue; + } + }; + + if block_status + .total_reject_weight + .saturating_add(self.weight_threshold) + > self.total_weight + { + info!( + "{}/{} signers vote to reject block", + block_status.total_reject_weight, self.total_weight; + "block_signer_sighash" => %block_signer_sighash, + ); + counters.bump_naka_rejected_blocks(); + return Err(NakamotoNodeError::SignersRejected); + } else if 
block_status.total_weight_signed >= self.weight_threshold { + info!("Received enough signatures, block accepted"; + "block_signer_sighash" => %block_signer_sighash, + ); + return Ok(block_status.gathered_signatures.values().cloned().collect()); + } else { + return Err(NakamotoNodeError::SigningCoordinatorFailure( + "Unblocked without reaching the threshold".into(), + )); + } + } + } + + /// Get the timestamp at which at least 70% of the signing power should be + /// willing to accept a time-based tenure extension. + pub fn get_tenure_extend_timestamp(&self) -> u64 { + self.stackerdb_comms + .get_tenure_extend_timestamp(self.weight_threshold) + } + + /// Check if the tenure needs to change + fn check_burn_tip_changed(sortdb: &SortitionDB, burn_block: &BlockSnapshot) -> bool { + let cur_burn_chain_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) + .expect("FATAL: failed to query sortition DB for canonical burn chain tip"); + + if cur_burn_chain_tip.consensus_hash != burn_block.consensus_hash { + info!("SignerCoordinator: Cancel signature aggregation; burnchain tip has changed"); + true + } else { + false + } + } + + pub fn shutdown(&mut self) { + if let Some(listener_thread) = self.listener_thread.take() { + info!("SignerCoordinator: shutting down stacker db listener thread"); + self.keep_running + .store(false, std::sync::atomic::Ordering::Relaxed); + if let Err(e) = listener_thread.join() { + error!("Failed to join signer listener thread: {e:?}"); + } + debug!("SignerCoordinator: stacker db listener thread has shut down"); + } + } +} diff --git a/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs b/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs new file mode 100644 index 0000000000..834c59fa95 --- /dev/null +++ b/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs @@ -0,0 +1,556 @@ +// Copyright (C) 2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it 
under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::collections::BTreeMap; +use std::sync::atomic::{AtomicBool, Ordering}; +use std::sync::mpsc::Receiver; +#[cfg(test)] +use std::sync::LazyLock; +use std::sync::{Arc, Condvar, Mutex}; +use std::time::Duration; + +use hashbrown::{HashMap, HashSet}; +use libsigner::v0::messages::{BlockAccepted, BlockResponse, SignerMessage as SignerMessageV0}; +use libsigner::SignerEvent; +use stacks::burnchains::Burnchain; +use stacks::chainstate::burn::BlockSnapshot; +use stacks::chainstate::nakamoto::NakamotoBlockHeader; +use stacks::chainstate::stacks::boot::{NakamotoSignerEntry, RewardSet, SIGNERS_NAME}; +use stacks::chainstate::stacks::events::StackerDBChunksEvent; +use stacks::chainstate::stacks::Error as ChainstateError; +use stacks::types::chainstate::StacksPublicKey; +use stacks::types::PublicKey; +use stacks::util::get_epoch_time_secs; +use stacks::util::hash::{MerkleHashFunc, Sha512Trunc256Sum}; +use stacks::util::secp256k1::MessageSignature; +#[cfg(test)] +use stacks_common::util::tests::TestFlag; + +use super::Error as NakamotoNodeError; +use crate::event_dispatcher::StackerDBChannel; + +#[cfg(test)] +/// Fault injection flag to prevent the miner from seeing enough signer signatures. 
+/// Used to test that the signers will broadcast a block if it gets enough signatures +pub static TEST_IGNORE_SIGNERS: LazyLock> = LazyLock::new(TestFlag::default); + +/// How long should the coordinator poll on the event receiver before +/// waking up to check timeouts? +pub static EVENT_RECEIVER_POLL: Duration = Duration::from_millis(500); + +#[derive(Debug, Clone)] +pub struct BlockStatus { + pub responded_signers: HashSet, + pub gathered_signatures: BTreeMap, + pub total_weight_signed: u32, + pub total_reject_weight: u32, +} + +#[derive(Debug, Clone)] +pub(crate) struct TimestampInfo { + pub timestamp: u64, + pub weight: u32, +} + +/// The listener for the StackerDB, which listens for messages from the +/// signers and tracks the state of block signatures and idle timestamps. +pub struct StackerDBListener { + /// Channel to communicate with StackerDB + stackerdb_channel: Arc>, + /// Receiver end of the StackerDB events channel + receiver: Option>, + /// Flag to shut the node down + node_keep_running: Arc, + /// Flag to shut the listener down + keep_running: Arc, + /// The signer set for this tenure (0 or 1) + signer_set: u32, + /// The total weight of all signers + pub(crate) total_weight: u32, + /// The weight threshold for block approval + pub(crate) weight_threshold: u32, + /// The signer entries for this tenure (keyed by slot_id) + signer_entries: HashMap, + /// Tracks signatures for blocks + /// - key: Sha512Trunc256Sum (signer signature hash) + /// - value: BlockStatus + pub(crate) blocks: Arc<(Mutex>, Condvar)>, + /// Tracks the timestamps from signers to decide when they should be + /// willing to accept time-based tenure extensions + /// - key: StacksPublicKey + /// - value: TimestampInfo + pub(crate) signer_idle_timestamps: Arc>>, +} + +/// Interface for other threads to retrieve info from the StackerDBListener +pub struct StackerDBListenerComms { + /// Tracks signatures for blocks + /// - key: Sha512Trunc256Sum (signer signature hash) + /// - value: 
BlockStatus + blocks: Arc<(Mutex>, Condvar)>, + /// Tracks the timestamps from signers to decide when they should be + /// willing to accept time-based tenure extensions + /// - key: StacksPublicKey + /// - value: TimestampInfo + signer_idle_timestamps: Arc>>, +} + +impl StackerDBListener { + pub fn new( + stackerdb_channel: Arc>, + node_keep_running: Arc, + keep_running: Arc, + reward_set: &RewardSet, + burn_tip: &BlockSnapshot, + burnchain: &Burnchain, + ) -> Result { + let (receiver, replaced_other) = stackerdb_channel + .lock() + .expect("FATAL: failed to lock StackerDB channel") + .register_miner_coordinator(); + if replaced_other { + warn!("Replaced the miner/coordinator receiver of a prior thread. Prior thread may have crashed."); + } + + let total_weight = reward_set.total_signing_weight().map_err(|e| { + warn!("Failed to calculate total weight for the reward set: {e:?}"); + ChainstateError::NoRegisteredSigners(0) + })?; + + let weight_threshold = NakamotoBlockHeader::compute_voting_weight_threshold(total_weight)?; + + let reward_cycle_id = burnchain + .block_height_to_reward_cycle(burn_tip.block_height) + .expect("FATAL: tried to initialize coordinator before first burn block height"); + let signer_set = + u32::try_from(reward_cycle_id % 2).expect("FATAL: reward cycle id % 2 exceeds u32"); + + let Some(ref reward_set_signers) = reward_set.signers else { + error!("Could not initialize signing coordinator for reward set without signer"); + debug!("reward set: {reward_set:?}"); + return Err(ChainstateError::NoRegisteredSigners(0)); + }; + + let signer_entries = reward_set_signers + .iter() + .cloned() + .enumerate() + .map(|(idx, signer)| { + let Ok(slot_id) = u32::try_from(idx) else { + return Err(ChainstateError::InvalidStacksBlock( + "Signer index exceeds u32".into(), + )); + }; + Ok((slot_id, signer)) + }) + .collect::, ChainstateError>>()?; + + Ok(Self { + stackerdb_channel, + receiver: Some(receiver), + node_keep_running, + keep_running, + signer_set, + 
total_weight, + weight_threshold, + signer_entries, + blocks: Arc::new((Mutex::new(HashMap::new()), Condvar::new())), + signer_idle_timestamps: Arc::new(Mutex::new(HashMap::new())), + }) + } + + pub fn get_comms(&self) -> StackerDBListenerComms { + StackerDBListenerComms { + blocks: self.blocks.clone(), + signer_idle_timestamps: self.signer_idle_timestamps.clone(), + } + } + + /// Run the StackerDB listener. + pub fn run(&mut self) -> Result<(), NakamotoNodeError> { + info!("StackerDBListener: Starting up"); + + let Some(receiver) = &self.receiver else { + return Err(NakamotoNodeError::SigningCoordinatorFailure( + "StackerDBListener: Failed to obtain the StackerDB event receiver".into(), + )); + }; + + loop { + // was the node asked to stop? + if !self.node_keep_running.load(Ordering::SeqCst) { + info!("StackerDBListener: received node exit request. Aborting"); + return Ok(()); + } + + // was the listener asked to stop? + if !self.keep_running.load(Ordering::SeqCst) { + info!("StackerDBListener: received listener exit request. Aborting"); + return Ok(()); + } + + let event = match receiver.recv_timeout(EVENT_RECEIVER_POLL) { + Ok(event) => event, + Err(std::sync::mpsc::RecvTimeoutError::Timeout) => { + debug!("StackerDBListener: No StackerDB event received. 
Checking flags and polling again."); + continue; + } + Err(std::sync::mpsc::RecvTimeoutError::Disconnected) => { + warn!("StackerDBListener: StackerDB event receiver disconnected"); + return Err(NakamotoNodeError::SigningCoordinatorFailure( + "StackerDB event receiver disconnected".into(), + )); + } + }; + + // check to see if this event we got is a signer event + let is_signer_event = + event.contract_id.name.starts_with(SIGNERS_NAME) && event.contract_id.is_boot(); + + if !is_signer_event { + debug!("StackerDBListener: Ignoring StackerDB event for non-signer contract"; "contract" => %event.contract_id); + continue; + } + + let modified_slots = &event.modified_slots.clone(); + + let Ok(signer_event) = SignerEvent::::try_from(event).map_err(|e| { + warn!("StackerDBListener: Failure parsing StackerDB event into signer event. Ignoring message."; "err" => ?e); + }) else { + continue; + }; + let SignerEvent::SignerMessages(signer_set, messages) = signer_event else { + debug!("StackerDBListener: Received signer event other than a signer message. Ignoring."); + continue; + }; + if signer_set != self.signer_set { + debug!( + "StackerDBListener: Received signer event for other reward cycle. Ignoring." 
+ ); + continue; + }; + let slot_ids = modified_slots + .into_iter() + .map(|chunk| chunk.slot_id) + .collect::>(); + + debug!("StackerDBListener: Received messages from signers"; + "count" => messages.len(), + "slot_ids" => ?slot_ids, + ); + + for (message, slot_id) in messages.into_iter().zip(slot_ids) { + let Some(signer_entry) = &self.signer_entries.get(&slot_id) else { + return Err(NakamotoNodeError::SignerSignatureError( + "Signer entry not found".into(), + )); + }; + let Ok(signer_pubkey) = StacksPublicKey::from_slice(&signer_entry.signing_key) + else { + return Err(NakamotoNodeError::SignerSignatureError( + "Failed to parse signer public key".into(), + )); + }; + + match message { + SignerMessageV0::BlockResponse(BlockResponse::Accepted(accepted)) => { + let BlockAccepted { + signer_signature_hash: block_sighash, + signature, + metadata, + response_data, + } = accepted; + let tenure_extend_timestamp = response_data.tenure_extend_timestamp; + + let (lock, cvar) = &*self.blocks; + let mut blocks = lock.lock().expect("FATAL: failed to lock block status"); + + let Some(block) = blocks.get_mut(&block_sighash) else { + info!( + "StackerDBListener: Received signature for block that we did not request. Ignoring."; + "signature" => %signature, + "block_signer_sighash" => %block_sighash, + "slot_id" => slot_id, + "signer_set" => self.signer_set, + ); + continue; + }; + + let Ok(valid_sig) = signer_pubkey.verify(block_sighash.bits(), &signature) + else { + warn!( + "StackerDBListener: Got invalid signature from a signer. Ignoring." + ); + continue; + }; + if !valid_sig { + warn!( + "StackerDBListener: Processed signature but didn't validate over the expected block. 
Ignoring"; + "signature" => %signature, + "block_signer_signature_hash" => %block_sighash, + "slot_id" => slot_id, + ); + continue; + } + + if Self::fault_injection_ignore_signatures() { + warn!("StackerDBListener: fault injection: ignoring well-formed signature for block"; + "block_signer_sighash" => %block_sighash, + "signer_pubkey" => signer_pubkey.to_hex(), + "signer_slot_id" => slot_id, + "signature" => %signature, + "signer_weight" => signer_entry.weight, + "total_weight_signed" => block.total_weight_signed, + ); + continue; + } + + if !block.gathered_signatures.contains_key(&slot_id) { + block.total_weight_signed = block + .total_weight_signed + .checked_add(signer_entry.weight) + .expect("FATAL: total weight signed exceeds u32::MAX"); + } + + info!("StackerDBListener: Signature Added to block"; + "block_signer_sighash" => %block_sighash, + "signer_pubkey" => signer_pubkey.to_hex(), + "signer_slot_id" => slot_id, + "signature" => %signature, + "signer_weight" => signer_entry.weight, + "total_weight_signed" => block.total_weight_signed, + "tenure_extend_timestamp" => tenure_extend_timestamp, + "server_version" => metadata.server_version, + ); + block.gathered_signatures.insert(slot_id, signature); + block.responded_signers.insert(signer_pubkey); + + if block.total_weight_signed >= self.weight_threshold { + // Signal to anyone waiting on this block that we have enough signatures + cvar.notify_all(); + } + + // Update the idle timestamp for this signer + self.update_idle_timestamp( + signer_pubkey, + tenure_extend_timestamp, + signer_entry.weight, + ); + } + SignerMessageV0::BlockResponse(BlockResponse::Rejected(rejected_data)) => { + let (lock, cvar) = &*self.blocks; + let mut blocks = lock.lock().expect("FATAL: failed to lock block status"); + + let Some(block) = blocks.get_mut(&rejected_data.signer_signature_hash) + else { + info!( + "StackerDBListener: Received rejection for block that we did not request. 
Ignoring."; + "block_signer_sighash" => %rejected_data.signer_signature_hash, + "slot_id" => slot_id, + "signer_set" => self.signer_set, + ); + continue; + }; + + let rejected_pubkey = match rejected_data.recover_public_key() { + Ok(rejected_pubkey) => { + if rejected_pubkey != signer_pubkey { + warn!("StackerDBListener: Recovered public key from rejected data does not match signer's public key. Ignoring."); + continue; + } + rejected_pubkey + } + Err(e) => { + warn!("StackerDBListener: Failed to recover public key from rejected data: {e:?}. Ignoring."); + continue; + } + }; + block.responded_signers.insert(rejected_pubkey); + block.total_reject_weight = block + .total_reject_weight + .checked_add(signer_entry.weight) + .expect("FATAL: total weight rejected exceeds u32::MAX"); + + info!("StackerDBListener: Signer rejected block"; + "block_signer_sighash" => %rejected_data.signer_signature_hash, + "signer_pubkey" => rejected_pubkey.to_hex(), + "signer_slot_id" => slot_id, + "signature" => %rejected_data.signature, + "signer_weight" => signer_entry.weight, + "total_weight_signed" => block.total_weight_signed, + "reason" => rejected_data.reason, + "reason_code" => %rejected_data.reason_code, + "tenure_extend_timestamp" => rejected_data.response_data.tenure_extend_timestamp, + "server_version" => rejected_data.metadata.server_version, + ); + + if block + .total_reject_weight + .saturating_add(self.weight_threshold) + > self.total_weight + { + // Signal to anyone waiting on this block that we have enough rejections + cvar.notify_all(); + } + + // Update the idle timestamp for this signer + self.update_idle_timestamp( + signer_pubkey, + rejected_data.response_data.tenure_extend_timestamp, + signer_entry.weight, + ); + } + SignerMessageV0::BlockProposal(_) => { + debug!("Received block proposal message. Ignoring."); + } + SignerMessageV0::BlockPushed(_) => { + debug!("Received block pushed message. 
Ignoring."); + } + SignerMessageV0::MockSignature(_) + | SignerMessageV0::MockProposal(_) + | SignerMessageV0::MockBlock(_) => { + debug!("Received mock message. Ignoring."); + } + }; + } + } + } + + fn update_idle_timestamp(&self, signer_pubkey: StacksPublicKey, timestamp: u64, weight: u32) { + let mut idle_timestamps = self + .signer_idle_timestamps + .lock() + .expect("FATAL: failed to lock idle timestamps"); + + // Check the current timestamp for the given signer_pubkey + if let Some(existing_info) = idle_timestamps.get(&signer_pubkey) { + // Only update if the new timestamp is greater + if timestamp <= existing_info.timestamp { + return; // Exit early if the new timestamp is not greater + } + } + + // Update the map with the new timestamp and weight + let timestamp_info = TimestampInfo { timestamp, weight }; + idle_timestamps.insert(signer_pubkey, timestamp_info); + } + + /// Do we ignore signer signatures? + #[cfg(test)] + fn fault_injection_ignore_signatures() -> bool { + TEST_IGNORE_SIGNERS.get() + } + + #[cfg(not(test))] + fn fault_injection_ignore_signatures() -> bool { + false + } +} + +impl Drop for StackerDBListener { + fn drop(&mut self) { + let stackerdb_channel = self + .stackerdb_channel + .lock() + .expect("FATAL: failed to lock stackerdb channel"); + stackerdb_channel.replace_receiver(self.receiver.take().expect( + "FATAL: lost possession of the StackerDB channel before dropping SignCoordinator", + )); + } +} + +impl StackerDBListenerComms { + /// Insert a block into the block status map with initial values. 
+ pub fn insert_block(&self, block: &NakamotoBlockHeader) { + let (lock, _cvar) = &*self.blocks; + let mut blocks = lock.lock().expect("FATAL: failed to lock block status"); + let block_status = BlockStatus { + responded_signers: HashSet::new(), + gathered_signatures: BTreeMap::new(), + total_weight_signed: 0, + total_reject_weight: 0, + }; + blocks.insert(block.signer_signature_hash(), block_status); + } + + /// Get the status for `block` from the Stacker DB listener. + /// If the block is not found in the map, return an error. + /// If the block is found, call `condition` to check if the block status + /// satisfies the condition. + /// If the condition is satisfied, return the block status as + /// `Ok(Some(status))`. + /// If the condition is not satisfied, wait for it to be satisfied. + /// If the timeout is reached, return `Ok(None)`. + pub fn wait_for_block_status( + &self, + block_signer_sighash: &Sha512Trunc256Sum, + timeout: Duration, + condition: F, + ) -> Result, NakamotoNodeError> + where + F: Fn(&BlockStatus) -> bool, + { + let (lock, cvar) = &*self.blocks; + let blocks = lock.lock().expect("FATAL: failed to lock block status"); + + let (guard, timeout_result) = cvar + .wait_timeout_while(blocks, timeout, |map| { + let Some(status) = map.get(block_signer_sighash) else { + return true; + }; + condition(status) + }) + .expect("FATAL: failed to wait on block status cond var"); + + // If we timed out, return None + if timeout_result.timed_out() { + return Ok(None); + } + match guard.get(block_signer_sighash) { + Some(status) => Ok(Some(status.clone())), + None => Err(NakamotoNodeError::SigningCoordinatorFailure( + "Block not found in status map".into(), + )), + } + } + + /// Get the timestamp at which at least 70% of the signing power should be + /// willing to accept a time-based tenure extension. 
+ pub fn get_tenure_extend_timestamp(&self, weight_threshold: u32) -> u64 { + let signer_idle_timestamps = self + .signer_idle_timestamps + .lock() + .expect("FATAL: failed to lock signer idle timestamps"); + debug!("SignerCoordinator: signer_idle_timestamps: {signer_idle_timestamps:?}"); + let mut idle_timestamps = signer_idle_timestamps.values().collect::>(); + idle_timestamps.sort_by_key(|info| info.timestamp); + let mut weight_sum = 0; + for info in idle_timestamps { + weight_sum += info.weight; + if weight_sum >= weight_threshold { + debug!("SignerCoordinator: 70% threshold reached for tenure extension timestamp"; + "tenure_extend_timestamp" => info.timestamp, + "tenure_extend_in" => (info.timestamp as i64 - get_epoch_time_secs() as i64) + ); + return info.timestamp; + } + } + + // We don't have enough information to reach a 70% threshold at any + // time, so return u64::MAX to indicate that we should not extend the + // tenure. + u64::MAX + } +} diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index b688db100d..2d4dc7fadd 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -187,6 +187,8 @@ use stacks::chainstate::stacks::{ StacksMicroblock, StacksPublicKey, StacksTransaction, StacksTransactionSigner, TransactionAnchorMode, TransactionPayload, TransactionVersion, }; +use stacks::config::chain_data::MinerStats; +use stacks::config::NodeConfig; use stacks::core::mempool::MemPoolDB; use stacks::core::{EpochList, FIRST_BURNCHAIN_CONSENSUS_HASH, STACKS_EPOCH_3_0_MARKER}; use stacks::cost_estimates::metrics::{CostMetric, UnitMetric}; @@ -220,10 +222,8 @@ use crate::burnchains::bitcoin_regtest_controller::{ addr2str, burnchain_params_from_config, BitcoinRegtestController, OngoingBlockCommit, }; use crate::burnchains::{make_bitcoin_indexer, Error as BurnchainControllerError}; -use crate::chain_data::MinerStats; -use crate::config::NodeConfig; use crate::globals::{NeonGlobals as 
Globals, RelayerDirective}; -use crate::nakamoto_node::sign_coordinator::SignCoordinator; +use crate::nakamoto_node::signer_coordinator::SignerCoordinator; use crate::run_loop::neon::RunLoop; use crate::run_loop::RegisteredKey; use crate::ChainTip; @@ -611,8 +611,7 @@ impl MicroblockMinerThread { match StacksChainState::get_anchored_block_header_info(chainstate.db(), &ch, &bhh) { Ok(Some(_)) => { let parent_index_hash = StacksBlockHeader::make_index_block_hash(&ch, &bhh); - let cost_so_far = if relayer_thread.microblock_stream_cost == ExecutionCost::zero() - { + let cost_so_far = if relayer_thread.microblock_stream_cost == ExecutionCost::ZERO { // unknown cost, or this is idempotent. StacksChainState::get_stacks_block_anchored_cost( chainstate.db(), @@ -2364,7 +2363,7 @@ impl BlockMinerThread { let mut miners_stackerdb = StackerDBSession::new(&self.config.node.rpc_bind, miner_contract_id); - SignCoordinator::send_miners_message( + SignerCoordinator::send_miners_message( &mining_key, &burn_db, &self.burn_block, @@ -2392,7 +2391,7 @@ impl BlockMinerThread { }; info!("Sending mock block to stackerdb: {mock_block:?}"); - SignCoordinator::send_miners_message( + SignerCoordinator::send_miners_message( &mining_key, &burn_db, &self.burn_block, @@ -2845,7 +2844,7 @@ impl RelayerThread { miner_tip: None, last_microblock_tenure_time: 0, microblock_deadline: 0, - microblock_stream_cost: ExecutionCost::zero(), + microblock_stream_cost: ExecutionCost::ZERO, relayer, @@ -3503,7 +3502,7 @@ impl RelayerThread { if best_tip == new_miner_tip && best_tip != my_miner_tip { // tip has changed debug!("Relayer: Best miner tip went from {my_miner_tip:?} to {new_miner_tip:?}"); - self.microblock_stream_cost = ExecutionCost::zero(); + self.microblock_stream_cost = ExecutionCost::ZERO; } self.miner_tip = best_tip; } @@ -4810,7 +4809,7 @@ impl StacksNode { &mut chainstate, &sortdb, stackerdb_configs, - config.connection_options.num_neighbors, + &config.connection_options, ) .unwrap(); diff 
--git a/testnet/stacks-node/src/run_loop/boot_nakamoto.rs b/testnet/stacks-node/src/run_loop/boot_nakamoto.rs index 648c6d7470..171ebcb2cb 100644 --- a/testnet/stacks-node/src/run_loop/boot_nakamoto.rs +++ b/testnet/stacks-node/src/run_loop/boot_nakamoto.rs @@ -22,9 +22,8 @@ use std::{fs, thread}; use stacks::burnchains::Burnchain; use stacks::chainstate::burn::db::sortdb::SortitionDB; use stacks::chainstate::coordinator::comm::CoordinatorChannels; -use stacks::core::StacksEpochExtension; use stacks::net::p2p::PeerNetwork; -use stacks_common::types::{StacksEpoch, StacksEpochId}; +use stacks_common::types::StacksEpochId; use crate::event_dispatcher::EventDispatcher; use crate::globals::NeonGlobals; @@ -233,10 +232,7 @@ impl BootRunLoop { fn reached_epoch_30_transition(config: &Config) -> Result { let burn_height = Self::get_burn_height(config)?; - let epochs = StacksEpoch::get_epochs( - config.burnchain.get_bitcoin_network().1, - config.burnchain.epochs.as_ref(), - ); + let epochs = config.burnchain.get_epoch_list(); let epoch_3 = epochs .get(StacksEpochId::Epoch30) .ok_or("No Epoch-3.0 defined")?; diff --git a/testnet/stacks-node/src/run_loop/mod.rs b/testnet/stacks-node/src/run_loop/mod.rs index 7990c04332..819ace144c 100644 --- a/testnet/stacks-node/src/run_loop/mod.rs +++ b/testnet/stacks-node/src/run_loop/mod.rs @@ -199,8 +199,8 @@ pub fn announce_boot_receipts( block_header_0.burn_header_hash, block_header_0.burn_header_height, block_header_0.burn_header_timestamp, - &ExecutionCost::zero(), - &ExecutionCost::zero(), + &ExecutionCost::ZERO, + &ExecutionCost::ZERO, pox_constants, &None, &None, diff --git a/testnet/stacks-node/src/run_loop/nakamoto.rs b/testnet/stacks-node/src/run_loop/nakamoto.rs index 16f5a12b2d..335fb325d8 100644 --- a/testnet/stacks-node/src/run_loop/nakamoto.rs +++ b/testnet/stacks-node/src/run_loop/nakamoto.rs @@ -319,6 +319,7 @@ impl RunLoop { let mut fee_estimator = moved_config.make_fee_estimator(); let coord_config = 
ChainsCoordinatorConfig { + assume_present_anchor_blocks: moved_config.node.assume_present_anchor_blocks, always_use_affirmation_maps: moved_config.node.always_use_affirmation_maps, require_affirmed_anchor_blocks: moved_config .node diff --git a/testnet/stacks-node/src/run_loop/neon.rs b/testnet/stacks-node/src/run_loop/neon.rs index 5e021e50ab..4ecc84b73b 100644 --- a/testnet/stacks-node/src/run_loop/neon.rs +++ b/testnet/stacks-node/src/run_loop/neon.rs @@ -21,6 +21,8 @@ use stacks::chainstate::stacks::db::{ChainStateBootData, StacksChainState}; use stacks::chainstate::stacks::miner::{signal_mining_blocked, signal_mining_ready, MinerStatus}; use stacks::core::StacksEpochId; use stacks::net::atlas::{AtlasConfig, AtlasDB, Attachment}; +#[cfg(test)] +use stacks::util::tests::TestFlag; use stacks::util_lib::db::Error as db_error; use stacks_common::deps_common::ctrlc as termination; use stacks_common::deps_common::ctrlc::SignalId; @@ -73,36 +75,24 @@ impl Default for RunLoopCounter { } } -#[cfg(test)] -impl std::ops::Deref for RunLoopCounter { - type Target = Arc; - - fn deref(&self) -> &Self::Target { - &self.0 +impl RunLoopCounter { + #[cfg(test)] + pub fn get(&self) -> u64 { + self.0.load(Ordering::SeqCst) } -} -#[cfg(test)] -#[derive(Clone)] -pub struct TestFlag(pub Arc>>); - -#[cfg(test)] -impl Default for TestFlag { - fn default() -> Self { - Self(Arc::new(std::sync::Mutex::new(None))) + #[cfg(test)] + pub fn load(&self, ordering: Ordering) -> u64 { + self.0.load(ordering) } } #[cfg(test)] -impl TestFlag { - /// Set the test flag to the given value - pub fn set(&self, value: bool) { - *self.0.lock().unwrap() = Some(value); - } +impl std::ops::Deref for RunLoopCounter { + type Target = Arc; - /// Get the test flag value. Defaults to false if the flag is not set. 
- pub fn get(&self) -> bool { - self.0.lock().unwrap().unwrap_or(false) + fn deref(&self) -> &Self::Target { + &self.0 } } @@ -123,7 +113,7 @@ pub struct Counters { pub naka_signer_pushed_blocks: RunLoopCounter, #[cfg(test)] - pub naka_skip_commit_op: TestFlag, + pub naka_skip_commit_op: TestFlag, } impl Counters { @@ -625,6 +615,7 @@ impl RunLoop { let mut fee_estimator = moved_config.make_fee_estimator(); let coord_config = ChainsCoordinatorConfig { + assume_present_anchor_blocks: moved_config.node.assume_present_anchor_blocks, always_use_affirmation_maps: moved_config.node.always_use_affirmation_maps, require_affirmed_anchor_blocks: moved_config .node @@ -1156,19 +1147,8 @@ impl RunLoop { let mut sortition_db_height = rc_aligned_height; let mut burnchain_height = sortition_db_height; - let mut num_sortitions_in_last_cycle = 1; // prepare to fetch the first reward cycle! - let mut target_burnchain_block_height = cmp::min( - burnchain_config.reward_cycle_to_block_height( - burnchain_config - .block_height_to_reward_cycle(burnchain_height) - .expect("BUG: block height is not in a reward cycle") - + 1, - ), - burnchain.get_headers_height() - 1, - ); - debug!("Runloop: Begin main runloop starting a burnchain block {sortition_db_height}"); let mut last_tenure_sortition_height = 0; @@ -1196,17 +1176,13 @@ impl RunLoop { let remote_chain_height = burnchain.get_headers_height() - 1; - // wait for the p2p state-machine to do at least one pass - debug!("Runloop: Wait until Stacks block downloads reach a quiescent state before processing more burnchain blocks"; "remote_chain_height" => remote_chain_height, "local_chain_height" => burnchain_height); - - // wait until it's okay to process the next reward cycle's sortitions - let ibd = match self.get_pox_watchdog().pox_sync_wait( + // wait until it's okay to process the next reward cycle's sortitions. 
+ let (ibd, target_burnchain_block_height) = match self.get_pox_watchdog().pox_sync_wait( &burnchain_config, &burnchain_tip, remote_chain_height, - num_sortitions_in_last_cycle, ) { - Ok(ibd) => ibd, + Ok(x) => x, Err(e) => { debug!("Runloop: PoX sync wait routine aborted: {e:?}"); continue; @@ -1220,9 +1196,6 @@ impl RunLoop { 0.0 }; - // will recalculate this in the following loop - num_sortitions_in_last_cycle = 0; - // Download each burnchain block and process their sortitions. This, in turn, will // cause the node's p2p and relayer threads to go fetch and download Stacks blocks and // process them. This loop runs for one reward cycle, so that the next pass of the @@ -1270,8 +1243,6 @@ impl RunLoop { "Runloop: New burnchain block height {next_sortition_height} > {sortition_db_height}" ); - let mut sort_count = 0; - debug!("Runloop: block mining until we process all sortitions"); signal_mining_blocked(globals.get_miner_status()); @@ -1289,9 +1260,6 @@ impl RunLoop { "Failed to find block in fork processed by burnchain indexer", ) }; - if block.sortition { - sort_count += 1; - } let sortition_id = &block.sortition_id; @@ -1338,9 +1306,8 @@ impl RunLoop { debug!("Runloop: enable miner after processing sortitions"); signal_mining_ready(globals.get_miner_status()); - num_sortitions_in_last_cycle = sort_count; debug!( - "Runloop: Synchronized sortitions up to block height {next_sortition_height} from {sortition_db_height} (chain tip height is {burnchain_height}); {num_sortitions_in_last_cycle} sortitions" + "Runloop: Synchronized sortitions up to block height {next_sortition_height} from {sortition_db_height} (chain tip height is {burnchain_height})" ); sortition_db_height = next_sortition_height; @@ -1359,22 +1326,6 @@ impl RunLoop { } } - // advance one reward cycle at a time. - // If we're still downloading, then this is simply target_burnchain_block_height + reward_cycle_len. 
- // Otherwise, this is burnchain_tip + reward_cycle_len - let next_target_burnchain_block_height = cmp::min( - burnchain_config.reward_cycle_to_block_height( - burnchain_config - .block_height_to_reward_cycle(target_burnchain_block_height) - .expect("FATAL: burnchain height before system start") - + 1, - ), - remote_chain_height, - ); - - debug!("Runloop: Advance target burnchain block height from {target_burnchain_block_height} to {next_target_burnchain_block_height} (sortition height {sortition_db_height})"); - target_burnchain_block_height = next_target_burnchain_block_height; - if sortition_db_height >= burnchain_height && !ibd { let canonical_stacks_tip_height = SortitionDB::get_canonical_burn_chain_tip(burnchain.sortdb_ref().conn()) diff --git a/testnet/stacks-node/src/syncctl.rs b/testnet/stacks-node/src/syncctl.rs index 395d829c8f..488234d21d 100644 --- a/testnet/stacks-node/src/syncctl.rs +++ b/testnet/stacks-node/src/syncctl.rs @@ -1,20 +1,28 @@ -use std::collections::VecDeque; +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ use std::sync::atomic::{AtomicBool, AtomicU64, Ordering}; use std::sync::Arc; use stacks::burnchains::{Burnchain, Error as burnchain_error}; -use stacks::chainstate::stacks::db::StacksChainState; use stacks_common::util::{get_epoch_time_secs, sleep_ms}; use crate::burnchains::BurnchainTip; use crate::Config; -// amount of time to wait for an inv or download sync to complete. -// These _really should_ complete before the PoX sync watchdog permits processing the next reward -// cycle, so this number is intentionally high (like, there's something really wrong with your -// network if your node is actualy waiting a day in-between reward cycles). -const SYNC_WAIT_SECS: u64 = 24 * 3600; - #[derive(Clone)] pub struct PoxSyncWatchdogComms { /// how many passes in the p2p state machine have taken place since startup? @@ -56,22 +64,6 @@ impl PoxSyncWatchdogComms { self.last_ibd.load(Ordering::SeqCst) } - /// Wait for at least one inv-sync state-machine passes - pub fn wait_for_inv_sync_pass(&self, timeout: u64) -> Result { - let current = self.get_inv_sync_passes(); - - let now = get_epoch_time_secs(); - while current >= self.get_inv_sync_passes() { - if now + timeout < get_epoch_time_secs() { - debug!("PoX watchdog comms: timed out waiting for one inv-sync pass"); - return Ok(false); - } - self.interruptable_sleep(1)?; - std::hint::spin_loop(); - } - Ok(true) - } - fn interruptable_sleep(&self, secs: u64) -> Result<(), burnchain_error> { let deadline = secs + get_epoch_time_secs(); while get_epoch_time_secs() < deadline { @@ -83,21 +75,6 @@ impl PoxSyncWatchdogComms { Ok(()) } - pub fn wait_for_download_pass(&self, timeout: u64) -> Result { - let current = self.get_download_passes(); - - let now = get_epoch_time_secs(); - while current >= self.get_download_passes() { - if now + timeout < get_epoch_time_secs() { - debug!("PoX watchdog comms: timed out waiting for one download pass"); - return Ok(false); - } - self.interruptable_sleep(1)?; - std::hint::spin_loop(); - } - 
Ok(true) - } - pub fn should_keep_running(&self) -> bool { self.should_keep_running.load(Ordering::SeqCst) } @@ -124,82 +101,25 @@ impl PoxSyncWatchdogComms { /// unless it's reasonably sure that it has processed all Stacks blocks for this reward cycle. /// This struct monitors the Stacks chainstate to make this determination. pub struct PoxSyncWatchdog { - /// number of attachable but unprocessed staging blocks over time - new_attachable_blocks: VecDeque, - /// number of newly-processed staging blocks over time - new_processed_blocks: VecDeque, - /// last time we asked for attachable blocks - last_attachable_query: u64, - /// last time we asked for processed blocks - last_processed_query: u64, - /// number of samples to take - max_samples: u64, - /// maximum number of blocks to count per query (affects performance!) - max_staging: u64, - /// when did we first start watching? - watch_start_ts: u64, - /// when did we first see a flatline in block-processing rate? - last_block_processed_ts: u64, - /// estimated time for a block to get downloaded. Used to infer how long to wait for the first - /// blocks to show up when waiting for this reward cycle. - estimated_block_download_time: f64, - /// estimated time for a block to get processed -- from when it shows up as attachable to when - /// it shows up as processed. Used to infer how long to wait for the last block to get - /// processed before unblocking burnchain sync for the next reward cycle. - estimated_block_process_time: f64, - /// time between burnchain syncs in stead state + /// time between burnchain syncs in steady state steady_state_burnchain_sync_interval: u64, - /// when to re-sync under steady state - steady_state_resync_ts: u64, - /// chainstate handle - chainstate: StacksChainState, /// handle to relayer thread that informs the watchdog when the P2P state-machine does stuff relayer_comms: PoxSyncWatchdogComms, /// should this sync watchdog always download? used in integration tests. 
unconditionally_download: bool, } -const PER_SAMPLE_WAIT_MS: u64 = 1000; - impl PoxSyncWatchdog { pub fn new( config: &Config, watchdog_comms: PoxSyncWatchdogComms, ) -> Result { - let mainnet = config.is_mainnet(); - let chain_id = config.burnchain.chain_id; - let chainstate_path = config.get_chainstate_path_str(); let burnchain_poll_time = config.burnchain.poll_time_secs; - let download_timeout = config.connection_options.timeout; - let max_samples = config.node.pox_sync_sample_secs; let unconditionally_download = config.node.pox_sync_sample_secs == 0; - let marf_opts = config.node.get_marf_opts(); - - let (chainstate, _) = - match StacksChainState::open(mainnet, chain_id, &chainstate_path, Some(marf_opts)) { - Ok(cs) => cs, - Err(e) => { - return Err(format!( - "Failed to open chainstate at '{chainstate_path}': {e:?}" - )); - } - }; Ok(PoxSyncWatchdog { unconditionally_download, - new_attachable_blocks: VecDeque::new(), - new_processed_blocks: VecDeque::new(), - last_attachable_query: 0, - last_processed_query: 0, - max_samples, - max_staging: 10, - watch_start_ts: 0, - last_block_processed_ts: 0, - estimated_block_download_time: download_timeout as f64, - estimated_block_process_time: 5.0, steady_state_burnchain_sync_interval: burnchain_poll_time, - steady_state_resync_ts: 0, - chainstate, relayer_comms: watchdog_comms, }) } @@ -208,39 +128,9 @@ impl PoxSyncWatchdog { self.relayer_comms.clone() } - /// How many recently-added Stacks blocks are in an attachable state, up to $max_staging? 
- fn count_attachable_stacks_blocks(&mut self) -> Result { - // number of staging blocks that have arrived since the last sortition - let cnt = StacksChainState::count_attachable_staging_blocks( - self.chainstate.db(), - self.max_staging, - self.last_attachable_query, - ) - .map_err(|e| format!("Failed to count attachable staging blocks: {e:?}"))?; - - self.last_attachable_query = get_epoch_time_secs(); - Ok(cnt) - } - - /// How many recently-processed Stacks blocks are there, up to $max_staging? - /// ($max_staging is necessary to limit the runtime of this method, since the underlying SQL - /// uses COUNT(*), which in Sqlite is a _O(n)_ operation for _n_ rows) - fn count_processed_stacks_blocks(&mut self) -> Result { - // number of staging blocks that have arrived since the last sortition - let cnt = StacksChainState::count_processed_staging_blocks( - self.chainstate.db(), - self.max_staging, - self.last_processed_query, - ) - .map_err(|e| format!("Failed to count attachable staging blocks: {e:?}"))?; - - self.last_processed_query = get_epoch_time_secs(); - Ok(cnt) - } - /// Are we in the initial burnchain block download? i.e. is the burn tip snapshot far enough away /// from the burnchain height that we should be eagerly downloading snapshots? - pub fn infer_initial_burnchain_block_download( + fn infer_initial_burnchain_block_download( burnchain: &Burnchain, last_processed_height: u64, burnchain_height: u64, @@ -261,182 +151,23 @@ impl PoxSyncWatchdog { ibd } - /// Calculate the first derivative of a list of points - fn derivative(sample_list: &VecDeque) -> Vec { - let mut deltas = vec![]; - let mut prev = 0; - for (i, sample) in sample_list.iter().enumerate() { - if i == 0 { - prev = *sample; - continue; - } - let delta = *sample - prev; - prev = *sample; - deltas.push(delta); - } - deltas - } - - /// Is a derivative approximately flat, with a maximum absolute deviation from 0? 
- /// Return whether or not the sample is mostly flat, and how many points were over the given - /// error bar in either direction. - fn is_mostly_flat(deriv: &[i64], error: i64) -> (bool, usize) { - let mut total_deviates = 0; - let mut ret = true; - for d in deriv.iter() { - if d.abs() > error { - total_deviates += 1; - ret = false; - } - } - (ret, total_deviates) - } - - /// low and high pass filter average -- take average without the smallest and largest values - fn hilo_filter_avg(samples: &[i64]) -> f64 { - // take average with low and high pass - let mut min = i64::MAX; - let mut max = i64::MIN; - for s in samples.iter() { - if *s < 0 { - // nonsensical result (e.g. due to clock drift?) - continue; - } - if *s < min { - min = *s; - } - if *s > max { - max = *s; - } - } - - let mut count = 0; - let mut sum = 0; - for s in samples.iter() { - if *s < 0 { - // nonsensical result - continue; - } - if *s == min { - continue; - } - if *s == max { - continue; - } - count += 1; - sum += *s; - } - - if count == 0 { - // no viable samples - 1.0 - } else { - (sum as f64) / (count as f64) - } - } - - /// estimate how long a block remains in an unprocessed state - fn estimate_block_process_time( - chainstate: &StacksChainState, - burnchain: &Burnchain, - tip_height: u64, - ) -> f64 { - let this_reward_cycle = burnchain - .block_height_to_reward_cycle(tip_height) - .unwrap_or_else(|| panic!("BUG: no reward cycle for {tip_height}")); - let prev_reward_cycle = this_reward_cycle.saturating_sub(1); - - let start_height = burnchain.reward_cycle_to_block_height(prev_reward_cycle); - let end_height = burnchain.reward_cycle_to_block_height(this_reward_cycle); - - if this_reward_cycle > 0 { - assert!(start_height < end_height); - } else { - // no samples yet - return 1.0; - } - - let block_wait_times = - StacksChainState::measure_block_wait_time(chainstate.db(), start_height, end_height) - .expect("BUG: failed to query chainstate block-processing times"); - - 
PoxSyncWatchdog::hilo_filter_avg(&block_wait_times) - } - - /// estimate how long a block takes to download - fn estimate_block_download_time( - chainstate: &StacksChainState, - burnchain: &Burnchain, - tip_height: u64, - ) -> f64 { - let this_reward_cycle = burnchain - .block_height_to_reward_cycle(tip_height) - .unwrap_or_else(|| panic!("BUG: no reward cycle for {tip_height}")); - let prev_reward_cycle = this_reward_cycle.saturating_sub(1); - - let start_height = burnchain.reward_cycle_to_block_height(prev_reward_cycle); - let end_height = burnchain.reward_cycle_to_block_height(this_reward_cycle); - - if this_reward_cycle > 0 { - assert!(start_height < end_height); - } else { - // no samples yet - return 1.0; - } - - let block_download_times = StacksChainState::measure_block_download_time( - chainstate.db(), - start_height, - end_height, - ) - .expect("BUG: failed to query chainstate block-download times"); - - PoxSyncWatchdog::hilo_filter_avg(&block_download_times) - } - - /// Reset internal state. Performed when it's okay to begin syncing the burnchain. - /// Updates estimate for block-processing time and block-downloading time. - fn reset(&mut self, burnchain: &Burnchain, tip_height: u64) { - // find the average (with low/high pass filter) time a block spends in the DB without being - // processed, during this reward cycle - self.estimated_block_process_time = - PoxSyncWatchdog::estimate_block_process_time(&self.chainstate, burnchain, tip_height); - - // find the average (with low/high pass filter) time a block spends downloading - self.estimated_block_download_time = - PoxSyncWatchdog::estimate_block_download_time(&self.chainstate, burnchain, tip_height); - - debug!( - "Estimated block download time: {}s. 
Estimated block processing time: {}s", - self.estimated_block_download_time, self.estimated_block_process_time - ); - - self.new_attachable_blocks.clear(); - self.new_processed_blocks.clear(); - self.last_block_processed_ts = 0; - self.watch_start_ts = 0; - self.steady_state_resync_ts = 0; - } - - /// Wait until all of the Stacks blocks for the given reward cycle are seemingly downloaded and - /// processed. Do so by watching the _rate_ at which attachable Stacks blocks arrive and get - /// processed. - /// Returns whether or not we're still in the initial block download -- i.e. true if we're - /// still downloading burnchain blocks, or we haven't reached steady-state block-processing. + /// Wait until the next PoX anchor block arrives. + /// We know for a fact that they all exist for Epochs 2.5 and earlier, in both mainnet and + /// testnet. + /// Return (still-in-ibd?, maximum-burnchain-sync-height) on success. pub fn pox_sync_wait( &mut self, burnchain: &Burnchain, burnchain_tip: &BurnchainTip, // this is the highest burnchain snapshot we've sync'ed to burnchain_height: u64, // this is the absolute burnchain block height - num_sortitions_in_last_cycle: u64, - ) -> Result { - if self.watch_start_ts == 0 { - self.watch_start_ts = get_epoch_time_secs(); - } - if self.steady_state_resync_ts == 0 { - self.steady_state_resync_ts = - get_epoch_time_secs() + self.steady_state_burnchain_sync_interval; - } + ) -> Result<(bool, u64), burnchain_error> { + let burnchain_rc = burnchain + .block_height_to_reward_cycle(burnchain_height) + .expect("FATAL: burnchain height is before system start"); + + let sortition_rc = burnchain + .block_height_to_reward_cycle(burnchain_tip.block_snapshot.block_height) + .expect("FATAL: sortition height is before system start"); let ibbd = PoxSyncWatchdog::infer_initial_burnchain_block_download( burnchain, @@ -444,220 +175,23 @@ impl PoxSyncWatchdog { burnchain_height, ); - // unconditionally download the first reward cycle - if 
burnchain_tip.block_snapshot.block_height - < burnchain.first_block_height + (burnchain.pox_constants.reward_cycle_length as u64) - { - debug!("PoX watchdog in first reward cycle -- sync immediately"); - self.relayer_comms.set_ibd(ibbd); + let max_sync_height = if sortition_rc < burnchain_rc { + burnchain + .reward_cycle_to_block_height(sortition_rc + 1) + .min(burnchain_height) + } else { + burnchain_tip + .block_snapshot + .block_height + .max(burnchain_height) + }; + self.relayer_comms.set_ibd(ibbd); + if !self.unconditionally_download { self.relayer_comms .interruptable_sleep(self.steady_state_burnchain_sync_interval)?; - - return Ok(ibbd); - } - - if self.unconditionally_download { - debug!("PoX watchdog set to unconditionally download (ibd={ibbd})"); - self.relayer_comms.set_ibd(ibbd); - return Ok(ibbd); - } - - let mut waited = false; - if ibbd { - // we are far behind the burnchain tip (i.e. not in the last reward cycle), - // so make sure the downloader knows about blocks it doesn't have yet so we can go and - // fetch its blocks before proceeding. 
- if num_sortitions_in_last_cycle > 0 { - debug!("PoX watchdog: Wait for at least one inventory state-machine pass..."); - self.relayer_comms.wait_for_inv_sync_pass(SYNC_WAIT_SECS)?; - waited = true; - } else { - debug!("PoX watchdog: In initial block download, and no sortitions to consider in this reward cycle -- sync immediately"); - self.relayer_comms.set_ibd(ibbd); - return Ok(ibbd); - } - } else { - debug!("PoX watchdog: not in initial burn block download, so not waiting for an inventory state-machine pass"); } - if burnchain_tip.block_snapshot.block_height - + (burnchain.pox_constants.reward_cycle_length as u64) - >= burnchain_height - { - // unconditionally download if we're within the last reward cycle (after the poll timeout) - if !waited { - debug!( - "PoX watchdog in last reward cycle -- sync after {} seconds", - self.steady_state_burnchain_sync_interval - ); - self.relayer_comms.set_ibd(ibbd); - - self.relayer_comms - .interruptable_sleep(self.steady_state_burnchain_sync_interval)?; - } else { - debug!("PoX watchdog in last reward cycle -- sync immediately"); - self.relayer_comms.set_ibd(ibbd); - } - return Ok(ibbd); - } - - // have we reached steady-state behavior? i.e. have we stopped processing both burnchain - // and Stacks blocks? 
- let mut steady_state = false; - debug!("PoX watchdog: Wait until chainstate reaches steady-state block-processing..."); - - let ibbd = loop { - if !self.relayer_comms.should_keep_running() { - break false; - } - let ibbd = PoxSyncWatchdog::infer_initial_burnchain_block_download( - burnchain, - burnchain_tip.block_snapshot.block_height, - burnchain_height, - ); - - let expected_first_block_deadline = - self.watch_start_ts + (self.estimated_block_download_time as u64); - let expected_last_block_deadline = self.last_block_processed_ts - + (self.estimated_block_download_time as u64) - + (self.estimated_block_process_time as u64); - - match ( - self.count_attachable_stacks_blocks(), - self.count_processed_stacks_blocks(), - ) { - (Ok(num_available), Ok(num_processed)) => { - self.new_attachable_blocks.push_back(num_available as i64); - self.new_processed_blocks.push_back(num_processed as i64); - - if (self.new_attachable_blocks.len() as u64) > self.max_samples { - self.new_attachable_blocks.pop_front(); - } - if (self.new_processed_blocks.len() as u64) > self.max_samples { - self.new_processed_blocks.pop_front(); - } - - if (self.new_attachable_blocks.len() as u64) < self.max_samples - || (self.new_processed_blocks.len() as u64) < self.max_samples - { - // still getting initial samples - if self.new_processed_blocks.len() % 10 == 0 { - debug!( - "PoX watchdog: Still warming up: {} out of {} samples...", - &self.new_attachable_blocks.len(), - &self.max_samples - ); - } - sleep_ms(PER_SAMPLE_WAIT_MS); - continue; - } - - if self.watch_start_ts > 0 - && get_epoch_time_secs() < expected_first_block_deadline - { - // still waiting for that first block in this reward cycle - debug!("PoX watchdog: Still warming up: waiting until {expected_first_block_deadline}s for first Stacks block download (estimated download time: {}s)...", self.estimated_block_download_time); - sleep_ms(PER_SAMPLE_WAIT_MS); - continue; - } - - if self.watch_start_ts > 0 - && 
(self.new_attachable_blocks.len() as u64) < self.max_samples - && self.watch_start_ts - + self.max_samples - + self.steady_state_burnchain_sync_interval - * (burnchain.stable_confirmations as u64) - < get_epoch_time_secs() - { - debug!( - "PoX watchdog: could not calculate {} samples in {} seconds. Assuming suspend/resume, or assuming load is too high.", - self.max_samples, - self.max_samples + self.steady_state_burnchain_sync_interval * (burnchain.stable_confirmations as u64) - ); - self.reset(burnchain, burnchain_tip.block_snapshot.block_height); - - self.watch_start_ts = get_epoch_time_secs(); - self.steady_state_resync_ts = - get_epoch_time_secs() + self.steady_state_burnchain_sync_interval; - continue; - } - - // take first derivative of samples -- see if the download and processing rate has gone to 0 - let attachable_delta = PoxSyncWatchdog::derivative(&self.new_attachable_blocks); - let processed_delta = PoxSyncWatchdog::derivative(&self.new_processed_blocks); - - let (flat_attachable, attachable_deviants) = - PoxSyncWatchdog::is_mostly_flat(&attachable_delta, 0); - let (flat_processed, processed_deviants) = - PoxSyncWatchdog::is_mostly_flat(&processed_delta, 0); - - debug!("PoX watchdog: flat-attachable?: {flat_attachable}, flat-processed?: {flat_processed}, estimated block-download time: {}s, estimated block-processing time: {}s", - self.estimated_block_download_time, self.estimated_block_process_time); - - if flat_attachable && flat_processed && self.last_block_processed_ts == 0 { - // we're flat-lining -- this may be the end of this cycle - self.last_block_processed_ts = get_epoch_time_secs(); - } - - if self.last_block_processed_ts > 0 - && get_epoch_time_secs() < expected_last_block_deadline - { - debug!("PoX watchdog: Still processing blocks; waiting until at least min({},{expected_last_block_deadline})s before burnchain synchronization (estimated block-processing time: {}s)", - get_epoch_time_secs() + 1, self.estimated_block_process_time); - 
sleep_ms(PER_SAMPLE_WAIT_MS); - continue; - } - - if ibbd { - // doing initial burnchain block download right now. - // only proceed to fetch the next reward cycle's burnchain blocks if we're neither downloading nor - // attaching blocks recently - debug!("PoX watchdog: In initial burnchain block download: flat-attachable = {flat_attachable}, flat-processed = {flat_processed}, min-attachable: {attachable_deviants}, min-processed: {processed_deviants}"); - - if !flat_attachable || !flat_processed { - sleep_ms(PER_SAMPLE_WAIT_MS); - continue; - } - } else { - let now = get_epoch_time_secs(); - if now < self.steady_state_resync_ts { - // steady state - if !steady_state { - debug!("PoX watchdog: In steady-state; waiting until at least {} before burnchain synchronization", self.steady_state_resync_ts); - steady_state = flat_attachable && flat_processed; - } - sleep_ms(PER_SAMPLE_WAIT_MS); - continue; - } else { - // steady state - if !steady_state { - debug!("PoX watchdog: In steady-state, but ready burnchain synchronization as of {}", self.steady_state_resync_ts); - steady_state = flat_attachable && flat_processed; - } - } - } - } - (err_attach, err_processed) => { - // can only happen on DB query failure - error!("PoX watchdog: Failed to count recently attached ('{err_attach:?}') and/or processed ('{err_processed:?}') staging blocks"); - panic!(); - } - }; - - if ibbd || !steady_state { - debug!("PoX watchdog: Wait for at least one downloader state-machine pass before resetting..."); - self.relayer_comms.wait_for_download_pass(SYNC_WAIT_SECS)?; - } else { - debug!("PoX watchdog: in steady-state, so not waiting for download pass"); - } - - self.reset(burnchain, burnchain_tip.block_snapshot.block_height); - break ibbd; - }; - - let ret = ibbd || !steady_state; - self.relayer_comms.set_ibd(ret); - Ok(ret) + Ok((ibbd, max_sync_height)) } } diff --git a/testnet/stacks-node/src/tests/bitcoin_regtest.rs b/testnet/stacks-node/src/tests/bitcoin_regtest.rs index 
3e69ac18cc..ef193f56f7 100644 --- a/testnet/stacks-node/src/tests/bitcoin_regtest.rs +++ b/testnet/stacks-node/src/tests/bitcoin_regtest.rs @@ -7,12 +7,12 @@ use stacks::chainstate::burn::operations::BlockstackOperationType::{ LeaderBlockCommit, LeaderKeyRegister, }; use stacks::chainstate::stacks::StacksPrivateKey; +use stacks::config::InitialBalance; use stacks::core::StacksEpochId; use stacks_common::util::hash::hex_bytes; use super::PUBLISH_CONTRACT; use crate::burnchains::bitcoin_regtest_controller::BitcoinRPCRequest; -use crate::config::InitialBalance; use crate::helium::RunLoop; use crate::tests::to_addr; use crate::Config; diff --git a/testnet/stacks-node/src/tests/epoch_205.rs b/testnet/stacks-node/src/tests/epoch_205.rs index b305a7429a..e555b6a8aa 100644 --- a/testnet/stacks-node/src/tests/epoch_205.rs +++ b/testnet/stacks-node/src/tests/epoch_205.rs @@ -13,6 +13,7 @@ use stacks::chainstate::stacks::db::StacksChainState; use stacks::chainstate::stacks::{ StacksBlockHeader, StacksPrivateKey, StacksTransaction, TransactionPayload, }; +use stacks::config::{EventKeyType, InitialBalance}; use stacks::core::{ self, EpochList, StacksEpoch, StacksEpochId, PEER_VERSION_EPOCH_1_0, PEER_VERSION_EPOCH_2_0, PEER_VERSION_EPOCH_2_05, PEER_VERSION_EPOCH_2_1, @@ -22,7 +23,6 @@ use stacks_common::types::chainstate::{BlockHeaderHash, BurnchainHeaderHash, VRF use stacks_common::util::hash::hex_bytes; use stacks_common::util::sleep_ms; -use crate::config::{EventKeyType, InitialBalance}; use crate::tests::bitcoin_regtest::BitcoinCoreController; use crate::tests::neon_integrations::*; use crate::tests::{ @@ -1123,8 +1123,8 @@ fn bigger_microblock_streams_in_2_05() { sleep_ms(120_000); next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - let mut epoch_20_stream_cost = ExecutionCost::zero(); - let mut epoch_205_stream_cost = ExecutionCost::zero(); + let mut epoch_20_stream_cost = ExecutionCost::ZERO; + let mut epoch_205_stream_cost = ExecutionCost::ZERO; // max 
== largest number of transactions per stream in a given epoch (2.0 or 2.05) // total == number of transactions across all streams in a given epoch (2.0 or 2.05) @@ -1155,7 +1155,7 @@ fn bigger_microblock_streams_in_2_05() { eprintln!("{}", transactions.len()); let mut num_big_microblock_txs = 0; - let mut total_execution_cost = ExecutionCost::zero(); + let mut total_execution_cost = ExecutionCost::ZERO; for tx in transactions.iter() { let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); @@ -1204,7 +1204,7 @@ fn bigger_microblock_streams_in_2_05() { epoch_20_stream_cost = total_execution_cost; break; } - if in_205 && total_execution_cost.exceeds(&ExecutionCost::zero()) { + if in_205 && total_execution_cost.exceeds(&ExecutionCost::ZERO) { have_confirmed_205_stream = true; epoch_205_stream_cost = total_execution_cost; break; diff --git a/testnet/stacks-node/src/tests/epoch_21.rs b/testnet/stacks-node/src/tests/epoch_21.rs index 55d3ee0b7b..d50cac0117 100644 --- a/testnet/stacks-node/src/tests/epoch_21.rs +++ b/testnet/stacks-node/src/tests/epoch_21.rs @@ -24,6 +24,7 @@ use stacks::chainstate::stacks::miner::{ }; use stacks::chainstate::stacks::StacksBlockHeader; use stacks::clarity_cli::vm_execute as execute; +use stacks::config::{Config, InitialBalance}; use stacks::core::{self, EpochList, BURNCHAIN_TX_SEARCH_WINDOW}; use stacks::util_lib::boot::boot_code_id; use stacks_common::types::chainstate::{ @@ -35,7 +36,6 @@ use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks_common::util::sleep_ms; use crate::burnchains::bitcoin_regtest_controller::UTXO; -use crate::config::{Config, InitialBalance}; use crate::neon::RunLoopCounter; use crate::operations::BurnchainOpSigner; use crate::stacks_common::address::AddressHashMode; diff --git a/testnet/stacks-node/src/tests/epoch_22.rs b/testnet/stacks-node/src/tests/epoch_22.rs index 3bf521d7cb..493fb36fcd 100644 --- a/testnet/stacks-node/src/tests/epoch_22.rs +++ 
b/testnet/stacks-node/src/tests/epoch_22.rs @@ -8,6 +8,7 @@ use stacks::chainstate::stacks::address::PoxAddress; use stacks::chainstate::stacks::db::StacksChainState; use stacks::chainstate::stacks::miner::{signal_mining_blocked, signal_mining_ready}; use stacks::clarity_cli::vm_execute as execute; +use stacks::config::{EventKeyType, EventObserverConfig, InitialBalance}; use stacks::core::{self, EpochList, STACKS_EPOCH_MAX}; use stacks::util_lib::boot::boot_code_id; use stacks_common::types::chainstate::{StacksAddress, StacksBlockId}; @@ -17,7 +18,6 @@ use stacks_common::util::secp256k1::Secp256k1PublicKey; use stacks_common::util::sleep_ms; use super::neon_integrations::get_account; -use crate::config::{EventKeyType, EventObserverConfig, InitialBalance}; use crate::neon_node::StacksNode; use crate::stacks_common::types::Address; use crate::stacks_common::util::hash::bytes_to_hex; diff --git a/testnet/stacks-node/src/tests/epoch_23.rs b/testnet/stacks-node/src/tests/epoch_23.rs index 92b6a97b8f..085e5a49cb 100644 --- a/testnet/stacks-node/src/tests/epoch_23.rs +++ b/testnet/stacks-node/src/tests/epoch_23.rs @@ -18,10 +18,10 @@ use std::{env, thread}; use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; use stacks::burnchains::{Burnchain, PoxConstants}; +use stacks::config::InitialBalance; use stacks::core::{self, EpochList, STACKS_EPOCH_MAX}; use stacks_common::util::sleep_ms; -use crate::config::InitialBalance; use crate::tests::bitcoin_regtest::BitcoinCoreController; use crate::tests::neon_integrations::*; use crate::tests::*; diff --git a/testnet/stacks-node/src/tests/epoch_24.rs b/testnet/stacks-node/src/tests/epoch_24.rs index 5e4ff9852a..8780d08012 100644 --- a/testnet/stacks-node/src/tests/epoch_24.rs +++ b/testnet/stacks-node/src/tests/epoch_24.rs @@ -26,6 +26,7 @@ use stacks::chainstate::stacks::boot::RawRewardSetEntry; use stacks::chainstate::stacks::db::StacksChainState; use stacks::chainstate::stacks::{Error, StacksTransaction, 
TransactionPayload}; use stacks::clarity_cli::vm_execute as execute; +use stacks::config::InitialBalance; use stacks::core::{self, EpochList, StacksEpochId}; use stacks_common::address::{AddressHashMode, C32_ADDRESS_VERSION_TESTNET_SINGLESIG}; use stacks_common::consts::STACKS_EPOCH_MAX; @@ -35,7 +36,6 @@ use stacks_common::util::hash::{bytes_to_hex, hex_bytes, Hash160}; use stacks_common::util::secp256k1::Secp256k1PublicKey; use stacks_common::util::sleep_ms; -use crate::config::InitialBalance; use crate::stacks_common::codec::StacksMessageCodec; use crate::tests::bitcoin_regtest::BitcoinCoreController; use crate::tests::neon_integrations::{ diff --git a/testnet/stacks-node/src/tests/epoch_25.rs b/testnet/stacks-node/src/tests/epoch_25.rs index 0d3087fa0d..e840b0fcd3 100644 --- a/testnet/stacks-node/src/tests/epoch_25.rs +++ b/testnet/stacks-node/src/tests/epoch_25.rs @@ -17,13 +17,13 @@ use std::{env, thread}; use clarity::vm::types::PrincipalData; use stacks::burnchains::{Burnchain, PoxConstants}; +use stacks::config::InitialBalance; use stacks::core::{self, EpochList, StacksEpochId}; use stacks_common::consts::STACKS_EPOCH_MAX; use stacks_common::types::chainstate::StacksPrivateKey; -use crate::config::InitialBalance; use crate::tests::bitcoin_regtest::BitcoinCoreController; -use crate::tests::nakamoto_integrations::{next_block_and, wait_for}; +use crate::tests::nakamoto_integrations::wait_for; use crate::tests::neon_integrations::{ get_account, get_chain_info, neon_integration_test_conf, next_block_and_wait, submit_tx, test_observer, wait_for_runloop, diff --git a/testnet/stacks-node/src/tests/integrations.rs b/testnet/stacks-node/src/tests/integrations.rs index 79c3394352..7f893835d1 100644 --- a/testnet/stacks-node/src/tests/integrations.rs +++ b/testnet/stacks-node/src/tests/integrations.rs @@ -24,6 +24,7 @@ use stacks::chainstate::stacks::{ }; use stacks::clarity_vm::clarity::ClarityConnection; use stacks::codec::StacksMessageCodec; +use 
stacks::config::InitialBalance; use stacks::core::mempool::MAXIMUM_MEMPOOL_TX_CHAINING; use stacks::core::{ EpochList, StacksEpoch, StacksEpochId, CHAIN_ID_TESTNET, PEER_VERSION_EPOCH_2_0, @@ -40,7 +41,6 @@ use super::{ make_contract_call, make_contract_publish, make_stacks_transfer, to_addr, ADDR_4, SK_1, SK_2, SK_3, }; -use crate::config::InitialBalance; use crate::helium::RunLoop; use crate::tests::make_sponsored_stacks_transfer_on_testnet; diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 5d712ad550..13923a847a 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -29,6 +29,7 @@ use http_types::headers::AUTHORIZATION; use lazy_static::lazy_static; use libsigner::v0::messages::SignerMessage as SignerMessageV0; use libsigner::{SignerSession, StackerDBSession}; +use rusqlite::OptionalExtension; use stacks::burnchains::{MagicBytes, Txid}; use stacks::chainstate::burn::db::sortdb::SortitionDB; use stacks::chainstate::burn::operations::{ @@ -37,8 +38,9 @@ use stacks::chainstate::burn::operations::{ }; use stacks::chainstate::coordinator::comm::CoordinatorChannels; use stacks::chainstate::coordinator::OnChainRewardSetProvider; -use stacks::chainstate::nakamoto::coordinator::load_nakamoto_reward_set; +use stacks::chainstate::nakamoto::coordinator::{load_nakamoto_reward_set, TEST_COORDINATOR_STALL}; use stacks::chainstate::nakamoto::miner::NakamotoBlockBuilder; +use stacks::chainstate::nakamoto::shadow::shadow_chainstate_repair; use stacks::chainstate::nakamoto::test_signers::TestSigners; use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; use stacks::chainstate::stacks::address::{PoxAddress, StacksAddressExtensions}; @@ -55,12 +57,13 @@ use stacks::chainstate::stacks::{ TransactionPostConditionMode, TransactionPublicKeyEncoding, TransactionSpendingCondition, 
TransactionVersion, MAX_BLOCK_LEN, }; +use stacks::config::{EventKeyType, InitialBalance}; use stacks::core::mempool::MAXIMUM_MEMPOOL_TX_CHAINING; use stacks::core::{ EpochList, StacksEpoch, StacksEpochId, BLOCK_LIMIT_MAINNET_10, HELIUM_BLOCK_LIMIT_20, PEER_VERSION_EPOCH_1_0, PEER_VERSION_EPOCH_2_0, PEER_VERSION_EPOCH_2_05, PEER_VERSION_EPOCH_2_1, PEER_VERSION_EPOCH_2_2, PEER_VERSION_EPOCH_2_3, PEER_VERSION_EPOCH_2_4, - PEER_VERSION_EPOCH_2_5, PEER_VERSION_EPOCH_3_0, PEER_VERSION_TESTNET, + PEER_VERSION_EPOCH_2_5, PEER_VERSION_EPOCH_3_0, PEER_VERSION_EPOCH_3_1, PEER_VERSION_TESTNET, }; use stacks::libstackerdb::SlotMetadata; use stacks::net::api::callreadonly::CallReadOnlyRequestBody; @@ -70,7 +73,7 @@ use stacks::net::api::getstackers::GetStackersResponse; use stacks::net::api::postblock_proposal::{ BlockValidateReject, BlockValidateResponse, NakamotoBlockProposal, ValidateRejectCode, }; -use stacks::types::chainstate::StacksBlockId; +use stacks::types::chainstate::{ConsensusHash, StacksBlockId}; use stacks::util::hash::hex_bytes; use stacks::util_lib::boot::boot_code_id; use stacks::util_lib::signed_structured_data::pox4::{ @@ -84,15 +87,15 @@ use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, StacksAddress, StacksPrivateKey, StacksPublicKey, TrieHash, }; -use stacks_common::types::StacksPublicKeyBuffer; +use stacks_common::types::{set_test_coinbase_schedule, CoinbaseInterval, StacksPublicKeyBuffer}; use stacks_common::util::hash::{to_hex, Hash160, Sha512Trunc256Sum}; use stacks_common::util::secp256k1::{MessageSignature, Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks_common::util::{get_epoch_time_secs, sleep_ms}; use stacks_signer::chainstate::{ProposalEvalConfig, SortitionsView}; use stacks_signer::signerdb::{BlockInfo, BlockState, ExtraBlockInfo, SignerDb}; +use stacks_signer::v0::SpawnedSigner; use super::bitcoin_regtest::BitcoinCoreController; -use crate::config::{EventKeyType, InitialBalance}; use 
crate::nakamoto_node::miner::{ TEST_BLOCK_ANNOUNCE_STALL, TEST_BROADCAST_STALL, TEST_MINE_STALL, TEST_SKIP_P2P_BROADCAST, }; @@ -104,6 +107,7 @@ use crate::tests::neon_integrations::{ get_neighbors, get_pox_info, next_block_and_wait, run_until_burnchain_height, submit_tx, submit_tx_fallible, test_observer, wait_for_runloop, }; +use crate::tests::signer::SignerTest; use crate::tests::{ gen_random_port, get_chain_info, make_contract_call, make_contract_publish, make_contract_publish_versioned, make_stacks_transfer, to_addr, @@ -114,7 +118,7 @@ pub static POX_4_DEFAULT_STACKER_BALANCE: u64 = 100_000_000_000_000; pub static POX_4_DEFAULT_STACKER_STX_AMT: u128 = 99_000_000_000_000; lazy_static! { - pub static ref NAKAMOTO_INTEGRATION_EPOCHS: [StacksEpoch; 9] = [ + pub static ref NAKAMOTO_INTEGRATION_EPOCHS: [StacksEpoch; 10] = [ StacksEpoch { epoch_id: StacksEpochId::Epoch10, start_height: 0, @@ -174,10 +178,17 @@ lazy_static! { StacksEpoch { epoch_id: StacksEpochId::Epoch30, start_height: 231, - end_height: STACKS_EPOCH_MAX, + end_height: 241, block_limit: HELIUM_BLOCK_LIMIT_20.clone(), network_epoch: PEER_VERSION_EPOCH_3_0 }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch31, + start_height: 241, + end_height: STACKS_EPOCH_MAX, + block_limit: HELIUM_BLOCK_LIMIT_20.clone(), + network_epoch: PEER_VERSION_EPOCH_3_1 + }, ]; } @@ -3393,7 +3404,7 @@ fn vote_for_aggregate_key_burn_op() { /// This test boots a follower node using the block downloader #[test] #[ignore] -fn follower_bootup() { +fn follower_bootup_simple() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } @@ -3805,8 +3816,13 @@ fn follower_bootup_across_multiple_cycles() { .reward_cycle_length * 2 { + let commits_before = commits_submitted.load(Ordering::SeqCst); next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel) .unwrap(); + wait_for(20, || { + Ok(commits_submitted.load(Ordering::SeqCst) > commits_before) + }) + .unwrap(); } info!("Nakamoto miner has advanced 
two reward cycles"); @@ -6357,18 +6373,12 @@ fn signer_chainstate() { ) .unwrap(); - let reward_cycle = burnchain - .block_height_to_reward_cycle( - SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) - .unwrap() - .block_height, - ) - .unwrap(); // this config disallows any reorg due to poorly timed block commits let proposal_conf = ProposalEvalConfig { first_proposal_burn_block_timing: Duration::from_secs(0), block_proposal_timeout: Duration::from_secs(100), tenure_last_block_proposal_timeout: Duration::from_secs(30), + tenure_idle_timeout: Duration::from_secs(300), }; let mut sortitions_view = SortitionsView::fetch_view(proposal_conf, &signer_client).unwrap(); @@ -6384,7 +6394,6 @@ fn signer_chainstate() { &mut signer_db, prior_tenure_first, miner_pk, - reward_cycle, true, ) .unwrap(); @@ -6394,14 +6403,7 @@ fn signer_chainstate() { ); for block in prior_tenure_interims.iter() { let valid = sortitions_view - .check_proposal( - &signer_client, - &mut signer_db, - block, - miner_pk, - reward_cycle, - true, - ) + .check_proposal(&signer_client, &mut signer_db, block, miner_pk, true) .unwrap(); assert!( !valid, @@ -6436,7 +6438,6 @@ fn signer_chainstate() { &mut signer_db, &proposal.0, &proposal.1, - reward_cycle, true, ) .unwrap(); @@ -6458,6 +6459,7 @@ fn signer_chainstate() { signed_group: None, ext: ExtraBlockInfo::None, state: BlockState::Unprocessed, + validation_time_ms: None, }) .unwrap(); @@ -6492,7 +6494,6 @@ fn signer_chainstate() { &mut signer_db, &proposal_interim.0, &proposal_interim.1, - reward_cycle, true, ) .unwrap(); @@ -6508,6 +6509,7 @@ fn signer_chainstate() { first_proposal_burn_block_timing: Duration::from_secs(0), block_proposal_timeout: Duration::from_secs(100), tenure_last_block_proposal_timeout: Duration::from_secs(30), + tenure_idle_timeout: Duration::from_secs(300), }; let burn_block_height = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) .unwrap() @@ -6523,7 +6525,6 @@ fn signer_chainstate() { &mut signer_db, 
&proposal_interim.0, &proposal_interim.1, - reward_cycle, true, ) .unwrap(); @@ -6546,6 +6547,7 @@ fn signer_chainstate() { signed_group: Some(get_epoch_time_secs()), ext: ExtraBlockInfo::None, state: BlockState::GloballyAccepted, + validation_time_ms: Some(1000), }) .unwrap(); @@ -6586,14 +6588,9 @@ fn signer_chainstate() { first_proposal_burn_block_timing: Duration::from_secs(0), block_proposal_timeout: Duration::from_secs(100), tenure_last_block_proposal_timeout: Duration::from_secs(30), + tenure_idle_timeout: Duration::from_secs(300), }; let mut sortitions_view = SortitionsView::fetch_view(proposal_conf, &signer_client).unwrap(); - let burn_block_height = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) - .unwrap() - .block_height; - let reward_cycle = burnchain - .block_height_to_reward_cycle(burn_block_height) - .unwrap(); assert!( !sortitions_view .check_proposal( @@ -6601,7 +6598,6 @@ fn signer_chainstate() { &mut signer_db, &sibling_block, &miner_pk, - reward_cycle, false, ) .unwrap(), @@ -6659,7 +6655,6 @@ fn signer_chainstate() { &mut signer_db, &sibling_block, &miner_pk, - reward_cycle, false, ) .unwrap(), @@ -6723,7 +6718,6 @@ fn signer_chainstate() { &mut signer_db, &sibling_block, &miner_pk, - reward_cycle, false, ) .unwrap(), @@ -6789,7 +6783,6 @@ fn signer_chainstate() { &mut signer_db, &sibling_block, &miner_pk, - reward_cycle, false, ) .unwrap(), @@ -9429,6 +9422,178 @@ fn v3_blockbyheight_api_endpoint() { run_loop_thread.join().unwrap(); } +/// Verify that lockup events are attached to a phantom tx receipt +/// if the block does not have a coinbase tx +#[test] +#[ignore] +fn nakamoto_lockup_events() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let (mut conf, _miner_account) = naka_neon_integration_conf(None); + let password = "12345".to_string(); + conf.connection_options.auth_token = Some(password.clone()); + conf.miner.wait_on_interim_blocks = Duration::from_secs(1); + let stacker_sk = setup_stacker(&mut 
conf); + let signer_sk = Secp256k1PrivateKey::new(); + let signer_addr = tests::to_addr(&signer_sk); + let _signer_pubkey = Secp256k1PublicKey::from_private(&signer_sk); + let sender_sk = Secp256k1PrivateKey::new(); + // setup sender + recipient for some test stx transfers + // these are necessary for the interim blocks to get mined at all + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + conf.add_initial_balance( + PrincipalData::from(sender_addr).to_string(), + (send_amt + send_fee) * 100, + ); + conf.add_initial_balance(PrincipalData::from(signer_addr).to_string(), 100000); + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + + // only subscribe to the block proposal events + test_observer::spawn(); + test_observer::register_any(&mut conf); + + let mut btcd_controller = BitcoinCoreController::new(conf.clone()); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); + let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None); + btc_regtest_controller.bootstrap_chain(201); + + let mut run_loop = boot_nakamoto::BootRunLoop::new(conf.clone()).unwrap(); + let run_loop_stopper = run_loop.get_termination_switch(); + let Counters { + blocks_processed, + naka_submitted_commits: commits_submitted, + naka_proposed_blocks: proposals_submitted, + .. 
+ } = run_loop.counters(); + + let coord_channel = run_loop.coordinator_channels(); + + let run_loop_thread = thread::spawn(move || run_loop.start(None, 0)); + let mut signers = TestSigners::new(vec![signer_sk]); + wait_for_runloop(&blocks_processed); + boot_to_epoch_3( + &conf, + &blocks_processed, + &[stacker_sk], + &[signer_sk], + &mut Some(&mut signers), + &mut btc_regtest_controller, + ); + + info!("------------------------- Reached Epoch 3.0 -------------------------"); + blind_signer(&conf, &signers, proposals_submitted); + let burnchain = conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + let (chainstate, _) = StacksChainState::open( + conf.is_mainnet(), + conf.burnchain.chain_id, + &conf.get_chainstate_path_str(), + None, + ) + .unwrap(); + // TODO (hack) instantiate the sortdb in the burnchain + _ = btc_regtest_controller.sortdb_mut(); + + info!("------------------------- Setup finished, run test -------------------------"); + + next_block_and_mine_commit( + &mut btc_regtest_controller, + 60, + &coord_channel, + &commits_submitted, + ) + .unwrap(); + + let http_origin = format!("http://{}", &conf.node.rpc_bind); + + let get_stacks_height = || { + let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + tip.stacks_block_height + }; + let initial_block_height = get_stacks_height(); + + // This matches the data in `stx-genesis/chainstate-test.txt` + // Recipient: ST2CTPPV8BHBVSQR727A3MK00ZD85RNY9015WGW2D + let unlock_recipient = "ST2CTPPV8BHBVSQR727A3MK00ZD85RNY9015WGW2D"; + let unlock_height = 35_u64; + let interims_to_mine = unlock_height - initial_block_height; + + info!( + "----- Mining to unlock height -----"; + "unlock_height" => unlock_height, + "initial_height" => initial_block_height, + "interims_to_mine" => interims_to_mine, + ); + + // submit a tx so that the miner will mine an extra stacks block + let mut sender_nonce = 0; + + for _ in 0..interims_to_mine { + let 
height_before = get_stacks_height(); + info!("----- Mining interim block -----"; + "height" => %height_before, + "nonce" => %sender_nonce, + ); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + conf.burnchain.chain_id, + &recipient, + send_amt, + ); + submit_tx(&http_origin, &transfer_tx); + sender_nonce += 1; + + wait_for(30, || Ok(get_stacks_height() > height_before)).unwrap(); + } + + let blocks = test_observer::get_blocks(); + let block = blocks.last().unwrap(); + assert_eq!( + block.get("block_height").unwrap().as_u64().unwrap(), + unlock_height + ); + + let events = block.get("events").unwrap().as_array().unwrap(); + let mut found_event = false; + for event in events { + let mint_event = event.get("stx_mint_event"); + if mint_event.is_some() { + found_event = true; + let mint_event = mint_event.unwrap(); + let recipient = mint_event.get("recipient").unwrap().as_str().unwrap(); + assert_eq!(recipient, unlock_recipient); + let amount = mint_event.get("amount").unwrap().as_str().unwrap(); + assert_eq!(amount, "12345678"); + let txid = event.get("txid").unwrap().as_str().unwrap(); + assert_eq!( + txid, + "0x63dd5773338782755e4947a05a336539137dfe13b19a0eac5154306850aca8ef" + ); + } + } + assert!(found_event); + + info!("------------------------- Test finished, clean up -------------------------"); + + coord_channel + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper.store(false, Ordering::SeqCst); + + run_loop_thread.join().unwrap(); +} + #[test] #[ignore] /// This test spins up a nakamoto-neon node. 
@@ -9625,11 +9790,403 @@ fn skip_mining_long_tx() { run_loop_thread.join().unwrap(); } +/// Verify that a node in which there is no prepare-phase block can be recovered by +/// live-instantiating shadow tenures in the prepare phase +#[test] +#[ignore] +fn test_shadow_recovery() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let mut signer_test: SignerTest = SignerTest::new(1, vec![]); + signer_test.boot_to_epoch_3(); + + let naka_conf = signer_test.running_nodes.conf.clone(); + let btc_regtest_controller = &mut signer_test.running_nodes.btc_regtest_controller; + let coord_channel = signer_test.running_nodes.coord_channel.clone(); + let commits_submitted = signer_test.running_nodes.commits_submitted.clone(); + + // make another tenure + next_block_and_mine_commit( + btc_regtest_controller, + 60, + &coord_channel, + &commits_submitted, + ) + .unwrap(); + + let block_height = btc_regtest_controller.get_headers_height(); + let reward_cycle = btc_regtest_controller + .get_burnchain() + .block_height_to_reward_cycle(block_height) + .unwrap(); + let prepare_phase_start = btc_regtest_controller + .get_burnchain() + .pox_constants + .prepare_phase_start( + btc_regtest_controller.get_burnchain().first_block_height, + reward_cycle, + ); + + let blocks_until_next_rc = prepare_phase_start + 1 - block_height + + (btc_regtest_controller + .get_burnchain() + .pox_constants + .prepare_length as u64) + + 1; + + // kill the chain by blowing through a prepare phase + btc_regtest_controller.bootstrap_chain(blocks_until_next_rc); + let target_burn_height = btc_regtest_controller.get_headers_height(); + + let burnchain = naka_conf.get_burnchain(); + let mut sortdb = burnchain.open_sortition_db(true).unwrap(); + let (mut chainstate, _) = StacksChainState::open( + false, + CHAIN_ID_TESTNET, + &naka_conf.get_chainstate_path_str(), + None, + ) + .unwrap(); + + wait_for(30, || { + let burn_height = get_chain_info(&naka_conf).burn_block_height; + if burn_height >= 
target_burn_height { + return Ok(true); + } + sleep_ms(500); + Ok(false) + }) + .unwrap(); + + let stacks_height_before = get_chain_info(&naka_conf).stacks_tip_height; + + // TODO: stall block processing; otherwise this test can flake + // stop block processing on the node + TEST_COORDINATOR_STALL.lock().unwrap().replace(true); + + // fix node + let shadow_blocks = shadow_chainstate_repair(&mut chainstate, &mut sortdb).unwrap(); + assert!(shadow_blocks.len() > 0); + + wait_for(30, || { + let Some(info) = get_chain_info_opt(&naka_conf) else { + sleep_ms(500); + return Ok(false); + }; + Ok(info.stacks_tip_height >= stacks_height_before) + }) + .unwrap(); + + TEST_COORDINATOR_STALL.lock().unwrap().replace(false); + info!("Beginning post-shadow tenures"); + + // revive ATC-C by waiting for commits + for _i in 0..4 { + btc_regtest_controller.bootstrap_chain(1); + sleep_ms(30_000); + } + + // make another tenure + next_block_and_mine_commit( + btc_regtest_controller, + 60, + &coord_channel, + &commits_submitted, + ) + .unwrap(); + + // all shadow blocks are present and processed + let mut shadow_ids = HashSet::new(); + for sb in shadow_blocks { + let (_, processed, orphaned, _) = chainstate + .nakamoto_blocks_db() + .get_block_processed_and_signed_weight( + &sb.header.consensus_hash, + &sb.header.block_hash(), + ) + .unwrap() + .unwrap(); + assert!(processed); + assert!(!orphaned); + shadow_ids.insert(sb.block_id()); + } + + let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + let mut cursor = tip.index_block_hash(); + + // the chainstate has four parts: + // * epoch 2 + // * epoch 3 prior to failure + // * shadow blocks + // * epoch 3 after recovery + // Make sure they're all there + + let mut has_epoch_3_recovery = false; + let mut has_shadow_blocks = false; + let mut has_epoch_3_failure = false; + + loop { + let header = NakamotoChainState::get_block_header(chainstate.db(), &cursor) + .unwrap() + .unwrap(); + if 
header.anchored_header.as_stacks_epoch2().is_some() { + break; + } + + let header = header.anchored_header.as_stacks_nakamoto().clone().unwrap(); + + if header.is_shadow_block() { + assert!(shadow_ids.contains(&header.block_id())); + } else { + assert!(!shadow_ids.contains(&header.block_id())); + } + + if !header.is_shadow_block() && !has_epoch_3_recovery { + has_epoch_3_recovery = true; + } else if header.is_shadow_block() && has_epoch_3_recovery && !has_shadow_blocks { + has_shadow_blocks = true; + } else if !header.is_shadow_block() + && has_epoch_3_recovery + && has_shadow_blocks + && !has_epoch_3_failure + { + has_epoch_3_failure = true; + } + + cursor = header.parent_block_id; + } + + assert!(has_epoch_3_recovery); + assert!(has_shadow_blocks); + assert!(has_epoch_3_failure); +} + #[test] #[ignore] +/// Integration test for SIP-029 +fn sip029_coinbase_change() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + let new_sched = vec![ + CoinbaseInterval { + coinbase: 1_000_000_000, + effective_start_height: 0, + }, + // NOTE: epoch 3.1 goes into effect at 241 + CoinbaseInterval { + coinbase: 500_000_000, + effective_start_height: 245, + }, + CoinbaseInterval { + coinbase: 125_000_000, + effective_start_height: 255, + }, + CoinbaseInterval { + coinbase: 62_500_000, + effective_start_height: 265, + }, + ]; + + set_test_coinbase_schedule(Some(new_sched.clone())); + + let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); + naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); + naka_conf.node.pox_sync_sample_secs = 180; + naka_conf.burnchain.max_rbf = 10_000_000; + + let sender_sk = Secp256k1PrivateKey::new(); + let sender_signer_sk = Secp256k1PrivateKey::new(); + let sender_signer_addr = tests::to_addr(&sender_signer_sk); + let mut signers = TestSigners::new(vec![sender_signer_sk]); + let tenure_count = 5; + let inter_blocks_per_tenure = 9; + // setup sender + recipient for some test stx transfers + // these are 
necessary for the interim blocks to get mined at all + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + naka_conf.add_initial_balance( + PrincipalData::from(sender_addr).to_string(), + (send_amt + send_fee) * tenure_count * inter_blocks_per_tenure, + ); + naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); + let stacker_sk = setup_stacker(&mut naka_conf); + + test_observer::spawn(); + test_observer::register_any(&mut naka_conf); + + let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); + let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); + btc_regtest_controller.bootstrap_chain(201); + + let mut run_loop = boot_nakamoto::BootRunLoop::new(naka_conf.clone()).unwrap(); + let run_loop_stopper = run_loop.get_termination_switch(); + let Counters { + blocks_processed, + naka_submitted_commits: commits_submitted, + naka_proposed_blocks: proposals_submitted, + .. 
+ } = run_loop.counters(); + + let coord_channel = run_loop.coordinator_channels(); + + let run_loop_thread = thread::Builder::new() + .name("run_loop".into()) + .spawn(move || run_loop.start(None, 0)) + .unwrap(); + wait_for_runloop(&blocks_processed); + boot_to_epoch_3( + &naka_conf, + &blocks_processed, + &[stacker_sk], + &[sender_signer_sk], + &mut Some(&mut signers), + &mut btc_regtest_controller, + ); + + info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner"); + + let burnchain = naka_conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + let (chainstate, _) = StacksChainState::open( + naka_conf.is_mainnet(), + naka_conf.burnchain.chain_id, + &naka_conf.get_chainstate_path_str(), + None, + ) + .unwrap(); + + info!("Nakamoto miner started..."); + blind_signer(&naka_conf, &signers, proposals_submitted); + + wait_for_first_naka_block_commit(60, &commits_submitted); + + // mine until burnchain height 270 + loop { + let commits_before = commits_submitted.load(Ordering::SeqCst); + next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel) + .unwrap(); + wait_for(20, || { + Ok(commits_submitted.load(Ordering::SeqCst) > commits_before) + }) + .unwrap(); + + let node_info = get_chain_info_opt(&naka_conf).unwrap(); + if node_info.burn_block_height >= 270 { + break; + } + } + + info!("Nakamoto miner has advanced to burn height 270"); + + // inspect `payments` table to see that coinbase was applied + let all_snapshots = sortdb.get_all_snapshots().unwrap(); + + // whether or not the last snapshot had a sortition + let mut prev_sortition = false; + + // whether or not we witnessed the requisite coinbases + let mut witnessed_1000 = false; + let mut witnessed_500 = false; + let mut witnessed_125 = false; + let mut witnessed_62_5 = false; + + // initial mining bonus + let initial_mining_bonus = 20400000; + + for sn in all_snapshots { + if !sn.sortition { + prev_sortition = false; + continue; + } + if 
sn.consensus_hash == ConsensusHash([0x00; 20]) { + continue; + } + let coinbase = { + let sql = "SELECT coinbase FROM payments WHERE consensus_hash = ?1"; + let args = rusqlite::params![&sn.consensus_hash]; + let Some(coinbase) = chainstate + .db() + .query_row(sql, args, |r| { + let coinbase_txt: String = r.get_unwrap(0); + let coinbase: u64 = coinbase_txt.parse().unwrap(); + Ok(coinbase) + }) + .optional() + .unwrap() + else { + info!("No coinbase for {} {}", sn.block_height, &sn.consensus_hash); + continue; + }; + + coinbase + }; + + info!( + "Coinbase at {} {}: {}", + sn.block_height, &sn.consensus_hash, coinbase + ); + // use >= for coinbases since a missed sortition can lead to coinbase accumulation + if sn.block_height < 245 { + if prev_sortition { + assert_eq!(coinbase, 1_000_000_000 + initial_mining_bonus); + witnessed_1000 = true; + } else { + assert!(coinbase >= 1_000_000_000 + initial_mining_bonus); + } + } else if sn.block_height < 255 { + if prev_sortition { + assert_eq!(coinbase, 500_000_000 + initial_mining_bonus); + witnessed_500 = true; + } else { + assert!(coinbase >= 500_000_000 + initial_mining_bonus); + } + } else if sn.block_height < 265 { + if prev_sortition { + assert_eq!(coinbase, 125_000_000 + initial_mining_bonus); + witnessed_125 = true; + } else { + assert!(coinbase >= 125_000_000 + initial_mining_bonus); + } + } else { + if prev_sortition { + assert_eq!(coinbase, 62_500_000 + initial_mining_bonus); + witnessed_62_5 = true; + } else { + assert!(coinbase >= 62_500_000 + initial_mining_bonus); + } + } + + prev_sortition = true; + } + + assert!(witnessed_1000); + assert!(witnessed_500); + assert!(witnessed_125); + assert!(witnessed_62_5); + + coord_channel + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper.store(false, Ordering::SeqCst); + + run_loop_thread.join().unwrap(); +} + /// This test is testing that the clarity cost spend down works as expected, /// spreading clarity contract calls across the 
tenure instead of all in the first block. /// It also ensures that the clarity cost resets at the start of each tenure. +#[test] +#[ignore] fn clarity_cost_spend_down() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 7d9f1f0dc8..fc363d3db8 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -5,7 +5,6 @@ use std::sync::{mpsc, Arc}; use std::time::{Duration, Instant}; use std::{cmp, env, fs, io, thread}; -use clarity::consts::BITCOIN_REGTEST_FIRST_BLOCK_TIMESTAMP; use clarity::vm::ast::stack_depth_checker::AST_CALL_STACK_DEPTH_BUFFER; use clarity::vm::ast::ASTRules; use clarity::vm::costs::ExecutionCost; @@ -39,8 +38,9 @@ use stacks::chainstate::stacks::{ StacksPublicKey, StacksTransaction, TransactionContractCall, TransactionPayload, }; use stacks::clarity_cli::vm_execute as execute; -use stacks::cli::{self, StacksChainConfig}; +use stacks::cli; use stacks::codec::StacksMessageCodec; +use stacks::config::{EventKeyType, EventObserverConfig, FeeEstimatorName, InitialBalance}; use stacks::core::mempool::MemPoolWalkTxTypes; use stacks::core::{ self, EpochList, StacksEpoch, StacksEpochId, BLOCK_LIMIT_MAINNET_20, BLOCK_LIMIT_MAINNET_205, @@ -83,7 +83,6 @@ use super::{ SK_2, SK_3, }; use crate::burnchains::bitcoin_regtest_controller::{self, addr2str, BitcoinRPCRequest, UTXO}; -use crate::config::{EventKeyType, EventObserverConfig, FeeEstimatorName, InitialBalance}; use crate::neon_node::RelayerThread; use crate::operations::BurnchainOpSigner; use crate::stacks_common::types::PrivateKey; @@ -199,13 +198,13 @@ pub mod test_observer { use stacks::chainstate::stacks::events::StackerDBChunksEvent; use stacks::chainstate::stacks::StacksTransaction; use stacks::codec::StacksMessageCodec; + use stacks::config::{EventKeyType, EventObserverConfig}; use 
stacks::net::api::postblock_proposal::BlockValidateResponse; use stacks::util::hash::hex_bytes; use stacks_common::types::chainstate::StacksBlockId; use warp::Filter; use {tokio, warp}; - use crate::config::{EventKeyType, EventObserverConfig}; use crate::event_dispatcher::{MinedBlockEvent, MinedMicroblockEvent, MinedNakamotoBlockEvent}; use crate::Config; @@ -5766,7 +5765,7 @@ fn cost_voting_integration() { let transactions = block.get("transactions").unwrap().as_array().unwrap(); eprintln!("{}", transactions.len()); let mut tested = false; - let mut exec_cost = ExecutionCost::zero(); + let mut exec_cost = ExecutionCost::ZERO; for tx in transactions.iter() { let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); if raw_tx == "0x00" { @@ -12691,22 +12690,9 @@ fn mock_miner_replay() { let blocks_dir = blocks_dir.into_os_string().into_string().unwrap(); let db_path = format!("{}/neon", conf.node.working_dir); let args: Vec = vec!["replay-mock-mining".into(), db_path, blocks_dir]; - let SortitionDB { - first_block_height, - first_burn_header_hash, - .. 
- } = *btc_regtest_controller.sortdb_mut(); - let replay_config = StacksChainConfig { - chain_id: conf.burnchain.chain_id, - first_block_height, - first_burn_header_hash, - first_burn_header_timestamp: BITCOIN_REGTEST_FIRST_BLOCK_TIMESTAMP.into(), - pox_constants: burnchain_config.pox_constants, - epochs: conf.burnchain.epochs.expect("Missing `epochs` in config"), - }; info!("Replaying mock mined blocks..."); - cli::command_replay_mock_mining(&args, Some(&replay_config)); + cli::command_replay_mock_mining(&args, Some(&conf)); // ---------- Test finished, clean up ---------- diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index a9053d8c5d..432b990667 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -36,12 +36,15 @@ use std::time::{Duration, Instant}; use clarity::boot_util::boot_code_id; use clarity::vm::types::PrincipalData; -use libsigner::v0::messages::{BlockResponse, SignerMessage}; +use libsigner::v0::messages::{ + BlockAccepted, BlockResponse, MessageSlotID, PeerInfo, SignerMessage, +}; use libsigner::{SignerEntries, SignerEventTrait}; use stacks::chainstate::coordinator::comm::CoordinatorChannels; use stacks::chainstate::nakamoto::signer_set::NakamotoSigners; use stacks::chainstate::stacks::boot::{NakamotoSignerEntry, SIGNERS_NAME}; use stacks::chainstate::stacks::StacksPrivateKey; +use stacks::config::{Config as NeonConfig, EventKeyType, EventObserverConfig, InitialBalance}; use stacks::net::api::postblock_proposal::{ BlockValidateOk, BlockValidateReject, BlockValidateResponse, }; @@ -53,14 +56,14 @@ use stacks_common::codec::StacksMessageCodec; use stacks_common::consts::SIGNER_SLOTS_PER_USER; use stacks_common::types::StacksEpochId; use stacks_common::util::hash::Sha512Trunc256Sum; -use stacks_signer::client::{ClientError, SignerSlotID, StacksClient}; +use stacks_common::util::tests::TestFlag; +use stacks_signer::client::{ClientError, 
SignerSlotID, StackerDB, StacksClient}; use stacks_signer::config::{build_signer_config_tomls, GlobalConfig as SignerConfig, Network}; use stacks_signer::runloop::{SignerResult, State, StateInfo}; use stacks_signer::{Signer, SpawnedSigner}; use super::nakamoto_integrations::{check_nakamoto_empty_block_heuristics, wait_for}; -use crate::config::{Config as NeonConfig, EventKeyType, EventObserverConfig, InitialBalance}; -use crate::neon::{Counters, TestFlag}; +use crate::neon::{Counters, RunLoopCounter}; use crate::run_loop::boot_nakamoto; use crate::tests::bitcoin_regtest::BitcoinCoreController; use crate::tests::nakamoto_integrations::{ @@ -81,14 +84,14 @@ pub struct RunningNodes { pub btcd_controller: BitcoinCoreController, pub run_loop_thread: thread::JoinHandle<()>, pub run_loop_stopper: Arc, - pub vrfs_submitted: Arc, - pub commits_submitted: Arc, - pub blocks_processed: Arc, - pub nakamoto_blocks_proposed: Arc, - pub nakamoto_blocks_mined: Arc, - pub nakamoto_blocks_rejected: Arc, - pub nakamoto_blocks_signer_pushed: Arc, - pub nakamoto_test_skip_commit_op: TestFlag, + pub vrfs_submitted: RunLoopCounter, + pub commits_submitted: RunLoopCounter, + pub blocks_processed: RunLoopCounter, + pub nakamoto_blocks_proposed: RunLoopCounter, + pub nakamoto_blocks_mined: RunLoopCounter, + pub nakamoto_blocks_rejected: RunLoopCounter, + pub nakamoto_blocks_signer_pushed: RunLoopCounter, + pub nakamoto_test_skip_commit_op: TestFlag, pub coord_channel: Arc>, pub conf: NeonConfig, } @@ -111,7 +114,7 @@ pub struct SignerTest { } impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest> { - fn new(num_signers: usize, initial_balances: Vec<(StacksAddress, u64)>) -> Self { + pub fn new(num_signers: usize, initial_balances: Vec<(StacksAddress, u64)>) -> Self { Self::new_with_config_modifications( num_signers, initial_balances, @@ -307,10 +310,13 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest + Send + 'static, T: SignerEventTrait + 'static> SignerTest 
info_before.stacks_tip_height) + let info_after = self.get_peer_info(); + let blocks_mined = self.running_nodes.nakamoto_blocks_mined.get(); + Ok(info_after.stacks_tip_height > info_before.stacks_tip_height + && (!use_nakamoto_blocks_mined || blocks_mined > mined_before)) }) .unwrap(); let mined_block_elapsed_time = mined_block_time.elapsed(); @@ -355,6 +363,26 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest ()) { + let blocks_before = self.running_nodes.nakamoto_blocks_mined.get(); + let info_before = self.get_peer_info(); + + f(); + + // Verify that the block was mined + wait_for(timeout_secs, || { + let blocks_mined = self.running_nodes.nakamoto_blocks_mined.get(); + let info = self.get_peer_info(); + Ok(blocks_mined > blocks_before + && info.stacks_tip_height > info_before.stacks_tip_height) + }) + .expect("Timed out waiting for nakamoto block to be mined"); + } + /// Wait for a confirmed block and return a list of individual /// signer signatures fn wait_for_confirmed_block_v0( @@ -618,6 +646,45 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest BlockResponse { + let mut stackerdb = StackerDB::new( + &self.running_nodes.conf.node.rpc_bind, + StacksPrivateKey::new(), // We are just reading so don't care what the key is + false, + self.get_current_reward_cycle(), + SignerSlotID(0), // We are just reading so again, don't care about index. 
+ ); + let latest_msgs = StackerDB::get_messages( + stackerdb + .get_session_mut(&MessageSlotID::BlockResponse) + .expect("Failed to get BlockResponse stackerdb session"), + &[slot_id], + ) + .expect("Failed to get message from stackerdb"); + let latest_msg = latest_msgs.last().unwrap(); + let SignerMessage::BlockResponse(block_response) = latest_msg else { + panic!("Latest message from slot #{slot_id} isn't a block acceptance"); + }; + block_response.clone() + } + + /// Get the latest block acceptance from the given slot + pub fn get_latest_block_acceptance(&self, slot_id: u32) -> BlockAccepted { + let block_response = self.get_latest_block_response(slot_id); + match block_response { + BlockResponse::Accepted(accepted) => accepted, + _ => panic!("Latest block response from slot #{slot_id} isn't a block acceptance"), + } + } + + /// Get /v2/info from the node + pub fn get_peer_info(&self) -> PeerInfo { + self.stacks_client + .get_peer_info() + .expect("Failed to get peer info") + } } fn setup_stx_btc_node( @@ -747,13 +814,13 @@ fn setup_stx_btc_node( btc_regtest_controller, run_loop_thread, run_loop_stopper, - vrfs_submitted: vrfs_submitted.0, - commits_submitted: commits_submitted.0, - blocks_processed: blocks_processed.0, - nakamoto_blocks_proposed: naka_blocks_proposed.0, - nakamoto_blocks_mined: naka_blocks_mined.0, - nakamoto_blocks_rejected: naka_blocks_rejected.0, - nakamoto_blocks_signer_pushed: naka_signer_pushed_blocks.0, + vrfs_submitted, + commits_submitted, + blocks_processed, + nakamoto_blocks_proposed: naka_blocks_proposed, + nakamoto_blocks_mined: naka_blocks_mined, + nakamoto_blocks_rejected: naka_blocks_rejected, + nakamoto_blocks_signer_pushed: naka_signer_pushed_blocks, nakamoto_test_skip_commit_op, coord_channel, conf: naka_conf, diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index b55b9bafe6..5200883667 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ 
b/testnet/stacks-node/src/tests/signer/v0.rs @@ -13,6 +13,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . +use std::cmp::min; use std::collections::{HashMap, HashSet}; use std::ops::Add; use std::str::FromStr; @@ -22,7 +23,8 @@ use std::{env, thread}; use clarity::vm::types::PrincipalData; use libsigner::v0::messages::{ - BlockRejection, BlockResponse, MessageSlotID, MinerSlotID, RejectCode, SignerMessage, + BlockAccepted, BlockRejection, BlockResponse, MessageSlotID, MinerSlotID, RejectCode, + SignerMessage, }; use libsigner::{BlockProposal, SignerSession, StackerDBSession, VERSION_STRING}; use stacks::address::AddressHashMode; @@ -35,14 +37,18 @@ use stacks::chainstate::stacks::boot::MINERS_NAME; use stacks::chainstate::stacks::db::{StacksBlockHeaderTypes, StacksChainState, StacksHeaderInfo}; use stacks::chainstate::stacks::{StacksTransaction, TenureChangeCause, TransactionPayload}; use stacks::codec::StacksMessageCodec; +use stacks::config::{EventKeyType, EventObserverConfig}; use stacks::core::{StacksEpochId, CHAIN_ID_TESTNET}; use stacks::libstackerdb::StackerDBChunkData; use stacks::net::api::getsigner::GetSignerResponse; -use stacks::net::api::postblock_proposal::{ValidateRejectCode, TEST_VALIDATE_STALL}; +use stacks::net::api::postblock_proposal::{ + ValidateRejectCode, TEST_VALIDATE_DELAY_DURATION_SECS, TEST_VALIDATE_STALL, +}; use stacks::net::relay::fault_injection::set_ignore_block; use stacks::types::chainstate::{StacksAddress, StacksBlockId, StacksPrivateKey, StacksPublicKey}; use stacks::types::PublicKey; -use stacks::util::hash::{hex_bytes, Hash160, MerkleHashFunc}; +use stacks::util::get_epoch_time_secs; +use stacks::util::hash::{hex_bytes, Hash160, MerkleHashFunc, Sha512Trunc256Sum}; use stacks::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks::util_lib::boot::boot_code_id; use stacks::util_lib::signed_structured_data::pox4::{ @@ -54,7 +60,7 @@ use 
stacks_common::util::sleep_ms; use stacks_signer::chainstate::{ProposalEvalConfig, SortitionsView}; use stacks_signer::client::{SignerSlotID, StackerDB}; use stacks_signer::config::{build_signer_config_tomls, GlobalConfig as SignerConfig, Network}; -use stacks_signer::v0::signer::{ +use stacks_signer::v0::tests::{ TEST_IGNORE_ALL_BLOCK_PROPOSALS, TEST_PAUSE_BLOCK_BROADCAST, TEST_REJECT_ALL_BLOCK_PROPOSAL, TEST_SKIP_BLOCK_BROADCAST, }; @@ -63,12 +69,11 @@ use tracing_subscriber::prelude::*; use tracing_subscriber::{fmt, EnvFilter}; use super::SignerTest; -use crate::config::{EventKeyType, EventObserverConfig}; -use crate::event_dispatcher::MinedNakamotoBlockEvent; +use crate::event_dispatcher::{MinedNakamotoBlockEvent, TEST_SKIP_BLOCK_ANNOUNCEMENT}; use crate::nakamoto_node::miner::{ TEST_BLOCK_ANNOUNCE_STALL, TEST_BROADCAST_STALL, TEST_MINE_STALL, }; -use crate::nakamoto_node::sign_coordinator::TEST_IGNORE_SIGNERS; +use crate::nakamoto_node::stackerdb_listener::TEST_IGNORE_SIGNERS; use crate::neon::Counters; use crate::run_loop::boot_nakamoto; use crate::tests::nakamoto_integrations::{ @@ -80,7 +85,9 @@ use crate::tests::neon_integrations::{ get_account, get_chain_info, get_chain_info_opt, next_block_and_wait, run_until_burnchain_height, submit_tx, submit_tx_fallible, test_observer, }; -use crate::tests::{self, gen_random_port, make_stacks_transfer}; +use crate::tests::{ + self, gen_random_port, make_contract_call, make_contract_publish, make_stacks_transfer, +}; use crate::{nakamoto_node, BitcoinRegtestController, BurnchainController, Config, Keychain}; impl SignerTest { @@ -227,7 +234,7 @@ impl SignerTest { } /// Run the test until the epoch 3 boundary - fn boot_to_epoch_3(&mut self) { + pub fn boot_to_epoch_3(&mut self) { boot_to_epoch_3_reward_set( &self.running_nodes.conf, &self.running_nodes.blocks_processed, @@ -273,7 +280,7 @@ impl SignerTest { // could be other miners mining blocks. 
let height_before = get_chain_info(&self.running_nodes.conf).stacks_tip_height; info!("Waiting for first Nakamoto block: {}", height_before + 1); - self.mine_nakamoto_block(Duration::from_secs(30)); + self.mine_nakamoto_block(Duration::from_secs(30), false); wait_for(30, || { Ok(get_chain_info(&self.running_nodes.conf).stacks_tip_height > height_before) }) @@ -282,12 +289,17 @@ impl SignerTest { } // Only call after already past the epoch 3.0 boundary - fn mine_and_verify_confirmed_naka_block(&mut self, timeout: Duration, num_signers: usize) { + fn mine_and_verify_confirmed_naka_block( + &mut self, + timeout: Duration, + num_signers: usize, + use_nakamoto_blocks_mined: bool, + ) { info!("------------------------- Try mining one block -------------------------"); let reward_cycle = self.get_current_reward_cycle(); - self.mine_nakamoto_block(timeout); + self.mine_nakamoto_block(timeout, use_nakamoto_blocks_mined); // Verify that the signers accepted the proposed block, sending back a validate ok response let proposed_signer_signature_hash = self @@ -370,11 +382,11 @@ impl SignerTest { let total_nmb_blocks_to_mine = burnchain_height.saturating_sub(current_block_height); debug!("Mining {total_nmb_blocks_to_mine} Nakamoto block(s) to reach burnchain height {burnchain_height}"); for _ in 0..total_nmb_blocks_to_mine { - self.mine_and_verify_confirmed_naka_block(timeout, num_signers); + self.mine_and_verify_confirmed_naka_block(timeout, num_signers, false); } } - /// Propose an invalid block to the signers + /// Propose a block to the signers fn propose_block(&mut self, block: NakamotoBlock, timeout: Duration) { let miners_contract_id = boot_code_id(MINERS_NAME, false); let mut session = @@ -384,6 +396,7 @@ impl SignerTest { .btc_regtest_controller .get_headers_height(); let reward_cycle = self.get_current_reward_cycle(); + let signer_signature_hash = block.header.signer_signature_hash(); let message = SignerMessage::BlockProposal(BlockProposal { block, burn_height, @@ 
-400,7 +413,7 @@ impl SignerTest { let mut version = 0; let slot_id = MinerSlotID::BlockProposal.to_u8() as u32; let start = Instant::now(); - debug!("Proposing invalid block to signers"); + debug!("Proposing block to signers: {signer_signature_hash}"); while !accepted { let mut chunk = StackerDBChunkData::new(slot_id * 2, version, message.serialize_to_vec()); @@ -418,6 +431,27 @@ impl SignerTest { } } +fn last_block_contains_tenure_change_tx(cause: TenureChangeCause) -> bool { + let blocks = test_observer::get_blocks(); + let last_block = &blocks.last().unwrap(); + let transactions = last_block["transactions"].as_array().unwrap(); + let tx = transactions.first().expect("No transactions in block"); + let raw_tx = tx["raw_tx"].as_str().unwrap(); + let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); + let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); + match &parsed.payload { + TransactionPayload::TenureChange(payload) if payload.cause == cause => { + info!("Found tenure change transaction: {parsed:?}"); + true + } + _ => false, + } +} + +fn verify_last_block_contains_tenure_change_tx(cause: TenureChangeCause) { + assert!(last_block_contains_tenure_change_tx(cause)); +} + #[test] #[ignore] /// Test that a signer can respond to an invalid block proposal @@ -455,11 +489,13 @@ fn block_proposal_rejection() { first_proposal_burn_block_timing: Duration::from_secs(0), block_proposal_timeout: Duration::from_secs(100), tenure_last_block_proposal_timeout: Duration::from_secs(30), + tenure_idle_timeout: Duration::from_secs(300), }; let mut block = NakamotoBlock { header: NakamotoBlockHeader::empty(), txs: vec![], }; + block.header.timestamp = get_epoch_time_secs(); // First propose a block to the signers that does not have the correct consensus hash or BitVec. This should be rejected BEFORE // the block is submitted to the node for validation. 
@@ -484,7 +520,7 @@ fn block_proposal_rejection() { signer_test.wait_for_validate_reject_response(short_timeout, block_signer_signature_hash_2); assert!(matches!( reject.reason_code, - ValidateRejectCode::UnknownParent + ValidateRejectCode::InvalidBlock )); let start_polling = Instant::now(); @@ -507,10 +543,16 @@ fn block_proposal_rejection() { { if signer_signature_hash == block_signer_signature_hash_1 { found_signer_signature_hash_1 = true; - assert!(matches!(reason_code, RejectCode::SortitionViewMismatch)); + assert!( + matches!(reason_code, RejectCode::SortitionViewMismatch), + "Expected sortition view mismatch rejection. Got: {reason_code}" + ); } else if signer_signature_hash == block_signer_signature_hash_2 { found_signer_signature_hash_2 = true; - assert!(matches!(reason_code, RejectCode::ValidationFailed(_))); + assert!(matches!( + reason_code, + RejectCode::ValidationFailed(ValidateRejectCode::InvalidBlock) + )); } else { continue; } @@ -554,7 +596,7 @@ fn miner_gather_signatures() { signer_test.boot_to_epoch_3(); info!("------------------------- Test Mine and Verify Confirmed Nakamoto Block -------------------------"); - signer_test.mine_and_verify_confirmed_naka_block(timeout, num_signers); + signer_test.mine_and_verify_confirmed_naka_block(timeout, num_signers, true); // Test prometheus metrics response #[cfg(feature = "monitoring_prom")] @@ -786,14 +828,8 @@ fn reloads_signer_set_in() { let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; - let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( - num_signers, - vec![(sender_addr, send_amt + send_fee)], - |_config| {}, - |_| {}, - None, - None, - ); + let mut signer_test: SignerTest = + SignerTest::new(num_signers, vec![(sender_addr, send_amt + send_fee)]); setup_epoch_3_reward_set( &signer_test.running_nodes.conf, @@ -917,7 +953,7 @@ fn forked_tenure_testing( config.first_proposal_burn_block_timing = proposal_limit; // don't allow signers to post 
signed blocks (limits the amount of fault injection we // need) - TEST_SKIP_BLOCK_BROADCAST.lock().unwrap().replace(true); + TEST_SKIP_BLOCK_BROADCAST.set(true); }, |config| { config.miner.tenure_cost_limit_per_block_percentage = None; @@ -1017,7 +1053,7 @@ fn forked_tenure_testing( thread::sleep(Duration::from_secs(1)); } - info!("Tenure B broadcasted a block. Wait {post_btc_block_pause:?}, issue the next bitcon block, and un-stall block commits."); + info!("Tenure B broadcasted a block. Wait {post_btc_block_pause:?}, issue the next bitcoin block, and un-stall block commits."); thread::sleep(post_btc_block_pause); // the block will be stored, not processed, so load it out of staging @@ -1073,16 +1109,15 @@ fn forked_tenure_testing( proposed_blocks.load(Ordering::SeqCst) }; let rejected_before = rejected_blocks.load(Ordering::SeqCst); + signer_test + .running_nodes + .nakamoto_test_skip_commit_op + .set(false); next_block_and( &mut signer_test.running_nodes.btc_regtest_controller, 60, || { - signer_test - .running_nodes - .nakamoto_test_skip_commit_op - .set(false); - let commits_count = commits_submitted.load(Ordering::SeqCst); if commits_count > commits_before { // now allow block B to process if it hasn't already. 
@@ -1109,7 +1144,31 @@ fn forked_tenure_testing( && has_reject_count) }, ) - .unwrap(); + .unwrap_or_else(|_| { + let commits_count = commits_submitted.load(Ordering::SeqCst); + let rejected_count = rejected_blocks.load(Ordering::SeqCst); + // see above for comments + let (blocks_count, rbf_count, has_reject_count) = if expect_tenure_c { + (mined_blocks.load(Ordering::SeqCst), 1, true) + } else { + ( + proposed_blocks.load(Ordering::SeqCst), + 0, + rejected_count > rejected_before, + ) + }; + error!("Tenure C failed to produce a block"; + "commits_count" => commits_count, + "commits_before" => commits_before, + "rbf_count" => rbf_count as u64, + "blocks_count" => blocks_count, + "blocks_before" => blocks_before, + "rejected_count" => rejected_count, + "rejected_before" => rejected_before, + "has_reject_count" => has_reject_count, + ); + panic!(); + }); // allow blocks B and C to be processed sleep_ms(1000); @@ -1222,8 +1281,18 @@ fn bitcoind_forking_test() { let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; - let mut signer_test: SignerTest = - SignerTest::new(num_signers, vec![(sender_addr, send_amt + send_fee)]); + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![(sender_addr, send_amt + send_fee)], + |_| {}, + |node_config| { + let epochs = node_config.burnchain.epochs.as_mut().unwrap(); + epochs[StacksEpochId::Epoch30].end_height = 3_015; + epochs[StacksEpochId::Epoch31].start_height = 3_015; + }, + None, + None, + ); let conf = signer_test.running_nodes.conf.clone(); let http_origin = format!("http://{}", &conf.node.rpc_bind); let miner_address = Keychain::default(conf.node.seed.clone()) @@ -1264,7 +1333,7 @@ fn bitcoind_forking_test() { for i in 0..pre_fork_tenures { info!("Mining pre-fork tenure {} of {pre_fork_tenures}", i + 1); - signer_test.mine_nakamoto_block(Duration::from_secs(30)); + signer_test.mine_nakamoto_block(Duration::from_secs(30), true); } let 
pre_fork_1_nonce = get_account(&http_origin, &miner_address).nonce; @@ -1336,7 +1405,7 @@ fn bitcoind_forking_test() { for i in 0..5 { info!("Mining post-fork tenure {} of 5", i + 1); - signer_test.mine_nakamoto_block(Duration::from_secs(30)); + signer_test.mine_nakamoto_block(Duration::from_secs(30), true); } let pre_fork_2_nonce = get_account(&http_origin, &miner_address).nonce; @@ -1412,7 +1481,7 @@ fn bitcoind_forking_test() { for i in 0..5 { info!("Mining post-fork tenure {} of 5", i + 1); - signer_test.mine_nakamoto_block(Duration::from_secs(30)); + signer_test.mine_nakamoto_block(Duration::from_secs(30), true); } let test_end_nonce = get_account(&http_origin, &miner_address).nonce; @@ -1860,10 +1929,15 @@ fn miner_forking() { info!("Flushing any pending commits to enable custom winner selection"); let burn_height_before = get_burn_height(); + let blocks_before = test_observer::get_blocks().len(); + let nakamoto_blocks_count_before = get_nakamoto_headers(&conf).len(); next_block_and( &mut signer_test.running_nodes.btc_regtest_controller, 30, - || Ok(get_burn_height() > burn_height_before), + || { + Ok(get_burn_height() > burn_height_before + && test_observer::get_blocks().len() > blocks_before) + }, ) .unwrap(); @@ -1954,7 +2028,6 @@ fn miner_forking() { ) .unwrap(); - let blocks_len = test_observer::get_blocks().len(); let burn_height_before = get_burn_height(); info!("Mine RL2 Tenure"); next_block_and( @@ -1964,11 +2037,12 @@ fn miner_forking() { ) .unwrap(); - // Ensure that RL2 doesn't produce a valid block - assert!( - wait_for(60, || Ok(test_observer::get_blocks().len() > blocks_len)).is_err(), - "RL2 produced a block" - ); + wait_for(60, || { + Ok(last_block_contains_tenure_change_tx( + TenureChangeCause::Extended, + )) + }) + .expect("RL1 did not produce a tenure extend block"); // fetch the current sortition info let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); @@ -1980,6 +2054,20 @@ fn miner_forking() { "RL2 did not win 
the sortition" ); + let header_info = get_nakamoto_headers(&conf).into_iter().last().unwrap(); + let header = header_info + .anchored_header + .as_stacks_nakamoto() + .unwrap() + .clone(); + + mining_pk_1 + .verify( + header.miner_signature_hash().as_bytes(), + &header.miner_signature, + ) + .expect("RL1 did not produce our last block"); + let nakamoto_headers: HashMap<_, _> = get_nakamoto_headers(&conf) .into_iter() .map(|header| { @@ -1987,7 +2075,11 @@ fn miner_forking() { (header.consensus_hash, header) }) .collect(); - assert!(!nakamoto_headers.contains_key(&tip.consensus_hash)); + + assert!( + !nakamoto_headers.contains_key(&tip.consensus_hash), + "RL1 produced a block with the current consensus hash." + ); info!("------------------------- RL1 RBFs its Own Commit -------------------------"); info!("Pausing stacks block proposal to test RBF capability"); @@ -2041,14 +2133,16 @@ fn miner_forking() { }) .expect("Timed out waiting for miner 1 to RBF its old commit op"); + let blocks_before = test_observer::get_blocks().len(); info!("Mine RL1 Tenure"); - signer_test - .running_nodes - .btc_regtest_controller - .build_next_block(1); + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 30, + || Ok(test_observer::get_blocks().len() > blocks_before), + ) + .unwrap(); // fetch the current sortition info - let sortdb = conf.get_burnchain().open_sortition_db(true).unwrap(); let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); // make sure the tenure was won by RL1 assert!(tip.sortition, "No sortition was won"); @@ -2084,14 +2178,16 @@ fn miner_forking() { let peer_1_height = get_chain_info(&conf).stacks_tip_height; let peer_2_height = get_chain_info(&conf_node_2).stacks_tip_height; + let nakamoto_blocks_count = get_nakamoto_headers(&conf).len(); info!("Peer height information"; "peer_1" => peer_1_height, "peer_2" => peer_2_height, "pre_naka_height" => pre_nakamoto_peer_1_height); + info!("Nakamoto blocks count before test: 
{nakamoto_blocks_count_before}, Nakamoto blocks count now: {nakamoto_blocks_count}"); assert_eq!(peer_1_height, peer_2_height); let nakamoto_blocks_count = get_nakamoto_headers(&conf).len(); assert_eq!( peer_1_height - pre_nakamoto_peer_1_height, - u64::try_from(nakamoto_blocks_count).unwrap() - 1, // subtract 1 for the first Nakamoto block + u64::try_from(nakamoto_blocks_count - nakamoto_blocks_count_before).unwrap(), // count only the Nakamoto blocks produced during this test "There should be no forks in this test" ); @@ -2321,10 +2417,7 @@ fn retry_on_rejection() { .map(StacksPublicKey::from_private) .take(num_signers) .collect(); - TEST_REJECT_ALL_BLOCK_PROPOSAL - .lock() - .unwrap() - .replace(rejecting_signers.clone()); + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(rejecting_signers.clone()); let proposals_before = signer_test .running_nodes .nakamoto_blocks_proposed .load(Ordering::SeqCst); @@ -2371,10 +2464,7 @@ fn retry_on_rejection() { // resume signing info!("Disable unconditional rejection and wait for the block to be processed"); - TEST_REJECT_ALL_BLOCK_PROPOSAL - .lock() - .unwrap() - .replace(vec![]); + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(vec![]); loop { let blocks_mined = signer_test .running_nodes @@ -2418,7 +2508,7 @@ fn signers_broadcast_signed_blocks() { .running_nodes .nakamoto_blocks_mined .load(Ordering::SeqCst); - signer_test.mine_nakamoto_block(Duration::from_secs(30)); + signer_test.mine_nakamoto_block(Duration::from_secs(30), true); wait_for(30, || { let blocks_mined = signer_test @@ -2434,7 +2524,7 @@ fn signers_broadcast_signed_blocks() { }) .expect("Timed out waiting for first nakamoto block to be mined"); - TEST_IGNORE_SIGNERS.lock().unwrap().replace(true); + TEST_IGNORE_SIGNERS.set(true); let blocks_before = signer_test .running_nodes .nakamoto_blocks_mined @@ -2485,12 +2575,8 @@ fn signers_broadcast_signed_blocks() { #[test] #[ignore] -/// This test checks the behaviour of signers when a sortition is empty. 
Specifically: -/// - An empty sortition will cause the signers to mark a miner as misbehaving once a timeout is exceeded. -/// - The miner will stop trying to mine once it sees a threshold of signers reject the block -/// - The empty sortition will trigger the miner to attempt a tenure extend. -/// - Signers will accept the tenure extend and sign subsequent blocks built off the old sortition -fn empty_sortition() { +/// This test verifies that a miner will produce a TenureExtend transaction after the signers' idle timeout is reached. +fn tenure_extend_after_idle_signers() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } @@ -2506,175 +2592,43 @@ fn empty_sortition() { let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; - let recipient = PrincipalData::from(StacksAddress::burn_address(false)); - let block_proposal_timeout = Duration::from_secs(20); + let _recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let idle_timeout = Duration::from_secs(30); let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, vec![(sender_addr, send_amt + send_fee)], |config| { - // make the duration long enough that the miner will be marked as malicious - config.block_proposal_timeout = block_proposal_timeout; + config.tenure_idle_timeout = idle_timeout; }, |_| {}, None, None, ); - let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); - let short_timeout = Duration::from_secs(20); + let _http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); signer_test.boot_to_epoch_3(); - TEST_BROADCAST_STALL.lock().unwrap().replace(true); - - info!("------------------------- Test Mine Regular Tenure A -------------------------"); - let commits_before = signer_test - .running_nodes - .commits_submitted - .load(Ordering::SeqCst); - // Mine a regular tenure - next_block_and( - &mut signer_test.running_nodes.btc_regtest_controller, - 60, - || { - 
let commits_count = signer_test - .running_nodes - .commits_submitted - .load(Ordering::SeqCst); - Ok(commits_count > commits_before) - }, - ) - .unwrap(); - - info!("------------------------- Test Mine Empty Tenure B -------------------------"); - info!("Pausing stacks block mining to trigger an empty sortition."); - let blocks_before = signer_test - .running_nodes - .nakamoto_blocks_mined - .load(Ordering::SeqCst); - let commits_before = signer_test - .running_nodes - .commits_submitted - .load(Ordering::SeqCst); - // Start new Tenure B - // In the next block, the miner should win the tenure - next_block_and( - &mut signer_test.running_nodes.btc_regtest_controller, - 60, - || { - let commits_count = signer_test - .running_nodes - .commits_submitted - .load(Ordering::SeqCst); - Ok(commits_count > commits_before) - }, - ) - .unwrap(); - - info!("Pausing stacks block proposal to force an empty tenure"); - TEST_BROADCAST_STALL.lock().unwrap().replace(true); - - info!("Pausing commit op to prevent tenure C from starting..."); - signer_test - .running_nodes - .nakamoto_test_skip_commit_op - .set(true); - - let blocks_after = signer_test - .running_nodes - .nakamoto_blocks_mined - .load(Ordering::SeqCst); - assert_eq!(blocks_after, blocks_before); - - let rejected_before = signer_test - .running_nodes - .nakamoto_blocks_rejected - .load(Ordering::SeqCst); - - // submit a tx so that the miner will mine an extra block - let sender_nonce = 0; - let transfer_tx = make_stacks_transfer( - &sender_sk, - sender_nonce, - send_fee, - signer_test.running_nodes.conf.burnchain.chain_id, - &recipient, - send_amt, - ); - submit_tx(&http_origin, &transfer_tx); - - std::thread::sleep(block_proposal_timeout.add(Duration::from_secs(1))); - - TEST_BROADCAST_STALL.lock().unwrap().replace(false); - - info!("------------------------- Test Delayed Block is Rejected -------------------------"); - let reward_cycle = signer_test.get_current_reward_cycle(); - let mut stackerdb = StackerDB::new( - 
&signer_test.running_nodes.conf.node.rpc_bind, - StacksPrivateKey::new(), // We are just reading so don't care what the key is - false, - reward_cycle, - SignerSlotID(0), // We are just reading so again, don't care about index. - ); + info!("---- Nakamoto booted, starting test ----"); + signer_test.mine_nakamoto_block(Duration::from_secs(30), true); - let signer_slot_ids: Vec<_> = signer_test - .get_signer_indices(reward_cycle) - .iter() - .map(|id| id.0) - .collect(); - assert_eq!(signer_slot_ids.len(), num_signers); + info!("---- Waiting for a tenure extend ----"); - // The miner's proposed block should get rejected by all the signers - let mut found_rejections = Vec::new(); - wait_for(short_timeout.as_secs(), || { - for slot_id in signer_slot_ids.iter() { - if found_rejections.contains(slot_id) { - continue; - } - let mut latest_msgs = StackerDB::get_messages( - stackerdb - .get_session_mut(&MessageSlotID::BlockResponse) - .expect("Failed to get BlockResponse stackerdb session"), - &[*slot_id] - ).expect("Failed to get message from stackerdb"); - assert!(latest_msgs.len() <= 1); - let Some(latest_msg) = latest_msgs.pop() else { - info!("No message yet from slot #{slot_id}, will wait to try again"); - continue; - }; - if let SignerMessage::BlockResponse(BlockResponse::Rejected(BlockRejection { - reason_code, - metadata, - .. 
- })) = latest_msg - { - assert!(matches!(reason_code, RejectCode::SortitionViewMismatch)); - assert_eq!(metadata.server_version, VERSION_STRING.to_string()); - found_rejections.push(*slot_id); - } else { - info!("Latest message from slot #{slot_id} isn't a block rejection, will wait to see if the signer updates to a rejection"); - } - } - let rejections = signer_test - .running_nodes - .nakamoto_blocks_rejected - .load(Ordering::SeqCst); + // Now, wait for a block with a tenure extend + wait_for(idle_timeout.as_secs() + 10, || { + Ok(last_block_contains_tenure_change_tx( + TenureChangeCause::Extended, + )) + }) + .expect("Timed out waiting for a block with a tenure extend"); - // wait until we've found rejections for all the signers, and the miner has confirmed that - // the signers have rejected the block - Ok(found_rejections.len() == signer_slot_ids.len() && rejections > rejected_before) - }).unwrap(); signer_test.shutdown(); } #[test] #[ignore] -/// This test checks the behavior of signers when an empty sortition arrives -/// before the first block of the previous tenure has been approved. -/// Specifically: -/// - The empty sortition will trigger the miner to attempt a tenure extend. -/// - Signers will accept the tenure extend and sign subsequent blocks built -/// off the old sortition -fn empty_sortition_before_approval() { +/// This test verifies that a miner will produce a TenureExtend transaction after the miner's idle timeout +/// even if they do not see the signers' tenure extend timestamp responses. 
+fn tenure_extend_after_idle_miner() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } @@ -2690,164 +2644,158 @@ fn empty_sortition_before_approval() { let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; - let recipient = PrincipalData::from(StacksAddress::burn_address(false)); - let block_proposal_timeout = Duration::from_secs(20); + let _recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let idle_timeout = Duration::from_secs(30); + let miner_idle_timeout = idle_timeout + Duration::from_secs(10); let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, vec![(sender_addr, send_amt + send_fee)], |config| { - // make the duration long enough that the miner will be marked as malicious - config.block_proposal_timeout = block_proposal_timeout; + config.tenure_idle_timeout = idle_timeout; + }, + |config| { + config.miner.tenure_timeout = miner_idle_timeout; }, - |_| {}, - None, + None, None, ); - let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + let _http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); signer_test.boot_to_epoch_3(); - next_block_and_process_new_stacks_block( - &mut signer_test.running_nodes.btc_regtest_controller, - 60, - &signer_test.running_nodes.coord_channel, - ) - .unwrap(); - - let info = get_chain_info(&signer_test.running_nodes.conf); - let burn_height_before = info.burn_block_height; - let stacks_height_before = info.stacks_tip_height; - - info!("Forcing miner to ignore signatures for next block"); - TEST_IGNORE_SIGNERS.lock().unwrap().replace(true); - - info!("Pausing block commits to trigger an empty sortition."); - signer_test - .running_nodes - .nakamoto_test_skip_commit_op - .0 - .lock() - .unwrap() - .replace(true); + info!("---- Nakamoto booted, starting test ----"); + signer_test.mine_nakamoto_block(Duration::from_secs(30), true); - info!("------------------------- Test 
Mine Tenure A -------------------------"); - let proposed_before = signer_test - .running_nodes - .nakamoto_blocks_proposed - .load(Ordering::SeqCst); - // Mine a regular tenure and wait for a block proposal + info!("---- Start a new tenure but ignore block signatures so no timestamps are recorded ----"); + let tip_height_before = get_chain_info(&signer_test.running_nodes.conf).stacks_tip_height; + TEST_IGNORE_SIGNERS.set(true); next_block_and( &mut signer_test.running_nodes.btc_regtest_controller, - 60, + 30, || { - let proposed_count = signer_test - .running_nodes - .nakamoto_blocks_proposed - .load(Ordering::SeqCst); - Ok(proposed_count > proposed_before) + let tip_height = get_chain_info(&signer_test.running_nodes.conf).stacks_tip_height; + Ok(tip_height > tip_height_before) }, ) - .expect("Failed to mine tenure A and propose a block"); + .expect("Failed to mine the tenure change block"); - info!("------------------------- Test Mine Empty Tenure B -------------------------"); + // Now, wait for a block with a tenure change due to the new block + wait_for(30, || { + Ok(last_block_contains_tenure_change_tx( + TenureChangeCause::BlockFound, + )) + }) + .expect("Timed out waiting for a block with a tenure change"); - // Trigger an empty tenure - next_block_and( - &mut signer_test.running_nodes.btc_regtest_controller, - 60, - || { - let burn_height = get_chain_info(&signer_test.running_nodes.conf).burn_block_height; - Ok(burn_height == burn_height_before + 2) - }, - ) - .expect("Failed to mine empty tenure"); + info!("---- Waiting for a tenure extend ----"); - info!("Unpause block commits"); - signer_test - .running_nodes - .nakamoto_test_skip_commit_op - .0 - .lock() - .unwrap() - .replace(false); + TEST_IGNORE_SIGNERS.set(false); + // Now, wait for a block with a tenure extend + wait_for(miner_idle_timeout.as_secs() + 20, || { + Ok(last_block_contains_tenure_change_tx( + TenureChangeCause::Extended, + )) + }) + .expect("Timed out waiting for a block with a tenure 
extend"); + signer_test.shutdown(); +} - info!("Stop ignoring signers and wait for the tip to advance"); - TEST_IGNORE_SIGNERS.lock().unwrap().replace(false); +#[test] +#[ignore] +/// This test verifies that a miner that attempts to produce a tenure extend too early will be rejected by the signers, +/// but will eventually succeed after the signers' idle timeout has passed. +fn tenure_extend_succeeds_after_rejected_attempt() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } - wait_for(60, || { - let info = get_chain_info(&signer_test.running_nodes.conf); - Ok(info.stacks_tip_height > stacks_height_before) - }) - .expect("Failed to advance chain tip"); + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); - let info = get_chain_info(&signer_test.running_nodes.conf); - info!("Current state: {:?}", info); + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let _recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let idle_timeout = Duration::from_secs(30); + let miner_idle_timeout = Duration::from_secs(20); + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![(sender_addr, send_amt + send_fee)], + |config| { + config.tenure_idle_timeout = idle_timeout; + }, + |config| { + config.miner.tenure_timeout = miner_idle_timeout; + }, + None, + None, + ); + let _http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); - // Wait for a block with a tenure extend to be mined - wait_for(60, || { - let blocks = test_observer::get_blocks(); - let last_block = blocks.last().unwrap(); - info!("Last block mined: {:?}", last_block); - for tx in last_block["transactions"].as_array().unwrap() { - let raw_tx = tx["raw_tx"].as_str().unwrap(); - if 
raw_tx == "0x00" { - continue; - } - let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); - let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); - if let TransactionPayload::TenureChange(payload) = &parsed.payload { - match payload.cause { - TenureChangeCause::Extended => { - info!("Found tenure extend block"); - return Ok(true); + signer_test.boot_to_epoch_3(); + + info!("---- Nakamoto booted, starting test ----"); + signer_test.mine_nakamoto_block(Duration::from_secs(30), true); + + info!("---- Waiting for a rejected tenure extend ----"); + // Now, wait for a block with a tenure extend proposal from the miner, but ensure it is rejected. + wait_for(30, || { + let block = test_observer::get_stackerdb_chunks() + .into_iter() + .flat_map(|chunk| chunk.modified_slots) + .find_map(|chunk| { + let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + .expect("Failed to deserialize SignerMessage"); + if let SignerMessage::BlockProposal(proposal) = message { + if proposal.block.get_tenure_tx_payload().unwrap().cause + == TenureChangeCause::Extended + { + return Some(proposal.block); } - TenureChangeCause::BlockFound => {} } - }; - } - Ok(false) + None + }); + let Some(block) = &block else { + return Ok(false); + }; + let signatures = test_observer::get_stackerdb_chunks() + .into_iter() + .flat_map(|chunk| chunk.modified_slots) + .filter_map(|chunk| { + let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + .expect("Failed to deserialize SignerMessage"); + if let SignerMessage::BlockResponse(BlockResponse::Rejected(rejected)) = message { + if block.header.signer_signature_hash() == rejected.signer_signature_hash { + return Some(rejected.signature); + } + } + None + }) + .collect::>(); + Ok(signatures.len() >= num_signers * 7 / 10) }) - .expect("Timed out waiting for tenure extend"); - - let stacks_height_before = get_chain_info(&signer_test.running_nodes.conf).stacks_tip_height; - - // submit a tx 
so that the miner will mine an extra block - let sender_nonce = 0; - let transfer_tx = make_stacks_transfer( - &sender_sk, - sender_nonce, - send_fee, - signer_test.running_nodes.conf.burnchain.chain_id, - &recipient, - send_amt, - ); - submit_tx(&http_origin, &transfer_tx); + .expect("Test timed out while waiting for a rejected tenure extend"); - wait_for(60, || { - let info = get_chain_info(&signer_test.running_nodes.conf); - Ok(info.stacks_tip_height > stacks_height_before) + info!("---- Waiting for an accepted tenure extend ----"); + wait_for(idle_timeout.as_secs() + 10, || { + Ok(last_block_contains_tenure_change_tx( + TenureChangeCause::Extended, + )) }) - .expect("Failed to advance chain tip with STX transfer"); - - next_block_and_process_new_stacks_block( - &mut signer_test.running_nodes.btc_regtest_controller, - 60, - &signer_test.running_nodes.coord_channel, - ) - .expect("Failed to mine a normal tenure after the tenure extend"); - + .expect("Test timed out while waiting for an accepted tenure extend"); signer_test.shutdown(); } #[test] #[ignore] -/// This test checks the behavior of signers when an empty sortition arrives -/// before the first block of the previous tenure has been proposed. -/// Specifically: -/// - The empty sortition will trigger the miner to attempt a tenure extend. -/// - Signers will accept the tenure extend and sign subsequent blocks built -/// off the old sortition -fn empty_sortition_before_proposal() { +/// Verify that Nakamoto blocks that don't modify the tenure's execution cost +/// don't modify the idle timeout. 
+fn stx_transfers_dont_effect_idle_timeout() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } @@ -2863,143 +2811,118 @@ fn empty_sortition_before_proposal() { let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; + let num_txs = 5; let recipient = PrincipalData::from(StacksAddress::burn_address(false)); - let block_proposal_timeout = Duration::from_secs(20); + let idle_timeout = Duration::from_secs(60); let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, - vec![(sender_addr, send_amt + send_fee)], + vec![(sender_addr, (send_amt + send_fee) * num_txs)], |config| { - // make the duration long enough that the miner will be marked as malicious - config.block_proposal_timeout = block_proposal_timeout; + config.tenure_idle_timeout = idle_timeout; }, |_| {}, None, None, ); - let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + let naka_conf = signer_test.running_nodes.conf.clone(); + let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); signer_test.boot_to_epoch_3(); - next_block_and_process_new_stacks_block( - &mut signer_test.running_nodes.btc_regtest_controller, - 60, - &signer_test.running_nodes.coord_channel, - ) - .unwrap(); + // Add a delay to the block validation process + TEST_VALIDATE_DELAY_DURATION_SECS.lock().unwrap().replace(5); - let info = get_chain_info(&signer_test.running_nodes.conf); - let stacks_height_before = info.stacks_tip_height; + let info_before = signer_test.get_peer_info(); + let blocks_before = signer_test.running_nodes.nakamoto_blocks_mined.get(); + info!("---- Nakamoto booted, starting test ----"; + "info_height" => info_before.stacks_tip_height, + "blocks_before" => blocks_before, + ); + signer_test.mine_nakamoto_block(Duration::from_secs(30), true); - info!("Pause block commits to ensure we get an empty sortition"); - signer_test - .running_nodes - .nakamoto_test_skip_commit_op - .0 - .lock() - .unwrap() - 
.replace(true); + info!("---- Getting current idle timeout ----"); - info!("Pause miner so it doesn't propose a block before the next tenure arrives"); - TEST_MINE_STALL.lock().unwrap().replace(true); + let reward_cycle = signer_test.get_current_reward_cycle(); - let burn_height_before = get_chain_info(&signer_test.running_nodes.conf).burn_block_height; + let signer_slot_ids: Vec<_> = signer_test + .get_signer_indices(reward_cycle) + .iter() + .map(|id| id.0) + .collect(); + assert_eq!(signer_slot_ids.len(), num_signers); - info!("------------------------- Test Mine Tenure A and B -------------------------"); - signer_test - .running_nodes - .btc_regtest_controller - .build_next_block(2); + let get_last_block_hash = || { + let blocks = test_observer::get_blocks(); + let last_block = blocks.last().unwrap(); + let block_hash = + hex_bytes(&last_block.get("block_hash").unwrap().as_str().unwrap()[2..]).unwrap(); + Sha512Trunc256Sum::from_vec(&block_hash).unwrap() + }; - wait_for(60, || { - let info = get_chain_info(&signer_test.running_nodes.conf); - Ok(info.burn_block_height == burn_height_before + 2) - }) - .expect("Failed to advance chain tip"); + let last_block_hash = get_last_block_hash(); - // Sleep a bit more to ensure the signers see both burn blocks - sleep_ms(5_000); + let slot_id = 0_u32; - info!("Unpause miner"); - TEST_MINE_STALL.lock().unwrap().replace(false); + let initial_acceptance = signer_test.get_latest_block_acceptance(slot_id); + assert_eq!(initial_acceptance.signer_signature_hash, last_block_hash); - info!("Unpause block commits"); - signer_test - .running_nodes - .nakamoto_test_skip_commit_op - .0 - .lock() - .unwrap() - .replace(false); + info!( + "---- Last idle timeout: {} ----", + initial_acceptance.response_data.tenure_extend_timestamp + ); - wait_for(60, || { - let info = get_chain_info(&signer_test.running_nodes.conf); - Ok(info.stacks_tip_height > stacks_height_before) - }) - .expect("Failed to advance chain tip"); + // Now, mine a few 
nakamoto blocks with just transfers - let info = get_chain_info(&signer_test.running_nodes.conf); - info!("Current state: {:?}", info); + let mut sender_nonce = 0; - // Wait for a block with a tenure extend to be mined - wait_for(60, || { - let blocks = test_observer::get_blocks(); - let last_block = blocks.last().unwrap(); - info!("Last block mined: {:?}", last_block); - for tx in last_block["transactions"].as_array().unwrap() { - let raw_tx = tx["raw_tx"].as_str().unwrap(); - if raw_tx == "0x00" { - continue; - } - let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); - let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); - if let TransactionPayload::TenureChange(payload) = &parsed.payload { - match payload.cause { - TenureChangeCause::Extended => { - info!("Found tenure extend block"); - return Ok(true); - } - TenureChangeCause::BlockFound => {} - } - }; - } - Ok(false) - }) - .expect("Timed out waiting for tenure extend"); + // Note that this response was BEFORE the block was globally accepted. 
it will report a guestimated idle time + let initial_acceptance = initial_acceptance; + let mut first_global_acceptance = None; + for i in 0..num_txs { + info!("---- Mining interim block {} ----", i + 1); + signer_test.wait_for_nakamoto_block(30, || { + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + naka_conf.burnchain.chain_id, + &recipient, + send_amt, + ); + submit_tx(&http_origin, &transfer_tx); + sender_nonce += 1; + }); - let stacks_height_before = get_chain_info(&signer_test.running_nodes.conf).stacks_tip_height; + let latest_acceptance = signer_test.get_latest_block_acceptance(slot_id); + let last_block_hash = get_last_block_hash(); - // submit a tx so that the miner will mine an extra block - let sender_nonce = 0; - let transfer_tx = make_stacks_transfer( - &sender_sk, - sender_nonce, - send_fee, - signer_test.running_nodes.conf.burnchain.chain_id, - &recipient, - send_amt, - ); - submit_tx(&http_origin, &transfer_tx); + assert_eq!(latest_acceptance.signer_signature_hash, last_block_hash); - wait_for(60, || { - let info = get_chain_info(&signer_test.running_nodes.conf); - Ok(info.stacks_tip_height > stacks_height_before) - }) - .expect("Failed to advance chain tip with STX transfer"); + if first_global_acceptance.is_none() { + assert!(latest_acceptance.response_data.tenure_extend_timestamp < initial_acceptance.response_data.tenure_extend_timestamp, "First global acceptance should be less than initial guesstimated acceptance as its based on block proposal time rather than epoch time at time of response."); + first_global_acceptance = Some(latest_acceptance); + } else { + // Because the block only contains transfers, the idle timeout should not have changed between blocks post the tenure change + assert_eq!( + latest_acceptance.response_data.tenure_extend_timestamp, + first_global_acceptance + .as_ref() + .map(|acceptance| acceptance.response_data.tenure_extend_timestamp) + .unwrap() + ); + }; + } - 
next_block_and_process_new_stacks_block( - &mut signer_test.running_nodes.btc_regtest_controller, - 60, - &signer_test.running_nodes.coord_channel, - ) - .expect("Failed to mine a normal tenure after the tenure extend"); + info!("---- Waiting for a tenure extend ----"); signer_test.shutdown(); } #[test] #[ignore] -/// This test checks that Epoch 2.5 signers will issue a mock signature per burn block they receive. -fn mock_sign_epoch_25() { +/// Verify that a tenure extend will occur after an idle timeout +/// while actively mining. +fn idle_tenure_extend_active_mining() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } @@ -3013,329 +2936,441 @@ fn mock_sign_epoch_25() { let num_signers = 5; let sender_sk = Secp256k1PrivateKey::new(); let sender_addr = tests::to_addr(&sender_sk); + let deployer_sk = Secp256k1PrivateKey::new(); + let deployer_addr = tests::to_addr(&deployer_sk); let send_amt = 100; let send_fee = 180; - + let num_txs = 5; + let num_naka_blocks = 5; + let tenure_count = 2; + let tx_fee = 10000; + let deploy_fee = 190200; + let amount = + deploy_fee + tx_fee * num_txs * tenure_count * num_naka_blocks * 100 + 100 * tenure_count; + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let idle_timeout = Duration::from_secs(60); let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, - vec![(sender_addr, send_amt + send_fee)], - |_| {}, - |node_config| { - node_config.miner.pre_nakamoto_mock_signing = true; - let epochs = node_config.burnchain.epochs.as_mut().unwrap(); - epochs[StacksEpochId::Epoch25].end_height = 251; - epochs[StacksEpochId::Epoch30].start_height = 251; + vec![(sender_addr, amount), (deployer_addr, amount)], + |config| { + config.tenure_idle_timeout = idle_timeout; }, + |_| {}, None, None, ); + let naka_conf = signer_test.running_nodes.conf.clone(); + let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); + let mut sender_nonces: HashMap = HashMap::new(); + + 
let get_and_increment_nonce = + |sender_sk: &Secp256k1PrivateKey, sender_nonces: &mut HashMap| { + let nonce = sender_nonces.get(&sender_sk.to_hex()).unwrap_or(&0); + let result = *nonce; + sender_nonces.insert(sender_sk.to_hex(), result + 1); + result + }; - let epochs = signer_test - .running_nodes - .conf - .burnchain - .epochs - .clone() - .unwrap(); - let epoch_3 = &epochs[StacksEpochId::Epoch30]; - let epoch_3_boundary = epoch_3.start_height - 1; // We only advance to the boundary as epoch 2.5 miner gets torn down at the boundary + signer_test.boot_to_epoch_3(); - signer_test.boot_to_epoch_25_reward_cycle(); + // Add a delay to the block validation process + TEST_VALIDATE_DELAY_DURATION_SECS.lock().unwrap().replace(3); - info!("------------------------- Test Processing Epoch 2.5 Tenures -------------------------"); + signer_test.mine_nakamoto_block(Duration::from_secs(30), true); - // Mine until epoch 3.0 and ensure that no more mock signatures are received - let reward_cycle = signer_test.get_current_reward_cycle(); - let signer_slot_ids: Vec<_> = signer_test - .get_signer_indices(reward_cycle) - .iter() - .map(|id| id.0) - .collect(); - let signer_public_keys = signer_test.get_signer_public_keys(reward_cycle); - assert_eq!(signer_slot_ids.len(), num_signers); + info!("---- Getting current idle timeout ----"); - let miners_stackerdb_contract = boot_code_id(MINERS_NAME, false); + let get_last_block_hash = || { + let blocks = test_observer::get_blocks(); + let last_block = blocks.last().unwrap(); + let block_hash = + hex_bytes(&last_block.get("block_hash").unwrap().as_str().unwrap()[2..]).unwrap(); + Sha512Trunc256Sum::from_vec(&block_hash).unwrap() + }; - // Mine until epoch 3.0 and ensure we get a new mock block per epoch 2.5 sortition - let main_poll_time = Instant::now(); - // Only advance to the boundary as the epoch 2.5 miner will be shut down at this point. 
- while signer_test - .running_nodes - .btc_regtest_controller - .get_headers_height() - < epoch_3_boundary - { - let mut mock_block_mesage = None; - let mock_poll_time = Instant::now(); - signer_test - .running_nodes - .btc_regtest_controller - .build_next_block(1); - let current_burn_block_height = signer_test - .running_nodes - .btc_regtest_controller - .get_headers_height(); - debug!("Waiting for mock miner message for burn block height {current_burn_block_height}"); - while mock_block_mesage.is_none() { - std::thread::sleep(Duration::from_millis(100)); - let chunks = test_observer::get_stackerdb_chunks(); - for chunk in chunks - .into_iter() - .filter_map(|chunk| { - if chunk.contract_id != miners_stackerdb_contract { - return None; + let last_block_hash = get_last_block_hash(); + + let slot_id = 0_u32; + + let get_last_block_hash = || { + let blocks = test_observer::get_blocks(); + let last_block = blocks.last().unwrap(); + let block_hash = + hex_bytes(&last_block.get("block_hash").unwrap().as_str().unwrap()[2..]).unwrap(); + Sha512Trunc256Sum::from_vec(&block_hash).unwrap() + }; + + let log_idle_diff = |timestamp: u64| { + let now = get_epoch_time_secs(); + let diff = timestamp.saturating_sub(now); + info!("----- Idle diff: {diff} seconds -----"); + }; + + let initial_response = signer_test.get_latest_block_response(slot_id); + assert_eq!( + initial_response.get_signer_signature_hash(), + last_block_hash + ); + + info!( + "---- Last idle timeout: {} ----", + initial_response.get_tenure_extend_timestamp() + ); + + // Deploy a contract that will be called a lot + + let contract_src = format!( + r#" +(define-data-var my-var uint u0) +(define-public (f) (begin {} (ok 1))) (begin (f)) + "#, + (0..250) + .map(|_| format!("(var-get my-var)")) + .collect::>() + .join(" ") + ); + + // First, lets deploy the contract + let deployer_nonce = get_and_increment_nonce(&deployer_sk, &mut sender_nonces); + let contract_tx = make_contract_publish( + &deployer_sk, + 
deployer_nonce, + deploy_fee, + naka_conf.burnchain.chain_id, + "small-contract", + &contract_src, + ); + submit_tx(&http_origin, &contract_tx); + + info!("----- Submitted deploy txs, mining BTC block -----"); + + signer_test.mine_nakamoto_block(Duration::from_secs(30), true); + let mut last_response = signer_test.get_latest_block_response(slot_id); + + // Make multiple tenures that get extended through idle timeouts + for t in 1..=tenure_count { + info!("----- Mining tenure {t} -----"); + log_idle_diff(last_response.get_tenure_extend_timestamp()); + // Now, start a tenure with contract calls + for i in 1..=num_naka_blocks { + // Just in case these Nakamoto blocks pass the idle timeout (probably because CI is slow), exit early + if i != 1 && last_block_contains_tenure_change_tx(TenureChangeCause::Extended) { + info!("---- Tenure extended before mining {i} nakamoto blocks -----"); + break; + } + info!("----- Mining nakamoto block {i} in tenure {t} -----"); + + signer_test.wait_for_nakamoto_block(30, || { + // Throw in a STX transfer to test mixed blocks + let sender_nonce = get_and_increment_nonce(&sender_sk, &mut sender_nonces); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + naka_conf.burnchain.chain_id, + &recipient, + send_amt, + ); + submit_tx(&http_origin, &transfer_tx); + + for _ in 0..num_txs { + let deployer_nonce = get_and_increment_nonce(&deployer_sk, &mut sender_nonces); + // Fill up the mempool with contract calls + let contract_tx = make_contract_call( + &deployer_sk, + deployer_nonce, + tx_fee, + naka_conf.burnchain.chain_id, + &deployer_addr, + "small-contract", + "f", + &[], + ); + match submit_tx_fallible(&http_origin, &contract_tx) { + Ok(_txid) => {} + Err(_e) => { + // If we fail to submit a tx, we need to make sure we don't + // increment the nonce for this sender, so we don't end up + // skipping a tx. 
+ sender_nonces.insert(deployer_sk.to_hex(), deployer_nonce); + } } - Some(chunk.modified_slots) - }) - .flatten() - { - if chunk.data.is_empty() { - continue; - } - let SignerMessage::MockBlock(mock_block) = - SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) - .expect("Failed to deserialize SignerMessage") - else { - continue; - }; - if mock_block.mock_proposal.peer_info.burn_block_height == current_burn_block_height - { - mock_block - .mock_signatures - .iter() - .for_each(|mock_signature| { - assert!(signer_public_keys.iter().any(|signer| { - mock_signature - .verify( - &StacksPublicKey::from_slice(signer.to_bytes().as_slice()) - .unwrap(), - ) - .expect("Failed to verify mock signature") - })); - }); - mock_block_mesage = Some(mock_block); - break; } - } - assert!( - mock_poll_time.elapsed() <= Duration::from_secs(15), - "Failed to find mock miner message within timeout" + }); + let latest_response = signer_test.get_latest_block_response(slot_id); + let naka_blocks = test_observer::get_mined_nakamoto_blocks(); + info!( + "----- Latest tenure extend timestamp: {} -----", + latest_response.get_tenure_extend_timestamp() ); + log_idle_diff(latest_response.get_tenure_extend_timestamp()); + info!( + "----- Latest block transaction events: {} -----", + naka_blocks.last().unwrap().tx_events.len() + ); + assert_eq!( + latest_response.get_signer_signature_hash(), + get_last_block_hash(), + "Expected the latest block response to be for the latest block" + ); + assert_ne!( + last_response.get_tenure_extend_timestamp(), + latest_response.get_tenure_extend_timestamp(), + "Tenure extend timestamp should change with each block" + ); + last_response = latest_response; } - assert!( - main_poll_time.elapsed() <= Duration::from_secs(45), - "Timed out waiting to advance epoch 3.0 boundary" + + let current_time = get_epoch_time_secs(); + let extend_diff = last_response + .get_tenure_extend_timestamp() + .saturating_sub(current_time); + + info!( + "----- After mining 
{num_naka_blocks} nakamoto blocks in tenure {t}, waiting for TenureExtend -----"; + "tenure_extend_timestamp" => last_response.get_tenure_extend_timestamp(), + "extend_diff" => extend_diff, + "current_time" => current_time, ); + + // Now, wait for the idle timeout to trigger + wait_for(extend_diff + 30, || { + Ok(last_block_contains_tenure_change_tx( + TenureChangeCause::Extended, + )) + }) + .expect("Expected a tenure extend after idle timeout"); + + last_response = signer_test.get_latest_block_response(slot_id); + + info!("----- Tenure {t} extended -----"); + log_idle_diff(last_response.get_tenure_extend_timestamp()); + } + + // After the last extend, mine a few more naka blocks + for i in 1..=num_naka_blocks { + // Just in case these Nakamoto blocks pass the idle timeout (probably because CI is slow), exit early + if i != 1 && last_block_contains_tenure_change_tx(TenureChangeCause::Extended) { + info!("---- Tenure extended before mining {i} nakamoto blocks -----"); + break; + } + info!("----- Mining nakamoto block {i} after last tenure extend -----"); + + signer_test.wait_for_nakamoto_block(30, || { + // Throw in a STX transfer to test mixed blocks + let sender_nonce = get_and_increment_nonce(&sender_sk, &mut sender_nonces); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + naka_conf.burnchain.chain_id, + &recipient, + send_amt, + ); + submit_tx(&http_origin, &transfer_tx); + }); } + + info!("------------------------- Test Shutdown -------------------------"); + signer_test.shutdown(); } #[test] #[ignore] -fn multiple_miners_mock_sign_epoch_25() { +/// This test checks the behaviour of signers when a sortition is empty. Specifically: +/// - An empty sortition will cause the signers to mark a miner as misbehaving once a timeout is exceeded. +/// - The miner will stop trying to mine once it sees a threshold of signers reject the block +/// - The empty sortition will trigger the miner to attempt a tenure extend. 
+/// - Signers will accept the tenure extend and sign subsequent blocks built off the old sortition +fn empty_sortition() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); let num_signers = 5; let sender_sk = Secp256k1PrivateKey::new(); let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; - - let btc_miner_1_seed = vec![1, 1, 1, 1]; - let btc_miner_2_seed = vec![2, 2, 2, 2]; - let btc_miner_1_pk = Keychain::default(btc_miner_1_seed.clone()).get_pub_key(); - let btc_miner_2_pk = Keychain::default(btc_miner_2_seed.clone()).get_pub_key(); - - let node_1_rpc = gen_random_port(); - let node_1_p2p = gen_random_port(); - let node_2_rpc = gen_random_port(); - let node_2_p2p = gen_random_port(); - - let localhost = "127.0.0.1"; - let node_1_rpc_bind = format!("{localhost}:{node_1_rpc}"); - let node_2_rpc_bind = format!("{localhost}:{node_2_rpc}"); - let mut node_2_listeners = Vec::new(); - - // partition the signer set so that ~half are listening and using node 1 for RPC and events, - // and the rest are using node 2 - + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let block_proposal_timeout = Duration::from_secs(20); let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, vec![(sender_addr, send_amt + send_fee)], - |signer_config| { - let node_host = if signer_config.endpoint.port() % 2 == 0 { - &node_1_rpc_bind - } else { - &node_2_rpc_bind - }; - signer_config.node_host = node_host.to_string(); - }, |config| { - config.node.rpc_bind = format!("{localhost}:{node_1_rpc}"); - config.node.p2p_bind = format!("{localhost}:{node_1_p2p}"); - config.node.data_url = format!("http://{localhost}:{node_1_rpc}"); - config.node.p2p_address = format!("{localhost}:{node_1_p2p}"); - - config.node.seed = 
btc_miner_1_seed.clone(); - config.node.local_peer_seed = btc_miner_1_seed.clone(); - config.burnchain.local_mining_public_key = Some(btc_miner_1_pk.to_hex()); - config.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[1])); - config.miner.pre_nakamoto_mock_signing = true; - let epochs = config.burnchain.epochs.as_mut().unwrap(); - epochs[StacksEpochId::Epoch25].end_height = 251; - epochs[StacksEpochId::Epoch30].start_height = 251; - config.events_observers.retain(|listener| { - let Ok(addr) = std::net::SocketAddr::from_str(&listener.endpoint) else { - warn!( - "Cannot parse {} to a socket, assuming it isn't a signer-listener binding", - listener.endpoint - ); - return true; - }; - if addr.port() % 2 == 0 || addr.port() == test_observer::EVENT_OBSERVER_PORT { - return true; - } - node_2_listeners.push(listener.clone()); - false - }) + // make the duration long enough that the miner will be marked as malicious + config.block_proposal_timeout = block_proposal_timeout; }, - Some(vec![btc_miner_1_pk, btc_miner_2_pk]), + |_| {}, + None, None, ); - let conf = signer_test.running_nodes.conf.clone(); - let mut conf_node_2 = conf.clone(); - let localhost = "127.0.0.1"; - conf_node_2.node.rpc_bind = format!("{localhost}:{node_2_rpc}"); - conf_node_2.node.p2p_bind = format!("{localhost}:{node_2_p2p}"); - conf_node_2.node.data_url = format!("http://{localhost}:{node_2_rpc}"); - conf_node_2.node.p2p_address = format!("{localhost}:{node_2_p2p}"); - conf_node_2.node.seed = btc_miner_2_seed.clone(); - conf_node_2.burnchain.local_mining_public_key = Some(btc_miner_2_pk.to_hex()); - conf_node_2.node.local_peer_seed = btc_miner_2_seed.clone(); - conf_node_2.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[2])); - conf_node_2.node.miner = true; - conf_node_2.events_observers.clear(); - conf_node_2.events_observers.extend(node_2_listeners); - assert!(!conf_node_2.events_observers.is_empty()); + let http_origin = format!("http://{}", 
&signer_test.running_nodes.conf.node.rpc_bind); + let short_timeout = Duration::from_secs(20); - let node_1_sk = Secp256k1PrivateKey::from_seed(&conf.node.local_peer_seed); - let node_1_pk = StacksPublicKey::from_private(&node_1_sk); + signer_test.boot_to_epoch_3(); - conf_node_2.node.working_dir = format!("{}-1", conf_node_2.node.working_dir); + TEST_BROADCAST_STALL.lock().unwrap().replace(true); - conf_node_2.node.set_bootstrap_nodes( - format!("{}@{}", &node_1_pk.to_hex(), conf.node.p2p_bind), - conf.burnchain.chain_id, - conf.burnchain.peer_version, - ); + info!("------------------------- Test Mine Regular Tenure A -------------------------"); + let commits_before = signer_test + .running_nodes + .commits_submitted + .load(Ordering::SeqCst); + // Mine a regular tenure + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || { + let commits_count = signer_test + .running_nodes + .commits_submitted + .load(Ordering::SeqCst); + Ok(commits_count > commits_before) + }, + ) + .unwrap(); - let mut run_loop_2 = boot_nakamoto::BootRunLoop::new(conf_node_2.clone()).unwrap(); - let _run_loop_2_thread = thread::Builder::new() - .name("run_loop_2".into()) - .spawn(move || run_loop_2.start(None, 0)) - .unwrap(); + info!("------------------------- Test Mine Empty Tenure B -------------------------"); + info!("Pausing stacks block mining to trigger an empty sortition."); + let blocks_before = signer_test + .running_nodes + .nakamoto_blocks_mined + .load(Ordering::SeqCst); + let commits_before = signer_test + .running_nodes + .commits_submitted + .load(Ordering::SeqCst); + // Start new Tenure B + // In the next block, the miner should win the tenure + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || { + let commits_count = signer_test + .running_nodes + .commits_submitted + .load(Ordering::SeqCst); + Ok(commits_count > commits_before) + }, + ) + .unwrap(); - let epochs = signer_test + info!("Pausing stacks block 
proposal to force an empty tenure"); + TEST_BROADCAST_STALL.lock().unwrap().replace(true); + + info!("Pausing commit op to prevent tenure C from starting..."); + signer_test .running_nodes - .conf - .burnchain - .epochs - .clone() - .unwrap(); - let epoch_3 = &epochs[StacksEpochId::Epoch30]; - let epoch_3_boundary = epoch_3.start_height - 1; // We only advance to the boundary as epoch 2.5 miner gets torn down at the boundary + .nakamoto_test_skip_commit_op + .set(true); - signer_test.boot_to_epoch_25_reward_cycle(); + let blocks_after = signer_test + .running_nodes + .nakamoto_blocks_mined + .load(Ordering::SeqCst); + assert_eq!(blocks_after, blocks_before); - info!("------------------------- Reached Epoch 2.5 Reward Cycle-------------------------"); + let rejected_before = signer_test + .running_nodes + .nakamoto_blocks_rejected + .load(Ordering::SeqCst); - // Mine until epoch 3.0 and ensure that no more mock signatures are received + // submit a tx so that the miner will mine an extra block + let sender_nonce = 0; + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); + submit_tx(&http_origin, &transfer_tx); + + std::thread::sleep(block_proposal_timeout.add(Duration::from_secs(1))); + + TEST_BROADCAST_STALL.lock().unwrap().replace(false); + + info!("------------------------- Test Delayed Block is Rejected -------------------------"); let reward_cycle = signer_test.get_current_reward_cycle(); + let mut stackerdb = StackerDB::new( + &signer_test.running_nodes.conf.node.rpc_bind, + StacksPrivateKey::new(), // We are just reading so don't care what the key is + false, + reward_cycle, + SignerSlotID(0), // We are just reading so again, don't care about index. 
+ ); + let signer_slot_ids: Vec<_> = signer_test .get_signer_indices(reward_cycle) .iter() .map(|id| id.0) .collect(); - let signer_public_keys = signer_test.get_signer_public_keys(reward_cycle); assert_eq!(signer_slot_ids.len(), num_signers); - let miners_stackerdb_contract = boot_code_id(MINERS_NAME, false); - - // Only advance to the boundary as the epoch 2.5 miner will be shut down at this point. - while signer_test - .running_nodes - .btc_regtest_controller - .get_headers_height() - < epoch_3_boundary - { - let mut mock_block_mesage = None; - let mock_poll_time = Instant::now(); - signer_test - .running_nodes - .btc_regtest_controller - .build_next_block(1); - let current_burn_block_height = signer_test - .running_nodes - .btc_regtest_controller - .get_headers_height(); - debug!("Waiting for mock miner message for burn block height {current_burn_block_height}"); - while mock_block_mesage.is_none() { - std::thread::sleep(Duration::from_millis(100)); - let chunks = test_observer::get_stackerdb_chunks(); - for chunk in chunks - .into_iter() - .filter_map(|chunk| { - if chunk.contract_id != miners_stackerdb_contract { - return None; - } - Some(chunk.modified_slots) - }) - .flatten() + // The miner's proposed block should get rejected by all the signers + let mut found_rejections = Vec::new(); + wait_for(short_timeout.as_secs(), || { + for slot_id in signer_slot_ids.iter() { + if found_rejections.contains(slot_id) { + continue; + } + let mut latest_msgs = StackerDB::get_messages( + stackerdb + .get_session_mut(&MessageSlotID::BlockResponse) + .expect("Failed to get BlockResponse stackerdb session"), + &[*slot_id] + ).expect("Failed to get message from stackerdb"); + assert!(latest_msgs.len() <= 1); + let Some(latest_msg) = latest_msgs.pop() else { + info!("No message yet from slot #{slot_id}, will wait to try again"); + continue; + }; + if let SignerMessage::BlockResponse(BlockResponse::Rejected(BlockRejection { + reason_code, + metadata, + .. 
+ })) = latest_msg { - if chunk.data.is_empty() { - continue; - } - let SignerMessage::MockBlock(mock_block) = - SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) - .expect("Failed to deserialize SignerMessage") - else { - continue; - }; - if mock_block.mock_proposal.peer_info.burn_block_height == current_burn_block_height - { - mock_block - .mock_signatures - .iter() - .for_each(|mock_signature| { - assert!(signer_public_keys.iter().any(|signer| { - mock_signature - .verify( - &StacksPublicKey::from_slice(signer.to_bytes().as_slice()) - .unwrap(), - ) - .expect("Failed to verify mock signature") - })); - }); - mock_block_mesage = Some(mock_block); - break; - } + assert!(matches!(reason_code, RejectCode::SortitionViewMismatch)); + assert_eq!(metadata.server_version, VERSION_STRING.to_string()); + found_rejections.push(*slot_id); + } else { + info!("Latest message from slot #{slot_id} isn't a block rejection, will wait to see if the signer updates to a rejection"); } - assert!( - mock_poll_time.elapsed() <= Duration::from_secs(15), - "Failed to find mock miner message within timeout" - ); } - } + let rejections = signer_test + .running_nodes + .nakamoto_blocks_rejected + .load(Ordering::SeqCst); + + // wait until we've found rejections for all the signers, and the miner has confirmed that + // the signers have rejected the block + Ok(found_rejections.len() == signer_slot_ids.len() && rejections > rejected_before) + }).unwrap(); + signer_test.shutdown(); } #[test] #[ignore] -/// This test asserts that signer set rollover works as expected. -/// Specifically, if a new set of signers are registered for an upcoming reward cycle, -/// old signers shut down operation and the new signers take over with the commencement of -/// the next reward cycle. -fn signer_set_rollover() { +/// This test checks the behavior of signers when an empty sortition arrives +/// before the first block of the previous tenure has been approved. 
+/// Specifically: +/// - The empty sortition will trigger the miner to attempt a tenure extend. +/// - Signers will accept the tenure extend and sign subsequent blocks built +/// off the old sortition +fn empty_sortition_before_approval() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + tracing_subscriber::registry() .with(fmt::layer()) .with(EnvFilter::from_default_env()) @@ -3343,131 +3378,113 @@ fn signer_set_rollover() { info!("------------------------- Test Setup -------------------------"); let num_signers = 5; - let new_num_signers = 4; - - let new_signer_private_keys: Vec<_> = (0..new_num_signers) - .map(|_| StacksPrivateKey::new()) - .collect(); - let new_signer_public_keys: Vec<_> = new_signer_private_keys - .iter() - .map(|sk| Secp256k1PublicKey::from_private(sk).to_bytes_compressed()) - .collect(); - let new_signer_addresses: Vec<_> = new_signer_private_keys.iter().map(tests::to_addr).collect(); let sender_sk = Secp256k1PrivateKey::new(); let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let block_proposal_timeout = Duration::from_secs(20); + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![(sender_addr, send_amt + send_fee)], + |config| { + // make the duration long enough that the miner will be marked as malicious + config.block_proposal_timeout = block_proposal_timeout; + }, + |_| {}, + None, + None, + ); + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); - let mut initial_balances = new_signer_addresses - .iter() - .map(|addr| (*addr, POX_4_DEFAULT_STACKER_BALANCE)) - .collect::>(); + signer_test.boot_to_epoch_3(); - initial_balances.push((sender_addr, (send_amt + send_fee) * 4)); + next_block_and_process_new_stacks_block( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + &signer_test.running_nodes.coord_channel, + ) + 
.unwrap(); - let run_stamp = rand::random(); + let info = get_chain_info(&signer_test.running_nodes.conf); + let burn_height_before = info.burn_block_height; + let stacks_height_before = info.stacks_tip_height; - let rpc_port = 51024; - let rpc_bind = format!("127.0.0.1:{rpc_port}"); + info!("Forcing miner to ignore signatures for next block"); + TEST_IGNORE_SIGNERS.set(true); - // Setup the new signers that will take over - let new_signer_configs = build_signer_config_tomls( - &new_signer_private_keys, - &rpc_bind, - Some(Duration::from_millis(128)), // Timeout defaults to 5 seconds. Let's override it to 128 milliseconds. - &Network::Testnet, - "12345", - run_stamp, - 3000 + num_signers, - Some(100_000), - None, - Some(9000 + num_signers), - None, - ); + info!("Pausing block commits to trigger an empty sortition."); + signer_test + .running_nodes + .nakamoto_test_skip_commit_op + .0 + .lock() + .unwrap() + .replace(true); - let new_spawned_signers: Vec<_> = new_signer_configs - .iter() - .map(|conf| { - info!("spawning signer"); - let signer_config = SignerConfig::load_from_str(conf).unwrap(); - SpawnedSigner::new(signer_config) - }) - .collect(); + info!("------------------------- Test Mine Tenure A -------------------------"); + let proposed_before = signer_test + .running_nodes + .nakamoto_blocks_proposed + .load(Ordering::SeqCst); + // Mine a regular tenure and wait for a block proposal + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || { + let proposed_count = signer_test + .running_nodes + .nakamoto_blocks_proposed + .load(Ordering::SeqCst); + Ok(proposed_count > proposed_before) + }, + ) + .expect("Failed to mine tenure A and propose a block"); - // Boot with some initial signer set - let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( - num_signers, - initial_balances, - |_| {}, - |naka_conf| { - for toml in new_signer_configs.clone() { - let signer_config = 
SignerConfig::load_from_str(&toml).unwrap(); - info!( - "---- Adding signer endpoint to naka conf ({}) ----", - signer_config.endpoint - ); + info!("------------------------- Test Mine Empty Tenure B -------------------------"); - naka_conf.events_observers.insert(EventObserverConfig { - endpoint: format!("{}", signer_config.endpoint), - events_keys: vec![ - EventKeyType::StackerDBChunks, - EventKeyType::BlockProposal, - EventKeyType::BurnchainBlocks, - ], - timeout_ms: 1000, - }); - } - naka_conf.node.rpc_bind = rpc_bind.clone(); + // Trigger an empty tenure + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || { + let burn_height = get_chain_info(&signer_test.running_nodes.conf).burn_block_height; + Ok(burn_height == burn_height_before + 2) }, - None, - None, - ); - assert_eq!( - new_spawned_signers[0].config.node_host, - signer_test.running_nodes.conf.node.rpc_bind - ); - // Only stack for one cycle so that the signer set changes - signer_test.num_stacking_cycles = 1_u64; + ) + .expect("Failed to mine empty tenure"); - let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); - let short_timeout = Duration::from_secs(20); + info!("Unpause block commits"); + signer_test + .running_nodes + .nakamoto_test_skip_commit_op + .0 + .lock() + .unwrap() + .replace(false); - // Verify that naka_conf has our new signer's event observers - for toml in &new_signer_configs { - let signer_config = SignerConfig::load_from_str(toml).unwrap(); - let endpoint = format!("{}", signer_config.endpoint); - assert!(signer_test - .running_nodes - .conf - .events_observers - .iter() - .any(|observer| observer.endpoint == endpoint)); - } + info!("Stop ignoring signers and wait for the tip to advance"); + TEST_IGNORE_SIGNERS.set(false); - // Advance to the first reward cycle, stacking to the old signers beforehand + wait_for(60, || { + let info = get_chain_info(&signer_test.running_nodes.conf); + Ok(info.stacks_tip_height > 
stacks_height_before) + }) + .expect("Failed to advance chain tip"); - info!("---- Booting to epoch 3 -----"); - signer_test.boot_to_epoch_3(); + let info = get_chain_info(&signer_test.running_nodes.conf); + info!("Current state: {:?}", info); - // verify that the first reward cycle has the old signers in the reward set - let reward_cycle = signer_test.get_current_reward_cycle(); - let signer_test_public_keys: Vec<_> = signer_test - .signer_stacks_private_keys - .iter() - .map(|sk| Secp256k1PublicKey::from_private(sk).to_bytes_compressed()) - .collect(); + // Wait for a block with a tenure extend to be mined + wait_for(60, || { + Ok(last_block_contains_tenure_change_tx( + TenureChangeCause::Extended, + )) + }) + .expect("Timed out waiting for tenure extend"); - info!("---- Verifying that the current signers are the old signers ----"); - let current_signers = signer_test.get_reward_set_signers(reward_cycle); - assert_eq!(current_signers.len(), num_signers); - // Verify that the current signers are the same as the old signers - for signer in current_signers.iter() { - assert!(signer_test_public_keys.contains(&signer.signing_key.to_vec())); - assert!(!new_signer_public_keys.contains(&signer.signing_key.to_vec())); - } + let stacks_height_before = get_chain_info(&signer_test.running_nodes.conf).stacks_tip_height; - info!("---- Mining a block to trigger the signer set -----"); // submit a tx so that the miner will mine an extra block let sender_nonce = 0; let transfer_tx = make_stacks_transfer( @@ -3479,124 +3496,154 @@ fn signer_set_rollover() { send_amt, ); submit_tx(&http_origin, &transfer_tx); - signer_test.mine_nakamoto_block(short_timeout); - let mined_block = test_observer::get_mined_nakamoto_blocks().pop().unwrap(); - let block_sighash = mined_block.signer_signature_hash; - let signer_signatures = mined_block.signer_signature; - - // verify the mined_block signatures against the OLD signer set - for signature in signer_signatures.iter() { - let pk = 
Secp256k1PublicKey::recover_to_pubkey(block_sighash.bits(), signature) - .expect("FATAL: Failed to recover pubkey from block sighash"); - assert!(signer_test_public_keys.contains(&pk.to_bytes_compressed())); - assert!(!new_signer_public_keys.contains(&pk.to_bytes_compressed())); - } - // advance to the next reward cycle, stacking to the new signers beforehand - let reward_cycle = signer_test.get_current_reward_cycle(); + wait_for(60, || { + let info = get_chain_info(&signer_test.running_nodes.conf); + Ok(info.stacks_tip_height > stacks_height_before) + }) + .expect("Failed to advance chain tip with STX transfer"); - info!("---- Stacking new signers -----"); + next_block_and_process_new_stacks_block( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + &signer_test.running_nodes.coord_channel, + ) + .expect("Failed to mine a normal tenure after the tenure extend"); - let burn_block_height = signer_test - .running_nodes - .btc_regtest_controller - .get_headers_height(); - let accounts_to_check: Vec<_> = new_signer_private_keys.iter().map(tests::to_addr).collect(); - for stacker_sk in new_signer_private_keys.iter() { - let pox_addr = PoxAddress::from_legacy( - AddressHashMode::SerializeP2PKH, - tests::to_addr(stacker_sk).bytes, - ); - let pox_addr_tuple: clarity::vm::Value = - pox_addr.clone().as_clarity_tuple().unwrap().into(); - let signature = make_pox_4_signer_key_signature( - &pox_addr, - stacker_sk, - reward_cycle.into(), - &Pox4SignatureTopic::StackStx, - CHAIN_ID_TESTNET, - 1_u128, - u128::MAX, - 1, - ) - .unwrap() - .to_rsv(); + signer_test.shutdown(); +} - let signer_pk = Secp256k1PublicKey::from_private(stacker_sk); - let stacking_tx = tests::make_contract_call( - stacker_sk, - 0, - 1000, - signer_test.running_nodes.conf.burnchain.chain_id, - &StacksAddress::burn_address(false), - "pox-4", - "stack-stx", - &[ - clarity::vm::Value::UInt(POX_4_DEFAULT_STACKER_STX_AMT), - pox_addr_tuple.clone(), - clarity::vm::Value::UInt(burn_block_height as 
u128), - clarity::vm::Value::UInt(1), - clarity::vm::Value::some(clarity::vm::Value::buff_from(signature).unwrap()) - .unwrap(), - clarity::vm::Value::buff_from(signer_pk.to_bytes_compressed()).unwrap(), - clarity::vm::Value::UInt(u128::MAX), - clarity::vm::Value::UInt(1), - ], - ); - submit_tx(&http_origin, &stacking_tx); +#[test] +#[ignore] +/// This test checks the behavior of signers when an empty sortition arrives +/// before the first block of the previous tenure has been proposed. +/// Specifically: +/// - The empty sortition will trigger the miner to attempt a tenure extend. +/// - Signers will accept the tenure extend and sign subsequent blocks built +/// off the old sortition +fn empty_sortition_before_proposal() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; } + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let block_proposal_timeout = Duration::from_secs(20); + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![(sender_addr, send_amt + send_fee)], + |config| { + // make the duration long enough that the miner will be marked as malicious + config.block_proposal_timeout = block_proposal_timeout; + }, + |_| {}, + None, + None, + ); + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + + signer_test.boot_to_epoch_3(); + + next_block_and_process_new_stacks_block( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + &signer_test.running_nodes.coord_channel, + ) + .unwrap(); + + let info = get_chain_info(&signer_test.running_nodes.conf); + let stacks_height_before = 
info.stacks_tip_height; + + info!("Pause block commits to ensure we get an empty sortition"); + signer_test + .running_nodes + .nakamoto_test_skip_commit_op + .0 + .lock() + .unwrap() + .replace(true); + + info!("Pause miner so it doesn't propose a block before the next tenure arrives"); + TEST_MINE_STALL.lock().unwrap().replace(true); + + let burn_height_before = get_chain_info(&signer_test.running_nodes.conf).burn_block_height; + + info!("------------------------- Test Mine Tenure A and B -------------------------"); + signer_test + .running_nodes + .btc_regtest_controller + .build_next_block(2); + wait_for(60, || { - Ok(accounts_to_check - .iter() - .all(|acct| get_account(&http_origin, acct).nonce >= 1)) + let info = get_chain_info(&signer_test.running_nodes.conf); + Ok(info.burn_block_height == burn_height_before + 2) }) - .expect("Timed out waiting for stacking txs to be mined"); + .expect("Failed to advance chain tip"); - signer_test.mine_nakamoto_block(short_timeout); + // Sleep a bit more to ensure the signers see both burn blocks + sleep_ms(5_000); - let next_reward_cycle = reward_cycle.saturating_add(1); + info!("Unpause miner"); + TEST_MINE_STALL.lock().unwrap().replace(false); - let next_cycle_height = signer_test + info!("Unpause block commits"); + signer_test .running_nodes - .btc_regtest_controller - .get_burnchain() - .nakamoto_first_block_of_cycle(next_reward_cycle) - .saturating_add(1); + .nakamoto_test_skip_commit_op + .0 + .lock() + .unwrap() + .replace(false); - info!("---- Mining to next reward set calculation -----"); - signer_test.run_until_burnchain_height_nakamoto( - Duration::from_secs(60), - next_cycle_height.saturating_sub(3), - new_num_signers, - ); + wait_for(60, || { + let info = get_chain_info(&signer_test.running_nodes.conf); + Ok(info.stacks_tip_height > stacks_height_before) + }) + .expect("Failed to advance chain tip"); - // Verify that the new reward set is the new signers - let reward_set = 
signer_test.get_reward_set_signers(next_reward_cycle); - for signer in reward_set.iter() { - assert!(!signer_test_public_keys.contains(&signer.signing_key.to_vec())); - assert!(new_signer_public_keys.contains(&signer.signing_key.to_vec())); - } + let info = get_chain_info(&signer_test.running_nodes.conf); + info!("Current state: {:?}", info); - info!("---- Mining to the next reward cycle (block {next_cycle_height}) -----",); - signer_test.run_until_burnchain_height_nakamoto( - Duration::from_secs(60), - next_cycle_height, - new_num_signers, - ); - let new_reward_cycle = signer_test.get_current_reward_cycle(); - assert_eq!(new_reward_cycle, reward_cycle.saturating_add(1)); + // Wait for a block with a tenure extend to be mined + wait_for(60, || { + let blocks = test_observer::get_blocks(); + let last_block = blocks.last().unwrap(); + info!("Last block mined: {:?}", last_block); + for tx in last_block["transactions"].as_array().unwrap() { + let raw_tx = tx["raw_tx"].as_str().unwrap(); + if raw_tx == "0x00" { + continue; + } + let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); + let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); + if let TransactionPayload::TenureChange(payload) = &parsed.payload { + match payload.cause { + TenureChangeCause::Extended => { + info!("Found tenure extend block"); + return Ok(true); + } + TenureChangeCause::BlockFound => {} + } + }; + } + Ok(false) + }) + .expect("Timed out waiting for tenure extend"); - info!("---- Verifying that the current signers are the new signers ----"); - let current_signers = signer_test.get_reward_set_signers(new_reward_cycle); - assert_eq!(current_signers.len(), new_num_signers); - for signer in current_signers.iter() { - assert!(!signer_test_public_keys.contains(&signer.signing_key.to_vec())); - assert!(new_signer_public_keys.contains(&signer.signing_key.to_vec())); - } + let stacks_height_before = get_chain_info(&signer_test.running_nodes.conf).stacks_tip_height; - info!("---- 
Mining a block to verify new signer set -----"); - let sender_nonce = 1; + // submit a tx so that the miner will mine an extra block + let sender_nonce = 0; let transfer_tx = make_stacks_transfer( &sender_sk, sender_nonce, @@ -3606,31 +3653,27 @@ fn signer_set_rollover() { send_amt, ); submit_tx(&http_origin, &transfer_tx); - signer_test.mine_nakamoto_block(short_timeout); - let mined_block = test_observer::get_mined_nakamoto_blocks().pop().unwrap(); - info!("---- Verifying that the new signers signed the block -----"); - let signer_signatures = mined_block.signer_signature; + wait_for(60, || { + let info = get_chain_info(&signer_test.running_nodes.conf); + Ok(info.stacks_tip_height > stacks_height_before) + }) + .expect("Failed to advance chain tip with STX transfer"); - // verify the mined_block signatures against the NEW signer set - for signature in signer_signatures.iter() { - let pk = Secp256k1PublicKey::recover_to_pubkey(block_sighash.bits(), signature) - .expect("FATAL: Failed to recover pubkey from block sighash"); - assert!(!signer_test_public_keys.contains(&pk.to_bytes_compressed())); - assert!(new_signer_public_keys.contains(&pk.to_bytes_compressed())); - } + next_block_and_process_new_stacks_block( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + &signer_test.running_nodes.coord_channel, + ) + .expect("Failed to mine a normal tenure after the tenure extend"); signer_test.shutdown(); - for signer in new_spawned_signers { - assert!(signer.stop().is_none()); - } } #[test] #[ignore] -/// This test checks that the miners and signers will not produce Nakamoto blocks -/// until the minimum time has passed between blocks. -fn min_gap_between_blocks() { +/// This test checks that Epoch 2.5 signers will issue a mock signature per burn block they receive. 
+fn mock_sign_epoch_25() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } @@ -3647,224 +3690,133 @@ fn min_gap_between_blocks() { let send_amt = 100; let send_fee = 180; - let interim_blocks = 5; - let recipient = PrincipalData::from(StacksAddress::burn_address(false)); - let time_between_blocks_ms = 10_000; let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, - vec![(sender_addr, (send_amt + send_fee) * interim_blocks)], - |_config| {}, - |config| { - config.miner.min_time_between_blocks_ms = time_between_blocks_ms; + vec![(sender_addr, send_amt + send_fee)], + |_| {}, + |node_config| { + node_config.miner.pre_nakamoto_mock_signing = true; + let epochs = node_config.burnchain.epochs.as_mut().unwrap(); + epochs[StacksEpochId::Epoch25].end_height = 251; + epochs[StacksEpochId::Epoch30].start_height = 251; + epochs[StacksEpochId::Epoch30].end_height = 265; + epochs[StacksEpochId::Epoch31].start_height = 265; }, None, None, ); - let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + let epochs = signer_test + .running_nodes + .conf + .burnchain + .epochs + .clone() + .unwrap(); + let epoch_3 = &epochs[StacksEpochId::Epoch30]; + let epoch_3_boundary = epoch_3.start_height - 1; // We only advance to the boundary as epoch 2.5 miner gets torn down at the boundary - signer_test.boot_to_epoch_3(); + signer_test.boot_to_epoch_25_reward_cycle(); - info!("Ensure that the first Nakamoto block was mined"); - let blocks = get_nakamoto_headers(&signer_test.running_nodes.conf); - assert_eq!(blocks.len(), 1); - // mine the interim blocks - info!("Mining interim blocks"); - for interim_block_ix in 0..interim_blocks { - let blocks_processed_before = signer_test - .running_nodes - .nakamoto_blocks_mined - .load(Ordering::SeqCst); - // submit a tx so that the miner will mine an extra block - let transfer_tx = make_stacks_transfer( - &sender_sk, - interim_block_ix, // same as the sender nonce - send_fee, - 
signer_test.running_nodes.conf.burnchain.chain_id, - &recipient, - send_amt, - ); - submit_tx(&http_origin, &transfer_tx); + info!("------------------------- Test Processing Epoch 2.5 Tenures -------------------------"); - info!("Submitted transfer tx and waiting for block to be processed"); - wait_for(60, || { - let blocks_processed = signer_test - .running_nodes - .nakamoto_blocks_mined - .load(Ordering::SeqCst); - Ok(blocks_processed > blocks_processed_before) - }) - .unwrap(); - info!("Mined interim block:{interim_block_ix}"); - } + // Mine until epoch 3.0 and ensure that no more mock signatures are received + let reward_cycle = signer_test.get_current_reward_cycle(); + let signer_slot_ids: Vec<_> = signer_test + .get_signer_indices(reward_cycle) + .iter() + .map(|id| id.0) + .collect(); + let signer_public_keys = signer_test.get_signer_public_keys(reward_cycle); + assert_eq!(signer_slot_ids.len(), num_signers); - wait_for(60, || { - let new_blocks = get_nakamoto_headers(&signer_test.running_nodes.conf); - Ok(new_blocks.len() == blocks.len() + interim_blocks as usize) - }) - .unwrap(); + let miners_stackerdb_contract = boot_code_id(MINERS_NAME, false); - // Verify that every Nakamoto block is mined after the gap is exceeded between each - let mut blocks = get_nakamoto_headers(&signer_test.running_nodes.conf); - blocks.sort_by(|a, b| a.stacks_block_height.cmp(&b.stacks_block_height)); - for i in 1..blocks.len() { - let block = &blocks[i]; - let parent_block = &blocks[i - 1]; - assert_eq!( - block.stacks_block_height, - parent_block.stacks_block_height + 1 - ); - info!( - "Checking that the time between blocks {} and {} is respected", - parent_block.stacks_block_height, block.stacks_block_height - ); - let block_time = block - .anchored_header - .as_stacks_nakamoto() - .unwrap() - .timestamp; - let parent_block_time = parent_block - .anchored_header - .as_stacks_nakamoto() - .unwrap() - .timestamp; - assert!( - block_time > parent_block_time, - "Block time is 
BEFORE parent block time" - ); + // Mine until epoch 3.0 and ensure we get a new mock block per epoch 2.5 sortition + let main_poll_time = Instant::now(); + // Only advance to the boundary as the epoch 2.5 miner will be shut down at this point. + while signer_test + .running_nodes + .btc_regtest_controller + .get_headers_height() + < epoch_3_boundary + { + let mut mock_block_mesage = None; + let mock_poll_time = Instant::now(); + signer_test + .running_nodes + .btc_regtest_controller + .build_next_block(1); + let current_burn_block_height = signer_test + .running_nodes + .btc_regtest_controller + .get_headers_height(); + debug!("Waiting for mock miner message for burn block height {current_burn_block_height}"); + while mock_block_mesage.is_none() { + std::thread::sleep(Duration::from_millis(100)); + let chunks = test_observer::get_stackerdb_chunks(); + for chunk in chunks + .into_iter() + .filter_map(|chunk| { + if chunk.contract_id != miners_stackerdb_contract { + return None; + } + Some(chunk.modified_slots) + }) + .flatten() + { + if chunk.data.is_empty() { + continue; + } + let SignerMessage::MockBlock(mock_block) = + SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + .expect("Failed to deserialize SignerMessage") + else { + continue; + }; + if mock_block.mock_proposal.peer_info.burn_block_height == current_burn_block_height + { + mock_block + .mock_signatures + .iter() + .for_each(|mock_signature| { + assert!(signer_public_keys.iter().any(|signer| { + mock_signature + .verify( + &StacksPublicKey::from_slice(signer.to_bytes().as_slice()) + .unwrap(), + ) + .expect("Failed to verify mock signature") + })); + }); + mock_block_mesage = Some(mock_block); + break; + } + } + assert!( + mock_poll_time.elapsed() <= Duration::from_secs(15), + "Failed to find mock miner message within timeout" + ); + } assert!( - Duration::from_secs(block_time - parent_block_time) - >= Duration::from_millis(time_between_blocks_ms), - "Block mined before gap was exceeded: 
{block_time}s - {parent_block_time}s > {time_between_blocks_ms}ms", + main_poll_time.elapsed() <= Duration::from_secs(145), + "Timed out waiting to advance epoch 3.0 boundary" ); } - debug!("Shutting down min_gap_between_blocks test"); - signer_test.shutdown(); } #[test] #[ignore] -/// Test scenario where there are duplicate signers with the same private key -/// First submitted signature should take precedence -fn duplicate_signers() { +fn multiple_miners_mock_sign_epoch_25() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } - tracing_subscriber::registry() - .with(fmt::layer()) - .with(EnvFilter::from_default_env()) - .init(); - - // Disable p2p broadcast of the nakamoto blocks, so that we rely - // on the signer's using StackerDB to get pushed blocks - *nakamoto_node::miner::TEST_SKIP_P2P_BROADCAST - .lock() - .unwrap() = Some(true); - - info!("------------------------- Test Setup -------------------------"); - let num_signers = 5; - let mut signer_stacks_private_keys = (0..num_signers) - .map(|_| StacksPrivateKey::new()) - .collect::>(); - - // First two signers have same private key - signer_stacks_private_keys[1] = signer_stacks_private_keys[0]; - let unique_signers = num_signers - 1; - let duplicate_pubkey = Secp256k1PublicKey::from_private(&signer_stacks_private_keys[0]); - let duplicate_pubkey_from_copy = - Secp256k1PublicKey::from_private(&signer_stacks_private_keys[1]); - assert_eq!( - duplicate_pubkey, duplicate_pubkey_from_copy, - "Recovered pubkeys don't match" - ); - - let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( - num_signers, - vec![], - |_| {}, - |_| {}, - None, - Some(signer_stacks_private_keys), - ); - - signer_test.boot_to_epoch_3(); - let timeout = Duration::from_secs(30); - - info!("------------------------- Try mining one block -------------------------"); - - signer_test.mine_and_verify_confirmed_naka_block(timeout, num_signers); - - info!("------------------------- Read all 
`BlockResponse::Accepted` messages -------------------------"); - - let mut signer_accepted_responses = vec![]; - let start_polling = Instant::now(); - while start_polling.elapsed() <= timeout { - std::thread::sleep(Duration::from_secs(1)); - let messages = test_observer::get_stackerdb_chunks() - .into_iter() - .flat_map(|chunk| chunk.modified_slots) - .filter_map(|chunk| { - SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()).ok() - }) - .filter_map(|message| match message { - SignerMessage::BlockResponse(BlockResponse::Accepted(m)) => { - info!("Message(accepted): {m:?}"); - Some(m) - } - _ => { - debug!("Message(ignored): {message:?}"); - None - } - }); - signer_accepted_responses.extend(messages); - } - - info!("------------------------- Assert there are {unique_signers} unique signatures and recovered pubkeys -------------------------"); - - // Pick a message hash - let accepted = signer_accepted_responses - .iter() - .min_by_key(|accepted| accepted.signer_signature_hash) - .expect("No `BlockResponse::Accepted` messages recieved"); - let selected_sighash = accepted.signer_signature_hash; - - // Filter only resonses for selected block and collect unique pubkeys and signatures - let (pubkeys, signatures): (HashSet<_>, HashSet<_>) = signer_accepted_responses - .into_iter() - .filter(|accepted| accepted.signer_signature_hash == selected_sighash) - .map(|accepted| { - let pubkey = Secp256k1PublicKey::recover_to_pubkey( - accepted.signer_signature_hash.bits(), - &accepted.signature, - ) - .expect("Failed to recover pubkey"); - (pubkey, accepted.signature) - }) - .unzip(); - - assert_eq!(pubkeys.len(), unique_signers); - assert_eq!(signatures.len(), unique_signers); - - signer_test.shutdown(); -} - -/// This test involves two miners, each mining tenures with 6 blocks each. Half -/// of the signers are attached to each miner, so the test also verifies that -/// the signers' messages successfully make their way to the active miner. 
-#[test] -#[ignore] -fn multiple_miners_with_nakamoto_blocks() { let num_signers = 5; - let max_nakamoto_tenures = 20; - let inter_blocks_per_tenure = 5; - - // setup sender + recipient for a test stx transfer let sender_sk = Secp256k1PrivateKey::new(); let sender_addr = tests::to_addr(&sender_sk); - let send_amt = 1000; + let send_amt = 100; let send_fee = 180; - let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let btc_miner_1_seed = vec![1, 1, 1, 1]; let btc_miner_2_seed = vec![2, 2, 2, 2]; @@ -3883,12 +3835,10 @@ fn multiple_miners_with_nakamoto_blocks() { // partition the signer set so that ~half are listening and using node 1 for RPC and events, // and the rest are using node 2 + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, - vec![( - sender_addr, - (send_amt + send_fee) * max_nakamoto_tenures * inter_blocks_per_tenure, - )], + vec![(sender_addr, send_amt + send_fee)], |signer_config| { let node_host = if signer_config.endpoint.port() % 2 == 0 { &node_1_rpc_bind @@ -3902,14 +3852,17 @@ fn multiple_miners_with_nakamoto_blocks() { config.node.p2p_bind = format!("{localhost}:{node_1_p2p}"); config.node.data_url = format!("http://{localhost}:{node_1_rpc}"); config.node.p2p_address = format!("{localhost}:{node_1_p2p}"); - config.miner.wait_on_interim_blocks = Duration::from_secs(5); - config.node.pox_sync_sample_secs = 30; config.node.seed = btc_miner_1_seed.clone(); config.node.local_peer_seed = btc_miner_1_seed.clone(); config.burnchain.local_mining_public_key = Some(btc_miner_1_pk.to_hex()); config.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[1])); - + config.miner.pre_nakamoto_mock_signing = true; + let epochs = config.burnchain.epochs.as_mut().unwrap(); + epochs[StacksEpochId::Epoch25].end_height = 251; + epochs[StacksEpochId::Epoch30].start_height = 251; + epochs[StacksEpochId::Epoch30].end_height = 265; + epochs[StacksEpochId::Epoch31].start_height = 265; 
config.events_observers.retain(|listener| { let Ok(addr) = std::net::SocketAddr::from_str(&listener.endpoint) else { warn!( @@ -3928,10 +3881,9 @@ fn multiple_miners_with_nakamoto_blocks() { Some(vec![btc_miner_1_pk, btc_miner_2_pk]), None, ); - let blocks_mined1 = signer_test.running_nodes.nakamoto_blocks_mined.clone(); - let conf = signer_test.running_nodes.conf.clone(); let mut conf_node_2 = conf.clone(); + let localhost = "127.0.0.1"; conf_node_2.node.rpc_bind = format!("{localhost}:{node_2_rpc}"); conf_node_2.node.p2p_bind = format!("{localhost}:{node_2_p2p}"); conf_node_2.node.data_url = format!("http://{localhost}:{node_2_rpc}"); @@ -3956,575 +3908,407 @@ fn multiple_miners_with_nakamoto_blocks() { conf.burnchain.peer_version, ); - let http_origin = format!("http://{}", &conf.node.rpc_bind); - let mut run_loop_2 = boot_nakamoto::BootRunLoop::new(conf_node_2.clone()).unwrap(); - let run_loop_stopper_2 = run_loop_2.get_termination_switch(); - let rl2_coord_channels = run_loop_2.coordinator_channels(); - let Counters { - naka_submitted_commits: rl2_commits, - naka_mined_blocks: blocks_mined2, - .. 
- } = run_loop_2.counters(); - let run_loop_2_thread = thread::Builder::new() + let _run_loop_2_thread = thread::Builder::new() .name("run_loop_2".into()) .spawn(move || run_loop_2.start(None, 0)) .unwrap(); - signer_test.boot_to_epoch_3(); - - wait_for(120, || { - let Some(node_1_info) = get_chain_info_opt(&conf) else { - return Ok(false); - }; - let Some(node_2_info) = get_chain_info_opt(&conf_node_2) else { - return Ok(false); - }; - Ok(node_1_info.stacks_tip_height == node_2_info.stacks_tip_height) - }) - .expect("Timed out waiting for follower to catch up to the miner"); - - let pre_nakamoto_peer_1_height = get_chain_info(&conf).stacks_tip_height; - - info!("------------------------- Reached Epoch 3.0 -------------------------"); - - // due to the random nature of mining sortitions, the way this test is structured - // is that we keep track of how many tenures each miner produced, and once enough sortitions - // have been produced such that each miner has produced 3 tenures, we stop and check the - // results at the end - let rl1_coord_channels = signer_test.running_nodes.coord_channel.clone(); - let rl1_commits = signer_test.running_nodes.commits_submitted.clone(); - - let miner_1_pk = StacksPublicKey::from_private(conf.miner.mining_key.as_ref().unwrap()); - let miner_2_pk = StacksPublicKey::from_private(conf_node_2.miner.mining_key.as_ref().unwrap()); - let mut btc_blocks_mined = 1; - let mut miner_1_tenures = 0; - let mut miner_2_tenures = 0; - let mut sender_nonce = 0; - while !(miner_1_tenures >= 3 && miner_2_tenures >= 3) { - if btc_blocks_mined > max_nakamoto_tenures { - panic!("Produced {btc_blocks_mined} sortitions, but didn't cover the test scenarios, aborting"); - } - let blocks_processed_before = - blocks_mined1.load(Ordering::SeqCst) + blocks_mined2.load(Ordering::SeqCst); - signer_test.mine_block_wait_on_processing( - &[&rl1_coord_channels, &rl2_coord_channels], - &[&rl1_commits, &rl2_commits], - Duration::from_secs(30), - ); - btc_blocks_mined 
+= 1; - - // wait for the new block to be processed - wait_for(60, || { - let blocks_processed = - blocks_mined1.load(Ordering::SeqCst) + blocks_mined2.load(Ordering::SeqCst); - Ok(blocks_processed > blocks_processed_before) - }) + let epochs = signer_test + .running_nodes + .conf + .burnchain + .epochs + .clone() .unwrap(); + let epoch_3 = &epochs[StacksEpochId::Epoch30]; + let epoch_3_boundary = epoch_3.start_height - 1; // We only advance to the boundary as epoch 2.5 miner gets torn down at the boundary - info!( - "Nakamoto blocks mined: {}", - blocks_mined1.load(Ordering::SeqCst) + blocks_mined2.load(Ordering::SeqCst) - ); + signer_test.boot_to_epoch_25_reward_cycle(); - // mine the interim blocks - info!("Mining interim blocks"); - for interim_block_ix in 0..inter_blocks_per_tenure { - let blocks_processed_before = - blocks_mined1.load(Ordering::SeqCst) + blocks_mined2.load(Ordering::SeqCst); - // submit a tx so that the miner will mine an extra block - let transfer_tx = make_stacks_transfer( - &sender_sk, - sender_nonce, - send_fee, - signer_test.running_nodes.conf.burnchain.chain_id, - &recipient, - send_amt, - ); - sender_nonce += 1; - submit_tx(&http_origin, &transfer_tx); + info!("------------------------- Reached Epoch 2.5 Reward Cycle-------------------------"); - wait_for(60, || { - let blocks_processed = - blocks_mined1.load(Ordering::SeqCst) + blocks_mined2.load(Ordering::SeqCst); - Ok(blocks_processed > blocks_processed_before) - }) - .unwrap(); - info!("Mined interim block {btc_blocks_mined}:{interim_block_ix}"); - } + // Mine until epoch 3.0 and ensure that no more mock signatures are received + let reward_cycle = signer_test.get_current_reward_cycle(); + let signer_slot_ids: Vec<_> = signer_test + .get_signer_indices(reward_cycle) + .iter() + .map(|id| id.0) + .collect(); + let signer_public_keys = signer_test.get_signer_public_keys(reward_cycle); + assert_eq!(signer_slot_ids.len(), num_signers); - let blocks = get_nakamoto_headers(&conf); - let 
mut seen_burn_hashes = HashSet::new(); - miner_1_tenures = 0; - miner_2_tenures = 0; - for header in blocks.iter() { - if seen_burn_hashes.contains(&header.burn_header_hash) { - continue; - } - seen_burn_hashes.insert(header.burn_header_hash); + let miners_stackerdb_contract = boot_code_id(MINERS_NAME, false); - let header = header.anchored_header.as_stacks_nakamoto().unwrap(); - if miner_1_pk - .verify( - header.miner_signature_hash().as_bytes(), - &header.miner_signature, - ) - .unwrap() - { - miner_1_tenures += 1; - } - if miner_2_pk - .verify( - header.miner_signature_hash().as_bytes(), - &header.miner_signature, - ) - .unwrap() + // Only advance to the boundary as the epoch 2.5 miner will be shut down at this point. + while signer_test + .running_nodes + .btc_regtest_controller + .get_headers_height() + < epoch_3_boundary + { + let mut mock_block_mesage = None; + let mock_poll_time = Instant::now(); + signer_test + .running_nodes + .btc_regtest_controller + .build_next_block(1); + let current_burn_block_height = signer_test + .running_nodes + .btc_regtest_controller + .get_headers_height(); + debug!("Waiting for mock miner message for burn block height {current_burn_block_height}"); + while mock_block_mesage.is_none() { + std::thread::sleep(Duration::from_millis(100)); + let chunks = test_observer::get_stackerdb_chunks(); + for chunk in chunks + .into_iter() + .filter_map(|chunk| { + if chunk.contract_id != miners_stackerdb_contract { + return None; + } + Some(chunk.modified_slots) + }) + .flatten() { - miner_2_tenures += 1; + if chunk.data.is_empty() { + continue; + } + let SignerMessage::MockBlock(mock_block) = + SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + .expect("Failed to deserialize SignerMessage") + else { + continue; + }; + if mock_block.mock_proposal.peer_info.burn_block_height == current_burn_block_height + { + mock_block + .mock_signatures + .iter() + .for_each(|mock_signature| { + 
assert!(signer_public_keys.iter().any(|signer| { + mock_signature + .verify( + &StacksPublicKey::from_slice(signer.to_bytes().as_slice()) + .unwrap(), + ) + .expect("Failed to verify mock signature") + })); + }); + mock_block_mesage = Some(mock_block); + break; + } } + assert!( + mock_poll_time.elapsed() <= Duration::from_secs(15), + "Failed to find mock miner message within timeout" + ); } - info!("Miner 1 tenures: {miner_1_tenures}, Miner 2 tenures: {miner_2_tenures}"); } - - info!( - "New chain info 1: {:?}", - get_chain_info(&signer_test.running_nodes.conf) - ); - - info!("New chain info 2: {:?}", get_chain_info(&conf_node_2)); - - let peer_1_height = get_chain_info(&conf).stacks_tip_height; - let peer_2_height = get_chain_info(&conf_node_2).stacks_tip_height; - info!("Peer height information"; "peer_1" => peer_1_height, "peer_2" => peer_2_height, "pre_naka_height" => pre_nakamoto_peer_1_height); - assert_eq!(peer_1_height, peer_2_height); - assert_eq!( - peer_1_height, - pre_nakamoto_peer_1_height + (btc_blocks_mined - 1) * (inter_blocks_per_tenure + 1) - ); - assert_eq!(btc_blocks_mined, miner_1_tenures + miner_2_tenures); - rl2_coord_channels - .lock() - .expect("Mutex poisoned") - .stop_chains_coordinator(); - run_loop_stopper_2.store(false, Ordering::SeqCst); - run_loop_2_thread.join().unwrap(); - signer_test.shutdown(); } -/// This test involves two miners, 1 and 2. During miner 1's first tenure, miner -/// 2 is forced to ignore one of the blocks in that tenure. The next time miner -/// 2 mines a block, it should attempt to fork the chain at that point. The test -/// verifies that the fork is not successful and that miner 1 is able to -/// continue mining after this fork attempt. #[test] #[ignore] -fn partial_tenure_fork() { - if env::var("BITCOIND_TEST") != Ok("1".into()) { - return; - } +/// This test asserts that signer set rollover works as expected. 
+/// Specifically, if a new set of signers are registered for an upcoming reward cycle, +/// old signers shut down operation and the new signers take over with the commencement of +/// the next reward cycle. +fn signer_set_rollover() { + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + info!("------------------------- Test Setup -------------------------"); let num_signers = 5; - let max_nakamoto_tenures = 30; - let inter_blocks_per_tenure = 5; + let new_num_signers = 4; - // setup sender + recipient for a test stx transfer + let new_signer_private_keys: Vec<_> = (0..new_num_signers) + .map(|_| StacksPrivateKey::new()) + .collect(); + let new_signer_public_keys: Vec<_> = new_signer_private_keys + .iter() + .map(|sk| Secp256k1PublicKey::from_private(sk).to_bytes_compressed()) + .collect(); + let new_signer_addresses: Vec<_> = new_signer_private_keys.iter().map(tests::to_addr).collect(); let sender_sk = Secp256k1PrivateKey::new(); let sender_addr = tests::to_addr(&sender_sk); - let send_amt = 1000; + let send_amt = 100; let send_fee = 180; let recipient = PrincipalData::from(StacksAddress::burn_address(false)); - let btc_miner_1_seed = vec![1, 1, 1, 1]; - let btc_miner_2_seed = vec![2, 2, 2, 2]; - let btc_miner_1_pk = Keychain::default(btc_miner_1_seed.clone()).get_pub_key(); - let btc_miner_2_pk = Keychain::default(btc_miner_2_seed.clone()).get_pub_key(); + let mut initial_balances = new_signer_addresses + .iter() + .map(|addr| (*addr, POX_4_DEFAULT_STACKER_BALANCE)) + .collect::>(); - let node_1_rpc = gen_random_port(); - let node_1_p2p = gen_random_port(); - let node_2_rpc = gen_random_port(); - let node_2_p2p = gen_random_port(); + initial_balances.push((sender_addr, (send_amt + send_fee) * 4)); - let localhost = "127.0.0.1"; - let node_1_rpc_bind = format!("{localhost}:{node_1_rpc}"); + let run_stamp = rand::random(); - // All signers are listening to node 1 - let mut signer_test: SignerTest = 
SignerTest::new_with_config_modifications( - num_signers, - vec![( - sender_addr, - (send_amt + send_fee) * max_nakamoto_tenures * inter_blocks_per_tenure, - )], - |signer_config| { - signer_config.node_host = node_1_rpc_bind.clone(); - }, - |config| { - config.node.rpc_bind = format!("{localhost}:{node_1_rpc}"); - config.node.p2p_bind = format!("{localhost}:{node_1_p2p}"); - config.node.data_url = format!("http://{localhost}:{node_1_rpc}"); - config.node.p2p_address = format!("{localhost}:{node_1_p2p}"); - config.miner.wait_on_interim_blocks = Duration::from_secs(5); - config.node.pox_sync_sample_secs = 30; + let rpc_port = 51024; + let rpc_bind = format!("127.0.0.1:{rpc_port}"); - config.node.seed = btc_miner_1_seed.clone(); - config.node.local_peer_seed = btc_miner_1_seed.clone(); - config.burnchain.local_mining_public_key = Some(btc_miner_1_pk.to_hex()); - config.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[1])); + // Setup the new signers that will take over + let new_signer_configs = build_signer_config_tomls( + &new_signer_private_keys, + &rpc_bind, + Some(Duration::from_millis(128)), // Timeout defaults to 5 seconds. Let's override it to 128 milliseconds. + &Network::Testnet, + "12345", + run_stamp, + 3000 + num_signers, + Some(100_000), + None, + Some(9000 + num_signers), + None, + ); - // Increase the reward cycle length to avoid missing a prepare phase - // while we are intentionally forking. - config.burnchain.pox_reward_length = Some(40); - config.burnchain.pox_prepare_length = Some(10); + let new_spawned_signers: Vec<_> = new_signer_configs + .iter() + .map(|conf| { + info!("spawning signer"); + let signer_config = SignerConfig::load_from_str(conf).unwrap(); + SpawnedSigner::new(signer_config) + }) + .collect(); - // Move epoch 2.5 and 3.0 earlier, so we have more time for the - // test before re-stacking is required. 
- if let Some(epochs) = config.burnchain.epochs.as_mut() { - epochs[StacksEpochId::Epoch24].end_height = 131; - epochs[StacksEpochId::Epoch25].start_height = 131; - epochs[StacksEpochId::Epoch25].end_height = 166; - epochs[StacksEpochId::Epoch30].start_height = 166; - } else { - panic!("Expected epochs to be set"); + // Boot with some initial signer set + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + initial_balances, + |_| {}, + |naka_conf| { + for toml in new_signer_configs.clone() { + let signer_config = SignerConfig::load_from_str(&toml).unwrap(); + info!( + "---- Adding signer endpoint to naka conf ({}) ----", + signer_config.endpoint + ); + + naka_conf.events_observers.insert(EventObserverConfig { + endpoint: format!("{}", signer_config.endpoint), + events_keys: vec![ + EventKeyType::StackerDBChunks, + EventKeyType::BlockProposal, + EventKeyType::BurnchainBlocks, + ], + timeout_ms: 1000, + }); } + naka_conf.node.rpc_bind = rpc_bind.clone(); }, - Some(vec![btc_miner_1_pk, btc_miner_2_pk]), + None, None, ); - let blocks_mined1 = signer_test.running_nodes.nakamoto_blocks_mined.clone(); - - let conf = signer_test.running_nodes.conf.clone(); - let mut conf_node_2 = conf.clone(); - conf_node_2.node.rpc_bind = format!("{localhost}:{node_2_rpc}"); - conf_node_2.node.p2p_bind = format!("{localhost}:{node_2_p2p}"); - conf_node_2.node.data_url = format!("http://{localhost}:{node_2_rpc}"); - conf_node_2.node.p2p_address = format!("{localhost}:{node_2_p2p}"); - conf_node_2.node.seed = btc_miner_2_seed.clone(); - conf_node_2.burnchain.local_mining_public_key = Some(btc_miner_2_pk.to_hex()); - conf_node_2.node.local_peer_seed = btc_miner_2_seed.clone(); - conf_node_2.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[2])); - conf_node_2.node.miner = true; - conf_node_2.events_observers.clear(); - - let node_1_sk = Secp256k1PrivateKey::from_seed(&conf.node.local_peer_seed); - let node_1_pk = 
StacksPublicKey::from_private(&node_1_sk); - - conf_node_2.node.working_dir = format!("{}-1", conf_node_2.node.working_dir); - - conf_node_2.node.set_bootstrap_nodes( - format!("{}@{}", &node_1_pk.to_hex(), conf.node.p2p_bind), - conf.burnchain.chain_id, - conf.burnchain.peer_version, + assert_eq!( + new_spawned_signers[0].config.node_host, + signer_test.running_nodes.conf.node.rpc_bind ); + // Only stack for one cycle so that the signer set changes + signer_test.num_stacking_cycles = 1_u64; - let http_origin = format!("http://{}", &conf.node.rpc_bind); - - let mut run_loop_2 = boot_nakamoto::BootRunLoop::new(conf_node_2.clone()).unwrap(); - let rl2_coord_channels = run_loop_2.coordinator_channels(); - let run_loop_stopper_2 = run_loop_2.get_termination_switch(); - let Counters { - naka_mined_blocks: blocks_mined2, - naka_proposed_blocks: blocks_proposed2, - .. - } = run_loop_2.counters(); + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + let short_timeout = Duration::from_secs(20); - signer_test.boot_to_epoch_3(); - let run_loop_2_thread = thread::Builder::new() - .name("run_loop_2".into()) - .spawn(move || run_loop_2.start(None, 0)) - .unwrap(); + // Verify that naka_conf has our new signer's event observers + for toml in &new_signer_configs { + let signer_config = SignerConfig::load_from_str(toml).unwrap(); + let endpoint = format!("{}", signer_config.endpoint); + assert!(signer_test + .running_nodes + .conf + .events_observers + .iter() + .any(|observer| observer.endpoint == endpoint)); + } - let pre_nakamoto_peer_1_height = get_chain_info(&conf).stacks_tip_height; + // Advance to the first reward cycle, stacking to the old signers beforehand - wait_for(200, || { - let Some(node_1_info) = get_chain_info_opt(&conf) else { - return Ok(false); - }; - let Some(node_2_info) = get_chain_info_opt(&conf_node_2) else { - return Ok(false); - }; - Ok(node_1_info.stacks_tip_height == node_2_info.stacks_tip_height) - }) - 
.expect("Timed out waiting for follower to catch up to the miner"); + info!("---- Booting to epoch 3 -----"); + signer_test.boot_to_epoch_3(); - info!("------------------------- Reached Epoch 3.0 -------------------------"); + // verify that the first reward cycle has the old signers in the reward set + let reward_cycle = signer_test.get_current_reward_cycle(); + let signer_test_public_keys: Vec<_> = signer_test + .signer_stacks_private_keys + .iter() + .map(|sk| Secp256k1PublicKey::from_private(sk).to_bytes_compressed()) + .collect(); - // due to the random nature of mining sortitions, the way this test is structured - // is that we keep track of how many tenures each miner produced, and once enough sortitions - // have been produced such that each miner has produced 3 tenures, we stop and check the - // results at the end - let mut btc_blocks_mined = 0; - let mut miner_1_tenures = 0u64; - let mut miner_2_tenures = 0u64; - let mut fork_initiated = false; - let mut min_miner_1_tenures = u64::MAX; - let mut min_miner_2_tenures = u64::MAX; - let mut ignore_block = 0; + info!("---- Verifying that the current signers are the old signers ----"); + let current_signers = signer_test.get_reward_set_signers(reward_cycle); + assert_eq!(current_signers.len(), num_signers); + // Verify that the current signers are the same as the old signers + for signer in current_signers.iter() { + assert!(signer_test_public_keys.contains(&signer.signing_key.to_vec())); + assert!(!new_signer_public_keys.contains(&signer.signing_key.to_vec())); + } - let mut miner_1_blocks = 0; - let mut miner_2_blocks = 0; - let mut min_miner_2_blocks = 0; + info!("---- Mining a block to trigger the signer set -----"); + // submit a tx so that the miner will mine an extra block + let sender_nonce = 0; + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); + submit_tx(&http_origin, &transfer_tx); + 
signer_test.mine_nakamoto_block(short_timeout, true); + let mined_block = test_observer::get_mined_nakamoto_blocks().pop().unwrap(); + let block_sighash = mined_block.signer_signature_hash; + let signer_signatures = mined_block.signer_signature; - while miner_1_tenures < min_miner_1_tenures || miner_2_tenures < min_miner_2_tenures { - if btc_blocks_mined >= max_nakamoto_tenures { - panic!("Produced {btc_blocks_mined} sortitions, but didn't cover the test scenarios, aborting"); - } + // verify the mined_block signatures against the OLD signer set + for signature in signer_signatures.iter() { + let pk = Secp256k1PublicKey::recover_to_pubkey(block_sighash.bits(), signature) + .expect("FATAL: Failed to recover pubkey from block sighash"); + assert!(signer_test_public_keys.contains(&pk.to_bytes_compressed())); + assert!(!new_signer_public_keys.contains(&pk.to_bytes_compressed())); + } - // Mine a block and wait for it to be processed, unless we are in a - // forked tenure, in which case, just wait for the block proposal - let mined_before_1 = blocks_mined1.load(Ordering::SeqCst); - let mined_before_2 = blocks_mined2.load(Ordering::SeqCst); - let proposed_before_2 = blocks_proposed2.load(Ordering::SeqCst); - let proposed_before_1 = signer_test - .running_nodes - .nakamoto_blocks_proposed - .load(Ordering::SeqCst); + // advance to the next reward cycle, stacking to the new signers beforehand + let reward_cycle = signer_test.get_current_reward_cycle(); - sleep_ms(1000); + info!("---- Stacking new signers -----"); - info!( - "Next tenure checking"; - "fork_initiated?" 
=> fork_initiated, - "miner_1_tenures" => miner_1_tenures, - "miner_2_tenures" => miner_2_tenures, - "min_miner_1_tenures" => min_miner_2_tenures, - "min_miner_2_tenures" => min_miner_2_tenures, - "proposed_before_1" => proposed_before_1, - "proposed_before_2" => proposed_before_2, - "mined_before_1" => mined_before_1, - "mined_before_2" => mined_before_2, + let burn_block_height = signer_test + .running_nodes + .btc_regtest_controller + .get_headers_height(); + let accounts_to_check: Vec<_> = new_signer_private_keys.iter().map(tests::to_addr).collect(); + for stacker_sk in new_signer_private_keys.iter() { + let pox_addr = PoxAddress::from_legacy( + AddressHashMode::SerializeP2PKH, + tests::to_addr(stacker_sk).bytes, ); - - next_block_and( - &mut signer_test.running_nodes.btc_regtest_controller, - 60, - || { - let mined_1 = blocks_mined1.load(Ordering::SeqCst); - let mined_2 = blocks_mined2.load(Ordering::SeqCst); - let proposed_2 = blocks_proposed2.load(Ordering::SeqCst); - - Ok((fork_initiated && proposed_2 > proposed_before_2) - || mined_1 > mined_before_1 - || mined_2 > mined_before_2) - }, + let pox_addr_tuple: clarity::vm::Value = + pox_addr.clone().as_clarity_tuple().unwrap().into(); + let signature = make_pox_4_signer_key_signature( + &pox_addr, + stacker_sk, + reward_cycle.into(), + &Pox4SignatureTopic::StackStx, + CHAIN_ID_TESTNET, + 1_u128, + u128::MAX, + 1, ) - .unwrap_or_else(|_| { - let mined_1 = blocks_mined1.load(Ordering::SeqCst); - let mined_2 = blocks_mined2.load(Ordering::SeqCst); - let proposed_1 = signer_test - .running_nodes - .nakamoto_blocks_proposed - .load(Ordering::SeqCst); - let proposed_2 = blocks_proposed2.load(Ordering::SeqCst); - error!( - "Next tenure failed to tick"; - "fork_initiated?" 
=> fork_initiated, - "miner_1_tenures" => miner_1_tenures, - "miner_2_tenures" => miner_2_tenures, - "min_miner_1_tenures" => min_miner_2_tenures, - "min_miner_2_tenures" => min_miner_2_tenures, - "proposed_before_1" => proposed_before_1, - "proposed_before_2" => proposed_before_2, - "mined_before_1" => mined_before_1, - "mined_before_2" => mined_before_2, - "mined_1" => mined_1, - "mined_2" => mined_2, - "proposed_1" => proposed_1, - "proposed_2" => proposed_2, - ); - panic!(); - }); - btc_blocks_mined += 1; - - let mined_1 = blocks_mined1.load(Ordering::SeqCst); - let miner = if mined_1 > mined_before_1 { 1 } else { 2 }; - - if miner == 1 && miner_1_tenures == 0 { - // Setup miner 2 to ignore a block in this tenure - ignore_block = pre_nakamoto_peer_1_height - + (btc_blocks_mined - 1) * (inter_blocks_per_tenure + 1) - + 3; - set_ignore_block(ignore_block, &conf_node_2.node.working_dir); + .unwrap() + .to_rsv(); - // Ensure that miner 2 runs at least one more tenure - min_miner_2_tenures = miner_2_tenures + 1; - fork_initiated = true; - min_miner_2_blocks = miner_2_blocks; - } - if miner == 2 && miner_2_tenures == min_miner_2_tenures { - // This is the forking tenure. Ensure that miner 1 runs one more - // tenure after this to validate that it continues to build off of - // the proper block. 
- min_miner_1_tenures = miner_1_tenures + 1; - } + let signer_pk = Secp256k1PublicKey::from_private(stacker_sk); + let stacking_tx = tests::make_contract_call( + stacker_sk, + 0, + 1000, + signer_test.running_nodes.conf.burnchain.chain_id, + &StacksAddress::burn_address(false), + "pox-4", + "stack-stx", + &[ + clarity::vm::Value::UInt(POX_4_DEFAULT_STACKER_STX_AMT), + pox_addr_tuple.clone(), + clarity::vm::Value::UInt(burn_block_height as u128), + clarity::vm::Value::UInt(1), + clarity::vm::Value::some(clarity::vm::Value::buff_from(signature).unwrap()) + .unwrap(), + clarity::vm::Value::buff_from(signer_pk.to_bytes_compressed()).unwrap(), + clarity::vm::Value::UInt(u128::MAX), + clarity::vm::Value::UInt(1), + ], + ); + submit_tx(&http_origin, &stacking_tx); + } - let mut blocks = inter_blocks_per_tenure; - // mine (or attempt to mine) the interim blocks - for interim_block_ix in 0..inter_blocks_per_tenure { - let mined_before_1 = blocks_mined1.load(Ordering::SeqCst); - let mined_before_2 = blocks_mined2.load(Ordering::SeqCst); - let proposed_before_2 = blocks_proposed2.load(Ordering::SeqCst); + wait_for(60, || { + Ok(accounts_to_check + .iter() + .all(|acct| get_account(&http_origin, acct).nonce >= 1)) + }) + .expect("Timed out waiting for stacking txs to be mined"); - info!( - "Mining interim blocks"; - "fork_initiated?" 
=> fork_initiated, - "miner_1_tenures" => miner_1_tenures, - "miner_2_tenures" => miner_2_tenures, - "min_miner_1_tenures" => min_miner_2_tenures, - "min_miner_2_tenures" => min_miner_2_tenures, - "proposed_before_2" => proposed_before_2, - "mined_before_1" => mined_before_1, - "mined_before_2" => mined_before_2, - ); + signer_test.mine_nakamoto_block(short_timeout, true); - // submit a tx so that the miner will mine an extra block - let sender_nonce = (btc_blocks_mined - 1) * inter_blocks_per_tenure + interim_block_ix; - let transfer_tx = make_stacks_transfer( - &sender_sk, - sender_nonce, - send_fee, - signer_test.running_nodes.conf.burnchain.chain_id, - &recipient, - send_amt, - ); - // This may fail if the forking miner wins too many tenures and this account's - // nonces get too high (TooMuchChaining) - match submit_tx_fallible(&http_origin, &transfer_tx) { - Ok(_) => { - wait_for(60, || { - let mined_1 = blocks_mined1.load(Ordering::SeqCst); - let mined_2 = blocks_mined2.load(Ordering::SeqCst); - let proposed_2 = blocks_proposed2.load(Ordering::SeqCst); + let next_reward_cycle = reward_cycle.saturating_add(1); - Ok((fork_initiated && proposed_2 > proposed_before_2) - || mined_1 > mined_before_1 - || mined_2 > mined_before_2) - }) - .unwrap_or_else(|_| { - let mined_1 = blocks_mined1.load(Ordering::SeqCst); - let mined_2 = blocks_mined2.load(Ordering::SeqCst); - let proposed_1 = signer_test - .running_nodes - .nakamoto_blocks_proposed - .load(Ordering::SeqCst); - let proposed_2 = blocks_proposed2.load(Ordering::SeqCst); - error!( - "Next tenure failed to tick"; - "fork_initiated?" 
=> fork_initiated, - "miner_1_tenures" => miner_1_tenures, - "miner_2_tenures" => miner_2_tenures, - "min_miner_1_tenures" => min_miner_2_tenures, - "min_miner_2_tenures" => min_miner_2_tenures, - "proposed_before_1" => proposed_before_1, - "proposed_before_2" => proposed_before_2, - "mined_before_1" => mined_before_1, - "mined_before_2" => mined_before_2, - "mined_1" => mined_1, - "mined_2" => mined_2, - "proposed_1" => proposed_1, - "proposed_2" => proposed_2, - ); - panic!(); - }); - } - Err(e) => { - if e.to_string().contains("TooMuchChaining") { - info!("TooMuchChaining error, skipping block"); - blocks = interim_block_ix; - break; - } else { - panic!("Failed to submit tx: {e}"); - } - } - } - info!("Attempted to mine interim block {btc_blocks_mined}:{interim_block_ix}"); - } + let next_cycle_height = signer_test + .running_nodes + .btc_regtest_controller + .get_burnchain() + .nakamoto_first_block_of_cycle(next_reward_cycle) + .saturating_add(1); - if miner == 1 { - miner_1_tenures += 1; - miner_1_blocks += blocks; - } else { - miner_2_tenures += 1; - miner_2_blocks += blocks; - } + info!("---- Mining to next reward set calculation -----"); + signer_test.run_until_burnchain_height_nakamoto( + Duration::from_secs(60), + next_cycle_height.saturating_sub(3), + new_num_signers, + ); - let mined_1 = blocks_mined1.load(Ordering::SeqCst); - let mined_2 = blocks_mined2.load(Ordering::SeqCst); + // Verify that the new reward set is the new signers + let reward_set = signer_test.get_reward_set_signers(next_reward_cycle); + for signer in reward_set.iter() { + assert!(!signer_test_public_keys.contains(&signer.signing_key.to_vec())); + assert!(new_signer_public_keys.contains(&signer.signing_key.to_vec())); + } - info!( - "Miner 1 tenures: {miner_1_tenures}, Miner 2 tenures: {miner_2_tenures}, Miner 1 before: {mined_before_1}, Miner 2 before: {mined_before_2}, Miner 1 blocks: {mined_1}, Miner 2 blocks: {mined_2}", - ); + info!("---- Mining to the next reward cycle (block 
{next_cycle_height}) -----",); + signer_test.run_until_burnchain_height_nakamoto( + Duration::from_secs(60), + next_cycle_height, + new_num_signers, + ); + let new_reward_cycle = signer_test.get_current_reward_cycle(); + assert_eq!(new_reward_cycle, reward_cycle.saturating_add(1)); - if miner == 1 { - assert_eq!(mined_1, mined_before_1 + blocks + 1); - } else if miner_2_tenures < min_miner_2_tenures { - assert_eq!(mined_2, mined_before_2 + blocks + 1); - } else { - // Miner 2 should have mined 0 blocks after the fork - assert_eq!(mined_2, mined_before_2); - } + info!("---- Verifying that the current signers are the new signers ----"); + let current_signers = signer_test.get_reward_set_signers(new_reward_cycle); + assert_eq!(current_signers.len(), new_num_signers); + for signer in current_signers.iter() { + assert!(!signer_test_public_keys.contains(&signer.signing_key.to_vec())); + assert!(new_signer_public_keys.contains(&signer.signing_key.to_vec())); } - info!( - "New chain info 1: {:?}", - get_chain_info(&signer_test.running_nodes.conf) + info!("---- Mining a block to verify new signer set -----"); + let sender_nonce = 1; + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, ); + submit_tx(&http_origin, &transfer_tx); + signer_test.mine_nakamoto_block(short_timeout, true); + let mined_block = test_observer::get_mined_nakamoto_blocks().pop().unwrap(); - info!("New chain info 2: {:?}", get_chain_info(&conf_node_2)); - - let peer_1_height = get_chain_info(&conf).stacks_tip_height; - let peer_2_height = get_chain_info(&conf_node_2).stacks_tip_height; - assert_eq!(peer_2_height, ignore_block - 1); - // The height may be higher than expected due to extra transactions waiting - // to be mined during the forking miner's tenure. 
- // We cannot guarantee due to TooMuchChaining that the miner will mine inter_blocks_per_tenure - // Must be at least the number of blocks mined by miner 1 and the number of blocks mined by miner 2 - // before the fork was initiated - assert!(peer_1_height >= pre_nakamoto_peer_1_height + miner_1_blocks + min_miner_2_blocks); - assert_eq!(btc_blocks_mined, miner_1_tenures + miner_2_tenures); + info!("---- Verifying that the new signers signed the block -----"); + let signer_signatures = mined_block.signer_signature; - let sortdb = SortitionDB::open( - &conf_node_2.get_burn_db_file_path(), - false, - conf_node_2.get_burnchain().pox_constants, - ) - .unwrap(); + // verify the mined_block signatures against the NEW signer set + for signature in signer_signatures.iter() { + let pk = Secp256k1PublicKey::recover_to_pubkey(block_sighash.bits(), signature) + .expect("FATAL: Failed to recover pubkey from block sighash"); + assert!(!signer_test_public_keys.contains(&pk.to_bytes_compressed())); + assert!(new_signer_public_keys.contains(&pk.to_bytes_compressed())); + } - let (chainstate, _) = StacksChainState::open( - false, - conf_node_2.burnchain.chain_id, - &conf_node_2.get_chainstate_path_str(), - None, - ) - .unwrap(); - let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) - .unwrap() - .unwrap(); - assert_eq!(tip.stacks_block_height, ignore_block - 1); - rl2_coord_channels - .lock() - .expect("Mutex poisoned") - .stop_chains_coordinator(); - run_loop_stopper_2.store(false, Ordering::SeqCst); - run_loop_2_thread.join().unwrap(); signer_test.shutdown(); + for signer in new_spawned_signers { + assert!(signer.stop().is_none()); + } } #[test] #[ignore] -/// Test that signers that accept a block locally, but that was rejected globally will accept a subsequent attempt -/// by the miner essentially reorg their prior locally accepted/signed block, i.e. the globally rejected block overrides -/// their local view. 
-/// -/// Test Setup: -/// The test spins up five stacks signers, one miner Nakamoto node, and a corresponding bitcoind. -/// The stacks node is then advanced to Epoch 3.0 boundary to allow block signing. -/// -/// Test Execution: -/// The node mines 1 stacks block N (all signers sign it). The subsequent block N+1 is proposed, but rejected by >30% of the signers. -/// The miner then attempts to mine N+1', and all signers accept the block. -/// -/// Test Assertion: -/// Stacks tip advances to N+1' -fn locally_accepted_blocks_overriden_by_global_rejection() { +/// This test checks that the miners and signers will not produce Nakamoto blocks +/// until the minimum time has passed between blocks. +fn min_gap_between_blocks() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } @@ -4540,178 +4324,107 @@ fn locally_accepted_blocks_overriden_by_global_rejection() { let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; - let nmb_txs = 3; + + let interim_blocks = 5; let recipient = PrincipalData::from(StacksAddress::burn_address(false)); - let short_timeout_secs = 20; - let mut signer_test: SignerTest = SignerTest::new( + let time_between_blocks_ms = 10_000; + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, - vec![(sender_addr, (send_amt + send_fee) * nmb_txs)], + vec![(sender_addr, (send_amt + send_fee) * interim_blocks)], + |_config| {}, + |config| { + config.miner.min_time_between_blocks_ms = time_between_blocks_ms; + }, + None, + None, ); - let all_signers: Vec<_> = signer_test - .signer_stacks_private_keys - .iter() - .map(StacksPublicKey::from_private) - .collect(); - let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); - signer_test.boot_to_epoch_3(); - - info!("------------------------- Test Mine Nakamoto Block N -------------------------"); - let info_before = signer_test.stacks_client.get_peer_info().unwrap(); - let mined_blocks = 
signer_test.running_nodes.nakamoto_blocks_mined.clone(); - let blocks_before = mined_blocks.load(Ordering::SeqCst); - // submit a tx so that the miner will mine a stacks block - let mut sender_nonce = 0; - let transfer_tx = make_stacks_transfer( - &sender_sk, - sender_nonce, - send_fee, - signer_test.running_nodes.conf.burnchain.chain_id, - &recipient, - send_amt, - ); - let tx = submit_tx(&http_origin, &transfer_tx); - info!("Submitted tx {tx} in to mine block N"); - wait_for(short_timeout_secs, || { - Ok(mined_blocks.load(Ordering::SeqCst) > blocks_before - && signer_test - .stacks_client - .get_peer_info() - .unwrap() - .stacks_tip_height - > info_before.stacks_tip_height) - }) - .expect("Timed out waiting for stacks block N to be mined"); - sender_nonce += 1; - let info_after = signer_test.stacks_client.get_peer_info().unwrap(); - assert_eq!( - info_before.stacks_tip_height + 1, - info_after.stacks_tip_height - ); - let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); - let block_n = nakamoto_blocks.last().unwrap(); - assert_eq!(info_after.stacks_tip.to_string(), block_n.block_hash); - signer_test - .wait_for_block_acceptance( - short_timeout_secs, - &block_n.signer_signature_hash, - &all_signers, - ) - .expect("Timed out waiting for block acceptance of N"); - - info!("------------------------- Attempt to Mine Nakamoto Block N+1 -------------------------"); - // Make half of the signers reject the block proposal by the miner to ensure its marked globally rejected - let rejecting_signers: Vec<_> = all_signers - .iter() - .cloned() - .take(num_signers / 2 + num_signers % 2) - .collect(); - TEST_REJECT_ALL_BLOCK_PROPOSAL - .lock() - .unwrap() - .replace(rejecting_signers.clone()); - test_observer::clear(); - // Make a new stacks transaction to create a different block signature, but make sure to propose it - // AFTER the signers are unfrozen so they don't inadvertently prevent the new block being accepted - let transfer_tx = make_stacks_transfer( - 
&sender_sk, - sender_nonce, - send_fee, - signer_test.running_nodes.conf.burnchain.chain_id, - &recipient, - send_amt, - ); - let tx = submit_tx(&http_origin, &transfer_tx); - sender_nonce += 1; - info!("Submitted tx {tx} to mine block N+1"); - - let blocks_before = mined_blocks.load(Ordering::SeqCst); - let info_before = signer_test.stacks_client.get_peer_info().unwrap(); - // We cannot gaurantee that ALL signers will reject due to the testing directive as we may hit majority first..So ensure that we only assert that up to the threshold number rejected - signer_test - .wait_for_block_rejections(short_timeout_secs, &rejecting_signers) - .expect("Timed out waiting for block rejection of N+1"); - assert_eq!(blocks_before, mined_blocks.load(Ordering::SeqCst)); - let info_after = signer_test.stacks_client.get_peer_info().unwrap(); - assert_eq!(info_before, info_after); - // Ensure that the block was not accepted globally so the stacks tip has not advanced to N+1 - let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); - let block_n_1 = nakamoto_blocks.last().unwrap(); - assert_ne!(block_n_1, block_n); + signer_test.boot_to_epoch_3(); - info!("------------------------- Test Mine Nakamoto Block N+1' -------------------------"); - let info_before = signer_test.stacks_client.get_peer_info().unwrap(); - TEST_REJECT_ALL_BLOCK_PROPOSAL - .lock() - .unwrap() - .replace(Vec::new()); + info!("Ensure that the first Nakamoto block was mined"); + let blocks = get_nakamoto_headers(&signer_test.running_nodes.conf); + assert_eq!(blocks.len(), 1); + // mine the interim blocks + info!("Mining interim blocks"); + for interim_block_ix in 0..interim_blocks { + let blocks_processed_before = signer_test + .running_nodes + .nakamoto_blocks_mined + .load(Ordering::SeqCst); + // submit a tx so that the miner will mine an extra block + let transfer_tx = make_stacks_transfer( + &sender_sk, + interim_block_ix, // same as the sender nonce + send_fee, + 
signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); + submit_tx(&http_origin, &transfer_tx); - let transfer_tx = make_stacks_transfer( - &sender_sk, - sender_nonce, - send_fee, - signer_test.running_nodes.conf.burnchain.chain_id, - &recipient, - send_amt, - ); - let tx = submit_tx(&http_origin, &transfer_tx); - info!("Submitted tx {tx} to mine block N+1'"); + info!("Submitted transfer tx and waiting for block to be processed"); + wait_for(60, || { + let blocks_processed = signer_test + .running_nodes + .nakamoto_blocks_mined + .load(Ordering::SeqCst); + Ok(blocks_processed > blocks_processed_before) + }) + .unwrap(); + info!("Mined interim block:{interim_block_ix}"); + } - wait_for(short_timeout_secs, || { - Ok(mined_blocks.load(Ordering::SeqCst) > blocks_before - && signer_test - .stacks_client - .get_peer_info() - .unwrap() - .stacks_tip_height - > info_before.stacks_tip_height - && test_observer::get_mined_nakamoto_blocks().last().unwrap() != block_n_1) + wait_for(60, || { + let new_blocks = get_nakamoto_headers(&signer_test.running_nodes.conf); + Ok(new_blocks.len() == blocks.len() + interim_blocks as usize) }) - .expect("Timed out waiting for stacks block N+1' to be mined"); - let blocks_after = mined_blocks.load(Ordering::SeqCst); - assert_eq!(blocks_after, blocks_before + 1); + .unwrap(); - let info_after = signer_test.stacks_client.get_peer_info().unwrap(); - assert_eq!( - info_after.stacks_tip_height, - info_before.stacks_tip_height + 1 - ); - // Ensure that the block was accepted globally so the stacks tip has advanced to N+1' - let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); - let block_n_1_prime = nakamoto_blocks.last().unwrap(); - assert_eq!( - info_after.stacks_tip.to_string(), - block_n_1_prime.block_hash - ); - assert_ne!(block_n_1_prime, block_n_1); - // Verify that all signers accepted the new block proposal - signer_test - .wait_for_block_acceptance( - short_timeout_secs, - 
&block_n_1_prime.signer_signature_hash, - &all_signers, - ) - .expect("Timed out waiting for block acceptance of N+1'"); + // Verify that every Nakamoto block is mined after the gap is exceeded between each + let mut blocks = get_nakamoto_headers(&signer_test.running_nodes.conf); + blocks.sort_by(|a, b| a.stacks_block_height.cmp(&b.stacks_block_height)); + for i in 1..blocks.len() { + let block = &blocks[i]; + let parent_block = &blocks[i - 1]; + assert_eq!( + block.stacks_block_height, + parent_block.stacks_block_height + 1 + ); + info!( + "Checking that the time between blocks {} and {} is respected", + parent_block.stacks_block_height, block.stacks_block_height + ); + let block_time = block + .anchored_header + .as_stacks_nakamoto() + .unwrap() + .timestamp; + let parent_block_time = parent_block + .anchored_header + .as_stacks_nakamoto() + .unwrap() + .timestamp; + assert!( + block_time > parent_block_time, + "Block time is BEFORE parent block time" + ); + assert!( + Duration::from_secs(block_time - parent_block_time) + >= Duration::from_millis(time_between_blocks_ms), + "Block mined before gap was exceeded: {block_time}s - {parent_block_time}s > {time_between_blocks_ms}ms", + ); + } + debug!("Shutting down min_gap_between_blocks test"); + signer_test.shutdown(); } #[test] #[ignore] -/// Test that signers that reject a block locally, but that was accepted globally will accept -/// a subsequent block built on top of the accepted block -/// -/// Test Setup: -/// The test spins up five stacks signers, one miner Nakamoto node, and a corresponding bitcoind. -/// The stacks node is then advanced to Epoch 3.0 boundary to allow block signing. -/// -/// Test Execution: -/// The node mines 1 stacks block N (all signers sign it). The subsequent block N+1 is proposed, but rejected by <30% of the signers. -/// The miner then attempts to mine N+2, and all signers accept the block. 
-/// -/// Test Assertion: -/// Stacks tip advances to N+2 -fn locally_rejected_blocks_overriden_by_global_acceptance() { +/// Test scenario where there are duplicate signers with the same private key +/// First submitted signature should take precedence +fn duplicate_signers() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } @@ -4721,450 +4434,3135 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { .with(EnvFilter::from_default_env()) .init(); - info!("------------------------- Test Setup -------------------------"); - let num_signers = 5; - let sender_sk = Secp256k1PrivateKey::new(); - let sender_addr = tests::to_addr(&sender_sk); - let send_amt = 100; - let send_fee = 180; - let nmb_txs = 3; + // Disable p2p broadcast of the nakamoto blocks, so that we rely + // on the signer's using StackerDB to get pushed blocks + *nakamoto_node::miner::TEST_SKIP_P2P_BROADCAST + .lock() + .unwrap() = Some(true); - let recipient = PrincipalData::from(StacksAddress::burn_address(false)); - let mut signer_test: SignerTest = SignerTest::new( - num_signers, - vec![(sender_addr, (send_amt + send_fee) * nmb_txs)], + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let mut signer_stacks_private_keys = (0..num_signers) + .map(|_| StacksPrivateKey::new()) + .collect::>(); + + // First two signers have same private key + signer_stacks_private_keys[1] = signer_stacks_private_keys[0]; + let unique_signers = num_signers - 1; + let duplicate_pubkey = Secp256k1PublicKey::from_private(&signer_stacks_private_keys[0]); + let duplicate_pubkey_from_copy = + Secp256k1PublicKey::from_private(&signer_stacks_private_keys[1]); + assert_eq!( + duplicate_pubkey, duplicate_pubkey_from_copy, + "Recovered pubkeys don't match" ); - let all_signers: Vec<_> = signer_test - .signer_stacks_private_keys - .iter() - .map(StacksPublicKey::from_private) - .collect(); + let mut signer_test: SignerTest = 
SignerTest::new_with_config_modifications( + num_signers, + vec![], + |_| {}, + |_| {}, + None, + Some(signer_stacks_private_keys), + ); - let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); - let short_timeout = 30; signer_test.boot_to_epoch_3(); + let timeout = Duration::from_secs(30); - info!("------------------------- Test Mine Nakamoto Block N -------------------------"); - let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone(); - let info_before = signer_test - .stacks_client - .get_peer_info() - .expect("Failed to get peer info"); - - // submit a tx so that the miner will mine a stacks block N - let mut sender_nonce = 0; - let transfer_tx = make_stacks_transfer( - &sender_sk, - sender_nonce, - send_fee, - signer_test.running_nodes.conf.burnchain.chain_id, - &recipient, - send_amt, - ); - let tx = submit_tx(&http_origin, &transfer_tx); - sender_nonce += 1; - info!("Submitted tx {tx} in to mine block N"); + info!("------------------------- Try mining one block -------------------------"); - wait_for(short_timeout, || { - Ok(signer_test - .stacks_client - .get_peer_info() - .expect("Failed to get peer info") - .stacks_tip_height - > info_before.stacks_tip_height) - }) - .expect("Timed out waiting for N to be mined and processed"); + signer_test.mine_and_verify_confirmed_naka_block(timeout, num_signers, true); - let info_after = signer_test - .stacks_client - .get_peer_info() - .expect("Failed to get peer info"); - assert_eq!( - info_before.stacks_tip_height + 1, - info_after.stacks_tip_height - ); + info!("------------------------- Read all `BlockResponse::Accepted` messages -------------------------"); - // Ensure that the block was accepted globally so the stacks tip has advanced to N - let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); - let block_n = nakamoto_blocks.last().unwrap(); - assert_eq!(info_after.stacks_tip.to_string(), block_n.block_hash); + let mut signer_accepted_responses = 
vec![]; + let start_polling = Instant::now(); + while start_polling.elapsed() <= timeout { + std::thread::sleep(Duration::from_secs(1)); + let messages = test_observer::get_stackerdb_chunks() + .into_iter() + .flat_map(|chunk| chunk.modified_slots) + .filter_map(|chunk| { + SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()).ok() + }) + .filter_map(|message| match message { + SignerMessage::BlockResponse(BlockResponse::Accepted(m)) => { + info!("Message(accepted): {m:?}"); + Some(m) + } + _ => { + debug!("Message(ignored): {message:?}"); + None + } + }); + signer_accepted_responses.extend(messages); + } - // Make sure that ALL signers accepted the block proposal - signer_test - .wait_for_block_acceptance(short_timeout, &block_n.signer_signature_hash, &all_signers) - .expect("Timed out waiting for block acceptance of N"); + info!("------------------------- Assert there are {unique_signers} unique signatures and recovered pubkeys -------------------------"); - info!("------------------------- Mine Nakamoto Block N+1 -------------------------"); - // Make less than 30% of the signers reject the block and ensure it is STILL marked globally accepted - let rejecting_signers: Vec<_> = all_signers + // Pick a message hash + let accepted = signer_accepted_responses .iter() - .cloned() - .take(num_signers * 3 / 10) - .collect(); - TEST_REJECT_ALL_BLOCK_PROPOSAL - .lock() - .unwrap() - .replace(rejecting_signers.clone()); - test_observer::clear(); + .min_by_key(|accepted| accepted.signer_signature_hash) + .expect("No `BlockResponse::Accepted` messages recieved"); + let selected_sighash = accepted.signer_signature_hash; - // submit a tx so that the miner will mine a stacks block N+1 - let blocks_before = mined_blocks.load(Ordering::SeqCst); - let info_before = signer_test - .stacks_client - .get_peer_info() - .expect("Failed to get peer info"); - let transfer_tx = make_stacks_transfer( - &sender_sk, - sender_nonce, - send_fee, - 
signer_test.running_nodes.conf.burnchain.chain_id, - &recipient, - send_amt, - ); - let tx = submit_tx(&http_origin, &transfer_tx); - sender_nonce += 1; - info!("Submitted tx {tx} in to mine block N+1"); + // Filter only resonses for selected block and collect unique pubkeys and signatures + let (pubkeys, signatures): (HashSet<_>, HashSet<_>) = signer_accepted_responses + .into_iter() + .filter(|accepted| accepted.signer_signature_hash == selected_sighash) + .map(|accepted| { + let pubkey = Secp256k1PublicKey::recover_to_pubkey( + accepted.signer_signature_hash.bits(), + &accepted.signature, + ) + .expect("Failed to recover pubkey"); + (pubkey, accepted.signature) + }) + .unzip(); - wait_for(30, || { - Ok(mined_blocks.load(Ordering::SeqCst) > blocks_before - && signer_test - .stacks_client - .get_peer_info() - .unwrap() - .stacks_tip_height - > info_before.stacks_tip_height) - }) - .expect("Timed out waiting for stacks block N+1 to be mined"); + assert_eq!(pubkeys.len(), unique_signers); + assert_eq!(signatures.len(), unique_signers); - signer_test - .wait_for_block_rejections(short_timeout, &rejecting_signers) - .expect("Timed out waiting for block rejection of N+1"); + signer_test.shutdown(); +} - // Assert the block was mined - let info_after = signer_test - .stacks_client - .get_peer_info() - .expect("Failed to get peer info"); - assert_eq!(blocks_before + 1, mined_blocks.load(Ordering::SeqCst)); - assert_eq!( - info_before.stacks_tip_height + 1, - info_after.stacks_tip_height - ); +/// This test involves two miners, each mining tenures with 6 blocks each. Half +/// of the signers are attached to each miner, so the test also verifies that +/// the signers' messages successfully make their way to the active miner. 
+#[test] +#[ignore] +fn multiple_miners_with_nakamoto_blocks() { + let num_signers = 5; + let max_nakamoto_tenures = 20; + let inter_blocks_per_tenure = 5; - // Ensure that the block was still accepted globally so the stacks tip has advanced to N+1 - let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); - let block_n_1 = nakamoto_blocks.last().unwrap(); - assert_eq!(info_after.stacks_tip.to_string(), block_n_1.block_hash); - assert_ne!(block_n_1, block_n); + // setup sender + recipient for a test stx transfer + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 1000; + let send_fee = 180; + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); - signer_test - .wait_for_block_acceptance( - short_timeout, - &block_n_1.signer_signature_hash, - &all_signers[num_signers * 3 / 10 + 1..], - ) - .expect("Timed out waiting for block acceptance of N+1"); + let btc_miner_1_seed = vec![1, 1, 1, 1]; + let btc_miner_2_seed = vec![2, 2, 2, 2]; + let btc_miner_1_pk = Keychain::default(btc_miner_1_seed.clone()).get_pub_key(); + let btc_miner_2_pk = Keychain::default(btc_miner_2_seed.clone()).get_pub_key(); - info!("------------------------- Test Mine Nakamoto Block N+2 -------------------------"); - // Ensure that all signers accept the block proposal N+2 - let info_before = signer_test.stacks_client.get_peer_info().unwrap(); - let blocks_before = mined_blocks.load(Ordering::SeqCst); - TEST_REJECT_ALL_BLOCK_PROPOSAL - .lock() - .unwrap() - .replace(Vec::new()); + let node_1_rpc = gen_random_port(); + let node_1_p2p = gen_random_port(); + let node_2_rpc = gen_random_port(); + let node_2_p2p = gen_random_port(); - // submit a tx so that the miner will mine a stacks block N+2 and ensure ALL signers accept it - let transfer_tx = make_stacks_transfer( - &sender_sk, - sender_nonce, - send_fee, - signer_test.running_nodes.conf.burnchain.chain_id, - &recipient, - send_amt, - ); - let tx = 
submit_tx(&http_origin, &transfer_tx); - info!("Submitted tx {tx} in to mine block N+2"); - wait_for(30, || { - Ok(mined_blocks.load(Ordering::SeqCst) > blocks_before - && signer_test - .stacks_client - .get_peer_info() - .unwrap() - .stacks_tip_height - > info_before.stacks_tip_height) - }) - .expect("Timed out waiting for stacks block N+2 to be mined"); - let blocks_after = mined_blocks.load(Ordering::SeqCst); - assert_eq!(blocks_after, blocks_before + 1); + let localhost = "127.0.0.1"; + let node_1_rpc_bind = format!("{localhost}:{node_1_rpc}"); + let node_2_rpc_bind = format!("{localhost}:{node_2_rpc}"); + let mut node_2_listeners = Vec::new(); - let info_after = signer_test.stacks_client.get_peer_info().unwrap(); - assert_eq!( - info_before.stacks_tip_height + 1, + // partition the signer set so that ~half are listening and using node 1 for RPC and events, + // and the rest are using node 2 + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![( + sender_addr, + (send_amt + send_fee) * max_nakamoto_tenures * inter_blocks_per_tenure, + )], + |signer_config| { + let node_host = if signer_config.endpoint.port() % 2 == 0 { + &node_1_rpc_bind + } else { + &node_2_rpc_bind + }; + signer_config.node_host = node_host.to_string(); + }, + |config| { + config.node.rpc_bind = format!("{localhost}:{node_1_rpc}"); + config.node.p2p_bind = format!("{localhost}:{node_1_p2p}"); + config.node.data_url = format!("http://{localhost}:{node_1_rpc}"); + config.node.p2p_address = format!("{localhost}:{node_1_p2p}"); + config.miner.wait_on_interim_blocks = Duration::from_secs(5); + config.node.pox_sync_sample_secs = 30; + + config.node.seed = btc_miner_1_seed.clone(); + config.node.local_peer_seed = btc_miner_1_seed.clone(); + config.burnchain.local_mining_public_key = Some(btc_miner_1_pk.to_hex()); + config.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[1])); + + config.events_observers.retain(|listener| { + let Ok(addr) = 
std::net::SocketAddr::from_str(&listener.endpoint) else { + warn!( + "Cannot parse {} to a socket, assuming it isn't a signer-listener binding", + listener.endpoint + ); + return true; + }; + if addr.port() % 2 == 0 || addr.port() == test_observer::EVENT_OBSERVER_PORT { + return true; + } + node_2_listeners.push(listener.clone()); + false + }) + }, + Some(vec![btc_miner_1_pk, btc_miner_2_pk]), + None, + ); + let blocks_mined1 = signer_test.running_nodes.nakamoto_blocks_mined.clone(); + + let conf = signer_test.running_nodes.conf.clone(); + let mut conf_node_2 = conf.clone(); + conf_node_2.node.rpc_bind = format!("{localhost}:{node_2_rpc}"); + conf_node_2.node.p2p_bind = format!("{localhost}:{node_2_p2p}"); + conf_node_2.node.data_url = format!("http://{localhost}:{node_2_rpc}"); + conf_node_2.node.p2p_address = format!("{localhost}:{node_2_p2p}"); + conf_node_2.node.seed = btc_miner_2_seed.clone(); + conf_node_2.burnchain.local_mining_public_key = Some(btc_miner_2_pk.to_hex()); + conf_node_2.node.local_peer_seed = btc_miner_2_seed.clone(); + conf_node_2.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[2])); + conf_node_2.node.miner = true; + conf_node_2.events_observers.clear(); + conf_node_2.events_observers.extend(node_2_listeners); + assert!(!conf_node_2.events_observers.is_empty()); + + let node_1_sk = Secp256k1PrivateKey::from_seed(&conf.node.local_peer_seed); + let node_1_pk = StacksPublicKey::from_private(&node_1_sk); + + conf_node_2.node.working_dir = format!("{}-1", conf_node_2.node.working_dir); + + conf_node_2.node.set_bootstrap_nodes( + format!("{}@{}", &node_1_pk.to_hex(), conf.node.p2p_bind), + conf.burnchain.chain_id, + conf.burnchain.peer_version, + ); + + let http_origin = format!("http://{}", &conf.node.rpc_bind); + + let mut run_loop_2 = boot_nakamoto::BootRunLoop::new(conf_node_2.clone()).unwrap(); + let run_loop_stopper_2 = run_loop_2.get_termination_switch(); + let rl2_coord_channels = run_loop_2.coordinator_channels(); + let Counters 
{ + naka_submitted_commits: rl2_commits, + naka_mined_blocks: blocks_mined2, + .. + } = run_loop_2.counters(); + let run_loop_2_thread = thread::Builder::new() + .name("run_loop_2".into()) + .spawn(move || run_loop_2.start(None, 0)) + .unwrap(); + + signer_test.boot_to_epoch_3(); + + wait_for(120, || { + let Some(node_1_info) = get_chain_info_opt(&conf) else { + return Ok(false); + }; + let Some(node_2_info) = get_chain_info_opt(&conf_node_2) else { + return Ok(false); + }; + Ok(node_1_info.stacks_tip_height == node_2_info.stacks_tip_height) + }) + .expect("Timed out waiting for follower to catch up to the miner"); + + let pre_nakamoto_peer_1_height = get_chain_info(&conf).stacks_tip_height; + + info!("------------------------- Reached Epoch 3.0 -------------------------"); + + // due to the random nature of mining sortitions, the way this test is structured + // is that we keep track of how many tenures each miner produced, and once enough sortitions + // have been produced such that each miner has produced 3 tenures, we stop and check the + // results at the end + let rl1_coord_channels = signer_test.running_nodes.coord_channel.clone(); + let rl1_commits = signer_test.running_nodes.commits_submitted.clone(); + + let miner_1_pk = StacksPublicKey::from_private(conf.miner.mining_key.as_ref().unwrap()); + let miner_2_pk = StacksPublicKey::from_private(conf_node_2.miner.mining_key.as_ref().unwrap()); + let mut btc_blocks_mined = 1; + let mut miner_1_tenures = 0; + let mut miner_2_tenures = 0; + let mut sender_nonce = 0; + while !(miner_1_tenures >= 3 && miner_2_tenures >= 3) { + if btc_blocks_mined > max_nakamoto_tenures { + panic!("Produced {btc_blocks_mined} sortitions, but didn't cover the test scenarios, aborting"); + } + let blocks_processed_before = + blocks_mined1.load(Ordering::SeqCst) + blocks_mined2.load(Ordering::SeqCst); + signer_test.mine_block_wait_on_processing( + &[&rl1_coord_channels, &rl2_coord_channels], + &[&rl1_commits, &rl2_commits], + 
Duration::from_secs(30), + ); + btc_blocks_mined += 1; + + // wait for the new block to be processed + wait_for(60, || { + let blocks_processed = + blocks_mined1.load(Ordering::SeqCst) + blocks_mined2.load(Ordering::SeqCst); + Ok(blocks_processed > blocks_processed_before) + }) + .unwrap(); + + info!( + "Nakamoto blocks mined: {}", + blocks_mined1.load(Ordering::SeqCst) + blocks_mined2.load(Ordering::SeqCst) + ); + + // mine the interim blocks + info!("Mining interim blocks"); + for interim_block_ix in 0..inter_blocks_per_tenure { + let blocks_processed_before = + blocks_mined1.load(Ordering::SeqCst) + blocks_mined2.load(Ordering::SeqCst); + // submit a tx so that the miner will mine an extra block + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); + sender_nonce += 1; + submit_tx(&http_origin, &transfer_tx); + + wait_for(60, || { + let blocks_processed = + blocks_mined1.load(Ordering::SeqCst) + blocks_mined2.load(Ordering::SeqCst); + Ok(blocks_processed > blocks_processed_before) + }) + .unwrap(); + info!("Mined interim block {btc_blocks_mined}:{interim_block_ix}"); + } + + let blocks = get_nakamoto_headers(&conf); + let mut seen_burn_hashes = HashSet::new(); + miner_1_tenures = 0; + miner_2_tenures = 0; + for header in blocks.iter() { + if seen_burn_hashes.contains(&header.burn_header_hash) { + continue; + } + seen_burn_hashes.insert(header.burn_header_hash); + + let header = header.anchored_header.as_stacks_nakamoto().unwrap(); + if miner_1_pk + .verify( + header.miner_signature_hash().as_bytes(), + &header.miner_signature, + ) + .unwrap() + { + miner_1_tenures += 1; + } + if miner_2_pk + .verify( + header.miner_signature_hash().as_bytes(), + &header.miner_signature, + ) + .unwrap() + { + miner_2_tenures += 1; + } + } + info!("Miner 1 tenures: {miner_1_tenures}, Miner 2 tenures: {miner_2_tenures}"); + } + + info!( + "New chain info 1: {:?}", + 
get_chain_info(&signer_test.running_nodes.conf) + ); + + info!("New chain info 2: {:?}", get_chain_info(&conf_node_2)); + + let peer_1_height = get_chain_info(&conf).stacks_tip_height; + let peer_2_height = get_chain_info(&conf_node_2).stacks_tip_height; + info!("Peer height information"; "peer_1" => peer_1_height, "peer_2" => peer_2_height, "pre_naka_height" => pre_nakamoto_peer_1_height); + assert_eq!(peer_1_height, peer_2_height); + assert_eq!( + peer_1_height, + pre_nakamoto_peer_1_height + (btc_blocks_mined - 1) * (inter_blocks_per_tenure + 1) + ); + assert_eq!(btc_blocks_mined, miner_1_tenures + miner_2_tenures); + rl2_coord_channels + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper_2.store(false, Ordering::SeqCst); + run_loop_2_thread.join().unwrap(); + signer_test.shutdown(); +} + +/// This test involves two miners, 1 and 2. During miner 1's first tenure, miner +/// 2 is forced to ignore one of the blocks in that tenure. The next time miner +/// 2 mines a block, it should attempt to fork the chain at that point. The test +/// verifies that the fork is not successful and that miner 1 is able to +/// continue mining after this fork attempt. 
+#[test] +#[ignore] +fn partial_tenure_fork() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let num_signers = 5; + let max_nakamoto_tenures = 30; + let inter_blocks_per_tenure = 5; + + // setup sender + recipient for a test stx transfer + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 1000; + let send_fee = 180; + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + + let btc_miner_1_seed = vec![1, 1, 1, 1]; + let btc_miner_2_seed = vec![2, 2, 2, 2]; + let btc_miner_1_pk = Keychain::default(btc_miner_1_seed.clone()).get_pub_key(); + let btc_miner_2_pk = Keychain::default(btc_miner_2_seed.clone()).get_pub_key(); + + let node_1_rpc = gen_random_port(); + let node_1_p2p = gen_random_port(); + let node_2_rpc = gen_random_port(); + let node_2_p2p = gen_random_port(); + + let localhost = "127.0.0.1"; + let node_1_rpc_bind = format!("{localhost}:{node_1_rpc}"); + + // All signers are listening to node 1 + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![( + sender_addr, + (send_amt + send_fee) * max_nakamoto_tenures * inter_blocks_per_tenure, + )], + |signer_config| { + signer_config.node_host = node_1_rpc_bind.clone(); + signer_config.first_proposal_burn_block_timing = Duration::from_secs(0); + }, + |config| { + config.node.rpc_bind = format!("{localhost}:{node_1_rpc}"); + config.node.p2p_bind = format!("{localhost}:{node_1_p2p}"); + config.node.data_url = format!("http://{localhost}:{node_1_rpc}"); + config.node.p2p_address = format!("{localhost}:{node_1_p2p}"); + config.miner.wait_on_interim_blocks = Duration::from_secs(5); + config.node.pox_sync_sample_secs = 30; + config.miner.block_commit_delay = Duration::from_secs(0); + + config.node.seed = btc_miner_1_seed.clone(); + config.node.local_peer_seed = btc_miner_1_seed.clone(); + config.burnchain.local_mining_public_key = Some(btc_miner_1_pk.to_hex()); + 
config.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[1])); + + // Increase the reward cycle length to avoid missing a prepare phase + // while we are intentionally forking. + config.burnchain.pox_reward_length = Some(40); + config.burnchain.pox_prepare_length = Some(10); + + // Move epoch 2.5 and 3.0 earlier, so we have more time for the + // test before re-stacking is required. + if let Some(epochs) = config.burnchain.epochs.as_mut() { + epochs[StacksEpochId::Epoch24].end_height = 131; + epochs[StacksEpochId::Epoch25].start_height = 131; + epochs[StacksEpochId::Epoch25].end_height = 166; + epochs[StacksEpochId::Epoch30].start_height = 166; + } else { + panic!("Expected epochs to be set"); + } + }, + Some(vec![btc_miner_1_pk, btc_miner_2_pk]), + None, + ); + let blocks_mined1 = signer_test.running_nodes.nakamoto_blocks_mined.clone(); + + let conf = signer_test.running_nodes.conf.clone(); + let mut conf_node_2 = conf.clone(); + conf_node_2.node.rpc_bind = format!("{localhost}:{node_2_rpc}"); + conf_node_2.node.p2p_bind = format!("{localhost}:{node_2_p2p}"); + conf_node_2.node.data_url = format!("http://{localhost}:{node_2_rpc}"); + conf_node_2.node.p2p_address = format!("{localhost}:{node_2_p2p}"); + conf_node_2.node.seed = btc_miner_2_seed.clone(); + conf_node_2.burnchain.local_mining_public_key = Some(btc_miner_2_pk.to_hex()); + conf_node_2.node.local_peer_seed = btc_miner_2_seed.clone(); + conf_node_2.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[2])); + conf_node_2.node.miner = true; + conf_node_2.events_observers.clear(); + + let node_1_sk = Secp256k1PrivateKey::from_seed(&conf.node.local_peer_seed); + let node_1_pk = StacksPublicKey::from_private(&node_1_sk); + + conf_node_2.node.working_dir = format!("{}-1", conf_node_2.node.working_dir); + + conf_node_2.node.set_bootstrap_nodes( + format!("{}@{}", &node_1_pk.to_hex(), conf.node.p2p_bind), + conf.burnchain.chain_id, + conf.burnchain.peer_version, + ); + + let mining_pk_1 = 
StacksPublicKey::from_private(&conf.miner.mining_key.unwrap()); + let mining_pk_2 = StacksPublicKey::from_private(&conf_node_2.miner.mining_key.unwrap()); + let mining_pkh_1 = Hash160::from_node_public_key(&mining_pk_1); + let mining_pkh_2 = Hash160::from_node_public_key(&mining_pk_2); + debug!("The mining key for miner 1 is {mining_pkh_1}"); + debug!("The mining key for miner 2 is {mining_pkh_2}"); + + let http_origin = format!("http://{}", &conf.node.rpc_bind); + + let mut run_loop_2 = boot_nakamoto::BootRunLoop::new(conf_node_2.clone()).unwrap(); + let rl2_coord_channels = run_loop_2.coordinator_channels(); + let run_loop_stopper_2 = run_loop_2.get_termination_switch(); + let Counters { + naka_mined_blocks: blocks_mined2, + naka_proposed_blocks: blocks_proposed2, + naka_submitted_commits: commits_2, + naka_skip_commit_op: rl2_skip_commit_op, + .. + } = run_loop_2.counters(); + + signer_test.boot_to_epoch_3(); + let run_loop_2_thread = thread::Builder::new() + .name("run_loop_2".into()) + .spawn(move || run_loop_2.start(None, 0)) + .unwrap(); + + let pre_nakamoto_peer_1_height = get_chain_info(&conf).stacks_tip_height; + + wait_for(200, || { + let Some(node_1_info) = get_chain_info_opt(&conf) else { + return Ok(false); + }; + let Some(node_2_info) = get_chain_info_opt(&conf_node_2) else { + return Ok(false); + }; + Ok(node_1_info.stacks_tip_height == node_2_info.stacks_tip_height) + }) + .expect("Timed out waiting for follower to catch up to the miner"); + + info!("------------------------- Reached Epoch 3.0 -------------------------"); + + // due to the random nature of mining sortitions, the way this test is structured + // is that we keep track of how many tenures each miner produced, and once enough sortitions + // have been produced such that each miner has produced 3 tenures, we stop and check the + // results at the end + let mut btc_blocks_mined = 0; + let mut miner_1_tenures = 0u64; + let mut miner_2_tenures = 0u64; + let mut fork_initiated = false; + 
let mut min_miner_1_tenures = u64::MAX; + let mut min_miner_2_tenures = u64::MAX; + let mut ignore_block = 0; + + let mut miner_1_blocks = 0; + let mut miner_2_blocks = 0; + let mut min_miner_2_blocks = 0; + let mut last_sortition_winner: Option = None; + let mut miner_2_won_2_in_a_row = false; + + let commits_1 = signer_test.running_nodes.commits_submitted.clone(); + let rl1_skip_commit_op = signer_test + .running_nodes + .nakamoto_test_skip_commit_op + .clone(); + + let sortdb = SortitionDB::open( + &conf.get_burn_db_file_path(), + false, + conf.get_burnchain().pox_constants, + ) + .unwrap(); + + info!("-------- Waiting miner 2 to catch up to miner 1 --------"); + + // Wait for miner 2 to catch up to miner 1 + wait_for(60, || { + let info_1 = get_chain_info(&conf); + let info_2 = get_chain_info(&conf_node_2); + Ok(info_1.stacks_tip_height == info_2.stacks_tip_height) + }) + .expect("Timed out waiting for miner 2 to catch up to miner 1"); + + info!("-------- Miner 2 caught up to miner 1 --------"); + + // Pause block commits + rl1_skip_commit_op.set(true); + rl2_skip_commit_op.set(true); + + let mined_before_1 = blocks_mined1.load(Ordering::SeqCst); + let mined_before_2 = blocks_mined2.load(Ordering::SeqCst); + let commits_before_1 = commits_1.load(Ordering::SeqCst); + let commits_before_2 = commits_2.load(Ordering::SeqCst); + + // Mine the first block + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 180, + || { + let mined_1 = blocks_mined1.load(Ordering::SeqCst); + let mined_2 = blocks_mined2.load(Ordering::SeqCst); + + Ok(mined_1 > mined_before_1 || mined_2 > mined_before_2) + }, + ) + .expect("Timed out waiting for new Stacks block to be mined"); + + info!("-------- Mined first block, wait for block commits --------"); + + // Unpause block commits and wait for both miners' commits + rl1_skip_commit_op.set(false); + rl2_skip_commit_op.set(false); + + // Ensure that both block commits have been sent before continuing + wait_for(60, || 
{ + let commits_after_1 = commits_1.load(Ordering::SeqCst); + let commits_after_2 = commits_2.load(Ordering::SeqCst); + Ok(commits_after_1 > commits_before_1 && commits_after_2 > commits_before_2) + }) + .expect("Timed out waiting for block commits"); + + while miner_1_tenures < min_miner_1_tenures || miner_2_tenures < min_miner_2_tenures { + if btc_blocks_mined >= max_nakamoto_tenures { + panic!("Produced {btc_blocks_mined} sortitions, but didn't cover the test scenarios, aborting"); + } + + // Mine a block and wait for it to be processed, unless we are in a + // forked tenure, in which case, just wait for the block proposal + let mined_before_1 = blocks_mined1.load(Ordering::SeqCst); + let mined_before_2 = blocks_mined2.load(Ordering::SeqCst); + let proposed_before_2 = blocks_proposed2.load(Ordering::SeqCst); + let proposed_before_1 = signer_test + .running_nodes + .nakamoto_blocks_proposed + .load(Ordering::SeqCst); + + info!( + "Next tenure checking"; + "fork_initiated?" => fork_initiated, + "miner_1_tenures" => miner_1_tenures, + "miner_2_tenures" => miner_2_tenures, + "min_miner_1_tenures" => min_miner_2_tenures, + "min_miner_2_tenures" => min_miner_2_tenures, + "proposed_before_1" => proposed_before_1, + "proposed_before_2" => proposed_before_2, + "mined_before_1" => mined_before_1, + "mined_before_2" => mined_before_2, + ); + + // Pause block commits + rl1_skip_commit_op.set(true); + rl2_skip_commit_op.set(true); + + let tip_before = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + let commits_before_1 = commits_1.load(Ordering::SeqCst); + let commits_before_2 = commits_2.load(Ordering::SeqCst); + + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || { + let mined_1 = blocks_mined1.load(Ordering::SeqCst); + let mined_2 = blocks_mined2.load(Ordering::SeqCst); + let proposed_2 = blocks_proposed2.load(Ordering::SeqCst); + + Ok((fork_initiated && proposed_2 > proposed_before_2) + || mined_1 > mined_before_1 + 
|| mined_2 > mined_before_2) + }, + ) + .expect("Timed out waiting for tenure change Stacks block"); + btc_blocks_mined += 1; + + // Unpause block commits + info!("Unpausing block commits"); + rl1_skip_commit_op.set(false); + rl2_skip_commit_op.set(false); + + // Wait for the block to be processed and the block commits to be submitted + wait_for(60, || { + let tip_after = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + // Ensure that both block commits have been sent before continuing + let commits_after_1 = commits_1.load(Ordering::SeqCst); + let commits_after_2 = commits_2.load(Ordering::SeqCst); + Ok(commits_after_1 > commits_before_1 + && commits_after_2 > commits_before_2 + && tip_after.consensus_hash != tip_before.consensus_hash) + }) + .expect("Sortition DB tip did not change"); + + let tip_sn = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + info!("tip_after: {:?}", tip_sn); + let miner = match tip_sn.miner_pk_hash { + Some(pk_hash) => { + if pk_hash == mining_pkh_1 { + 1 + } else { + 2 + } + } + None => { + panic!("No sortition found"); + } + }; + info!("Next tenure mined by miner {miner}"); + + if let Some(last_sortition_winner) = last_sortition_winner { + if last_sortition_winner == miner && miner == 2 { + miner_2_won_2_in_a_row = true; + } else { + miner_2_won_2_in_a_row = false; + } + } + last_sortition_winner = Some(miner); + + if miner == 1 && miner_1_tenures == 0 { + // Setup miner 2 to ignore a block in this tenure + ignore_block = pre_nakamoto_peer_1_height + + (btc_blocks_mined - 1) * (inter_blocks_per_tenure + 1) + + 3; + set_ignore_block(ignore_block, &conf_node_2.node.working_dir); + + // Ensure that miner 2 runs at least one more tenure + min_miner_2_tenures = miner_2_tenures + 1; + fork_initiated = true; + min_miner_2_blocks = miner_2_blocks; + } + if miner == 2 && miner_2_tenures == min_miner_2_tenures { + // This is the forking tenure. 
Ensure that miner 1 runs one more + // tenure after this to validate that it continues to build off of + // the proper block. + min_miner_1_tenures = miner_1_tenures + 1; + } + + let mut blocks = inter_blocks_per_tenure; + // mine (or attempt to mine) the interim blocks + for interim_block_ix in 0..inter_blocks_per_tenure { + let mined_before_1 = blocks_mined1.load(Ordering::SeqCst); + let mined_before_2 = blocks_mined2.load(Ordering::SeqCst); + let proposed_before_2 = blocks_proposed2.load(Ordering::SeqCst); + + info!( + "Mining interim blocks"; + "fork_initiated?" => fork_initiated, + "miner_1_tenures" => miner_1_tenures, + "miner_2_tenures" => miner_2_tenures, + "min_miner_1_tenures" => min_miner_2_tenures, + "min_miner_2_tenures" => min_miner_2_tenures, + "proposed_before_2" => proposed_before_2, + "mined_before_1" => mined_before_1, + "mined_before_2" => mined_before_2, + ); + + // submit a tx so that the miner will mine an extra block + let sender_nonce = (btc_blocks_mined - 1) * inter_blocks_per_tenure + interim_block_ix; + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); + // This may fail if the forking miner wins too many tenures and this account's + // nonces get too high (TooMuchChaining) + match submit_tx_fallible(&http_origin, &transfer_tx) { + Ok(_) => { + wait_for(60, || { + let mined_1 = blocks_mined1.load(Ordering::SeqCst); + let mined_2 = blocks_mined2.load(Ordering::SeqCst); + let proposed_2 = blocks_proposed2.load(Ordering::SeqCst); + + Ok((fork_initiated && proposed_2 > proposed_before_2) + || mined_1 > mined_before_1 + || mined_2 > mined_before_2 + // Special case where neither miner can mine a block: + || (fork_initiated && miner_2_won_2_in_a_row)) + }) + .expect("Timed out waiting for interim block to be mined"); + } + Err(e) => { + if e.to_string().contains("TooMuchChaining") { + info!("TooMuchChaining error, skipping 
block"); + blocks = interim_block_ix; + break; + } else { + panic!("Failed to submit tx: {e}"); + } + } + } + info!("Attempted to mine interim block {btc_blocks_mined}:{interim_block_ix}"); + } + + if miner == 1 { + miner_1_tenures += 1; + miner_1_blocks += blocks; + } else { + miner_2_tenures += 1; + miner_2_blocks += blocks; + } + + let mined_1 = blocks_mined1.load(Ordering::SeqCst); + let mined_2 = blocks_mined2.load(Ordering::SeqCst); + + info!( + "Miner 1 tenures: {miner_1_tenures}, Miner 2 tenures: {miner_2_tenures}, Miner 1 before: {mined_before_1}, Miner 2 before: {mined_before_2}, Miner 1 blocks: {mined_1}, Miner 2 blocks: {mined_2}", + ); + + if miner == 1 { + assert_eq!(mined_1, mined_before_1 + blocks + 1); + } else if miner_2_tenures < min_miner_2_tenures { + assert_eq!(mined_2, mined_before_2 + blocks + 1); + } else { + // Miner 2 should have mined 0 blocks after the fork + assert_eq!(mined_2, mined_before_2); + } + } + + info!( + "New chain info 1: {:?}", + get_chain_info(&signer_test.running_nodes.conf) + ); + + info!("New chain info 2: {:?}", get_chain_info(&conf_node_2)); + + let peer_1_height = get_chain_info(&conf).stacks_tip_height; + let peer_2_height = get_chain_info(&conf_node_2).stacks_tip_height; + assert_eq!(peer_2_height, ignore_block - 1); + // The height may be higher than expected due to extra transactions waiting + // to be mined during the forking miner's tenure. 
+ // We cannot guarantee due to TooMuchChaining that the miner will mine inter_blocks_per_tenure + // Must be at least the number of blocks mined by miner 1 and the number of blocks mined by miner 2 + // before the fork was initiated + assert!(peer_1_height >= pre_nakamoto_peer_1_height + miner_1_blocks + min_miner_2_blocks); + assert_eq!(btc_blocks_mined, miner_1_tenures + miner_2_tenures); + + let sortdb = SortitionDB::open( + &conf_node_2.get_burn_db_file_path(), + false, + conf_node_2.get_burnchain().pox_constants, + ) + .unwrap(); + + let (chainstate, _) = StacksChainState::open( + false, + conf_node_2.burnchain.chain_id, + &conf_node_2.get_chainstate_path_str(), + None, + ) + .unwrap(); + let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + assert_eq!(tip.stacks_block_height, ignore_block - 1); + rl2_coord_channels + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper_2.store(false, Ordering::SeqCst); + run_loop_2_thread.join().unwrap(); + signer_test.shutdown(); +} + +#[test] +#[ignore] +/// Test that signers that accept a block locally, but that was rejected globally will accept a subsequent attempt +/// by the miner essentially reorg their prior locally accepted/signed block, i.e. the globally rejected block overrides +/// their local view. +/// +/// Test Setup: +/// The test spins up five stacks signers, one miner Nakamoto node, and a corresponding bitcoind. +/// The stacks node is then advanced to Epoch 3.0 boundary to allow block signing. +/// +/// Test Execution: +/// The node mines 1 stacks block N (all signers sign it). The subsequent block N+1 is proposed, but rejected by >30% of the signers. +/// The miner then attempts to mine N+1', and all signers accept the block. 
+/// +/// Test Assertion: +/// Stacks tip advances to N+1' +fn locally_accepted_blocks_overriden_by_global_rejection() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let nmb_txs = 3; + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let short_timeout_secs = 20; + let mut signer_test: SignerTest = SignerTest::new( + num_signers, + vec![(sender_addr, (send_amt + send_fee) * nmb_txs)], + ); + + let all_signers: Vec<_> = signer_test + .signer_stacks_private_keys + .iter() + .map(StacksPublicKey::from_private) + .collect(); + + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + signer_test.boot_to_epoch_3(); + + info!("------------------------- Test Mine Nakamoto Block N -------------------------"); + let info_before = signer_test.stacks_client.get_peer_info().unwrap(); + let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone(); + let blocks_before = mined_blocks.load(Ordering::SeqCst); + // submit a tx so that the miner will mine a stacks block + let mut sender_nonce = 0; + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); + let tx = submit_tx(&http_origin, &transfer_tx); + info!("Submitted tx {tx} in to mine block N"); + wait_for(short_timeout_secs, || { + Ok(mined_blocks.load(Ordering::SeqCst) > blocks_before + && signer_test + .stacks_client + .get_peer_info() + .unwrap() + .stacks_tip_height + > info_before.stacks_tip_height) + }) + .expect("Timed out waiting for stacks block N to be mined"); + sender_nonce += 
1; + let info_after = signer_test.stacks_client.get_peer_info().unwrap(); + assert_eq!( + info_before.stacks_tip_height + 1, + info_after.stacks_tip_height + ); + let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); + let block_n = nakamoto_blocks.last().unwrap(); + assert_eq!(info_after.stacks_tip.to_string(), block_n.block_hash); + signer_test + .wait_for_block_acceptance( + short_timeout_secs, + &block_n.signer_signature_hash, + &all_signers, + ) + .expect("Timed out waiting for block acceptance of N"); + + info!("------------------------- Attempt to Mine Nakamoto Block N+1 -------------------------"); + // Make half of the signers reject the block proposal by the miner to ensure its marked globally rejected + let rejecting_signers: Vec<_> = all_signers + .iter() + .cloned() + .take(num_signers / 2 + num_signers % 2) + .collect(); + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(rejecting_signers.clone()); + test_observer::clear(); + // Make a new stacks transaction to create a different block signature, but make sure to propose it + // AFTER the signers are unfrozen so they don't inadvertently prevent the new block being accepted + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); + let tx = submit_tx(&http_origin, &transfer_tx); + sender_nonce += 1; + info!("Submitted tx {tx} to mine block N+1"); + + let blocks_before = mined_blocks.load(Ordering::SeqCst); + let info_before = signer_test.stacks_client.get_peer_info().unwrap(); + // We cannot gaurantee that ALL signers will reject due to the testing directive as we may hit majority first..So ensure that we only assert that up to the threshold number rejected + signer_test + .wait_for_block_rejections(short_timeout_secs, &rejecting_signers) + .expect("Timed out waiting for block rejection of N+1"); + + assert_eq!(blocks_before, mined_blocks.load(Ordering::SeqCst)); + let info_after = 
signer_test.stacks_client.get_peer_info().unwrap(); + assert_eq!(info_before, info_after); + // Ensure that the block was not accepted globally so the stacks tip has not advanced to N+1 + let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); + let block_n_1 = nakamoto_blocks.last().unwrap(); + assert_ne!(block_n_1, block_n); + + info!("------------------------- Test Mine Nakamoto Block N+1' -------------------------"); + let info_before = signer_test.stacks_client.get_peer_info().unwrap(); + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(Vec::new()); + + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); + let tx = submit_tx(&http_origin, &transfer_tx); + info!("Submitted tx {tx} to mine block N+1'"); + + wait_for(short_timeout_secs, || { + Ok(mined_blocks.load(Ordering::SeqCst) > blocks_before + && signer_test + .stacks_client + .get_peer_info() + .unwrap() + .stacks_tip_height + > info_before.stacks_tip_height + && test_observer::get_mined_nakamoto_blocks().last().unwrap() != block_n_1) + }) + .expect("Timed out waiting for stacks block N+1' to be mined"); + let blocks_after = mined_blocks.load(Ordering::SeqCst); + assert_eq!(blocks_after, blocks_before + 1); + + let info_after = signer_test.stacks_client.get_peer_info().unwrap(); + assert_eq!( + info_after.stacks_tip_height, + info_before.stacks_tip_height + 1 + ); + // Ensure that the block was accepted globally so the stacks tip has advanced to N+1' + let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); + let block_n_1_prime = nakamoto_blocks.last().unwrap(); + assert_eq!( + info_after.stacks_tip.to_string(), + block_n_1_prime.block_hash + ); + assert_ne!(block_n_1_prime, block_n_1); + // Verify that all signers accepted the new block proposal + signer_test + .wait_for_block_acceptance( + short_timeout_secs, + &block_n_1_prime.signer_signature_hash, + &all_signers, + ) + 
.expect("Timed out waiting for block acceptance of N+1'"); +} + +#[test] +#[ignore] +/// Test that signers that reject a block locally, but that was accepted globally will accept +/// a subsequent block built on top of the accepted block +/// +/// Test Setup: +/// The test spins up five stacks signers, one miner Nakamoto node, and a corresponding bitcoind. +/// The stacks node is then advanced to Epoch 3.0 boundary to allow block signing. +/// +/// Test Execution: +/// The node mines 1 stacks block N (all signers sign it). The subsequent block N+1 is proposed, but rejected by <30% of the signers. +/// The miner then attempts to mine N+2, and all signers accept the block. +/// +/// Test Assertion: +/// Stacks tip advances to N+2 +fn locally_rejected_blocks_overriden_by_global_acceptance() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let nmb_txs = 3; + + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let mut signer_test: SignerTest = SignerTest::new( + num_signers, + vec![(sender_addr, (send_amt + send_fee) * nmb_txs)], + ); + + let all_signers: Vec<_> = signer_test + .signer_stacks_private_keys + .iter() + .map(StacksPublicKey::from_private) + .collect(); + + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + let short_timeout = 30; + signer_test.boot_to_epoch_3(); + + info!("------------------------- Test Mine Nakamoto Block N -------------------------"); + let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone(); + let info_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + 
+ // submit a tx so that the miner will mine a stacks block N + let mut sender_nonce = 0; + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); + let tx = submit_tx(&http_origin, &transfer_tx); + sender_nonce += 1; + info!("Submitted tx {tx} in to mine block N"); + + wait_for(short_timeout, || { + Ok(signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height + > info_before.stacks_tip_height) + }) + .expect("Timed out waiting for N to be mined and processed"); + + let info_after = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + assert_eq!( + info_before.stacks_tip_height + 1, + info_after.stacks_tip_height + ); + + // Ensure that the block was accepted globally so the stacks tip has advanced to N + let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); + let block_n = nakamoto_blocks.last().unwrap(); + assert_eq!(info_after.stacks_tip.to_string(), block_n.block_hash); + + // Make sure that ALL signers accepted the block proposal + signer_test + .wait_for_block_acceptance(short_timeout, &block_n.signer_signature_hash, &all_signers) + .expect("Timed out waiting for block acceptance of N"); + + info!("------------------------- Mine Nakamoto Block N+1 -------------------------"); + // Make less than 30% of the signers reject the block and ensure it is STILL marked globally accepted + let rejecting_signers: Vec<_> = all_signers + .iter() + .cloned() + .take(num_signers * 3 / 10) + .collect(); + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(rejecting_signers.clone()); + test_observer::clear(); + + // submit a tx so that the miner will mine a stacks block N+1 + let blocks_before = mined_blocks.load(Ordering::SeqCst); + let info_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + let transfer_tx = make_stacks_transfer( + 
&sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); + let tx = submit_tx(&http_origin, &transfer_tx); + sender_nonce += 1; + info!("Submitted tx {tx} in to mine block N+1"); + + wait_for(30, || { + Ok(mined_blocks.load(Ordering::SeqCst) > blocks_before + && signer_test + .stacks_client + .get_peer_info() + .unwrap() + .stacks_tip_height + > info_before.stacks_tip_height) + }) + .expect("Timed out waiting for stacks block N+1 to be mined"); + + signer_test + .wait_for_block_rejections(short_timeout, &rejecting_signers) + .expect("Timed out waiting for block rejection of N+1"); + + // Assert the block was mined + let info_after = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + assert_eq!(blocks_before + 1, mined_blocks.load(Ordering::SeqCst)); + assert_eq!( + info_before.stacks_tip_height + 1, + info_after.stacks_tip_height + ); + + // Ensure that the block was still accepted globally so the stacks tip has advanced to N+1 + let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); + let block_n_1 = nakamoto_blocks.last().unwrap(); + assert_eq!(info_after.stacks_tip.to_string(), block_n_1.block_hash); + assert_ne!(block_n_1, block_n); + + signer_test + .wait_for_block_acceptance( + short_timeout, + &block_n_1.signer_signature_hash, + &all_signers[num_signers * 3 / 10 + 1..], + ) + .expect("Timed out waiting for block acceptance of N+1"); + + info!("------------------------- Test Mine Nakamoto Block N+2 -------------------------"); + // Ensure that all signers accept the block proposal N+2 + let info_before = signer_test.stacks_client.get_peer_info().unwrap(); + let blocks_before = mined_blocks.load(Ordering::SeqCst); + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(Vec::new()); + + // submit a tx so that the miner will mine a stacks block N+2 and ensure ALL signers accept it + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + 
signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); + let tx = submit_tx(&http_origin, &transfer_tx); + info!("Submitted tx {tx} in to mine block N+2"); + wait_for(30, || { + Ok(mined_blocks.load(Ordering::SeqCst) > blocks_before + && signer_test + .stacks_client + .get_peer_info() + .unwrap() + .stacks_tip_height + > info_before.stacks_tip_height) + }) + .expect("Timed out waiting for stacks block N+2 to be mined"); + let blocks_after = mined_blocks.load(Ordering::SeqCst); + assert_eq!(blocks_after, blocks_before + 1); + + let info_after = signer_test.stacks_client.get_peer_info().unwrap(); + assert_eq!( + info_before.stacks_tip_height + 1, info_after.stacks_tip_height, ); - // Ensure that the block was accepted globally so the stacks tip has advanced to N+2 - let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); - let block_n_2 = nakamoto_blocks.last().unwrap(); - assert_eq!(info_after.stacks_tip.to_string(), block_n_2.block_hash); - assert_ne!(block_n_2, block_n_1); + // Ensure that the block was accepted globally so the stacks tip has advanced to N+2 + let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); + let block_n_2 = nakamoto_blocks.last().unwrap(); + assert_eq!(info_after.stacks_tip.to_string(), block_n_2.block_hash); + assert_ne!(block_n_2, block_n_1); + + // Make sure that ALL signers accepted the block proposal + signer_test + .wait_for_block_acceptance( + short_timeout, + &block_n_2.signer_signature_hash, + &all_signers, + ) + .expect("Timed out waiting for block acceptance of N+2"); +} + +#[test] +#[ignore] +/// Test that signers that have accepted a locally signed block N+1 built in tenure A can sign a block proposed during a +/// new tenure B built upon the last globally accepted block N if the timeout is exceeded, i.e. a reorg can occur at a tenure boundary. +/// +/// Test Setup: +/// The test spins up five stacks signers, one miner Nakamoto node, and a corresponding bitcoind. 
+/// The stacks node is then advanced to Epoch 3.0 boundary to allow block signing. +/// +/// Test Execution: +/// The node mines 1 stacks block N (all signers sign it). The subsequent block N+1 is proposed, but <30% accept it. The remaining signers +/// do not make a decision on the block. A new tenure begins and the miner proposes a new block N+1' which all signers accept. +/// +/// Test Assertion: +/// Stacks tip advances to N+1' +fn reorg_locally_accepted_blocks_across_tenures_succeeds() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let nmb_txs = 2; + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![(sender_addr, (send_amt + send_fee) * nmb_txs)], + |config| { + // Just accept all reorg attempts + config.tenure_last_block_proposal_timeout = Duration::from_secs(0); + }, + |config| { + config.miner.block_commit_delay = Duration::from_secs(0); + }, + None, + None, + ); + let all_signers = signer_test + .signer_stacks_private_keys + .iter() + .map(StacksPublicKey::from_private) + .collect::>(); + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + let short_timeout = 30; + signer_test.boot_to_epoch_3(); + info!("------------------------- Starting Tenure A -------------------------"); + info!("------------------------- Test Mine Nakamoto Block N -------------------------"); + let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone(); + let info_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer 
info"); + + // submit a tx so that the miner will mine a stacks block + let mut sender_nonce = 0; + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); + let tx = submit_tx(&http_origin, &transfer_tx); + sender_nonce += 1; + info!("Submitted tx {tx} in to mine block N"); + wait_for(short_timeout, || { + let info_after = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + Ok(info_after.stacks_tip_height > info_before.stacks_tip_height) + }) + .expect("Timed out waiting for block to be mined and processed"); + + // Ensure that the block was accepted globally so the stacks tip has advanced to N + let info_after = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + assert_eq!( + info_before.stacks_tip_height + 1, + info_after.stacks_tip_height + ); + let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); + let block_n = nakamoto_blocks.last().unwrap(); + assert_eq!(info_after.stacks_tip.to_string(), block_n.block_hash); + + info!("------------------------- Attempt to Mine Nakamoto Block N+1 -------------------------"); + // Make more than >70% of the signers ignore the block proposal to ensure it it is not globally accepted/rejected + let ignoring_signers: Vec<_> = all_signers + .iter() + .cloned() + .take(num_signers * 7 / 10) + .collect(); + let non_ignoring_signers: Vec<_> = all_signers + .iter() + .cloned() + .skip(num_signers * 7 / 10) + .collect(); + TEST_IGNORE_ALL_BLOCK_PROPOSALS.set(ignoring_signers.clone()); + // Clear the stackerdb chunks + test_observer::clear(); + + let blocks_before = mined_blocks.load(Ordering::SeqCst); + let info_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + + // submit a tx so that the miner will ATTEMPT to mine a stacks block N+1 + let transfer_tx = make_stacks_transfer( + &sender_sk, + 
sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); + let tx = submit_tx(&http_origin, &transfer_tx); + + info!("Submitted tx {tx} in to attempt to mine block N+1"); + wait_for(short_timeout, || { + let accepted_signers = test_observer::get_stackerdb_chunks() + .into_iter() + .flat_map(|chunk| chunk.modified_slots) + .filter_map(|chunk| { + let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + .expect("Failed to deserialize SignerMessage"); + match message { + SignerMessage::BlockResponse(BlockResponse::Accepted(accepted)) => { + non_ignoring_signers.iter().find(|key| { + key.verify(accepted.signer_signature_hash.bits(), &accepted.signature) + .is_ok() + }) + } + _ => None, + } + }) + .collect::>(); + Ok(accepted_signers.len() + ignoring_signers.len() == num_signers) + }) + .expect("FAIL: Timed out waiting for block proposal acceptance"); + + let blocks_after = mined_blocks.load(Ordering::SeqCst); + let info_after = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + assert_eq!(blocks_after, blocks_before); + assert_eq!(info_after, info_before); + // Ensure that the block was NOT accepted globally so the stacks tip has NOT advanced to N+1 + let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); + let block_n_1 = nakamoto_blocks.last().unwrap(); + assert_ne!(block_n_1, block_n); + assert_ne!(info_after.stacks_tip.to_string(), block_n_1.block_hash); + + info!("------------------------- Starting Tenure B -------------------------"); + // Start a new tenure and ensure the miner can propose a new block N+1' that is accepted by all signers + let commits_submitted = signer_test.running_nodes.commits_submitted.clone(); + let commits_before = commits_submitted.load(Ordering::SeqCst); + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || { + let commits_count = commits_submitted.load(Ordering::SeqCst); + 
Ok(commits_count > commits_before) + }, + ) + .unwrap(); + + info!( + "------------------------- Mine Nakamoto Block N+1' in Tenure B -------------------------" + ); + let info_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + TEST_IGNORE_ALL_BLOCK_PROPOSALS.set(Vec::new()); + wait_for(short_timeout, || { + let info_after = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + Ok(info_after.stacks_tip_height > info_before.stacks_tip_height) + }) + .expect("Timed out waiting for block to be mined and processed"); + + let info_after = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + assert_eq!( + info_before.stacks_tip_height + 1, + info_after.stacks_tip_height + ); + + // Ensure that the block was accepted globally so the stacks tip has advanced to N+1' + let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); + let block_n_1_prime = nakamoto_blocks.last().unwrap(); + assert_eq!( + info_after.stacks_tip.to_string(), + block_n_1_prime.block_hash + ); + assert_ne!(block_n_1_prime, block_n); + + // Make sure that ALL signers accepted the block proposal even though they signed a conflicting one in prior tenure + signer_test + .wait_for_block_acceptance(30, &block_n_1_prime.signer_signature_hash, &all_signers) + .expect("Timed out waiting for block acceptance of N+1'"); +} + +#[test] +#[ignore] +/// Test that signers that have accepted a locally signed block N+1 built in tenure A cannot sign a block proposed during a +/// new tenure B built upon the last globally accepted block N if the timeout is not exceeded, i.e. a reorg cannot occur at a tenure boundary +/// before the specified timeout has been exceeded. +/// +/// Test Setup: +/// The test spins up five stacks signers, one miner Nakamoto node, and a corresponding bitcoind. +/// The stacks node is then advanced to Epoch 3.0 boundary to allow block signing. 
+/// +/// Test Execution: +/// The node mines 1 stacks block N (all signers sign it). The subsequent block N+1 is proposed, but <30% accept it. The remaining signers +/// do not make a decision on the block. A new tenure begins and the miner proposes a new block N+1' which all signers reject as the timeout +/// has not been exceeded. +/// +/// Test Assertion: +/// Stacks tip remains at N. +fn reorg_locally_accepted_blocks_across_tenures_fails() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let nmb_txs = 2; + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![(sender_addr, (send_amt + send_fee) * nmb_txs)], + |config| { + // Do not alow any reorg attempts essentially + config.tenure_last_block_proposal_timeout = Duration::from_secs(100_000); + }, + |_| {}, + None, + None, + ); + let all_signers = signer_test + .signer_stacks_private_keys + .iter() + .map(StacksPublicKey::from_private) + .collect::>(); + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + let short_timeout = 30; + signer_test.boot_to_epoch_3(); + info!("------------------------- Starting Tenure A -------------------------"); + info!("------------------------- Test Mine Nakamoto Block N -------------------------"); + let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone(); + let info_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + + // submit a tx so that the miner will mine a stacks block + let mut sender_nonce = 0; 
+ let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); + let tx = submit_tx(&http_origin, &transfer_tx); + sender_nonce += 1; + info!("Submitted tx {tx} in to mine block N"); + wait_for(short_timeout, || { + let info_after = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + Ok(info_after.stacks_tip_height > info_before.stacks_tip_height) + }) + .expect("Timed out waiting for block to be mined and processed"); + + // Ensure that the block was accepted globally so the stacks tip has advanced to N + let info_after = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + assert_eq!( + info_before.stacks_tip_height + 1, + info_after.stacks_tip_height + ); + let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); + let block_n = nakamoto_blocks.last().unwrap(); + assert_eq!(info_after.stacks_tip.to_string(), block_n.block_hash); + + info!("------------------------- Attempt to Mine Nakamoto Block N+1 -------------------------"); + // Make more than >70% of the signers ignore the block proposal to ensure it it is not globally accepted/rejected + let ignoring_signers: Vec<_> = all_signers + .iter() + .cloned() + .take(num_signers * 7 / 10) + .collect(); + let non_ignoring_signers: Vec<_> = all_signers + .iter() + .cloned() + .skip(num_signers * 7 / 10) + .collect(); + TEST_IGNORE_ALL_BLOCK_PROPOSALS.set(ignoring_signers.clone()); + // Clear the stackerdb chunks + test_observer::clear(); + + let blocks_before = mined_blocks.load(Ordering::SeqCst); + let info_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + // submit a tx so that the miner will ATTEMPT to mine a stacks block N+1 + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, 
+ ); + let tx = submit_tx(&http_origin, &transfer_tx); + + info!("Submitted tx {tx} in to attempt to mine block N+1"); + wait_for(short_timeout, || { + let accepted_signers = test_observer::get_stackerdb_chunks() + .into_iter() + .flat_map(|chunk| chunk.modified_slots) + .filter_map(|chunk| { + let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + .expect("Failed to deserialize SignerMessage"); + match message { + SignerMessage::BlockResponse(BlockResponse::Accepted(accepted)) => { + non_ignoring_signers.iter().find(|key| { + key.verify(accepted.signer_signature_hash.bits(), &accepted.signature) + .is_ok() + }) + } + _ => None, + } + }) + .collect::>(); + Ok(accepted_signers.len() + ignoring_signers.len() == num_signers) + }) + .expect("FAIL: Timed out waiting for block proposal acceptance"); + + let blocks_after = mined_blocks.load(Ordering::SeqCst); + let info_after = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + assert_eq!(blocks_after, blocks_before); + assert_eq!(info_after, info_before); + // Ensure that the block was NOT accepted globally so the stacks tip has NOT advanced to N+1 + let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); + let block_n_1 = nakamoto_blocks.last().unwrap(); + assert_ne!(block_n_1, block_n); + assert_ne!(info_after.stacks_tip.to_string(), block_n_1.block_hash); + + info!("------------------------- Starting Tenure B -------------------------"); + let blocks_before = mined_blocks.load(Ordering::SeqCst); + let info_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + + // Clear the test observer so any old rejections are not counted + test_observer::clear(); + + // Start a new tenure and ensure the we see the expected rejections + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || { + let rejected_signers = test_observer::get_stackerdb_chunks() + .into_iter() + .flat_map(|chunk| 
chunk.modified_slots) + .filter_map(|chunk| { + let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + .expect("Failed to deserialize SignerMessage"); + match message { + SignerMessage::BlockResponse(BlockResponse::Rejected(BlockRejection { + signature, + signer_signature_hash, + .. + })) => non_ignoring_signers.iter().find(|key| { + key.verify(signer_signature_hash.bits(), &signature).is_ok() + }), + _ => None, + } + }) + .collect::>(); + Ok(rejected_signers.len() + ignoring_signers.len() == num_signers) + }, + ) + .expect("FAIL: Timed out waiting for block proposal rejections"); + + let blocks_after = mined_blocks.load(Ordering::SeqCst); + let info_after = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + assert_eq!(blocks_after, blocks_before); + assert_eq!(info_after.stacks_tip, info_before.stacks_tip); + // Ensure that the block was NOT accepted globally so the stacks tip has NOT advanced to N+1' + let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); + let block_n_1_prime = nakamoto_blocks.last().unwrap(); + assert_ne!(block_n_1, block_n_1_prime); + assert_ne!( + info_after.stacks_tip.to_string(), + block_n_1_prime.block_hash + ); +} + +#[test] +#[ignore] +/// Test that when 70% of signers accept a block, mark it globally accepted, but a miner ends its tenure +/// before it receives these signatures, the miner can recover in the following tenure. +/// +/// Test Setup: +/// The test spins up five stacks signers, one miner Nakamoto node, and a corresponding bitcoind. +/// The stacks node is then advanced to Epoch 3.0 boundary to allow block signing. +/// +/// Test Execution: +/// The node mines 1 stacks block N (all signers sign it). The subsequent block N+1 is proposed, but >70% accept it. +/// The signers delay broadcasting the block and the miner ends its tenure before it receives these signatures. The +/// miner will propose an invalid block N+1' which all signers reject. 
The broadcast delay is removed and the miner +/// proposes a new block N+2 which all signers accept. +/// +/// Test Assertion: +/// Stacks tip advances to N+2 +fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let nmb_txs = 3; + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let mut signer_test: SignerTest = SignerTest::new( + num_signers, + vec![(sender_addr, (send_amt + send_fee) * nmb_txs)], + ); + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + signer_test.boot_to_epoch_3(); + + info!("------------------------- Starting Tenure A -------------------------"); + info!("------------------------- Test Mine Nakamoto Block N -------------------------"); + + // wait until we get a sortition. 
+ // we might miss a block-commit at the start of epoch 3 + let burnchain = signer_test.running_nodes.conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + + wait_for(30, || { + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + Ok(tip.sortition) + }) + .expect("Timed out waiting for sortition"); + + let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone(); + let blocks_before = mined_blocks.load(Ordering::SeqCst); + let info_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + // submit a tx so that the miner will mine a stacks block + let mut sender_nonce = 0; + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); + let tx = submit_tx(&http_origin, &transfer_tx); + info!("Submitted tx {tx} in to mine block N"); + + // a tenure has begun, so wait until we mine a block + wait_for(30, || { + let new_height = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + Ok(mined_blocks.load(Ordering::SeqCst) > blocks_before + && new_height > info_before.stacks_tip_height) + }) + .expect("Timed out waiting for block to be mined and processed"); + + sender_nonce += 1; + let info_after = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + assert_eq!( + info_before.stacks_tip_height + 1, + info_after.stacks_tip_height + ); + + let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); + let block_n = nakamoto_blocks.last().unwrap(); + assert_eq!(info_after.stacks_tip.to_string(), block_n.block_hash); + + info!("------------------------- Attempt to Mine Nakamoto Block N+1 -------------------------"); + // Propose a valid block, but force the miner to ignore the returned signatures and delay the block being + // broadcasted to the miner so it can end its 
tenure before block confirmation obtained + // Clear the stackerdb chunks + info!("Forcing miner to ignore block responses for block N+1"); + TEST_IGNORE_SIGNERS.set(true); + info!("Delaying signer block N+1 broadcasting to the miner"); + TEST_PAUSE_BLOCK_BROADCAST.set(true); + test_observer::clear(); + let blocks_before = mined_blocks.load(Ordering::SeqCst); + let info_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); + sender_nonce += 1; + + let tx = submit_tx(&http_origin, &transfer_tx); + + info!("Submitted tx {tx} in to attempt to mine block N+1"); + let mut block = None; + wait_for(30, || { + block = test_observer::get_stackerdb_chunks() + .into_iter() + .flat_map(|chunk| chunk.modified_slots) + .find_map(|chunk| { + let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + .expect("Failed to deserialize SignerMessage"); + match message { + SignerMessage::BlockProposal(proposal) => { + if proposal.block.header.consensus_hash + == info_before.stacks_tip_consensus_hash + { + Some(proposal.block) + } else { + None + } + } + _ => None, + } + }); + let Some(block) = &block else { + return Ok(false); + }; + let signatures = test_observer::get_stackerdb_chunks() + .into_iter() + .flat_map(|chunk| chunk.modified_slots) + .filter_map(|chunk| { + let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + .expect("Failed to deserialize SignerMessage"); + match message { + SignerMessage::BlockResponse(BlockResponse::Accepted(accepted)) => { + if block.header.signer_signature_hash() == accepted.signer_signature_hash { + Some(accepted.signature) + } else { + None + } + } + _ => None, + } + }) + .collect::>(); + Ok(signatures.len() >= num_signers * 7 / 10) + }) + .expect("Test timed out while waiting for signers 
signatures for first block proposal"); + let block = block.unwrap(); + + let blocks_after = mined_blocks.load(Ordering::SeqCst); + let info_after = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + assert_eq!(blocks_after, blocks_before); + assert_eq!(info_after, info_before); + // Ensure that the block was not yet broadcasted to the miner so the stacks tip has NOT advanced to N+1 + let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); + let block_n_same = nakamoto_blocks.last().unwrap(); + assert_ne!(block_n_same, block_n); + assert_ne!(info_after.stacks_tip.to_string(), block_n_same.block_hash); + + info!("------------------------- Starting Tenure B -------------------------"); + let commits_submitted = signer_test.running_nodes.commits_submitted.clone(); + let commits_before = commits_submitted.load(Ordering::SeqCst); + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || { + let commits_count = commits_submitted.load(Ordering::SeqCst); + Ok(commits_count > commits_before) + }, + ) + .unwrap(); + + info!( + "------------------------- Attempt to Mine Nakamoto Block N+1' -------------------------" + ); + // Wait for the miner to propose a new invalid block N+1' + let mut rejected_block = None; + wait_for(30, || { + rejected_block = test_observer::get_stackerdb_chunks() + .into_iter() + .flat_map(|chunk| chunk.modified_slots) + .find_map(|chunk| { + let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + .expect("Failed to deserialize SignerMessage"); + match message { + SignerMessage::BlockProposal(proposal) => { + if proposal.block.header.consensus_hash != block.header.consensus_hash { + assert!( + proposal.block.header.chain_length == block.header.chain_length + ); + Some(proposal.block) + } else { + None + } + } + _ => None, + } + }); + Ok(rejected_block.is_some()) + }) + .expect("Timed out waiting for block proposal of N+1' block proposal"); + + 
info!("Allowing miner to accept block responses again. "); + TEST_IGNORE_SIGNERS.set(false); + info!("Allowing signers to broadcast block N+1 to the miner"); + TEST_PAUSE_BLOCK_BROADCAST.set(false); + + // Assert the N+1' block was rejected + let rejected_block = rejected_block.unwrap(); + wait_for(30, || { + let stackerdb_events = test_observer::get_stackerdb_chunks(); + let block_rejections = stackerdb_events + .into_iter() + .flat_map(|chunk| chunk.modified_slots) + .filter_map(|chunk| { + let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + .expect("Failed to deserialize SignerMessage"); + match message { + SignerMessage::BlockResponse(BlockResponse::Rejected(rejection)) => { + if rejection.signer_signature_hash + == rejected_block.header.signer_signature_hash() + { + Some(rejection) + } else { + None + } + } + _ => None, + } + }) + .collect::>(); + Ok(block_rejections.len() >= num_signers * 7 / 10) + }) + .expect("FAIL: Timed out waiting for block proposal rejections"); + + // Induce block N+2 to get mined + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); + + let tx = submit_tx(&http_origin, &transfer_tx); + info!("Submitted tx {tx} in to attempt to mine block N+2"); + + info!("------------------------- Asserting a both N+1 and N+2 are accepted -------------------------"); + wait_for(30, || { + // N.B. 
have to use /v2/info because mined_blocks only increments if the miner's signing + // coordinator returns successfully (meaning, mined_blocks won't increment for block N+1) + let info = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + + Ok(info_before.stacks_tip_height + 2 <= info.stacks_tip_height) + }) + .expect("Timed out waiting for blocks to be mined"); + + let info_after = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + + assert_eq!( + info_before.stacks_tip_height + 2, + info_after.stacks_tip_height + ); + let nmb_signatures = signer_test + .stacks_client + .get_tenure_tip(&info_after.stacks_tip_consensus_hash) + .expect("Failed to get tip") + .as_stacks_nakamoto() + .expect("Not a Nakamoto block") + .signer_signature + .len(); + assert!(nmb_signatures >= num_signers * 7 / 10); + + // Ensure that the block was accepted globally so the stacks tip has advanced to N+2 + let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); + let block_n_2 = nakamoto_blocks.last().unwrap(); + assert_eq!(info_after.stacks_tip.to_string(), block_n_2.block_hash); + assert_ne!(block_n_2, block_n); +} + +/// Test a scenario where: +/// Two miners boot to Nakamoto. +/// Miner 1 wins the first tenure and proposes a block N with a TenureChangePayload +/// Signers accept and the stacks tip advances to N +/// Miner 2 wins the second tenure B but its proposed blocks are rejected by the signers. 
+/// Mine 2 empty burn blocks (simulate fast blocks scenario) +/// Miner 2 proposes block N+1 with a TenureChangePayload +/// Signers accept and the stacks tip advances to N+1 +/// Miner 2 proposes block N+2 with a TokenTransfer +/// Signers accept and the stacks tip advances to N+2 +/// Mine an empty burn block +/// Miner 2 proposes block N+3 with a TenureExtend +/// Signers accept and the chain advances to N+3 +/// Miner 1 wins the next tenure and proposes a block N+4 with a TenureChangePayload +/// Signers accept and the chain advances to N+4 +/// Asserts: +/// - Block N+1 contains the TenureChangePayload +/// - Block N+2 contains the TokenTransfer +/// - Block N+3 contains the TenureExtend +/// - Block N+4 contains the TenureChangePayload +/// - The stacks tip advances to N+4 +#[test] +#[ignore] +fn continue_after_fast_block_no_sortition() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let num_signers = 5; + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let num_txs = 1; + let sender_nonce = 0; + + let btc_miner_1_seed = vec![1, 1, 1, 1]; + let btc_miner_2_seed = vec![2, 2, 2, 2]; + let btc_miner_1_pk = Keychain::default(btc_miner_1_seed.clone()).get_pub_key(); + let btc_miner_2_pk = Keychain::default(btc_miner_2_seed.clone()).get_pub_key(); + + let node_1_rpc = gen_random_port(); + let node_1_p2p = gen_random_port(); + let node_2_rpc = gen_random_port(); + let node_2_p2p = gen_random_port(); + + let localhost = "127.0.0.1"; + let node_1_rpc_bind = format!("{localhost}:{node_1_rpc}"); + let node_2_rpc_bind = format!("{localhost}:{node_2_rpc}"); + let mut node_2_listeners = Vec::new(); + + let max_nakamoto_tenures = 30; + + info!("------------------------- Test Setup -------------------------"); + // partition the signer set so that ~half are listening and using node 1 for 
RPC and events, + // and the rest are using node 2 + + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![(sender_addr, (send_amt + send_fee) * num_txs)], + |signer_config| { + let node_host = if signer_config.endpoint.port() % 2 == 0 { + &node_1_rpc_bind + } else { + &node_2_rpc_bind + }; + signer_config.node_host = node_host.to_string(); + }, + |config| { + config.node.rpc_bind = format!("{localhost}:{node_1_rpc}"); + config.node.p2p_bind = format!("{localhost}:{node_1_p2p}"); + config.node.data_url = format!("http://{localhost}:{node_1_rpc}"); + config.node.p2p_address = format!("{localhost}:{node_1_p2p}"); + config.miner.wait_on_interim_blocks = Duration::from_secs(5); + config.node.pox_sync_sample_secs = 30; + config.burnchain.pox_reward_length = Some(max_nakamoto_tenures); + + config.node.seed = btc_miner_1_seed.clone(); + config.node.local_peer_seed = btc_miner_1_seed.clone(); + config.burnchain.local_mining_public_key = Some(btc_miner_1_pk.to_hex()); + config.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[1])); + + config.events_observers.retain(|listener| { + let Ok(addr) = std::net::SocketAddr::from_str(&listener.endpoint) else { + warn!( + "Cannot parse {} to a socket, assuming it isn't a signer-listener binding", + listener.endpoint + ); + return true; + }; + if addr.port() % 2 == 0 || addr.port() == test_observer::EVENT_OBSERVER_PORT { + return true; + } + node_2_listeners.push(listener.clone()); + false + }) + }, + Some(vec![btc_miner_1_pk, btc_miner_2_pk]), + None, + ); + let conf = signer_test.running_nodes.conf.clone(); + let mut conf_node_2 = conf.clone(); + conf_node_2.node.rpc_bind = format!("{localhost}:{node_2_rpc}"); + conf_node_2.node.p2p_bind = format!("{localhost}:{node_2_p2p}"); + conf_node_2.node.data_url = format!("http://{localhost}:{node_2_rpc}"); + conf_node_2.node.p2p_address = format!("{localhost}:{node_2_p2p}"); + conf_node_2.node.seed = btc_miner_2_seed.clone(); + 
conf_node_2.burnchain.local_mining_public_key = Some(btc_miner_2_pk.to_hex()); + conf_node_2.node.local_peer_seed = btc_miner_2_seed.clone(); + conf_node_2.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[2])); + conf_node_2.node.miner = true; + conf_node_2.events_observers.clear(); + conf_node_2.events_observers.extend(node_2_listeners); + assert!(!conf_node_2.events_observers.is_empty()); + + let node_1_sk = Secp256k1PrivateKey::from_seed(&conf.node.local_peer_seed); + let node_1_pk = StacksPublicKey::from_private(&node_1_sk); + + conf_node_2.node.working_dir = format!("{}-1", conf_node_2.node.working_dir); + + conf_node_2.node.set_bootstrap_nodes( + format!("{}@{}", &node_1_pk.to_hex(), conf.node.p2p_bind), + conf.burnchain.chain_id, + conf.burnchain.peer_version, + ); + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + + let mut run_loop_2 = boot_nakamoto::BootRunLoop::new(conf_node_2.clone()).unwrap(); + let run_loop_stopper_2 = run_loop_2.get_termination_switch(); + let rl2_coord_channels = run_loop_2.coordinator_channels(); + let Counters { + naka_submitted_commits: rl2_commits, + naka_skip_commit_op: rl2_skip_commit_op, + naka_mined_blocks: blocks_mined2, + .. 
+ } = run_loop_2.counters(); + + let rl1_commits = signer_test.running_nodes.commits_submitted.clone(); + let blocks_mined1 = signer_test.running_nodes.nakamoto_blocks_mined.clone(); + + // Some helper functions for verifying the blocks contain their expected transactions + let verify_last_block_contains_transfer_tx = || { + let blocks = test_observer::get_blocks(); + let last_block = &blocks.last().unwrap(); + let transactions = last_block["transactions"].as_array().unwrap(); + let tx = transactions.first().expect("No transactions in block"); + let raw_tx = tx["raw_tx"].as_str().unwrap(); + let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); + let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); + assert!( + matches!(parsed.payload, TransactionPayload::TokenTransfer(_, _, _)), + "Expected token transfer transaction, got {parsed:?}" + ); + }; + + info!("------------------------- Pause Miner 2's Block Commits -------------------------"); + + // Make sure Miner 2 cannot win a sortition at first. 
+ rl2_skip_commit_op.set(true); + + info!("------------------------- Boot to Epoch 3.0 -------------------------"); + + let run_loop_2_thread = thread::Builder::new() + .name("run_loop_2".into()) + .spawn(move || run_loop_2.start(None, 0)) + .unwrap(); + + signer_test.boot_to_epoch_3(); + + wait_for(120, || { + let Some(node_1_info) = get_chain_info_opt(&conf) else { + return Ok(false); + }; + let Some(node_2_info) = get_chain_info_opt(&conf_node_2) else { + return Ok(false); + }; + Ok(node_1_info.stacks_tip_height == node_2_info.stacks_tip_height) + }) + .expect("Timed out waiting for boostrapped node to catch up to the miner"); + + let mining_pkh_1 = Hash160::from_node_public_key(&StacksPublicKey::from_private( + &conf.miner.mining_key.unwrap(), + )); + let mining_pkh_2 = Hash160::from_node_public_key(&StacksPublicKey::from_private( + &conf_node_2.miner.mining_key.unwrap(), + )); + debug!("The mining key for miner 1 is {mining_pkh_1}"); + debug!("The mining key for miner 2 is {mining_pkh_2}"); + + info!("------------------------- Reached Epoch 3.0 -------------------------"); + + let burnchain = signer_test.running_nodes.conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + + let all_signers = signer_test + .signer_stacks_private_keys + .iter() + .map(StacksPublicKey::from_private) + .collect::>(); + let get_burn_height = || { + SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) + .unwrap() + .block_height + }; + let starting_peer_height = get_chain_info(&conf).stacks_tip_height; + let starting_burn_height = get_burn_height(); + let mut btc_blocks_mined = 0; + + info!("------------------------- Pause Miner 1's Block Commit -------------------------"); + // Make sure miner 1 doesn't submit any further block commits for the next tenure BEFORE mining the bitcoin block + signer_test + .running_nodes + .nakamoto_test_skip_commit_op + .set(true); + + info!("------------------------- Miner 1 Mines a Normal Tenure A 
-------------------------"); + let blocks_processed_before_1 = blocks_mined1.load(Ordering::SeqCst); + let nmb_old_blocks = test_observer::get_blocks().len(); + let stacks_height_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + + signer_test + .running_nodes + .btc_regtest_controller + .build_next_block(1); + btc_blocks_mined += 1; + + // assure we have a successful sortition that miner A won + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + assert!(tip.sortition); + assert_eq!(tip.miner_pk_hash.unwrap(), mining_pkh_1); + + // wait for the new block to be processed + wait_for(60, || { + let stacks_height = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + Ok( + blocks_mined1.load(Ordering::SeqCst) > blocks_processed_before_1 + && stacks_height > stacks_height_before + && test_observer::get_blocks().len() > nmb_old_blocks, + ) + }) + .unwrap(); + + verify_last_block_contains_tenure_change_tx(TenureChangeCause::BlockFound); + + info!("------------------------- Make Signers Reject All Subsequent Proposals -------------------------"); + + let stacks_height_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + + // Make all signers ignore block proposals + let ignoring_signers = all_signers.to_vec(); + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(ignoring_signers.clone()); + + info!("------------------------- Submit Miner 2 Block Commit -------------------------"); + let rejections_before = signer_test + .running_nodes + .nakamoto_blocks_rejected + .load(Ordering::SeqCst); + + let rl2_commits_before = rl2_commits.load(Ordering::SeqCst); + // Unpause miner 2's block commits + rl2_skip_commit_op.set(false); + + // Ensure the miner 2 submits a block commit before mining the bitcoin block + wait_for(30, || { + Ok(rl2_commits.load(Ordering::SeqCst) > 
rl2_commits_before) + }) + .unwrap(); + + // Make miner 2 also fail to submit any FURTHER block commits + rl2_skip_commit_op.set(true); + + let burn_height_before = get_burn_height(); + + info!("------------------------- Miner 2 Mines an Empty Tenure B -------------------------"; + "burn_height_before" => burn_height_before, + "rejections_before" => rejections_before, + ); + + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || Ok(get_burn_height() > burn_height_before), + ) + .unwrap(); + btc_blocks_mined += 1; + + // assure we have a successful sortition that miner B won + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + assert!(tip.sortition); + assert_eq!(tip.miner_pk_hash.unwrap(), mining_pkh_2); + + info!("----- Waiting for block rejections -----"); + let min_rejections = num_signers * 4 / 10; + // Wait until we have some block rejections + wait_for(30, || { + std::thread::sleep(Duration::from_secs(1)); + let chunks = test_observer::get_stackerdb_chunks(); + let rejections: Vec<_> = chunks + .into_iter() + .flat_map(|chunk| chunk.modified_slots) + .filter(|chunk| { + let Ok(message) = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + else { + return false; + }; + matches!( + message, + SignerMessage::BlockResponse(BlockResponse::Rejected(_)) + ) + }) + .collect(); + Ok(rejections.len() >= min_rejections) + }) + .expect("Timed out waiting for block rejections"); + + // Mine another couple burn blocks and ensure there is _no_ sortition + info!("------------------------- Mine Two Burn Block(s) with No Sortitions -------------------------"); + for _ in 0..2 { + let blocks_processed_before_1 = blocks_mined1.load(Ordering::SeqCst); + let blocks_processed_before_2 = blocks_mined2.load(Ordering::SeqCst); + let burn_height_before = get_burn_height(); + let commits_before_1 = rl1_commits.load(Ordering::SeqCst); + let commits_before_2 = rl2_commits.load(Ordering::SeqCst); + + next_block_and( 
+ &mut signer_test.running_nodes.btc_regtest_controller, + 30, + || Ok(get_burn_height() > burn_height_before), + ) + .unwrap(); + btc_blocks_mined += 1; + + assert_eq!(rl1_commits.load(Ordering::SeqCst), commits_before_1); + assert_eq!(rl2_commits.load(Ordering::SeqCst), commits_before_2); + assert_eq!( + blocks_mined1.load(Ordering::SeqCst), + blocks_processed_before_1 + ); + assert_eq!( + blocks_mined2.load(Ordering::SeqCst), + blocks_processed_before_2 + ); + + // assure we have NO sortition + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + assert!(!tip.sortition); + } + + // Verify that no Stacks blocks have been mined (signers are ignoring) and no commits have been submitted by either miner + let stacks_height = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + assert_eq!(stacks_height, stacks_height_before); + let stacks_height_before = stacks_height; + + info!("------------------------- Enabling Signer Block Proposals -------------------------"; + "stacks_height" => stacks_height_before, + ); + + let blocks_processed_before_2 = blocks_mined2.load(Ordering::SeqCst); + let nmb_old_blocks = test_observer::get_blocks().len(); + // Allow signers to respond to proposals again + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(Vec::new()); + + info!("------------------------- Wait for Miner B's Block N -------------------------"); + // wait for the new block to be processed + wait_for(30, || { + let stacks_height = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + Ok( + blocks_mined2.load(Ordering::SeqCst) > blocks_processed_before_2 + && stacks_height > stacks_height_before + && test_observer::get_blocks().len() > nmb_old_blocks, + ) + }) + .expect("Timed out waiting for block to be mined and processed"); + + info!( + "------------------------- Verify Tenure Change Tx in Miner B's Block N -------------------------" + ); + 
verify_last_block_contains_tenure_change_tx(TenureChangeCause::BlockFound); + + info!("------------------------- Wait for Miner B's Block N+1 -------------------------"); + + let nmb_old_blocks = test_observer::get_blocks().len(); + let blocks_processed_before_2 = blocks_mined2.load(Ordering::SeqCst); + let stacks_height_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + + // submit a tx so that the miner will mine an extra block + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); + submit_tx(&http_origin, &transfer_tx); + + // wait for the new block to be processed + wait_for(30, || { + let stacks_height = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + Ok( + blocks_mined2.load(Ordering::SeqCst) > blocks_processed_before_2 + && stacks_height > stacks_height_before + && test_observer::get_blocks().len() > nmb_old_blocks, + ) + }) + .expect("Timed out waiting for block to be mined and processed"); + + info!("------------------------- Verify Miner B's Block N+1 -------------------------"); + + verify_last_block_contains_transfer_tx(); + + info!("------------------------- Mine An Empty Sortition -------------------------"); + let nmb_old_blocks = test_observer::get_blocks().len(); + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || { + Ok(get_burn_height() > burn_height_before + && test_observer::get_blocks().len() > nmb_old_blocks) + }, + ) + .unwrap(); + btc_blocks_mined += 1; + + info!("------------------------- Verify Miner B's Issues a Tenure Change Extend in Block N+2 -------------------------"); + verify_last_block_contains_tenure_change_tx(TenureChangeCause::Extended); + + info!("------------------------- Unpause Miner A's Block Commits -------------------------"); + let commits_before_1 
= rl1_commits.load(Ordering::SeqCst); + signer_test + .running_nodes + .nakamoto_test_skip_commit_op + .set(false); + wait_for(30, || { + Ok(rl1_commits.load(Ordering::SeqCst) > commits_before_1) + }) + .unwrap(); + + info!("------------------------- Run Miner A's Tenure -------------------------"); + let nmb_old_blocks = test_observer::get_blocks().len(); + let burn_height_before = get_burn_height(); + let blocks_processed_before_1 = blocks_mined1.load(Ordering::SeqCst); + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || { + Ok(get_burn_height() > burn_height_before + && blocks_mined1.load(Ordering::SeqCst) > blocks_processed_before_1 + && test_observer::get_blocks().len() > nmb_old_blocks) + }, + ) + .unwrap(); + btc_blocks_mined += 1; + + // assure we have a successful sortition that miner A won + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + assert!(tip.sortition); + assert_eq!(tip.miner_pk_hash.unwrap(), mining_pkh_1); + + info!("------------------------- Verify Miner A's Issued a Tenure Change in Block N+4 -------------------------"); + verify_last_block_contains_tenure_change_tx(TenureChangeCause::BlockFound); + + info!( + "------------------------- Confirm Burn and Stacks Block Heights -------------------------" + ); + let peer_info = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + + assert_eq!(get_burn_height(), starting_burn_height + btc_blocks_mined); + assert_eq!(peer_info.stacks_tip_height, starting_peer_height + 5); + + info!("------------------------- Shutdown -------------------------"); + rl2_coord_channels + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper_2.store(false, Ordering::SeqCst); + run_loop_2_thread.join().unwrap(); + signer_test.shutdown(); +} + +#[test] +#[ignore] +/// Test that we can mine a tenure extend and then continue mining afterwards. 
+fn continue_after_tenure_extend() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let send_amt = 100; + let send_fee = 180; + let mut signer_test: SignerTest = + SignerTest::new(num_signers, vec![(sender_addr, (send_amt + send_fee) * 5)]); + let timeout = Duration::from_secs(200); + let coord_channel = signer_test.running_nodes.coord_channel.clone(); + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + + signer_test.boot_to_epoch_3(); + + info!("------------------------- Mine Normal Tenure -------------------------"); + signer_test.mine_and_verify_confirmed_naka_block(timeout, num_signers, true); + + info!("------------------------- Extend Tenure -------------------------"); + signer_test + .running_nodes + .nakamoto_test_skip_commit_op + .set(true); + + // It's possible that we have a pending block commit already. + // Mine two BTC blocks to "flush" this commit. 
+ let burn_height = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .burn_block_height; + for i in 0..2 { + info!( + "------------- After pausing commits, triggering 2 BTC blocks: ({} of 2) -----------", + i + 1 + ); + + let blocks_processed_before = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + signer_test + .running_nodes + .btc_regtest_controller + .build_next_block(1); + + wait_for(60, || { + let blocks_processed_after = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + Ok(blocks_processed_after > blocks_processed_before) + }) + .expect("Timed out waiting for tenure extend block"); + } + + wait_for(30, || { + let new_burn_height = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .burn_block_height; + Ok(new_burn_height == burn_height + 2) + }) + .expect("Timed out waiting for burnchain to advance"); + + // The last block should have a single instruction in it, the tenure extend + let blocks = test_observer::get_blocks(); + let last_block = blocks.last().unwrap(); + let transactions = last_block["transactions"].as_array().unwrap(); + let tx = transactions.first().expect("No transactions in block"); + let raw_tx = tx["raw_tx"].as_str().unwrap(); + let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); + let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); + match &parsed.payload { + TransactionPayload::TenureChange(payload) + if payload.cause == TenureChangeCause::Extended => {} + _ => panic!("Expected tenure extend transaction, got {parsed:?}"), + }; + + // Verify that the miner can continue mining in the tenure with the tenure extend + info!("------------------------- Mine After Tenure Extend -------------------------"); + let mut blocks_processed_before = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + for sender_nonce in 0..5 { + // submit a 
tx so that the miner will mine an extra block + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); + submit_tx(&http_origin, &transfer_tx); + + info!("Submitted transfer tx and waiting for block proposal"); + wait_for(30, || { + let blocks_processed_after = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + Ok(blocks_processed_after > blocks_processed_before) + }) + .expect("Timed out waiting for block proposal"); + blocks_processed_before = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + info!("Block {blocks_processed_before} processed, continuing"); + } + + signer_test.shutdown(); +} + +#[test] +#[ignore] +/// Test that signers can successfully sign a block proposal in the 0th tenure of a reward cycle +/// This ensures there is no race condition in the /v2/pox endpoint which could prevent it from updating +/// on time, possibly triggering an "off by one" like behaviour in the 0th tenure. 
+/// +fn signing_in_0th_tenure_of_reward_cycle() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let mut signer_test: SignerTest = SignerTest::new(num_signers, vec![]); + let signer_public_keys = signer_test + .signer_stacks_private_keys + .iter() + .map(StacksPublicKey::from_private) + .collect::>(); + let long_timeout = Duration::from_secs(200); + signer_test.boot_to_epoch_3(); + let curr_reward_cycle = signer_test.get_current_reward_cycle(); + let next_reward_cycle = curr_reward_cycle + 1; + // Mine until the boundary of the first full Nakamoto reward cycles (epoch 3 starts in the middle of one) + let next_reward_cycle_height_boundary = signer_test + .running_nodes + .btc_regtest_controller + .get_burnchain() + .reward_cycle_to_block_height(next_reward_cycle) + .saturating_sub(1); + + info!("------------------------- Advancing to {next_reward_cycle} Boundary at Block {next_reward_cycle_height_boundary} -------------------------"); + signer_test.run_until_burnchain_height_nakamoto( + long_timeout, + next_reward_cycle_height_boundary, + num_signers, + ); - // Make sure that ALL signers accepted the block proposal + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + let get_v3_signer = |pubkey: &Secp256k1PublicKey, reward_cycle: u64| { + let url = &format!( + "{http_origin}/v3/signer/{pk}/{reward_cycle}", + pk = pubkey.to_hex() + ); + info!("Send request: GET {url}"); + reqwest::blocking::get(url) + .unwrap_or_else(|e| panic!("GET request failed: {e}")) + .json::() + .unwrap() + .blocks_signed + }; + + assert_eq!(signer_test.get_current_reward_cycle(), curr_reward_cycle); + + for signer in &signer_public_keys { + let blocks_signed = get_v3_signer(signer, next_reward_cycle); + assert_eq!(blocks_signed, 
0); + } + + info!("------------------------- Enter Reward Cycle {next_reward_cycle} -------------------------"); + for signer in &signer_public_keys { + let blocks_signed = get_v3_signer(signer, next_reward_cycle); + assert_eq!(blocks_signed, 0); + } + let blocks_before = signer_test + .running_nodes + .nakamoto_blocks_mined + .load(Ordering::SeqCst); signer_test - .wait_for_block_acceptance( - short_timeout, - &block_n_2.signer_signature_hash, - &all_signers, - ) - .expect("Timed out waiting for block acceptance of N+2"); + .running_nodes + .btc_regtest_controller + .build_next_block(1); + + wait_for(30, || { + Ok(signer_test + .running_nodes + .nakamoto_blocks_mined + .load(Ordering::SeqCst) + > blocks_before) + }) + .unwrap(); + + let block_mined = test_observer::get_mined_nakamoto_blocks() + .last() + .unwrap() + .clone(); + // Must ensure that the signers that signed the block have their blocks_signed updated appropriately + for signature in &block_mined.signer_signature { + let signer = signer_public_keys + .iter() + .find(|pk| { + pk.verify(block_mined.signer_signature_hash.as_bytes(), signature) + .unwrap() + }) + .expect("Unknown signer signature"); + let blocks_signed = get_v3_signer(signer, next_reward_cycle); + assert_eq!(blocks_signed, 1); + } + assert_eq!(signer_test.get_current_reward_cycle(), next_reward_cycle); } -#[test] -#[ignore] -/// Test that signers that have accepted a locally signed block N+1 built in tenure A can sign a block proposed during a -/// new tenure B built upon the last globally accepted block N if the timeout is exceeded, i.e. a reorg can occur at a tenure boundary. -/// -/// Test Setup: -/// The test spins up five stacks signers, one miner Nakamoto node, and a corresponding bitcoind. -/// The stacks node is then advanced to Epoch 3.0 boundary to allow block signing. -/// -/// Test Execution: -/// The node mines 1 stacks block N (all signers sign it). The subsequent block N+1 is proposed, but <30% accept it. 
The remaining signers -/// do not make a decision on the block. A new tenure begins and the miner proposes a new block N+1' which all signers accept. -/// -/// Test Assertion: -/// Stacks tip advances to N+1' -fn reorg_locally_accepted_blocks_across_tenures_succeeds() { - if env::var("BITCOIND_TEST") != Ok("1".into()) { - return; - } +/// This test involves two miners with a custom chain id, each mining tenures with 6 blocks each. +/// Half of the signers are attached to each miner, so the test also verifies that +/// the signers' messages successfully make their way to the active miner. +#[test] +#[ignore] +fn multiple_miners_with_custom_chain_id() { + let num_signers = 5; + let max_nakamoto_tenures = 20; + let inter_blocks_per_tenure = 5; + + // setup sender + recipient for a test stx transfer + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 1000; + let send_fee = 180; + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + + let btc_miner_1_seed = vec![1, 1, 1, 1]; + let btc_miner_2_seed = vec![2, 2, 2, 2]; + let btc_miner_1_pk = Keychain::default(btc_miner_1_seed.clone()).get_pub_key(); + let btc_miner_2_pk = Keychain::default(btc_miner_2_seed.clone()).get_pub_key(); + + let node_1_rpc = gen_random_port(); + let node_1_p2p = gen_random_port(); + let node_2_rpc = gen_random_port(); + let node_2_p2p = gen_random_port(); + + let localhost = "127.0.0.1"; + let node_1_rpc_bind = format!("{localhost}:{node_1_rpc}"); + let node_2_rpc_bind = format!("{localhost}:{node_2_rpc}"); + let mut node_2_listeners = Vec::new(); + let chain_id = 0x87654321; + // partition the signer set so that ~half are listening and using node 1 for RPC and events, + // and the rest are using node 2 + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![( + sender_addr, + (send_amt + send_fee) * max_nakamoto_tenures * inter_blocks_per_tenure, + )], + |signer_config| { 
+ let node_host = if signer_config.endpoint.port() % 2 == 0 { + &node_1_rpc_bind + } else { + &node_2_rpc_bind + }; + signer_config.node_host = node_host.to_string(); + signer_config.chain_id = Some(chain_id) + }, + |config| { + config.node.rpc_bind = format!("{localhost}:{node_1_rpc}"); + config.node.p2p_bind = format!("{localhost}:{node_1_p2p}"); + config.node.data_url = format!("http://{localhost}:{node_1_rpc}"); + config.node.p2p_address = format!("{localhost}:{node_1_p2p}"); + config.miner.wait_on_interim_blocks = Duration::from_secs(5); + config.node.pox_sync_sample_secs = 30; + config.burnchain.chain_id = chain_id; + + config.node.seed = btc_miner_1_seed.clone(); + config.node.local_peer_seed = btc_miner_1_seed.clone(); + config.burnchain.local_mining_public_key = Some(btc_miner_1_pk.to_hex()); + config.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[1])); + + config.events_observers.retain(|listener| { + let Ok(addr) = std::net::SocketAddr::from_str(&listener.endpoint) else { + warn!( + "Cannot parse {} to a socket, assuming it isn't a signer-listener binding", + listener.endpoint + ); + return true; + }; + if addr.port() % 2 == 0 || addr.port() == test_observer::EVENT_OBSERVER_PORT { + return true; + } + node_2_listeners.push(listener.clone()); + false + }) + }, + Some(vec![btc_miner_1_pk, btc_miner_2_pk]), + None, + ); + let blocks_mined1 = signer_test.running_nodes.nakamoto_blocks_mined.clone(); + + let conf = signer_test.running_nodes.conf.clone(); + let mut conf_node_2 = conf.clone(); + conf_node_2.node.rpc_bind = format!("{localhost}:{node_2_rpc}"); + conf_node_2.node.p2p_bind = format!("{localhost}:{node_2_p2p}"); + conf_node_2.node.data_url = format!("http://{localhost}:{node_2_rpc}"); + conf_node_2.node.p2p_address = format!("{localhost}:{node_2_p2p}"); + conf_node_2.node.seed = btc_miner_2_seed.clone(); + conf_node_2.burnchain.local_mining_public_key = Some(btc_miner_2_pk.to_hex()); + conf_node_2.node.local_peer_seed = 
btc_miner_2_seed.clone(); + conf_node_2.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[2])); + conf_node_2.node.miner = true; + conf_node_2.events_observers.clear(); + conf_node_2.events_observers.extend(node_2_listeners); + + assert!(!conf_node_2.events_observers.is_empty()); - tracing_subscriber::registry() - .with(fmt::layer()) - .with(EnvFilter::from_default_env()) - .init(); + let node_1_sk = Secp256k1PrivateKey::from_seed(&conf.node.local_peer_seed); + let node_1_pk = StacksPublicKey::from_private(&node_1_sk); - info!("------------------------- Test Setup -------------------------"); - let num_signers = 5; - let sender_sk = Secp256k1PrivateKey::new(); - let sender_addr = tests::to_addr(&sender_sk); - let send_amt = 100; - let send_fee = 180; - let nmb_txs = 2; - let recipient = PrincipalData::from(StacksAddress::burn_address(false)); - let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( - num_signers, - vec![(sender_addr, (send_amt + send_fee) * nmb_txs)], - |config| { - // Just accept all reorg attempts - config.tenure_last_block_proposal_timeout = Duration::from_secs(0); - }, - |config| { - config.miner.block_commit_delay = Duration::from_secs(0); - }, - None, - None, + conf_node_2.node.working_dir = format!("{}-1", conf_node_2.node.working_dir); + + conf_node_2.node.set_bootstrap_nodes( + format!("{}@{}", &node_1_pk.to_hex(), conf.node.p2p_bind), + conf.burnchain.chain_id, + conf.burnchain.peer_version, ); - let all_signers = signer_test - .signer_stacks_private_keys - .iter() - .map(StacksPublicKey::from_private) - .collect::>(); - let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); - let short_timeout = 30; + + let http_origin = format!("http://{}", &conf.node.rpc_bind); + + let mut run_loop_2 = boot_nakamoto::BootRunLoop::new(conf_node_2.clone()).unwrap(); + let run_loop_stopper_2 = run_loop_2.get_termination_switch(); + let rl2_coord_channels = run_loop_2.coordinator_channels(); + 
let Counters { + naka_submitted_commits: rl2_commits, + naka_mined_blocks: blocks_mined2, + .. + } = run_loop_2.counters(); + let run_loop_2_thread = thread::Builder::new() + .name("run_loop_2".into()) + .spawn(move || run_loop_2.start(None, 0)) + .unwrap(); + signer_test.boot_to_epoch_3(); - info!("------------------------- Starting Tenure A -------------------------"); - info!("------------------------- Test Mine Nakamoto Block N -------------------------"); - let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone(); - let info_before = signer_test - .stacks_client - .get_peer_info() - .expect("Failed to get peer info"); - // submit a tx so that the miner will mine a stacks block + wait_for(120, || { + let Some(node_1_info) = get_chain_info_opt(&conf) else { + return Ok(false); + }; + let Some(node_2_info) = get_chain_info_opt(&conf_node_2) else { + return Ok(false); + }; + Ok(node_1_info.stacks_tip_height == node_2_info.stacks_tip_height) + }) + .expect("Timed out waiting for follower to catch up to the miner"); + + let pre_nakamoto_peer_1_height = get_chain_info(&conf).stacks_tip_height; + + info!("------------------------- Reached Epoch 3.0 -------------------------"); + + // due to the random nature of mining sortitions, the way this test is structured + // is that we keep track of how many tenures each miner produced, and once enough sortitions + // have been produced such that each miner has produced 3 tenures, we stop and check the + // results at the end + let rl1_coord_channels = signer_test.running_nodes.coord_channel.clone(); + let rl1_commits = signer_test.running_nodes.commits_submitted.clone(); + + let miner_1_pk = StacksPublicKey::from_private(conf.miner.mining_key.as_ref().unwrap()); + let miner_2_pk = StacksPublicKey::from_private(conf_node_2.miner.mining_key.as_ref().unwrap()); + let mut btc_blocks_mined = 1; + let mut miner_1_tenures = 0; + let mut miner_2_tenures = 0; let mut sender_nonce = 0; - let transfer_tx = 
make_stacks_transfer( - &sender_sk, - sender_nonce, - send_fee, - signer_test.running_nodes.conf.burnchain.chain_id, - &recipient, - send_amt, + while !(miner_1_tenures >= 3 && miner_2_tenures >= 3) { + if btc_blocks_mined > max_nakamoto_tenures { + panic!("Produced {btc_blocks_mined} sortitions, but didn't cover the test scenarios, aborting"); + } + let blocks_processed_before = + blocks_mined1.load(Ordering::SeqCst) + blocks_mined2.load(Ordering::SeqCst); + signer_test.mine_block_wait_on_processing( + &[&rl1_coord_channels, &rl2_coord_channels], + &[&rl1_commits, &rl2_commits], + Duration::from_secs(30), + ); + btc_blocks_mined += 1; + + // wait for the new block to be processed + wait_for(60, || { + let blocks_processed = + blocks_mined1.load(Ordering::SeqCst) + blocks_mined2.load(Ordering::SeqCst); + Ok(blocks_processed > blocks_processed_before) + }) + .unwrap(); + + info!( + "Nakamoto blocks mined: {}", + blocks_mined1.load(Ordering::SeqCst) + blocks_mined2.load(Ordering::SeqCst) + ); + + // mine the interim blocks + info!("Mining interim blocks"); + for interim_block_ix in 0..inter_blocks_per_tenure { + let blocks_processed_before = + blocks_mined1.load(Ordering::SeqCst) + blocks_mined2.load(Ordering::SeqCst); + // submit a tx so that the miner will mine an extra block + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); + sender_nonce += 1; + submit_tx(&http_origin, &transfer_tx); + + wait_for(60, || { + let blocks_processed = + blocks_mined1.load(Ordering::SeqCst) + blocks_mined2.load(Ordering::SeqCst); + Ok(blocks_processed > blocks_processed_before) + }) + .unwrap(); + info!("Mined interim block {btc_blocks_mined}:{interim_block_ix}"); + } + + let blocks = get_nakamoto_headers(&conf); + let mut seen_burn_hashes = HashSet::new(); + miner_1_tenures = 0; + miner_2_tenures = 0; + for header in blocks.iter() { + if 
seen_burn_hashes.contains(&header.burn_header_hash) { + continue; + } + seen_burn_hashes.insert(header.burn_header_hash); + + let header = header.anchored_header.as_stacks_nakamoto().unwrap(); + if miner_1_pk + .verify( + header.miner_signature_hash().as_bytes(), + &header.miner_signature, + ) + .unwrap() + { + miner_1_tenures += 1; + } + if miner_2_pk + .verify( + header.miner_signature_hash().as_bytes(), + &header.miner_signature, + ) + .unwrap() + { + miner_2_tenures += 1; + } + } + info!("Miner 1 tenures: {miner_1_tenures}, Miner 2 tenures: {miner_2_tenures}"); + } + + info!( + "New chain info 1: {:?}", + get_chain_info(&signer_test.running_nodes.conf) ); - let tx = submit_tx(&http_origin, &transfer_tx); - sender_nonce += 1; - info!("Submitted tx {tx} in to mine block N"); - wait_for(short_timeout, || { - let info_after = signer_test - .stacks_client - .get_peer_info() - .expect("Failed to get peer info"); - Ok(info_after.stacks_tip_height > info_before.stacks_tip_height) - }) - .expect("Timed out waiting for block to be mined and processed"); - // Ensure that the block was accepted globally so the stacks tip has advanced to N - let info_after = signer_test - .stacks_client - .get_peer_info() - .expect("Failed to get peer info"); + info!("New chain info 2: {:?}", get_chain_info(&conf_node_2)); + + let peer_1_height = get_chain_info(&conf).stacks_tip_height; + let peer_2_height = get_chain_info(&conf_node_2).stacks_tip_height; + info!("Peer height information"; "peer_1" => peer_1_height, "peer_2" => peer_2_height, "pre_naka_height" => pre_nakamoto_peer_1_height); + assert_eq!(peer_1_height, peer_2_height); assert_eq!( - info_before.stacks_tip_height + 1, - info_after.stacks_tip_height + peer_1_height, + pre_nakamoto_peer_1_height + (btc_blocks_mined - 1) * (inter_blocks_per_tenure + 1) ); - let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); - let block_n = nakamoto_blocks.last().unwrap(); - assert_eq!(info_after.stacks_tip.to_string(), 
block_n.block_hash); + assert_eq!(btc_blocks_mined, miner_1_tenures + miner_2_tenures); - info!("------------------------- Attempt to Mine Nakamoto Block N+1 -------------------------"); - // Make more than >70% of the signers ignore the block proposal to ensure it it is not globally accepted/rejected - let ignoring_signers: Vec<_> = all_signers - .iter() - .cloned() - .take(num_signers * 7 / 10) - .collect(); - let non_ignoring_signers: Vec<_> = all_signers - .iter() - .cloned() - .skip(num_signers * 7 / 10) - .collect(); - TEST_IGNORE_ALL_BLOCK_PROPOSALS + // Verify both nodes have the correct chain id + let miner1_info = get_chain_info(&signer_test.running_nodes.conf); + assert_eq!(miner1_info.network_id, chain_id); + + let miner2_info = get_chain_info(&conf_node_2); + assert_eq!(miner2_info.network_id, chain_id); + + rl2_coord_channels .lock() - .unwrap() - .replace(ignoring_signers.clone()); - // Clear the stackerdb chunks - test_observer::clear(); + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper_2.store(false, Ordering::SeqCst); + run_loop_2_thread.join().unwrap(); + signer_test.shutdown(); +} - let blocks_before = mined_blocks.load(Ordering::SeqCst); - let info_before = signer_test - .stacks_client - .get_peer_info() - .expect("Failed to get peer info"); +#[test] +#[ignore] +/// This test checks the behavior of the `block_commit_delay_ms` configuration option. 
+fn block_commit_delay() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } - // submit a tx so that the miner will ATTEMPT to mine a stacks block N+1 - let transfer_tx = make_stacks_transfer( - &sender_sk, - sender_nonce, - send_fee, - signer_test.running_nodes.conf.burnchain.chain_id, - &recipient, - send_amt, + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![], + |config| { + // make the duration long enough that the miner will be marked as malicious + config.block_proposal_timeout = Duration::from_secs(600); + }, + |config| { + // Set the block commit delay to 10 minutes to ensure no block commit is sent + config.miner.block_commit_delay = Duration::from_secs(600); + }, + None, + None, ); - let tx = submit_tx(&http_origin, &transfer_tx); - info!("Submitted tx {tx} in to attempt to mine block N+1"); - wait_for(short_timeout, || { - let accepted_signers = test_observer::get_stackerdb_chunks() - .into_iter() - .flat_map(|chunk| chunk.modified_slots) - .filter_map(|chunk| { - let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) - .expect("Failed to deserialize SignerMessage"); - match message { - SignerMessage::BlockResponse(BlockResponse::Accepted(accepted)) => { - non_ignoring_signers.iter().find(|key| { - key.verify(accepted.signer_signature_hash.bits(), &accepted.signature) - .is_ok() - }) - } - _ => None, - } - }) - .collect::>(); - Ok(accepted_signers.len() + ignoring_signers.len() == num_signers) + signer_test.boot_to_epoch_3(); + + let commits_before = signer_test + .running_nodes + .commits_submitted + .load(Ordering::SeqCst); + + next_block_and_process_new_stacks_block( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + 
&signer_test.running_nodes.coord_channel, + ) + .expect("Failed to mine first block"); + + // Ensure that the block commit has been sent before continuing + wait_for(60, || { + let commits = signer_test + .running_nodes + .commits_submitted + .load(Ordering::SeqCst); + Ok(commits > commits_before) }) - .expect("FAIL: Timed out waiting for block proposal acceptance"); + .expect("Timed out waiting for block commit after new Stacks block"); - let blocks_after = mined_blocks.load(Ordering::SeqCst); - let info_after = signer_test - .stacks_client - .get_peer_info() - .expect("Failed to get peer info"); - assert_eq!(blocks_after, blocks_before); - assert_eq!(info_after, info_before); - // Ensure that the block was NOT accepted globally so the stacks tip has NOT advanced to N+1 - let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); - let block_n_1 = nakamoto_blocks.last().unwrap(); - assert_ne!(block_n_1, block_n); - assert_ne!(info_after.stacks_tip.to_string(), block_n_1.block_hash); + // Prevent a block from being mined by making signers reject it. + let all_signers = signer_test + .signer_stacks_private_keys + .iter() + .map(StacksPublicKey::from_private) + .collect::>(); + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(all_signers); - info!("------------------------- Starting Tenure B -------------------------"); - // Start a new tenure and ensure the miner can propose a new block N+1' that is accepted by all signers - let commits_submitted = signer_test.running_nodes.commits_submitted.clone(); - let commits_before = commits_submitted.load(Ordering::SeqCst); + info!("------------------------- Test Mine Burn Block -------------------------"); + let burn_height_before = get_chain_info(&signer_test.running_nodes.conf).burn_block_height; + let commits_before = signer_test + .running_nodes + .commits_submitted + .load(Ordering::SeqCst); + + // Mine a burn block and wait for it to be processed. 
next_block_and( &mut signer_test.running_nodes.btc_regtest_controller, 60, || { - let commits_count = commits_submitted.load(Ordering::SeqCst); - Ok(commits_count > commits_before) + let burn_height = get_chain_info(&signer_test.running_nodes.conf).burn_block_height; + Ok(burn_height > burn_height_before) }, ) .unwrap(); - info!( - "------------------------- Mine Nakamoto Block N+1' in Tenure B -------------------------" - ); - let info_before = signer_test - .stacks_client - .get_peer_info() - .expect("Failed to get peer info"); - TEST_IGNORE_ALL_BLOCK_PROPOSALS - .lock() - .unwrap() - .replace(Vec::new()); - wait_for(short_timeout, || { - let info_after = signer_test - .stacks_client - .get_peer_info() - .expect("Failed to get peer info"); - Ok(info_after.stacks_tip_height > info_before.stacks_tip_height) - }) - .expect("Timed out waiting for block to be mined and processed"); + // Sleep an extra minute to ensure no block commits are sent + sleep_ms(60_000); - let info_after = signer_test - .stacks_client - .get_peer_info() - .expect("Failed to get peer info"); - assert_eq!( - info_before.stacks_tip_height + 1, - info_after.stacks_tip_height - ); + let commits = signer_test + .running_nodes + .commits_submitted + .load(Ordering::SeqCst); + assert_eq!(commits, commits_before); - // Ensure that the block was accepted globally so the stacks tip has advanced to N+1' - let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); - let block_n_1_prime = nakamoto_blocks.last().unwrap(); - assert_eq!( - info_after.stacks_tip.to_string(), - block_n_1_prime.block_hash - ); - assert_ne!(block_n_1_prime, block_n); + let blocks_before = signer_test + .running_nodes + .nakamoto_blocks_mined + .load(Ordering::SeqCst); - // Make sure that ALL signers accepted the block proposal even though they signed a conflicting one in prior tenure - signer_test - .wait_for_block_acceptance(30, &block_n_1_prime.signer_signature_hash, &all_signers) - .expect("Timed out waiting for block 
acceptance of N+1'"); + info!("------------------------- Resume Signing -------------------------"); + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(Vec::new()); + + // Wait for a block to be mined + wait_for(60, || { + let blocks = signer_test + .running_nodes + .nakamoto_blocks_mined + .load(Ordering::SeqCst); + Ok(blocks > blocks_before) + }) + .expect("Timed out waiting for block to be mined"); + + // Wait for a block commit to be sent + wait_for(60, || { + let commits = signer_test + .running_nodes + .commits_submitted + .load(Ordering::SeqCst); + Ok(commits > commits_before) + }) + .expect("Timed out waiting for block commit after new Stacks block"); + + signer_test.shutdown(); } +// Ensures that a signer that successfully submits a block to the node for validation +// will issue ConnectivityIssues rejections if a block submission times out. +// Also ensures that no other proposal gets submitted for validation if we +// are already waiting for a block submission response. #[test] #[ignore] -/// Test that signers that have accepted a locally signed block N+1 built in tenure A cannot sign a block proposed during a -/// new tenure B built upon the last globally accepted block N if the timeout is not exceeded, i.e. a reorg cannot occur at a tenure boundary -/// before the specified timeout has been exceeded. -/// -/// Test Setup: -/// The test spins up five stacks signers, one miner Nakamoto node, and a corresponding bitcoind. -/// The stacks node is then advanced to Epoch 3.0 boundary to allow block signing. -/// -/// Test Execution: -/// The node mines 1 stacks block N (all signers sign it). The subsequent block N+1 is proposed, but <30% accept it. The remaining signers -/// do not make a decision on the block. A new tenure begins and the miner proposes a new block N+1' which all signers reject as the timeout -/// has not been exceeded. -/// -/// Test Assertion: -/// Stacks tip remains at N. 
-fn reorg_locally_accepted_blocks_across_tenures_fails() { +fn block_validation_response_timeout() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } @@ -5176,41 +7574,39 @@ fn reorg_locally_accepted_blocks_across_tenures_fails() { info!("------------------------- Test Setup -------------------------"); let num_signers = 5; + let timeout = Duration::from_secs(30); let sender_sk = Secp256k1PrivateKey::new(); let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; - let nmb_txs = 2; let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, - vec![(sender_addr, (send_amt + send_fee) * nmb_txs)], + vec![(sender_addr, send_amt + send_fee)], |config| { - // Do not alow any reorg attempts essentially - config.tenure_last_block_proposal_timeout = Duration::from_secs(100_000); + config.block_proposal_validation_timeout = timeout; }, |_| {}, None, None, ); - let all_signers = signer_test - .signer_stacks_private_keys - .iter() - .map(StacksPublicKey::from_private) - .collect::>(); let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); - let short_timeout = 30; signer_test.boot_to_epoch_3(); - info!("------------------------- Starting Tenure A -------------------------"); - info!("------------------------- Test Mine Nakamoto Block N -------------------------"); - let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone(); - let info_before = signer_test - .stacks_client - .get_peer_info() - .expect("Failed to get peer info"); - // submit a tx so that the miner will mine a stacks block - let mut sender_nonce = 0; + info!("------------------------- Test Mine and Verify Confirmed Nakamoto Block -------------------------"); + signer_test.mine_and_verify_confirmed_naka_block(timeout, num_signers, true); + info!("------------------------- Test Block Validation Stalled 
-------------------------"); + TEST_VALIDATE_STALL.lock().unwrap().replace(true); + let validation_stall_start = Instant::now(); + + let proposals_before = signer_test + .running_nodes + .nakamoto_blocks_proposed + .load(Ordering::SeqCst); + + // submit a tx so that the miner will attempt to mine an extra block + let sender_nonce = 0; let transfer_tx = make_stacks_transfer( &sender_sk, sender_nonce, @@ -5219,273 +7615,341 @@ fn reorg_locally_accepted_blocks_across_tenures_fails() { &recipient, send_amt, ); - let tx = submit_tx(&http_origin, &transfer_tx); - sender_nonce += 1; - info!("Submitted tx {tx} in to mine block N"); - wait_for(short_timeout, || { - let info_after = signer_test - .stacks_client - .get_peer_info() - .expect("Failed to get peer info"); - Ok(info_after.stacks_tip_height > info_before.stacks_tip_height) + submit_tx(&http_origin, &transfer_tx); + + info!("Submitted transfer tx and waiting for block proposal"); + wait_for(30, || { + Ok(signer_test + .running_nodes + .nakamoto_blocks_proposed + .load(Ordering::SeqCst) + > proposals_before) }) - .expect("Timed out waiting for block to be mined and processed"); + .expect("Timed out waiting for block proposal"); - // Ensure that the block was accepted globally so the stacks tip has advanced to N - let info_after = signer_test - .stacks_client - .get_peer_info() - .expect("Failed to get peer info"); - assert_eq!( - info_before.stacks_tip_height + 1, - info_after.stacks_tip_height + assert!( + validation_stall_start.elapsed() < timeout, + "Test was too slow to propose another block before the timeout" ); - let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); - let block_n = nakamoto_blocks.last().unwrap(); - assert_eq!(info_after.stacks_tip.to_string(), block_n.block_hash); - info!("------------------------- Attempt to Mine Nakamoto Block N+1 -------------------------"); - // Make more than >70% of the signers ignore the block proposal to ensure it it is not globally accepted/rejected - let 
ignoring_signers: Vec<_> = all_signers - .iter() - .cloned() - .take(num_signers * 7 / 10) - .collect(); - let non_ignoring_signers: Vec<_> = all_signers - .iter() - .cloned() - .skip(num_signers * 7 / 10) - .collect(); - TEST_IGNORE_ALL_BLOCK_PROPOSALS - .lock() - .unwrap() - .replace(ignoring_signers.clone()); - // Clear the stackerdb chunks - test_observer::clear(); + info!("------------------------- Propose Another Block Before Hitting the Timeout -------------------------"); + let proposal_conf = ProposalEvalConfig { + first_proposal_burn_block_timing: Duration::from_secs(0), + tenure_last_block_proposal_timeout: Duration::from_secs(30), + block_proposal_timeout: Duration::from_secs(100), + tenure_idle_timeout: Duration::from_secs(300), + }; + let mut block = NakamotoBlock { + header: NakamotoBlockHeader::empty(), + txs: vec![], + }; + block.header.timestamp = get_epoch_time_secs(); - let blocks_before = mined_blocks.load(Ordering::SeqCst); - let info_before = signer_test - .stacks_client - .get_peer_info() - .expect("Failed to get peer info"); - // submit a tx so that the miner will ATTEMPT to mine a stacks block N+1 - let transfer_tx = make_stacks_transfer( - &sender_sk, - sender_nonce, - send_fee, - signer_test.running_nodes.conf.burnchain.chain_id, - &recipient, - send_amt, - ); - let tx = submit_tx(&http_origin, &transfer_tx); + let info_before = get_chain_info(&signer_test.running_nodes.conf); + // Propose a block to the signers that passes initial checks but will not be submitted to the stacks node due to the submission stall + let view = SortitionsView::fetch_view(proposal_conf, &signer_test.stacks_client).unwrap(); + block.header.pox_treatment = BitVec::ones(1).unwrap(); + block.header.consensus_hash = view.cur_sortition.consensus_hash; + block.header.chain_length = info_before.stacks_tip_height + 1; - info!("Submitted tx {tx} in to attempt to mine block N+1"); - wait_for(short_timeout, || { - let accepted_signers = 
test_observer::get_stackerdb_chunks() - .into_iter() - .flat_map(|chunk| chunk.modified_slots) - .filter_map(|chunk| { - let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) - .expect("Failed to deserialize SignerMessage"); - match message { - SignerMessage::BlockResponse(BlockResponse::Accepted(accepted)) => { - non_ignoring_signers.iter().find(|key| { - key.verify(accepted.signer_signature_hash.bits(), &accepted.signature) - .is_ok() - }) - } - _ => None, - } - }) - .collect::>(); - Ok(accepted_signers.len() + ignoring_signers.len() == num_signers) - }) - .expect("FAIL: Timed out waiting for block proposal acceptance"); + let block_signer_signature_hash_1 = block.header.signer_signature_hash(); + signer_test.propose_block(block, timeout); - let blocks_after = mined_blocks.load(Ordering::SeqCst); - let info_after = signer_test - .stacks_client - .get_peer_info() - .expect("Failed to get peer info"); - assert_eq!(blocks_after, blocks_before); - assert_eq!(info_after, info_before); - // Ensure that the block was NOT accepted globally so the stacks tip has NOT advanced to N+1 - let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); - let block_n_1 = nakamoto_blocks.last().unwrap(); - assert_ne!(block_n_1, block_n); - assert_ne!(info_after.stacks_tip.to_string(), block_n_1.block_hash); + info!("------------------------- Waiting for Timeout -------------------------"); + // Sleep the necessary timeout to make sure the validation times out. 
+ let elapsed = validation_stall_start.elapsed(); + let wait = timeout.saturating_sub(elapsed); + info!("Sleeping for {} ms", wait.as_millis()); + std::thread::sleep(timeout.saturating_sub(elapsed)); - info!("------------------------- Starting Tenure B -------------------------"); - let blocks_before = mined_blocks.load(Ordering::SeqCst); - let info_before = signer_test - .stacks_client - .get_peer_info() - .expect("Failed to get peer info"); + info!("------------------------- Wait for Block Rejection Due to Timeout -------------------------"); + // Verify that the signer that submits the block to the node will issue a ConnectivityIssues rejection + wait_for(30, || { + let chunks = test_observer::get_stackerdb_chunks(); + for chunk in chunks.into_iter().flat_map(|chunk| chunk.modified_slots) { + let Ok(message) = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + else { + continue; + }; + let SignerMessage::BlockResponse(BlockResponse::Rejected(BlockRejection { + reason: _reason, + reason_code, + signer_signature_hash, + .. + })) = message + else { + continue; + }; + // We are waiting for the original block proposal which will have a diff signature to our + // second proposed block. 
+ assert_ne!( + signer_signature_hash, block_signer_signature_hash_1, + "Received a rejection for the wrong block" + ); + if matches!(reason_code, RejectCode::ConnectivityIssues) { + return Ok(true); + } + } + Ok(false) + }) + .expect("Timed out waiting for block proposal rejections"); + // Make sure our chain has still not advanced + let info_after = get_chain_info(&signer_test.running_nodes.conf); + assert_eq!(info_before, info_after); + let info_before = info_after; + info!("Unpausing block validation"); + // Disable the stall and wait for the block to be processed successfully + TEST_VALIDATE_STALL.lock().unwrap().replace(false); + wait_for(30, || { + let info = get_chain_info(&signer_test.running_nodes.conf); + Ok(info.stacks_tip_height > info_before.stacks_tip_height) + }) + .expect("Timed out waiting for block to be processed"); - // Clear the test observer so any old rejections are not counted - test_observer::clear(); + let info_after = get_chain_info(&signer_test.running_nodes.conf); + assert_eq!( + info_after.stacks_tip_height, + info_before.stacks_tip_height + 1, + ); + info!("------------------------- Test Mine and Verify Confirmed Nakamoto Block -------------------------"); + let info_before = info_after; + signer_test.mine_and_verify_confirmed_naka_block(timeout, num_signers, true); - // Start a new tenure and ensure the we see the expected rejections - next_block_and( - &mut signer_test.running_nodes.btc_regtest_controller, - 60, - || { - let rejected_signers = test_observer::get_stackerdb_chunks() - .into_iter() - .flat_map(|chunk| chunk.modified_slots) - .filter_map(|chunk| { - let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) - .expect("Failed to deserialize SignerMessage"); - match message { - SignerMessage::BlockResponse(BlockResponse::Rejected(BlockRejection { - signature, - signer_signature_hash, - .. 
- })) => non_ignoring_signers.iter().find(|key| { - key.verify(signer_signature_hash.bits(), &signature).is_ok() - }), - _ => None, - } - }) - .collect::>(); - Ok(rejected_signers.len() + ignoring_signers.len() == num_signers) - }, - ) - .expect("FAIL: Timed out waiting for block proposal rejections"); + wait_for(30, || { + let info = get_chain_info(&signer_test.running_nodes.conf); + Ok(info.stacks_tip_height > info_before.stacks_tip_height) + }) + .unwrap(); - let blocks_after = mined_blocks.load(Ordering::SeqCst); - let info_after = signer_test - .stacks_client - .get_peer_info() - .expect("Failed to get peer info"); - assert_eq!(blocks_after, blocks_before); - assert_eq!(info_after.stacks_tip, info_before.stacks_tip); - // Ensure that the block was NOT accepted globally so the stacks tip has NOT advanced to N+1' - let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); - let block_n_1_prime = nakamoto_blocks.last().unwrap(); - assert_ne!(block_n_1, block_n_1_prime); - assert_ne!( - info_after.stacks_tip.to_string(), - block_n_1_prime.block_hash + let info_after = get_chain_info(&signer_test.running_nodes.conf); + assert_eq!( + info_after.stacks_tip_height, + info_before.stacks_tip_height + 1, ); } #[test] -#[ignore] -/// Test that when 70% of signers accept a block, mark it globally accepted, but a miner ends its tenure -/// before it receives these signatures, the miner can recover in the following tenure. -/// -/// Test Setup: -/// The test spins up five stacks signers, one miner Nakamoto node, and a corresponding bitcoind. -/// The stacks node is then advanced to Epoch 3.0 boundary to allow block signing. -/// -/// Test Execution: -/// The node mines 1 stacks block N (all signers sign it). The subsequent block N+1 is proposed, but >70% accept it. -/// The signers delay broadcasting the block and the miner ends its tenure before it receives these signatures. The -/// miner will propose an invalid block N+1' which all signers reject. 
The broadcast delay is removed and the miner -/// proposes a new block N+2 which all signers accept. -/// -/// Test Assertion: -/// Stacks tip advances to N+2 -fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { - if env::var("BITCOIND_TEST") != Ok("1".into()) { - return; - } - - tracing_subscriber::registry() - .with(fmt::layer()) - .with(EnvFilter::from_default_env()) - .init(); +#[ignore] +/// Test that a miner will extend its tenure after the succeding miner fails to mine a block. +/// - Miner 1 wins a tenure and mines normally +/// - Miner 2 wins a tenure but fails to mine a block +/// - Miner 1 extends its tenure +fn tenure_extend_after_failed_miner() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } - info!("------------------------- Test Setup -------------------------"); let num_signers = 5; + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let sender_sk = Secp256k1PrivateKey::new(); let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; - let nmb_txs = 3; - let recipient = PrincipalData::from(StacksAddress::burn_address(false)); - let mut signer_test: SignerTest = SignerTest::new( + let num_txs = 2; + let mut sender_nonce = 0; + + let btc_miner_1_seed = vec![1, 1, 1, 1]; + let btc_miner_2_seed = vec![2, 2, 2, 2]; + let btc_miner_1_pk = Keychain::default(btc_miner_1_seed.clone()).get_pub_key(); + let btc_miner_2_pk = Keychain::default(btc_miner_2_seed.clone()).get_pub_key(); + + let node_1_rpc = gen_random_port(); + let node_1_p2p = gen_random_port(); + let node_2_rpc = gen_random_port(); + let node_2_p2p = gen_random_port(); + + let localhost = "127.0.0.1"; + let node_1_rpc_bind = format!("{localhost}:{node_1_rpc}"); + let node_2_rpc_bind = format!("{localhost}:{node_2_rpc}"); + let mut node_2_listeners = Vec::new(); + + let max_nakamoto_tenures = 30; + + info!("------------------------- Test Setup -------------------------"); + // partition the signer set so that 
~half are listening and using node 1 for RPC and events, + // and the rest are using node 2 + + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, - vec![(sender_addr, (send_amt + send_fee) * nmb_txs)], + vec![(sender_addr, (send_amt + send_fee) * num_txs)], + |signer_config| { + let node_host = if signer_config.endpoint.port() % 2 == 0 { + &node_1_rpc_bind + } else { + &node_2_rpc_bind + }; + signer_config.node_host = node_host.to_string(); + signer_config.block_proposal_timeout = Duration::from_secs(30); + }, + |config| { + config.node.rpc_bind = format!("{localhost}:{node_1_rpc}"); + config.node.p2p_bind = format!("{localhost}:{node_1_p2p}"); + config.node.data_url = format!("http://{localhost}:{node_1_rpc}"); + config.node.p2p_address = format!("{localhost}:{node_1_p2p}"); + config.miner.wait_on_interim_blocks = Duration::from_secs(5); + config.node.pox_sync_sample_secs = 30; + config.burnchain.pox_reward_length = Some(max_nakamoto_tenures); + + config.node.seed = btc_miner_1_seed.clone(); + config.node.local_peer_seed = btc_miner_1_seed.clone(); + config.burnchain.local_mining_public_key = Some(btc_miner_1_pk.to_hex()); + config.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[1])); + + config.events_observers.retain(|listener| { + let Ok(addr) = std::net::SocketAddr::from_str(&listener.endpoint) else { + warn!( + "Cannot parse {} to a socket, assuming it isn't a signer-listener binding", + listener.endpoint + ); + return true; + }; + if addr.port() % 2 == 0 || addr.port() == test_observer::EVENT_OBSERVER_PORT { + return true; + } + node_2_listeners.push(listener.clone()); + false + }) + }, + Some(vec![btc_miner_1_pk, btc_miner_2_pk]), + None, + ); + let conf = signer_test.running_nodes.conf.clone(); + let mut conf_node_2 = conf.clone(); + conf_node_2.node.rpc_bind = format!("{localhost}:{node_2_rpc}"); + conf_node_2.node.p2p_bind = format!("{localhost}:{node_2_p2p}"); + conf_node_2.node.data_url = 
format!("http://{localhost}:{node_2_rpc}"); + conf_node_2.node.p2p_address = format!("{localhost}:{node_2_p2p}"); + conf_node_2.node.seed = btc_miner_2_seed.clone(); + conf_node_2.burnchain.local_mining_public_key = Some(btc_miner_2_pk.to_hex()); + conf_node_2.node.local_peer_seed = btc_miner_2_seed.clone(); + conf_node_2.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[2])); + conf_node_2.node.miner = true; + conf_node_2.events_observers.clear(); + conf_node_2.events_observers.extend(node_2_listeners); + assert!(!conf_node_2.events_observers.is_empty()); + + let node_1_sk = Secp256k1PrivateKey::from_seed(&conf.node.local_peer_seed); + let node_1_pk = StacksPublicKey::from_private(&node_1_sk); + + conf_node_2.node.working_dir = format!("{}-1", conf_node_2.node.working_dir); + + conf_node_2.node.set_bootstrap_nodes( + format!("{}@{}", &node_1_pk.to_hex(), conf.node.p2p_bind), + conf.burnchain.chain_id, + conf.burnchain.peer_version, ); let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + + let mut run_loop_2 = boot_nakamoto::BootRunLoop::new(conf_node_2.clone()).unwrap(); + let run_loop_stopper_2 = run_loop_2.get_termination_switch(); + let rl2_coord_channels = run_loop_2.coordinator_channels(); + let Counters { + naka_submitted_commits: rl2_commits, + naka_skip_commit_op: rl2_skip_commit_op, + .. + } = run_loop_2.counters(); + + let blocks_mined1 = signer_test.running_nodes.nakamoto_blocks_mined.clone(); + + info!("------------------------- Pause Miner 2's Block Commits -------------------------"); + + // Make sure Miner 2 cannot win a sortition at first. 
+ rl2_skip_commit_op.set(true); + + info!("------------------------- Boot to Epoch 3.0 -------------------------"); + + let run_loop_2_thread = thread::Builder::new() + .name("run_loop_2".into()) + .spawn(move || run_loop_2.start(None, 0)) + .unwrap(); + signer_test.boot_to_epoch_3(); - info!("------------------------- Starting Tenure A -------------------------"); - info!("------------------------- Test Mine Nakamoto Block N -------------------------"); + wait_for(120, || { + let Some(node_1_info) = get_chain_info_opt(&conf) else { + return Ok(false); + }; + let Some(node_2_info) = get_chain_info_opt(&conf_node_2) else { + return Ok(false); + }; + Ok(node_1_info.stacks_tip_height == node_2_info.stacks_tip_height) + }) + .expect("Timed out waiting for boostrapped node to catch up to the miner"); + + let mining_pkh_1 = Hash160::from_node_public_key(&StacksPublicKey::from_private( + &conf.miner.mining_key.unwrap(), + )); + let mining_pkh_2 = Hash160::from_node_public_key(&StacksPublicKey::from_private( + &conf_node_2.miner.mining_key.unwrap(), + )); + debug!("The mining key for miner 1 is {mining_pkh_1}"); + debug!("The mining key for miner 2 is {mining_pkh_2}"); + + info!("------------------------- Reached Epoch 3.0 -------------------------"); - // wait until we get a sortition. 
- // we might miss a block-commit at the start of epoch 3 let burnchain = signer_test.running_nodes.conf.get_burnchain(); let sortdb = burnchain.open_sortition_db(true).unwrap(); - wait_for(30, || { - let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - Ok(tip.sortition) - }) - .expect("Timed out waiting for sortition"); + let get_burn_height = || { + SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) + .unwrap() + .block_height + }; - let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone(); - let blocks_before = mined_blocks.load(Ordering::SeqCst); - let info_before = signer_test + info!("------------------------- Pause Miner 1's Block Commit -------------------------"); + // Make sure miner 1 doesn't submit any further block commits for the next tenure BEFORE mining the bitcoin block + signer_test + .running_nodes + .nakamoto_test_skip_commit_op + .set(true); + + info!("------------------------- Miner 1 Wins Normal Tenure A -------------------------"); + let blocks_processed_before_1 = blocks_mined1.load(Ordering::SeqCst); + let nmb_old_blocks = test_observer::get_blocks().len(); + let stacks_height_before = signer_test .stacks_client .get_peer_info() - .expect("Failed to get peer info"); - // submit a tx so that the miner will mine a stacks block - let mut sender_nonce = 0; - let transfer_tx = make_stacks_transfer( - &sender_sk, - sender_nonce, - send_fee, - signer_test.running_nodes.conf.burnchain.chain_id, - &recipient, - send_amt, - ); - let tx = submit_tx(&http_origin, &transfer_tx); - info!("Submitted tx {tx} in to mine block N"); + .expect("Failed to get peer info") + .stacks_tip_height; - // a tenure has begun, so wait until we mine a block - wait_for(30, || { - let new_height = signer_test + signer_test + .running_nodes + .btc_regtest_controller + .build_next_block(1); + + // assure we have a successful sortition that miner A won + let tip = 
SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + assert!(tip.sortition); + assert_eq!(tip.miner_pk_hash.unwrap(), mining_pkh_1); + + // wait for the new block to be processed + wait_for(60, || { + let stacks_height = signer_test .stacks_client .get_peer_info() .expect("Failed to get peer info") .stacks_tip_height; - Ok(mined_blocks.load(Ordering::SeqCst) > blocks_before - && new_height > info_before.stacks_tip_height) + Ok( + blocks_mined1.load(Ordering::SeqCst) > blocks_processed_before_1 + && stacks_height > stacks_height_before + && test_observer::get_blocks().len() > nmb_old_blocks, + ) }) - .expect("Timed out waiting for block to be mined and processed"); + .unwrap(); - sender_nonce += 1; - let info_after = signer_test - .stacks_client - .get_peer_info() - .expect("Failed to get peer info"); - assert_eq!( - info_before.stacks_tip_height + 1, - info_after.stacks_tip_height - ); + verify_last_block_contains_tenure_change_tx(TenureChangeCause::BlockFound); - let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); - let block_n = nakamoto_blocks.last().unwrap(); - assert_eq!(info_after.stacks_tip.to_string(), block_n.block_hash); + info!("------------------------- Miner 1 Mines Another Block -------------------------"); - info!("------------------------- Attempt to Mine Nakamoto Block N+1 -------------------------"); - // Propose a valid block, but force the miner to ignore the returned signatures and delay the block being - // broadcasted to the miner so it can end its tenure before block confirmation obtained - // Clear the stackerdb chunks - info!("Forcing miner to ignore block responses for block N+1"); - TEST_IGNORE_SIGNERS.lock().unwrap().replace(true); - info!("Delaying signer block N+1 broadcasting to the miner"); - TEST_PAUSE_BLOCK_BROADCAST.lock().unwrap().replace(true); - test_observer::clear(); - let blocks_before = mined_blocks.load(Ordering::SeqCst); - let info_before = signer_test + let blocks_processed_before_1 = 
blocks_mined1.load(Ordering::SeqCst); + let nmb_old_blocks = test_observer::get_blocks().len(); + let stacks_height_before = signer_test .stacks_client .get_peer_info() - .expect("Failed to get peer info"); + .expect("Failed to get peer info") + .stacks_tip_height; + // submit a tx so that the miner will mine an extra block let transfer_tx = make_stacks_transfer( &sender_sk, sender_nonce, @@ -5494,148 +7958,100 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { &recipient, send_amt, ); + submit_tx(&http_origin, &transfer_tx); sender_nonce += 1; - let tx = submit_tx(&http_origin, &transfer_tx); + // wait for the new block to be processed + wait_for(30, || { + let stacks_height = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + Ok( + blocks_mined1.load(Ordering::SeqCst) > blocks_processed_before_1 + && stacks_height > stacks_height_before + && test_observer::get_blocks().len() > nmb_old_blocks, + ) + }) + .expect("Timed out waiting for block to be mined and processed"); + + info!("------------------------- Pause Block Proposals -------------------------"); + TEST_MINE_STALL.lock().unwrap().replace(true); - info!("Submitted tx {tx} in to attempt to mine block N+1"); - let mut block = None; + // Unpause miner 2's block commits + let rl2_commits_before = rl2_commits.load(Ordering::SeqCst); + rl2_skip_commit_op.set(false); + + // Ensure miner 2 submits a block commit before mining the bitcoin block wait_for(30, || { - block = test_observer::get_stackerdb_chunks() - .into_iter() - .flat_map(|chunk| chunk.modified_slots) - .find_map(|chunk| { - let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) - .expect("Failed to deserialize SignerMessage"); - match message { - SignerMessage::BlockProposal(proposal) => { - if proposal.block.header.consensus_hash - == info_before.stacks_tip_consensus_hash - { - Some(proposal.block) - } else { - None - } - } - _ => None, - } - }); 
- let Some(block) = &block else { - return Ok(false); - }; - let signatures = test_observer::get_stackerdb_chunks() - .into_iter() - .flat_map(|chunk| chunk.modified_slots) - .filter_map(|chunk| { - let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) - .expect("Failed to deserialize SignerMessage"); - match message { - SignerMessage::BlockResponse(BlockResponse::Accepted(accepted)) => { - if block.header.signer_signature_hash() == accepted.signer_signature_hash { - Some(accepted.signature) - } else { - None - } - } - _ => None, - } - }) - .collect::>(); - Ok(signatures.len() == num_signers) + Ok(rl2_commits.load(Ordering::SeqCst) > rl2_commits_before) }) - .expect("Test timed out while waiting for signers signatures for first block proposal"); - let block = block.unwrap(); + .unwrap(); - let blocks_after = mined_blocks.load(Ordering::SeqCst); - let info_after = signer_test + info!("------------------------- Miner 2 Wins Tenure B, Mines No Blocks -------------------------"); + + let blocks_processed_before_1 = blocks_mined1.load(Ordering::SeqCst); + let nmb_old_blocks = test_observer::get_blocks().len(); + let stacks_height_before = signer_test .stacks_client .get_peer_info() - .expect("Failed to get peer info"); - assert_eq!(blocks_after, blocks_before); - assert_eq!(info_after, info_before); - // Ensure that the block was not yet broadcasted to the miner so the stacks tip has NOT advanced to N+1 - let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); - let block_n_same = nakamoto_blocks.last().unwrap(); - assert_ne!(block_n_same, block_n); - assert_ne!(info_after.stacks_tip.to_string(), block_n_same.block_hash); - - info!("------------------------- Starting Tenure B -------------------------"); - let commits_submitted = signer_test.running_nodes.commits_submitted.clone(); - let commits_before = commits_submitted.load(Ordering::SeqCst); + .expect("Failed to get peer info") + .stacks_tip_height; + let burn_height_before = 
get_burn_height(); next_block_and( &mut signer_test.running_nodes.btc_regtest_controller, 60, - || { - let commits_count = commits_submitted.load(Ordering::SeqCst); - Ok(commits_count > commits_before) - }, + || Ok(get_burn_height() > burn_height_before), ) .unwrap(); - info!( - "------------------------- Attempt to Mine Nakamoto Block N+1' -------------------------" + // assure we have a successful sortition that miner B won + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + assert!(tip.sortition); + assert_eq!(tip.miner_pk_hash.unwrap(), mining_pkh_2); + + info!("------------------------- Wait for Block Proposal Timeout -------------------------"); + sleep_ms( + signer_test.signer_configs[0] + .block_proposal_timeout + .as_millis() as u64 + * 2, ); - // Wait for the miner to propose a new invalid block N+1' - let mut rejected_block = None; - wait_for(30, || { - rejected_block = test_observer::get_stackerdb_chunks() - .into_iter() - .flat_map(|chunk| chunk.modified_slots) - .find_map(|chunk| { - let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) - .expect("Failed to deserialize SignerMessage"); - match message { - SignerMessage::BlockProposal(proposal) => { - if proposal.block.header.consensus_hash != block.header.consensus_hash { - assert!( - proposal.block.header.chain_length == block.header.chain_length - ); - Some(proposal.block) - } else { - None - } - } - _ => None, - } - }); - Ok(rejected_block.is_some()) - }) - .expect("Timed out waiting for block proposal of N+1' block proposal"); - info!("Allowing miner to accept block responses again. 
"); - TEST_IGNORE_SIGNERS.lock().unwrap().replace(false); - info!("Allowing signers to broadcast block N+1 to the miner"); - TEST_PAUSE_BLOCK_BROADCAST.lock().unwrap().replace(false); + info!("------------------------- Miner 1 Extends Tenure A -------------------------"); - // Assert the N+1' block was rejected - let rejected_block = rejected_block.unwrap(); - wait_for(30, || { - let stackerdb_events = test_observer::get_stackerdb_chunks(); - let block_rejections = stackerdb_events - .into_iter() - .flat_map(|chunk| chunk.modified_slots) - .filter_map(|chunk| { - let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) - .expect("Failed to deserialize SignerMessage"); - match message { - SignerMessage::BlockResponse(BlockResponse::Rejected(rejection)) => { - if rejection.signer_signature_hash - == rejected_block.header.signer_signature_hash() - { - Some(rejection) - } else { - None - } - } - _ => None, - } - }) - .collect::>(); - Ok(block_rejections.len() == num_signers) + // Re-enable block mining + TEST_MINE_STALL.lock().unwrap().replace(false); + + // wait for a tenure extend block from miner 1 to be processed + wait_for(60, || { + let stacks_height = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + Ok( + blocks_mined1.load(Ordering::SeqCst) > blocks_processed_before_1 + && stacks_height > stacks_height_before + && test_observer::get_blocks().len() > nmb_old_blocks, + ) }) - .expect("FAIL: Timed out waiting for block proposal rejections"); + .expect("Timed out waiting for tenure extend block to be mined and processed"); - // Induce block N+2 to get mined + verify_last_block_contains_tenure_change_tx(TenureChangeCause::Extended); + + info!("------------------------- Miner 1 Mines Another Block -------------------------"); + + let blocks_processed_before_1 = blocks_mined1.load(Ordering::SeqCst); + let nmb_old_blocks = test_observer::get_blocks().len(); + let stacks_height_before = 
signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + + // submit a tx so that the miner will mine an extra block let transfer_tx = make_stacks_transfer( &sender_sk, sender_nonce, @@ -5644,73 +8060,74 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { &recipient, send_amt, ); + submit_tx(&http_origin, &transfer_tx); - let tx = submit_tx(&http_origin, &transfer_tx); - info!("Submitted tx {tx} in to attempt to mine block N+2"); - - info!("------------------------- Asserting a both N+1 and N+2 are accepted -------------------------"); + // wait for the new block to be processed wait_for(30, || { - // N.B. have to use /v2/info because mined_blocks only increments if the miner's signing - // coordinator returns successfully (meaning, mined_blocks won't increment for block N+1) - let info = signer_test + let stacks_height = signer_test .stacks_client .get_peer_info() - .expect("Failed to get peer info"); + .expect("Failed to get peer info") + .stacks_tip_height; + Ok( + blocks_mined1.load(Ordering::SeqCst) > blocks_processed_before_1 + && stacks_height > stacks_height_before + && test_observer::get_blocks().len() > nmb_old_blocks, + ) + }) + .expect("Timed out waiting for block to be mined and processed"); - Ok(info_before.stacks_tip_height + 2 <= info.stacks_tip_height) + // Re-enable block commits for miner 2 + let rl2_commits_before = rl2_commits.load(Ordering::SeqCst); + rl2_skip_commit_op.set(true); + + // Wait for block commit from miner 2 + wait_for(30, || { + Ok(rl2_commits.load(Ordering::SeqCst) > rl2_commits_before) }) - .expect("Timed out waiting for blocks to be mined"); + .expect("Timed out waiting for block commit from miner 2"); - let info_after = signer_test + info!("------------------------- Miner 2 Mines the Next Tenure -------------------------"); + + let stacks_height_before = signer_test .stacks_client .get_peer_info() - .expect("Failed to get peer info"); + .expect("Failed 
to get peer info") + .stacks_tip_height; - assert_eq!( - info_before.stacks_tip_height + 2, - info_after.stacks_tip_height - ); - let nmb_signatures = signer_test - .stacks_client - .get_tenure_tip(&info_after.stacks_tip_consensus_hash) - .expect("Failed to get tip") - .as_stacks_nakamoto() - .expect("Not a Nakamoto block") - .signer_signature - .len(); - assert!(nmb_signatures >= num_signers * 7 / 10); + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || { + let stacks_height = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + Ok(stacks_height > stacks_height_before) + }, + ) + .expect("Timed out waiting for final block to be mined and processed"); - // Ensure that the block was accepted globally so the stacks tip has advanced to N+2 - let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); - let block_n_2 = nakamoto_blocks.last().unwrap(); - assert_eq!(info_after.stacks_tip.to_string(), block_n_2.block_hash); - assert_ne!(block_n_2, block_n); + info!("------------------------- Shutdown -------------------------"); + rl2_coord_channels + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper_2.store(false, Ordering::SeqCst); + run_loop_2_thread.join().unwrap(); + signer_test.shutdown(); } -/// Test a scenario where: -/// Two miners boot to Nakamoto. -/// Miner 1 wins the first tenure and proposes a block N with a TenureChangePayload -/// Signers accept and the stacks tip advances to N -/// Miner 2 wins the second tenure B but its proposed blocks are rejected by the signers. 
-/// Mine 2 empty burn blocks (simulate fast blocks scenario) -/// Miner 2 proposes block N+1 with a TenureChangePayload -/// Signers accept and the stacks tip advances to N+1 -/// Miner 2 proposes block N+2 with a TokenTransfer -/// Signers accept and the stacks tip advances to N+2 -/// Mine an empty burn block -/// Miner 2 proposes block N+3 with a TenureExtend -/// Signers accept and the chain advances to N+3 -/// Miner 1 wins the next tenure and proposes a block N+4 with a TenureChangePayload -/// Signers accept and the chain advances to N+4 -/// Asserts: -/// - Block N+1 contains the TenureChangePayload -/// - Block N+2 contains the TokenTransfer -/// - Block N+3 contains the TenureExtend -/// - Block N+4 contains the TenureChangePayload -/// - The stacks tip advances to N+4 #[test] #[ignore] -fn continue_after_fast_block_no_sortition() { +/// Test that a miner will extend its tenure after the succeeding miner commits to the wrong block. +/// - Miner 1 wins a tenure and mines normally +/// - Miner 1 wins another tenure and mines normally, but miner 2 does not see any blocks from this tenure +/// - Miner 2 wins a tenure and is unable to mine a block +/// - Miner 1 extends its tenure and mines an additional block +/// - Miner 2 wins the next tenure and mines normally +fn tenure_extend_after_bad_commit() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } @@ -5720,9 +8137,9 @@ fn continue_after_fast_block_no_sortition() { let sender_sk = Secp256k1PrivateKey::new(); let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; - let send_fee = 180; - let num_txs = 1; - let sender_nonce = 0; + let send_fee = 180; + let num_txs = 2; + let mut sender_nonce = 0; let btc_miner_1_seed = vec![1, 1, 1, 1]; let btc_miner_2_seed = vec![2, 2, 2, 2]; @@ -5745,6 +8162,8 @@ fn continue_after_fast_block_no_sortition() { // partition the signer set so that ~half are listening and using node 1 for RPC and events, // and the rest are using node 2 + let
first_proposal_burn_block_timing = Duration::from_secs(1); + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, vec![(sender_addr, (send_amt + send_fee) * num_txs)], @@ -5755,6 +8174,8 @@ fn continue_after_fast_block_no_sortition() { &node_2_rpc_bind }; signer_config.node_host = node_host.to_string(); + signer_config.block_proposal_timeout = Duration::from_secs(30); + signer_config.first_proposal_burn_block_timing = first_proposal_burn_block_timing; }, |config| { config.node.rpc_bind = format!("{localhost}:{node_1_rpc}"); @@ -5788,6 +8209,13 @@ fn continue_after_fast_block_no_sortition() { Some(vec![btc_miner_1_pk, btc_miner_2_pk]), None, ); + + let rl1_commits = signer_test.running_nodes.commits_submitted.clone(); + let rl1_skip_commit_op = signer_test + .running_nodes + .nakamoto_test_skip_commit_op + .clone(); + let conf = signer_test.running_nodes.conf.clone(); let mut conf_node_2 = conf.clone(); conf_node_2.node.rpc_bind = format!("{localhost}:{node_2_rpc}"); @@ -5821,44 +8249,11 @@ fn continue_after_fast_block_no_sortition() { let Counters { naka_submitted_commits: rl2_commits, naka_skip_commit_op: rl2_skip_commit_op, - naka_mined_blocks: blocks_mined2, .. 
} = run_loop_2.counters(); - let rl1_commits = signer_test.running_nodes.commits_submitted.clone(); let blocks_mined1 = signer_test.running_nodes.nakamoto_blocks_mined.clone(); - // Some helper functions for verifying the blocks contain their expected transactions - let verify_last_block_contains_tenure_change_tx = |cause: TenureChangeCause| { - let blocks = test_observer::get_blocks(); - let tenure_change_tx = &blocks.last().unwrap(); - let transactions = tenure_change_tx["transactions"].as_array().unwrap(); - let tx = transactions.first().expect("No transactions in block"); - let raw_tx = tx["raw_tx"].as_str().unwrap(); - let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); - let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); - match &parsed.payload { - TransactionPayload::TenureChange(payload) => { - assert_eq!(payload.cause, cause); - } - _ => panic!("Expected tenure change transaction, got {parsed:?}"), - }; - }; - - let verify_last_block_contains_transfer_tx = || { - let blocks = test_observer::get_blocks(); - let tenure_change_tx = &blocks.last().unwrap(); - let transactions = tenure_change_tx["transactions"].as_array().unwrap(); - let tx = transactions.first().expect("No transactions in block"); - let raw_tx = tx["raw_tx"].as_str().unwrap(); - let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); - let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); - assert!( - matches!(parsed.payload, TransactionPayload::TokenTransfer(_, _, _)), - "Expected token transfer transaction, got {parsed:?}" - ); - }; - info!("------------------------- Pause Miner 2's Block Commits -------------------------"); // Make sure Miner 2 cannot win a sortition at first. 
@@ -5898,28 +8293,23 @@ fn continue_after_fast_block_no_sortition() { let burnchain = signer_test.running_nodes.conf.get_burnchain(); let sortdb = burnchain.open_sortition_db(true).unwrap(); - let all_signers = signer_test - .signer_stacks_private_keys - .iter() - .map(StacksPublicKey::from_private) - .collect::>(); let get_burn_height = || { - SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) + let sort_height = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) .unwrap() - .block_height + .block_height; + let info_1 = get_chain_info(&conf); + let info_2 = get_chain_info(&conf_node_2); + min( + sort_height, + min(info_1.burn_block_height, info_2.burn_block_height), + ) }; - let starting_peer_height = get_chain_info(&conf).stacks_tip_height; - let starting_burn_height = get_burn_height(); - let mut btc_blocks_mined = 0; info!("------------------------- Pause Miner 1's Block Commit -------------------------"); // Make sure miner 1 doesn't submit any further block commits for the next tenure BEFORE mining the bitcoin block - signer_test - .running_nodes - .nakamoto_test_skip_commit_op - .set(true); + rl1_skip_commit_op.set(true); - info!("------------------------- Miner 1 Mines a Normal Tenure A -------------------------"); + info!("------------------------- Miner 1 Wins Normal Tenure A -------------------------"); let blocks_processed_before_1 = blocks_mined1.load(Ordering::SeqCst); let nmb_old_blocks = test_observer::get_blocks().len(); let stacks_height_before = signer_test @@ -5932,7 +8322,6 @@ fn continue_after_fast_block_no_sortition() { .running_nodes .btc_regtest_controller .build_next_block(1); - btc_blocks_mined += 1; // assure we have a successful sortition that miner A won let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); @@ -5946,9 +8335,11 @@ fn continue_after_fast_block_no_sortition() { .get_peer_info() .expect("Failed to get peer info") .stacks_tip_height; + let info_2 = get_chain_info(&conf_node_2); Ok( 
blocks_mined1.load(Ordering::SeqCst) > blocks_processed_before_1 && stacks_height > stacks_height_before + && info_2.stacks_tip_height > stacks_height_before && test_observer::get_blocks().len() > nmb_old_blocks, ) }) @@ -5956,163 +8347,173 @@ fn continue_after_fast_block_no_sortition() { verify_last_block_contains_tenure_change_tx(TenureChangeCause::BlockFound); - info!("------------------------- Make Signers Reject All Subsequent Proposals -------------------------"); + info!("------------------------- Miner 1 Mines Another Block -------------------------"); + let blocks_processed_before_1 = blocks_mined1.load(Ordering::SeqCst); + let nmb_old_blocks = test_observer::get_blocks().len(); let stacks_height_before = signer_test .stacks_client .get_peer_info() .expect("Failed to get peer info") .stacks_tip_height; - // Make all signers ignore block proposals - let ignoring_signers = all_signers.to_vec(); - TEST_REJECT_ALL_BLOCK_PROPOSAL - .lock() - .unwrap() - .replace(ignoring_signers.clone()); + // submit a tx so that the miner will mine an extra block + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); + submit_tx(&http_origin, &transfer_tx); + sender_nonce += 1; - info!("------------------------- Submit Miner 2 Block Commit -------------------------"); - let rejections_before = signer_test - .running_nodes - .nakamoto_blocks_rejected - .load(Ordering::SeqCst); + // wait for the new block to be processed + wait_for(30, || { + let stacks_height = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + let info_2 = get_chain_info(&conf_node_2); + Ok( + blocks_mined1.load(Ordering::SeqCst) > blocks_processed_before_1 + && stacks_height > stacks_height_before + && info_2.stacks_tip_height > stacks_height_before + && test_observer::get_blocks().len() > nmb_old_blocks, + ) + }) + .expect("Timed out 
waiting for block to be mined and processed"); - let rl2_commits_before = rl2_commits.load(Ordering::SeqCst); - // Unpause miner 2's block commits - rl2_skip_commit_op.set(false); + info!("------------------------- Pause Block Proposals -------------------------"); + TEST_MINE_STALL.lock().unwrap().replace(true); - // Ensure the miner 2 submits a block commit before mining the bitcoin block + // Unpause miner 1's block commits + let rl1_commits_before = rl1_commits.load(Ordering::SeqCst); + rl1_skip_commit_op.set(false); + + // Ensure miner 1 submits a block commit before mining the bitcoin block wait_for(30, || { - Ok(rl2_commits.load(Ordering::SeqCst) > rl2_commits_before) + Ok(rl1_commits.load(Ordering::SeqCst) > rl1_commits_before) }) .unwrap(); - // Make miner 2 also fail to submit any FURTHER block commits - rl2_skip_commit_op.set(true); - - let burn_height_before = get_burn_height(); + rl1_skip_commit_op.set(true); - info!("------------------------- Miner 2 Mines an Empty Tenure B -------------------------"; - "burn_height_before" => burn_height_before, - "rejections_before" => rejections_before, - ); + info!("------------------------- Miner 1 Wins Tenure B -------------------------"); + let blocks_processed_before_1 = blocks_mined1.load(Ordering::SeqCst); + let nmb_old_blocks = test_observer::get_blocks().len(); + let stacks_height_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + let burn_height_before = get_burn_height(); next_block_and( &mut signer_test.running_nodes.btc_regtest_controller, 60, || Ok(get_burn_height() > burn_height_before), ) .unwrap(); - btc_blocks_mined += 1; - // assure we have a successful sortition that miner B won + // assure we have a successful sortition that miner 1 won let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); assert!(tip.sortition); - assert_eq!(tip.miner_pk_hash.unwrap(), mining_pkh_2); + 
assert_eq!(tip.miner_pk_hash.unwrap(), mining_pkh_1); + + info!("----------------- Miner 2 Submits Block Commit Before Any Blocks ------------------"); + + let rl2_commits_before = rl2_commits.load(Ordering::SeqCst); + rl2_skip_commit_op.set(false); - info!("----- Waiting for block rejections -----"); - let min_rejections = num_signers * 4 / 10; - // Wait until we have some block rejections wait_for(30, || { - std::thread::sleep(Duration::from_secs(1)); - let chunks = test_observer::get_stackerdb_chunks(); - let rejections: Vec<_> = chunks - .into_iter() - .flat_map(|chunk| chunk.modified_slots) - .filter(|chunk| { - let Ok(message) = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) - else { - return false; - }; - matches!( - message, - SignerMessage::BlockResponse(BlockResponse::Rejected(_)) - ) - }) - .collect(); - Ok(rejections.len() >= min_rejections) + Ok(rl2_commits.load(Ordering::SeqCst) > rl2_commits_before) }) - .expect("Timed out waiting for block rejections"); + .expect("Timed out waiting for block commit from miner 2"); - // Mine another couple burn blocks and ensure there is _no_ sortition - info!("------------------------- Mine Two Burn Block(s) with No Sortitions -------------------------"); - for _ in 0..2 { - let blocks_processed_before_1 = blocks_mined1.load(Ordering::SeqCst); - let blocks_processed_before_2 = blocks_mined2.load(Ordering::SeqCst); - let burn_height_before = get_burn_height(); - let commits_before_1 = rl1_commits.load(Ordering::SeqCst); - let commits_before_2 = rl2_commits.load(Ordering::SeqCst); + // Re-pause block commits for miner 2 so that it cannot RBF its original commit + rl2_skip_commit_op.set(true); - next_block_and( - &mut signer_test.running_nodes.btc_regtest_controller, - 30, - || Ok(get_burn_height() > burn_height_before), - ) - .unwrap(); - btc_blocks_mined += 1; + info!("----------------------------- Resume Block Production -----------------------------"); - 
assert_eq!(rl1_commits.load(Ordering::SeqCst), commits_before_1); - assert_eq!(rl2_commits.load(Ordering::SeqCst), commits_before_2); - assert_eq!( - blocks_mined1.load(Ordering::SeqCst), - blocks_processed_before_1 - ); - assert_eq!( - blocks_mined2.load(Ordering::SeqCst), - blocks_processed_before_2 - ); + TEST_MINE_STALL.lock().unwrap().replace(false); - // assure we have NO sortition - let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - assert!(!tip.sortition); - } + wait_for(60, || { + let stacks_height = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + let info_2 = get_chain_info(&conf_node_2); + Ok( + blocks_mined1.load(Ordering::SeqCst) > blocks_processed_before_1 + && stacks_height > stacks_height_before + && info_2.stacks_tip_height > stacks_height_before + && test_observer::get_blocks().len() > nmb_old_blocks, + ) + }) + .expect("Timed out waiting for block to be mined and processed"); - // Verify that no Stacks blocks have been mined (signers are ignoring) and no commits have been submitted by either miner - let stacks_height = signer_test + info!("--------------- Miner 2 Wins Tenure C With Old Block Commit ----------------"); + + let blocks_processed_before_1 = blocks_mined1.load(Ordering::SeqCst); + let nmb_old_blocks = test_observer::get_blocks().len(); + let stacks_height_before = signer_test .stacks_client .get_peer_info() .expect("Failed to get peer info") .stacks_tip_height; - assert_eq!(stacks_height, stacks_height_before); - let stacks_height_before = stacks_height; + let burn_height_before = get_burn_height(); - info!("------------------------- Enabling Signer Block Proposals -------------------------"; - "stacks_height" => stacks_height_before, + // Sleep enough time to pass the first proposal burn block timing + let sleep_duration = first_proposal_burn_block_timing.saturating_add(Duration::from_secs(2)); + info!( + "Sleeping for {} seconds before issuing 
next burn block.", + sleep_duration.as_secs() ); + thread::sleep(sleep_duration); - let blocks_processed_before_2 = blocks_mined2.load(Ordering::SeqCst); - let nmb_old_blocks = test_observer::get_blocks().len(); - // Allow signers to respond to proposals again - TEST_REJECT_ALL_BLOCK_PROPOSAL - .lock() - .unwrap() - .replace(Vec::new()); + info!("--------------- Triggering new burn block for tenure C ---------------"); + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || Ok(get_burn_height() > burn_height_before), + ) + .expect("Timed out waiting for burn block to be processed"); - info!("------------------------- Wait for Miner B's Block N -------------------------"); - // wait for the new block to be processed - wait_for(30, || { + // assure we have a successful sortition that miner 2 won + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + assert!(tip.sortition); + assert_eq!(tip.miner_pk_hash.unwrap(), mining_pkh_2); + + info!("------------------------- Miner 1 Extends Tenure B -------------------------"); + + // wait for a tenure extend block from miner 1 to be processed + // (miner 2's proposals will be rejected) + wait_for(60, || { let stacks_height = signer_test .stacks_client .get_peer_info() .expect("Failed to get peer info") .stacks_tip_height; + let info_2 = get_chain_info(&conf_node_2); Ok( - blocks_mined2.load(Ordering::SeqCst) > blocks_processed_before_2 + blocks_mined1.load(Ordering::SeqCst) > blocks_processed_before_1 && stacks_height > stacks_height_before + && info_2.stacks_tip_height > stacks_height_before && test_observer::get_blocks().len() > nmb_old_blocks, ) }) - .expect("Timed out waiting for block to be mined and processed"); + .expect("Timed out waiting for tenure extend block to be mined and processed"); - info!( - "------------------------- Verify Tenure Change Tx in Miner B's Block N -------------------------" - ); - 
verify_last_block_contains_tenure_change_tx(TenureChangeCause::BlockFound); + verify_last_block_contains_tenure_change_tx(TenureChangeCause::Extended); - info!("------------------------- Wait for Miner B's Block N+1 -------------------------"); + info!("------------------------- Miner 1 Mines Another Block -------------------------"); + let blocks_processed_before_1 = blocks_mined1.load(Ordering::SeqCst); let nmb_old_blocks = test_observer::get_blocks().len(); - let blocks_processed_before_2 = blocks_mined2.load(Ordering::SeqCst); let stacks_height_before = signer_test .stacks_client .get_peer_info() @@ -6137,80 +8538,56 @@ fn continue_after_fast_block_no_sortition() { .get_peer_info() .expect("Failed to get peer info") .stacks_tip_height; + let info_2 = get_chain_info(&conf_node_2); Ok( - blocks_mined2.load(Ordering::SeqCst) > blocks_processed_before_2 - && stacks_height > stacks_height_before - && test_observer::get_blocks().len() > nmb_old_blocks, - ) - }) - .expect("Timed out waiting for block to be mined and processed"); - - info!("------------------------- Verify Miner B's Block N+1 -------------------------"); - - verify_last_block_contains_transfer_tx(); - - info!("------------------------- Mine An Empty Sortition -------------------------"); - let nmb_old_blocks = test_observer::get_blocks().len(); - next_block_and( - &mut signer_test.running_nodes.btc_regtest_controller, - 60, - || { - Ok(get_burn_height() > burn_height_before - && test_observer::get_blocks().len() > nmb_old_blocks) - }, - ) - .unwrap(); - btc_blocks_mined += 1; + blocks_mined1.load(Ordering::SeqCst) > blocks_processed_before_1 + && stacks_height > stacks_height_before + && info_2.stacks_tip_height > stacks_height_before + && test_observer::get_blocks().len() > nmb_old_blocks, + ) + }) + .expect("Timed out waiting for block to be mined and processed"); - info!("------------------------- Verify Miner B's Issues a Tenure Change Extend in Block N+2 -------------------------"); - 
verify_last_block_contains_tenure_change_tx(TenureChangeCause::Extended); + info!("------------------------- Miner 2 Mines the Next Tenure -------------------------"); - info!("------------------------- Unpause Miner A's Block Commits -------------------------"); - let commits_before_1 = rl1_commits.load(Ordering::SeqCst); - signer_test - .running_nodes - .nakamoto_test_skip_commit_op - .set(false); + // Re-enable block commits for miner 2 + let rl2_commits_before = rl2_commits.load(Ordering::SeqCst); + rl2_skip_commit_op.set(false); + + // Wait for block commit from miner 2 wait_for(30, || { - Ok(rl1_commits.load(Ordering::SeqCst) > commits_before_1) + Ok(rl2_commits.load(Ordering::SeqCst) > rl2_commits_before) }) - .unwrap(); + .expect("Timed out waiting for block commit from miner 2"); + + let stacks_height_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; - info!("------------------------- Run Miner A's Tenure -------------------------"); - let nmb_old_blocks = test_observer::get_blocks().len(); - let burn_height_before = get_burn_height(); - let blocks_processed_before_1 = blocks_mined1.load(Ordering::SeqCst); next_block_and( &mut signer_test.running_nodes.btc_regtest_controller, 60, || { - Ok(get_burn_height() > burn_height_before - && blocks_mined1.load(Ordering::SeqCst) > blocks_processed_before_1 - && test_observer::get_blocks().len() > nmb_old_blocks) + let stacks_height = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + let info_2 = get_chain_info(&conf_node_2); + Ok(stacks_height > stacks_height_before + && info_2.stacks_tip_height > stacks_height_before) }, ) - .unwrap(); - btc_blocks_mined += 1; + .expect("Timed out waiting for final block to be mined and processed"); - // assure we have a successful sortition that miner A won + // assure we have a successful sortition that miner 2 won and it had a block found tenure change let 
tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); assert!(tip.sortition); - assert_eq!(tip.miner_pk_hash.unwrap(), mining_pkh_1); - - info!("------------------------- Verify Miner A's Issued a Tenure Change in Block N+4 -------------------------"); + assert_eq!(tip.miner_pk_hash.unwrap(), mining_pkh_2); verify_last_block_contains_tenure_change_tx(TenureChangeCause::BlockFound); - info!( - "------------------------- Confirm Burn and Stacks Block Heights -------------------------" - ); - let peer_info = signer_test - .stacks_client - .get_peer_info() - .expect("Failed to get peer info"); - - assert_eq!(get_burn_height(), starting_burn_height + btc_blocks_mined); - assert_eq!(peer_info.stacks_tip_height, starting_peer_height + 5); - info!("------------------------- Shutdown -------------------------"); rl2_coord_channels .lock() @@ -6223,504 +8600,557 @@ fn continue_after_fast_block_no_sortition() { #[test] #[ignore] -/// Test that we can mine a tenure extend and then continue mining afterwards. -fn continue_after_tenure_extend() { +/// Test that a miner will extend its tenure after the succeeding miner commits to the wrong block.
+/// - Miner 1 wins a tenure and mines normally +/// - Miner 1 wins another tenure and mines normally, but miner 2 does not see any blocks from this tenure +/// - Miner 2 wins a tenure and is unable to mine a block +/// - Miner 1 extends its tenure and mines an additional block +/// - Miner 2 wins another tenure and is still unable to mine a block +/// - Miner 1 extends its tenure again and mines an additional block +/// - Miner 2 wins the next tenure and mines normally +fn tenure_extend_after_2_bad_commits() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } - tracing_subscriber::registry() - .with(fmt::layer()) - .with(EnvFilter::from_default_env()) - .init(); - - info!("------------------------- Test Setup -------------------------"); let num_signers = 5; + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let sender_sk = Secp256k1PrivateKey::new(); let sender_addr = tests::to_addr(&sender_sk); - let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let send_amt = 100; let send_fee = 180; - let mut signer_test: SignerTest = - SignerTest::new(num_signers, vec![(sender_addr, (send_amt + send_fee) * 5)]); - let timeout = Duration::from_secs(200); - let coord_channel = signer_test.running_nodes.coord_channel.clone(); + let num_txs = 2; + let mut sender_nonce = 0; + + let btc_miner_1_seed = vec![1, 1, 1, 1]; + let btc_miner_2_seed = vec![2, 2, 2, 2]; + let btc_miner_1_pk = Keychain::default(btc_miner_1_seed.clone()).get_pub_key(); + let btc_miner_2_pk = Keychain::default(btc_miner_2_seed.clone()).get_pub_key(); + + let node_1_rpc = gen_random_port(); + let node_1_p2p = gen_random_port(); + let node_2_rpc = gen_random_port(); + let node_2_p2p = gen_random_port(); + + let localhost = "127.0.0.1"; + let node_1_rpc_bind = format!("{localhost}:{node_1_rpc}"); + let node_2_rpc_bind = format!("{localhost}:{node_2_rpc}"); + let mut node_2_listeners = Vec::new(); + + let max_nakamoto_tenures = 30; + + 
info!("------------------------- Test Setup -------------------------"); + // partition the signer set so that ~half are listening and using node 1 for RPC and events, + // and the rest are using node 2 + + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![(sender_addr, (send_amt + send_fee) * num_txs)], + |signer_config| { + let node_host = if signer_config.endpoint.port() % 2 == 0 { + &node_1_rpc_bind + } else { + &node_2_rpc_bind + }; + signer_config.node_host = node_host.to_string(); + signer_config.block_proposal_timeout = Duration::from_secs(30); + }, + |config| { + config.node.rpc_bind = format!("{localhost}:{node_1_rpc}"); + config.node.p2p_bind = format!("{localhost}:{node_1_p2p}"); + config.node.data_url = format!("http://{localhost}:{node_1_rpc}"); + config.node.p2p_address = format!("{localhost}:{node_1_p2p}"); + config.miner.wait_on_interim_blocks = Duration::from_secs(5); + config.node.pox_sync_sample_secs = 30; + config.burnchain.pox_reward_length = Some(max_nakamoto_tenures); + + config.node.seed = btc_miner_1_seed.clone(); + config.node.local_peer_seed = btc_miner_1_seed.clone(); + config.burnchain.local_mining_public_key = Some(btc_miner_1_pk.to_hex()); + config.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[1])); + + config.events_observers.retain(|listener| { + let Ok(addr) = std::net::SocketAddr::from_str(&listener.endpoint) else { + warn!( + "Cannot parse {} to a socket, assuming it isn't a signer-listener binding", + listener.endpoint + ); + return true; + }; + if addr.port() % 2 == 0 || addr.port() == test_observer::EVENT_OBSERVER_PORT { + return true; + } + node_2_listeners.push(listener.clone()); + false + }) + }, + Some(vec![btc_miner_1_pk, btc_miner_2_pk]), + None, + ); + + let rl1_commits = signer_test.running_nodes.commits_submitted.clone(); + let rl1_skip_commit_op = signer_test + .running_nodes + .nakamoto_test_skip_commit_op + .clone(); + + let conf = 
signer_test.running_nodes.conf.clone(); + let mut conf_node_2 = conf.clone(); + conf_node_2.node.rpc_bind = format!("{localhost}:{node_2_rpc}"); + conf_node_2.node.p2p_bind = format!("{localhost}:{node_2_p2p}"); + conf_node_2.node.data_url = format!("http://{localhost}:{node_2_rpc}"); + conf_node_2.node.p2p_address = format!("{localhost}:{node_2_p2p}"); + conf_node_2.node.seed = btc_miner_2_seed.clone(); + conf_node_2.burnchain.local_mining_public_key = Some(btc_miner_2_pk.to_hex()); + conf_node_2.node.local_peer_seed = btc_miner_2_seed.clone(); + conf_node_2.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[2])); + conf_node_2.node.miner = true; + conf_node_2.events_observers.clear(); + conf_node_2.events_observers.extend(node_2_listeners); + assert!(!conf_node_2.events_observers.is_empty()); + + let node_1_sk = Secp256k1PrivateKey::from_seed(&conf.node.local_peer_seed); + let node_1_pk = StacksPublicKey::from_private(&node_1_sk); + + conf_node_2.node.working_dir = format!("{}-1", conf_node_2.node.working_dir); + + conf_node_2.node.set_bootstrap_nodes( + format!("{}@{}", &node_1_pk.to_hex(), conf.node.p2p_bind), + conf.burnchain.chain_id, + conf.burnchain.peer_version, + ); let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + let mut run_loop_2 = boot_nakamoto::BootRunLoop::new(conf_node_2.clone()).unwrap(); + let run_loop_stopper_2 = run_loop_2.get_termination_switch(); + let rl2_coord_channels = run_loop_2.coordinator_channels(); + let Counters { + naka_submitted_commits: rl2_commits, + naka_skip_commit_op: rl2_skip_commit_op, + .. + } = run_loop_2.counters(); + + let blocks_mined1 = signer_test.running_nodes.nakamoto_blocks_mined.clone(); + + info!("------------------------- Pause Miner 2's Block Commits -------------------------"); + + // Make sure Miner 2 cannot win a sortition at first. 
+ rl2_skip_commit_op.set(true); + + info!("------------------------- Boot to Epoch 3.0 -------------------------"); + + let run_loop_2_thread = thread::Builder::new() + .name("run_loop_2".into()) + .spawn(move || run_loop_2.start(None, 0)) + .unwrap(); + signer_test.boot_to_epoch_3(); - info!("------------------------- Mine Normal Tenure -------------------------"); - signer_test.mine_and_verify_confirmed_naka_block(timeout, num_signers); + wait_for(120, || { + let Some(node_1_info) = get_chain_info_opt(&conf) else { + return Ok(false); + }; + let Some(node_2_info) = get_chain_info_opt(&conf_node_2) else { + return Ok(false); + }; + Ok(node_1_info.stacks_tip_height == node_2_info.stacks_tip_height) + }) + .expect("Timed out waiting for boostrapped node to catch up to the miner"); + + let mining_pkh_1 = Hash160::from_node_public_key(&StacksPublicKey::from_private( + &conf.miner.mining_key.unwrap(), + )); + let mining_pkh_2 = Hash160::from_node_public_key(&StacksPublicKey::from_private( + &conf_node_2.miner.mining_key.unwrap(), + )); + debug!("The mining key for miner 1 is {mining_pkh_1}"); + debug!("The mining key for miner 2 is {mining_pkh_2}"); + + info!("------------------------- Reached Epoch 3.0 -------------------------"); + + let burnchain = signer_test.running_nodes.conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + + let get_burn_height = || { + let sort_height = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) + .unwrap() + .block_height; + let info_1 = get_chain_info(&conf); + let info_2 = get_chain_info(&conf_node_2); + min( + sort_height, + min(info_1.burn_block_height, info_2.burn_block_height), + ) + }; + + info!("------------------------- Pause Miner 1's Block Commit -------------------------"); + // Make sure miner 1 doesn't submit any further block commits for the next tenure BEFORE mining the bitcoin block + rl1_skip_commit_op.set(true); + + info!("------------------------- Miner 1 Wins Normal Tenure A 
-------------------------"); + let blocks_processed_before_1 = blocks_mined1.load(Ordering::SeqCst); + let nmb_old_blocks = test_observer::get_blocks().len(); + let stacks_height_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + + signer_test + .running_nodes + .btc_regtest_controller + .build_next_block(1); + + // assure we have a successful sortition that miner A won + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + assert!(tip.sortition); + assert_eq!(tip.miner_pk_hash.unwrap(), mining_pkh_1); + + // wait for the new block to be processed + wait_for(60, || { + let stacks_height = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + Ok( + blocks_mined1.load(Ordering::SeqCst) > blocks_processed_before_1 + && stacks_height > stacks_height_before + && test_observer::get_blocks().len() > nmb_old_blocks, + ) + }) + .unwrap(); + + verify_last_block_contains_tenure_change_tx(TenureChangeCause::BlockFound); - info!("------------------------- Extend Tenure -------------------------"); - signer_test - .running_nodes - .nakamoto_test_skip_commit_op - .set(true); + info!("------------------------- Miner 1 Mines Another Block -------------------------"); - // It's possible that we have a pending block commit already. - // Mine two BTC blocks to "flush" this commit. 
- let burn_height = signer_test + let blocks_processed_before_1 = blocks_mined1.load(Ordering::SeqCst); + let nmb_old_blocks = test_observer::get_blocks().len(); + let stacks_height_before = signer_test .stacks_client .get_peer_info() .expect("Failed to get peer info") - .burn_block_height; - for i in 0..2 { - info!( - "------------- After pausing commits, triggering 2 BTC blocks: ({} of 2) -----------", - i + 1 - ); - - let blocks_processed_before = coord_channel - .lock() - .expect("Mutex poisoned") - .get_stacks_blocks_processed(); - signer_test - .running_nodes - .btc_regtest_controller - .build_next_block(1); + .stacks_tip_height; - wait_for(60, || { - let blocks_processed_after = coord_channel - .lock() - .expect("Mutex poisoned") - .get_stacks_blocks_processed(); - Ok(blocks_processed_after > blocks_processed_before) - }) - .expect("Timed out waiting for tenure extend block"); - } + // submit a tx so that the miner will mine an extra block + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); + submit_tx(&http_origin, &transfer_tx); + sender_nonce += 1; + // wait for the new block to be processed wait_for(30, || { - let new_burn_height = signer_test + let stacks_height = signer_test .stacks_client .get_peer_info() .expect("Failed to get peer info") - .burn_block_height; - Ok(new_burn_height == burn_height + 2) + .stacks_tip_height; + Ok( + blocks_mined1.load(Ordering::SeqCst) > blocks_processed_before_1 + && stacks_height > stacks_height_before + && test_observer::get_blocks().len() > nmb_old_blocks, + ) }) - .expect("Timed out waiting for burnchain to advance"); + .expect("Timed out waiting for block to be mined and processed"); - // The last block should have a single instruction in it, the tenure extend - let blocks = test_observer::get_blocks(); - let last_block = blocks.last().unwrap(); - let transactions = 
last_block["transactions"].as_array().unwrap(); - let tx = transactions.first().expect("No transactions in block"); - let raw_tx = tx["raw_tx"].as_str().unwrap(); - let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); - let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); - match &parsed.payload { - TransactionPayload::TenureChange(payload) - if payload.cause == TenureChangeCause::Extended => {} - _ => panic!("Expected tenure extend transaction, got {parsed:?}"), - }; + info!("------------------------- Pause Block Proposals -------------------------"); + TEST_MINE_STALL.lock().unwrap().replace(true); - // Verify that the miner can continue mining in the tenure with the tenure extend - info!("------------------------- Mine After Tenure Extend -------------------------"); - let mut blocks_processed_before = coord_channel - .lock() - .expect("Mutex poisoned") - .get_stacks_blocks_processed(); - for sender_nonce in 0..5 { - // submit a tx so that the miner will mine an extra block - let transfer_tx = make_stacks_transfer( - &sender_sk, - sender_nonce, - send_fee, - signer_test.running_nodes.conf.burnchain.chain_id, - &recipient, - send_amt, - ); - submit_tx(&http_origin, &transfer_tx); + // Unpause miner 1's block commits + let rl1_commits_before = rl1_commits.load(Ordering::SeqCst); + rl1_skip_commit_op.set(false); - info!("Submitted transfer tx and waiting for block proposal"); - wait_for(30, || { - let blocks_processed_after = coord_channel - .lock() - .expect("Mutex poisoned") - .get_stacks_blocks_processed(); - Ok(blocks_processed_after > blocks_processed_before) - }) - .expect("Timed out waiting for block proposal"); - blocks_processed_before = coord_channel - .lock() - .expect("Mutex poisoned") - .get_stacks_blocks_processed(); - info!("Block {blocks_processed_before} processed, continuing"); - } + // Ensure miner 1 submits a block commit before mining the bitcoin block + wait_for(30, || { + Ok(rl1_commits.load(Ordering::SeqCst) > 
rl1_commits_before) + }) + .unwrap(); - signer_test.shutdown(); -} + rl1_skip_commit_op.set(true); -#[test] -#[ignore] -/// Test that signers can successfully sign a block proposal in the 0th tenure of a reward cycle -/// This ensures there is no race condition in the /v2/pox endpoint which could prevent it from updating -/// on time, possibly triggering an "off by one" like behaviour in the 0th tenure. -/// -fn signing_in_0th_tenure_of_reward_cycle() { - if env::var("BITCOIND_TEST") != Ok("1".into()) { - return; - } + info!("------------------------- Miner 1 Wins Tenure B -------------------------"); - tracing_subscriber::registry() - .with(fmt::layer()) - .with(EnvFilter::from_default_env()) - .init(); + let blocks_processed_before_1 = blocks_mined1.load(Ordering::SeqCst); + let nmb_old_blocks = test_observer::get_blocks().len(); + let stacks_height_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + let burn_height_before = get_burn_height(); + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || Ok(get_burn_height() > burn_height_before), + ) + .unwrap(); - info!("------------------------- Test Setup -------------------------"); - let num_signers = 5; - let mut signer_test: SignerTest = SignerTest::new(num_signers, vec![]); - let signer_public_keys = signer_test - .signer_stacks_private_keys - .iter() - .map(StacksPublicKey::from_private) - .collect::>(); - let long_timeout = Duration::from_secs(200); - signer_test.boot_to_epoch_3(); - let curr_reward_cycle = signer_test.get_current_reward_cycle(); - let next_reward_cycle = curr_reward_cycle + 1; - // Mine until the boundary of the first full Nakamoto reward cycles (epoch 3 starts in the middle of one) - let next_reward_cycle_height_boundary = signer_test - .running_nodes - .btc_regtest_controller - .get_burnchain() - .reward_cycle_to_block_height(next_reward_cycle) - .saturating_sub(1); + // assure we have a 
successful sortition that miner 1 won + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + assert!(tip.sortition); + assert_eq!(tip.miner_pk_hash.unwrap(), mining_pkh_1); - info!("------------------------- Advancing to {next_reward_cycle} Boundary at Block {next_reward_cycle_height_boundary} -------------------------"); - signer_test.run_until_burnchain_height_nakamoto( - long_timeout, - next_reward_cycle_height_boundary, - num_signers, - ); + info!("----------------- Miner 2 Submits Block Commit Before Any Blocks ------------------"); - let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); - let get_v3_signer = |pubkey: &Secp256k1PublicKey, reward_cycle: u64| { - let url = &format!( - "{http_origin}/v3/signer/{pk}/{reward_cycle}", - pk = pubkey.to_hex() - ); - info!("Send request: GET {url}"); - reqwest::blocking::get(url) - .unwrap_or_else(|e| panic!("GET request failed: {e}")) - .json::() - .unwrap() - .blocks_signed - }; + let rl2_commits_before = rl2_commits.load(Ordering::SeqCst); + rl2_skip_commit_op.set(false); - assert_eq!(signer_test.get_current_reward_cycle(), curr_reward_cycle); + wait_for(30, || { + Ok(rl2_commits.load(Ordering::SeqCst) > rl2_commits_before) + }) + .expect("Timed out waiting for block commit from miner 2"); - for signer in &signer_public_keys { - let blocks_signed = get_v3_signer(signer, next_reward_cycle); - assert_eq!(blocks_signed, 0); - } + // Re-pause block commits for miner 2 so that it cannot RBF its original commit + rl2_skip_commit_op.set(true); - info!("------------------------- Enter Reward Cycle {next_reward_cycle} -------------------------"); - for signer in &signer_public_keys { - let blocks_signed = get_v3_signer(signer, next_reward_cycle); - assert_eq!(blocks_signed, 0); - } - let blocks_before = signer_test - .running_nodes - .nakamoto_blocks_mined - .load(Ordering::SeqCst); - signer_test - .running_nodes - .btc_regtest_controller - .build_next_block(1); + 
info!("----------------------------- Resume Block Production -----------------------------"); - wait_for(30, || { - Ok(signer_test - .running_nodes - .nakamoto_blocks_mined - .load(Ordering::SeqCst) - > blocks_before) + TEST_MINE_STALL.lock().unwrap().replace(false); + + wait_for(60, || { + let stacks_height = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + Ok( + blocks_mined1.load(Ordering::SeqCst) > blocks_processed_before_1 + && stacks_height > stacks_height_before + && test_observer::get_blocks().len() > nmb_old_blocks, + ) }) - .unwrap(); + .expect("Timed out waiting for block to be mined and processed"); - let block_mined = test_observer::get_mined_nakamoto_blocks() - .last() - .unwrap() - .clone(); - // Must ensure that the signers that signed the block have their blocks_signed updated appropriately - for signature in &block_mined.signer_signature { - let signer = signer_public_keys - .iter() - .find(|pk| { - pk.verify(block_mined.signer_signature_hash.as_bytes(), signature) - .unwrap() - }) - .expect("Unknown signer signature"); - let blocks_signed = get_v3_signer(signer, next_reward_cycle); - assert_eq!(blocks_signed, 1); - } - assert_eq!(signer_test.get_current_reward_cycle(), next_reward_cycle); -} + info!("--------------- Miner 2 Wins Tenure C With Old Block Commit ----------------"); -/// This test involves two miners with a custom chain id, each mining tenures with 6 blocks each. -/// Half of the signers are attached to each miner, so the test also verifies that -/// the signers' messages successfully make their way to the active miner. 
-#[test] -#[ignore] -fn multiple_miners_with_custom_chain_id() { - let num_signers = 5; - let max_nakamoto_tenures = 20; - let inter_blocks_per_tenure = 5; + let blocks_processed_before_1 = blocks_mined1.load(Ordering::SeqCst); + let nmb_old_blocks = test_observer::get_blocks().len(); + let stacks_height_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + let burn_height_before = get_burn_height(); - // setup sender + recipient for a test stx transfer - let sender_sk = Secp256k1PrivateKey::new(); - let sender_addr = tests::to_addr(&sender_sk); - let send_amt = 1000; - let send_fee = 180; - let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + // Pause block production again so that we can make sure miner 2 commits + // to the wrong block again. + TEST_MINE_STALL.lock().unwrap().replace(true); - let btc_miner_1_seed = vec![1, 1, 1, 1]; - let btc_miner_2_seed = vec![2, 2, 2, 2]; - let btc_miner_1_pk = Keychain::default(btc_miner_1_seed.clone()).get_pub_key(); - let btc_miner_2_pk = Keychain::default(btc_miner_2_seed.clone()).get_pub_key(); + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || Ok(get_burn_height() > burn_height_before), + ) + .expect("Timed out waiting for burn block to be processed"); - let node_1_rpc = gen_random_port(); - let node_1_p2p = gen_random_port(); - let node_2_rpc = gen_random_port(); - let node_2_p2p = gen_random_port(); + // assure we have a successful sortition that miner 2 won + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + assert!(tip.sortition); + assert_eq!(tip.miner_pk_hash.unwrap(), mining_pkh_2); - let localhost = "127.0.0.1"; - let node_1_rpc_bind = format!("{localhost}:{node_1_rpc}"); - let node_2_rpc_bind = format!("{localhost}:{node_2_rpc}"); - let mut node_2_listeners = Vec::new(); - let chain_id = 0x87654321; - // partition the signer set so that ~half are listening and 
using node 1 for RPC and events, - // and the rest are using node 2 - let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( - num_signers, - vec![( - sender_addr, - (send_amt + send_fee) * max_nakamoto_tenures * inter_blocks_per_tenure, - )], - |signer_config| { - let node_host = if signer_config.endpoint.port() % 2 == 0 { - &node_1_rpc_bind - } else { - &node_2_rpc_bind - }; - signer_config.node_host = node_host.to_string(); - signer_config.chain_id = Some(chain_id) - }, - |config| { - config.node.rpc_bind = format!("{localhost}:{node_1_rpc}"); - config.node.p2p_bind = format!("{localhost}:{node_1_p2p}"); - config.node.data_url = format!("http://{localhost}:{node_1_rpc}"); - config.node.p2p_address = format!("{localhost}:{node_1_p2p}"); - config.miner.wait_on_interim_blocks = Duration::from_secs(5); - config.node.pox_sync_sample_secs = 30; - config.burnchain.chain_id = chain_id; + info!("---------- Miner 2 Submits Block Commit Before Any Blocks (again) ----------"); - config.node.seed = btc_miner_1_seed.clone(); - config.node.local_peer_seed = btc_miner_1_seed.clone(); - config.burnchain.local_mining_public_key = Some(btc_miner_1_pk.to_hex()); - config.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[1])); + let rl2_commits_before = rl2_commits.load(Ordering::SeqCst); + rl2_skip_commit_op.set(false); - config.events_observers.retain(|listener| { - let Ok(addr) = std::net::SocketAddr::from_str(&listener.endpoint) else { - warn!( - "Cannot parse {} to a socket, assuming it isn't a signer-listener binding", - listener.endpoint - ); - return true; - }; - if addr.port() % 2 == 0 || addr.port() == test_observer::EVENT_OBSERVER_PORT { - return true; - } - node_2_listeners.push(listener.clone()); - false - }) - }, - Some(vec![btc_miner_1_pk, btc_miner_2_pk]), - None, - ); - let blocks_mined1 = signer_test.running_nodes.nakamoto_blocks_mined.clone(); + wait_for(30, || { + Ok(rl2_commits.load(Ordering::SeqCst) > rl2_commits_before) + }) + 
.expect("Timed out waiting for block commit from miner 2"); - let conf = signer_test.running_nodes.conf.clone(); - let mut conf_node_2 = conf.clone(); - conf_node_2.node.rpc_bind = format!("{localhost}:{node_2_rpc}"); - conf_node_2.node.p2p_bind = format!("{localhost}:{node_2_p2p}"); - conf_node_2.node.data_url = format!("http://{localhost}:{node_2_rpc}"); - conf_node_2.node.p2p_address = format!("{localhost}:{node_2_p2p}"); - conf_node_2.node.seed = btc_miner_2_seed.clone(); - conf_node_2.burnchain.local_mining_public_key = Some(btc_miner_2_pk.to_hex()); - conf_node_2.node.local_peer_seed = btc_miner_2_seed.clone(); - conf_node_2.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[2])); - conf_node_2.node.miner = true; - conf_node_2.events_observers.clear(); - conf_node_2.events_observers.extend(node_2_listeners); + // Re-pause block commits for miner 2 so that it cannot RBF its original commit + rl2_skip_commit_op.set(true); - assert!(!conf_node_2.events_observers.is_empty()); + info!("------------------------- Miner 1 Extends Tenure B -------------------------"); - let node_1_sk = Secp256k1PrivateKey::from_seed(&conf.node.local_peer_seed); - let node_1_pk = StacksPublicKey::from_private(&node_1_sk); + TEST_MINE_STALL.lock().unwrap().replace(false); - conf_node_2.node.working_dir = format!("{}-1", conf_node_2.node.working_dir); + // wait for a tenure extend block from miner 1 to be processed + // (miner 2's proposals will be rejected) + wait_for(60, || { + let stacks_height = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + Ok( + blocks_mined1.load(Ordering::SeqCst) > blocks_processed_before_1 + && stacks_height > stacks_height_before + && test_observer::get_blocks().len() > nmb_old_blocks, + ) + }) + .expect("Timed out waiting for tenure extend block to be mined and processed"); - conf_node_2.node.set_bootstrap_nodes( - format!("{}@{}", &node_1_pk.to_hex(), conf.node.p2p_bind), - 
conf.burnchain.chain_id, - conf.burnchain.peer_version, - ); + verify_last_block_contains_tenure_change_tx(TenureChangeCause::Extended); - let http_origin = format!("http://{}", &conf.node.rpc_bind); + info!("------------------------- Miner 1 Mines Another Block -------------------------"); - let mut run_loop_2 = boot_nakamoto::BootRunLoop::new(conf_node_2.clone()).unwrap(); - let run_loop_stopper_2 = run_loop_2.get_termination_switch(); - let rl2_coord_channels = run_loop_2.coordinator_channels(); - let Counters { - naka_submitted_commits: rl2_commits, - naka_mined_blocks: blocks_mined2, - .. - } = run_loop_2.counters(); - let run_loop_2_thread = thread::Builder::new() - .name("run_loop_2".into()) - .spawn(move || run_loop_2.start(None, 0)) - .unwrap(); + let blocks_processed_before_1 = blocks_mined1.load(Ordering::SeqCst); + let nmb_old_blocks = test_observer::get_blocks().len(); + let stacks_height_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; - signer_test.boot_to_epoch_3(); + // submit a tx so that the miner will mine an extra block + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); + submit_tx(&http_origin, &transfer_tx); - wait_for(120, || { - let Some(node_1_info) = get_chain_info_opt(&conf) else { - return Ok(false); - }; - let Some(node_2_info) = get_chain_info_opt(&conf_node_2) else { - return Ok(false); - }; - Ok(node_1_info.stacks_tip_height == node_2_info.stacks_tip_height) + // wait for the new block to be processed + wait_for(30, || { + let stacks_height = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + Ok( + blocks_mined1.load(Ordering::SeqCst) > blocks_processed_before_1 + && stacks_height > stacks_height_before + && test_observer::get_blocks().len() > nmb_old_blocks, + ) }) - .expect("Timed out waiting 
for follower to catch up to the miner"); + .expect("Timed out waiting for block to be mined and processed"); - let pre_nakamoto_peer_1_height = get_chain_info(&conf).stacks_tip_height; + info!("------------ Miner 2 Wins Tenure C With Old Block Commit (again) -----------"); - info!("------------------------- Reached Epoch 3.0 -------------------------"); + let blocks_processed_before_1 = blocks_mined1.load(Ordering::SeqCst); + let nmb_old_blocks = test_observer::get_blocks().len(); + let stacks_height_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + let burn_height_before = get_burn_height(); - // due to the random nature of mining sortitions, the way this test is structured - // is that we keep track of how many tenures each miner produced, and once enough sortitions - // have been produced such that each miner has produced 3 tenures, we stop and check the - // results at the end - let rl1_coord_channels = signer_test.running_nodes.coord_channel.clone(); - let rl1_commits = signer_test.running_nodes.commits_submitted.clone(); + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || Ok(get_burn_height() > burn_height_before), + ) + .expect("Timed out waiting for burn block to be processed"); - let miner_1_pk = StacksPublicKey::from_private(conf.miner.mining_key.as_ref().unwrap()); - let miner_2_pk = StacksPublicKey::from_private(conf_node_2.miner.mining_key.as_ref().unwrap()); - let mut btc_blocks_mined = 1; - let mut miner_1_tenures = 0; - let mut miner_2_tenures = 0; - let mut sender_nonce = 0; - while !(miner_1_tenures >= 3 && miner_2_tenures >= 3) { - if btc_blocks_mined > max_nakamoto_tenures { - panic!("Produced {btc_blocks_mined} sortitions, but didn't cover the test scenarios, aborting"); - } - let blocks_processed_before = - blocks_mined1.load(Ordering::SeqCst) + blocks_mined2.load(Ordering::SeqCst); - signer_test.mine_block_wait_on_processing( - 
&[&rl1_coord_channels, &rl2_coord_channels], - &[&rl1_commits, &rl2_commits], - Duration::from_secs(30), - ); - btc_blocks_mined += 1; + // assure we have a successful sortition that miner 2 won + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + assert!(tip.sortition); + assert_eq!(tip.miner_pk_hash.unwrap(), mining_pkh_2); - // wait for the new block to be processed - wait_for(60, || { - let blocks_processed = - blocks_mined1.load(Ordering::SeqCst) + blocks_mined2.load(Ordering::SeqCst); - Ok(blocks_processed > blocks_processed_before) - }) - .unwrap(); + wait_for(30, || { + Ok(rl2_commits.load(Ordering::SeqCst) > rl2_commits_before) + }) + .expect("Timed out waiting for block commit from miner 2"); - info!( - "Nakamoto blocks mined: {}", - blocks_mined1.load(Ordering::SeqCst) + blocks_mined2.load(Ordering::SeqCst) - ); + info!("---------------------- Miner 1 Extends Tenure B (again) ---------------------"); - // mine the interim blocks - info!("Mining interim blocks"); - for interim_block_ix in 0..inter_blocks_per_tenure { - let blocks_processed_before = - blocks_mined1.load(Ordering::SeqCst) + blocks_mined2.load(Ordering::SeqCst); - // submit a tx so that the miner will mine an extra block - let transfer_tx = make_stacks_transfer( - &sender_sk, - sender_nonce, - send_fee, - signer_test.running_nodes.conf.burnchain.chain_id, - &recipient, - send_amt, - ); - sender_nonce += 1; - submit_tx(&http_origin, &transfer_tx); + TEST_MINE_STALL.lock().unwrap().replace(false); + + // wait for a tenure extend block from miner 1 to be processed + // (miner 2's proposals will be rejected) + wait_for(60, || { + let stacks_height = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + Ok( + blocks_mined1.load(Ordering::SeqCst) > blocks_processed_before_1 + && stacks_height > stacks_height_before + && test_observer::get_blocks().len() > nmb_old_blocks, + ) + }) + .expect("Timed out waiting for 
tenure extend block to be mined and processed"); - wait_for(60, || { - let blocks_processed = - blocks_mined1.load(Ordering::SeqCst) + blocks_mined2.load(Ordering::SeqCst); - Ok(blocks_processed > blocks_processed_before) - }) - .unwrap(); - info!("Mined interim block {btc_blocks_mined}:{interim_block_ix}"); - } + verify_last_block_contains_tenure_change_tx(TenureChangeCause::Extended); - let blocks = get_nakamoto_headers(&conf); - let mut seen_burn_hashes = HashSet::new(); - miner_1_tenures = 0; - miner_2_tenures = 0; - for header in blocks.iter() { - if seen_burn_hashes.contains(&header.burn_header_hash) { - continue; - } - seen_burn_hashes.insert(header.burn_header_hash); + info!("------------------------- Miner 1 Mines Another Block -------------------------"); - let header = header.anchored_header.as_stacks_nakamoto().unwrap(); - if miner_1_pk - .verify( - header.miner_signature_hash().as_bytes(), - &header.miner_signature, - ) - .unwrap() - { - miner_1_tenures += 1; - } - if miner_2_pk - .verify( - header.miner_signature_hash().as_bytes(), - &header.miner_signature, - ) - .unwrap() - { - miner_2_tenures += 1; - } - } - info!("Miner 1 tenures: {miner_1_tenures}, Miner 2 tenures: {miner_2_tenures}"); - } + let blocks_processed_before_1 = blocks_mined1.load(Ordering::SeqCst); + let nmb_old_blocks = test_observer::get_blocks().len(); + let stacks_height_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; - info!( - "New chain info 1: {:?}", - get_chain_info(&signer_test.running_nodes.conf) + // submit a tx so that the miner will mine an extra block + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, ); + submit_tx(&http_origin, &transfer_tx); - info!("New chain info 2: {:?}", get_chain_info(&conf_node_2)); + // wait for the new block to be processed + wait_for(30, || { + let stacks_height = 
signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + Ok( + blocks_mined1.load(Ordering::SeqCst) > blocks_processed_before_1 + && stacks_height > stacks_height_before + && test_observer::get_blocks().len() > nmb_old_blocks, + ) + }) + .expect("Timed out waiting for block to be mined and processed"); - let peer_1_height = get_chain_info(&conf).stacks_tip_height; - let peer_2_height = get_chain_info(&conf_node_2).stacks_tip_height; - info!("Peer height information"; "peer_1" => peer_1_height, "peer_2" => peer_2_height, "pre_naka_height" => pre_nakamoto_peer_1_height); - assert_eq!(peer_1_height, peer_2_height); - assert_eq!( - peer_1_height, - pre_nakamoto_peer_1_height + (btc_blocks_mined - 1) * (inter_blocks_per_tenure + 1) - ); - assert_eq!(btc_blocks_mined, miner_1_tenures + miner_2_tenures); + info!("----------------------- Miner 2 Mines the Next Tenure -----------------------"); - // Verify both nodes have the correct chain id - let miner1_info = get_chain_info(&signer_test.running_nodes.conf); - assert_eq!(miner1_info.network_id, chain_id); + // Re-enable block commits for miner 2 + let rl2_commits_before = rl2_commits.load(Ordering::SeqCst); + rl2_skip_commit_op.set(false); - let miner2_info = get_chain_info(&conf_node_2); - assert_eq!(miner2_info.network_id, chain_id); + // Wait for block commit from miner 2 + wait_for(30, || { + Ok(rl2_commits.load(Ordering::SeqCst) > rl2_commits_before) + }) + .expect("Timed out waiting for block commit from miner 2"); + + let stacks_height_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || { + let stacks_height = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + Ok(stacks_height > stacks_height_before) + }, + ) + .expect("Timed out waiting for final block to 
be mined and processed"); + + // assure we have a successful sortition that miner 2 won and it had a block found tenure change + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + assert!(tip.sortition); + assert_eq!(tip.miner_pk_hash.unwrap(), mining_pkh_2); + verify_last_block_contains_tenure_change_tx(TenureChangeCause::BlockFound); + info!("------------------------- Shutdown -------------------------"); rl2_coord_channels .lock() .expect("Mutex poisoned") @@ -6732,8 +9162,24 @@ fn multiple_miners_with_custom_chain_id() { #[test] #[ignore] -/// This test checks the behavior of the `block_commit_delay_ms` configuration option. -fn block_commit_delay() { +/// Test the block_proposal_max_age_secs signer configuration option. It should reject blocks that are +/// invalid but within the max age window, otherwise it should simply drop the block without further processing. +/// +/// Test Setup: +/// The test spins up five stacks signers, one miner Nakamoto node, and a corresponding bitcoind. +/// +/// Test Execution: +/// The stacks node is advanced to epoch 3.0 reward set calculation to ensure the signer set is determined. +/// An invalid block proposal with a recent timestamp is forcibly written to the miner's slot to simulate the miner proposing a block. +/// The signers process the invalid block and broadcast a block response rejection to the respective .signers-XXX-YYY contract. +/// A second block proposal with an outdated timestamp is then submitted to the miner's slot to simulate the miner proposing a very old block. +/// The test confirms no further block rejection response is submitted to the .signers-XXX-YYY contract. +/// +/// Test Assertion: +/// - Each signer successfully rejects the recent invalid block proposal. +/// - No signer submits a block proposal response for the outdated block proposal. 
+/// - The stacks tip does not advance +fn block_proposal_max_age_rejections() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } @@ -6745,125 +9191,111 @@ fn block_commit_delay() { info!("------------------------- Test Setup -------------------------"); let num_signers = 5; - let block_proposal_timeout = Duration::from_secs(20); let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, vec![], |config| { - // make the duration long enough that the miner will be marked as malicious - config.block_proposal_timeout = block_proposal_timeout; - }, - |config| { - // Set the block commit delay to 10 minutes to ensure no block commit is sent - config.miner.block_commit_delay = Duration::from_secs(600); + config.block_proposal_max_age_secs = 30; }, + |_| {}, None, None, ); - signer_test.boot_to_epoch_3(); + let short_timeout = Duration::from_secs(30); - let commits_before = signer_test - .running_nodes - .commits_submitted - .load(Ordering::SeqCst); - - next_block_and_process_new_stacks_block( - &mut signer_test.running_nodes.btc_regtest_controller, - 60, - &signer_test.running_nodes.coord_channel, - ) - .expect("Failed to mine first block"); - - // Ensure that the block commit has been sent before continuing - wait_for(60, || { - let commits = signer_test - .running_nodes - .commits_submitted - .load(Ordering::SeqCst); - Ok(commits > commits_before) - }) - .expect("Timed out waiting for block commit after new Stacks block"); - - // Prevent a block from being mined by making signers reject it. 
- let all_signers = signer_test - .signer_stacks_private_keys - .iter() - .map(StacksPublicKey::from_private) - .collect::>(); - TEST_REJECT_ALL_BLOCK_PROPOSAL - .lock() - .unwrap() - .replace(all_signers); - - info!("------------------------- Test Mine Burn Block -------------------------"); - let burn_height_before = get_chain_info(&signer_test.running_nodes.conf).burn_block_height; - let commits_before = signer_test - .running_nodes - .commits_submitted - .load(Ordering::SeqCst); - - // Mine a burn block and wait for it to be processed. - next_block_and( - &mut signer_test.running_nodes.btc_regtest_controller, - 60, - || { - let burn_height = get_chain_info(&signer_test.running_nodes.conf).burn_block_height; - Ok(burn_height > burn_height_before) - }, - ) - .unwrap(); - - // Sleep an extra minute to ensure no block commits are sent - sleep_ms(60_000); - - let commits = signer_test - .running_nodes - .commits_submitted - .load(Ordering::SeqCst); - assert_eq!(commits, commits_before); - - let blocks_before = signer_test - .running_nodes - .nakamoto_blocks_mined - .load(Ordering::SeqCst); + info!("------------------------- Send Block Proposal To Signers -------------------------"); + let info_before = get_chain_info(&signer_test.running_nodes.conf); + let mut block = NakamotoBlock { + header: NakamotoBlockHeader::empty(), + txs: vec![], + }; + // First propose a stale block that is older than the block_proposal_max_age_secs + block.header.timestamp = get_epoch_time_secs().saturating_sub( + signer_test.signer_configs[0] + .block_proposal_max_age_secs + .saturating_add(1), + ); + let block_signer_signature_hash_1 = block.header.signer_signature_hash(); + signer_test.propose_block(block.clone(), short_timeout); - info!("------------------------- Resume Signing -------------------------"); - TEST_REJECT_ALL_BLOCK_PROPOSAL - .lock() - .unwrap() - .replace(Vec::new()); + // Next propose a recent invalid block + block.header.timestamp = get_epoch_time_secs(); + let 
block_signer_signature_hash_2 = block.header.signer_signature_hash(); + signer_test.propose_block(block, short_timeout); - // Wait for a block to be mined - wait_for(60, || { - let blocks = signer_test - .running_nodes - .nakamoto_blocks_mined - .load(Ordering::SeqCst); - Ok(blocks > blocks_before) + info!("------------------------- Test Block Proposal Rejected -------------------------"); + // Verify the signers rejected only the SECOND block proposal. The first was not even processed. + wait_for(30, || { + let rejections: Vec<_> = test_observer::get_stackerdb_chunks() + .into_iter() + .flat_map(|chunk| chunk.modified_slots) + .map(|chunk| { + let Ok(message) = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + else { + return None; + }; + match message { + SignerMessage::BlockResponse(BlockResponse::Rejected(BlockRejection { + signer_signature_hash, + signature, + .. + })) => { + assert_eq!( + signer_signature_hash, block_signer_signature_hash_2, + "We should only reject the second block" + ); + Some(signature) + } + SignerMessage::BlockResponse(BlockResponse::Accepted(BlockAccepted { + signer_signature_hash, + .. 
+ })) => { + assert_ne!( + signer_signature_hash, block_signer_signature_hash_1, + "We should never have accepted block" + ); + None + } + _ => None, + } + }) + .collect(); + Ok(rejections.len() > num_signers * 7 / 10) }) - .expect("Timed out waiting for block to be mined"); + .expect("Timed out waiting for block rejections"); - // Wait for a block commit to be sent - wait_for(60, || { - let commits = signer_test - .running_nodes - .commits_submitted - .load(Ordering::SeqCst); - Ok(commits > commits_before) - }) - .expect("Timed out waiting for block commit after new Stacks block"); + info!("------------------------- Test Peer Info-------------------------"); + assert_eq!(info_before, get_chain_info(&signer_test.running_nodes.conf)); + info!("------------------------- Test Shutdown-------------------------"); signer_test.shutdown(); } -// Ensures that a signer that successfully submits a block to the node for validation -// will issue ConnectivityIssues rejections if a block submission times out. -// Also ensures that no other proposal gets submitted for validation if we -// are already waiting for a block submission response. #[test] #[ignore] -fn block_validation_response_timeout() { +/// Test that signers do not mark a block as globally accepted if it was not announced by the node. +/// This will simulate this case via testing flags, and ensure that a block can be reorged across tenure +/// boundaries now (as it is only marked locally accepted and no longer gets marked globally accepted +/// by simply seeing the threshold number of signatures). +/// +/// Test Setup: +/// The test spins up five stacks signers, one miner Nakamoto node, and a corresponding bitcoind. +/// The stacks node is then advanced to Epoch 3.0 boundary to allow block signing. +/// +/// Test Execution: +/// 1. The node mines 1 stacks block N (all signers sign it). +/// 2. 
<30% of signers are configured to auto reject any block proposals, broadcast of new blocks are skipped, and miners are configured to ignore signers responses. +/// 3. The node mines 1 stacks block N+1 (all signers sign it, but one which rejects it) but eventually all mark the block as locally accepted. +/// 4. A new tenure starts and the miner attempts to mine a new sister block N+1' (as it does not see the threshold number of signatures or any block push from signers). +/// 5. The signers accept this sister block as a valid reorg and the node advances to block N+1'. +/// +/// Test Assertion: +/// - All signers accepted block N. +/// - Less than 30% of the signers rejected block N+1. +/// - All signers accept block N+1' as a valid reorg. +/// - The node advances to block N+1' +fn global_acceptance_depends_on_block_announcement() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } @@ -6875,39 +9307,46 @@ fn block_validation_response_timeout() { info!("------------------------- Test Setup -------------------------"); let num_signers = 5; - let timeout = Duration::from_secs(30); let sender_sk = Secp256k1PrivateKey::new(); let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; - let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let nmb_txs = 4; + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, - vec![(sender_addr, send_amt + send_fee)], + vec![(sender_addr, (send_amt + send_fee) * nmb_txs)], |config| { - config.block_proposal_validation_timeout = timeout; + // Just accept all reorg attempts + config.tenure_last_block_proposal_timeout = Duration::from_secs(0); + }, + |config| { + config.miner.block_commit_delay = Duration::from_secs(0); }, - |_| {}, None, None, ); + + let all_signers: Vec<_> = signer_test + .signer_stacks_private_keys + .iter() + .map(StacksPublicKey::from_private) + 
.collect(); + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + let short_timeout = 30; signer_test.boot_to_epoch_3(); - info!("------------------------- Test Mine and Verify Confirmed Nakamoto Block -------------------------"); - signer_test.mine_and_verify_confirmed_naka_block(timeout, num_signers); - info!("------------------------- Test Block Validation Stalled -------------------------"); - TEST_VALIDATE_STALL.lock().unwrap().replace(true); - let validation_stall_start = Instant::now(); - - let proposals_before = signer_test - .running_nodes - .nakamoto_blocks_proposed - .load(Ordering::SeqCst); + info!("------------------------- Test Mine Nakamoto Block N -------------------------"); + let info_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); - // submit a tx so that the miner will attempt to mine an extra block - let sender_nonce = 0; + test_observer::clear(); + // submit a tx so that the miner will mine a stacks block N + let mut sender_nonce = 0; let transfer_tx = make_stacks_transfer( &sender_sk, sender_nonce, @@ -6916,113 +9355,176 @@ fn block_validation_response_timeout() { &recipient, send_amt, ); - submit_tx(&http_origin, &transfer_tx); + let tx = submit_tx(&http_origin, &transfer_tx); + sender_nonce += 1; + info!("Submitted tx {tx} in to mine block N"); - info!("Submitted transfer tx and waiting for block proposal"); - wait_for(30, || { + wait_for(short_timeout, || { Ok(signer_test - .running_nodes - .nakamoto_blocks_proposed - .load(Ordering::SeqCst) - > proposals_before) + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height + > info_before.stacks_tip_height) }) - .expect("Timed out waiting for block proposal"); + .expect("Timed out waiting for N to be mined and processed"); - assert!( - validation_stall_start.elapsed() < timeout, - "Test was too slow to propose another block before the timeout" + let info_after = signer_test 
+ .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + assert_eq!( + info_before.stacks_tip_height + 1, + info_after.stacks_tip_height ); - info!("------------------------- Propose Another Block Before Hitting the Timeout -------------------------"); - let proposal_conf = ProposalEvalConfig { - first_proposal_burn_block_timing: Duration::from_secs(0), - tenure_last_block_proposal_timeout: Duration::from_secs(30), - block_proposal_timeout: Duration::from_secs(100), - }; - let mut block = NakamotoBlock { - header: NakamotoBlockHeader::empty(), - txs: vec![], - }; + // Ensure that the block was accepted globally so the stacks tip has advanced to N + let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); + let block_n = nakamoto_blocks.last().unwrap(); + assert_eq!(info_after.stacks_tip.to_string(), block_n.block_hash); - let info_before = get_chain_info(&signer_test.running_nodes.conf); - // Propose a block to the signers that passes initial checks but will not be submitted to the stacks node due to the submission stall - let view = SortitionsView::fetch_view(proposal_conf, &signer_test.stacks_client).unwrap(); - block.header.pox_treatment = BitVec::ones(1).unwrap(); - block.header.consensus_hash = view.cur_sortition.consensus_hash; - block.header.chain_length = info_before.stacks_tip_height + 1; + // Make sure that ALL signers accepted the block proposal + signer_test + .wait_for_block_acceptance(short_timeout, &block_n.signer_signature_hash, &all_signers) + .expect("Timed out waiting for block acceptance of N"); - let block_signer_signature_hash_1 = block.header.signer_signature_hash(); - signer_test.propose_block(block, timeout); + info!("------------------------- Mine Nakamoto Block N+1 -------------------------"); + // Make less than 30% of the signers reject the block and ensure it is accepted by the node, but not announced. 
+ let rejecting_signers: Vec<_> = all_signers + .iter() + .cloned() + .take(num_signers * 3 / 10) + .collect(); + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(rejecting_signers.clone()); + TEST_SKIP_BLOCK_ANNOUNCEMENT.set(true); + TEST_IGNORE_SIGNERS.set(true); + TEST_SKIP_BLOCK_BROADCAST.set(true); + test_observer::clear(); - info!("------------------------- Waiting for Timeout -------------------------"); - // Sleep the necessary timeout to make sure the validation times out. - let elapsed = validation_stall_start.elapsed(); - let wait = timeout.saturating_sub(elapsed); - info!("Sleeping for {} ms", wait.as_millis()); - std::thread::sleep(timeout.saturating_sub(elapsed)); + // submit a tx so that the miner will mine a stacks block N+1 + let info_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); + let tx = submit_tx(&http_origin, &transfer_tx); + info!("Submitted tx {tx} in to mine block N+1"); - info!("------------------------- Wait for Block Rejection Due to Timeout -------------------------"); - // Verify that the signer that submits the block to the node will issue a ConnectivityIssues rejection - wait_for(30, || { - let chunks = test_observer::get_stackerdb_chunks(); - for chunk in chunks.into_iter().flat_map(|chunk| chunk.modified_slots) { - let Ok(message) = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) - else { - continue; - }; - let SignerMessage::BlockResponse(BlockResponse::Rejected(BlockRejection { - reason: _reason, - reason_code, - signer_signature_hash, - .. - })) = message - else { - continue; - }; - // We are waiting for the original block proposal which will have a diff signature to our - // second proposed block. 
- assert_ne!( - signer_signature_hash, block_signer_signature_hash_1, - "Received a rejection for the wrong block" - ); - if matches!(reason_code, RejectCode::ConnectivityIssues) { - return Ok(true); - } - } - Ok(false) - }) - .expect("Timed out waiting for block proposal rejections"); - // Make sure our chain has still not advanced - let info_after = get_chain_info(&signer_test.running_nodes.conf); - assert_eq!(info_before, info_after); - let info_before = info_after; - info!("Unpausing block validation"); - // Disable the stall and wait for the block to be processed successfully - TEST_VALIDATE_STALL.lock().unwrap().replace(false); - wait_for(30, || { - let info = get_chain_info(&signer_test.running_nodes.conf); - Ok(info.stacks_tip_height > info_before.stacks_tip_height) - }) - .expect("Timed out waiting for block to be processed"); + let mut proposed_block = None; + let start_time = Instant::now(); + while proposed_block.is_none() && start_time.elapsed() < Duration::from_secs(30) { + proposed_block = test_observer::get_stackerdb_chunks() + .into_iter() + .flat_map(|chunk| chunk.modified_slots) + .find_map(|chunk| { + let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + .expect("Failed to deserialize SignerMessage"); + match message { + SignerMessage::BlockProposal(proposal) => { + if proposal.block.header.consensus_hash + == info_before.stacks_tip_consensus_hash + { + Some(proposal.block) + } else { + None + } + } + _ => None, + } + }); + } + let proposed_block = proposed_block.expect("Failed to find proposed block within 30s"); - let info_after = get_chain_info(&signer_test.running_nodes.conf); - assert_eq!( - info_after.stacks_tip_height, - info_before.stacks_tip_height + 1, + // Even though one of the signers rejected the block, it will eventually accept the block as it sees the 70% threshold of signatures + signer_test + .wait_for_block_acceptance( + short_timeout, + &proposed_block.header.signer_signature_hash(), + &all_signers, 
+ ) + .expect("Timed out waiting for block acceptance of N+1 by all signers"); + + info!( + "------------------------- Attempt to Mine Nakamoto Block N+1' -------------------------" ); - info!("------------------------- Test Mine and Verify Confirmed Nakamoto Block -------------------------"); - let info_before = info_after; - signer_test.mine_and_verify_confirmed_naka_block(timeout, num_signers); - wait_for(30, || { - let info = get_chain_info(&signer_test.running_nodes.conf); - Ok(info.stacks_tip_height > info_before.stacks_tip_height) - }) + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(Vec::new()); + TEST_SKIP_BLOCK_ANNOUNCEMENT.set(false); + TEST_IGNORE_SIGNERS.set(false); + TEST_SKIP_BLOCK_BROADCAST.set(false); + test_observer::clear(); + let info_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || { + let info = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + Ok(info.stacks_tip_height > info_before.stacks_tip_height) + }, + ) .unwrap(); + let info_after = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + let mut sister_block = None; + let start_time = Instant::now(); + while sister_block.is_none() && start_time.elapsed() < Duration::from_secs(30) { + sister_block = test_observer::get_stackerdb_chunks() + .into_iter() + .flat_map(|chunk| chunk.modified_slots) + .find_map(|chunk| { + let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + .expect("Failed to deserialize SignerMessage"); + match message { + SignerMessage::BlockProposal(proposal) => { + if proposal.block.header.consensus_hash + == info_after.stacks_tip_consensus_hash + { + Some(proposal.block) + } else { + None + } + } + _ => None, + } + }); + } + let sister_block = sister_block.expect("Failed to find proposed sister block within 30s"); + signer_test + .wait_for_block_acceptance( 
+ short_timeout, + &sister_block.header.signer_signature_hash(), + &all_signers, + ) + .expect("Timed out waiting for block acceptance of N+1' by all signers"); - let info_after = get_chain_info(&signer_test.running_nodes.conf); + // Assert the block was mined and the tip has changed. assert_eq!( info_after.stacks_tip_height, - info_before.stacks_tip_height + 1, + sister_block.header.chain_length + ); + assert_eq!(info_after.stacks_tip, sister_block.header.block_hash()); + assert_eq!( + info_after.stacks_tip_consensus_hash, + sister_block.header.consensus_hash + ); + assert_eq!( + sister_block.header.chain_length, + proposed_block.header.chain_length ); + assert_ne!(sister_block, proposed_block); } diff --git a/testnet/stacks-node/src/tests/stackerdb.rs b/testnet/stacks-node/src/tests/stackerdb.rs index c68b477b47..6212dd6fcc 100644 --- a/testnet/stacks-node/src/tests/stackerdb.rs +++ b/testnet/stacks-node/src/tests/stackerdb.rs @@ -18,6 +18,7 @@ use std::{env, thread}; use clarity::vm::types::QualifiedContractIdentifier; use stacks::chainstate::stacks::StacksPrivateKey; +use stacks::config::{EventKeyType, InitialBalance}; use stacks::libstackerdb::{StackerDBChunkAckData, StackerDBChunkData}; use stacks_common::types::chainstate::StacksAddress; use stacks_common::util::hash::Sha512Trunc256Sum; @@ -25,7 +26,6 @@ use {reqwest, serde_json}; use super::bitcoin_regtest::BitcoinCoreController; use crate::burnchains::BurnchainController; -use crate::config::{EventKeyType, InitialBalance}; use crate::tests::neon_integrations::{ neon_integration_test_conf, next_block_and_wait, submit_tx, test_observer, wait_for_runloop, };