From cfd1a123a9c1da0114ca2fe5582a271a45655309 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Mon, 4 Dec 2023 16:21:33 -0600 Subject: [PATCH 01/16] feat: add nakamoto_node, nakamoto-neon mode * Refactor some of the reused structs from `neon_node` * Fix a logic-bug in `nakamoto::coordinator`: the first prepare phase information will be a Epoch2x block, so the reward set calculation has to handle that. * Add `nakamoto_node` module based on `neon_node` * Add simple integration test for `nakamoto_node` --- .../chainstate/nakamoto/coordinator/mod.rs | 40 +- stackslib/src/chainstate/nakamoto/miner.rs | 8 +- stackslib/src/chainstate/stacks/miner.rs | 17 +- .../burnchains/bitcoin_regtest_controller.rs | 9 +- testnet/stacks-node/src/config.rs | 17 +- testnet/stacks-node/src/globals.rs | 266 +++++ testnet/stacks-node/src/keychain.rs | 24 +- testnet/stacks-node/src/main.rs | 6 + testnet/stacks-node/src/mockamoto.rs | 8 +- testnet/stacks-node/src/nakamoto_node.rs | 683 +++++++++++ .../stacks-node/src/nakamoto_node/miner.rs | 645 +++++++++++ testnet/stacks-node/src/nakamoto_node/peer.rs | 418 +++++++ .../stacks-node/src/nakamoto_node/relayer.rs | 961 +++++++++++++++ testnet/stacks-node/src/neon_node.rs | 266 +---- testnet/stacks-node/src/run_loop/mod.rs | 1 + testnet/stacks-node/src/run_loop/nakamoto.rs | 1029 +++++++++++++++++ testnet/stacks-node/src/run_loop/neon.rs | 55 +- .../stacks-node/src/tests/bitcoin_regtest.rs | 1 + testnet/stacks-node/src/tests/mod.rs | 1 + .../src/tests/nakamoto_integrations.rs | 322 ++++++ .../src/tests/neon_integrations.rs | 4 +- 21 files changed, 4480 insertions(+), 301 deletions(-) create mode 100644 testnet/stacks-node/src/globals.rs create mode 100644 testnet/stacks-node/src/nakamoto_node.rs create mode 100644 testnet/stacks-node/src/nakamoto_node/miner.rs create mode 100644 testnet/stacks-node/src/nakamoto_node/peer.rs create mode 100644 testnet/stacks-node/src/nakamoto_node/relayer.rs create mode 100644 
testnet/stacks-node/src/run_loop/nakamoto.rs create mode 100644 testnet/stacks-node/src/tests/nakamoto_integrations.rs diff --git a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs index 462662d4d9..6dde267bc2 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs @@ -169,7 +169,7 @@ pub fn get_nakamoto_reward_cycle_info( .epoch_id; assert!( - epoch_at_height >= StacksEpochId::Epoch30, + epoch_at_height >= StacksEpochId::Epoch25, "FATAL: called a nakamoto function outside of epoch 3" ); @@ -216,22 +216,40 @@ pub fn get_nakamoto_reward_cycle_info( } // find the first Stacks block processed in the prepare phase - let Some(prepare_start_block_header) = + let parent_block_id = if let Some(nakamoto_start_block) = NakamotoChainState::get_nakamoto_tenure_start_block_header( chain_state.db(), &sn.consensus_hash, + )? { + nakamoto_start_block + .anchored_header + .as_stacks_nakamoto() + // TODO: maybe `get_nakamoto_tenure_start_block_header` should + // return a type that doesn't require this unwrapping? + .expect("FATAL: queried non-Nakamoto tenure start header") + .parent_block_id + } else { + let Some(block_header) = + StacksChainState::get_stacks_block_header_info_by_consensus_hash( + chain_state.db(), + &sn.consensus_hash, + )? + else { + // no header for this snapshot (possibly invalid) + debug!("Failed to find block by consensus hash"; "consensus_hash" => %sn.consensus_hash); + continue; + }; + let Some(parent_block_id) = StacksChainState::get_parent_block_id( + chain_state.db(), + &block_header.index_block_hash(), )? 
- else { - // no header for this snapshot (possibly invalid) - continue; + else { + debug!("Failed to get parent block"; "block_id" => %block_header.index_block_hash()); + continue; + }; + parent_block_id }; - let parent_block_id = &prepare_start_block_header - .anchored_header - .as_stacks_nakamoto() - .expect("FATAL: queried non-Nakamoto tenure start header") - .parent_block_id; - // find the tenure-start block of the tenure of the parent of this Stacks block. // in epoch 2, this is the preceding anchor block // in nakamoto, this is the tenure-start block of the preceding tenure diff --git a/stackslib/src/chainstate/nakamoto/miner.rs b/stackslib/src/chainstate/nakamoto/miner.rs index 82b6d34b93..1f75cd55ac 100644 --- a/stackslib/src/chainstate/nakamoto/miner.rs +++ b/stackslib/src/chainstate/nakamoto/miner.rs @@ -498,7 +498,7 @@ impl NakamotoBlockBuilder { state_root_hash ); - info!( + debug!( "Miner: mined Nakamoto block"; "consensus_hash" => %block.header.consensus_hash, "block_hash" => %block.header.block_hash(), @@ -570,13 +570,15 @@ impl NakamotoBlockBuilder { .block_limit() .expect("Failed to obtain block limit from miner's block connection"); + let initial_txs: Vec<_> = + [new_tenure_info.tenure_change_tx.cloned(), + new_tenure_info.coinbase_tx.cloned()].into_iter().filter_map(|x| x).collect(); let (blocked, tx_events) = match StacksBlockBuilder::select_and_apply_transactions( &mut tenure_tx, &mut builder, mempool, parent_stacks_header.stacks_block_height, - tenure_info.tenure_change_tx(), - tenure_info.coinbase_tx(), + &initial_txs, settings, event_observer, ASTRules::PrecheckSize, diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index a9cfacf929..3eb1ea36cc 100644 --- a/stackslib/src/chainstate/stacks/miner.rs +++ b/stackslib/src/chainstate/stacks/miner.rs @@ -2139,8 +2139,7 @@ impl StacksBlockBuilder { builder: &mut B, mempool: &mut MemPoolDB, tip_height: u64, - tenure_change_tx: 
Option<&StacksTransaction>, - coinbase_tx: Option<&StacksTransaction>, + initial_txs: &[StacksTransaction], settings: BlockBuilderSettings, event_observer: Option<&dyn MemPoolEventDispatcher>, ast_rules: ASTRules, @@ -2155,17 +2154,10 @@ impl StacksBlockBuilder { let mut tx_events = Vec::new(); - if let Some(tenure_tx) = tenure_change_tx { + for initial_tx in initial_txs.iter() { tx_events.push( builder - .try_mine_tx(epoch_tx, tenure_tx, ast_rules.clone())? - .convert_to_event(), - ); - } - if let Some(coinbase_tx) = coinbase_tx { - tx_events.push( - builder - .try_mine_tx(epoch_tx, coinbase_tx, ast_rules.clone())? + .try_mine_tx(epoch_tx, initial_tx, ast_rules.clone())? .convert_to_event(), ); } @@ -2442,8 +2434,7 @@ impl StacksBlockBuilder { &mut builder, mempool, parent_stacks_header.stacks_block_height, - None, - Some(coinbase_tx), + &[coinbase_tx.clone()], settings, event_observer, ast_rules, diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index d70fca1c02..ad83dd6f57 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -8,7 +8,7 @@ use async_h1::client; use async_std::io::ReadExt; use async_std::net::TcpStream; use base64::encode; -use clarity::vm::types::PrincipalData; + use http_types::{Method, Request, Url}; use serde::Serialize; use serde_json::json; @@ -50,11 +50,16 @@ use stacks_common::deps_common::bitcoin::network::encodable::ConsensusEncodable; use stacks_common::deps_common::bitcoin::network::serialize::deserialize as btc_deserialize; use stacks_common::deps_common::bitcoin::network::serialize::RawEncoder; use stacks_common::deps_common::bitcoin::util::hash::Sha256dHash; -use stacks_common::types::chainstate::{BurnchainHeaderHash, StacksAddress}; +use stacks_common::types::chainstate::BurnchainHeaderHash; use stacks_common::util::hash::{hex_bytes, 
Hash160}; use stacks_common::util::secp256k1::Secp256k1PublicKey; use stacks_common::util::sleep_ms; +#[cfg(test)] +use clarity::vm::types::PrincipalData; +#[cfg(test)] +use stacks_common::types::chainstate::StacksAddress; + use super::super::operations::BurnchainOpSigner; use super::super::Config; use super::{BurnchainController, BurnchainTip, Error as BurnchainControllerError}; diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index f634f526c8..feaa0208ac 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -35,6 +35,8 @@ use stacks_common::util::get_epoch_time_ms; use stacks_common::util::hash::hex_bytes; use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; +use crate::mockamoto::signer::SelfSigner; + pub const DEFAULT_SATS_PER_VB: u64 = 50; const DEFAULT_MAX_RBF_RATE: u64 = 150; // 1.5x const DEFAULT_RBF_FEE_RATE_INCREMENT: u64 = 5; @@ -491,6 +493,13 @@ lazy_static! { } impl Config { + pub fn self_signing(&self) -> Option { + if !(self.burnchain.mode == "nakamoto-neon" || self.burnchain.mode == "mockamoto") { + return None; + } + self.miner.self_signing_key.clone() + } + /// get the up-to-date burnchain from the config pub fn get_burnchain_config(&self) -> Result { if let Some(path) = &self.config_path { @@ -1095,6 +1104,7 @@ impl Config { .as_ref() .map(|x| Secp256k1PrivateKey::from_hex(x)) .transpose()?, + self_signing_key: None, }, None => miner_default_config, }; @@ -1108,6 +1118,7 @@ impl Config { "xenon", "mainnet", "mockamoto", + "nakamoto-neon", ]; if !supported_modes.contains(&burnchain.mode.as_str()) { @@ -1629,10 +1640,10 @@ impl BurnchainConfig { match self.mode.as_str() { "mainnet" => ("mainnet".to_string(), BitcoinNetworkType::Mainnet), "xenon" => ("testnet".to_string(), BitcoinNetworkType::Testnet), - "helium" | "neon" | "argon" | "krypton" | "mocknet" | "mockamoto" => { + "helium" | "neon" | "argon" | "krypton" | "mocknet" | "mockamoto" | 
"nakamoto-neon" => { ("regtest".to_string(), BitcoinNetworkType::Regtest) } - _ => panic!("Invalid bitcoin mode -- expected mainnet, testnet, or regtest"), + other => panic!("Invalid stacks-node mode: {other}"), } } } @@ -2116,6 +2127,7 @@ pub struct MinerConfig { pub candidate_retry_cache_size: u64, pub unprocessed_block_deadline_secs: u64, pub mining_key: Option, + pub self_signing_key: Option, } impl MinerConfig { @@ -2133,6 +2145,7 @@ impl MinerConfig { candidate_retry_cache_size: 10_000, unprocessed_block_deadline_secs: 30, mining_key: None, + self_signing_key: None, } } } diff --git a/testnet/stacks-node/src/globals.rs b/testnet/stacks-node/src/globals.rs new file mode 100644 index 0000000000..acace012f8 --- /dev/null +++ b/testnet/stacks-node/src/globals.rs @@ -0,0 +1,266 @@ +use std::sync::atomic::AtomicBool; +use std::sync::atomic::Ordering; +use std::sync::mpsc::SyncSender; +use std::sync::Arc; +use std::sync::Mutex; + +use stacks::burnchains::Txid; +use stacks::chainstate::burn::operations::LeaderKeyRegisterOp; +use stacks::chainstate::burn::BlockSnapshot; +use stacks::chainstate::coordinator::comm::CoordinatorChannels; +use stacks::chainstate::stacks::db::unconfirmed::UnconfirmedTxMap; +use stacks::chainstate::stacks::db::StacksChainState; +use stacks::chainstate::stacks::miner::MinerStatus; +use stacks::net::NetworkResult; +use stacks_common::types::chainstate::BlockHeaderHash; +use stacks_common::types::chainstate::BurnchainHeaderHash; +use stacks_common::types::chainstate::ConsensusHash; + +use crate::neon::Counters; +use crate::run_loop::RegisteredKey; +use crate::syncctl::PoxSyncWatchdogComms; + +use crate::neon_node::LeaderKeyRegistrationState; + +/// Command types for the relayer thread, issued to it by other threads +pub enum RelayerDirective { + /// Handle some new data that arrived on the network (such as blocks, transactions, and + HandleNetResult(NetworkResult), + /// Announce a new sortition. Process and broadcast the block if we won. 
+ ProcessTenure(ConsensusHash, BurnchainHeaderHash, BlockHeaderHash), + /// Try to mine a block + RunTenure(RegisteredKey, BlockSnapshot, u128), // (vrf key, chain tip, time of issuance in ms) + /// A nakamoto tenure's first block has been processed. + NakamotoTenureStartProcessed(ConsensusHash, BlockHeaderHash), + /// Try to register a VRF public key + RegisterKey(BlockSnapshot), + /// Stop the relayer thread + Exit, +} + +/// Inter-thread communication structure, shared between threads +#[derive(Clone)] +pub struct Globals { + /// Last sortition processed + last_sortition: Arc>>, + /// Status of the miner + miner_status: Arc>, + /// Communication link to the coordinator thread + pub(crate) coord_comms: CoordinatorChannels, + /// Unconfirmed transactions (shared between the relayer and p2p threads) + unconfirmed_txs: Arc>, + /// Writer endpoint to the relayer thread + pub relay_send: SyncSender, + /// Cointer state in the main thread + pub counters: Counters, + /// Connection to the PoX sync watchdog + pub sync_comms: PoxSyncWatchdogComms, + /// Global flag to see if we should keep running + pub should_keep_running: Arc, + /// Status of our VRF key registration state (shared between the main thread and the relayer) + leader_key_registration_state: Arc>, +} + +impl Globals { + pub fn new( + coord_comms: CoordinatorChannels, + miner_status: Arc>, + relay_send: SyncSender, + counters: Counters, + sync_comms: PoxSyncWatchdogComms, + should_keep_running: Arc, + ) -> Globals { + Globals { + last_sortition: Arc::new(Mutex::new(None)), + miner_status, + coord_comms, + unconfirmed_txs: Arc::new(Mutex::new(UnconfirmedTxMap::new())), + relay_send, + counters, + sync_comms, + should_keep_running, + leader_key_registration_state: Arc::new(Mutex::new( + LeaderKeyRegistrationState::Inactive, + )), + } + } + + /// Get the last sortition processed by the relayer thread + pub fn get_last_sortition(&self) -> Option { + self.last_sortition + .lock() + .unwrap_or_else(|_| { + 
error!("Sortition mutex poisoned!"); + panic!(); + }) + .clone() + } + + /// Set the last sortition processed + pub fn set_last_sortition(&self, block_snapshot: BlockSnapshot) { + let mut last_sortition = self.last_sortition.lock().unwrap_or_else(|_| { + error!("Sortition mutex poisoned!"); + panic!(); + }); + last_sortition.replace(block_snapshot); + } + + /// Get the status of the miner (blocked or ready) + pub fn get_miner_status(&self) -> Arc> { + self.miner_status.clone() + } + + pub fn block_miner(&self) { + self.miner_status + .lock() + .expect("FATAL: mutex poisoned") + .add_blocked() + } + + pub fn unblock_miner(&self) { + self.miner_status + .lock() + .expect("FATAL: mutex poisoned") + .remove_blocked() + } + + /// Get the main thread's counters + pub fn get_counters(&self) -> Counters { + self.counters.clone() + } + + /// Called by the relayer to pass unconfirmed txs to the p2p thread, so the p2p thread doesn't + /// need to do the disk I/O needed to instantiate the unconfirmed state trie they represent. + /// Clears the unconfirmed transactions, and replaces them with the chainstate's. + pub fn send_unconfirmed_txs(&self, chainstate: &StacksChainState) { + let Some(ref unconfirmed) = chainstate.unconfirmed_state else { + return; + }; + let mut txs = self.unconfirmed_txs.lock().unwrap_or_else(|e| { + // can only happen due to a thread panic in the relayer + error!("FATAL: unconfirmed tx arc mutex is poisoned: {e:?}"); + panic!(); + }); + txs.clear(); + txs.extend(unconfirmed.mined_txs.clone()); + } + + /// Called by the p2p thread to accept the unconfirmed tx state processed by the relayer. + /// Puts the shared unconfirmed transactions to chainstate. 
+ pub fn recv_unconfirmed_txs(&self, chainstate: &mut StacksChainState) { + let Some(ref mut unconfirmed) = chainstate.unconfirmed_state else { + return; + }; + let txs = self.unconfirmed_txs.lock().unwrap_or_else(|e| { + // can only happen due to a thread panic in the relayer + error!("FATAL: unconfirmed tx arc mutex is poisoned: {e:?}"); + panic!(); + }); + unconfirmed.mined_txs.clear(); + unconfirmed.mined_txs.extend(txs.clone()); + } + + /// Signal system-wide stop + pub fn signal_stop(&self) { + self.should_keep_running.store(false, Ordering::SeqCst); + } + + /// Should we keep running? + pub fn keep_running(&self) -> bool { + self.should_keep_running.load(Ordering::SeqCst) + } + + /// Get the handle to the coordinator + pub fn coord(&self) -> &CoordinatorChannels { + &self.coord_comms + } + + /// Get the current leader key registration state. + /// Called from the runloop thread and relayer thread. + pub fn get_leader_key_registration_state(&self) -> LeaderKeyRegistrationState { + let key_state = self + .leader_key_registration_state + .lock() + .unwrap_or_else(|e| { + // can only happen due to a thread panic in the relayer + error!("FATAL: leader key registration mutex is poisoned: {e:?}"); + panic!(); + }); + key_state.clone() + } + + /// Set the initial leader key registration state. + /// Called from the runloop thread when booting up. + pub fn set_initial_leader_key_registration_state(&self, new_state: LeaderKeyRegistrationState) { + let mut key_state = self + .leader_key_registration_state + .lock() + .unwrap_or_else(|e| { + // can only happen due to a thread panic in the relayer + error!("FATAL: leader key registration mutex is poisoned: {e:?}"); + panic!(); + }); + *key_state = new_state; + } + + /// Advance the leader key registration state to pending, given a txid we just sent. + /// Only the relayer thread calls this. 
+ pub fn set_pending_leader_key_registration(&self, target_block_height: u64, txid: Txid) { + let mut key_state = self + .leader_key_registration_state + .lock() + .unwrap_or_else(|_e| { + error!("FATAL: failed to lock leader key registration state mutex"); + panic!(); + }); + *key_state = LeaderKeyRegistrationState::Pending(target_block_height, txid); + } + + /// Advance the leader key registration state to active, given the VRF key registration ops + /// we've discovered in a given snapshot. + /// The runloop thread calls this whenever it processes a sortition. + pub fn try_activate_leader_key_registration( + &self, + burn_block_height: u64, + key_registers: Vec, + ) -> bool { + let mut activated = false; + let mut key_state = self + .leader_key_registration_state + .lock() + .unwrap_or_else(|e| { + // can only happen due to a thread panic in the relayer + error!("FATAL: leader key registration mutex is poisoned: {e:?}"); + panic!(); + }); + // if key_state is anything but pending, then we don't activate + let LeaderKeyRegistrationState::Pending(target_block_height, txid) = *key_state else { + return false; + }; + for op in key_registers.into_iter() { + info!( + "Processing burnchain block with key_register_op"; + "burn_block_height" => burn_block_height, + "txid" => %op.txid, + "checking_txid" => %txid, + ); + + if txid == op.txid { + *key_state = LeaderKeyRegistrationState::Active(RegisteredKey { + target_block_height, + vrf_public_key: op.public_key, + block_height: u64::from(op.block_height), + op_vtxindex: u32::from(op.vtxindex), + }); + activated = true; + } else { + debug!( + "key_register_op {} does not match our pending op {}", + txid, &op.txid + ); + } + } + + activated + } +} diff --git a/testnet/stacks-node/src/keychain.rs b/testnet/stacks-node/src/keychain.rs index 7ea3b90556..712fa0b662 100644 --- a/testnet/stacks-node/src/keychain.rs +++ b/testnet/stacks-node/src/keychain.rs @@ -7,7 +7,7 @@ use stacks_common::address::{ }; use 
stacks_common::types::chainstate::StacksAddress; use stacks_common::util::hash::{Hash160, Sha256Sum}; -use stacks_common::util::secp256k1::Secp256k1PublicKey; +use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks_common::util::vrf::{VRFPrivateKey, VRFProof, VRFPublicKey, VRF}; use super::operations::BurnchainOpSigner; @@ -16,6 +16,7 @@ use super::operations::BurnchainOpSigner; #[derive(Clone)] pub struct Keychain { secret_state: Vec, + nakamoto_mining_key: Secp256k1PrivateKey, } impl Keychain { @@ -44,10 +45,27 @@ impl Keychain { StacksPrivateKey::from_slice(&sk_bytes[..]).expect("FATAL: Keychain::make_secret_key_bytes() returned bytes that could not be parsed into a secp256k1 secret key!") } - /// Create a default keychain from the seed + /// Get the public key hash of the nakamoto mining key (i.e., Hash160(pubkey)) + pub fn get_nakamoto_pkh(&self) -> Hash160 { + let pk = Secp256k1PublicKey::from_private(&self.nakamoto_mining_key); + Hash160::from_node_public_key(&pk) + } + + /// Get the secrete key of the nakamoto mining key + pub fn get_nakamoto_sk(&self) -> &Secp256k1PrivateKey { + &self.nakamoto_mining_key + } + + /// Create a default keychain from the seed, with a default nakamoto mining key derived + /// from the same seed ( pub fn default(seed: Vec) -> Keychain { + let secret_state = Self::make_secret_key_bytes(&seed); + // re-hash secret_state to use as a default seed for the nakamoto mining key + let nakamoto_mining_key = + Secp256k1PrivateKey::from_seed(Sha256Sum::from_data(&secret_state).as_bytes()); Keychain { - secret_state: Keychain::make_secret_key_bytes(&seed), + secret_state, + nakamoto_mining_key, } } diff --git a/testnet/stacks-node/src/main.rs b/testnet/stacks-node/src/main.rs index 6addce37a1..8675b43132 100644 --- a/testnet/stacks-node/src/main.rs +++ b/testnet/stacks-node/src/main.rs @@ -19,8 +19,10 @@ pub mod burnchains; pub mod config; pub mod event_dispatcher; pub mod genesis_data; +pub mod globals; 
pub mod keychain; pub mod mockamoto; +pub mod nakamoto_node; pub mod neon_node; pub mod node; pub mod operations; @@ -44,6 +46,7 @@ pub use self::node::{ChainTip, Node}; pub use self::run_loop::{helium, neon}; pub use self::tenure::Tenure; use crate::mockamoto::MockamotoNode; +use crate::run_loop::nakamoto; fn main() { panic::set_hook(Box::new(|panic_info| { @@ -209,6 +212,9 @@ fn main() { } else if conf.burnchain.mode == "mockamoto" { let mut mockamoto = MockamotoNode::new(&conf).unwrap(); mockamoto.run(); + } else if conf.burnchain.mode == "nakamoto-neon" { + let mut run_loop = nakamoto::RunLoop::new(conf); + run_loop.start(None, 0); } else { println!("Burnchain mode '{}' not supported", conf.burnchain.mode); } diff --git a/testnet/stacks-node/src/mockamoto.rs b/testnet/stacks-node/src/mockamoto.rs index 8f17aae677..845f838828 100644 --- a/testnet/stacks-node/src/mockamoto.rs +++ b/testnet/stacks-node/src/mockamoto.rs @@ -69,10 +69,9 @@ use stacks_common::util::secp256k1::{MessageSignature, Secp256k1PublicKey}; use stacks_common::util::vrf::{VRFPrivateKey, VRFProof, VRFPublicKey, VRF}; use self::signer::SelfSigner; +use crate::globals::{Globals, RelayerDirective}; use crate::neon::Counters; -use crate::neon_node::{ - Globals, PeerThread, RelayerDirective, StacksNode, BLOCK_PROCESSOR_STACK_SIZE, -}; +use crate::neon_node::{PeerThread, StacksNode, BLOCK_PROCESSOR_STACK_SIZE}; use crate::syncctl::PoxSyncWatchdogComms; use crate::{Config, EventDispatcher}; @@ -894,8 +893,7 @@ impl MockamotoNode { &mut builder, &mut self.mempool, parent_chain_length, - None, - None, + &[], BlockBuilderSettings { max_miner_time_ms: 15_000, mempool_settings: MemPoolWalkSettings::default(), diff --git a/testnet/stacks-node/src/nakamoto_node.rs b/testnet/stacks-node/src/nakamoto_node.rs new file mode 100644 index 0000000000..1c71b09045 --- /dev/null +++ b/testnet/stacks-node/src/nakamoto_node.rs @@ -0,0 +1,683 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// 
Copyright (C) 2020 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . +use std::collections::HashMap; +use std::convert::TryFrom; +use std::net::SocketAddr; +use std::sync::mpsc::Receiver; +use std::thread; +use std::thread::JoinHandle; + +use super::{Config, EventDispatcher, Keychain}; +use crate::burnchains::bitcoin_regtest_controller::addr2str; +use crate::globals::Globals; +use crate::globals::RelayerDirective; +use crate::neon_node::LeaderKeyRegistrationState; +use crate::run_loop::nakamoto::RunLoop; +use crate::run_loop::RegisteredKey; +use clarity::vm::ast::ASTRules; +use clarity::vm::types::QualifiedContractIdentifier; +use stacks::burnchains::{Burnchain, BurnchainSigner, Txid}; +use stacks::chainstate::burn::db::sortdb::SortitionDB; +use stacks::chainstate::burn::BlockSnapshot; +use stacks::chainstate::stacks::db::StacksChainState; +use stacks::chainstate::stacks::Error as ChainstateError; +use stacks::core::mempool::MemPoolDB; +use stacks::cost_estimates::metrics::UnitMetric; +use stacks::cost_estimates::UnitEstimator; +use stacks::monitoring; +use stacks::monitoring::update_active_miners_count_gauge; +use stacks::net::atlas::{AtlasConfig, AtlasDB}; +use stacks::net::db::PeerDB; +use stacks::net::p2p::PeerNetwork; +use stacks::net::relay::Relayer; +use stacks::net::stackerdb::{StackerDBConfig, StackerDBSync, StackerDBs}; +use stacks::net::{Error as 
NetError, PeerNetworkComms, ServiceFlags}; +use stacks::util_lib::strings::{UrlString, VecDisplay}; +use stacks_common::types::chainstate::SortitionId; +use stacks_common::types::net::PeerAddress; +use stacks_common::types::StacksEpochId; +use stacks_common::util::get_epoch_time_secs; +use stacks_common::util::secp256k1::Secp256k1PrivateKey; + +pub mod miner; +pub mod peer; +pub mod relayer; + +use self::peer::PeerThread; +use self::relayer::RelayerThread; + +pub const RELAYER_MAX_BUFFER: usize = 100; +const VRF_MOCK_MINER_KEY: u64 = 1; + +pub const BLOCK_PROCESSOR_STACK_SIZE: usize = 32 * 1024 * 1024; // 32 MB + +pub type BlockCommits = HashMap; + +/// Node implementation for both miners and followers. +/// This struct is used to set up the node proper and launch the p2p thread and relayer thread. +/// It is further used by the main thread to communicate with these two threads. +pub struct StacksNode { + /// Atlas network configuration + pub atlas_config: AtlasConfig, + /// Global inter-thread communication handle + pub globals: Globals, + /// True if we're a miner + is_miner: bool, + /// handle to the p2p thread + pub p2p_thread_handle: JoinHandle<()>, + /// handle to the relayer thread + pub relayer_thread_handle: JoinHandle<()>, +} + +/// Fault injection logic to artificially increase the length of a tenure. 
+/// Only used in testing +#[cfg(test)] +fn fault_injection_long_tenure() { + // simulated slow block + match std::env::var("STX_TEST_SLOW_TENURE") { + Ok(tenure_str) => match tenure_str.parse::() { + Ok(tenure_time) => { + info!( + "Fault injection: sleeping for {} milliseconds to simulate a long tenure", + tenure_time + ); + stacks_common::util::sleep_ms(tenure_time); + } + Err(_) => { + error!("Parse error for STX_TEST_SLOW_TENURE"); + panic!(); + } + }, + _ => {} + } +} + +#[cfg(not(test))] +fn fault_injection_long_tenure() {} + +/// Fault injection to skip mining in this bitcoin block height +/// Only used in testing +#[cfg(test)] +fn fault_injection_skip_mining(rpc_bind: &str, target_burn_height: u64) -> bool { + match std::env::var("STACKS_DISABLE_MINER") { + Ok(disable_heights) => { + let disable_schedule: serde_json::Value = + serde_json::from_str(&disable_heights).unwrap(); + let disable_schedule = disable_schedule.as_array().unwrap(); + for disabled in disable_schedule { + let target_miner_rpc_bind = disabled + .get("rpc_bind") + .unwrap() + .as_str() + .unwrap() + .to_string(); + if target_miner_rpc_bind != rpc_bind { + continue; + } + let target_block_heights = disabled.get("blocks").unwrap().as_array().unwrap(); + for target_block_value in target_block_heights { + let target_block = target_block_value.as_i64().unwrap() as u64; + if target_block == target_burn_height { + return true; + } + } + } + return false; + } + Err(_) => { + return false; + } + } +} + +#[cfg(not(test))] +fn fault_injection_skip_mining(_rpc_bind: &str, _target_burn_height: u64) -> bool { + false +} + +/// Open the chainstate, and inject faults from the config file +pub(crate) fn open_chainstate_with_faults( + config: &Config, +) -> Result { + let stacks_chainstate_path = config.get_chainstate_path_str(); + let (mut chainstate, _) = StacksChainState::open( + config.is_mainnet(), + config.burnchain.chain_id, + &stacks_chainstate_path, + Some(config.node.get_marf_opts()), + )?; + + 
chainstate.fault_injection.hide_blocks = config.node.fault_injection_hide_blocks; + Ok(chainstate) +} + +/// Types of errors that can arise during mining +#[derive(Debug)] +enum Error { + /// Can't find the block sortition snapshot for the chain tip + SnapshotNotFoundForChainTip, + /// The burnchain tip changed while this operation was in progress + BurnchainTipChanged, + SpawnError(std::io::Error), + FaultInjection, + MissedMiningOpportunity, + /// Attempted to mine while there was no active VRF key + NoVRFKeyActive, + /// The parent block or tenure could not be found + ParentNotFound, + /// Something unexpected happened (e.g., hash mismatches) + UnexpectedChainState, + /// A burnchain operation failed when submitting it to the burnchain + BurnchainSubmissionFailed, + NewParentDiscovered, +} + +impl StacksNode { + /// Set up the AST size-precheck height, if configured + fn setup_ast_size_precheck(config: &Config, sortdb: &mut SortitionDB) { + if let Some(ast_precheck_size_height) = config.burnchain.ast_precheck_size_height { + info!( + "Override burnchain height of {:?} to {}", + ASTRules::PrecheckSize, + ast_precheck_size_height + ); + let mut tx = sortdb + .tx_begin() + .expect("FATAL: failed to begin tx on sortition DB"); + SortitionDB::override_ast_rule_height( + &mut tx, + ASTRules::PrecheckSize, + ast_precheck_size_height, + ) + .expect("FATAL: failed to override AST PrecheckSize rule height"); + tx.commit() + .expect("FATAL: failed to commit sortition DB transaction"); + } + } + + /// Set up the mempool DB by making sure it exists. + /// Panics on failure. 
+ fn setup_mempool_db(config: &Config) -> MemPoolDB { + // force early mempool instantiation + let cost_estimator = config + .make_cost_estimator() + .unwrap_or_else(|| Box::new(UnitEstimator)); + let metric = config + .make_cost_metric() + .unwrap_or_else(|| Box::new(UnitMetric)); + + let mempool = MemPoolDB::open( + config.is_mainnet(), + config.burnchain.chain_id, + &config.get_chainstate_path_str(), + cost_estimator, + metric, + ) + .expect("BUG: failed to instantiate mempool"); + + mempool + } + + /// Set up the Peer DB and update any soft state from the config file. This includes: + /// * blacklisted/whitelisted nodes + /// * node keys + /// * bootstrap nodes + /// Returns the instantiated PeerDB + /// Panics on failure. + fn setup_peer_db( + config: &Config, + burnchain: &Burnchain, + stackerdb_contract_ids: &[QualifiedContractIdentifier], + ) -> PeerDB { + let data_url = UrlString::try_from(format!("{}", &config.node.data_url)).unwrap(); + let initial_neighbors = config.node.bootstrap_node.clone(); + if initial_neighbors.len() > 0 { + info!( + "Will bootstrap from peers {}", + VecDisplay(&initial_neighbors) + ); + } else { + warn!("Without a peer to bootstrap from, the node will start mining a new chain"); + } + + let p2p_sock: SocketAddr = config.node.p2p_bind.parse().expect(&format!( + "Failed to parse socket: {}", + &config.node.p2p_bind + )); + let p2p_addr: SocketAddr = config.node.p2p_address.parse().expect(&format!( + "Failed to parse socket: {}", + &config.node.p2p_address + )); + let node_privkey = Secp256k1PrivateKey::from_seed(&config.node.local_peer_seed); + + let mut peerdb = PeerDB::connect( + &config.get_peer_db_file_path(), + true, + config.burnchain.chain_id, + burnchain.network_id, + Some(node_privkey), + config.connection_options.private_key_lifetime.clone(), + PeerAddress::from_socketaddr(&p2p_addr), + p2p_sock.port(), + data_url, + &[], + Some(&initial_neighbors), + stackerdb_contract_ids, + ) + .map_err(|e| { + eprintln!( + "Failed to 
open {}: {:?}", + &config.get_peer_db_file_path(), + &e + ); + panic!(); + }) + .unwrap(); + + // allow all bootstrap nodes + { + let mut tx = peerdb.tx_begin().unwrap(); + for initial_neighbor in initial_neighbors.iter() { + // update peer in case public key changed + PeerDB::update_peer(&mut tx, &initial_neighbor).unwrap(); + PeerDB::set_allow_peer( + &mut tx, + initial_neighbor.addr.network_id, + &initial_neighbor.addr.addrbytes, + initial_neighbor.addr.port, + -1, + ) + .unwrap(); + } + tx.commit().unwrap(); + } + + if !config.node.deny_nodes.is_empty() { + warn!("Will ignore nodes {:?}", &config.node.deny_nodes); + } + + // deny all config-denied peers + { + let mut tx = peerdb.tx_begin().unwrap(); + for denied in config.node.deny_nodes.iter() { + PeerDB::set_deny_peer( + &mut tx, + denied.addr.network_id, + &denied.addr.addrbytes, + denied.addr.port, + get_epoch_time_secs() + 24 * 365 * 3600, + ) + .unwrap(); + } + tx.commit().unwrap(); + } + + // update services to indicate we can support mempool sync + { + let mut tx = peerdb.tx_begin().unwrap(); + PeerDB::set_local_services( + &mut tx, + (ServiceFlags::RPC as u16) | (ServiceFlags::RELAY as u16), + ) + .unwrap(); + tx.commit().unwrap(); + } + + peerdb + } + + /// Set up the PeerNetwork, but do not bind it. 
+ pub fn setup_peer_network( + config: &Config, + atlas_config: &AtlasConfig, + burnchain: Burnchain, + ) -> PeerNetwork { + let sortdb = SortitionDB::open( + &config.get_burn_db_file_path(), + true, + burnchain.pox_constants.clone(), + ) + .expect("Error while instantiating sortition db"); + + let epochs = SortitionDB::get_stacks_epochs(sortdb.conn()) + .expect("Error while loading stacks epochs"); + + let view = { + let sortition_tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()) + .expect("Failed to get sortition tip"); + SortitionDB::get_burnchain_view(&sortdb.index_conn(), &burnchain, &sortition_tip) + .unwrap() + }; + + let atlasdb = + AtlasDB::connect(atlas_config.clone(), &config.get_atlas_db_file_path(), true).unwrap(); + + let stackerdbs = StackerDBs::connect(&config.get_stacker_db_file_path(), true).unwrap(); + + let mut chainstate = + open_chainstate_with_faults(config).expect("FATAL: could not open chainstate DB"); + + let mut stackerdb_machines = HashMap::new(); + for stackerdb_contract_id in config.node.stacker_dbs.iter() { + // attempt to load the config + let (instantiate, stacker_db_config) = match StackerDBConfig::from_smart_contract( + &mut chainstate, + &sortdb, + stackerdb_contract_id, + ) { + Ok(c) => (true, c), + Err(e) => { + warn!( + "Failed to load StackerDB config for {}: {:?}", + stackerdb_contract_id, &e + ); + (false, StackerDBConfig::noop()) + } + }; + let mut stackerdbs = + StackerDBs::connect(&config.get_stacker_db_file_path(), true).unwrap(); + + if instantiate { + match stackerdbs.get_stackerdb_id(stackerdb_contract_id) { + Ok(..)
=> { + // reconfigure + let tx = stackerdbs.tx_begin(stacker_db_config.clone()).unwrap(); + tx.reconfigure_stackerdb(stackerdb_contract_id, &stacker_db_config.signers) + .expect(&format!( + "FATAL: failed to reconfigure StackerDB replica {}", + stackerdb_contract_id + )); + tx.commit().unwrap(); + } + Err(NetError::NoSuchStackerDB(..)) => { + // instantiate replica + let tx = stackerdbs.tx_begin(stacker_db_config.clone()).unwrap(); + tx.create_stackerdb(stackerdb_contract_id, &stacker_db_config.signers) + .expect(&format!( + "FATAL: failed to instantiate StackerDB replica {}", + stackerdb_contract_id + )); + tx.commit().unwrap(); + } + Err(e) => { + panic!("FATAL: failed to query StackerDB state: {:?}", &e); + } + } + } + let stacker_db_sync = match StackerDBSync::new( + stackerdb_contract_id.clone(), + &stacker_db_config, + PeerNetworkComms::new(), + stackerdbs, + ) { + Ok(s) => s, + Err(e) => { + warn!( + "Failed to instantiate StackerDB sync machine for {}: {:?}", + stackerdb_contract_id, &e + ); + continue; + } + }; + + stackerdb_machines.insert( + stackerdb_contract_id.clone(), + (stacker_db_config, stacker_db_sync), + ); + } + + let stackerdb_contract_ids: Vec<_> = + stackerdb_machines.keys().map(|sc| sc.clone()).collect(); + let peerdb = Self::setup_peer_db(config, &burnchain, &stackerdb_contract_ids); + + let local_peer = match PeerDB::get_local_peer(peerdb.conn()) { + Ok(local_peer) => local_peer, + _ => panic!("Unable to retrieve local peer"), + }; + + let p2p_net = PeerNetwork::new( + peerdb, + atlasdb, + stackerdbs, + local_peer, + config.burnchain.peer_version, + burnchain, + view, + config.connection_options.clone(), + stackerdb_machines, + epochs, + ); + + p2p_net + } + + /// This function sets the global var `GLOBAL_BURNCHAIN_SIGNER`. + /// + /// This variable is used for prometheus monitoring (which only + /// runs when the feature flag `monitoring_prom` is activated). 
+ /// The address is set using the single-signature BTC address + /// associated with `keychain`'s public key. This address always + /// assumes Epoch-2.1 rules for the miner address: if the + /// node is configured for segwit, then the miner address generated + /// is a segwit address, otherwise it is a p2pkh. + /// + fn set_monitoring_miner_address(keychain: &Keychain, relayer_thread: &RelayerThread) { + let public_key = keychain.get_pub_key(); + let miner_addr = relayer_thread + .bitcoin_controller + .get_miner_address(StacksEpochId::Epoch21, &public_key); + let miner_addr_str = addr2str(&miner_addr); + let _ = monitoring::set_burnchain_signer(BurnchainSigner(miner_addr_str)).map_err(|e| { + warn!("Failed to set global burnchain signer: {:?}", &e); + e + }); + } + + pub fn spawn( + runloop: &RunLoop, + globals: Globals, + // relay receiver endpoint for the p2p thread, so the relayer can feed it data to push + relay_recv: Receiver, + ) -> StacksNode { + let config = runloop.config().clone(); + let is_miner = runloop.is_miner(); + let burnchain = runloop.get_burnchain(); + let atlas_config = config.atlas.clone(); + let keychain = Keychain::default(config.node.seed.clone()); + + // we can call _open_ here rather than _connect_, since connect is first called in + // make_genesis_block + let mut sortdb = SortitionDB::open( + &config.get_burn_db_file_path(), + true, + burnchain.pox_constants.clone(), + ) + .expect("Error while instantiating sortition db"); + + Self::setup_ast_size_precheck(&config, &mut sortdb); + + let _ = Self::setup_mempool_db(&config); + + let mut p2p_net = Self::setup_peer_network(&config, &atlas_config, burnchain.clone()); + + let stackerdbs = StackerDBs::connect(&config.get_stacker_db_file_path(), true) + .expect("FATAL: failed to connect to stacker DB"); + + let relayer = Relayer::from_p2p(&mut p2p_net, stackerdbs); + + let local_peer = p2p_net.local_peer.clone(); + + // setup initial key registration + let leader_key_registration_state = if 
config.node.mock_mining { + // mock mining, pretend to have a registered key + let (vrf_public_key, _) = keychain.make_vrf_keypair(VRF_MOCK_MINER_KEY); + LeaderKeyRegistrationState::Active(RegisteredKey { + target_block_height: VRF_MOCK_MINER_KEY, + block_height: 1, + op_vtxindex: 1, + vrf_public_key, + }) + } else { + LeaderKeyRegistrationState::Inactive + }; + globals.set_initial_leader_key_registration_state(leader_key_registration_state); + + let relayer_thread = RelayerThread::new(runloop, local_peer.clone(), relayer); + + StacksNode::set_monitoring_miner_address(&keychain, &relayer_thread); + + let relayer_thread_handle = thread::Builder::new() + .name(format!("relayer-{}", &local_peer.data_url)) + .stack_size(BLOCK_PROCESSOR_STACK_SIZE) + .spawn(move || { + relayer_thread.main(relay_recv); + }) + .expect("FATAL: failed to start relayer thread"); + + let p2p_event_dispatcher = runloop.get_event_dispatcher(); + let p2p_thread = PeerThread::new(runloop, p2p_net); + let p2p_thread_handle = thread::Builder::new() + .stack_size(BLOCK_PROCESSOR_STACK_SIZE) + .name(format!( + "p2p-({},{})", + &config.node.p2p_bind, &config.node.rpc_bind + )) + .spawn(move || { + p2p_thread.main(p2p_event_dispatcher); + }) + .expect("FATAL: failed to start p2p thread"); + + info!("Start HTTP server on: {}", &config.node.rpc_bind); + info!("Start P2P server on: {}", &config.node.p2p_bind); + + StacksNode { + atlas_config, + globals, + is_miner, + p2p_thread_handle, + relayer_thread_handle, + } + } + + /// Notify the relayer that a new burn block has been processed by the sortition db, + /// telling it to process the block and begin mining if this miner won. + /// returns _false_ if the relayer hung up the channel. + /// Called from the main thread. + pub fn relayer_burnchain_notify(&self) -> bool { + if !self.is_miner { + // node is a follower, don't try to process my own tenure. 
+ return true; + } + + let Some(snapshot) = self.globals.get_last_sortition() else { + debug!("Tenure: Notify sortition! No last burn block"); + return true; + }; + + debug!( + "Tenure: Notify sortition!"; + "consensus_hash" => %snapshot.consensus_hash, + "burn_block_hash" => %snapshot.burn_header_hash, + "winning_stacks_block_hash" => %snapshot.winning_stacks_block_hash, + "burn_block_height" => &snapshot.block_height, + "sortition_id" => %snapshot.sortition_id + ); + + // unlike in neon_node, the nakamoto node should *always* notify the relayer of + // a new burnchain block + + return self + .globals + .relay_send + .send(RelayerDirective::ProcessTenure( + snapshot.consensus_hash.clone(), + snapshot.parent_burn_header_hash.clone(), + snapshot.winning_stacks_block_hash.clone(), + )) + .is_ok(); + } + + /// Process a state coming from the burnchain, by extracting the validated KeyRegisterOp + /// and inspecting if a sortition was won. + /// `ibd`: boolean indicating whether or not we are in the initial block download + /// Called from the main thread. 
+ pub fn process_burnchain_state( + &mut self, + sortdb: &SortitionDB, + sort_id: &SortitionId, + ibd: bool, + ) -> Option<BlockSnapshot> { + let mut last_sortitioned_block = None; + + let ic = sortdb.index_conn(); + + let block_snapshot = SortitionDB::get_block_snapshot(&ic, sort_id) + .expect("Failed to obtain block snapshot for processed burn block.") + .expect("Failed to obtain block snapshot for processed burn block."); + let block_height = block_snapshot.block_height; + + let block_commits = + SortitionDB::get_block_commits_by_block(&ic, &block_snapshot.sortition_id) + .expect("Unexpected SortitionDB error fetching block commits"); + + let num_block_commits = block_commits.len(); + + update_active_miners_count_gauge(block_commits.len() as i64); + + for op in block_commits.into_iter() { + if op.txid == block_snapshot.winning_block_txid { + info!( + "Received burnchain block #{} including block_commit_op (winning) - {} ({})", + block_height, op.apparent_sender, &op.block_header_hash + ); + last_sortitioned_block = Some((block_snapshot.clone(), op.vtxindex)); + } else { + if self.is_miner { + info!( + "Received burnchain block #{} including block_commit_op - {} ({})", + block_height, op.apparent_sender, &op.block_header_hash + ); + } + } + } + + let key_registers = + SortitionDB::get_leader_keys_by_block(&ic, &block_snapshot.sortition_id) + .expect("Unexpected SortitionDB error fetching key registers"); + + let num_key_registers = key_registers.len(); + + self.globals + .try_activate_leader_key_registration(block_height, key_registers); + + debug!( + "Processed burnchain state"; + "burn_height" => block_height, + "leader_keys_count" => num_key_registers, + "block_commits_count" => num_block_commits, + "in_initial_block_download?"
=> ibd, + ); + + self.globals.set_last_sortition(block_snapshot); + last_sortitioned_block.map(|x| x.0) + } + + /// Join all inner threads + pub fn join(self) { + self.relayer_thread_handle.join().unwrap(); + self.p2p_thread_handle.join().unwrap(); + } +} diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs new file mode 100644 index 0000000000..cb9942d451 --- /dev/null +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -0,0 +1,645 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+use std::convert::TryFrom; +use std::thread; +use std::thread::JoinHandle; +use std::time::Instant; + +use super::relayer::RelayerThread; +use super::Error as NakamotoNodeError; +use super::{Config, EventDispatcher, Keychain}; +use crate::globals::Globals; +use crate::mockamoto::signer::SelfSigner; +use crate::nakamoto_node::VRF_MOCK_MINER_KEY; +use crate::run_loop::RegisteredKey; +use crate::ChainTip; +use clarity::vm::types::PrincipalData; +use stacks::burnchains::{Burnchain, BurnchainParameters}; +use stacks::chainstate::burn::db::sortdb::SortitionDB; +use stacks::chainstate::burn::{BlockSnapshot, ConsensusHash}; +use stacks::chainstate::nakamoto::miner::{NakamotoBlockBuilder, NakamotoTenureStart}; +use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState}; +use stacks::chainstate::stacks::db::{StacksChainState, StacksHeaderInfo}; +use stacks::chainstate::stacks::Error as ChainstateError; +use stacks::chainstate::stacks::TenureChangeCause; +use stacks::chainstate::stacks::TenureChangePayload; +use stacks::chainstate::stacks::ThresholdSignature; +use stacks::chainstate::stacks::{ + CoinbasePayload, StacksTransaction, StacksTransactionSigner, TransactionAnchorMode, + TransactionPayload, TransactionVersion, +}; +use stacks::core::mempool::MemPoolDB; +use stacks::core::FIRST_BURNCHAIN_CONSENSUS_HASH; +use stacks::cost_estimates::metrics::UnitMetric; +use stacks::cost_estimates::UnitEstimator; +use stacks_common::types::chainstate::{StacksAddress, StacksBlockId}; +use stacks_common::types::PrivateKey; +use stacks_common::types::StacksEpochId; +use stacks_common::util::hash::Hash160; +use stacks_common::util::vrf::VRFProof; + +pub enum MinerDirective { + /// The miner won sortition so they should begin a new tenure + BeginTenure { + parent_tenure_start: StacksBlockId, + burnchain_tip: BlockSnapshot, + }, + /// The miner should try to continue their tenure if they are the active miner + ContinueTenure { new_burn_view: ConsensusHash }, + /// The miner did 
 not win sortition + StopTenure, +} + +struct ParentTenureInfo { + #[allow(dead_code)] + parent_tenure_start: StacksBlockId, + parent_tenure_blocks: u64, +} + +/// Metadata required for beginning a new tenure +struct ParentStacksBlockInfo { + /// Header metadata for the Stacks block we're going to build on top of + stacks_parent_header: StacksHeaderInfo, + /// the total amount burned in the sortition that selected the Stacks block parent + parent_block_total_burn: u64, + /// nonce to use for this new block's coinbase transaction + coinbase_nonce: u64, + parent_tenure: Option<ParentTenureInfo>, +} + +pub struct BlockMinerThread { + /// node config struct + config: Config, + /// handle to global state + globals: Globals, + /// copy of the node's keychain + keychain: Keychain, + /// burnchain configuration + burnchain: Burnchain, + /// Set of blocks that we have mined, but are still potentially-broadcastable + /// (copied from RelayerThread since we need the info to determine the strategy for mining the + /// next block during this tenure).
+ last_mined_blocks: Vec<NakamotoBlock>, + /// Copy of the node's registered VRF key + registered_key: RegisteredKey, + /// Burnchain block snapshot which elected this miner + burn_block: BlockSnapshot, + /// The start of the parent tenure for this tenure + parent_tenure_id: StacksBlockId, + /// Handle to the node's event dispatcher + event_dispatcher: EventDispatcher, +} + +impl BlockMinerThread { + /// Instantiate the miner thread + pub fn new( + rt: &RelayerThread, + registered_key: RegisteredKey, + burn_block: BlockSnapshot, + parent_tenure_id: StacksBlockId, + ) -> BlockMinerThread { + BlockMinerThread { + config: rt.config.clone(), + globals: rt.globals.clone(), + keychain: rt.keychain.clone(), + burnchain: rt.burnchain.clone(), + last_mined_blocks: vec![], + registered_key, + burn_block, + event_dispatcher: rt.event_dispatcher.clone(), + parent_tenure_id, + } + } + + /// Stop a miner tenure by blocking the miner and then joining the tenure thread + pub fn stop_miner(globals: &Globals, prior_miner: JoinHandle<()>) { + globals.block_miner(); + prior_miner + .join() + .expect("FATAL: IO failure joining prior mining thread"); + globals.unblock_miner(); + } + + pub fn run_miner(mut self, prior_miner: Option<JoinHandle<()>>) { + // when starting a new tenure, block the mining thread if it's currently running.
+ // the new mining thread will join it (so that the new mining thread stalls, not the relayer) + if let Some(prior_miner) = prior_miner { + Self::stop_miner(&self.globals, prior_miner); + } + + // now, actually run this tenure + let Some(new_block) = self.mine_block() else { + warn!("Failed to mine block"); + return; + }; + + if let Some(self_signer) = self.config.self_signing() { + if let Err(e) = self.self_sign_and_broadcast(self_signer, new_block.clone()) { + warn!("Error self-signing block: {e:?}"); + } else { + self.globals.coord().announce_new_stacks_block(); + } + } else { + warn!("Not self-signing: nakamoto node does not support stacker-signer-protocol yet"); + } + + self.globals.counters.bump_naka_mined_blocks(); + self.last_mined_blocks.push(new_block); + } + + fn self_sign_and_broadcast( + &self, + mut signer: SelfSigner, + mut block: NakamotoBlock, + ) -> Result<(), ChainstateError> { + signer.sign_nakamoto_block(&mut block); + let mut chain_state = super::open_chainstate_with_faults(&self.config) + .expect("FATAL: could not open chainstate DB"); + let chainstate_config = chain_state.config(); + let sort_db = SortitionDB::open( + &self.config.get_burn_db_file_path(), + true, + self.burnchain.pox_constants.clone(), + ) + .expect("FATAL: could not open sortition DB"); + let sortition_handle = sort_db.index_handle_at_tip(); + let staging_tx = chain_state.staging_db_tx_begin()?; + NakamotoChainState::accept_block( + &chainstate_config, + block, + &sortition_handle, + &staging_tx, + &signer.aggregate_public_key, + )?; + staging_tx.commit()?; + Ok(()) + } + + /// Get the coinbase recipient address, if set in the config and if allowed in this epoch + fn get_coinbase_recipient(&self, epoch_id: StacksEpochId) -> Option { + if epoch_id < StacksEpochId::Epoch21 && self.config.miner.block_reward_recipient.is_some() { + warn!("Coinbase pay-to-contract is not supported in the current epoch"); + None + } else { + self.config.miner.block_reward_recipient.clone() + } + 
} + + fn generate_tenure_change_tx( + &mut self, + nonce: u64, + parent_block_id: StacksBlockId, + parent_tenure_blocks: u64, + miner_pkh: Hash160, + ) -> Option<StacksTransaction> { + if self.config.self_signing().is_none() { + // if we're not self-signing, then we can't generate a tenure change tx: it has to come from the signers. + return None; + } + let is_mainnet = self.config.is_mainnet(); + let chain_id = self.config.burnchain.chain_id; + let tenure_change_tx_payload = TransactionPayload::TenureChange( + TenureChangePayload { + previous_tenure_end: parent_block_id, + previous_tenure_blocks: u32::try_from(parent_tenure_blocks) + .expect("FATAL: more than u32 blocks in a tenure"), + cause: TenureChangeCause::BlockFound, + pubkey_hash: miner_pkh, + signers: vec![], + }, + ThresholdSignature::mock(), + ); + + let mut tx_auth = self.keychain.get_transaction_auth().unwrap(); + tx_auth.set_origin_nonce(nonce); + + let version = if is_mainnet { + TransactionVersion::Mainnet + } else { + TransactionVersion::Testnet + }; + + let mut tx = StacksTransaction::new(version, tx_auth, tenure_change_tx_payload); + + tx.chain_id = chain_id; + tx.anchor_mode = TransactionAnchorMode::OnChainOnly; + let mut tx_signer = StacksTransactionSigner::new(&tx); + self.keychain.sign_as_origin(&mut tx_signer); + + Some(tx_signer.get_tx().unwrap()) + } + + /// Create a coinbase transaction.
+ fn generate_coinbase_tx( + &mut self, + nonce: u64, + epoch_id: StacksEpochId, + vrf_proof: VRFProof, + ) -> StacksTransaction { + let is_mainnet = self.config.is_mainnet(); + let chain_id = self.config.burnchain.chain_id; + let mut tx_auth = self.keychain.get_transaction_auth().unwrap(); + tx_auth.set_origin_nonce(nonce); + + let version = if is_mainnet { + TransactionVersion::Mainnet + } else { + TransactionVersion::Testnet + }; + + let recipient_opt = self.get_coinbase_recipient(epoch_id); + + let mut tx = StacksTransaction::new( + version, + tx_auth, + TransactionPayload::Coinbase( + CoinbasePayload([0u8; 32]), + recipient_opt, + Some(vrf_proof), + ), + ); + tx.chain_id = chain_id; + tx.anchor_mode = TransactionAnchorMode::OnChainOnly; + let mut tx_signer = StacksTransactionSigner::new(&tx); + self.keychain.sign_as_origin(&mut tx_signer); + + tx_signer.get_tx().unwrap() + } + + /// Load up the parent block info for mining. + /// If there's no parent because this is the first block, then return the genesis block's info. + /// If we can't find the parent in the DB but we expect one, return None. 
+ fn load_block_parent_info( + &self, + burn_db: &mut SortitionDB, + chain_state: &mut StacksChainState, + ) -> Option<ParentStacksBlockInfo> { + let Some(stacks_tip) = + NakamotoChainState::get_canonical_block_header(chain_state.db(), burn_db) + .expect("FATAL: could not query chain tip") + else { + debug!("No Stacks chain tip known, will return a genesis block"); + let (network, _) = self.config.burnchain.get_bitcoin_network(); + let burnchain_params = + BurnchainParameters::from_params(&self.config.burnchain.chain, &network) + .expect("Bitcoin network unsupported"); + + let chain_tip = ChainTip::genesis( + &burnchain_params.first_block_hash, + burnchain_params.first_block_height.into(), + burnchain_params.first_block_timestamp.into(), + ); + + return Some(ParentStacksBlockInfo { + parent_tenure: Some(ParentTenureInfo { + parent_tenure_start: chain_tip.metadata.index_block_hash(), + parent_tenure_blocks: 0, + }), + stacks_parent_header: chain_tip.metadata, + parent_block_total_burn: 0, + coinbase_nonce: 0, + }); + }; + + let miner_address = self + .keychain + .origin_address(self.config.is_mainnet()) + .unwrap(); + match ParentStacksBlockInfo::lookup( + chain_state, + burn_db, + &self.burn_block, + miner_address, + &self.parent_tenure_id, + stacks_tip, + ) { + Ok(parent_info) => Some(parent_info), + Err(NakamotoNodeError::BurnchainTipChanged) => { + self.globals.counters.bump_missed_tenures(); + None + } + Err(..) => None, + } + } + + /// Generate the VRF proof for the block we're going to build.
+ /// Returns Some(proof) if we could make the proof + /// Return None if we could not make the proof + fn make_vrf_proof(&mut self) -> Option<VRFProof> { + // if we're a mock miner, then make sure that the keychain has a keypair for the mocked VRF + // key + let vrf_proof = if self.config.node.mock_mining { + self.keychain.generate_proof( + VRF_MOCK_MINER_KEY, + self.burn_block.sortition_hash.as_bytes(), + ) + } else { + self.keychain.generate_proof( + self.registered_key.target_block_height, + self.burn_block.sortition_hash.as_bytes(), + ) + }; + + debug!( + "Generated VRF Proof: {} over {} ({},{}) with key {}", + vrf_proof.to_hex(), + &self.burn_block.sortition_hash, + &self.burn_block.block_height, + &self.burn_block.burn_header_hash, + &self.registered_key.vrf_public_key.to_hex() + ); + Some(vrf_proof) + } + + /// Try to mine a Stacks block by assembling one from mempool transactions and sending a + /// burnchain block-commit transaction. If we succeed, then return the assembled block data as + /// well as the microblock private key to use to produce microblocks. + /// Return None if we couldn't build a block for whatever reason. + fn mine_block(&mut self) -> Option<NakamotoBlock> { + debug!("block miner thread ID is {:?}", thread::current().id()); + super::fault_injection_long_tenure(); + + let burn_db_path = self.config.get_burn_db_file_path(); + let stacks_chainstate_path = self.config.get_chainstate_path_str(); + + let cost_estimator = self + .config + .make_cost_estimator() + .unwrap_or_else(|| Box::new(UnitEstimator)); + let metric = self + .config + .make_cost_metric() + .unwrap_or_else(|| Box::new(UnitMetric)); + + // NOTE: read-write access is needed in order to be able to query the recipient set.
+ // This is an artifact of the way the MARF is built (see #1449) + let mut burn_db = + SortitionDB::open(&burn_db_path, true, self.burnchain.pox_constants.clone()) + .expect("FATAL: could not open sortition DB"); + + let mut chain_state = super::open_chainstate_with_faults(&self.config) + .expect("FATAL: could not open chainstate DB"); + + let mut mem_pool = MemPoolDB::open( + self.config.is_mainnet(), + self.config.burnchain.chain_id, + &stacks_chainstate_path, + cost_estimator, + metric, + ) + .expect("Database failure opening mempool"); + + let assembly_start = Instant::now(); + + let target_epoch_id = + SortitionDB::get_stacks_epoch(burn_db.conn(), self.burn_block.block_height + 1) + .ok()? + .expect("FATAL: no epoch defined") + .epoch_id; + let mut parent_block_info = self.load_block_parent_info(&mut burn_db, &mut chain_state)?; + let vrf_proof = self.make_vrf_proof()?; + + if self.last_mined_blocks.is_empty() { + if parent_block_info.parent_tenure.is_none() { + warn!( + "Miner should be starting a new tenure, but failed to load parent tenure info" + ); + return None; + } + } + + // create our coinbase if this is the first block we've mined this tenure + let tenure_start_info = if let Some(ref par_tenure_info) = parent_block_info.parent_tenure { + let parent_block_id = parent_block_info.stacks_parent_header.index_block_hash(); + let current_miner_nonce = parent_block_info.coinbase_nonce; + let tenure_change_tx = self.generate_tenure_change_tx( + current_miner_nonce, + parent_block_id, + par_tenure_info.parent_tenure_blocks, + self.keychain.get_nakamoto_pkh(), + )?; + let coinbase_tx = self.generate_coinbase_tx( + current_miner_nonce + 1, + target_epoch_id, + vrf_proof.clone(), + ); + Some(NakamotoTenureStart { + coinbase_tx, + // TODO (refactor): the nakamoto block builder doesn't use this VRF proof, + // it has to be included in the coinbase tx, which is an arg to the builder. + // we should probably just remove this from the nakamoto block builder. 
+ vrf_proof: vrf_proof.clone(), + tenure_change_tx, + }) + } else { + None + }; + + parent_block_info.stacks_parent_header.microblock_tail = None; + + // build the block itself + let (mut block, _, _) = match NakamotoBlockBuilder::build_nakamoto_block( + &chain_state, + &burn_db.index_conn(), + &mut mem_pool, + // TODO (refactor): the nakamoto block builder doesn't use the parent tenure ID, + // it has to be included in the tenure change tx, which is an arg to the builder. + // we should probably just remove this from the nakamoto block builder, so that + // there isn't duplicated or unused logic here + &self.parent_tenure_id, + &parent_block_info.stacks_parent_header, + &self.burn_block.consensus_hash, + self.burn_block.total_burn, + tenure_start_info, + self.config.make_block_builder_settings( + // TODO: the attempt counter needs a different configuration approach in nakamoto + 1, + false, + self.globals.get_miner_status(), + ), + Some(&self.event_dispatcher), + ) { + Ok(block) => block, + Err(e) => { + error!("Relayer: Failure mining anchored block: {}", e); + return None; + } + }; + + let mining_key = self.keychain.get_nakamoto_sk(); + let miner_signature = mining_key + .sign(block.header.signature_hash().ok()?.as_bytes()) + .ok()?; + block.header.miner_signature = miner_signature; + + info!( + "Miner: Succeeded assembling {} block #{}: {}, with {} txs", + if parent_block_info.parent_block_total_burn == 0 { + "Genesis" + } else { + "Stacks" + }, + block.header.chain_length, + block.header.block_hash(), + block.txs.len(), + ); + + // last chance -- confirm that the stacks tip is unchanged (since it could have taken long + // enough to build this block that another block could have arrived), and confirm that all + // Stacks blocks with heights higher than the canoincal tip are processed. 
+ let cur_burn_chain_tip = SortitionDB::get_canonical_burn_chain_tip(burn_db.conn()) + .expect("FATAL: failed to query sortition DB for canonical burn chain tip"); + + if cur_burn_chain_tip.consensus_hash != block.header.consensus_hash { + info!("Miner: Cancel block assembly; burnchain tip has changed"); + self.globals.counters.bump_missed_tenures(); + return None; + } + + Some(block) + } +} + +impl ParentStacksBlockInfo { + /// Determine where in the set of forks to attempt to mine the next anchored block. + /// `mine_tip_ch` and `mine_tip_bhh` identify the parent block on top of which to mine. + /// `check_burn_block` identifies what we believe to be the burn chain's sortition history tip. + /// This is used to mitigate (but not eliminate) a TOCTTOU issue with mining: the caller's + /// conception of the sortition history tip may have become stale by the time they call this + /// method, in which case, mining should *not* happen (since the block will be invalid). + pub fn lookup( + chain_state: &mut StacksChainState, + burn_db: &mut SortitionDB, + check_burn_block: &BlockSnapshot, + miner_address: StacksAddress, + parent_tenure_id: &StacksBlockId, + stacks_tip_header: StacksHeaderInfo, + ) -> Result<ParentStacksBlockInfo, NakamotoNodeError> { + // the stacks block I'm mining off of's burn header hash and vtxindex: + let parent_snapshot = SortitionDB::get_block_snapshot_consensus( + burn_db.conn(), + &stacks_tip_header.consensus_hash, + ) + .expect("Failed to look up block's parent snapshot") + .expect("Failed to look up block's parent snapshot"); + + let parent_sortition_id = &parent_snapshot.sortition_id; + + let parent_block_total_burn = + if &stacks_tip_header.consensus_hash == &FIRST_BURNCHAIN_CONSENSUS_HASH { + 0 + } else { + let parent_burn_block = + SortitionDB::get_block_snapshot(burn_db.conn(), parent_sortition_id) + .expect("SortitionDB failure.") + .ok_or_else(|| { + error!( + "Failed to find block snapshot for the parent sortition"; + "parent_sortition_id" => %parent_sortition_id + );
NakamotoNodeError::SnapshotNotFoundForChainTip + })?; + + parent_burn_block.total_burn + }; + + // don't mine off of an old burnchain block + let burn_chain_tip = SortitionDB::get_canonical_burn_chain_tip(burn_db.conn()) + .expect("FATAL: failed to query sortition DB for canonical burn chain tip"); + + if burn_chain_tip.consensus_hash != check_burn_block.consensus_hash { + info!( + "New canonical burn chain tip detected. Will not try to mine."; + "new_consensus_hash" => %burn_chain_tip.consensus_hash, + "old_consensus_hash" => %check_burn_block.consensus_hash, + "new_burn_height" => burn_chain_tip.block_height, + "old_burn_height" => check_burn_block.block_height + ); + return Err(NakamotoNodeError::BurnchainTipChanged); + } + + let Ok(Some(parent_tenure_header)) = + NakamotoChainState::get_block_header(chain_state.db(), &parent_tenure_id) + else { + warn!("Failed loading parent tenure ID"; "parent_tenure_id" => %parent_tenure_id); + return Err(NakamotoNodeError::ParentNotFound); + }; + + // check if we're mining a first tenure block (by checking if our parent block is in the tenure of parent_tenure_id) + // and if so, figure out how many blocks there were in the parent tenure + let parent_tenure_info = if stacks_tip_header.consensus_hash + == parent_tenure_header.consensus_hash + { + let parent_tenure_blocks = if parent_tenure_header + .anchored_header + .as_stacks_nakamoto() + .is_some() + { + let Ok(Some(last_parent_tenure_header)) = + NakamotoChainState::get_nakamoto_tenure_finish_block_header( + chain_state.db(), + &parent_tenure_header.consensus_hash, + ) + else { + warn!("Failed loading last block of parent tenure"; "parent_tenure_id" => %parent_tenure_id); + return Err(NakamotoNodeError::ParentNotFound); + }; + // the last known tenure block of our parent should be the stacks_tip. if not, error. 
+ if stacks_tip_header.index_block_hash() + != last_parent_tenure_header.index_block_hash() + { + return Err(NakamotoNodeError::NewParentDiscovered); + } + 1 + last_parent_tenure_header.stacks_block_height + - parent_tenure_header.stacks_block_height + } else { + 1 + }; + Some(ParentTenureInfo { + parent_tenure_start: parent_tenure_id.clone(), + parent_tenure_blocks, + }) + } else { + None + }; + + debug!("Mining tenure's last consensus hash: {} (height {} hash {}), stacks tip consensus hash: {} (height {} hash {})", + &check_burn_block.consensus_hash, check_burn_block.block_height, &check_burn_block.burn_header_hash, + &parent_snapshot.consensus_hash, parent_snapshot.block_height, &parent_snapshot.burn_header_hash); + + let coinbase_nonce = { + let principal = miner_address.into(); + let account = chain_state + .with_read_only_clarity_tx( + &burn_db.index_conn(), + &stacks_tip_header.index_block_hash(), + |conn| StacksChainState::get_account(conn, &principal), + ) + .expect(&format!( + "BUG: stacks tip block {} no longer exists after we queried it", + &stacks_tip_header.index_block_hash(), + )); + account.nonce + }; + + Ok(ParentStacksBlockInfo { + stacks_parent_header: stacks_tip_header, + parent_block_total_burn, + coinbase_nonce, + parent_tenure: parent_tenure_info, + }) + } +} diff --git a/testnet/stacks-node/src/nakamoto_node/peer.rs b/testnet/stacks-node/src/nakamoto_node/peer.rs new file mode 100644 index 0000000000..8fe688972e --- /dev/null +++ b/testnet/stacks-node/src/nakamoto_node/peer.rs @@ -0,0 +1,418 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . +use std::cmp; +use std::collections::VecDeque; + +use std::default::Default; +use std::net::SocketAddr; +use std::sync::mpsc::TrySendError; + +use std::thread; +use std::time::Duration; + +use stacks::burnchains::db::BurnchainHeaderReader; +use stacks::burnchains::PoxConstants; +use stacks::chainstate::burn::db::sortdb::SortitionDB; + +use stacks::chainstate::stacks::db::StacksChainState; +use stacks::chainstate::stacks::miner::signal_mining_blocked; + +use stacks::core::mempool::MemPoolDB; + +use stacks::cost_estimates::metrics::{CostMetric, UnitMetric}; +use stacks::cost_estimates::{CostEstimator, FeeEstimator, UnitEstimator}; + +use stacks::net::dns::{DNSClient, DNSResolver}; +use stacks::net::p2p::PeerNetwork; + +use stacks::net::RPCHandlerArgs; + +use stacks_common::util::hash::Sha256Sum; + +use crate::burnchains::make_bitcoin_indexer; +use crate::globals::Globals; +use crate::globals::RelayerDirective; + +use crate::run_loop::nakamoto::RunLoop; + +use crate::{Config, EventDispatcher}; + +use super::open_chainstate_with_faults; + +/// Thread that runs the network state machine, handling both p2p and http requests. +pub struct PeerThread { + /// Node config + config: Config, + /// instance of the peer network. Made optional in order to trick the borrow checker. 
+ net: Option, + /// handle to global inter-thread comms + globals: Globals, + /// how long to wait for network messages on each poll, in millis + poll_timeout: u64, + /// handle to the sortition DB (optional so we can take/replace it) + sortdb: Option, + /// handle to the chainstate DB (optional so we can take/replace it) + chainstate: Option, + /// handle to the mempool DB (optional so we can take/replace it) + mempool: Option, + /// buffer of relayer commands with block data that couldn't be sent to the relayer just yet + /// (i.e. due to backpressure). We track this separately, instead of just using a bigger + /// channel, because we need to know when backpressure occurs in order to throttle the p2p + /// thread's downloader. + results_with_data: VecDeque, + /// total number of p2p state-machine passes so far. Used to signal when to download the next + /// reward cycle of blocks + num_p2p_state_machine_passes: u64, + /// total number of inventory state-machine passes so far. Used to signal when to download the + /// next reward cycle of blocks. + num_inv_sync_passes: u64, + /// total number of download state-machine passes so far. Used to signal when to download the + /// next reward cycle of blocks. + num_download_passes: u64, + /// last burnchain block seen in the PeerNetwork's chain view since the last run + last_burn_block_height: u64, +} + +impl PeerThread { + /// Main loop of the p2p thread. + /// Runs in a separate thread. + /// Continuously receives, until told otherwise. + pub fn main(mut self, event_dispatcher: EventDispatcher) { + debug!("p2p thread ID is {:?}", thread::current().id()); + let should_keep_running = self.globals.should_keep_running.clone(); + let (mut dns_resolver, mut dns_client) = DNSResolver::new(10); + + // spawn a daemon thread that runs the DNS resolver. + // It will die when the rest of the system dies. 
+ { + let _jh = thread::Builder::new() + .name("dns-resolver".to_string()) + .spawn(move || { + debug!("DNS resolver thread ID is {:?}", thread::current().id()); + dns_resolver.thread_main(); + }) + .unwrap(); + } + + // NOTE: these must be instantiated in the thread context, since it can't be safely sent + // between threads + let fee_estimator_opt = self.config.make_fee_estimator(); + let cost_estimator = self + .config + .make_cost_estimator() + .unwrap_or_else(|| Box::new(UnitEstimator)); + let cost_metric = self + .config + .make_cost_metric() + .unwrap_or_else(|| Box::new(UnitMetric)); + + let indexer = make_bitcoin_indexer(&self.config, Some(should_keep_running)); + + // receive until we can't reach the receiver thread + loop { + if !self.globals.keep_running() { + break; + } + if !self.run_one_pass( + &indexer, + Some(&mut dns_client), + &event_dispatcher, + &cost_estimator, + &cost_metric, + fee_estimator_opt.as_ref(), + ) { + break; + } + } + + // kill miner + signal_mining_blocked(self.globals.get_miner_status()); + + // set termination flag so other threads die + self.globals.signal_stop(); + + // thread exited, so signal to the relayer thread to die. 
+ while let Err(TrySendError::Full(_)) = + self.globals.relay_send.try_send(RelayerDirective::Exit) + { + warn!("Failed to direct relayer thread to exit, sleeping and trying again"); + thread::sleep(Duration::from_secs(5)); + } + info!("P2P thread exit!"); + } + + /// set up the mempool DB connection + pub fn connect_mempool_db(config: &Config) -> MemPoolDB { + // create estimators, metric instances for RPC handler + let cost_estimator = config + .make_cost_estimator() + .unwrap_or_else(|| Box::new(UnitEstimator)); + let metric = config + .make_cost_metric() + .unwrap_or_else(|| Box::new(UnitMetric)); + + let mempool = MemPoolDB::open( + config.is_mainnet(), + config.burnchain.chain_id, + &config.get_chainstate_path_str(), + cost_estimator, + metric, + ) + .expect("Database failure opening mempool"); + + mempool + } + + /// Instantiate the p2p thread. + /// Binds the addresses in the config (which may panic if the port is blocked). + /// This is so the node will crash "early" before any new threads start if there's going to be + /// a bind error anyway. 
+ pub fn new(runloop: &RunLoop, net: PeerNetwork) -> PeerThread { + Self::new_all( + runloop.get_globals(), + runloop.config(), + runloop.get_burnchain().pox_constants, + net, + ) + } + + pub fn new_all( + globals: Globals, + config: &Config, + pox_constants: PoxConstants, + mut net: PeerNetwork, + ) -> Self { + let config = config.clone(); + let mempool = Self::connect_mempool_db(&config); + let burn_db_path = config.get_burn_db_file_path(); + + let sortdb = SortitionDB::open(&burn_db_path, false, pox_constants) + .expect("FATAL: could not open sortition DB"); + + let chainstate = + open_chainstate_with_faults(&config).expect("FATAL: could not open chainstate DB"); + + let p2p_sock: SocketAddr = config.node.p2p_bind.parse().expect(&format!( + "Failed to parse socket: {}", + &config.node.p2p_bind + )); + let rpc_sock = config.node.rpc_bind.parse().expect(&format!( + "Failed to parse socket: {}", + &config.node.rpc_bind + )); + + net.bind(&p2p_sock, &rpc_sock) + .expect("BUG: PeerNetwork could not bind or is already bound"); + + let poll_timeout = cmp::min(5000, config.miner.first_attempt_time_ms / 2); + + PeerThread { + config, + net: Some(net), + globals, + poll_timeout, + sortdb: Some(sortdb), + chainstate: Some(chainstate), + mempool: Some(mempool), + results_with_data: VecDeque::new(), + num_p2p_state_machine_passes: 0, + num_inv_sync_passes: 0, + num_download_passes: 0, + last_burn_block_height: 0, + } + } + + /// Do something with mutable references to the mempool, sortdb, and chainstate + /// Fools the borrow checker. 
+ /// NOT COMPOSIBLE + fn with_chainstate(&mut self, func: F) -> R + where + F: FnOnce(&mut PeerThread, &mut SortitionDB, &mut StacksChainState, &mut MemPoolDB) -> R, + { + let mut sortdb = self.sortdb.take().expect("BUG: sortdb already taken"); + let mut chainstate = self + .chainstate + .take() + .expect("BUG: chainstate already taken"); + let mut mempool = self.mempool.take().expect("BUG: mempool already taken"); + + let res = func(self, &mut sortdb, &mut chainstate, &mut mempool); + + self.sortdb = Some(sortdb); + self.chainstate = Some(chainstate); + self.mempool = Some(mempool); + + res + } + + /// Get an immutable ref to the inner network. + /// DO NOT USE WITHIN with_network() + fn get_network(&self) -> &PeerNetwork { + self.net.as_ref().expect("BUG: did not replace net") + } + + /// Do something with mutable references to the network. + /// Fools the borrow checker. + /// NOT COMPOSIBLE. DO NOT CALL THIS OR get_network() IN func + fn with_network(&mut self, func: F) -> R + where + F: FnOnce(&mut PeerThread, &mut PeerNetwork) -> R, + { + let mut net = self.net.take().expect("BUG: net already taken"); + + let res = func(self, &mut net); + + self.net = Some(net); + res + } + + /// Run one pass of the p2p/http state machine + /// Return true if we should continue running passes; false if not + pub fn run_one_pass( + &mut self, + indexer: &B, + dns_client_opt: Option<&mut DNSClient>, + event_dispatcher: &EventDispatcher, + cost_estimator: &Box, + cost_metric: &Box, + fee_estimator: Option<&Box>, + ) -> bool { + // initial block download? 
+ let ibd = self.globals.sync_comms.get_ibd(); + let download_backpressure = self.results_with_data.len() > 0; + let poll_ms = if !download_backpressure && self.get_network().has_more_downloads() { + // keep getting those blocks -- drive the downloader state-machine + debug!( + "P2P: backpressure: {}, more downloads: {}", + download_backpressure, + self.get_network().has_more_downloads() + ); + 1 + } else { + self.poll_timeout + }; + + // do one pass + let p2p_res = self.with_chainstate(|p2p_thread, sortdb, chainstate, mempool| { + // NOTE: handler_args must be created such that it outlives the inner net.run() call and + // doesn't ref anything within p2p_thread. + let handler_args = RPCHandlerArgs { + exit_at_block_height: p2p_thread + .config + .burnchain + .process_exit_at_block_height + .clone(), + genesis_chainstate_hash: Sha256Sum::from_hex(stx_genesis::GENESIS_CHAINSTATE_HASH) + .unwrap(), + event_observer: Some(event_dispatcher), + cost_estimator: Some(cost_estimator.as_ref()), + cost_metric: Some(cost_metric.as_ref()), + fee_estimator: fee_estimator.map(|boxed_estimator| boxed_estimator.as_ref()), + ..RPCHandlerArgs::default() + }; + p2p_thread.with_network(|_, net| { + net.run( + indexer, + sortdb, + chainstate, + mempool, + dns_client_opt, + download_backpressure, + ibd, + poll_ms, + &handler_args, + ) + }) + }); + + match p2p_res { + Ok(network_result) => { + let mut have_update = false; + if self.num_p2p_state_machine_passes < network_result.num_state_machine_passes { + // p2p state-machine did a full pass. Notify anyone listening. + self.globals.sync_comms.notify_p2p_state_pass(); + self.num_p2p_state_machine_passes = network_result.num_state_machine_passes; + } + + if self.num_inv_sync_passes < network_result.num_inv_sync_passes { + // inv-sync state-machine did a full pass. Notify anyone listening. 
+ self.globals.sync_comms.notify_inv_sync_pass(); + self.num_inv_sync_passes = network_result.num_inv_sync_passes; + + // the relayer cares about the number of inventory passes, so pass this along + have_update = true; + } + + if self.num_download_passes < network_result.num_download_passes { + // download state-machine did a full pass. Notify anyone listening. + self.globals.sync_comms.notify_download_pass(); + self.num_download_passes = network_result.num_download_passes; + + // the relayer cares about the number of download passes, so pass this along + have_update = true; + } + + if network_result.has_data_to_store() + || self.last_burn_block_height != network_result.burn_height + || have_update + { + // pass along if we have blocks, microblocks, or transactions, or a status + // update on the network's view of the burnchain + self.last_burn_block_height = network_result.burn_height; + self.results_with_data + .push_back(RelayerDirective::HandleNetResult(network_result)); + } + } + Err(e) => { + // this is only reachable if the network is not instantiated correctly -- + // i.e. you didn't connect it + panic!("P2P: Failed to process network dispatch: {:?}", &e); + } + }; + + while let Some(next_result) = self.results_with_data.pop_front() { + // have blocks, microblocks, and/or transactions (don't care about anything else), + // or a directive to mine microblocks + if let Err(e) = self.globals.relay_send.try_send(next_result) { + debug!( + "P2P: {:?}: download backpressure detected (bufferred {})", + &self.get_network().local_peer, + self.results_with_data.len() + ); + match e { + TrySendError::Full(directive) => { + if let RelayerDirective::RunTenure(..) 
= directive { + // can drop this + } else { + // don't lose this data -- just try it again + self.results_with_data.push_front(directive); + } + break; + } + TrySendError::Disconnected(_) => { + info!("P2P: Relayer hang up with p2p channel"); + self.globals.signal_stop(); + return false; + } + } + } else { + debug!("P2P: Dispatched result to Relayer!"); + } + } + + true + } +} diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs new file mode 100644 index 0000000000..a90b17866f --- /dev/null +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -0,0 +1,961 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+use stacks::burnchains::{Burnchain, Txid}; +use stacks::chainstate::burn::db::sortdb::SortitionDB; +use stacks::chainstate::burn::operations::leader_block_commit::{ + RewardSetInfo, BURN_BLOCK_MINED_AT_MODULUS, +}; +use stacks::chainstate::burn::operations::{ + BlockstackOperationType, LeaderBlockCommitOp, LeaderKeyRegisterOp, +}; +use stacks::chainstate::burn::{BlockSnapshot, ConsensusHash}; +use stacks::chainstate::coordinator::{get_next_recipients, OnChainRewardSetProvider}; +use stacks::chainstate::nakamoto::NakamotoChainState; +use stacks::chainstate::stacks::address::PoxAddress; +use stacks::chainstate::stacks::db::StacksChainState; +use stacks::chainstate::stacks::miner::{ + get_mining_spend_amount, signal_mining_blocked, signal_mining_ready, +}; +use stacks::core::mempool::MemPoolDB; +use stacks::core::FIRST_BURNCHAIN_CONSENSUS_HASH; +use stacks::core::FIRST_STACKS_BLOCK_HASH; +use stacks::core::STACKS_EPOCH_3_0_MARKER; +use stacks::cost_estimates::metrics::UnitMetric; +use stacks::cost_estimates::UnitEstimator; +use stacks::monitoring::increment_stx_blocks_mined_counter; +use stacks::net::db::LocalPeer; +use stacks::net::relay::Relayer; +use stacks::net::NetworkResult; +use stacks_common::types::chainstate::{ + BlockHeaderHash, BurnchainHeaderHash, StacksBlockId, VRFSeed, +}; +use stacks_common::types::StacksEpochId; +use stacks_common::util::get_epoch_time_ms; +use stacks_common::util::hash::Hash160; +use stacks_common::util::vrf::{VRFProof, VRFPublicKey}; +use std::collections::HashMap; +use std::sync::mpsc::Receiver; +use std::sync::mpsc::RecvTimeoutError; +use std::thread::JoinHandle; +use std::time::Duration; +use std::time::Instant; + +use super::Error as NakamotoNodeError; +use super::{ + fault_injection_skip_mining, open_chainstate_with_faults, BlockCommits, Config, + EventDispatcher, Keychain, BLOCK_PROCESSOR_STACK_SIZE, +}; +use crate::burnchains::BurnchainController; +use crate::globals::Globals; +use crate::globals::RelayerDirective; +use 
crate::nakamoto_node::miner::{BlockMinerThread, MinerDirective};
+use crate::neon_node::LeaderKeyRegistrationState;
+use crate::run_loop::nakamoto::RunLoop;
+use crate::run_loop::RegisteredKey;
+use crate::BitcoinRegtestController;
+
+/// Relayer thread
+/// * accepts network results and stores blocks and microblocks
+/// * forwards new blocks, microblocks, and transactions to the p2p thread
+/// * processes burnchain state
+/// * if mining, runs the miner and broadcasts blocks (via a subordinate MinerThread)
+pub struct RelayerThread {
+    /// Node config
+    pub(crate) config: Config,
+    /// Handle to the sortition DB (optional so we can take/replace it)
+    sortdb: Option<SortitionDB>,
+    /// Handle to the chainstate DB (optional so we can take/replace it)
+    chainstate: Option<StacksChainState>,
+    /// Handle to the mempool DB (optional so we can take/replace it)
+    mempool: Option<MemPoolDB>,
+    /// Handle to global state and inter-thread communication channels
+    pub(crate) globals: Globals,
+    /// Authoritative copy of the keychain state
+    pub(crate) keychain: Keychain,
+    /// Burnchain configuration
+    pub(crate) burnchain: Burnchain,
+    /// height of last VRF key registration request
+    last_vrf_key_burn_height: Option<u64>,
+    /// Set of blocks that we have mined, but are still potentially-broadcastable
+    // TODO: this field is a slow leak!
+ pub(crate) last_commits: BlockCommits, + /// client to the burnchain (used only for sending block-commits) + pub(crate) bitcoin_controller: BitcoinRegtestController, + /// client to the event dispatcher + pub(crate) event_dispatcher: EventDispatcher, + /// copy of the local peer state + local_peer: LocalPeer, + /// last observed burnchain block height from the p2p thread (obtained from network results) + last_network_block_height: u64, + /// time at which we observed a change in the network block height (epoch time in millis) + last_network_block_height_ts: u128, + /// last observed number of downloader state-machine passes from the p2p thread (obtained from + /// network results) + last_network_download_passes: u64, + /// last observed number of inventory state-machine passes from the p2p thread (obtained from + /// network results) + last_network_inv_passes: u64, + /// minimum number of downloader state-machine passes that must take place before mining (this + /// is used to ensure that the p2p thread attempts to download new Stacks block data before + /// this thread tries to mine a block) + min_network_download_passes: u64, + /// minimum number of inventory state-machine passes that must take place before mining (this + /// is used to ensure that the p2p thread attempts to download new Stacks block data before + /// this thread tries to mine a block) + min_network_inv_passes: u64, + + /// Inner relayer instance for forwarding broadcasted data back to the p2p thread for dispatch + /// to neighbors + relayer: Relayer, + + /// handle to the subordinate miner thread + miner_thread: Option>, + /// The relayer thread reads directives from the relay_rcv, but it also periodically wakes up + /// to check if it should issue a block commit or try to register a VRF key + next_initiative: Instant, + is_miner: bool, + /// This is the last snapshot in which the relayer committed + last_committed_at: Option, +} + +impl RelayerThread { + /// Instantiate off of a StacksNode, a 
runloop, and a relayer. + pub fn new(runloop: &RunLoop, local_peer: LocalPeer, relayer: Relayer) -> RelayerThread { + let config = runloop.config().clone(); + let globals = runloop.get_globals(); + let burn_db_path = config.get_burn_db_file_path(); + let stacks_chainstate_path = config.get_chainstate_path_str(); + let is_mainnet = config.is_mainnet(); + let chain_id = config.burnchain.chain_id; + let is_miner = runloop.is_miner(); + + let sortdb = SortitionDB::open(&burn_db_path, true, runloop.get_burnchain().pox_constants) + .expect("FATAL: failed to open burnchain DB"); + + let chainstate = + open_chainstate_with_faults(&config).expect("FATAL: failed to open chainstate DB"); + + let cost_estimator = config + .make_cost_estimator() + .unwrap_or_else(|| Box::new(UnitEstimator)); + let metric = config + .make_cost_metric() + .unwrap_or_else(|| Box::new(UnitMetric)); + + let mempool = MemPoolDB::open( + is_mainnet, + chain_id, + &stacks_chainstate_path, + cost_estimator, + metric, + ) + .expect("Database failure opening mempool"); + + let keychain = Keychain::default(config.node.seed.clone()); + let bitcoin_controller = BitcoinRegtestController::new_dummy(config.clone()); + + RelayerThread { + config: config.clone(), + sortdb: Some(sortdb), + chainstate: Some(chainstate), + mempool: Some(mempool), + globals, + keychain, + burnchain: runloop.get_burnchain(), + last_vrf_key_burn_height: None, + last_commits: HashMap::new(), + bitcoin_controller, + event_dispatcher: runloop.get_event_dispatcher(), + local_peer, + + last_network_block_height: 0, + last_network_block_height_ts: 0, + last_network_download_passes: 0, + min_network_download_passes: 0, + last_network_inv_passes: 0, + min_network_inv_passes: 0, + + relayer, + + miner_thread: None, + is_miner, + next_initiative: Instant::now() + Duration::from_secs(10), + last_committed_at: None, + } + } + + /// Get an immutible ref to the sortdb + pub fn sortdb_ref(&self) -> &SortitionDB { + self.sortdb + .as_ref() + 
.expect("FATAL: tried to access sortdb while taken")
+    }
+
+    /// Get an immutable ref to the chainstate
+    pub fn chainstate_ref(&self) -> &StacksChainState {
+        self.chainstate
+            .as_ref()
+            .expect("FATAL: tried to access chainstate while it was taken")
+    }
+
+    /// Fool the borrow checker into letting us do something with the chainstate databases.
+    /// DOES NOT COMPOSE -- do NOT call this, or self.sortdb_ref(), or self.chainstate_ref(), within
+    /// `func`. You will get a runtime panic.
+    pub fn with_chainstate<F, R>(&mut self, func: F) -> R
+    where
+        F: FnOnce(&mut RelayerThread, &mut SortitionDB, &mut StacksChainState, &mut MemPoolDB) -> R,
+    {
+        let mut sortdb = self
+            .sortdb
+            .take()
+            .expect("FATAL: tried to take sortdb while taken");
+        let mut chainstate = self
+            .chainstate
+            .take()
+            .expect("FATAL: tried to take chainstate while taken");
+        let mut mempool = self
+            .mempool
+            .take()
+            .expect("FATAL: tried to take mempool while taken");
+        let res = func(self, &mut sortdb, &mut chainstate, &mut mempool);
+        self.sortdb = Some(sortdb);
+        self.chainstate = Some(chainstate);
+        self.mempool = Some(mempool);
+        res
+    }
+
+    /// have we waited for the right conditions under which to start mining a block off of our
+    /// chain tip?
+    pub fn has_waited_for_latest_blocks(&self) -> bool {
+        // a network download pass took place
+        (self.min_network_download_passes <= self.last_network_download_passes
+            // a network inv pass took place
+            && self.min_network_inv_passes <= self.last_network_inv_passes)
+            // we waited long enough for a download pass, but timed out waiting
+            || self.last_network_block_height_ts + (self.config.node.wait_time_for_blocks as u128) < get_epoch_time_ms()
+            // we're not supposed to wait at all
+            || !self.config.miner.wait_for_block_download
+    }
+
+    /// Return debug string for waiting for latest blocks
+    pub fn debug_waited_for_latest_blocks(&self) -> String {
+        format!(
+            "({} <= {} && {} <= {}) || {} + {} < {} || {}",
+            self.min_network_download_passes,
+            self.last_network_download_passes,
+            self.min_network_inv_passes,
+            self.last_network_inv_passes,
+            self.last_network_block_height_ts,
+            self.config.node.wait_time_for_blocks,
+            get_epoch_time_ms(),
+            self.config.miner.wait_for_block_download
+        )
+    }
+
+    /// Handle a NetworkResult from the p2p/http state machine. Usually this is the act of
+    /// * preprocessing and storing new blocks and microblocks
+    /// * relaying blocks, microblocks, and transactions
+    /// * updating unconfirmed state views
+    pub fn process_network_result(&mut self, mut net_result: NetworkResult) {
+        debug!(
+            "Relayer: Handle network result (from {})",
+            net_result.burn_height
+        );
+
+        if self.last_network_block_height != net_result.burn_height {
+            // burnchain advanced; disable mining until we also do a download pass.
+ self.last_network_block_height = net_result.burn_height; + self.min_network_download_passes = net_result.num_download_passes + 1; + self.min_network_inv_passes = net_result.num_inv_sync_passes + 1; + self.last_network_block_height_ts = get_epoch_time_ms(); + debug!( + "Relayer: block mining until the next download pass {}", + self.min_network_download_passes + ); + signal_mining_blocked(self.globals.get_miner_status()); + } + + let net_receipts = self.with_chainstate(|relayer_thread, sortdb, chainstate, mempool| { + relayer_thread + .relayer + .process_network_result( + &relayer_thread.local_peer, + &mut net_result, + sortdb, + chainstate, + mempool, + relayer_thread.globals.sync_comms.get_ibd(), + Some(&relayer_thread.globals.coord_comms), + Some(&relayer_thread.event_dispatcher), + ) + .expect("BUG: failure processing network results") + }); + + if net_receipts.num_new_blocks > 0 || net_receipts.num_new_confirmed_microblocks > 0 { + // if we received any new block data that could invalidate our view of the chain tip, + // then stop mining until we process it + debug!("Relayer: block mining to process newly-arrived blocks or microblocks"); + signal_mining_blocked(self.globals.get_miner_status()); + } + + let mempool_txs_added = net_receipts.mempool_txs_added.len(); + if mempool_txs_added > 0 { + self.event_dispatcher + .process_new_mempool_txs(net_receipts.mempool_txs_added); + } + + let num_unconfirmed_microblock_tx_receipts = + net_receipts.processed_unconfirmed_state.receipts.len(); + if num_unconfirmed_microblock_tx_receipts > 0 { + if let Some(unconfirmed_state) = self.chainstate_ref().unconfirmed_state.as_ref() { + let canonical_tip = unconfirmed_state.confirmed_chain_tip.clone(); + self.event_dispatcher.process_new_microblocks( + canonical_tip, + net_receipts.processed_unconfirmed_state, + ); + } else { + warn!("Relayer: oops, unconfirmed state is uninitialized but there are microblock events"); + } + } + + // Dispatch retrieved attachments, if any. 
+ if net_result.has_attachments() { + self.event_dispatcher + .process_new_attachments(&net_result.attachments); + } + + // synchronize unconfirmed tx index to p2p thread + self.with_chainstate(|relayer_thread, _sortdb, chainstate, _mempool| { + relayer_thread.globals.send_unconfirmed_txs(chainstate); + }); + + // resume mining if we blocked it, and if we've done the requisite download + // passes + self.last_network_download_passes = net_result.num_download_passes; + self.last_network_inv_passes = net_result.num_inv_sync_passes; + if self.has_waited_for_latest_blocks() { + debug!("Relayer: did a download pass, so unblocking mining"); + signal_mining_ready(self.globals.get_miner_status()); + } + } + + /// Given the pointer to a recently processed sortition, see if we won the sortition. + /// + /// Returns `true` if we won this last sortition. + pub fn process_sortition( + &mut self, + consensus_hash: ConsensusHash, + burn_hash: BurnchainHeaderHash, + committed_index_hash: StacksBlockId, + ) -> MinerDirective { + let sn = + SortitionDB::get_block_snapshot_consensus(self.sortdb_ref().conn(), &consensus_hash) + .expect("FATAL: failed to query sortition DB") + .expect("FATAL: unknown consensus hash"); + + self.globals.set_last_sortition(sn.clone()); + + let won_sortition = + sn.sortition && self.last_commits.remove(&sn.winning_block_txid).is_some(); + + info!( + "Relayer: Process sortition"; + "sortition_ch" => %consensus_hash, + "burn_hash" => %burn_hash, + "burn_height" => sn.block_height, + "winning_txid" => %sn.winning_block_txid, + "committed_parent" => %committed_index_hash, + "won_sortition?" 
=> won_sortition, + ); + + if won_sortition { + increment_stx_blocks_mined_counter(); + } + + if sn.sortition { + if won_sortition { + MinerDirective::BeginTenure { + parent_tenure_start: committed_index_hash, + burnchain_tip: sn, + } + } else { + MinerDirective::StopTenure + } + } else { + MinerDirective::ContinueTenure { + new_burn_view: consensus_hash, + } + } + } + + /// Constructs and returns a LeaderKeyRegisterOp out of the provided params + fn make_key_register_op( + vrf_public_key: VRFPublicKey, + consensus_hash: &ConsensusHash, + miner_pkh: &Hash160, + ) -> BlockstackOperationType { + BlockstackOperationType::LeaderKeyRegister(LeaderKeyRegisterOp { + public_key: vrf_public_key, + memo: miner_pkh.as_bytes().to_vec(), + consensus_hash: consensus_hash.clone(), + vtxindex: 0, + txid: Txid([0u8; 32]), + block_height: 0, + burn_header_hash: BurnchainHeaderHash::zero(), + }) + } + + /// Create and broadcast a VRF public key registration transaction. + /// Returns true if we succeed in doing so; false if not. 
+ pub fn rotate_vrf_and_register(&mut self, burn_block: &BlockSnapshot) { + if self.last_vrf_key_burn_height.is_some() { + // already in-flight + return; + } + let cur_epoch = + SortitionDB::get_stacks_epoch(self.sortdb_ref().conn(), burn_block.block_height) + .expect("FATAL: failed to query sortition DB") + .expect("FATAL: no epoch defined") + .epoch_id; + let (vrf_pk, _) = self.keychain.make_vrf_keypair(burn_block.block_height); + let burnchain_tip_consensus_hash = &burn_block.consensus_hash; + let miner_pkh = self.keychain.get_nakamoto_pkh(); + + debug!( + "Submitting LeaderKeyRegister"; + "vrf_pk" => vrf_pk.to_hex(), + "burn_block_height" => burn_block.block_height, + "miner_pkh" => miner_pkh.to_hex(), + ); + + let op = Self::make_key_register_op(vrf_pk, burnchain_tip_consensus_hash, &miner_pkh); + + let mut op_signer = self.keychain.generate_op_signer(); + if let Some(txid) = + self.bitcoin_controller + .submit_operation(cur_epoch, op, &mut op_signer, 1) + { + // advance key registration state + self.last_vrf_key_burn_height = Some(burn_block.block_height); + self.globals + .set_pending_leader_key_registration(burn_block.block_height, txid); + self.globals.counters.bump_naka_submitted_vrfs(); + } + } + + /// Produce the block-commit for this anchored block, if we can. + /// `target_ch` is the consensus-hash of the Tenure we will build off + /// `target_bh` is the block hash of the Tenure we will build off + /// Returns the (the most recent burn snapshot, the expected epoch, the commit-op) on success + /// Returns None if we fail somehow. 
+ fn make_block_commit( + &mut self, + target_ch: &ConsensusHash, + target_bh: &BlockHeaderHash, + ) -> Result<(BlockSnapshot, StacksEpochId, LeaderBlockCommitOp), NakamotoNodeError> { + let chain_state = self + .chainstate + .as_mut() + .expect("FATAL: Failed to load chain state"); + let sort_db = self.sortdb.as_mut().expect("FATAL: Failed to load sortdb"); + let sort_tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()) + .map_err(|_| NakamotoNodeError::SnapshotNotFoundForChainTip)?; + + let parent_vrf_proof = + NakamotoChainState::get_block_vrf_proof(chain_state.db(), &target_ch) + .map_err(|_e| NakamotoNodeError::ParentNotFound)? + .unwrap_or_else(|| VRFProof::empty()); + + // let's figure out the recipient set! + let recipients = get_next_recipients( + &sort_tip, + chain_state, + sort_db, + &self.burnchain, + &OnChainRewardSetProvider(), + self.config.node.always_use_affirmation_maps, + ) + .map_err(|e| { + error!("Relayer: Failure fetching recipient set: {:?}", e); + NakamotoNodeError::SnapshotNotFoundForChainTip + })?; + + let block_header = + NakamotoChainState::get_block_header_by_consensus_hash(chain_state.db(), target_ch) + .map_err(|e| { + error!("Relayer: Failed to get block header for parent tenure: {e:?}"); + NakamotoNodeError::ParentNotFound + })? 
+ .ok_or_else(|| { + error!("Relayer: Failed to find block header for parent tenure"); + NakamotoNodeError::ParentNotFound + })?; + + let parent_block_id = block_header.index_block_hash(); + if parent_block_id != StacksBlockId::new(target_ch, target_bh) { + error!("Relayer: Found block header for parent tenure, but mismatched block id"; + "expected_block_id" => %StacksBlockId::new(target_ch, target_bh), + "found_block_id" => %parent_block_id); + return Err(NakamotoNodeError::UnexpectedChainState); + } + + let Ok(Some(parent_sortition)) = + SortitionDB::get_block_snapshot_consensus(sort_db.conn(), target_ch) + else { + error!("Relayer: Failed to lookup the block snapshot of parent tenure ID"; "tenure_consensus_hash" => %target_ch); + return Err(NakamotoNodeError::ParentNotFound); + }; + + let Ok(Some(target_epoch)) = + SortitionDB::get_stacks_epoch(sort_db.conn(), sort_tip.block_height + 1) + else { + error!("Relayer: Failed to lookup its epoch"; "target_height" => sort_tip.block_height + 1); + return Err(NakamotoNodeError::SnapshotNotFoundForChainTip); + }; + + let parent_block_burn_height = parent_sortition.block_height; + let Ok(Some(parent_winning_tx)) = SortitionDB::get_block_commit( + sort_db.conn(), + &parent_sortition.winning_block_txid, + &parent_sortition.sortition_id, + ) else { + error!("Relayer: Failed to lookup the block commit of parent tenure ID"; "tenure_consensus_hash" => %target_ch); + return Err(NakamotoNodeError::SnapshotNotFoundForChainTip); + }; + + let parent_winning_vtxindex = parent_winning_tx.vtxindex; + + // let burn_fee_cap = self.config.burnchain.burn_fee_cap; + let burn_fee_cap = get_mining_spend_amount(self.globals.get_miner_status()); + let sunset_burn = self.burnchain.expected_sunset_burn( + sort_tip.block_height + 1, + burn_fee_cap, + target_epoch.epoch_id, + ); + let rest_commit = burn_fee_cap - sunset_burn; + + let commit_outs = if !self + .burnchain + .pox_constants + .is_after_pox_sunset_end(sort_tip.block_height, 
target_epoch.epoch_id) + && !self + .burnchain + .is_in_prepare_phase(sort_tip.block_height + 1) + { + RewardSetInfo::into_commit_outs(recipients, self.config.is_mainnet()) + } else { + vec![PoxAddress::standard_burn_address(self.config.is_mainnet())] + }; + + // let's commit, but target the current burnchain tip with our modulus + let burn_parent_modulus = u8::try_from(sort_tip.block_height % BURN_BLOCK_MINED_AT_MODULUS) + .map_err(|_| { + error!("Relayer: Block mining modulus is not u8"); + NakamotoNodeError::UnexpectedChainState + })?; + let sender = self.keychain.get_burnchain_signer(); + let key = self + .globals + .get_leader_key_registration_state() + .get_active() + .ok_or_else(|| NakamotoNodeError::NoVRFKeyActive)?; + let op = LeaderBlockCommitOp { + sunset_burn, + block_header_hash: BlockHeaderHash(parent_block_id.0), + burn_fee: rest_commit, + input: (Txid([0; 32]), 0), + apparent_sender: sender, + key_block_ptr: u32::try_from(key.block_height) + .expect("FATAL: burn block height exceeded u32"), + key_vtxindex: u16::try_from(key.op_vtxindex).expect("FATAL: vtxindex exceeded u16"), + memo: vec![STACKS_EPOCH_3_0_MARKER], + new_seed: VRFSeed::from_proof(&parent_vrf_proof), + parent_block_ptr: u32::try_from(parent_block_burn_height) + .expect("FATAL: burn block height exceeded u32"), + parent_vtxindex: u16::try_from(parent_winning_vtxindex) + .expect("FATAL: vtxindex exceeded u16"), + vtxindex: 0, + txid: Txid([0u8; 32]), + block_height: 0, + burn_header_hash: BurnchainHeaderHash::zero(), + burn_parent_modulus, + commit_outs, + }; + + Ok((sort_tip, target_epoch.epoch_id, op)) + } + + /// Create the block miner thread state. 
+ /// Only proceeds if all of the following are true: + /// * the miner is not blocked + /// * last_burn_block corresponds to the canonical sortition DB's chain tip + /// * the time of issuance is sufficiently recent + /// * there are no unprocessed stacks blocks in the staging DB + /// * the relayer has already tried a download scan that included this sortition (which, if a + /// block was found, would have placed it into the staging DB and marked it as + /// unprocessed) + /// * a miner thread is not running already + fn create_block_miner( + &mut self, + registered_key: RegisteredKey, + last_burn_block: BlockSnapshot, + parent_tenure_id: StacksBlockId, + ) -> Result<BlockMinerThread, NakamotoNodeError> { + if fault_injection_skip_mining(&self.config.node.rpc_bind, last_burn_block.block_height) { + debug!( + "Relayer: fault injection skip mining at block height {}", + last_burn_block.block_height + ); + return Err(NakamotoNodeError::FaultInjection); + } + + let burn_header_hash = last_burn_block.burn_header_hash.clone(); + let burn_chain_sn = SortitionDB::get_canonical_burn_chain_tip(self.sortdb_ref().conn()) + .expect("FATAL: failed to query sortition DB for canonical burn chain tip"); + + let burn_chain_tip = burn_chain_sn.burn_header_hash.clone(); + + if burn_chain_tip != burn_header_hash { + debug!( + "Relayer: Drop stale RunTenure for {}: current sortition is for {}", + &burn_header_hash, &burn_chain_tip + ); + self.globals.counters.bump_missed_tenures(); + return Err(NakamotoNodeError::MissedMiningOpportunity); + } + + debug!( + "Relayer: Spawn tenure thread"; + "height" => last_burn_block.block_height, + "burn_header_hash" => %burn_header_hash, + ); + + let miner_thread_state = + BlockMinerThread::new(self, registered_key, last_burn_block, parent_tenure_id); + Ok(miner_thread_state) + } + + fn start_new_tenure( + &mut self, + parent_tenure_start: StacksBlockId, + burn_tip: BlockSnapshot, + ) -> Result<(), NakamotoNodeError> { + // when starting a new tenure, block the mining thread if it's
currently running. + // the new mining thread will join it (so that the new mining thread stalls, not the relayer) + let prior_tenure_thread = self.miner_thread.take(); + let vrf_key = self + .globals + .get_leader_key_registration_state() + .get_active() + .ok_or_else(|| { + warn!("Trying to start new tenure, but no VRF key active"); + NakamotoNodeError::NoVRFKeyActive + })?; + let new_miner_state = self.create_block_miner(vrf_key, burn_tip, parent_tenure_start)?; + + let new_miner_handle = std::thread::Builder::new() + .name(format!("miner-{}", self.local_peer.data_url)) + .stack_size(BLOCK_PROCESSOR_STACK_SIZE) + .spawn(move || new_miner_state.run_miner(prior_tenure_thread)) + .map_err(|e| { + error!("Relayer: Failed to start tenure thread: {:?}", &e); + NakamotoNodeError::SpawnError(e) + })?; + + self.miner_thread.replace(new_miner_handle); + + Ok(()) + } + + fn stop_tenure(&mut self) -> Result<(), NakamotoNodeError> { + // when stopping a tenure, block the mining thread if its currently running, then join it. 
+ // do this in a new thread (so that the new thread stalls, not the relayer) + let Some(prior_tenure_thread) = self.miner_thread.take() else { + return Ok(()); + }; + let globals = self.globals.clone(); + + let stop_handle = std::thread::Builder::new() + .name(format!("tenure-stop-{}", self.local_peer.data_url)) + .spawn(move || BlockMinerThread::stop_miner(&globals, prior_tenure_thread)) + .map_err(|e| { + error!("Relayer: Failed to spawn a stop-tenure thread: {:?}", &e); + NakamotoNodeError::SpawnError(e) + })?; + + self.miner_thread.replace(stop_handle); + + Ok(()) + } + + fn handle_sortition( + &mut self, + consensus_hash: ConsensusHash, + burn_hash: BurnchainHeaderHash, + committed_index_hash: StacksBlockId, + ) -> bool { + let miner_instruction = + self.process_sortition(consensus_hash, burn_hash, committed_index_hash); + + match miner_instruction { + MinerDirective::BeginTenure { + parent_tenure_start, + burnchain_tip, + } => { + let _ = self.start_new_tenure(parent_tenure_start, burnchain_tip); + } + MinerDirective::ContinueTenure { new_burn_view: _ } => { + // TODO: in this case, we eventually want to undergo a tenure + // change to switch to the new burn view, but right now, we will + // simply end our current tenure if it exists + let _ = self.stop_tenure(); + } + MinerDirective::StopTenure => { + let _ = self.stop_tenure(); + } + } + + true + } + + fn issue_block_commit( + &mut self, + tenure_start_ch: ConsensusHash, + tenure_start_bh: BlockHeaderHash, + ) -> Result<(), NakamotoNodeError> { + let (last_committed_at, target_epoch_id, commit) = + self.make_block_commit(&tenure_start_ch, &tenure_start_bh)?; + let mut op_signer = self.keychain.generate_op_signer(); + let txid = self + .bitcoin_controller + .submit_operation( + target_epoch_id, + BlockstackOperationType::LeaderBlockCommit(commit), + &mut op_signer, + 1, + ) + .ok_or_else(|| { + warn!("Failed to submit block-commit bitcoin transaction"); + NakamotoNodeError::BurnchainSubmissionFailed + 
})?; + info!( + "Relayer: Submitted block-commit"; + "parent_consensus_hash" => %tenure_start_ch, + "parent_block_hash" => %tenure_start_bh, + "txid" => %txid, + ); + + self.last_commits.insert(txid, ()); + self.last_committed_at = Some(last_committed_at); + self.globals.counters.bump_naka_submitted_commits(); + + Ok(()) + } + + fn initiative(&mut self) -> Option<RelayerDirective> { + if !self.is_miner { + return None; + } + + // TODO (nakamoto): the miner shouldn't issue either of these directives + // if we're still in IBD! + + // do we need a VRF key registration? + if matches!( + self.globals.get_leader_key_registration_state(), + LeaderKeyRegistrationState::Inactive + ) { + let Ok(sort_tip) = SortitionDB::get_canonical_burn_chain_tip(self.sortdb_ref().conn()) + else { + warn!("Failed to fetch sortition tip while needing to register VRF key"); + return None; + }; + return Some(RelayerDirective::RegisterKey(sort_tip)); + } + + // are we still waiting on a pending registration? + if !matches!( + self.globals.get_leader_key_registration_state(), + LeaderKeyRegistrationState::Active(_) + ) { + return None; + } + + // has there been a new sortition + let Ok(sort_tip) = SortitionDB::get_canonical_burn_chain_tip(self.sortdb_ref().conn()) + else { + return None; + }; + + let should_commit = if let Some(last_committed_at) = self.last_committed_at.as_ref() { + // if the new sortition tip has a different consensus hash than the last commit, + // issue a new commit + sort_tip.consensus_hash != last_committed_at.consensus_hash + } else { + // if there was no last commit, issue a new commit + true + }; + + let Ok(Some(chain_tip_header)) = NakamotoChainState::get_canonical_block_header( + self.chainstate_ref().db(), + self.sortdb_ref(), + ) else { + info!("No known canonical tip, will issue a genesis block commit"); + return Some(RelayerDirective::NakamotoTenureStartProcessed( + FIRST_BURNCHAIN_CONSENSUS_HASH, + FIRST_STACKS_BLOCK_HASH, + )); + }; + + if should_commit { + // TODO: just use
`get_block_header_by_consensus_hash`? + let first_block_hash = if chain_tip_header + .anchored_header + .as_stacks_nakamoto() + .is_some() + { + // if the parent block is a nakamoto block, find the starting block of its tenure + let Ok(Some(first_block)) = + NakamotoChainState::get_nakamoto_tenure_start_block_header( + self.chainstate_ref().db(), + &chain_tip_header.consensus_hash, + ) + else { + warn!("Failure getting the first block of tenure in order to assemble block commit"; + "tenure_consensus_hash" => %chain_tip_header.consensus_hash, + "tip_block_hash" => %chain_tip_header.anchored_header.block_hash()); + return None; + }; + first_block.anchored_header.block_hash() + } else { + // otherwise the parent block is an epoch2 block, just return its hash directly + chain_tip_header.anchored_header.block_hash() + }; + return Some(RelayerDirective::NakamotoTenureStartProcessed( + chain_tip_header.consensus_hash, + first_block_hash, + )); + } + + return None; + } + + /// Main loop of the relayer. + /// Runs in a separate thread. + /// Continuously receives and handles `RelayerDirective`s from the channel. + pub fn main(mut self, relay_rcv: Receiver<RelayerDirective>) { + debug!("relayer thread ID is {:?}", std::thread::current().id()); + + self.next_initiative = Instant::now() + Duration::from_secs(10); + while self.globals.keep_running() { + let directive = if Instant::now() >= self.next_initiative { + self.next_initiative = Instant::now() + Duration::from_secs(10); + self.initiative() + } else { + None + }; + + let Some(timeout) = self.next_initiative.checked_duration_since(Instant::now()) else { + // next_initiative timeout occurred, so go to next loop iteration.
+ continue; + }; + + let directive = if let Some(directive) = directive { + directive + } else { + match relay_rcv.recv_timeout(timeout) { + Ok(directive) => directive, + // timed out, so go to next loop iteration + Err(RecvTimeoutError::Timeout) => continue, + Err(RecvTimeoutError::Disconnected) => break, + } + }; + + if !self.handle_directive(directive) { + break; + } + } + + // kill miner if it's running + signal_mining_blocked(self.globals.get_miner_status()); + + // set termination flag so other threads die + self.globals.signal_stop(); + + debug!("Relayer exit!"); + } + + /// Top-level dispatcher + pub fn handle_directive(&mut self, directive: RelayerDirective) -> bool { + let continue_running = match directive { + RelayerDirective::HandleNetResult(net_result) => { + debug!("Relayer: directive Handle network result"); + self.process_network_result(net_result); + debug!("Relayer: directive Handled network result"); + true + } + // RegisterKey directives mean that the relayer should try to register a new VRF key. + // These are triggered by the relayer waking up without an active VRF key. + RelayerDirective::RegisterKey(last_burn_block) => { + if !self.is_miner { + return true; + } + debug!("Relayer: directive Register VRF key"); + self.rotate_vrf_and_register(&last_burn_block); + self.globals.counters.bump_blocks_processed(); + debug!("Relayer: directive Registered VRF key"); + true + } + // ProcessTenure directives correspond to a new sortition occurring. 
+ // relayer should invoke `handle_sortition` to determine if they won the sortition, + // and to start their miner, or stop their miner if an active tenure is now ending + RelayerDirective::ProcessTenure(consensus_hash, burn_hash, block_header_hash) => { + if !self.is_miner { + return true; + } + info!("Relayer: directive Process tenures"); + let res = self.handle_sortition( + consensus_hash, + burn_hash, + StacksBlockId(block_header_hash.0), + ); + info!("Relayer: directive Processed tenures"); + res + } + // NakamotoTenureStartProcessed directives mean that a new tenure start has been processed + // These are triggered by the relayer waking up, seeing a new consensus hash *and* a new first tenure block + RelayerDirective::NakamotoTenureStartProcessed(consensus_hash, block_hash) => { + if !self.is_miner { + return true; + } + debug!("Relayer: Nakamoto Tenure Start"); + if let Err(e) = self.issue_block_commit(consensus_hash, block_hash) { + warn!("Relayer failed to issue block commit"; "err" => ?e); + } + debug!("Relayer: Nakamoto Tenure Start"); + true + } + RelayerDirective::RunTenure(..) => { + // No Op: the nakamoto node does not use the RunTenure directive to control its + // miner thread. 
+ true + } + RelayerDirective::Exit => false, + }; + + continue_running + } +} diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index 5ef68a4c28..c23bf1fc19 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -142,9 +142,7 @@ use std::collections::{HashMap, VecDeque}; use std::convert::{TryFrom, TryInto}; use std::default::Default; use std::net::SocketAddr; -use std::sync::atomic::{AtomicBool, Ordering}; -use std::sync::mpsc::{Receiver, SyncSender, TrySendError}; -use std::sync::{Arc, Mutex}; +use std::sync::mpsc::{Receiver, TrySendError}; use std::thread::JoinHandle; use std::time::Duration; use std::{mem, thread}; @@ -162,15 +160,13 @@ use stacks::chainstate::burn::operations::{ BlockstackOperationType, LeaderBlockCommitOp, LeaderKeyRegisterOp, }; use stacks::chainstate::burn::{BlockSnapshot, ConsensusHash}; -use stacks::chainstate::coordinator::comm::CoordinatorChannels; use stacks::chainstate::coordinator::{get_next_recipients, OnChainRewardSetProvider}; use stacks::chainstate::nakamoto::NakamotoChainState; use stacks::chainstate::stacks::address::PoxAddress; -use stacks::chainstate::stacks::db::unconfirmed::UnconfirmedTxMap; use stacks::chainstate::stacks::db::{StacksChainState, StacksHeaderInfo, MINER_REWARD_MATURITY}; use stacks::chainstate::stacks::miner::{ get_mining_spend_amount, signal_mining_blocked, signal_mining_ready, BlockBuilderSettings, - MinerStatus, StacksMicroblockBuilder, + StacksMicroblockBuilder, }; use stacks::chainstate::stacks::{ CoinbasePayload, Error as ChainstateError, StacksBlock, StacksBlockBuilder, StacksBlockHeader, @@ -210,9 +206,10 @@ use crate::burnchains::bitcoin_regtest_controller::{ addr2str, BitcoinRegtestController, OngoingBlockCommit, }; use crate::burnchains::make_bitcoin_indexer; -use crate::run_loop::neon::{Counters, RunLoop}; +use crate::globals::Globals; +use crate::globals::RelayerDirective; +use crate::run_loop::neon::RunLoop; use 
crate::run_loop::RegisteredKey; -use crate::syncctl::PoxSyncWatchdogComms; use crate::ChainTip; pub const RELAYER_MAX_BUFFER: usize = 100; @@ -256,44 +253,6 @@ struct AssembledAnchorBlock { tenure_begin: u128, } -/// Command types for the relayer thread, issued to it by other threads -pub enum RelayerDirective { - /// Handle some new data that arrived on the network (such as blocks, transactions, and - /// microblocks) - HandleNetResult(NetworkResult), - /// Announce a new sortition. Process and broadcast the block if we won. - ProcessTenure(ConsensusHash, BurnchainHeaderHash, BlockHeaderHash), - /// Try to mine a block - RunTenure(RegisteredKey, BlockSnapshot, u128), // (vrf key, chain tip, time of issuance in ms) - /// Try to register a VRF public key - RegisterKey(BlockSnapshot), - /// Stop the relayer thread - Exit, -} - -/// Inter-thread communication structure, shared between threads -#[derive(Clone)] -pub struct Globals { - /// Last sortition processed - last_sortition: Arc>>, - /// Status of the miner - miner_status: Arc>, - /// Communication link to the coordinator thread - coord_comms: CoordinatorChannels, - /// Unconfirmed transactions (shared between the relayer and p2p threads) - unconfirmed_txs: Arc>, - /// Writer endpoint to the relayer thread - relay_send: SyncSender, - /// Cointer state in the main thread - counters: Counters, - /// Connection to the PoX sync watchdog - sync_comms: PoxSyncWatchdogComms, - /// Global flag to see if we should keep running - pub should_keep_running: Arc, - /// Status of our VRF key registration state (shared between the main thread and the relayer) - leader_key_registration_state: Arc>, -} - /// Miner chain tip, on top of which to build microblocks #[derive(Debug, Clone, PartialEq)] pub struct MinerTip { @@ -327,205 +286,6 @@ impl MinerTip { } } -impl Globals { - pub fn new( - coord_comms: CoordinatorChannels, - miner_status: Arc>, - relay_send: SyncSender, - counters: Counters, - sync_comms: PoxSyncWatchdogComms, - 
should_keep_running: Arc, - ) -> Globals { - Globals { - last_sortition: Arc::new(Mutex::new(None)), - miner_status, - coord_comms, - unconfirmed_txs: Arc::new(Mutex::new(UnconfirmedTxMap::new())), - relay_send, - counters, - sync_comms, - should_keep_running, - leader_key_registration_state: Arc::new(Mutex::new( - LeaderKeyRegistrationState::Inactive, - )), - } - } - - /// Get the last sortition processed by the relayer thread - pub fn get_last_sortition(&self) -> Option { - match self.last_sortition.lock() { - Ok(sort_opt) => sort_opt.clone(), - Err(_) => { - error!("Sortition mutex poisoned!"); - panic!(); - } - } - } - - /// Set the last sortition processed - pub fn set_last_sortition(&self, block_snapshot: BlockSnapshot) { - match self.last_sortition.lock() { - Ok(mut sortition_opt) => { - sortition_opt.replace(block_snapshot); - } - Err(_) => { - error!("Sortition mutex poisoned!"); - panic!(); - } - }; - } - - /// Get the status of the miner (blocked or ready) - pub fn get_miner_status(&self) -> Arc> { - self.miner_status.clone() - } - - /// Get the main thread's counters - pub fn get_counters(&self) -> Counters { - self.counters.clone() - } - - /// Called by the relayer to pass unconfirmed txs to the p2p thread, so the p2p thread doesn't - /// need to do the disk I/O needed to instantiate the unconfirmed state trie they represent. - /// Clears the unconfirmed transactions, and replaces them with the chainstate's. - pub fn send_unconfirmed_txs(&self, chainstate: &StacksChainState) { - if let Some(ref unconfirmed) = chainstate.unconfirmed_state { - match self.unconfirmed_txs.lock() { - Ok(mut txs) => { - txs.clear(); - txs.extend(unconfirmed.mined_txs.clone()); - } - Err(e) => { - // can only happen due to a thread panic in the relayer - error!("FATAL: unconfirmed tx arc mutex is poisoned: {:?}", &e); - panic!(); - } - }; - } - } - - /// Called by the p2p thread to accept the unconfirmed tx state processed by the relayer. 
- /// Puts the shared unconfirmed transactions to chainstate. - pub fn recv_unconfirmed_txs(&self, chainstate: &mut StacksChainState) { - if let Some(ref mut unconfirmed) = chainstate.unconfirmed_state { - match self.unconfirmed_txs.lock() { - Ok(txs) => { - unconfirmed.mined_txs.clear(); - unconfirmed.mined_txs.extend(txs.clone()); - } - Err(e) => { - // can only happen due to a thread panic in the relayer - error!("FATAL: unconfirmed arc mutex is poisoned: {:?}", &e); - panic!(); - } - }; - } - } - - /// Signal system-wide stop - pub fn signal_stop(&self) { - self.should_keep_running.store(false, Ordering::SeqCst); - } - - /// Should we keep running? - pub fn keep_running(&self) -> bool { - self.should_keep_running.load(Ordering::SeqCst) - } - - /// Get the handle to the coordinator - pub fn coord(&self) -> &CoordinatorChannels { - &self.coord_comms - } - - /// Get the current leader key registration state. - /// Called from the runloop thread and relayer thread. - fn get_leader_key_registration_state(&self) -> LeaderKeyRegistrationState { - match self.leader_key_registration_state.lock() { - Ok(state) => (*state).clone(), - Err(e) => { - // can only happen due to a thread panic in the relayer - error!("FATAL: leader key registration mutex is poisoned: {:?}", &e); - panic!(); - } - } - } - - /// Set the initial leader key registration state. - /// Called from the runloop thread when booting up. - fn set_initial_leader_key_registration_state(&self, new_state: LeaderKeyRegistrationState) { - match self.leader_key_registration_state.lock() { - Ok(mut state) => { - *state = new_state; - } - Err(e) => { - // can only happen due to a thread panic in the relayer - error!("FATAL: leader key registration mutex is poisoned: {:?}", &e); - panic!(); - } - } - } - - /// Advance the leader key registration state to pending, given a txid we just sent. - /// Only the relayer thread calls this. 
- fn set_pending_leader_key_registration(&self, target_block_height: u64, txid: Txid) { - match self.leader_key_registration_state.lock() { - Ok(ref mut leader_key_registration_state) => { - **leader_key_registration_state = - LeaderKeyRegistrationState::Pending(target_block_height, txid); - } - Err(_e) => { - error!("FATAL: failed to lock leader key registration state mutex"); - panic!(); - } - } - } - - /// Advance the leader key registration state to active, given the VRF key registration ops - /// we've discovered in a given snapshot. - /// The runloop thread calls this whenever it processes a sortition. - pub fn try_activate_leader_key_registration( - &self, - burn_block_height: u64, - key_registers: Vec, - ) -> bool { - let mut activated = false; - match self.leader_key_registration_state.lock() { - Ok(ref mut leader_key_registration_state) => { - for op in key_registers.into_iter() { - if let LeaderKeyRegistrationState::Pending(target_block_height, txid) = - **leader_key_registration_state - { - info!( - "Received burnchain block #{} including key_register_op - {}", - burn_block_height, txid - ); - if txid == op.txid { - **leader_key_registration_state = - LeaderKeyRegistrationState::Active(RegisteredKey { - target_block_height, - vrf_public_key: op.public_key, - block_height: u64::from(op.block_height), - op_vtxindex: u32::from(op.vtxindex), - }); - activated = true; - } else { - debug!( - "key_register_op {} does not match our pending op {}", - txid, &op.txid - ); - } - } - } - } - Err(_e) => { - error!("FATAL: failed to lock leader key registration state mutex"); - panic!(); - } - } - activated - } -} - /// Node implementation for both miners and followers. /// This struct is used to set up the node proper and launch the p2p thread and relayer thread. /// It is further used by the main thread to communicate with these two threads. 
@@ -653,7 +413,7 @@ struct ParentStacksBlockInfo { } #[derive(Clone)] -enum LeaderKeyRegistrationState { +pub enum LeaderKeyRegistrationState { /// Not started yet Inactive, /// Waiting for burnchain confirmation @@ -664,6 +424,16 @@ enum LeaderKeyRegistrationState { Active(RegisteredKey), } +impl LeaderKeyRegistrationState { + pub fn get_active(&self) -> Option { + if let Self::Active(registered_key) = self { + Some(registered_key.clone()) + } else { + None + } + } +} + /// Relayer thread /// * accepts network results and stores blocks and microblocks /// * forwards new blocks, microblocks, and transactions to the p2p thread @@ -3407,6 +3177,10 @@ impl RelayerThread { debug!("Relayer: directive Ran tenure"); true } + RelayerDirective::NakamotoTenureStartProcessed(_, _) => { + warn!("Relayer: Nakamoto tenure start notification received while still operating 2.x neon node"); + true + } RelayerDirective::Exit => false, }; if !continue_running { diff --git a/testnet/stacks-node/src/run_loop/mod.rs b/testnet/stacks-node/src/run_loop/mod.rs index c7aaf87b56..abfbe37c37 100644 --- a/testnet/stacks-node/src/run_loop/mod.rs +++ b/testnet/stacks-node/src/run_loop/mod.rs @@ -1,4 +1,5 @@ pub mod helium; +pub mod nakamoto; pub mod neon; use clarity::vm::costs::ExecutionCost; diff --git a/testnet/stacks-node/src/run_loop/nakamoto.rs b/testnet/stacks-node/src/run_loop/nakamoto.rs new file mode 100644 index 0000000000..f758a65d33 --- /dev/null +++ b/testnet/stacks-node/src/run_loop/nakamoto.rs @@ -0,0 +1,1029 @@ +use std::sync::atomic::AtomicBool; +use std::sync::mpsc::sync_channel; +use std::sync::{Arc, Mutex}; +use std::thread::JoinHandle; +use std::{cmp, thread}; + +use stacks::burnchains::bitcoin::address::{BitcoinAddress, LegacyBitcoinAddressType}; +use stacks::burnchains::Burnchain; +use stacks::chainstate::burn::db::sortdb::SortitionDB; +use stacks::chainstate::burn::BlockSnapshot; +use stacks::chainstate::coordinator::comm::{CoordinatorChannels, CoordinatorReceivers}; 
+use stacks::chainstate::coordinator::{ + static_get_canonical_affirmation_map, static_get_heaviest_affirmation_map, + static_get_stacks_tip_affirmation_map, ChainsCoordinator, ChainsCoordinatorConfig, + CoordinatorCommunication, +}; +use stacks::chainstate::stacks::db::{ChainStateBootData, StacksChainState}; +use stacks::chainstate::stacks::miner::{signal_mining_blocked, signal_mining_ready, MinerStatus}; +use stacks::core::StacksEpochId; +use stacks::net::atlas::{AtlasConfig, AtlasDB, Attachment}; +use stacks_common::types::PublicKey; +use stacks_common::util::hash::Hash160; +use stacks_common::util::{get_epoch_time_secs, sleep_ms}; +use stx_genesis::GenesisData; + +use super::RunLoopCallbacks; +use crate::burnchains::make_bitcoin_indexer; +use crate::globals::Globals; +use crate::monitoring::start_serving_monitoring_metrics; +use crate::nakamoto_node::{StacksNode, BLOCK_PROCESSOR_STACK_SIZE, RELAYER_MAX_BUFFER}; +use crate::neon::RunLoopCounter; +use crate::node::{ + get_account_balances, get_account_lockups, get_names, get_namespaces, + use_test_genesis_chainstate, +}; +use crate::run_loop::neon; +use crate::run_loop::neon::Counters; +use crate::syncctl::{PoxSyncWatchdog, PoxSyncWatchdogComms}; +use crate::{ + run_loop, BitcoinRegtestController, BurnchainController, Config, EventDispatcher, Keychain, +}; + +pub const STDERR: i32 = 2; + +#[cfg(test)] +const UNCONDITIONAL_CHAIN_LIVENESS_CHECK: u64 = 30; + +#[cfg(not(test))] +const UNCONDITIONAL_CHAIN_LIVENESS_CHECK: u64 = 300; + +/// Coordinating a node running in neon mode. 
+pub struct RunLoop { + config: Config, + pub callbacks: RunLoopCallbacks, + globals: Option<Globals>, + counters: Counters, + coordinator_channels: Option<(CoordinatorReceivers, CoordinatorChannels)>, + should_keep_running: Arc<AtomicBool>, + event_dispatcher: EventDispatcher, + pox_watchdog: Option<PoxSyncWatchdog>, // can't be instantiated until .start() is called + is_miner: Option<bool>, // not known until .start() is called + burnchain: Option<Burnchain>, // not known until .start() is called + pox_watchdog_comms: PoxSyncWatchdogComms, + /// NOTE: this is duplicated in self.globals, but it needs to be accessible before globals is + /// instantiated (namely, so the test framework can access it). + miner_status: Arc<Mutex<MinerStatus>>, +} + +impl RunLoop { + /// Sets up a runloop and node, given a config. + pub fn new(config: Config) -> Self { + let channels = CoordinatorCommunication::instantiate(); + let should_keep_running = Arc::new(AtomicBool::new(true)); + let pox_watchdog_comms = PoxSyncWatchdogComms::new(should_keep_running.clone()); + let miner_status = Arc::new(Mutex::new(MinerStatus::make_ready( + config.burnchain.burn_fee_cap, + ))); + + let mut event_dispatcher = EventDispatcher::new(); + for observer in config.events_observers.iter() { + event_dispatcher.register_observer(observer); + } + + Self { + config, + globals: None, + coordinator_channels: Some(channels), + callbacks: RunLoopCallbacks::new(), + counters: Counters::new(), + should_keep_running, + event_dispatcher, + pox_watchdog: None, + is_miner: None, + burnchain: None, + pox_watchdog_comms, + miner_status, + } + } + + pub fn get_globals(&self) -> Globals { + self.globals + .clone() + .expect("FATAL: globals not instantiated") + } + + fn set_globals(&mut self, globals: Globals) { + self.globals = Some(globals); + } + + pub fn get_coordinator_channel(&self) -> Option<CoordinatorChannels> { + self.coordinator_channels.as_ref().map(|x| x.1.clone()) + } + + pub fn get_blocks_processed_arc(&self) -> RunLoopCounter { + self.counters.blocks_processed.clone() + } + + pub fn 
submitted_commits(&self) -> RunLoopCounter { + self.counters.naka_submitted_commits.clone() + } + + pub fn submitted_vrfs(&self) -> RunLoopCounter { + self.counters.naka_submitted_vrfs.clone() + } + + pub fn mined_blocks(&self) -> RunLoopCounter { + self.counters.naka_mined_blocks.clone() + } + + pub fn get_counters(&self) -> Counters { + self.counters.clone() + } + + pub fn config(&self) -> &Config { + &self.config + } + + pub fn get_event_dispatcher(&self) -> EventDispatcher { + self.event_dispatcher.clone() + } + + pub fn is_miner(&self) -> bool { + self.is_miner.unwrap_or(false) + } + + pub fn get_pox_sync_comms(&self) -> PoxSyncWatchdogComms { + self.pox_watchdog_comms.clone() + } + + pub fn get_termination_switch(&self) -> Arc<AtomicBool> { + self.should_keep_running.clone() + } + + pub fn get_burnchain(&self) -> Burnchain { + self.burnchain + .clone() + .expect("FATAL: tried to get runloop burnchain before calling .start()") + } + + pub fn get_pox_watchdog(&mut self) -> &mut PoxSyncWatchdog { + self.pox_watchdog + .as_mut() + .expect("FATAL: tried to get PoX watchdog before calling .start()") + } + + pub fn get_miner_status(&self) -> Arc<Mutex<MinerStatus>> { + self.miner_status.clone() + } + + /// Determine if we're the miner. + /// If there's a network error, then assume that we're not a miner.
+ fn check_is_miner(&mut self, burnchain: &mut BitcoinRegtestController) -> bool { + if self.config.node.miner { + let keychain = Keychain::default(self.config.node.seed.clone()); + let mut op_signer = keychain.generate_op_signer(); + match burnchain.create_wallet_if_dne() { + Err(e) => warn!("Error when creating wallet: {:?}", e), + _ => {} + } + let mut btc_addrs = vec![( + StacksEpochId::Epoch2_05, + // legacy + BitcoinAddress::from_bytes_legacy( + self.config.burnchain.get_bitcoin_network().1, + LegacyBitcoinAddressType::PublicKeyHash, + &Hash160::from_data(&op_signer.get_public_key().to_bytes()).0, + ) + .expect("FATAL: failed to construct legacy bitcoin address"), + )]; + if self.config.miner.segwit { + btc_addrs.push(( + StacksEpochId::Epoch21, + // segwit p2wpkh + BitcoinAddress::from_bytes_segwit_p2wpkh( + self.config.burnchain.get_bitcoin_network().1, + &Hash160::from_data(&op_signer.get_public_key().to_bytes_compressed()).0, + ) + .expect("FATAL: failed to construct segwit p2wpkh address"), + )); + } + + for (epoch_id, btc_addr) in btc_addrs.into_iter() { + info!("Miner node: checking UTXOs at address: {}", &btc_addr); + let utxos = burnchain.get_utxos(epoch_id, &op_signer.get_public_key(), 1, None, 0); + if utxos.is_none() { + warn!("UTXOs not found for {}. If this is unexpected, please ensure that your bitcoind instance is indexing transactions for the address {} (importaddress)", btc_addr, btc_addr); + } else { + info!("UTXOs found - will run as a Miner node"); + return true; + } + } + if self.config.node.mock_mining { + info!("No UTXOs found, but configured to mock mine"); + return true; + } else { + return false; + } + } else { + info!("Will run as a Follower node"); + false + } + } + + /// Boot up the stacks chainstate. + /// Instantiate the chainstate and push out the boot receipts to observers + /// This is only public so we can test it. 
+ pub fn boot_chainstate(&mut self, burnchain_config: &Burnchain) -> StacksChainState { + let use_test_genesis_data = use_test_genesis_chainstate(&self.config); + + // load up genesis balances + let initial_balances = self + .config + .initial_balances + .iter() + .map(|e| (e.address.clone(), e.amount)) + .collect(); + + // TODO (nakamoto-neon): check if we're trying to setup a self-signing network + // and set the right genesis data + + // instantiate chainstate + let mut boot_data = ChainStateBootData { + initial_balances, + post_flight_callback: None, + first_burnchain_block_hash: burnchain_config.first_block_hash, + first_burnchain_block_height: burnchain_config.first_block_height as u32, + first_burnchain_block_timestamp: burnchain_config.first_block_timestamp, + pox_constants: burnchain_config.pox_constants.clone(), + get_bulk_initial_lockups: Some(Box::new(move || { + get_account_lockups(use_test_genesis_data) + })), + get_bulk_initial_balances: Some(Box::new(move || { + get_account_balances(use_test_genesis_data) + })), + get_bulk_initial_namespaces: Some(Box::new(move || { + get_namespaces(use_test_genesis_data) + })), + get_bulk_initial_names: Some(Box::new(move || get_names(use_test_genesis_data))), + }; + + let (chain_state_db, receipts) = StacksChainState::open_and_exec( + self.config.is_mainnet(), + self.config.burnchain.chain_id, + &self.config.get_chainstate_path_str(), + Some(&mut boot_data), + Some(self.config.node.get_marf_opts()), + ) + .unwrap(); + run_loop::announce_boot_receipts( + &mut self.event_dispatcher, + &chain_state_db, + &burnchain_config.pox_constants, + &receipts, + ); + chain_state_db + } + + /// Instantiate the Stacks chain state and start the chains coordinator thread. + /// Returns the coordinator thread handle, and the receiving end of the coordinator's atlas + /// attachment channel. 
+ fn spawn_chains_coordinator( + &mut self, + burnchain_config: &Burnchain, + coordinator_receivers: CoordinatorReceivers, + miner_status: Arc>, + ) -> JoinHandle<()> { + let use_test_genesis_data = use_test_genesis_chainstate(&self.config); + + // load up genesis Atlas attachments + let mut atlas_config = AtlasConfig::new(self.config.is_mainnet()); + let genesis_attachments = GenesisData::new(use_test_genesis_data) + .read_name_zonefiles() + .into_iter() + .map(|z| Attachment::new(z.zonefile_content.as_bytes().to_vec())) + .collect(); + atlas_config.genesis_attachments = Some(genesis_attachments); + + let chain_state_db = self.boot_chainstate(burnchain_config); + + // NOTE: re-instantiate AtlasConfig so we don't have to keep the genesis attachments around + let moved_atlas_config = self.config.atlas.clone(); + let moved_config = self.config.clone(); + let moved_burnchain_config = burnchain_config.clone(); + let mut coordinator_dispatcher = self.event_dispatcher.clone(); + let atlas_db = AtlasDB::connect( + moved_atlas_config.clone(), + &self.config.get_atlas_db_file_path(), + true, + ) + .expect("Failed to connect Atlas DB during startup"); + let coordinator_indexer = + make_bitcoin_indexer(&self.config, Some(self.should_keep_running.clone())); + + let coordinator_thread_handle = thread::Builder::new() + .name(format!( + "chains-coordinator-{}", + &moved_config.node.rpc_bind + )) + .stack_size(BLOCK_PROCESSOR_STACK_SIZE) + .spawn(move || { + debug!( + "chains-coordinator thread ID is {:?}", + thread::current().id() + ); + let mut cost_estimator = moved_config.make_cost_estimator(); + let mut fee_estimator = moved_config.make_fee_estimator(); + + let coord_config = ChainsCoordinatorConfig { + always_use_affirmation_maps: moved_config.node.always_use_affirmation_maps, + require_affirmed_anchor_blocks: moved_config + .node + .require_affirmed_anchor_blocks, + ..ChainsCoordinatorConfig::new() + }; + ChainsCoordinator::run( + coord_config, + chain_state_db, + 
moved_burnchain_config, + &mut coordinator_dispatcher, + coordinator_receivers, + moved_atlas_config, + cost_estimator.as_deref_mut(), + fee_estimator.as_deref_mut(), + miner_status, + coordinator_indexer, + atlas_db, + ); + }) + .expect("FATAL: failed to start chains coordinator thread"); + + coordinator_thread_handle + } + + /// Start Prometheus logging + fn start_prometheus(&mut self) { + let prometheus_bind = self.config.node.prometheus_bind.clone(); + if let Some(prometheus_bind) = prometheus_bind { + thread::Builder::new() + .name("prometheus".to_string()) + .spawn(move || { + debug!("prometheus thread ID is {:?}", thread::current().id()); + start_serving_monitoring_metrics(prometheus_bind); + }) + .unwrap(); + } + } + + /// Get the sortition DB's highest block height, aligned to a reward cycle boundary, and the + /// highest sortition. + /// Returns (height at rc start, sortition) + fn get_reward_cycle_sortition_db_height( + sortdb: &SortitionDB, + burnchain_config: &Burnchain, + ) -> (u64, BlockSnapshot) { + let (stacks_ch, _) = SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn()) + .expect("BUG: failed to load canonical stacks chain tip hash"); + + let sn = match SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &stacks_ch) + .expect("BUG: failed to query sortition DB") + { + Some(sn) => sn, + None => { + debug!("No canonical stacks chain tip hash present"); + let sn = SortitionDB::get_first_block_snapshot(&sortdb.conn()) + .expect("BUG: failed to get first-ever block snapshot"); + sn + } + }; + + ( + burnchain_config.reward_cycle_to_block_height( + burnchain_config + .block_height_to_reward_cycle(sn.block_height) + .expect("BUG: snapshot preceeds first reward cycle"), + ), + sn, + ) + } + + /// Wake up and drive stacks block processing if there's been a PoX reorg. 
+ /// Be careful not to saturate calls to announce new stacks blocks, because that will disable + /// mining (which would prevent a miner attempting to fix a hidden PoX anchor block from making + /// progress). + fn drive_pox_reorg_stacks_block_processing( + globals: &Globals, + config: &Config, + burnchain: &Burnchain, + sortdb: &SortitionDB, + last_stacks_pox_reorg_recover_time: &mut u128, + ) { + let delay = cmp::max( + config.node.chain_liveness_poll_time_secs, + cmp::max( + config.miner.first_attempt_time_ms, + config.miner.subsequent_attempt_time_ms, + ) / 1000, + ); + + if *last_stacks_pox_reorg_recover_time + (delay as u128) >= get_epoch_time_secs().into() { + // too soon + return; + } + + // compare stacks and heaviest AMs + let burnchain_db = burnchain + .open_burnchain_db(false) + .expect("FATAL: failed to open burnchain DB"); + + let sn = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) + .expect("FATAL: could not read sortition DB"); + + let indexer = make_bitcoin_indexer(config, Some(globals.should_keep_running.clone())); + + let heaviest_affirmation_map = match static_get_heaviest_affirmation_map( + &burnchain, + &indexer, + &burnchain_db, + sortdb, + &sn.sortition_id, + ) { + Ok(am) => am, + Err(e) => { + warn!("Failed to find heaviest affirmation map: {:?}", &e); + return; + } + }; + + let highest_sn = SortitionDB::get_highest_known_burn_chain_tip(sortdb.conn()) + .expect("FATAL: could not read sortition DB"); + + let canonical_burnchain_tip = burnchain_db + .get_canonical_chain_tip() + .expect("FATAL: could not read burnchain DB"); + + let sortition_tip_affirmation_map = + match SortitionDB::find_sortition_tip_affirmation_map(sortdb, &sn.sortition_id) { + Ok(am) => am, + Err(e) => { + warn!("Failed to find sortition affirmation map: {:?}", &e); + return; + } + }; + + let stacks_tip_affirmation_map = static_get_stacks_tip_affirmation_map( + &burnchain_db, + sortdb, + &sn.sortition_id, + &sn.canonical_stacks_tip_consensus_hash, + 
&sn.canonical_stacks_tip_hash, + ) + .expect("FATAL: could not query stacks DB"); + + if stacks_tip_affirmation_map.len() < heaviest_affirmation_map.len() + || stacks_tip_affirmation_map + .find_divergence(&heaviest_affirmation_map) + .is_some() + { + // the sortition affirmation map might also be inconsistent, so we'll need to fix that + // (i.e. the underlying sortitions) before we can fix the stacks fork + if sortition_tip_affirmation_map.len() < heaviest_affirmation_map.len() + || sortition_tip_affirmation_map + .find_divergence(&heaviest_affirmation_map) + .is_some() + { + debug!("Drive burn block processing: possible PoX reorg (sortition tip: {}, heaviest: {})", &sortition_tip_affirmation_map, &heaviest_affirmation_map); + globals.coord().announce_new_burn_block(); + } else if highest_sn.block_height == sn.block_height + && sn.block_height == canonical_burnchain_tip.block_height + { + // need to force an affirmation reorg because there will be no more burn block + // announcements. + debug!("Drive burn block processing: possible PoX reorg (sortition tip: {}, heaviest: {}, burn height {})", &sortition_tip_affirmation_map, &heaviest_affirmation_map, sn.block_height); + globals.coord().announce_new_burn_block(); + } + + debug!( + "Drive stacks block processing: possible PoX reorg (stacks tip: {}, heaviest: {})", + &stacks_tip_affirmation_map, &heaviest_affirmation_map + ); + globals.coord().announce_new_stacks_block(); + } else { + debug!( + "Drive stacks block processing: no need (stacks tip: {}, heaviest: {})", + &stacks_tip_affirmation_map, &heaviest_affirmation_map + ); + + // announce a new stacks block to force the chains coordinator + // to wake up anyways. this isn't free, so we have to make sure + // the chain-liveness thread doesn't wake up too often + globals.coord().announce_new_stacks_block(); + } + + *last_stacks_pox_reorg_recover_time = get_epoch_time_secs().into(); + } + + /// Wake up and drive sortition processing if there's been a PoX reorg. 
+ /// Be careful not to saturate calls to announce new burn blocks, because that will disable + /// mining (which would prevent a miner attempting to fix a hidden PoX anchor block from making + /// progress). + /// + /// only call if no in ibd + fn drive_pox_reorg_burn_block_processing( + globals: &Globals, + config: &Config, + burnchain: &Burnchain, + sortdb: &SortitionDB, + chain_state_db: &StacksChainState, + last_burn_pox_reorg_recover_time: &mut u128, + last_announce_time: &mut u128, + ) { + let delay = cmp::max( + config.node.chain_liveness_poll_time_secs, + cmp::max( + config.miner.first_attempt_time_ms, + config.miner.subsequent_attempt_time_ms, + ) / 1000, + ); + + if *last_burn_pox_reorg_recover_time + (delay as u128) >= get_epoch_time_secs().into() { + // too soon + return; + } + + // compare sortition and heaviest AMs + let burnchain_db = burnchain + .open_burnchain_db(false) + .expect("FATAL: failed to open burnchain DB"); + + let highest_sn = SortitionDB::get_highest_known_burn_chain_tip(sortdb.conn()) + .expect("FATAL: could not read sortition DB"); + + let canonical_burnchain_tip = burnchain_db + .get_canonical_chain_tip() + .expect("FATAL: could not read burnchain DB"); + + if canonical_burnchain_tip.block_height > highest_sn.block_height { + // still processing sortitions + test_debug!( + "Drive burn block processing: still processing sortitions ({} > {})", + canonical_burnchain_tip.block_height, + highest_sn.block_height + ); + return; + } + + // NOTE: this could be lower than the highest_sn + let sn = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) + .expect("FATAL: could not read sortition DB"); + + let sortition_tip_affirmation_map = + match SortitionDB::find_sortition_tip_affirmation_map(sortdb, &sn.sortition_id) { + Ok(am) => am, + Err(e) => { + warn!("Failed to find sortition affirmation map: {:?}", &e); + return; + } + }; + + let indexer = make_bitcoin_indexer(config, Some(globals.should_keep_running.clone())); + + let 
heaviest_affirmation_map = match static_get_heaviest_affirmation_map( + &burnchain, + &indexer, + &burnchain_db, + sortdb, + &sn.sortition_id, + ) { + Ok(am) => am, + Err(e) => { + warn!("Failed to find heaviest affirmation map: {:?}", &e); + return; + } + }; + + let canonical_affirmation_map = match static_get_canonical_affirmation_map( + &burnchain, + &indexer, + &burnchain_db, + sortdb, + &chain_state_db, + &sn.sortition_id, + ) { + Ok(am) => am, + Err(e) => { + warn!("Failed to find canonical affirmation map: {:?}", &e); + return; + } + }; + + if sortition_tip_affirmation_map.len() < heaviest_affirmation_map.len() + || sortition_tip_affirmation_map + .find_divergence(&heaviest_affirmation_map) + .is_some() + || sn.block_height < highest_sn.block_height + { + debug!("Drive burn block processing: possible PoX reorg (sortition tip: {}, heaviest: {}, {} = heaviest_affirmation_map.len() + && sortition_tip_affirmation_map.len() <= canonical_affirmation_map.len() + { + if let Some(divergence_rc) = + canonical_affirmation_map.find_divergence(&sortition_tip_affirmation_map) + { + if divergence_rc + 1 >= (heaviest_affirmation_map.len() as u64) { + // we have unaffirmed PoX anchor blocks that are not yet processed in the sortition history + debug!("Drive burnchain processing: possible PoX reorg from unprocessed anchor block(s) (sortition tip: {}, heaviest: {}, canonical: {})", &sortition_tip_affirmation_map, &heaviest_affirmation_map, &canonical_affirmation_map); + globals.coord().announce_new_burn_block(); + globals.coord().announce_new_stacks_block(); + *last_announce_time = get_epoch_time_secs().into(); + } + } + } else { + debug!( + "Drive burn block processing: no need (sortition tip: {}, heaviest: {}, {} JoinHandle<()> { + let config = self.config.clone(); + let burnchain = self.get_burnchain(); + let sortdb = burnchain + .open_sortition_db(true) + .expect("FATAL: could not open sortition DB"); + + let (chain_state_db, _) = StacksChainState::open( + 
config.is_mainnet(), + config.burnchain.chain_id, + &config.get_chainstate_path_str(), + Some(config.node.get_marf_opts()), + ) + .unwrap(); + + let liveness_thread_handle = thread::Builder::new() + .name(format!("chain-liveness-{}", config.node.rpc_bind)) + .stack_size(BLOCK_PROCESSOR_STACK_SIZE) + .spawn(move || { + Self::drive_chain_liveness(globals, config, burnchain, sortdb, chain_state_db) + }) + .expect("FATAL: failed to spawn chain liveness thread"); + + liveness_thread_handle + } + + /// Starts the node runloop. + /// + /// This function will block by looping infinitely. + /// It will start the burnchain (separate thread), set-up a channel in + /// charge of coordinating the new blocks coming from the burnchain and + /// the nodes, taking turns on tenures. + pub fn start(&mut self, burnchain_opt: Option, mut mine_start: u64) { + let (coordinator_receivers, coordinator_senders) = self + .coordinator_channels + .take() + .expect("Run loop already started, can only start once after initialization."); + + neon::RunLoop::setup_termination_handler(self.should_keep_running.clone()); + let mut burnchain = neon::RunLoop::instantiate_burnchain_state( + &self.config, + self.should_keep_running.clone(), + burnchain_opt, + coordinator_senders.clone(), + ); + + let burnchain_config = burnchain.get_burnchain(); + self.burnchain = Some(burnchain_config.clone()); + + // can we mine? + let is_miner = self.check_is_miner(&mut burnchain); + self.is_miner = Some(is_miner); + + // relayer linkup + let (relay_send, relay_recv) = sync_channel(RELAYER_MAX_BUFFER); + + // set up globals so other subsystems can instantiate off of the runloop state. 
+ let globals = Globals::new( + coordinator_senders, + self.get_miner_status(), + relay_send, + self.counters.clone(), + self.pox_watchdog_comms.clone(), + self.should_keep_running.clone(), + ); + self.set_globals(globals.clone()); + + // have headers; boot up the chains coordinator and instantiate the chain state + let coordinator_thread_handle = self.spawn_chains_coordinator( + &burnchain_config, + coordinator_receivers, + globals.get_miner_status(), + ); + self.start_prometheus(); + + // We announce a new burn block so that the chains coordinator + // can resume prior work and handle eventual unprocessed sortitions + // stored during a previous session. + globals.coord().announce_new_burn_block(); + + // Make sure at least one sortition has happened, and make sure it's globally available + let sortdb = burnchain.sortdb_mut(); + let (rc_aligned_height, sn) = + RunLoop::get_reward_cycle_sortition_db_height(&sortdb, &burnchain_config); + + let burnchain_tip_snapshot = if sn.block_height == burnchain_config.first_block_height { + // need at least one sortition to happen. 
+ burnchain + .wait_for_sortitions(globals.coord().clone(), sn.block_height + 1) + .expect("Unable to get burnchain tip") + .block_snapshot + } else { + sn + }; + + globals.set_last_sortition(burnchain_tip_snapshot.clone()); + + // Boot up the p2p network and relayer, and figure out how many sortitions we have so far + // (it could be non-zero if the node is resuming from chainstate) + let mut node = StacksNode::spawn(self, globals.clone(), relay_recv); + let liveness_thread = self.spawn_chain_liveness_thread(globals.clone()); + + // Wait for all pending sortitions to process + let burnchain_db = burnchain_config + .open_burnchain_db(false) + .expect("FATAL: failed to open burnchain DB"); + let burnchain_db_tip = burnchain_db + .get_canonical_chain_tip() + .expect("FATAL: failed to query burnchain DB"); + let mut burnchain_tip = burnchain + .wait_for_sortitions(globals.coord().clone(), burnchain_db_tip.block_height) + .expect("Unable to get burnchain tip"); + + // Start the runloop + debug!("Runloop: Begin run loop"); + self.counters.bump_blocks_processed(); + + let mut sortition_db_height = rc_aligned_height; + let mut burnchain_height = sortition_db_height; + let mut num_sortitions_in_last_cycle; + + // prepare to fetch the first reward cycle! + let mut target_burnchain_block_height = cmp::min( + burnchain_config.reward_cycle_to_block_height( + burnchain_config + .block_height_to_reward_cycle(burnchain_height) + .expect("BUG: block height is not in a reward cycle") + + 1, + ), + burnchain.get_headers_height() - 1, + ); + + debug!( + "Runloop: Begin main runloop starting a burnchain block {}", + sortition_db_height + ); + + let mut last_tenure_sortition_height = 0; + + loop { + if !globals.keep_running() { + // The p2p thread relies on the same atomic_bool, it will + // discontinue its execution after completing its ongoing runloop epoch. 
+ info!("Terminating p2p process"); + info!("Terminating relayer"); + info!("Terminating chains-coordinator"); + + globals.coord().stop_chains_coordinator(); + coordinator_thread_handle.join().unwrap(); + node.join(); + liveness_thread.join().unwrap(); + + info!("Exiting stacks-node"); + break; + } + + let remote_chain_height = burnchain.get_headers_height() - 1; + + // wait for the p2p state-machine to do at least one pass + debug!("Runloop: Wait until Stacks block downloads reach a quiescent state before processing more burnchain blocks"; "remote_chain_height" => remote_chain_height, "local_chain_height" => burnchain_height); + + let ibd = false; + + // calculate burnchain sync percentage + let percent: f64 = if remote_chain_height > 0 { + burnchain_tip.block_snapshot.block_height as f64 / remote_chain_height as f64 + } else { + 0.0 + }; + + // Download each burnchain block and process their sortitions. This, in turn, will + // cause the node's p2p and relayer threads to go fetch and download Stacks blocks and + // process them. This loop runs for one reward cycle, so that the next pass of the + // runloop will cause the PoX sync watchdog to wait until it believes that the node has + // obtained all the Stacks blocks it can. 
+ debug!( + "Runloop: Download burnchain blocks up to reward cycle #{} (height {})", + burnchain_config + .block_height_to_reward_cycle(target_burnchain_block_height) + .expect("FATAL: target burnchain block height does not have a reward cycle"), + target_burnchain_block_height; + "total_burn_sync_percent" => %percent, + "local_burn_height" => burnchain_tip.block_snapshot.block_height, + "remote_tip_height" => remote_chain_height + ); + + loop { + if !globals.keep_running() { + break; + } + + let (next_burnchain_tip, tip_burnchain_height) = + match burnchain.sync(Some(target_burnchain_block_height)) { + Ok(x) => x, + Err(e) => { + warn!("Runloop: Burnchain controller stopped: {}", e); + continue; + } + }; + + // *now* we know the burnchain height + burnchain_tip = next_burnchain_tip; + burnchain_height = tip_burnchain_height; + + let sortition_tip = &burnchain_tip.block_snapshot.sortition_id; + let next_sortition_height = burnchain_tip.block_snapshot.block_height; + + if next_sortition_height != last_tenure_sortition_height { + info!( + "Runloop: Downloaded burnchain blocks up to height {}; target height is {}; remote_chain_height = {} next_sortition_height = {}, sortition_db_height = {}", + burnchain_height, target_burnchain_block_height, remote_chain_height, next_sortition_height, sortition_db_height + ); + } + + if next_sortition_height > sortition_db_height { + debug!( + "Runloop: New burnchain block height {} > {}", + next_sortition_height, sortition_db_height + ); + + let mut sort_count = 0; + + debug!("Runloop: block mining until we process all sortitions"); + signal_mining_blocked(globals.get_miner_status()); + + // first, let's process all blocks in (sortition_db_height, next_sortition_height] + for block_to_process in (sortition_db_height + 1)..(next_sortition_height + 1) { + // stop mining so we can advance the sortition DB and so our + // ProcessTenure() directive (sent by relayer_sortition_notify() below) + // will be unblocked. 
+ + let block = { + let ic = burnchain.sortdb_ref().index_conn(); + SortitionDB::get_ancestor_snapshot(&ic, block_to_process, sortition_tip) + .unwrap() + .expect( + "Failed to find block in fork processed by burnchain indexer", + ) + }; + if block.sortition { + sort_count += 1; + } + + let sortition_id = &block.sortition_id; + + // Have the node process the new block, that can include, or not, a sortition. + node.process_burnchain_state(burnchain.sortdb_mut(), sortition_id, ibd); + + // Now, tell the relayer to check if it won a sortition during this block, + // and, if so, to process and advertize the block. This is basically a + // no-op during boot-up. + // + // _this will block if the relayer's buffer is full_ + if !node.relayer_burnchain_notify() { + // relayer hung up, exit. + error!("Runloop: Block relayer and miner hung up, exiting."); + return; + } + } + + debug!("Runloop: enable miner after processing sortitions"); + signal_mining_ready(globals.get_miner_status()); + + num_sortitions_in_last_cycle = sort_count; + debug!( + "Runloop: Synchronized sortitions up to block height {} from {} (chain tip height is {}); {} sortitions", + next_sortition_height, sortition_db_height, burnchain_height, num_sortitions_in_last_cycle; + ); + + sortition_db_height = next_sortition_height; + } else if ibd { + // drive block processing after we reach the burnchain tip. + // we may have downloaded all the blocks already, + // so we can't rely on the relayer alone to + // drive it. + globals.coord().announce_new_stacks_block(); + } + + if burnchain_height >= target_burnchain_block_height + || burnchain_height >= remote_chain_height + { + break; + } + } + + // advance one reward cycle at a time. + // If we're still downloading, then this is simply target_burnchain_block_height + reward_cycle_len. 
+ // Otherwise, this is burnchain_tip + reward_cycle_len + let next_target_burnchain_block_height = cmp::min( + burnchain_config.reward_cycle_to_block_height( + burnchain_config + .block_height_to_reward_cycle(target_burnchain_block_height) + .expect("FATAL: burnchain height before system start") + + 1, + ), + remote_chain_height, + ); + + debug!("Runloop: Advance target burnchain block height from {} to {} (sortition height {})", target_burnchain_block_height, next_target_burnchain_block_height, sortition_db_height); + target_burnchain_block_height = next_target_burnchain_block_height; + + if sortition_db_height >= burnchain_height && !ibd { + let canonical_stacks_tip_height = + SortitionDB::get_canonical_burn_chain_tip(burnchain.sortdb_ref().conn()) + .map(|snapshot| snapshot.canonical_stacks_tip_height) + .unwrap_or(0); + if canonical_stacks_tip_height < mine_start { + info!( + "Runloop: Synchronized full burnchain, but stacks tip height is {}, and we are trying to boot to {}, not mining until reaching chain tip", + canonical_stacks_tip_height, + mine_start + ); + } else { + // once we've synced to the chain tip once, don't apply this check again. + // this prevents a possible corner case in the event of a PoX fork. + mine_start = 0; + + // at tip, and not downloading. proceed to mine. + if last_tenure_sortition_height != sortition_db_height { + info!( + "Runloop: Synchronized full burnchain up to height {}. 
Proceeding to mine blocks", + sortition_db_height + ); + last_tenure_sortition_height = sortition_db_height; + } + } + } + } + } +} diff --git a/testnet/stacks-node/src/run_loop/neon.rs b/testnet/stacks-node/src/run_loop/neon.rs index c9368e9e3a..c10c9b88c3 100644 --- a/testnet/stacks-node/src/run_loop/neon.rs +++ b/testnet/stacks-node/src/run_loop/neon.rs @@ -31,8 +31,9 @@ use stx_genesis::GenesisData; use super::RunLoopCallbacks; use crate::burnchains::make_bitcoin_indexer; +use crate::globals::Globals; use crate::monitoring::start_serving_monitoring_metrics; -use crate::neon_node::{Globals, StacksNode, BLOCK_PROCESSOR_STACK_SIZE, RELAYER_MAX_BUFFER}; +use crate::neon_node::{StacksNode, BLOCK_PROCESSOR_STACK_SIZE, RELAYER_MAX_BUFFER}; use crate::node::{ get_account_balances, get_account_lockups, get_names, get_namespaces, use_test_genesis_chainstate, @@ -63,6 +64,10 @@ pub struct Counters { pub missed_tenures: RunLoopCounter, pub missed_microblock_tenures: RunLoopCounter, pub cancelled_commits: RunLoopCounter, + + pub naka_submitted_vrfs: RunLoopCounter, + pub naka_submitted_commits: RunLoopCounter, + pub naka_mined_blocks: RunLoopCounter, } impl Counters { @@ -74,6 +79,9 @@ impl Counters { missed_tenures: RunLoopCounter::new(AtomicU64::new(0)), missed_microblock_tenures: RunLoopCounter::new(AtomicU64::new(0)), cancelled_commits: RunLoopCounter::new(AtomicU64::new(0)), + naka_submitted_vrfs: RunLoopCounter::new(AtomicU64::new(0)), + naka_submitted_commits: RunLoopCounter::new(AtomicU64::new(0)), + naka_mined_blocks: RunLoopCounter::new(AtomicU64::new(0)), } } @@ -85,6 +93,9 @@ impl Counters { missed_tenures: (), missed_microblock_tenures: (), cancelled_commits: (), + naka_submitted_vrfs: (), + naka_submitted_commits: (), + naka_mined_blocks: (), } } @@ -124,6 +135,18 @@ impl Counters { Counters::inc(&self.cancelled_commits); } + pub fn bump_naka_submitted_vrfs(&self) { + Counters::inc(&self.naka_submitted_vrfs); + } + + pub fn bump_naka_submitted_commits(&self) { 
+ Counters::inc(&self.naka_submitted_commits); + } + + pub fn bump_naka_mined_blocks(&self) { + Counters::inc(&self.naka_mined_blocks); + } + pub fn set_microblocks_processed(&self, value: u64) { Counters::set(&self.microblocks_processed, value) } @@ -251,7 +274,7 @@ impl RunLoop { } pub fn get_termination_switch(&self) -> Arc { - self.get_globals().should_keep_running.clone() + self.should_keep_running.clone() } pub fn get_burnchain(&self) -> Burnchain { @@ -272,8 +295,7 @@ impl RunLoop { /// Set up termination handler. Have a signal set the `should_keep_running` atomic bool to /// false. Panics of called more than once. - fn setup_termination_handler(&self) { - let keep_running_writer = self.should_keep_running.clone(); + pub fn setup_termination_handler(keep_running_writer: Arc) { let install = termination::set_handler(move |sig_id| match sig_id { SignalId::Bus => { let msg = "Caught SIGBUS; crashing immediately and dumping core\n"; @@ -355,17 +377,18 @@ impl RunLoop { /// Instantiate the burnchain client and databases. /// Fetches headers and instantiates the burnchain. /// Panics on failure. - fn instantiate_burnchain_state( - &mut self, + pub fn instantiate_burnchain_state( + config: &Config, + should_keep_running: Arc, burnchain_opt: Option, coordinator_senders: CoordinatorChannels, ) -> BitcoinRegtestController { // Initialize and start the burnchain. 
let mut burnchain_controller = BitcoinRegtestController::with_burnchain( - self.config.clone(), + config.clone(), Some(coordinator_senders), burnchain_opt, - Some(self.should_keep_running.clone()), + Some(should_keep_running.clone()), ); let burnchain = burnchain_controller.get_burnchain(); @@ -377,9 +400,9 @@ impl RunLoop { // Upgrade chainstate databases if they exist already match migrate_chainstate_dbs( &epochs, - &self.config.get_burn_db_file_path(), - &self.config.get_chainstate_path_str(), - Some(self.config.node.get_marf_opts()), + &config.get_burn_db_file_path(), + &config.get_chainstate_path_str(), + Some(config.node.get_marf_opts()), ) { Ok(_) => {} Err(coord_error::DBError(db_error::TooOldForEpoch)) => { @@ -951,9 +974,13 @@ impl RunLoop { .take() .expect("Run loop already started, can only start once after initialization."); - self.setup_termination_handler(); - let mut burnchain = - self.instantiate_burnchain_state(burnchain_opt, coordinator_senders.clone()); + Self::setup_termination_handler(self.should_keep_running.clone()); + let mut burnchain = Self::instantiate_burnchain_state( + &self.config, + self.should_keep_running.clone(), + burnchain_opt, + coordinator_senders.clone(), + ); let burnchain_config = burnchain.get_burnchain(); self.burnchain = Some(burnchain_config.clone()); diff --git a/testnet/stacks-node/src/tests/bitcoin_regtest.rs b/testnet/stacks-node/src/tests/bitcoin_regtest.rs index fdb09dd22c..454e92b50b 100644 --- a/testnet/stacks-node/src/tests/bitcoin_regtest.rs +++ b/testnet/stacks-node/src/tests/bitcoin_regtest.rs @@ -16,6 +16,7 @@ use crate::helium::RunLoop; use crate::tests::to_addr; use crate::Config; +#[derive(Debug)] pub enum BitcoinCoreError { SpawnFailed(String), } diff --git a/testnet/stacks-node/src/tests/mod.rs b/testnet/stacks-node/src/tests/mod.rs index faea7f99d9..8ac9fcff53 100644 --- a/testnet/stacks-node/src/tests/mod.rs +++ b/testnet/stacks-node/src/tests/mod.rs @@ -43,6 +43,7 @@ mod epoch_23; mod epoch_24; mod 
integrations; mod mempool; +mod nakamoto_integrations; pub mod neon_integrations; mod signer; mod stackerdb; diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs new file mode 100644 index 0000000000..efa36ea1e5 --- /dev/null +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -0,0 +1,322 @@ +use clarity::vm::types::PrincipalData; +use stacks::burnchains::MagicBytes; +use stacks::chainstate::nakamoto::NakamotoChainState; +use stacks::chainstate::stacks::db::StacksChainState; +use stacks::core::{ + StacksEpoch, StacksEpochId, BLOCK_LIMIT_MAINNET_10, HELIUM_BLOCK_LIMIT_20, + PEER_VERSION_EPOCH_1_0, PEER_VERSION_EPOCH_2_0, PEER_VERSION_EPOCH_2_05, + PEER_VERSION_EPOCH_2_1, PEER_VERSION_EPOCH_2_2, PEER_VERSION_EPOCH_2_3, PEER_VERSION_EPOCH_2_4, + PEER_VERSION_EPOCH_2_5, PEER_VERSION_EPOCH_3_0, +}; +use stacks_common::address::AddressHashMode; +use stacks_common::consts::STACKS_EPOCH_MAX; +use stacks_common::types::chainstate::StacksAddress; +use stacks_common::util::hash::to_hex; +use stacks_common::util::secp256k1::Secp256k1PrivateKey; +use std::sync::atomic::Ordering; +use std::time::{Duration, Instant}; +use std::{env, thread}; + +use super::bitcoin_regtest::BitcoinCoreController; +use crate::mockamoto::signer::SelfSigner; +use crate::run_loop::nakamoto; +use crate::tests::neon_integrations::{ + next_block_and_wait, run_until_burnchain_height, submit_tx, wait_for_runloop, +}; +use crate::{ + neon, tests, BitcoinRegtestController, BurnchainController, Config, ConfigFile, Keychain, +}; +use lazy_static::lazy_static; + +lazy_static! 
{ + pub static ref NAKAMOTO_INTEGRATION_EPOCHS: [StacksEpoch; 9] = [ + StacksEpoch { + epoch_id: StacksEpochId::Epoch10, + start_height: 0, + end_height: 0, + block_limit: BLOCK_LIMIT_MAINNET_10.clone(), + network_epoch: PEER_VERSION_EPOCH_1_0 + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch20, + start_height: 0, + end_height: 1, + block_limit: HELIUM_BLOCK_LIMIT_20.clone(), + network_epoch: PEER_VERSION_EPOCH_2_0 + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch2_05, + start_height: 1, + end_height: 2, + block_limit: HELIUM_BLOCK_LIMIT_20.clone(), + network_epoch: PEER_VERSION_EPOCH_2_05 + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch21, + start_height: 2, + end_height: 3, + block_limit: HELIUM_BLOCK_LIMIT_20.clone(), + network_epoch: PEER_VERSION_EPOCH_2_1 + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch22, + start_height: 3, + end_height: 4, + block_limit: HELIUM_BLOCK_LIMIT_20.clone(), + network_epoch: PEER_VERSION_EPOCH_2_2 + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch23, + start_height: 4, + end_height: 5, + block_limit: HELIUM_BLOCK_LIMIT_20.clone(), + network_epoch: PEER_VERSION_EPOCH_2_3 + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch24, + start_height: 5, + end_height: 6, + block_limit: HELIUM_BLOCK_LIMIT_20.clone(), + network_epoch: PEER_VERSION_EPOCH_2_4 + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch25, + start_height: 6, + end_height: 220, + block_limit: HELIUM_BLOCK_LIMIT_20.clone(), + network_epoch: PEER_VERSION_EPOCH_2_5 + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch30, + start_height: 220, + end_height: STACKS_EPOCH_MAX, + block_limit: HELIUM_BLOCK_LIMIT_20.clone(), + network_epoch: PEER_VERSION_EPOCH_3_0 + }, + ]; +} + +/// Return a working nakamoto-neon config and the miner's bitcoin address to fund +pub fn naka_neon_integration_conf(seed: Option<&[u8]>) -> (Config, StacksAddress) { + let mut conf = super::new_test_conf(); + conf.burnchain.mode = "nakamoto-neon".into(); + + // tests can override 
this, but these tests run with epoch 2.05 by default + conf.burnchain.epochs = Some(NAKAMOTO_INTEGRATION_EPOCHS.to_vec()); + + if let Some(seed) = seed { + conf.node.seed = seed.to_vec(); + } + + // instantiate the keychain so we can fund the bitcoin op signer + let keychain = Keychain::default(conf.node.seed.clone()); + + let mining_key = Secp256k1PrivateKey::from_seed(&[1]); + conf.miner.mining_key = Some(mining_key); + conf.miner.self_signing_key = Some(SelfSigner::single_signer()); + + conf.node.miner = true; + conf.node.wait_time_for_microblocks = 500; + conf.burnchain.burn_fee_cap = 20000; + + conf.burnchain.username = Some("neon-tester".into()); + conf.burnchain.password = Some("neon-tester-pass".into()); + conf.burnchain.peer_host = "127.0.0.1".into(); + conf.burnchain.local_mining_public_key = + Some(keychain.generate_op_signer().get_public_key().to_hex()); + conf.burnchain.commit_anchor_block_within = 0; + + // test to make sure config file parsing is correct + let mut cfile = ConfigFile::xenon(); + cfile.node.as_mut().map(|node| node.bootstrap_node.take()); + + if let Some(burnchain) = cfile.burnchain.as_mut() { + burnchain.peer_host = Some("127.0.0.1".to_string()); + } + + conf.burnchain.magic_bytes = MagicBytes::from(['T' as u8, '3' as u8].as_ref()); + conf.burnchain.poll_time_secs = 1; + conf.node.pox_sync_sample_secs = 0; + + conf.miner.min_tx_fee = 1; + conf.miner.first_attempt_time_ms = i64::max_value() as u64; + conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64; + + // if there's just one node, then this must be true for tests to pass + conf.miner.wait_for_block_download = false; + + conf.node.mine_microblocks = false; + conf.miner.microblock_attempt_time_ms = 10; + conf.node.microblock_frequency = 0; + conf.node.wait_time_for_blocks = 200; + + let miner_account = keychain.origin_address(conf.is_mainnet()).unwrap(); + + conf.burnchain.pox_prepare_length = Some(5); + conf.burnchain.pox_reward_length = Some(20); + + (conf, 
miner_account) +} + +pub fn next_block_and( + btc_controller: &mut BitcoinRegtestController, + timeout_secs: u64, + mut check: F, +) -> Result<(), String> +where + F: FnMut() -> Result, +{ + eprintln!("Issuing bitcoin block"); + btc_controller.build_next_block(1); + let start = Instant::now(); + while !check()? { + if start.elapsed() > Duration::from_secs(timeout_secs) { + error!("Timed out waiting for block to process, trying to continue test"); + return Err("Timed out".into()); + } + thread::sleep(Duration::from_millis(100)); + } + Ok(()) +} + +#[test] +#[ignore] +fn simple_neon_integration() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); + let stacker_sk = Secp256k1PrivateKey::new(); + let stacker_address = tests::to_addr(&stacker_sk); + naka_conf.add_initial_balance( + PrincipalData::from(stacker_address.clone()).to_string(), + 100_000_000_000_000, + ); + + let epoch_2_conf = naka_conf.clone(); + + let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); + + let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); + let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); + btc_regtest_controller.bootstrap_chain(201); + + info!("Chain bootstrapped to bitcoin block 201, starting a epoch-2x miner"); + + let mut run_loop = neon::RunLoop::new(epoch_2_conf.clone()); + + let epoch_2_stopper = run_loop.get_termination_switch(); + let blocks_processed = run_loop.get_blocks_processed_arc(); + let epoch_2_thread = thread::spawn(move || run_loop.start(None, 0)); + wait_for_runloop(&blocks_processed); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + // first mined stacks block + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + // 
stack enough to activate pox-4 + let pox_addr_tuple = clarity::vm::tests::execute(&format!( + "{{ hashbytes: 0x{}, version: 0x{:02x} }}", + to_hex(&[0; 20]), + AddressHashMode::SerializeP2PKH as u8, + )); + + let stacking_tx = tests::make_contract_call( + &stacker_sk, + 0, + 1000, + &StacksAddress::burn_address(false), + "pox-4", + "stack-stx", + &[ + clarity::vm::Value::UInt(99_000_000_000_000), + pox_addr_tuple, + clarity::vm::Value::UInt(205), + clarity::vm::Value::UInt(12), + ], + ); + + submit_tx(&http_origin, &stacking_tx); + + run_until_burnchain_height( + &mut btc_regtest_controller, + &blocks_processed, + 219, + &epoch_2_conf, + ); + + info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner"); + epoch_2_stopper.store(false, Ordering::SeqCst); + + epoch_2_thread.join().unwrap(); + + let mut run_loop = nakamoto::RunLoop::new(naka_conf.clone()); + let epoch_3_stopper = run_loop.get_termination_switch(); + let blocks_processed = run_loop.get_blocks_processed_arc(); + let vrfs_submitted = run_loop.submitted_vrfs(); + let commits_submitted = run_loop.submitted_commits(); + let blocks_mined = run_loop.submitted_commits(); + let coord_channel = run_loop.get_coordinator_channel().unwrap(); + + let epoch_3_thread = thread::spawn(move || run_loop.start(None, 0)); + + wait_for_runloop(&blocks_processed); + info!("Nakamoto miner started..."); + // first block wakes up the run loop, wait until a key registration has been submitted. 
+ next_block_and(&mut btc_regtest_controller, 60, || { + let vrf_count = vrfs_submitted.load(Ordering::SeqCst); + Ok(vrf_count >= 1) + }) + .unwrap(); + + // second block should confirm the VRF register, wait until a block commit is submitted + next_block_and(&mut btc_regtest_controller, 60, || { + let commits_count = commits_submitted.load(Ordering::SeqCst); + Ok(commits_count >= 1) + }) + .unwrap(); + + let blocks_processed_before_mining = coord_channel.get_stacks_blocks_processed(); + + // this block should perform the sortition, wait until a block is mined + next_block_and(&mut btc_regtest_controller, 60, || { + let mined_count = blocks_mined.load(Ordering::SeqCst); + Ok(mined_count >= 1) + }) + .unwrap(); + + // wait until the coordinator has processed the new block(s) + while coord_channel.get_stacks_blocks_processed() <= blocks_processed_before_mining { + thread::sleep(Duration::from_secs(1)); + } + + // load the chain tip, and assert that it is a nakamoto block + + let burnchain = naka_conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + let (chainstate, _) = StacksChainState::open( + naka_conf.is_mainnet(), + naka_conf.burnchain.chain_id, + &naka_conf.get_chainstate_path_str(), + None, + ) + .unwrap(); + + let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + info!( + "Latest tip"; + "is_nakamoto" => tip.anchored_header.as_stacks_nakamoto().is_some(), + ); + + assert!(tip.anchored_header.as_stacks_nakamoto().is_some()); + + coord_channel.stop_chains_coordinator(); + + epoch_3_stopper.store(false, Ordering::SeqCst); + epoch_3_thread.join().unwrap(); +} diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index b1e68d26d7..455e414208 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -483,7 +483,7 @@ pub mod test_observer { } } -const 
PANIC_TIMEOUT_SECS: u64 = 600; +const PANIC_TIMEOUT_SECS: u64 = 30; /// Returns `false` on a timeout, true otherwise. pub fn next_block_and_wait( @@ -556,7 +556,7 @@ pub fn next_block_and_iterate( /// reaches *exactly* `target_height`. /// /// Returns `false` if `next_block_and_wait` times out. -fn run_until_burnchain_height( +pub fn run_until_burnchain_height( btc_regtest_controller: &mut BitcoinRegtestController, blocks_processed: &Arc, target_height: u64, From c0ab89f312d8aa110ef7cdb29393603f380feeb2 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Thu, 7 Dec 2023 14:13:34 -0600 Subject: [PATCH 02/16] expand first nakamoto-neon test, update block commit logic to issue commits at tenure_id changes, cargo fmt-stacks --- .../burnchains/bitcoin_regtest_controller.rs | 10 +- testnet/stacks-node/src/globals.rs | 13 +- testnet/stacks-node/src/nakamoto_node.rs | 14 +- .../stacks-node/src/nakamoto_node/miner.rs | 26 +- testnet/stacks-node/src/nakamoto_node/peer.rs | 19 +- .../stacks-node/src/nakamoto_node/relayer.rs | 95 +++---- testnet/stacks-node/src/neon_node.rs | 3 +- .../src/tests/nakamoto_integrations.rs | 255 ++++++++++++++---- 8 files changed, 285 insertions(+), 150 deletions(-) diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index ad83dd6f57..0ed1bb0e03 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -8,7 +8,8 @@ use async_h1::client; use async_std::io::ReadExt; use async_std::net::TcpStream; use base64::encode; - +#[cfg(test)] +use clarity::vm::types::PrincipalData; use http_types::{Method, Request, Url}; use serde::Serialize; use serde_json::json; @@ -51,15 +52,12 @@ use stacks_common::deps_common::bitcoin::network::serialize::deserialize as btc_ use stacks_common::deps_common::bitcoin::network::serialize::RawEncoder; use 
stacks_common::deps_common::bitcoin::util::hash::Sha256dHash; use stacks_common::types::chainstate::BurnchainHeaderHash; +#[cfg(test)] +use stacks_common::types::chainstate::StacksAddress; use stacks_common::util::hash::{hex_bytes, Hash160}; use stacks_common::util::secp256k1::Secp256k1PublicKey; use stacks_common::util::sleep_ms; -#[cfg(test)] -use clarity::vm::types::PrincipalData; -#[cfg(test)] -use stacks_common::types::chainstate::StacksAddress; - use super::super::operations::BurnchainOpSigner; use super::super::Config; use super::{BurnchainController, BurnchainTip, Error as BurnchainControllerError}; diff --git a/testnet/stacks-node/src/globals.rs b/testnet/stacks-node/src/globals.rs index acace012f8..7e9e47a8fe 100644 --- a/testnet/stacks-node/src/globals.rs +++ b/testnet/stacks-node/src/globals.rs @@ -1,8 +1,6 @@ -use std::sync::atomic::AtomicBool; -use std::sync::atomic::Ordering; +use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::mpsc::SyncSender; -use std::sync::Arc; -use std::sync::Mutex; +use std::sync::{Arc, Mutex}; use stacks::burnchains::Txid; use stacks::chainstate::burn::operations::LeaderKeyRegisterOp; @@ -12,16 +10,13 @@ use stacks::chainstate::stacks::db::unconfirmed::UnconfirmedTxMap; use stacks::chainstate::stacks::db::StacksChainState; use stacks::chainstate::stacks::miner::MinerStatus; use stacks::net::NetworkResult; -use stacks_common::types::chainstate::BlockHeaderHash; -use stacks_common::types::chainstate::BurnchainHeaderHash; -use stacks_common::types::chainstate::ConsensusHash; +use stacks_common::types::chainstate::{BlockHeaderHash, BurnchainHeaderHash, ConsensusHash}; use crate::neon::Counters; +use crate::neon_node::LeaderKeyRegistrationState; use crate::run_loop::RegisteredKey; use crate::syncctl::PoxSyncWatchdogComms; -use crate::neon_node::LeaderKeyRegistrationState; - /// Command types for the relayer thread, issued to it by other threads pub enum RelayerDirective { /// Handle some new data that arrived on the 
network (such as blocks, transactions, and diff --git a/testnet/stacks-node/src/nakamoto_node.rs b/testnet/stacks-node/src/nakamoto_node.rs index 1c71b09045..de0d04cfb5 100644 --- a/testnet/stacks-node/src/nakamoto_node.rs +++ b/testnet/stacks-node/src/nakamoto_node.rs @@ -20,13 +20,6 @@ use std::sync::mpsc::Receiver; use std::thread; use std::thread::JoinHandle; -use super::{Config, EventDispatcher, Keychain}; -use crate::burnchains::bitcoin_regtest_controller::addr2str; -use crate::globals::Globals; -use crate::globals::RelayerDirective; -use crate::neon_node::LeaderKeyRegistrationState; -use crate::run_loop::nakamoto::RunLoop; -use crate::run_loop::RegisteredKey; use clarity::vm::ast::ASTRules; use clarity::vm::types::QualifiedContractIdentifier; use stacks::burnchains::{Burnchain, BurnchainSigner, Txid}; @@ -52,6 +45,13 @@ use stacks_common::types::StacksEpochId; use stacks_common::util::get_epoch_time_secs; use stacks_common::util::secp256k1::Secp256k1PrivateKey; +use super::{Config, EventDispatcher, Keychain}; +use crate::burnchains::bitcoin_regtest_controller::addr2str; +use crate::globals::{Globals, RelayerDirective}; +use crate::neon_node::LeaderKeyRegistrationState; +use crate::run_loop::nakamoto::RunLoop; +use crate::run_loop::RegisteredKey; + pub mod miner; pub mod peer; pub mod relayer; diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index cb9942d451..2d2d88293a 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -18,14 +18,6 @@ use std::thread; use std::thread::JoinHandle; use std::time::Instant; -use super::relayer::RelayerThread; -use super::Error as NakamotoNodeError; -use super::{Config, EventDispatcher, Keychain}; -use crate::globals::Globals; -use crate::mockamoto::signer::SelfSigner; -use crate::nakamoto_node::VRF_MOCK_MINER_KEY; -use crate::run_loop::RegisteredKey; -use crate::ChainTip; use 
clarity::vm::types::PrincipalData; use stacks::burnchains::{Burnchain, BurnchainParameters}; use stacks::chainstate::burn::db::sortdb::SortitionDB; @@ -33,12 +25,9 @@ use stacks::chainstate::burn::{BlockSnapshot, ConsensusHash}; use stacks::chainstate::nakamoto::miner::{NakamotoBlockBuilder, NakamotoTenureStart}; use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState}; use stacks::chainstate::stacks::db::{StacksChainState, StacksHeaderInfo}; -use stacks::chainstate::stacks::Error as ChainstateError; -use stacks::chainstate::stacks::TenureChangeCause; -use stacks::chainstate::stacks::TenureChangePayload; -use stacks::chainstate::stacks::ThresholdSignature; use stacks::chainstate::stacks::{ - CoinbasePayload, StacksTransaction, StacksTransactionSigner, TransactionAnchorMode, + CoinbasePayload, Error as ChainstateError, StacksTransaction, StacksTransactionSigner, + TenureChangeCause, TenureChangePayload, ThresholdSignature, TransactionAnchorMode, TransactionPayload, TransactionVersion, }; use stacks::core::mempool::MemPoolDB; @@ -46,11 +35,18 @@ use stacks::core::FIRST_BURNCHAIN_CONSENSUS_HASH; use stacks::cost_estimates::metrics::UnitMetric; use stacks::cost_estimates::UnitEstimator; use stacks_common::types::chainstate::{StacksAddress, StacksBlockId}; -use stacks_common::types::PrivateKey; -use stacks_common::types::StacksEpochId; +use stacks_common::types::{PrivateKey, StacksEpochId}; use stacks_common::util::hash::Hash160; use stacks_common::util::vrf::VRFProof; +use super::relayer::RelayerThread; +use super::{Config, Error as NakamotoNodeError, EventDispatcher, Keychain}; +use crate::globals::Globals; +use crate::mockamoto::signer::SelfSigner; +use crate::nakamoto_node::VRF_MOCK_MINER_KEY; +use crate::run_loop::RegisteredKey; +use crate::ChainTip; + pub enum MinerDirective { /// The miner won sortition so they should begin a new tenure BeginTenure { diff --git a/testnet/stacks-node/src/nakamoto_node/peer.rs 
b/testnet/stacks-node/src/nakamoto_node/peer.rs index 8fe688972e..9f2a37c50d 100644 --- a/testnet/stacks-node/src/nakamoto_node/peer.rs +++ b/testnet/stacks-node/src/nakamoto_node/peer.rs @@ -13,45 +13,32 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use std::cmp; use std::collections::VecDeque; - use std::default::Default; use std::net::SocketAddr; use std::sync::mpsc::TrySendError; - -use std::thread; use std::time::Duration; +use std::{cmp, thread}; use stacks::burnchains::db::BurnchainHeaderReader; use stacks::burnchains::PoxConstants; use stacks::chainstate::burn::db::sortdb::SortitionDB; - use stacks::chainstate::stacks::db::StacksChainState; use stacks::chainstate::stacks::miner::signal_mining_blocked; - use stacks::core::mempool::MemPoolDB; - use stacks::cost_estimates::metrics::{CostMetric, UnitMetric}; use stacks::cost_estimates::{CostEstimator, FeeEstimator, UnitEstimator}; - use stacks::net::dns::{DNSClient, DNSResolver}; use stacks::net::p2p::PeerNetwork; - use stacks::net::RPCHandlerArgs; - use stacks_common::util::hash::Sha256Sum; +use super::open_chainstate_with_faults; use crate::burnchains::make_bitcoin_indexer; -use crate::globals::Globals; -use crate::globals::RelayerDirective; - +use crate::globals::{Globals, RelayerDirective}; use crate::run_loop::nakamoto::RunLoop; - use crate::{Config, EventDispatcher}; -use super::open_chainstate_with_faults; - /// Thread that runs the network state machine, handling both p2p and http requests. pub struct PeerThread { /// Node config diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index a90b17866f..6aa4568d0b 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -13,6 +13,11 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
+use std::collections::HashMap; +use std::sync::mpsc::{Receiver, RecvTimeoutError}; +use std::thread::JoinHandle; +use std::time::{Duration, Instant}; + use stacks::burnchains::{Burnchain, Txid}; use stacks::chainstate::burn::db::sortdb::SortitionDB; use stacks::chainstate::burn::operations::leader_block_commit::{ @@ -30,9 +35,9 @@ use stacks::chainstate::stacks::miner::{ get_mining_spend_amount, signal_mining_blocked, signal_mining_ready, }; use stacks::core::mempool::MemPoolDB; -use stacks::core::FIRST_BURNCHAIN_CONSENSUS_HASH; -use stacks::core::FIRST_STACKS_BLOCK_HASH; -use stacks::core::STACKS_EPOCH_3_0_MARKER; +use stacks::core::{ + FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, STACKS_EPOCH_3_0_MARKER, +}; use stacks::cost_estimates::metrics::UnitMetric; use stacks::cost_estimates::UnitEstimator; use stacks::monitoring::increment_stx_blocks_mined_counter; @@ -46,21 +51,13 @@ use stacks_common::types::StacksEpochId; use stacks_common::util::get_epoch_time_ms; use stacks_common::util::hash::Hash160; use stacks_common::util::vrf::{VRFProof, VRFPublicKey}; -use std::collections::HashMap; -use std::sync::mpsc::Receiver; -use std::sync::mpsc::RecvTimeoutError; -use std::thread::JoinHandle; -use std::time::Duration; -use std::time::Instant; -use super::Error as NakamotoNodeError; use super::{ fault_injection_skip_mining, open_chainstate_with_faults, BlockCommits, Config, - EventDispatcher, Keychain, BLOCK_PROCESSOR_STACK_SIZE, + Error as NakamotoNodeError, EventDispatcher, Keychain, BLOCK_PROCESSOR_STACK_SIZE, }; use crate::burnchains::BurnchainController; -use crate::globals::Globals; -use crate::globals::RelayerDirective; +use crate::globals::{Globals, RelayerDirective}; use crate::nakamoto_node::miner::{BlockMinerThread, MinerDirective}; use crate::neon_node::LeaderKeyRegistrationState; use crate::run_loop::nakamoto::RunLoop; @@ -127,8 +124,9 @@ pub struct RelayerThread { /// to check if it should issue a block commit or try to register a VRF key 
next_initiative: Instant, is_miner: bool, - /// This is the last snapshot in which the relayer committed - last_committed_at: Option, + /// This is the last snapshot in which the relayer committed, and the parent_tenure_id + /// which was committed to + last_committed: Option<(BlockSnapshot, StacksBlockId)>, } impl RelayerThread { @@ -193,7 +191,7 @@ impl RelayerThread { miner_thread: None, is_miner, next_initiative: Instant::now() + Duration::from_secs(10), - last_committed_at: None, + last_committed: None, } } @@ -759,7 +757,10 @@ impl RelayerThread { ); self.last_commits.insert(txid, ()); - self.last_committed_at = Some(last_committed_at); + self.last_committed = Some(( + last_committed_at, + StacksBlockId::new(&tenure_start_ch, &tenure_start_bh), + )); self.globals.counters.bump_naka_submitted_commits(); Ok(()) @@ -800,7 +801,10 @@ impl RelayerThread { return None; }; - let should_commit = if let Some(last_committed_at) = self.last_committed_at.as_ref() { + // check if the burnchain changed, if so, we should issue a commit. + // if not, we may still want to update a commit if we've received a new tenure start block + let burnchain_changed = if let Some((last_committed_at, ..)) = self.last_committed.as_ref() + { // if the new sortition tip has a different consesus hash than the last commit, // issue a new commit sort_tip.consensus_hash != last_committed_at.consensus_hash @@ -820,37 +824,38 @@ impl RelayerThread { )); }; - if should_commit { - // TODO: just use `get_block_header_by_consensus_hash`? 
- let first_block_hash = if chain_tip_header - .anchored_header - .as_stacks_nakamoto() - .is_some() - { - // if the parent block is a nakamoto block, find the starting block of its tenure - let Ok(Some(first_block)) = - NakamotoChainState::get_nakamoto_tenure_start_block_header( - self.chainstate_ref().db(), - &chain_tip_header.consensus_hash, - ) - else { - warn!("Failure getting the first block of tenure in order to assemble block commit"; - "tenure_consensus_hash" => %chain_tip_header.consensus_hash, - "tip_block_hash" => %chain_tip_header.anchored_header.block_hash()); - return None; - }; - first_block.anchored_header.block_hash() + // get the starting block of the chain tip's tenure + let Ok(Some(chain_tip_tenure_start)) = + NakamotoChainState::get_block_header_by_consensus_hash( + self.chainstate_ref().db(), + &chain_tip_header.consensus_hash, + ) + else { + warn!("Failure getting the first block of tenure in order to assemble block commit"; + "tenure_consensus_hash" => %chain_tip_header.consensus_hash, + "tip_block_hash" => %chain_tip_header.anchored_header.block_hash()); + return None; + }; + + let chain_tip_tenure_id = chain_tip_tenure_start.index_block_hash(); + let should_commit = burnchain_changed + || if let Some((_, last_committed_tenure_id)) = self.last_committed.as_ref() { + // if the tenure ID of the chain tip has changed, issue a new commit + last_committed_tenure_id != &chain_tip_tenure_id } else { - // otherwise the parent block is a epoch2 block, just return its hash directly - chain_tip_header.anchored_header.block_hash() + // should be unreachable, but either way, if + // `self.last_committed` is None, we should issue a commit + true }; - return Some(RelayerDirective::NakamotoTenureStartProcessed( + + if should_commit { + Some(RelayerDirective::NakamotoTenureStartProcessed( chain_tip_header.consensus_hash, - first_block_hash, - )); + chain_tip_header.anchored_header.block_hash(), + )) + } else { + None } - - return None; } /// Main loop of 
the relayer. diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index c23bf1fc19..a3821fae2b 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -206,8 +206,7 @@ use crate::burnchains::bitcoin_regtest_controller::{ addr2str, BitcoinRegtestController, OngoingBlockCommit, }; use crate::burnchains::make_bitcoin_indexer; -use crate::globals::Globals; -use crate::globals::RelayerDirective; +use crate::globals::{Globals, RelayerDirective}; use crate::run_loop::neon::RunLoop; use crate::run_loop::RegisteredKey; use crate::ChainTip; diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index efa36ea1e5..a7be83272f 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -1,32 +1,43 @@ +use std::sync::atomic::{AtomicU64, Ordering}; +use std::sync::Arc; +use std::time::{Duration, Instant}; +use std::{env, thread}; + +use clarity::vm::costs::ExecutionCost; use clarity::vm::types::PrincipalData; +use lazy_static::lazy_static; use stacks::burnchains::MagicBytes; +use stacks::chainstate::coordinator::comm::CoordinatorChannels; use stacks::chainstate::nakamoto::NakamotoChainState; use stacks::chainstate::stacks::db::StacksChainState; use stacks::core::{ - StacksEpoch, StacksEpochId, BLOCK_LIMIT_MAINNET_10, HELIUM_BLOCK_LIMIT_20, + MemPoolDB, StacksEpoch, StacksEpochId, BLOCK_LIMIT_MAINNET_10, HELIUM_BLOCK_LIMIT_20, PEER_VERSION_EPOCH_1_0, PEER_VERSION_EPOCH_2_0, PEER_VERSION_EPOCH_2_05, PEER_VERSION_EPOCH_2_1, PEER_VERSION_EPOCH_2_2, PEER_VERSION_EPOCH_2_3, PEER_VERSION_EPOCH_2_4, PEER_VERSION_EPOCH_2_5, PEER_VERSION_EPOCH_3_0, }; +use stacks::cost_estimates::metrics::UnitMetric; +use stacks::cost_estimates::UnitEstimator; use stacks_common::address::AddressHashMode; use stacks_common::consts::STACKS_EPOCH_MAX; use 
stacks_common::types::chainstate::StacksAddress; use stacks_common::util::hash::to_hex; use stacks_common::util::secp256k1::Secp256k1PrivateKey; -use std::sync::atomic::Ordering; -use std::time::{Duration, Instant}; -use std::{env, thread}; use super::bitcoin_regtest::BitcoinCoreController; +use crate::config::{EventKeyType, EventObserverConfig}; use crate::mockamoto::signer::SelfSigner; use crate::run_loop::nakamoto; +use crate::tests::make_stacks_transfer; use crate::tests::neon_integrations::{ - next_block_and_wait, run_until_burnchain_height, submit_tx, wait_for_runloop, + next_block_and_wait, run_until_burnchain_height, submit_tx, test_observer, wait_for_runloop, }; use crate::{ neon, tests, BitcoinRegtestController, BurnchainController, Config, ConfigFile, Keychain, }; -use lazy_static::lazy_static; + +static POX_4_DEFAULT_STACKER_BALANCE: u64 = 100_000_000_000_000; +static POX_4_DEFAULT_STACKER_STX_AMT: u128 = 99_000_000_000_000; lazy_static! { pub static ref NAKAMOTO_INTEGRATION_EPOCHS: [StacksEpoch; 9] = [ @@ -179,44 +190,83 @@ where Ok(()) } -#[test] -#[ignore] -fn simple_neon_integration() { - if env::var("BITCOIND_TEST") != Ok("1".into()) { - return; - } +/// Mine a bitcoin block, and wait until: +/// (1) a new block has been processed by the coordinator +/// (2) 2 block commits have been issued ** or ** more than 10 seconds have +/// passed since (1) occurred +fn next_block_and_mine_commit( + btc_controller: &mut BitcoinRegtestController, + timeout_secs: u64, + coord_channels: &CoordinatorChannels, + commits_submitted: &Arc, +) -> Result<(), String> { + let commits_submitted = commits_submitted.clone(); + let blocks_processed_before = coord_channels.get_stacks_blocks_processed(); + let commits_before = commits_submitted.load(Ordering::SeqCst); + let mut block_processed_time: Option = None; + next_block_and(btc_controller, timeout_secs, || { + if let Some(block_processed_time) = block_processed_time.as_ref() { + let commits_sent = 
commits_submitted.load(Ordering::SeqCst); + if commits_sent >= commits_before + 2 { + return Ok(true); + } + if commits_sent >= commits_before + 1 + && block_processed_time.elapsed() > Duration::from_secs(10) + { + return Ok(true); + } + Ok(false) + } else { + let blocks_processed = coord_channels.get_stacks_blocks_processed(); + if blocks_processed > blocks_processed_before { + block_processed_time.replace(Instant::now()); + } + Ok(false) + } + }) +} - let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); +fn setup_stacker(naka_conf: &mut Config) -> Secp256k1PrivateKey { let stacker_sk = Secp256k1PrivateKey::new(); let stacker_address = tests::to_addr(&stacker_sk); naka_conf.add_initial_balance( PrincipalData::from(stacker_address.clone()).to_string(), - 100_000_000_000_000, + POX_4_DEFAULT_STACKER_BALANCE, ); + stacker_sk +} +/// +/// * `stacker_sk` - must be a private key for sending a large `stack-stx` transaction in order +/// for pox-4 to activate +fn boot_to_epoch_3( + naka_conf: &Config, + stacker_sk: Secp256k1PrivateKey, + btc_regtest_controller: &mut BitcoinRegtestController, +) { let epoch_2_conf = naka_conf.clone(); - - let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); - btcd_controller - .start_bitcoind() - .expect("Failed starting bitcoind"); - - let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); - let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); btc_regtest_controller.bootstrap_chain(201); - info!("Chain bootstrapped to bitcoin block 201, starting a epoch-2x miner"); + let epochs = epoch_2_conf.burnchain.epochs.clone().unwrap(); + let epoch_3 = &epochs[StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch30).unwrap()]; + + info!( + "Chain bootstrapped to bitcoin block 201, starting Epoch 2x miner"; + "Epoch 3.0 Boundary" => (epoch_3.start_height - 1), + ); + let http_origin = format!("http://{}", &epoch_2_conf.node.rpc_bind); let mut run_loop = 
neon::RunLoop::new(epoch_2_conf.clone()); let epoch_2_stopper = run_loop.get_termination_switch(); let blocks_processed = run_loop.get_blocks_processed_arc(); let epoch_2_thread = thread::spawn(move || run_loop.start(None, 0)); wait_for_runloop(&blocks_processed); - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + next_block_and_wait(btc_regtest_controller, &blocks_processed); + next_block_and_wait(btc_regtest_controller, &blocks_processed); // first mined stacks block - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + next_block_and_wait(btc_regtest_controller, &blocks_processed); + // stack enough to activate pox-4 let pox_addr_tuple = clarity::vm::tests::execute(&format!( "{{ hashbytes: 0x{}, version: 0x{:02x} }}", @@ -232,7 +282,7 @@ fn simple_neon_integration() { "pox-4", "stack-stx", &[ - clarity::vm::Value::UInt(99_000_000_000_000), + clarity::vm::Value::UInt(POX_4_DEFAULT_STACKER_STX_AMT), pox_addr_tuple, clarity::vm::Value::UInt(205), clarity::vm::Value::UInt(12), @@ -242,23 +292,82 @@ fn simple_neon_integration() { submit_tx(&http_origin, &stacking_tx); run_until_burnchain_height( - &mut btc_regtest_controller, + btc_regtest_controller, &blocks_processed, - 219, + epoch_3.start_height - 1, &epoch_2_conf, ); - info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner"); + info!("Bootstrapped to Epoch-3.0 boundary, stopping Epoch2x miner"); epoch_2_stopper.store(false, Ordering::SeqCst); - epoch_2_thread.join().unwrap(); +} + +#[test] +#[ignore] +/// This test spins up a nakamoto-neon node. +/// It starts in Epoch 2.0, mines with `neon_node` to Epoch 3.0, and then switches +/// to Nakamoto operation (activating pox-4 by submitting a stack-stx tx). +/// This test makes three assertions: +/// * 30 blocks are mined after 3.0 starts. 
This is enough to mine across 2 reward cycles +/// * A transaction submitted to the mempool in 3.0 will be mined in 3.0 +/// * The final chain tip is a nakamoto block +fn simple_neon_integration() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); + let sender_sk = Secp256k1PrivateKey::new(); + // setup sender + recipient for a test stx transfer + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 1000; + let send_fee = 100; + naka_conf.add_initial_balance( + PrincipalData::from(sender_addr.clone()).to_string(), + send_amt + send_fee, + ); + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let stacker_sk = setup_stacker(&mut naka_conf); + + let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); + let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); + + boot_to_epoch_3(&naka_conf, stacker_sk, &mut btc_regtest_controller); + + info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner"); + + let burnchain = naka_conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + let (mut chainstate, _) = StacksChainState::open( + naka_conf.is_mainnet(), + naka_conf.burnchain.chain_id, + &naka_conf.get_chainstate_path_str(), + None, + ) + .unwrap(); + + let block_height_pre_3_0 = + NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap() + .stacks_block_height; + + test_observer::spawn(); + let observer_port = test_observer::EVENT_OBSERVER_PORT; + naka_conf.events_observers.insert(EventObserverConfig { + endpoint: format!("localhost:{observer_port}"), + events_keys: vec![EventKeyType::AnyEvent], + }); let mut run_loop = nakamoto::RunLoop::new(naka_conf.clone()); let epoch_3_stopper = run_loop.get_termination_switch(); let blocks_processed = 
run_loop.get_blocks_processed_arc(); let vrfs_submitted = run_loop.submitted_vrfs(); let commits_submitted = run_loop.submitted_commits(); - let blocks_mined = run_loop.submitted_commits(); let coord_channel = run_loop.get_coordinator_channel().unwrap(); let epoch_3_thread = thread::spawn(move || run_loop.start(None, 0)); @@ -279,41 +388,87 @@ fn simple_neon_integration() { }) .unwrap(); - let blocks_processed_before_mining = coord_channel.get_stacks_blocks_processed(); - - // this block should perform the sortition, wait until a block is mined - next_block_and(&mut btc_regtest_controller, 60, || { - let mined_count = blocks_mined.load(Ordering::SeqCst); - Ok(mined_count >= 1) - }) - .unwrap(); - - // wait until the coordinator has processed the new block(s) - while coord_channel.get_stacks_blocks_processed() <= blocks_processed_before_mining { - thread::sleep(Duration::from_secs(1)); + // Mine 15 nakamoto tenures + for _i in 0..15 { + next_block_and_mine_commit( + &mut btc_regtest_controller, + 60, + &coord_channel, + &commits_submitted, + ) + .unwrap(); } - // load the chain tip, and assert that it is a nakamoto block + // Submit a TX + let transfer_tx = make_stacks_transfer(&sender_sk, 0, send_fee, &recipient, send_amt); + let transfer_tx_hex = format!("0x{}", to_hex(&transfer_tx)); - let burnchain = naka_conf.get_burnchain(); - let sortdb = burnchain.open_sortition_db(true).unwrap(); - let (chainstate, _) = StacksChainState::open( + let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + + let mut mempool = MemPoolDB::open( naka_conf.is_mainnet(), naka_conf.burnchain.chain_id, &naka_conf.get_chainstate_path_str(), - None, + Box::new(UnitEstimator), + Box::new(UnitMetric), ) - .unwrap(); + .expect("Database failure opening mempool"); + + mempool + .submit_raw( + &mut chainstate, + &sortdb, + &tip.consensus_hash, + &tip.anchored_header.block_hash(), + transfer_tx.clone(), + &ExecutionCost::max_value(), + 
&StacksEpochId::Epoch30, + ) + .unwrap(); + // Mine 15 more nakamoto tenures + for _i in 0..15 { + next_block_and_mine_commit( + &mut btc_regtest_controller, + 60, + &coord_channel, + &commits_submitted, + ) + .unwrap(); + } + + // load the chain tip, and assert that it is a nakamoto block and at least 30 blocks have advanced in epoch 3 let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) .unwrap() .unwrap(); info!( "Latest tip"; + "height" => tip.stacks_block_height, "is_nakamoto" => tip.anchored_header.as_stacks_nakamoto().is_some(), ); + // assert that the transfer tx was observed + let transfer_tx_included = test_observer::get_blocks() + .into_iter() + .find(|block_json| { + block_json["transactions"] + .as_array() + .unwrap() + .iter() + .find(|tx_json| tx_json["raw_tx"].as_str() == Some(&transfer_tx_hex)) + .is_some() + }) + .is_some(); + + assert!( + transfer_tx_included, + "Nakamoto node failed to include the transfer tx" + ); + assert!(tip.anchored_header.as_stacks_nakamoto().is_some()); + assert!(tip.stacks_block_height >= block_height_pre_3_0 + 30); coord_channel.stop_chains_coordinator(); From 7b7a5101de7ddf634ffe787f46d9ccdd4ffe436f Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Sat, 9 Dec 2023 13:02:21 -0600 Subject: [PATCH 03/16] feat: add boot_nakamoto to wrap the 2.x/3.x node handoff --- stackslib/src/burnchains/bitcoin/indexer.rs | 13 +- stackslib/src/core/mod.rs | 27 +++ testnet/stacks-node/src/main.rs | 4 +- .../stacks-node/src/nakamoto_node/miner.rs | 3 - .../stacks-node/src/run_loop/boot_nakamoto.rs | 205 ++++++++++++++++++ testnet/stacks-node/src/run_loop/mod.rs | 1 + testnet/stacks-node/src/run_loop/nakamoto.rs | 11 +- .../src/tests/nakamoto_integrations.rs | 99 +++++---- 8 files changed, 300 insertions(+), 63 deletions(-) create mode 100644 testnet/stacks-node/src/run_loop/boot_nakamoto.rs diff --git a/stackslib/src/burnchains/bitcoin/indexer.rs b/stackslib/src/burnchains/bitcoin/indexer.rs index 
c273a38de4..6f6b82ceec 100644 --- a/stackslib/src/burnchains/bitcoin/indexer.rs +++ b/stackslib/src/burnchains/bitcoin/indexer.rs @@ -46,7 +46,8 @@ use crate::burnchains::{ Burnchain, BurnchainBlockHeader, Error as burnchain_error, MagicBytes, BLOCKSTACK_MAGIC_MAINNET, }; use crate::core::{ - StacksEpoch, STACKS_EPOCHS_MAINNET, STACKS_EPOCHS_REGTEST, STACKS_EPOCHS_TESTNET, + StacksEpoch, StacksEpochExtension, STACKS_EPOCHS_MAINNET, STACKS_EPOCHS_REGTEST, + STACKS_EPOCHS_TESTNET, }; use crate::util_lib::db::Error as DBError; @@ -91,7 +92,7 @@ impl TryFrom for BitcoinNetworkType { /// Get the default epochs definitions for the given BitcoinNetworkType. /// Should *not* be used except by the BitcoinIndexer when no epochs vector /// was specified. -fn get_bitcoin_stacks_epochs(network_id: BitcoinNetworkType) -> Vec { +pub fn get_bitcoin_stacks_epochs(network_id: BitcoinNetworkType) -> Vec { match network_id { BitcoinNetworkType::Mainnet => STACKS_EPOCHS_MAINNET.to_vec(), BitcoinNetworkType::Testnet => STACKS_EPOCHS_TESTNET.to_vec(), @@ -1030,13 +1031,7 @@ impl BurnchainIndexer for BitcoinIndexer { /// /// It is an error (panic) to set custom epochs if running on `Mainnet`. 
fn get_stacks_epochs(&self) -> Vec { - match self.config.epochs { - Some(ref epochs) => { - assert!(self.runtime.network_id != BitcoinNetworkType::Mainnet); - epochs.clone() - } - None => get_bitcoin_stacks_epochs(self.runtime.network_id), - } + StacksEpoch::get_epochs(self.runtime.network_id, self.config.epochs.as_ref()) } /// Read downloaded headers within a range diff --git a/stackslib/src/core/mod.rs b/stackslib/src/core/mod.rs index b03fe0c8e0..38f383194e 100644 --- a/stackslib/src/core/mod.rs +++ b/stackslib/src/core/mod.rs @@ -25,6 +25,8 @@ pub use stacks_common::types::StacksEpochId; use stacks_common::util::log; pub use self::mempool::MemPoolDB; +use crate::burnchains::bitcoin::indexer::get_bitcoin_stacks_epochs; +use crate::burnchains::bitcoin::BitcoinNetworkType; use crate::burnchains::{Burnchain, Error as burnchain_error}; use crate::chainstate::burn::ConsensusHash; pub mod mempool; @@ -604,9 +606,34 @@ pub trait StacksEpochExtension { epoch_2_1_block_height: u64, ) -> Vec; fn validate_epochs(epochs: &[StacksEpoch]) -> Vec; + /// This method gets the epoch vector. + /// + /// Choose according to: + /// 1) Use the custom epochs defined on the underlying `BitcoinIndexerConfig`, if they exist. + /// 2) Use hard-coded static values, otherwise. + /// + /// It is an error (panic) to set custom epochs if running on `Mainnet`. 
+ /// + fn get_epochs( + bitcoin_network: BitcoinNetworkType, + configured_epochs: Option<&Vec>, + ) -> Vec; } impl StacksEpochExtension for StacksEpoch { + fn get_epochs( + bitcoin_network: BitcoinNetworkType, + configured_epochs: Option<&Vec>, + ) -> Vec { + match configured_epochs { + Some(epochs) => { + assert!(bitcoin_network != BitcoinNetworkType::Mainnet); + epochs.clone() + } + None => get_bitcoin_stacks_epochs(bitcoin_network), + } + } + #[cfg(test)] fn unit_test_pre_2_05(first_burnchain_height: u64) -> Vec { info!( diff --git a/testnet/stacks-node/src/main.rs b/testnet/stacks-node/src/main.rs index 8675b43132..d180aead8b 100644 --- a/testnet/stacks-node/src/main.rs +++ b/testnet/stacks-node/src/main.rs @@ -46,7 +46,7 @@ pub use self::node::{ChainTip, Node}; pub use self::run_loop::{helium, neon}; pub use self::tenure::Tenure; use crate::mockamoto::MockamotoNode; -use crate::run_loop::nakamoto; +use crate::run_loop::boot_nakamoto; fn main() { panic::set_hook(Box::new(|panic_info| { @@ -213,7 +213,7 @@ fn main() { let mut mockamoto = MockamotoNode::new(&conf).unwrap(); mockamoto.run(); } else if conf.burnchain.mode == "nakamoto-neon" { - let mut run_loop = nakamoto::RunLoop::new(conf); + let mut run_loop = boot_nakamoto::BootRunLoop::new(conf).unwrap(); run_loop.start(None, 0); } else { println!("Burnchain mode '{}' not supported", conf.burnchain.mode); diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 2d2d88293a..bc684a07bf 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -16,7 +16,6 @@ use std::convert::TryFrom; use std::thread; use std::thread::JoinHandle; -use std::time::Instant; use clarity::vm::types::PrincipalData; use stacks::burnchains::{Burnchain, BurnchainParameters}; @@ -398,8 +397,6 @@ impl BlockMinerThread { ) .expect("Database failure opening mempool"); - let assembly_start = Instant::now(); - let target_epoch_id 
= SortitionDB::get_stacks_epoch(burn_db.conn(), self.burn_block.block_height + 1) .ok()? diff --git a/testnet/stacks-node/src/run_loop/boot_nakamoto.rs b/testnet/stacks-node/src/run_loop/boot_nakamoto.rs new file mode 100644 index 0000000000..1b54c24f5a --- /dev/null +++ b/testnet/stacks-node/src/run_loop/boot_nakamoto.rs @@ -0,0 +1,205 @@ +use std::sync::atomic::{AtomicBool, Ordering}; +use std::sync::{Arc, Mutex}; +use std::thread::JoinHandle; +use std::time::Duration; +use std::{fs, thread}; + +use stacks::burnchains::Burnchain; +use stacks::chainstate::burn::db::sortdb::SortitionDB; +use stacks::chainstate::coordinator::comm::CoordinatorChannels; +use stacks::core::StacksEpochExtension; +use stacks_common::types::{StacksEpoch, StacksEpochId}; + +use crate::neon::Counters; +use crate::run_loop::nakamoto::RunLoop as NakaRunLoop; +use crate::run_loop::neon::RunLoop as NeonRunLoop; +use crate::Config; + +/// This runloop handles booting to Nakamoto: +/// During epochs [1.0, 2.5], it runs a neon run_loop. +/// Once epoch 3.0 is reached, it stops the neon run_loop +/// and starts nakamoto. +pub struct BootRunLoop { + config: Config, + active_loop: InnerLoops, + coordinator_channels: Arc>, +} + +enum InnerLoops { + Epoch2(NeonRunLoop), + Epoch3(NakaRunLoop), +} + +impl BootRunLoop { + pub fn new(config: Config) -> Result { + let (coordinator_channels, active_loop) = if !Self::reached_epoch_30_transition(&config)? { + let neon = NeonRunLoop::new(config.clone()); + ( + neon.get_coordinator_channel().unwrap(), + InnerLoops::Epoch2(neon), + ) + } else { + let naka = NakaRunLoop::new(config.clone(), None, None); + ( + naka.get_coordinator_channel().unwrap(), + InnerLoops::Epoch3(naka), + ) + }; + + Ok(BootRunLoop { + config, + active_loop, + coordinator_channels: Arc::new(Mutex::new(coordinator_channels)), + }) + } + + /// Get a mutex-guarded pointer to this run-loops coordinator channels. 
+ /// The reason this must be mutex guarded is that the run loop will switch + /// from a "neon" coordinator to a "nakamoto" coordinator, and update the + /// backing coordinator channel. That way, anyone still holding the Arc<> + /// should be able to query the new coordinator channel. + pub fn coordinator_channels(&self) -> Arc> { + self.coordinator_channels.clone() + } + + /// Get the runtime counters for the inner runloop. The nakamoto + /// runloop inherits the counters object from the neon node, + /// so no need for another layer of indirection/mutex. + pub fn counters(&self) -> Counters { + match &self.active_loop { + InnerLoops::Epoch2(x) => x.get_counters(), + InnerLoops::Epoch3(x) => x.get_counters(), + } + } + + /// Get the termination switch from the active run loop. + pub fn get_termination_switch(&self) -> Arc { + match &self.active_loop { + InnerLoops::Epoch2(x) => x.get_termination_switch(), + InnerLoops::Epoch3(x) => x.get_termination_switch(), + } + } + + /// The main entry point for the run loop. This starts either a 2.x-neon or 3.x-nakamoto + /// node depending on the current burnchain height. 
+ pub fn start(&mut self, burnchain_opt: Option, mine_start: u64) { + match self.active_loop { + InnerLoops::Epoch2(_) => return self.start_from_neon(burnchain_opt, mine_start), + InnerLoops::Epoch3(_) => return self.start_from_naka(burnchain_opt, mine_start), + } + } + + fn start_from_naka(&mut self, burnchain_opt: Option, mine_start: u64) { + let InnerLoops::Epoch3(ref mut naka_loop) = self.active_loop else { + panic!("FATAL: unexpectedly invoked start_from_naka when active loop wasn't nakamoto"); + }; + naka_loop.start(burnchain_opt, mine_start) + } + + fn start_from_neon(&mut self, burnchain_opt: Option, mine_start: u64) { + let InnerLoops::Epoch2(ref mut neon_loop) = self.active_loop else { + panic!("FATAL: unexpectedly invoked start_from_neon when active loop wasn't neon"); + }; + let termination_switch = neon_loop.get_termination_switch(); + let counters = neon_loop.get_counters(); + + let boot_thread = Self::spawn_stopper(&self.config, neon_loop) + .expect("FATAL: failed to spawn epoch-2/3-boot thread"); + neon_loop.start(burnchain_opt.clone(), mine_start); + + // did we exit because of the epoch-3.0 transition, or some other reason? 
+ let exited_for_transition = boot_thread + .join() + .expect("FATAL: failed to join epoch-2/3-boot thread"); + if !exited_for_transition { + info!("Shutting down epoch-2/3 transition thread"); + return; + } + info!("Reached Epoch-3.0 boundary, starting nakamoto node"); + termination_switch.store(true, Ordering::SeqCst); + let naka = NakaRunLoop::new( + self.config.clone(), + Some(termination_switch), + Some(counters), + ); + let new_coord_channels = naka + .get_coordinator_channel() + .expect("FATAL: should have coordinator channel in newly instantiated runloop"); + { + let mut coord_channel = self.coordinator_channels.lock().expect("Mutex poisoned"); + *coord_channel = new_coord_channels; + } + self.active_loop = InnerLoops::Epoch3(naka); + let InnerLoops::Epoch3(ref mut naka_loop) = self.active_loop else { + panic!("FATAL: unexpectedly found epoch2 loop after setting epoch3 active"); + }; + naka_loop.start(burnchain_opt, mine_start) + } + + fn spawn_stopper( + config: &Config, + neon: &NeonRunLoop, + ) -> Result, std::io::Error> { + let neon_term_switch = neon.get_termination_switch(); + let config = config.clone(); + thread::Builder::new() + .name("epoch-2/3-boot".into()) + .spawn(move || { + loop { + let do_transition = Self::reached_epoch_30_transition(&config) + .unwrap_or_else(|err| { + warn!("Error checking for Epoch-3.0 transition: {err:?}. 
Assuming transition did not occur yet."); + false + }); + if do_transition { + break; + } + if !neon_term_switch.load(Ordering::SeqCst) { + info!("Stop requested, exiting epoch-2/3-boot thread"); + return false; + } + thread::sleep(Duration::from_secs(1)); + } + // if loop exited, do the transition + info!("Epoch-3.0 boundary reached, stopping Epoch-2.x run loop"); + neon_term_switch.store(false, Ordering::SeqCst); + return true + }) + } + + fn reached_epoch_30_transition(config: &Config) -> Result { + let burn_height = Self::get_burn_height(config)?; + let epochs = StacksEpoch::get_epochs( + config.burnchain.get_bitcoin_network().1, + config.burnchain.epochs.as_ref(), + ); + let epoch_3 = &epochs[StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch30) + .ok_or("No Epoch-3.0 defined")?]; + + Ok(u64::from(burn_height) >= epoch_3.start_height - 1) + } + + fn get_burn_height(config: &Config) -> Result { + let burnchain = config.get_burnchain(); + let sortdb_path = config.get_burn_db_file_path(); + if fs::metadata(&sortdb_path).is_err() { + // if the sortition db doesn't exist yet, don't try to open() it, because that creates the + // db file even if it doesn't instantiate the tables, which breaks connect() logic. 
+ info!("Failed to open Sortition DB while checking current burn height, assuming height = 0"); + return Ok(0); + } + + let Ok(sortdb) = SortitionDB::open(&sortdb_path, false, burnchain.pox_constants.clone()) + else { + info!("Failed to open Sortition DB while checking current burn height, assuming height = 0"); + return Ok(0); + }; + + let Ok(tip_sn) = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) else { + info!("Failed to query Sortition DB for current burn height, assuming height = 0"); + return Ok(0); + }; + + Ok(u32::try_from(tip_sn.block_height).expect("FATAL: burn height exceeded u32")) + } +} diff --git a/testnet/stacks-node/src/run_loop/mod.rs b/testnet/stacks-node/src/run_loop/mod.rs index abfbe37c37..9ad4fd583e 100644 --- a/testnet/stacks-node/src/run_loop/mod.rs +++ b/testnet/stacks-node/src/run_loop/mod.rs @@ -1,3 +1,4 @@ +pub mod boot_nakamoto; pub mod helium; pub mod nakamoto; pub mod neon; diff --git a/testnet/stacks-node/src/run_loop/nakamoto.rs b/testnet/stacks-node/src/run_loop/nakamoto.rs index f758a65d33..e6a835abb8 100644 --- a/testnet/stacks-node/src/run_loop/nakamoto.rs +++ b/testnet/stacks-node/src/run_loop/nakamoto.rs @@ -68,9 +68,14 @@ pub struct RunLoop { impl RunLoop { /// Sets up a runloop and node, given a config. 
- pub fn new(config: Config) -> Self { + pub fn new( + config: Config, + should_keep_running: Option>, + counters: Option, + ) -> Self { let channels = CoordinatorCommunication::instantiate(); - let should_keep_running = Arc::new(AtomicBool::new(true)); + let should_keep_running = + should_keep_running.unwrap_or_else(|| Arc::new(AtomicBool::new(true))); let pox_watchdog_comms = PoxSyncWatchdogComms::new(should_keep_running.clone()); let miner_status = Arc::new(Mutex::new(MinerStatus::make_ready( config.burnchain.burn_fee_cap, @@ -86,7 +91,7 @@ impl RunLoop { globals: None, coordinator_channels: Some(channels), callbacks: RunLoopCallbacks::new(), - counters: Counters::new(), + counters: counters.unwrap_or_else(|| Counters::new()), should_keep_running, event_dispatcher, pox_watchdog: None, diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index a7be83272f..ad9c473992 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -1,5 +1,5 @@ use std::sync::atomic::{AtomicU64, Ordering}; -use std::sync::Arc; +use std::sync::{Arc, Mutex}; use std::time::{Duration, Instant}; use std::{env, thread}; @@ -27,14 +27,13 @@ use stacks_common::util::secp256k1::Secp256k1PrivateKey; use super::bitcoin_regtest::BitcoinCoreController; use crate::config::{EventKeyType, EventObserverConfig}; use crate::mockamoto::signer::SelfSigner; -use crate::run_loop::nakamoto; +use crate::neon::{Counters, RunLoopCounter}; +use crate::run_loop::boot_nakamoto; use crate::tests::make_stacks_transfer; use crate::tests::neon_integrations::{ next_block_and_wait, run_until_burnchain_height, submit_tx, test_observer, wait_for_runloop, }; -use crate::{ - neon, tests, BitcoinRegtestController, BurnchainController, Config, ConfigFile, Keychain, -}; +use crate::{tests, BitcoinRegtestController, BurnchainController, Config, ConfigFile, Keychain}; static 
POX_4_DEFAULT_STACKER_BALANCE: u64 = 100_000_000_000_000; static POX_4_DEFAULT_STACKER_STX_AMT: u128 = 99_000_000_000_000; @@ -197,11 +196,14 @@ where fn next_block_and_mine_commit( btc_controller: &mut BitcoinRegtestController, timeout_secs: u64, - coord_channels: &CoordinatorChannels, + coord_channels: &Arc>, commits_submitted: &Arc, ) -> Result<(), String> { let commits_submitted = commits_submitted.clone(); - let blocks_processed_before = coord_channels.get_stacks_blocks_processed(); + let blocks_processed_before = coord_channels + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); let commits_before = commits_submitted.load(Ordering::SeqCst); let mut block_processed_time: Option = None; next_block_and(btc_controller, timeout_secs, || { @@ -217,7 +219,10 @@ fn next_block_and_mine_commit( } Ok(false) } else { - let blocks_processed = coord_channels.get_stacks_blocks_processed(); + let blocks_processed = coord_channels + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); if blocks_processed > blocks_processed_before { block_processed_time.replace(Instant::now()); } @@ -241,27 +246,18 @@ fn setup_stacker(naka_conf: &mut Config) -> Secp256k1PrivateKey { /// for pox-4 to activate fn boot_to_epoch_3( naka_conf: &Config, + blocks_processed: &RunLoopCounter, stacker_sk: Secp256k1PrivateKey, btc_regtest_controller: &mut BitcoinRegtestController, ) { - let epoch_2_conf = naka_conf.clone(); - btc_regtest_controller.bootstrap_chain(201); - - let epochs = epoch_2_conf.burnchain.epochs.clone().unwrap(); - + let epochs = naka_conf.burnchain.epochs.clone().unwrap(); let epoch_3 = &epochs[StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch30).unwrap()]; info!( "Chain bootstrapped to bitcoin block 201, starting Epoch 2x miner"; "Epoch 3.0 Boundary" => (epoch_3.start_height - 1), ); - let http_origin = format!("http://{}", &epoch_2_conf.node.rpc_bind); - let mut run_loop = neon::RunLoop::new(epoch_2_conf.clone()); - - let 
epoch_2_stopper = run_loop.get_termination_switch(); - let blocks_processed = run_loop.get_blocks_processed_arc(); - let epoch_2_thread = thread::spawn(move || run_loop.start(None, 0)); - wait_for_runloop(&blocks_processed); + let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); next_block_and_wait(btc_regtest_controller, &blocks_processed); next_block_and_wait(btc_regtest_controller, &blocks_processed); // first mined stacks block @@ -295,19 +291,18 @@ fn boot_to_epoch_3( btc_regtest_controller, &blocks_processed, epoch_3.start_height - 1, - &epoch_2_conf, + &naka_conf, ); - info!("Bootstrapped to Epoch-3.0 boundary, stopping Epoch2x miner"); - epoch_2_stopper.store(false, Ordering::SeqCst); - epoch_2_thread.join().unwrap(); + info!("Bootstrapped to Epoch-3.0 boundary, Epoch2x miner should stop"); } #[test] #[ignore] /// This test spins up a nakamoto-neon node. /// It starts in Epoch 2.0, mines with `neon_node` to Epoch 3.0, and then switches -/// to Nakamoto operation (activating pox-4 by submitting a stack-stx tx). +/// to Nakamoto operation (activating pox-4 by submitting a stack-stx tx). The BootLoop +/// struct handles the epoch-2/3 tear-down and spin-up. /// This test makes three assertions: /// * 30 blocks are mined after 3.0 starts. 
This is enough to mine across 2 reward cycles /// * A transaction submitted to the mempool in 3.0 will be mined in 3.0 @@ -330,13 +325,39 @@ fn simple_neon_integration() { let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let stacker_sk = setup_stacker(&mut naka_conf); + test_observer::spawn(); + let observer_port = test_observer::EVENT_OBSERVER_PORT; + naka_conf.events_observers.insert(EventObserverConfig { + endpoint: format!("localhost:{observer_port}"), + events_keys: vec![EventKeyType::AnyEvent], + }); + let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); btcd_controller .start_bitcoind() .expect("Failed starting bitcoind"); let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); + btc_regtest_controller.bootstrap_chain(201); + + let mut run_loop = boot_nakamoto::BootRunLoop::new(naka_conf.clone()).unwrap(); + let run_loop_stopper = run_loop.get_termination_switch(); + let Counters { + blocks_processed, + naka_submitted_vrfs: vrfs_submitted, + naka_submitted_commits: commits_submitted, + .. 
+ } = run_loop.counters(); - boot_to_epoch_3(&naka_conf, stacker_sk, &mut btc_regtest_controller); + let coord_channel = run_loop.coordinator_channels(); + + let run_loop_thread = thread::spawn(move || run_loop.start(None, 0)); + wait_for_runloop(&blocks_processed); + boot_to_epoch_3( + &naka_conf, + &blocks_processed, + stacker_sk, + &mut btc_regtest_controller, + ); info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner"); @@ -356,23 +377,6 @@ fn simple_neon_integration() { .unwrap() .stacks_block_height; - test_observer::spawn(); - let observer_port = test_observer::EVENT_OBSERVER_PORT; - naka_conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{observer_port}"), - events_keys: vec![EventKeyType::AnyEvent], - }); - - let mut run_loop = nakamoto::RunLoop::new(naka_conf.clone()); - let epoch_3_stopper = run_loop.get_termination_switch(); - let blocks_processed = run_loop.get_blocks_processed_arc(); - let vrfs_submitted = run_loop.submitted_vrfs(); - let commits_submitted = run_loop.submitted_commits(); - let coord_channel = run_loop.get_coordinator_channel().unwrap(); - - let epoch_3_thread = thread::spawn(move || run_loop.start(None, 0)); - - wait_for_runloop(&blocks_processed); info!("Nakamoto miner started..."); // first block wakes up the run loop, wait until a key registration has been submitted. 
next_block_and(&mut btc_regtest_controller, 60, || { @@ -470,8 +474,11 @@ fn simple_neon_integration() { assert!(tip.anchored_header.as_stacks_nakamoto().is_some()); assert!(tip.stacks_block_height >= block_height_pre_3_0 + 30); - coord_channel.stop_chains_coordinator(); + coord_channel + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper.store(false, Ordering::SeqCst); - epoch_3_stopper.store(false, Ordering::SeqCst); - epoch_3_thread.join().unwrap(); + run_loop_thread.join().unwrap(); } From 50a6a115b916fa52ce1e7816469b464f685bc6b7 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Sat, 9 Dec 2023 15:05:14 -0600 Subject: [PATCH 04/16] add copyright headers, some code cleanup --- testnet/stacks-node/src/config.rs | 29 +- testnet/stacks-node/src/globals.rs | 32 +- testnet/stacks-node/src/mockamoto.rs | 17 +- testnet/stacks-node/src/nakamoto_node.rs | 401 +----------------- .../stacks-node/src/nakamoto_node/miner.rs | 37 +- testnet/stacks-node/src/nakamoto_node/peer.rs | 149 ++----- .../stacks-node/src/nakamoto_node/relayer.rs | 196 ++++----- testnet/stacks-node/src/neon_node.rs | 88 ++-- .../stacks-node/src/run_loop/boot_nakamoto.rs | 15 + testnet/stacks-node/src/run_loop/nakamoto.rs | 37 +- testnet/stacks-node/src/run_loop/neon.rs | 2 +- .../src/tests/nakamoto_integrations.rs | 30 +- 12 files changed, 295 insertions(+), 738 deletions(-) diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index feaa0208ac..526c2a90da 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -17,17 +17,18 @@ use stacks::chainstate::stacks::miner::{BlockBuilderSettings, MinerStatus}; use stacks::chainstate::stacks::MAX_BLOCK_LEN; use stacks::core::mempool::MemPoolWalkSettings; use stacks::core::{ - StacksEpoch, StacksEpochExtension, StacksEpochId, CHAIN_ID_MAINNET, CHAIN_ID_TESTNET, - PEER_VERSION_MAINNET, PEER_VERSION_TESTNET, + MemPoolDB, StacksEpoch, StacksEpochExtension, 
StacksEpochId, CHAIN_ID_MAINNET, + CHAIN_ID_TESTNET, PEER_VERSION_MAINNET, PEER_VERSION_TESTNET, }; use stacks::cost_estimates::fee_medians::WeightedMedianFeeRateEstimator; use stacks::cost_estimates::fee_rate_fuzzer::FeeRateFuzzer; use stacks::cost_estimates::fee_scalar::ScalarFeeRateEstimator; -use stacks::cost_estimates::metrics::{CostMetric, ProportionalDotProduct}; -use stacks::cost_estimates::{CostEstimator, FeeEstimator, PessimisticEstimator}; +use stacks::cost_estimates::metrics::{CostMetric, ProportionalDotProduct, UnitMetric}; +use stacks::cost_estimates::{CostEstimator, FeeEstimator, PessimisticEstimator, UnitEstimator}; use stacks::net::atlas::AtlasConfig; use stacks::net::connection::ConnectionOptions; use stacks::net::{Neighbor, NeighborKey}; +use stacks::util_lib::db::Error as DBError; use stacks_common::address::{AddressHashMode, C32_ADDRESS_VERSION_TESTNET_SINGLESIG}; use stacks_common::types::chainstate::StacksAddress; use stacks_common::types::net::PeerAddress; @@ -510,6 +511,26 @@ impl Config { Ok(self.burnchain.clone()) } } + + /// Connect to the MempoolDB using the configured cost estimation + pub fn connect_mempool_db(&self) -> Result { + // create estimators, metric instances for RPC handler + let cost_estimator = self + .make_cost_estimator() + .unwrap_or_else(|| Box::new(UnitEstimator)); + let metric = self + .make_cost_metric() + .unwrap_or_else(|| Box::new(UnitMetric)); + + MemPoolDB::open( + self.is_mainnet(), + self.burnchain.chain_id, + &self.get_chainstate_path_str(), + cost_estimator, + metric, + ) + } + /// Apply any test settings to this burnchain config struct fn apply_test_settings(&self, burnchain: &mut Burnchain) { if self.burnchain.get_bitcoin_network().1 == BitcoinNetworkType::Mainnet { diff --git a/testnet/stacks-node/src/globals.rs b/testnet/stacks-node/src/globals.rs index 7e9e47a8fe..6c60e9a591 100644 --- a/testnet/stacks-node/src/globals.rs +++ b/testnet/stacks-node/src/globals.rs @@ -17,6 +17,8 @@ use 
crate::neon_node::LeaderKeyRegistrationState; use crate::run_loop::RegisteredKey; use crate::syncctl::PoxSyncWatchdogComms; +pub type NeonGlobals = Globals; + /// Command types for the relayer thread, issued to it by other threads pub enum RelayerDirective { /// Handle some new data that arrived on the network (such as blocks, transactions, and @@ -34,8 +36,7 @@ pub enum RelayerDirective { } /// Inter-thread communication structure, shared between threads -#[derive(Clone)] -pub struct Globals { +pub struct Globals { /// Last sortition processed last_sortition: Arc>>, /// Status of the miner @@ -45,7 +46,7 @@ pub struct Globals { /// Unconfirmed transactions (shared between the relayer and p2p threads) unconfirmed_txs: Arc>, /// Writer endpoint to the relayer thread - pub relay_send: SyncSender, + pub relay_send: SyncSender, /// Cointer state in the main thread pub counters: Counters, /// Connection to the PoX sync watchdog @@ -56,15 +57,34 @@ pub struct Globals { leader_key_registration_state: Arc>, } -impl Globals { +// Need to manually implement Clone, because [derive(Clone)] requires +// all trait bounds to implement Clone, even though T doesn't need Clone +// because it's behind SyncSender. 
+impl Clone for Globals { + fn clone(&self) -> Self { + Self { + last_sortition: self.last_sortition.clone(), + miner_status: self.miner_status.clone(), + coord_comms: self.coord_comms.clone(), + unconfirmed_txs: self.unconfirmed_txs.clone(), + relay_send: self.relay_send.clone(), + counters: self.counters.clone(), + sync_comms: self.sync_comms.clone(), + should_keep_running: self.should_keep_running.clone(), + leader_key_registration_state: self.leader_key_registration_state.clone(), + } + } +} + +impl Globals { pub fn new( coord_comms: CoordinatorChannels, miner_status: Arc>, - relay_send: SyncSender, + relay_send: SyncSender, counters: Counters, sync_comms: PoxSyncWatchdogComms, should_keep_running: Arc, - ) -> Globals { + ) -> Globals { Globals { last_sortition: Arc::new(Mutex::new(None)), miner_status, diff --git a/testnet/stacks-node/src/mockamoto.rs b/testnet/stacks-node/src/mockamoto.rs index 845f838828..0929a67743 100644 --- a/testnet/stacks-node/src/mockamoto.rs +++ b/testnet/stacks-node/src/mockamoto.rs @@ -1,3 +1,18 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
use std::sync::atomic::AtomicBool; use std::sync::mpsc::{sync_channel, Receiver, RecvTimeoutError}; use std::sync::{Arc, Mutex}; @@ -69,7 +84,7 @@ use stacks_common::util::secp256k1::{MessageSignature, Secp256k1PublicKey}; use stacks_common::util::vrf::{VRFPrivateKey, VRFProof, VRFPublicKey, VRF}; use self::signer::SelfSigner; -use crate::globals::{Globals, RelayerDirective}; +use crate::globals::{NeonGlobals as Globals, RelayerDirective}; use crate::neon::Counters; use crate::neon_node::{PeerThread, StacksNode, BLOCK_PROCESSOR_STACK_SIZE}; use crate::syncctl::PoxSyncWatchdogComms; diff --git a/testnet/stacks-node/src/nakamoto_node.rs b/testnet/stacks-node/src/nakamoto_node.rs index de0d04cfb5..0482bbfb05 100644 --- a/testnet/stacks-node/src/nakamoto_node.rs +++ b/testnet/stacks-node/src/nakamoto_node.rs @@ -1,5 +1,5 @@ // Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation -// Copyright (C) 2020 Stacks Open Internet Foundation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation // // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by @@ -14,42 +14,25 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
use std::collections::HashMap; -use std::convert::TryFrom; -use std::net::SocketAddr; use std::sync::mpsc::Receiver; use std::thread; use std::thread::JoinHandle; -use clarity::vm::ast::ASTRules; -use clarity::vm::types::QualifiedContractIdentifier; -use stacks::burnchains::{Burnchain, BurnchainSigner, Txid}; +use stacks::burnchains::{BurnchainSigner, Txid}; use stacks::chainstate::burn::db::sortdb::SortitionDB; use stacks::chainstate::burn::BlockSnapshot; -use stacks::chainstate::stacks::db::StacksChainState; -use stacks::chainstate::stacks::Error as ChainstateError; -use stacks::core::mempool::MemPoolDB; -use stacks::cost_estimates::metrics::UnitMetric; -use stacks::cost_estimates::UnitEstimator; use stacks::monitoring; use stacks::monitoring::update_active_miners_count_gauge; -use stacks::net::atlas::{AtlasConfig, AtlasDB}; -use stacks::net::db::PeerDB; -use stacks::net::p2p::PeerNetwork; +use stacks::net::atlas::AtlasConfig; use stacks::net::relay::Relayer; -use stacks::net::stackerdb::{StackerDBConfig, StackerDBSync, StackerDBs}; -use stacks::net::{Error as NetError, PeerNetworkComms, ServiceFlags}; -use stacks::util_lib::strings::{UrlString, VecDisplay}; +use stacks::net::stackerdb::StackerDBs; use stacks_common::types::chainstate::SortitionId; -use stacks_common::types::net::PeerAddress; use stacks_common::types::StacksEpochId; -use stacks_common::util::get_epoch_time_secs; -use stacks_common::util::secp256k1::Secp256k1PrivateKey; use super::{Config, EventDispatcher, Keychain}; use crate::burnchains::bitcoin_regtest_controller::addr2str; -use crate::globals::{Globals, RelayerDirective}; -use crate::neon_node::LeaderKeyRegistrationState; -use crate::run_loop::nakamoto::RunLoop; +use crate::neon_node::{LeaderKeyRegistrationState, StacksNode as NeonNode}; +use crate::run_loop::nakamoto::{Globals, RunLoop}; use crate::run_loop::RegisteredKey; pub mod miner; @@ -57,7 +40,7 @@ pub mod peer; pub mod relayer; use self::peer::PeerThread; -use 
self::relayer::RelayerThread; +use self::relayer::{RelayerDirective, RelayerThread}; pub const RELAYER_MAX_BUFFER: usize = 100; const VRF_MOCK_MINER_KEY: u64 = 1; @@ -82,88 +65,6 @@ pub struct StacksNode { pub relayer_thread_handle: JoinHandle<()>, } -/// Fault injection logic to artificially increase the length of a tenure. -/// Only used in testing -#[cfg(test)] -fn fault_injection_long_tenure() { - // simulated slow block - match std::env::var("STX_TEST_SLOW_TENURE") { - Ok(tenure_str) => match tenure_str.parse::() { - Ok(tenure_time) => { - info!( - "Fault injection: sleeping for {} milliseconds to simulate a long tenure", - tenure_time - ); - stacks_common::util::sleep_ms(tenure_time); - } - Err(_) => { - error!("Parse error for STX_TEST_SLOW_TENURE"); - panic!(); - } - }, - _ => {} - } -} - -#[cfg(not(test))] -fn fault_injection_long_tenure() {} - -/// Fault injection to skip mining in this bitcoin block height -/// Only used in testing -#[cfg(test)] -fn fault_injection_skip_mining(rpc_bind: &str, target_burn_height: u64) -> bool { - match std::env::var("STACKS_DISABLE_MINER") { - Ok(disable_heights) => { - let disable_schedule: serde_json::Value = - serde_json::from_str(&disable_heights).unwrap(); - let disable_schedule = disable_schedule.as_array().unwrap(); - for disabled in disable_schedule { - let target_miner_rpc_bind = disabled - .get("rpc_bind") - .unwrap() - .as_str() - .unwrap() - .to_string(); - if target_miner_rpc_bind != rpc_bind { - continue; - } - let target_block_heights = disabled.get("blocks").unwrap().as_array().unwrap(); - for target_block_value in target_block_heights { - let target_block = target_block_value.as_i64().unwrap() as u64; - if target_block == target_burn_height { - return true; - } - } - } - return false; - } - Err(_) => { - return false; - } - } -} - -#[cfg(not(test))] -fn fault_injection_skip_mining(_rpc_bind: &str, _target_burn_height: u64) -> bool { - false -} - -/// Open the chainstate, and inject faults from the config 
file -pub(crate) fn open_chainstate_with_faults( - config: &Config, -) -> Result { - let stacks_chainstate_path = config.get_chainstate_path_str(); - let (mut chainstate, _) = StacksChainState::open( - config.is_mainnet(), - config.burnchain.chain_id, - &stacks_chainstate_path, - Some(config.node.get_marf_opts()), - )?; - - chainstate.fault_injection.hide_blocks = config.node.fault_injection_hide_blocks; - Ok(chainstate) -} - /// Types of errors that can arise during mining #[derive(Debug)] enum Error { @@ -186,284 +87,6 @@ enum Error { } impl StacksNode { - /// Set up the AST size-precheck height, if configured - fn setup_ast_size_precheck(config: &Config, sortdb: &mut SortitionDB) { - if let Some(ast_precheck_size_height) = config.burnchain.ast_precheck_size_height { - info!( - "Override burnchain height of {:?} to {}", - ASTRules::PrecheckSize, - ast_precheck_size_height - ); - let mut tx = sortdb - .tx_begin() - .expect("FATAL: failed to begin tx on sortition DB"); - SortitionDB::override_ast_rule_height( - &mut tx, - ASTRules::PrecheckSize, - ast_precheck_size_height, - ) - .expect("FATAL: failed to override AST PrecheckSize rule height"); - tx.commit() - .expect("FATAL: failed to commit sortition DB transaction"); - } - } - - /// Set up the mempool DB by making sure it exists. - /// Panics on failure. - fn setup_mempool_db(config: &Config) -> MemPoolDB { - // force early mempool instantiation - let cost_estimator = config - .make_cost_estimator() - .unwrap_or_else(|| Box::new(UnitEstimator)); - let metric = config - .make_cost_metric() - .unwrap_or_else(|| Box::new(UnitMetric)); - - let mempool = MemPoolDB::open( - config.is_mainnet(), - config.burnchain.chain_id, - &config.get_chainstate_path_str(), - cost_estimator, - metric, - ) - .expect("BUG: failed to instantiate mempool"); - - mempool - } - - /// Set up the Peer DB and update any soft state from the config file. 
This includes: - /// * blacklisted/whitelisted nodes - /// * node keys - /// * bootstrap nodes - /// Returns the instantiated PeerDB - /// Panics on failure. - fn setup_peer_db( - config: &Config, - burnchain: &Burnchain, - stackerdb_contract_ids: &[QualifiedContractIdentifier], - ) -> PeerDB { - let data_url = UrlString::try_from(format!("{}", &config.node.data_url)).unwrap(); - let initial_neighbors = config.node.bootstrap_node.clone(); - if initial_neighbors.len() > 0 { - info!( - "Will bootstrap from peers {}", - VecDisplay(&initial_neighbors) - ); - } else { - warn!("Without a peer to bootstrap from, the node will start mining a new chain"); - } - - let p2p_sock: SocketAddr = config.node.p2p_bind.parse().expect(&format!( - "Failed to parse socket: {}", - &config.node.p2p_bind - )); - let p2p_addr: SocketAddr = config.node.p2p_address.parse().expect(&format!( - "Failed to parse socket: {}", - &config.node.p2p_address - )); - let node_privkey = Secp256k1PrivateKey::from_seed(&config.node.local_peer_seed); - - let mut peerdb = PeerDB::connect( - &config.get_peer_db_file_path(), - true, - config.burnchain.chain_id, - burnchain.network_id, - Some(node_privkey), - config.connection_options.private_key_lifetime.clone(), - PeerAddress::from_socketaddr(&p2p_addr), - p2p_sock.port(), - data_url, - &[], - Some(&initial_neighbors), - stackerdb_contract_ids, - ) - .map_err(|e| { - eprintln!( - "Failed to open {}: {:?}", - &config.get_peer_db_file_path(), - &e - ); - panic!(); - }) - .unwrap(); - - // allow all bootstrap nodes - { - let mut tx = peerdb.tx_begin().unwrap(); - for initial_neighbor in initial_neighbors.iter() { - // update peer in case public key changed - PeerDB::update_peer(&mut tx, &initial_neighbor).unwrap(); - PeerDB::set_allow_peer( - &mut tx, - initial_neighbor.addr.network_id, - &initial_neighbor.addr.addrbytes, - initial_neighbor.addr.port, - -1, - ) - .unwrap(); - } - tx.commit().unwrap(); - } - - if !config.node.deny_nodes.is_empty() { - warn!("Will 
ignore nodes {:?}", &config.node.deny_nodes); - } - - // deny all config-denied peers - { - let mut tx = peerdb.tx_begin().unwrap(); - for denied in config.node.deny_nodes.iter() { - PeerDB::set_deny_peer( - &mut tx, - denied.addr.network_id, - &denied.addr.addrbytes, - denied.addr.port, - get_epoch_time_secs() + 24 * 365 * 3600, - ) - .unwrap(); - } - tx.commit().unwrap(); - } - - // update services to indicate we can support mempool sync - { - let mut tx = peerdb.tx_begin().unwrap(); - PeerDB::set_local_services( - &mut tx, - (ServiceFlags::RPC as u16) | (ServiceFlags::RELAY as u16), - ) - .unwrap(); - tx.commit().unwrap(); - } - - peerdb - } - - /// Set up the PeerNetwork, but do not bind it. - pub fn setup_peer_network( - config: &Config, - atlas_config: &AtlasConfig, - burnchain: Burnchain, - ) -> PeerNetwork { - let sortdb = SortitionDB::open( - &config.get_burn_db_file_path(), - true, - burnchain.pox_constants.clone(), - ) - .expect("Error while instantiating sor/tition db"); - - let epochs = SortitionDB::get_stacks_epochs(sortdb.conn()) - .expect("Error while loading stacks epochs"); - - let view = { - let sortition_tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()) - .expect("Failed to get sortition tip"); - SortitionDB::get_burnchain_view(&sortdb.index_conn(), &burnchain, &sortition_tip) - .unwrap() - }; - - let atlasdb = - AtlasDB::connect(atlas_config.clone(), &config.get_atlas_db_file_path(), true).unwrap(); - - let stackerdbs = StackerDBs::connect(&config.get_stacker_db_file_path(), true).unwrap(); - - let mut chainstate = - open_chainstate_with_faults(config).expect("FATAL: could not open chainstate DB"); - - let mut stackerdb_machines = HashMap::new(); - for stackerdb_contract_id in config.node.stacker_dbs.iter() { - // attempt to load the config - let (instantiate, stacker_db_config) = match StackerDBConfig::from_smart_contract( - &mut chainstate, - &sortdb, - stackerdb_contract_id, - ) { - Ok(c) => (true, c), - Err(e) => { - warn!( - 
"Failed to load StackerDB config for {}: {:?}", - stackerdb_contract_id, &e - ); - (false, StackerDBConfig::noop()) - } - }; - let mut stackerdbs = - StackerDBs::connect(&config.get_stacker_db_file_path(), true).unwrap(); - - if instantiate { - match stackerdbs.get_stackerdb_id(stackerdb_contract_id) { - Ok(..) => { - // reconfigure - let tx = stackerdbs.tx_begin(stacker_db_config.clone()).unwrap(); - tx.reconfigure_stackerdb(stackerdb_contract_id, &stacker_db_config.signers) - .expect(&format!( - "FATAL: failed to reconfigure StackerDB replica {}", - stackerdb_contract_id - )); - tx.commit().unwrap(); - } - Err(NetError::NoSuchStackerDB(..)) => { - // instantiate replica - let tx = stackerdbs.tx_begin(stacker_db_config.clone()).unwrap(); - tx.create_stackerdb(stackerdb_contract_id, &stacker_db_config.signers) - .expect(&format!( - "FATAL: failed to instantiate StackerDB replica {}", - stackerdb_contract_id - )); - tx.commit().unwrap(); - } - Err(e) => { - panic!("FATAL: failed to query StackerDB state: {:?}", &e); - } - } - } - let stacker_db_sync = match StackerDBSync::new( - stackerdb_contract_id.clone(), - &stacker_db_config, - PeerNetworkComms::new(), - stackerdbs, - ) { - Ok(s) => s, - Err(e) => { - warn!( - "Failed to instantiate StackerDB sync machine for {}: {:?}", - stackerdb_contract_id, &e - ); - continue; - } - }; - - stackerdb_machines.insert( - stackerdb_contract_id.clone(), - (stacker_db_config, stacker_db_sync), - ); - } - - let stackerdb_contract_ids: Vec<_> = - stackerdb_machines.keys().map(|sc| sc.clone()).collect(); - let peerdb = Self::setup_peer_db(config, &burnchain, &stackerdb_contract_ids); - - let local_peer = match PeerDB::get_local_peer(peerdb.conn()) { - Ok(local_peer) => local_peer, - _ => panic!("Unable to retrieve local peer"), - }; - - let p2p_net = PeerNetwork::new( - peerdb, - atlasdb, - stackerdbs, - local_peer, - config.burnchain.peer_version, - burnchain, - view, - config.connection_options.clone(), - stackerdb_machines, - 
epochs, - ); - - p2p_net - } - /// This function sets the global var `GLOBAL_BURNCHAIN_SIGNER`. /// /// This variable is used for prometheus monitoring (which only @@ -507,11 +130,13 @@ impl StacksNode { ) .expect("Error while instantiating sortition db"); - Self::setup_ast_size_precheck(&config, &mut sortdb); + NeonNode::setup_ast_size_precheck(&config, &mut sortdb); - let _ = Self::setup_mempool_db(&config); + let _ = config + .connect_mempool_db() + .expect("FATAL: database failure opening mempool"); - let mut p2p_net = Self::setup_peer_network(&config, &atlas_config, burnchain.clone()); + let mut p2p_net = NeonNode::setup_peer_network(&config, &atlas_config, burnchain.clone()); let stackerdbs = StackerDBs::connect(&config.get_stacker_db_file_path(), true) .expect("FATAL: failed to connect to stacker DB"); @@ -602,7 +227,7 @@ impl StacksNode { return self .globals .relay_send - .send(RelayerDirective::ProcessTenure( + .send(RelayerDirective::ProcessedBurnBlock( snapshot.consensus_hash.clone(), snapshot.parent_burn_header_hash.clone(), snapshot.winning_stacks_block_hash.clone(), diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index bc684a07bf..ae2781ce7b 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -1,5 +1,5 @@ // Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation -// Copyright (C) 2020 Stacks Open Internet Foundation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation // // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by @@ -29,10 +29,7 @@ use stacks::chainstate::stacks::{ TenureChangeCause, TenureChangePayload, ThresholdSignature, TransactionAnchorMode, TransactionPayload, TransactionVersion, }; -use stacks::core::mempool::MemPoolDB; use stacks::core::FIRST_BURNCHAIN_CONSENSUS_HASH; -use 
stacks::cost_estimates::metrics::UnitMetric; -use stacks::cost_estimates::UnitEstimator; use stacks_common::types::chainstate::{StacksAddress, StacksBlockId}; use stacks_common::types::{PrivateKey, StacksEpochId}; use stacks_common::util::hash::Hash160; @@ -40,11 +37,11 @@ use stacks_common::util::vrf::VRFProof; use super::relayer::RelayerThread; use super::{Config, Error as NakamotoNodeError, EventDispatcher, Keychain}; -use crate::globals::Globals; use crate::mockamoto::signer::SelfSigner; use crate::nakamoto_node::VRF_MOCK_MINER_KEY; +use crate::run_loop::nakamoto::Globals; use crate::run_loop::RegisteredKey; -use crate::ChainTip; +use crate::{neon_node, ChainTip}; pub enum MinerDirective { /// The miner won sortition so they should begin a new tenure @@ -161,7 +158,7 @@ impl BlockMinerThread { mut block: NakamotoBlock, ) -> Result<(), ChainstateError> { signer.sign_nakamoto_block(&mut block); - let mut chain_state = super::open_chainstate_with_faults(&self.config) + let mut chain_state = neon_node::open_chainstate_with_faults(&self.config) .expect("FATAL: could not open chainstate DB"); let chainstate_config = chain_state.config(); let sort_db = SortitionDB::open( @@ -365,19 +362,9 @@ impl BlockMinerThread { /// Return None if we couldn't build a block for whatever reason. fn mine_block(&mut self) -> Option { debug!("block miner thread ID is {:?}", thread::current().id()); - super::fault_injection_long_tenure(); + neon_node::fault_injection_long_tenure(); let burn_db_path = self.config.get_burn_db_file_path(); - let stacks_chainstate_path = self.config.get_chainstate_path_str(); - - let cost_estimator = self - .config - .make_cost_estimator() - .unwrap_or_else(|| Box::new(UnitEstimator)); - let metric = self - .config - .make_cost_metric() - .unwrap_or_else(|| Box::new(UnitMetric)); // NOTE: read-write access is needed in order to be able to query the recipient set. 
// This is an artifact of the way the MARF is built (see #1449) @@ -385,17 +372,13 @@ impl BlockMinerThread { SortitionDB::open(&burn_db_path, true, self.burnchain.pox_constants.clone()) .expect("FATAL: could not open sortition DB"); - let mut chain_state = super::open_chainstate_with_faults(&self.config) + let mut chain_state = neon_node::open_chainstate_with_faults(&self.config) .expect("FATAL: could not open chainstate DB"); - let mut mem_pool = MemPoolDB::open( - self.config.is_mainnet(), - self.config.burnchain.chain_id, - &stacks_chainstate_path, - cost_estimator, - metric, - ) - .expect("Database failure opening mempool"); + let mut mem_pool = self + .config + .connect_mempool_db() + .expect("Database failure opening mempool"); let target_epoch_id = SortitionDB::get_stacks_epoch(burn_db.conn(), self.burn_block.block_height + 1) diff --git a/testnet/stacks-node/src/nakamoto_node/peer.rs b/testnet/stacks-node/src/nakamoto_node/peer.rs index 9f2a37c50d..762aa45eda 100644 --- a/testnet/stacks-node/src/nakamoto_node/peer.rs +++ b/testnet/stacks-node/src/nakamoto_node/peer.rs @@ -1,5 +1,5 @@ // Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation -// Copyright (C) 2020 Stacks Open Internet Foundation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation // // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by @@ -33,10 +33,10 @@ use stacks::net::p2p::PeerNetwork; use stacks::net::RPCHandlerArgs; use stacks_common::util::hash::Sha256Sum; -use super::open_chainstate_with_faults; use crate::burnchains::make_bitcoin_indexer; -use crate::globals::{Globals, RelayerDirective}; -use crate::run_loop::nakamoto::RunLoop; +use crate::nakamoto_node::relayer::RelayerDirective; +use crate::neon_node::open_chainstate_with_faults; +use crate::run_loop::nakamoto::{Globals, RunLoop}; use crate::{Config, EventDispatcher}; /// Thread that runs the network state machine, 
handling both p2p and http requests. @@ -44,17 +44,17 @@ pub struct PeerThread { /// Node config config: Config, /// instance of the peer network. Made optional in order to trick the borrow checker. - net: Option, + net: PeerNetwork, /// handle to global inter-thread comms globals: Globals, /// how long to wait for network messages on each poll, in millis poll_timeout: u64, - /// handle to the sortition DB (optional so we can take/replace it) - sortdb: Option, - /// handle to the chainstate DB (optional so we can take/replace it) - chainstate: Option, - /// handle to the mempool DB (optional so we can take/replace it) - mempool: Option, + /// handle to the sortition DB + sortdb: SortitionDB, + /// handle to the chainstate DB + chainstate: StacksChainState, + /// handle to the mempool DB + mempool: MemPoolDB, /// buffer of relayer commands with block data that couldn't be sent to the relayer just yet /// (i.e. due to backpressure). We track this separately, instead of just using a bigger /// channel, because we need to know when backpressure occurs in order to throttle the p2p @@ -141,28 +141,6 @@ impl PeerThread { info!("P2P thread exit!"); } - /// set up the mempool DB connection - pub fn connect_mempool_db(config: &Config) -> MemPoolDB { - // create estimators, metric instances for RPC handler - let cost_estimator = config - .make_cost_estimator() - .unwrap_or_else(|| Box::new(UnitEstimator)); - let metric = config - .make_cost_metric() - .unwrap_or_else(|| Box::new(UnitMetric)); - - let mempool = MemPoolDB::open( - config.is_mainnet(), - config.burnchain.chain_id, - &config.get_chainstate_path_str(), - cost_estimator, - metric, - ) - .expect("Database failure opening mempool"); - - mempool - } - /// Instantiate the p2p thread. /// Binds the addresses in the config (which may panic if the port is blocked). 
/// This is so the node will crash "early" before any new threads start if there's going to be @@ -183,7 +161,9 @@ impl PeerThread { mut net: PeerNetwork, ) -> Self { let config = config.clone(); - let mempool = Self::connect_mempool_db(&config); + let mempool = config + .connect_mempool_db() + .expect("FATAL: database failure opening mempool"); let burn_db_path = config.get_burn_db_file_path(); let sortdb = SortitionDB::open(&burn_db_path, false, pox_constants) @@ -208,12 +188,12 @@ impl PeerThread { PeerThread { config, - net: Some(net), + net, globals, poll_timeout, - sortdb: Some(sortdb), - chainstate: Some(chainstate), - mempool: Some(mempool), + sortdb, + chainstate, + mempool, results_with_data: VecDeque::new(), num_p2p_state_machine_passes: 0, num_inv_sync_passes: 0, @@ -222,50 +202,6 @@ impl PeerThread { } } - /// Do something with mutable references to the mempool, sortdb, and chainstate - /// Fools the borrow checker. - /// NOT COMPOSIBLE - fn with_chainstate(&mut self, func: F) -> R - where - F: FnOnce(&mut PeerThread, &mut SortitionDB, &mut StacksChainState, &mut MemPoolDB) -> R, - { - let mut sortdb = self.sortdb.take().expect("BUG: sortdb already taken"); - let mut chainstate = self - .chainstate - .take() - .expect("BUG: chainstate already taken"); - let mut mempool = self.mempool.take().expect("BUG: mempool already taken"); - - let res = func(self, &mut sortdb, &mut chainstate, &mut mempool); - - self.sortdb = Some(sortdb); - self.chainstate = Some(chainstate); - self.mempool = Some(mempool); - - res - } - - /// Get an immutable ref to the inner network. - /// DO NOT USE WITHIN with_network() - fn get_network(&self) -> &PeerNetwork { - self.net.as_ref().expect("BUG: did not replace net") - } - - /// Do something with mutable references to the network. - /// Fools the borrow checker. - /// NOT COMPOSIBLE. 
DO NOT CALL THIS OR get_network() IN func - fn with_network(&mut self, func: F) -> R - where - F: FnOnce(&mut PeerThread, &mut PeerNetwork) -> R, - { - let mut net = self.net.take().expect("BUG: net already taken"); - - let res = func(self, &mut net); - - self.net = Some(net); - res - } - /// Run one pass of the p2p/http state machine /// Return true if we should continue running passes; false if not pub fn run_one_pass( @@ -280,12 +216,12 @@ impl PeerThread { // initial block download? let ibd = self.globals.sync_comms.get_ibd(); let download_backpressure = self.results_with_data.len() > 0; - let poll_ms = if !download_backpressure && self.get_network().has_more_downloads() { + let poll_ms = if !download_backpressure && self.net.has_more_downloads() { // keep getting those blocks -- drive the downloader state-machine debug!( "P2P: backpressure: {}, more downloads: {}", download_backpressure, - self.get_network().has_more_downloads() + self.net.has_more_downloads() ); 1 } else { @@ -293,15 +229,11 @@ impl PeerThread { }; // do one pass - let p2p_res = self.with_chainstate(|p2p_thread, sortdb, chainstate, mempool| { + let p2p_res = { // NOTE: handler_args must be created such that it outlives the inner net.run() call and // doesn't ref anything within p2p_thread. 
let handler_args = RPCHandlerArgs { - exit_at_block_height: p2p_thread - .config - .burnchain - .process_exit_at_block_height - .clone(), + exit_at_block_height: self.config.burnchain.process_exit_at_block_height.clone(), genesis_chainstate_hash: Sha256Sum::from_hex(stx_genesis::GENESIS_CHAINSTATE_HASH) .unwrap(), event_observer: Some(event_dispatcher), @@ -310,21 +242,18 @@ impl PeerThread { fee_estimator: fee_estimator.map(|boxed_estimator| boxed_estimator.as_ref()), ..RPCHandlerArgs::default() }; - p2p_thread.with_network(|_, net| { - net.run( - indexer, - sortdb, - chainstate, - mempool, - dns_client_opt, - download_backpressure, - ibd, - poll_ms, - &handler_args, - ) - }) - }); - + self.net.run( + indexer, + &self.sortdb, + &mut self.chainstate, + &mut self.mempool, + dns_client_opt, + download_backpressure, + ibd, + poll_ms, + &handler_args, + ) + }; match p2p_res { Ok(network_result) => { let mut have_update = false; @@ -376,17 +305,13 @@ impl PeerThread { if let Err(e) = self.globals.relay_send.try_send(next_result) { debug!( "P2P: {:?}: download backpressure detected (bufferred {})", - &self.get_network().local_peer, + &self.net.local_peer, self.results_with_data.len() ); match e { TrySendError::Full(directive) => { - if let RelayerDirective::RunTenure(..) 
= directive { - // can drop this - } else { - // don't lose this data -- just try it again - self.results_with_data.push_front(directive); - } + // don't lose this data -- just try it again + self.results_with_data.push_front(directive); break; } TrySendError::Disconnected(_) => { diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index 6aa4568d0b..04f04241e0 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -1,5 +1,5 @@ // Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation -// Copyright (C) 2020 Stacks Open Internet Foundation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation // // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by @@ -53,17 +53,35 @@ use stacks_common::util::hash::Hash160; use stacks_common::util::vrf::{VRFProof, VRFPublicKey}; use super::{ - fault_injection_skip_mining, open_chainstate_with_faults, BlockCommits, Config, - Error as NakamotoNodeError, EventDispatcher, Keychain, BLOCK_PROCESSOR_STACK_SIZE, + BlockCommits, Config, Error as NakamotoNodeError, EventDispatcher, Keychain, + BLOCK_PROCESSOR_STACK_SIZE, }; use crate::burnchains::BurnchainController; -use crate::globals::{Globals, RelayerDirective}; use crate::nakamoto_node::miner::{BlockMinerThread, MinerDirective}; -use crate::neon_node::LeaderKeyRegistrationState; -use crate::run_loop::nakamoto::RunLoop; +use crate::neon_node::{ + fault_injection_skip_mining, open_chainstate_with_faults, LeaderKeyRegistrationState, +}; +use crate::run_loop::nakamoto::{Globals, RunLoop}; use crate::run_loop::RegisteredKey; use crate::BitcoinRegtestController; +/// Command types for the Nakamoto relayer thread, issued to it by other threads +pub enum RelayerDirective { + /// Handle some new data that arrived on the network (such as blocks, transactions, and + 
HandleNetResult(NetworkResult), + /// A new burn block has been processed by the SortitionDB, check if this miner won sortition, + /// and if so, start the miner thread + ProcessedBurnBlock(ConsensusHash, BurnchainHeaderHash, BlockHeaderHash), + /// Either a new burn block has been processed (without a miner active yet) or a + /// nakamoto tenure's first block has been processed, so the relayer should issue + /// a block commit + IssueBlockCommit(ConsensusHash, BlockHeaderHash), + /// Try to register a VRF public key + RegisterKey(BlockSnapshot), + /// Stop the relayer thread + Exit, +} + /// Relayer thread /// * accepts network results and stores blocks and microblocks /// * forwards new blocks, microblocks, and transactions to the p2p thread @@ -72,12 +90,12 @@ use crate::BitcoinRegtestController; pub struct RelayerThread { /// Node config pub(crate) config: Config, - /// Handle to the sortition DB (optional so we can take/replace it) - sortdb: Option, - /// Handle to the chainstate DB (optional so we can take/replace it) - chainstate: Option, - /// Handle to the mempool DB (optional so we can take/replace it) - mempool: Option, + /// Handle to the sortition DB + sortdb: SortitionDB, + /// Handle to the chainstate DB + chainstate: StacksChainState, + /// Handle to the mempool DB + mempool: MemPoolDB, /// Handle to global state and inter-thread communication channels pub(crate) globals: Globals, /// Authoritative copy of the keychain state @@ -167,9 +185,9 @@ impl RelayerThread { RelayerThread { config: config.clone(), - sortdb: Some(sortdb), - chainstate: Some(chainstate), - mempool: Some(mempool), + sortdb, + chainstate, + mempool, globals, keychain, burnchain: runloop.get_burnchain(), @@ -195,46 +213,6 @@ impl RelayerThread { } } - /// Get an immutible ref to the sortdb - pub fn sortdb_ref(&self) -> &SortitionDB { - self.sortdb - .as_ref() - .expect("FATAL: tried to access sortdb while taken") - } - - /// Get an immutible ref to the chainstate - pub fn 
chainstate_ref(&self) -> &StacksChainState { - self.chainstate - .as_ref() - .expect("FATAL: tried to access chainstate while it was taken") - } - - /// Fool the borrow checker into letting us do something with the chainstate databases. - /// DOES NOT COMPOSE -- do NOT call this, or self.sortdb_ref(), or self.chainstate_ref(), within - /// `func`. You will get a runtime panic. - pub fn with_chainstate(&mut self, func: F) -> R - where - F: FnOnce(&mut RelayerThread, &mut SortitionDB, &mut StacksChainState, &mut MemPoolDB) -> R, - { - let mut sortdb = self - .sortdb - .take() - .expect("FATAL: tried to take sortdb while taken"); - let mut chainstate = self - .chainstate - .take() - .expect("FATAL: tried to take chainstate while taken"); - let mut mempool = self - .mempool - .take() - .expect("FATAL: tried to take mempool while taken"); - let res = func(self, &mut sortdb, &mut chainstate, &mut mempool); - self.sortdb = Some(sortdb); - self.chainstate = Some(chainstate); - self.mempool = Some(mempool); - res - } - /// have we waited for the right conditions under which to start mining a block off of our /// chain tip? 
pub fn has_waited_for_latest_blocks(&self) -> bool { @@ -286,21 +264,19 @@ impl RelayerThread { signal_mining_blocked(self.globals.get_miner_status()); } - let net_receipts = self.with_chainstate(|relayer_thread, sortdb, chainstate, mempool| { - relayer_thread - .relayer - .process_network_result( - &relayer_thread.local_peer, - &mut net_result, - sortdb, - chainstate, - mempool, - relayer_thread.globals.sync_comms.get_ibd(), - Some(&relayer_thread.globals.coord_comms), - Some(&relayer_thread.event_dispatcher), - ) - .expect("BUG: failure processing network results") - }); + let net_receipts = self + .relayer + .process_network_result( + &self.local_peer, + &mut net_result, + &mut self.sortdb, + &mut self.chainstate, + &mut self.mempool, + self.globals.sync_comms.get_ibd(), + Some(&self.globals.coord_comms), + Some(&self.event_dispatcher), + ) + .expect("BUG: failure processing network results"); if net_receipts.num_new_blocks > 0 || net_receipts.num_new_confirmed_microblocks > 0 { // if we received any new block data that could invalidate our view of the chain tip, @@ -318,7 +294,7 @@ impl RelayerThread { let num_unconfirmed_microblock_tx_receipts = net_receipts.processed_unconfirmed_state.receipts.len(); if num_unconfirmed_microblock_tx_receipts > 0 { - if let Some(unconfirmed_state) = self.chainstate_ref().unconfirmed_state.as_ref() { + if let Some(unconfirmed_state) = self.chainstate.unconfirmed_state.as_ref() { let canonical_tip = unconfirmed_state.confirmed_chain_tip.clone(); self.event_dispatcher.process_new_microblocks( canonical_tip, @@ -336,16 +312,14 @@ impl RelayerThread { } // synchronize unconfirmed tx index to p2p thread - self.with_chainstate(|relayer_thread, _sortdb, chainstate, _mempool| { - relayer_thread.globals.send_unconfirmed_txs(chainstate); - }); + self.globals.send_unconfirmed_txs(&self.chainstate); // resume mining if we blocked it, and if we've done the requisite download // passes self.last_network_download_passes = 
net_result.num_download_passes; self.last_network_inv_passes = net_result.num_inv_sync_passes; if self.has_waited_for_latest_blocks() { - debug!("Relayer: did a download pass, so unblocking mining"); + info!("Relayer: did a download pass, so unblocking mining"); signal_mining_ready(self.globals.get_miner_status()); } } @@ -359,10 +333,9 @@ impl RelayerThread { burn_hash: BurnchainHeaderHash, committed_index_hash: StacksBlockId, ) -> MinerDirective { - let sn = - SortitionDB::get_block_snapshot_consensus(self.sortdb_ref().conn(), &consensus_hash) - .expect("FATAL: failed to query sortition DB") - .expect("FATAL: unknown consensus hash"); + let sn = SortitionDB::get_block_snapshot_consensus(self.sortdb.conn(), &consensus_hash) + .expect("FATAL: failed to query sortition DB") + .expect("FATAL: unknown consensus hash"); self.globals.set_last_sortition(sn.clone()); @@ -423,11 +396,10 @@ impl RelayerThread { // already in-flight return; } - let cur_epoch = - SortitionDB::get_stacks_epoch(self.sortdb_ref().conn(), burn_block.block_height) - .expect("FATAL: failed to query sortition DB") - .expect("FATAL: no epoch defined") - .epoch_id; + let cur_epoch = SortitionDB::get_stacks_epoch(self.sortdb.conn(), burn_block.block_height) + .expect("FATAL: failed to query sortition DB") + .expect("FATAL: no epoch defined") + .epoch_id; let (vrf_pk, _) = self.keychain.make_vrf_keypair(burn_block.block_height); let burnchain_tip_consensus_hash = &burn_block.consensus_hash; let miner_pkh = self.keychain.get_nakamoto_pkh(); @@ -464,24 +436,19 @@ impl RelayerThread { target_ch: &ConsensusHash, target_bh: &BlockHeaderHash, ) -> Result<(BlockSnapshot, StacksEpochId, LeaderBlockCommitOp), NakamotoNodeError> { - let chain_state = self - .chainstate - .as_mut() - .expect("FATAL: Failed to load chain state"); - let sort_db = self.sortdb.as_mut().expect("FATAL: Failed to load sortdb"); - let sort_tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()) + let sort_tip = 
SortitionDB::get_canonical_burn_chain_tip(self.sortdb.conn()) .map_err(|_| NakamotoNodeError::SnapshotNotFoundForChainTip)?; let parent_vrf_proof = - NakamotoChainState::get_block_vrf_proof(chain_state.db(), &target_ch) + NakamotoChainState::get_block_vrf_proof(self.chainstate.db(), &target_ch) .map_err(|_e| NakamotoNodeError::ParentNotFound)? .unwrap_or_else(|| VRFProof::empty()); // let's figure out the recipient set! let recipients = get_next_recipients( &sort_tip, - chain_state, - sort_db, + &mut self.chainstate, + &mut self.sortdb, &self.burnchain, &OnChainRewardSetProvider(), self.config.node.always_use_affirmation_maps, @@ -492,7 +459,7 @@ impl RelayerThread { })?; let block_header = - NakamotoChainState::get_block_header_by_consensus_hash(chain_state.db(), target_ch) + NakamotoChainState::get_block_header_by_consensus_hash(self.chainstate.db(), target_ch) .map_err(|e| { error!("Relayer: Failed to get block header for parent tenure: {e:?}"); NakamotoNodeError::ParentNotFound @@ -511,14 +478,14 @@ impl RelayerThread { } let Ok(Some(parent_sortition)) = - SortitionDB::get_block_snapshot_consensus(sort_db.conn(), target_ch) + SortitionDB::get_block_snapshot_consensus(self.sortdb.conn(), target_ch) else { error!("Relayer: Failed to lookup the block snapshot of parent tenure ID"; "tenure_consensus_hash" => %target_ch); return Err(NakamotoNodeError::ParentNotFound); }; let Ok(Some(target_epoch)) = - SortitionDB::get_stacks_epoch(sort_db.conn(), sort_tip.block_height + 1) + SortitionDB::get_stacks_epoch(self.sortdb.conn(), sort_tip.block_height + 1) else { error!("Relayer: Failed to lookup its epoch"; "target_height" => sort_tip.block_height + 1); return Err(NakamotoNodeError::SnapshotNotFoundForChainTip); @@ -526,7 +493,7 @@ impl RelayerThread { let parent_block_burn_height = parent_sortition.block_height; let Ok(Some(parent_winning_tx)) = SortitionDB::get_block_commit( - sort_db.conn(), + self.sortdb.conn(), &parent_sortition.winning_block_txid, 
&parent_sortition.sortition_id, ) else { @@ -621,7 +588,7 @@ impl RelayerThread { } let burn_header_hash = last_burn_block.burn_header_hash.clone(); - let burn_chain_sn = SortitionDB::get_canonical_burn_chain_tip(self.sortdb_ref().conn()) + let burn_chain_sn = SortitionDB::get_canonical_burn_chain_tip(self.sortdb.conn()) .expect("FATAL: failed to query sortition DB for canonical burn chain tip"); let burn_chain_tip = burn_chain_sn.burn_header_hash.clone(); @@ -779,8 +746,7 @@ impl RelayerThread { self.globals.get_leader_key_registration_state(), LeaderKeyRegistrationState::Inactive ) { - let Ok(sort_tip) = SortitionDB::get_canonical_burn_chain_tip(self.sortdb_ref().conn()) - else { + let Ok(sort_tip) = SortitionDB::get_canonical_burn_chain_tip(self.sortdb.conn()) else { warn!("Failed to fetch sortition tip while needing to register VRF key"); return None; }; @@ -796,8 +762,7 @@ impl RelayerThread { } // has there been a new sortition - let Ok(sort_tip) = SortitionDB::get_canonical_burn_chain_tip(self.sortdb_ref().conn()) - else { + let Ok(sort_tip) = SortitionDB::get_canonical_burn_chain_tip(self.sortdb.conn()) else { return None; }; @@ -813,12 +778,11 @@ impl RelayerThread { true }; - let Ok(Some(chain_tip_header)) = NakamotoChainState::get_canonical_block_header( - self.chainstate_ref().db(), - self.sortdb_ref(), - ) else { + let Ok(Some(chain_tip_header)) = + NakamotoChainState::get_canonical_block_header(self.chainstate.db(), &self.sortdb) + else { info!("No known canonical tip, will issue a genesis block commit"); - return Some(RelayerDirective::NakamotoTenureStartProcessed( + return Some(RelayerDirective::IssueBlockCommit( FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, )); @@ -827,7 +791,7 @@ impl RelayerThread { // get the starting block of the chain tip's tenure let Ok(Some(chain_tip_tenure_start)) = NakamotoChainState::get_block_header_by_consensus_hash( - self.chainstate_ref().db(), + self.chainstate.db(), &chain_tip_header.consensus_hash, ) 
else { @@ -849,7 +813,7 @@ impl RelayerThread { }; if should_commit { - Some(RelayerDirective::NakamotoTenureStartProcessed( + Some(RelayerDirective::IssueBlockCommit( chain_tip_header.consensus_hash, chain_tip_header.anchored_header.block_hash(), )) @@ -924,10 +888,10 @@ impl RelayerThread { debug!("Relayer: directive Registered VRF key"); true } - // ProcessTenure directives correspond to a new sortition occurring. + // ProcessedBurnBlock directives correspond to a new sortition perhaps occurring. // relayer should invoke `handle_sortition` to determine if they won the sortition, // and to start their miner, or stop their miner if an active tenure is now ending - RelayerDirective::ProcessTenure(consensus_hash, burn_hash, block_header_hash) => { + RelayerDirective::ProcessedBurnBlock(consensus_hash, burn_hash, block_header_hash) => { if !self.is_miner { return true; } @@ -940,9 +904,8 @@ impl RelayerThread { info!("Relayer: directive Processed tenures"); res } - // NakamotoTenureStartProcessed directives mean that a new tenure start has been processed - // These are triggered by the relayer waking up, seeing a new consensus hash *and* a new first tenure block - RelayerDirective::NakamotoTenureStartProcessed(consensus_hash, block_hash) => { + // These are triggered by the relayer waking up, seeing a new consensus hash *or* a new first tenure block + RelayerDirective::IssueBlockCommit(consensus_hash, block_hash) => { if !self.is_miner { return true; } @@ -953,11 +916,6 @@ impl RelayerThread { debug!("Relayer: Nakamoto Tenure Start"); true } - RelayerDirective::RunTenure(..) => { - // No Op: the nakamoto node does not use the RunTenure directive to control its - // miner thread. 
- true - } RelayerDirective::Exit => false, }; diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index a3821fae2b..284d63a1c3 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -206,7 +206,7 @@ use crate::burnchains::bitcoin_regtest_controller::{ addr2str, BitcoinRegtestController, OngoingBlockCommit, }; use crate::burnchains::make_bitcoin_indexer; -use crate::globals::{Globals, RelayerDirective}; +use crate::globals::{NeonGlobals as Globals, RelayerDirective}; use crate::run_loop::neon::RunLoop; use crate::run_loop::RegisteredKey; use crate::ChainTip; @@ -304,71 +304,59 @@ pub struct StacksNode { /// Fault injection logic to artificially increase the length of a tenure. /// Only used in testing #[cfg(test)] -fn fault_injection_long_tenure() { +pub(crate) fn fault_injection_long_tenure() { // simulated slow block - match std::env::var("STX_TEST_SLOW_TENURE") { - Ok(tenure_str) => match tenure_str.parse::() { - Ok(tenure_time) => { - info!( - "Fault injection: sleeping for {} milliseconds to simulate a long tenure", - tenure_time - ); - stacks_common::util::sleep_ms(tenure_time); - } - Err(_) => { - error!("Parse error for STX_TEST_SLOW_TENURE"); - panic!(); - } - }, - _ => {} - } + let Ok(tenure_str) = std::env::var("STX_TEST_SLOW_TENURE") else { + return; + }; + let Ok(tenure_time) = tenure_str.parse::() else { + error!("Parse error for STX_TEST_SLOW_TENURE"); + panic!(); + }; + info!( + "Fault injection: sleeping for {} milliseconds to simulate a long tenure", + tenure_time + ); + stacks_common::util::sleep_ms(tenure_time); } #[cfg(not(test))] -fn fault_injection_long_tenure() {} +pub(crate) fn fault_injection_long_tenure() {} /// Fault injection to skip mining in this bitcoin block height /// Only used in testing #[cfg(test)] -fn fault_injection_skip_mining(rpc_bind: &str, target_burn_height: u64) -> bool { - match std::env::var("STACKS_DISABLE_MINER") { - Ok(disable_heights) => 
{ - let disable_schedule: serde_json::Value = - serde_json::from_str(&disable_heights).unwrap(); - let disable_schedule = disable_schedule.as_array().unwrap(); - for disabled in disable_schedule { - let target_miner_rpc_bind = disabled - .get("rpc_bind") - .unwrap() - .as_str() - .unwrap() - .to_string(); - if target_miner_rpc_bind != rpc_bind { - continue; - } - let target_block_heights = disabled.get("blocks").unwrap().as_array().unwrap(); - for target_block_value in target_block_heights { - let target_block = target_block_value.as_i64().unwrap() as u64; - if target_block == target_burn_height { - return true; - } - } - } - return false; +pub(crate) fn fault_injection_skip_mining(rpc_bind: &str, target_burn_height: u64) -> bool { + let Ok(disable_heights) = std::env::var("STACKS_DISABLE_MINER") else { + return false; + }; + let disable_schedule: serde_json::Value = serde_json::from_str(&disable_heights).unwrap(); + let disable_schedule = disable_schedule.as_array().unwrap(); + for disabled in disable_schedule { + let target_miner_rpc_bind = disabled.get("rpc_bind").unwrap().as_str().unwrap(); + if target_miner_rpc_bind != rpc_bind { + continue; } - Err(_) => { - return false; + let target_block_heights = disabled.get("blocks").unwrap().as_array().unwrap(); + for target_block_value in target_block_heights { + let target_block = u64::try_from(target_block_value.as_i64().unwrap()).unwrap(); + if target_block == target_burn_height { + return true; + } } } + false } #[cfg(not(test))] -fn fault_injection_skip_mining(_rpc_bind: &str, _target_burn_height: u64) -> bool { +pub(crate) fn fault_injection_skip_mining(_rpc_bind: &str, _target_burn_height: u64) -> bool { false } /// Open the chainstate, and inject faults from the config file -fn open_chainstate_with_faults(config: &Config) -> Result { +pub(crate) fn open_chainstate_with_faults( + config: &Config, +) -> Result { let stacks_chainstate_path = config.get_chainstate_path_str(); let (mut chainstate, _) = 
StacksChainState::open( config.is_mainnet(), @@ -3635,7 +3623,7 @@ impl StacksNode { } /// Set up the AST size-precheck height, if configured - fn setup_ast_size_precheck(config: &Config, sortdb: &mut SortitionDB) { + pub(crate) fn setup_ast_size_precheck(config: &Config, sortdb: &mut SortitionDB) { if let Some(ast_precheck_size_height) = config.burnchain.ast_precheck_size_height { info!( "Override burnchain height of {:?} to {}", @@ -3788,7 +3776,7 @@ impl StacksNode { } /// Set up the PeerNetwork, but do not bind it. - pub fn setup_peer_network( + pub(crate) fn setup_peer_network( config: &Config, atlas_config: &AtlasConfig, burnchain: Burnchain, diff --git a/testnet/stacks-node/src/run_loop/boot_nakamoto.rs b/testnet/stacks-node/src/run_loop/boot_nakamoto.rs index 1b54c24f5a..e70784ce42 100644 --- a/testnet/stacks-node/src/run_loop/boot_nakamoto.rs +++ b/testnet/stacks-node/src/run_loop/boot_nakamoto.rs @@ -1,3 +1,18 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::{Arc, Mutex}; use std::thread::JoinHandle; diff --git a/testnet/stacks-node/src/run_loop/nakamoto.rs b/testnet/stacks-node/src/run_loop/nakamoto.rs index e6a835abb8..b3458a4ce6 100644 --- a/testnet/stacks-node/src/run_loop/nakamoto.rs +++ b/testnet/stacks-node/src/run_loop/nakamoto.rs @@ -1,3 +1,18 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
use std::sync::atomic::AtomicBool; use std::sync::mpsc::sync_channel; use std::sync::{Arc, Mutex}; @@ -25,10 +40,9 @@ use stx_genesis::GenesisData; use super::RunLoopCallbacks; use crate::burnchains::make_bitcoin_indexer; -use crate::globals::Globals; +use crate::globals::Globals as GenericGlobals; use crate::monitoring::start_serving_monitoring_metrics; -use crate::nakamoto_node::{StacksNode, BLOCK_PROCESSOR_STACK_SIZE, RELAYER_MAX_BUFFER}; -use crate::neon::RunLoopCounter; +use crate::nakamoto_node::{self, StacksNode, BLOCK_PROCESSOR_STACK_SIZE, RELAYER_MAX_BUFFER}; use crate::node::{ get_account_balances, get_account_lockups, get_names, get_namespaces, use_test_genesis_chainstate, @@ -41,6 +55,7 @@ use crate::{ }; pub const STDERR: i32 = 2; +pub type Globals = GenericGlobals; #[cfg(test)] const UNCONDITIONAL_CHAIN_LIVENESS_CHECK: u64 = 30; @@ -116,22 +131,6 @@ impl RunLoop { self.coordinator_channels.as_ref().map(|x| x.1.clone()) } - pub fn get_blocks_processed_arc(&self) -> RunLoopCounter { - self.counters.blocks_processed.clone() - } - - pub fn submitted_commits(&self) -> RunLoopCounter { - self.counters.naka_submitted_commits.clone() - } - - pub fn submitted_vrfs(&self) -> RunLoopCounter { - self.counters.naka_submitted_vrfs.clone() - } - - pub fn mined_blocks(&self) -> RunLoopCounter { - self.counters.naka_mined_blocks.clone() - } - pub fn get_counters(&self) -> Counters { self.counters.clone() } diff --git a/testnet/stacks-node/src/run_loop/neon.rs b/testnet/stacks-node/src/run_loop/neon.rs index c10c9b88c3..cffcd1aa10 100644 --- a/testnet/stacks-node/src/run_loop/neon.rs +++ b/testnet/stacks-node/src/run_loop/neon.rs @@ -31,7 +31,7 @@ use stx_genesis::GenesisData; use super::RunLoopCallbacks; use crate::burnchains::make_bitcoin_indexer; -use crate::globals::Globals; +use crate::globals::NeonGlobals as Globals; use crate::monitoring::start_serving_monitoring_metrics; use crate::neon_node::{StacksNode, BLOCK_PROCESSOR_STACK_SIZE, RELAYER_MAX_BUFFER}; use 
crate::node::{ diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index ad9c473992..2b4fdfa540 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -1,3 +1,18 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
use std::sync::atomic::{AtomicU64, Ordering}; use std::sync::{Arc, Mutex}; use std::time::{Duration, Instant}; @@ -11,13 +26,11 @@ use stacks::chainstate::coordinator::comm::CoordinatorChannels; use stacks::chainstate::nakamoto::NakamotoChainState; use stacks::chainstate::stacks::db::StacksChainState; use stacks::core::{ - MemPoolDB, StacksEpoch, StacksEpochId, BLOCK_LIMIT_MAINNET_10, HELIUM_BLOCK_LIMIT_20, + StacksEpoch, StacksEpochId, BLOCK_LIMIT_MAINNET_10, HELIUM_BLOCK_LIMIT_20, PEER_VERSION_EPOCH_1_0, PEER_VERSION_EPOCH_2_0, PEER_VERSION_EPOCH_2_05, PEER_VERSION_EPOCH_2_1, PEER_VERSION_EPOCH_2_2, PEER_VERSION_EPOCH_2_3, PEER_VERSION_EPOCH_2_4, PEER_VERSION_EPOCH_2_5, PEER_VERSION_EPOCH_3_0, }; -use stacks::cost_estimates::metrics::UnitMetric; -use stacks::cost_estimates::UnitEstimator; use stacks_common::address::AddressHashMode; use stacks_common::consts::STACKS_EPOCH_MAX; use stacks_common::types::chainstate::StacksAddress; @@ -411,14 +424,9 @@ fn simple_neon_integration() { .unwrap() .unwrap(); - let mut mempool = MemPoolDB::open( - naka_conf.is_mainnet(), - naka_conf.burnchain.chain_id, - &naka_conf.get_chainstate_path_str(), - Box::new(UnitEstimator), - Box::new(UnitMetric), - ) - .expect("Database failure opening mempool"); + let mut mempool = naka_conf + .connect_mempool_db() + .expect("Database failure opening mempool"); mempool .submit_raw( From f075a99fc7b554479d8b43834ab2d6a94d3cfa75 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Sun, 10 Dec 2023 09:58:40 -0600 Subject: [PATCH 05/16] chore: comments, cleanup unused functions --- testnet/stacks-node/src/globals.rs | 10 +- testnet/stacks-node/src/nakamoto_node.rs | 59 ++++++------ testnet/stacks-node/src/nakamoto_node/peer.rs | 4 +- .../stacks-node/src/nakamoto_node/relayer.rs | 91 ++++++++----------- testnet/stacks-node/src/run_loop/nakamoto.rs | 62 ++++++------- 5 files changed, 102 insertions(+), 124 deletions(-) diff --git a/testnet/stacks-node/src/globals.rs 
b/testnet/stacks-node/src/globals.rs index 6c60e9a591..bd1560477c 100644 --- a/testnet/stacks-node/src/globals.rs +++ b/testnet/stacks-node/src/globals.rs @@ -35,7 +35,9 @@ pub enum RelayerDirective { Exit, } -/// Inter-thread communication structure, shared between threads +/// Inter-thread communication structure, shared between threads. This +/// is generic over the relayer communication channel: nakamoto and +/// neon nodes use different relayer directives. pub struct Globals { /// Last sortition processed last_sortition: Arc>>, @@ -100,6 +102,12 @@ impl Globals { } } + /// Does the inventory sync watcher think we still need to + /// catch up to the chain tip? + pub fn in_initial_block_download(&self) -> bool { + self.sync_comms.get_ibd() + } + /// Get the last sortition processed by the relayer thread pub fn get_last_sortition(&self) -> Option { self.last_sortition diff --git a/testnet/stacks-node/src/nakamoto_node.rs b/testnet/stacks-node/src/nakamoto_node.rs index 0482bbfb05..3584a5d864 100644 --- a/testnet/stacks-node/src/nakamoto_node.rs +++ b/testnet/stacks-node/src/nakamoto_node.rs @@ -65,15 +65,18 @@ pub struct StacksNode { pub relayer_thread_handle: JoinHandle<()>, } -/// Types of errors that can arise during mining +/// Types of errors that can arise during Nakamoto StacksNode operation #[derive(Debug)] -enum Error { +pub enum Error { /// Can't find the block sortition snapshot for the chain tip SnapshotNotFoundForChainTip, /// The burnchain tip changed while this operation was in progress BurnchainTipChanged, + /// Error while spawning a subordinate thread SpawnError(std::io::Error), + /// Injected testing errors FaultInjection, + /// This miner was elected, but another sortition occurred before mining started MissedMiningOpportunity, /// Attempted to mine while there was no active VRF key NoVRFKeyActive, @@ -83,7 +86,10 @@ enum Error { UnexpectedChainState, /// A burnchain operation failed when submitting it to the burnchain 
BurnchainSubmissionFailed, + /// A new parent has been discovered since mining started NewParentDiscovered, + // The thread that we tried to send to has closed + ChannelClosed, } impl StacksNode { @@ -201,19 +207,14 @@ impl StacksNode { /// telling it to process the block and begin mining if this miner won. /// returns _false_ if the relayer hung up the channel. /// Called from the main thread. - pub fn relayer_burnchain_notify(&self) -> bool { + fn relayer_burnchain_notify(&self, snapshot: BlockSnapshot) -> Result<(), Error> { if !self.is_miner { - // node is a follower, don't try to process my own tenure. - return true; + // node is a follower, don't need to notify the relayer of these events. + return Ok(()); } - let Some(snapshot) = self.globals.get_last_sortition() else { - debug!("Tenure: Notify sortition! No last burn block"); - return true; - }; - - debug!( - "Tenure: Notify sortition!"; + info!( + "Tenure: Notify burn block!"; "consensus_hash" => %snapshot.consensus_hash, "burn_block_hash" => %snapshot.burn_header_hash, "winning_stacks_block_hash" => %snapshot.winning_stacks_block_hash, @@ -224,15 +225,14 @@ impl StacksNode { // unlike in neon_node, the nakamoto node should *always* notify the relayer of // a new burnchain block - return self - .globals + self.globals .relay_send .send(RelayerDirective::ProcessedBurnBlock( - snapshot.consensus_hash.clone(), - snapshot.parent_burn_header_hash.clone(), - snapshot.winning_stacks_block_hash.clone(), + snapshot.consensus_hash, + snapshot.parent_burn_header_hash, + snapshot.winning_stacks_block_hash, )) - .is_ok(); + .map_err(|_| Error::ChannelClosed) } /// Process a state coming from the burnchain, by extracting the validated KeyRegisterOp @@ -244,9 +244,7 @@ impl StacksNode { sortdb: &SortitionDB, sort_id: &SortitionId, ibd: bool, - ) -> Option { - let mut last_sortitioned_block = None; - + ) -> Result<(), Error> { let ic = sortdb.index_conn(); let block_snapshot = SortitionDB::get_block_snapshot(&ic, sort_id) 
@@ -268,14 +266,11 @@ impl StacksNode { "Received burnchain block #{} including block_commit_op (winning) - {} ({})", block_height, op.apparent_sender, &op.block_header_hash ); - last_sortitioned_block = Some((block_snapshot.clone(), op.vtxindex)); - } else { - if self.is_miner { - info!( - "Received burnchain block #{} including block_commit_op - {} ({})", - block_height, op.apparent_sender, &op.block_header_hash - ); - } + } else if self.is_miner { + info!( + "Received burnchain block #{} including block_commit_op - {} ({})", + block_height, op.apparent_sender, &op.block_header_hash + ); } } @@ -296,8 +291,10 @@ impl StacksNode { "in_initial_block_download?" => ibd, ); - self.globals.set_last_sortition(block_snapshot); - last_sortitioned_block.map(|x| x.0) + self.globals.set_last_sortition(block_snapshot.clone()); + + // notify the relayer thread of the new sortition state + self.relayer_burnchain_notify(block_snapshot) } /// Join all inner threads diff --git a/testnet/stacks-node/src/nakamoto_node/peer.rs b/testnet/stacks-node/src/nakamoto_node/peer.rs index 762aa45eda..376c437723 100644 --- a/testnet/stacks-node/src/nakamoto_node/peer.rs +++ b/testnet/stacks-node/src/nakamoto_node/peer.rs @@ -154,7 +154,7 @@ impl PeerThread { ) } - pub fn new_all( + fn new_all( globals: Globals, config: &Config, pox_constants: PoxConstants, @@ -204,7 +204,7 @@ impl PeerThread { /// Run one pass of the p2p/http state machine /// Return true if we should continue running passes; false if not - pub fn run_one_pass( + pub(crate) fn run_one_pass( &mut self, indexer: &B, dns_client_opt: Option<&mut DNSClient>, diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index 04f04241e0..68ca5d723a 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -1,3 +1,4 @@ +use core::fmt; // Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation // Copyright (C) 
2020-2023 Stacks Open Internet Foundation // @@ -38,8 +39,6 @@ use stacks::core::mempool::MemPoolDB; use stacks::core::{ FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, STACKS_EPOCH_3_0_MARKER, }; -use stacks::cost_estimates::metrics::UnitMetric; -use stacks::cost_estimates::UnitEstimator; use stacks::monitoring::increment_stx_blocks_mined_counter; use stacks::net::db::LocalPeer; use stacks::net::relay::Relayer; @@ -82,10 +81,23 @@ pub enum RelayerDirective { Exit, } +impl fmt::Display for RelayerDirective { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + RelayerDirective::HandleNetResult(_) => write!(f, "HandleNetResult"), + RelayerDirective::ProcessedBurnBlock(_, _, _) => write!(f, "ProcessedBurnBlock"), + RelayerDirective::IssueBlockCommit(_, _) => write!(f, "IssueBlockCommit"), + RelayerDirective::RegisterKey(_) => write!(f, "RegisterKey"), + RelayerDirective::Exit => write!(f, "Exit"), + } + } +} + /// Relayer thread /// * accepts network results and stores blocks and microblocks /// * forwards new blocks, microblocks, and transactions to the p2p thread -/// * processes burnchain state +/// * issues (and re-issues) block commits to participate as a miner +/// * processes burnchain state to determine if selected as a miner /// * if mining, runs the miner and broadcasts blocks (via a subordinate MinerThread) pub struct RelayerThread { /// Node config @@ -148,14 +160,12 @@ pub struct RelayerThread { } impl RelayerThread { - /// Instantiate off of a StacksNode, a runloop, and a relayer. + /// Instantiate relayer thread. 
+ /// Uses `runloop` to obtain globals, config, and `is_miner` status pub fn new(runloop: &RunLoop, local_peer: LocalPeer, relayer: Relayer) -> RelayerThread { let config = runloop.config().clone(); let globals = runloop.get_globals(); let burn_db_path = config.get_burn_db_file_path(); - let stacks_chainstate_path = config.get_chainstate_path_str(); - let is_mainnet = config.is_mainnet(); - let chain_id = config.burnchain.chain_id; let is_miner = runloop.is_miner(); let sortdb = SortitionDB::open(&burn_db_path, true, runloop.get_burnchain().pox_constants) @@ -164,21 +174,9 @@ impl RelayerThread { let chainstate = open_chainstate_with_faults(&config).expect("FATAL: failed to open chainstate DB"); - let cost_estimator = config - .make_cost_estimator() - .unwrap_or_else(|| Box::new(UnitEstimator)); - let metric = config - .make_cost_metric() - .unwrap_or_else(|| Box::new(UnitMetric)); - - let mempool = MemPoolDB::open( - is_mainnet, - chain_id, - &stacks_chainstate_path, - cost_estimator, - metric, - ) - .expect("Database failure opening mempool"); + let mempool = config + .connect_mempool_db() + .expect("Database failure opening mempool"); let keychain = Keychain::default(config.node.seed.clone()); let bitcoin_controller = BitcoinRegtestController::new_dummy(config.clone()); @@ -215,7 +213,7 @@ impl RelayerThread { /// have we waited for the right conditions under which to start mining a block off of our /// chain tip?
- pub fn has_waited_for_latest_blocks(&self) -> bool { + fn has_waited_for_latest_blocks(&self) -> bool { // a network download pass took place (self.min_network_download_passes <= self.last_network_download_passes // a network inv pass took place @@ -226,21 +224,6 @@ impl RelayerThread { || !self.config.miner.wait_for_block_download } - /// Return debug string for waiting for latest blocks - pub fn debug_waited_for_latest_blocks(&self) -> String { - format!( - "({} <= {} && {} <= {}) || {} + {} < {} || {}", - self.min_network_download_passes, - self.last_network_download_passes, - self.min_network_inv_passes, - self.last_network_inv_passes, - self.last_network_block_height_ts, - self.config.node.wait_time_for_blocks, - get_epoch_time_ms(), - self.config.miner.wait_for_block_download - ) - } - /// Handle a NetworkResult from the p2p/http state machine. Usually this is the act of /// * preprocessing and storing new blocks and microblocks /// * relaying blocks, microblocks, and transacctions @@ -503,7 +486,6 @@ impl RelayerThread { let parent_winning_vtxindex = parent_winning_tx.vtxindex; - // let burn_fee_cap = self.config.burnchain.burn_fee_cap; let burn_fee_cap = get_mining_spend_amount(self.globals.get_miner_status()); let sunset_burn = self.burnchain.expected_sunset_burn( sort_tip.block_height + 1, @@ -738,9 +720,6 @@ impl RelayerThread { return None; } - // TODO (nakamoto): the miner shouldn't issue either of these directives - // if we're still in IBD! - // do we need a VRF key registration? 
if matches!( self.globals.get_leader_key_registration_state(), @@ -869,11 +848,10 @@ impl RelayerThread { /// Top-level dispatcher pub fn handle_directive(&mut self, directive: RelayerDirective) -> bool { + info!("Relayer: handling directive"; "directive" => %directive); let continue_running = match directive { RelayerDirective::HandleNetResult(net_result) => { - debug!("Relayer: directive Handle network result"); self.process_network_result(net_result); - debug!("Relayer: directive Handled network result"); true } // RegisterKey directives mean that the relayer should try to register a new VRF key. @@ -882,10 +860,12 @@ impl RelayerThread { if !self.is_miner { return true; } - debug!("Relayer: directive Register VRF key"); + if self.globals.in_initial_block_download() { + info!("In initial block download, will not submit VRF registration"); + return true; + } self.rotate_vrf_and_register(&last_burn_block); self.globals.counters.bump_blocks_processed(); - debug!("Relayer: directive Registered VRF key"); true } // ProcessedBurnBlock directives correspond to a new sortition perhaps occurring. 
@@ -895,30 +875,33 @@ impl RelayerThread { if !self.is_miner { return true; } - info!("Relayer: directive Process tenures"); - let res = self.handle_sortition( + if self.globals.in_initial_block_download() { + debug!("In initial block download, will not check sortition for miner"); + return true; + } + self.handle_sortition( consensus_hash, burn_hash, StacksBlockId(block_header_hash.0), - ); - info!("Relayer: directive Processed tenures"); - res + ) } // These are triggered by the relayer waking up, seeing a new consensus hash *or* a new first tenure block RelayerDirective::IssueBlockCommit(consensus_hash, block_hash) => { if !self.is_miner { return true; } - debug!("Relayer: Nakamoto Tenure Start"); + if self.globals.in_initial_block_download() { + debug!("In initial block download, will not issue block commit"); + return true; + } if let Err(e) = self.issue_block_commit(consensus_hash, block_hash) { warn!("Relayer failed to issue block commit"; "err" => ?e); } - debug!("Relayer: Nakamoto Tenure Start"); true } RelayerDirective::Exit => false, }; - + debug!("Relayer: handled directive"; "continue_running" => continue_running); continue_running } } diff --git a/testnet/stacks-node/src/run_loop/nakamoto.rs b/testnet/stacks-node/src/run_loop/nakamoto.rs index b3458a4ce6..e429e79c91 100644 --- a/testnet/stacks-node/src/run_loop/nakamoto.rs +++ b/testnet/stacks-node/src/run_loop/nakamoto.rs @@ -38,7 +38,6 @@ use stacks_common::util::hash::Hash160; use stacks_common::util::{get_epoch_time_secs, sleep_ms}; use stx_genesis::GenesisData; -use super::RunLoopCallbacks; use crate::burnchains::make_bitcoin_indexer; use crate::globals::Globals as GenericGlobals; use crate::monitoring::start_serving_monitoring_metrics; @@ -63,18 +62,18 @@ const UNCONDITIONAL_CHAIN_LIVENESS_CHECK: u64 = 30; #[cfg(not(test))] const UNCONDITIONAL_CHAIN_LIVENESS_CHECK: u64 = 300; -/// Coordinating a node running in neon mode. +/// Coordinating a node running in nakamoto mode. 
This runloop operates very similarly to the neon runloop. pub struct RunLoop { config: Config, - pub callbacks: RunLoopCallbacks, globals: Option, counters: Counters, coordinator_channels: Option<(CoordinatorReceivers, CoordinatorChannels)>, should_keep_running: Arc, event_dispatcher: EventDispatcher, + #[allow(dead_code)] pox_watchdog: Option, // can't be instantiated until .start() is called - is_miner: Option, // not known until .start() is called - burnchain: Option, // not known until .start() is called + is_miner: Option, // not known until .start() is called + burnchain: Option, // not known until .start() is called pox_watchdog_comms: PoxSyncWatchdogComms, /// NOTE: this is duplicated in self.globals, but it needs to be accessible before globals is /// instantiated (namely, so the test framework can access it). @@ -105,7 +104,6 @@ impl RunLoop { config, globals: None, coordinator_channels: Some(channels), - callbacks: RunLoopCallbacks::new(), counters: counters.unwrap_or_else(|| Counters::new()), should_keep_running, event_dispatcher, @@ -117,7 +115,7 @@ impl RunLoop { } } - pub fn get_globals(&self) -> Globals { + pub(crate) fn get_globals(&self) -> Globals { self.globals .clone() .expect("FATAL: globals not instantiated") @@ -127,47 +125,37 @@ impl RunLoop { self.globals = Some(globals); } - pub fn get_coordinator_channel(&self) -> Option { + pub(crate) fn get_coordinator_channel(&self) -> Option { self.coordinator_channels.as_ref().map(|x| x.1.clone()) } - pub fn get_counters(&self) -> Counters { + pub(crate) fn get_counters(&self) -> Counters { self.counters.clone() } - pub fn config(&self) -> &Config { + pub(crate) fn config(&self) -> &Config { &self.config } - pub fn get_event_dispatcher(&self) -> EventDispatcher { + pub(crate) fn get_event_dispatcher(&self) -> EventDispatcher { self.event_dispatcher.clone() } - pub fn is_miner(&self) -> bool { + pub(crate) fn is_miner(&self) -> bool { self.is_miner.unwrap_or(false) } - pub fn 
get_pox_sync_comms(&self) -> PoxSyncWatchdogComms { - self.pox_watchdog_comms.clone() - } - - pub fn get_termination_switch(&self) -> Arc { + pub(crate) fn get_termination_switch(&self) -> Arc { self.should_keep_running.clone() } - pub fn get_burnchain(&self) -> Burnchain { + pub(crate) fn get_burnchain(&self) -> Burnchain { self.burnchain .clone() .expect("FATAL: tried to get runloop burnchain before calling .start()") } - pub fn get_pox_watchdog(&mut self) -> &mut PoxSyncWatchdog { - self.pox_watchdog - .as_mut() - .expect("FATAL: tried to get PoX watchdog before calling .start()") - } - - pub fn get_miner_status(&self) -> Arc> { + pub(crate) fn get_miner_status(&self) -> Arc> { self.miner_status.clone() } @@ -228,7 +216,7 @@ impl RunLoop { /// Boot up the stacks chainstate. /// Instantiate the chainstate and push out the boot receipts to observers /// This is only public so we can test it. - pub fn boot_chainstate(&mut self, burnchain_config: &Burnchain) -> StacksChainState { + fn boot_chainstate(&mut self, burnchain_config: &Burnchain) -> StacksChainState { let use_test_genesis_data = use_test_genesis_chainstate(&self.config); // load up genesis balances @@ -862,7 +850,14 @@ impl RunLoop { // wait for the p2p state-machine to do at least one pass debug!("Runloop: Wait until Stacks block downloads reach a quiescent state before processing more burnchain blocks"; "remote_chain_height" => remote_chain_height, "local_chain_height" => burnchain_height); + // TODO: for now, we just set initial block download false. + // I think that the sync watchdog probably needs to change a fair bit + // for nakamoto. There may be some opportunity to refactor this runloop + // as well (e.g., the `mine_start` should be integrated with the + // watchdog so that there's just one source of truth about ibd), + // but I think all of this can be saved for post-neon work. 
let ibd = false; + self.pox_watchdog_comms.set_ibd(ibd); // calculate burnchain sync percentage let percent: f64 = if remote_chain_height > 0 { @@ -947,16 +942,11 @@ impl RunLoop { let sortition_id = &block.sortition_id; // Have the node process the new block, that can include, or not, a sortition. - node.process_burnchain_state(burnchain.sortdb_mut(), sortition_id, ibd); - - // Now, tell the relayer to check if it won a sortition during this block, - // and, if so, to process and advertize the block. This is basically a - // no-op during boot-up. - // - // _this will block if the relayer's buffer is full_ - if !node.relayer_burnchain_notify() { - // relayer hung up, exit. - error!("Runloop: Block relayer and miner hung up, exiting."); + if let Err(e) = + node.process_burnchain_state(burnchain.sortdb_mut(), sortition_id, ibd) + { + // relayer errored, exit. + error!("Runloop: Block relayer and miner errored, exiting."; "err" => ?e); return; } } From 54916105fd6dfcf3341c64b68efa9a53c33248fc Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 12 Dec 2023 09:48:21 -0600 Subject: [PATCH 06/16] chore: handle merge/rebase artifacts, address PR feedback --- Cargo.lock | 4 + stackslib/src/chainstate/nakamoto/miner.rs | 10 +- .../stacks-node/src/nakamoto_node/miner.rs | 56 +-- testnet/stacks-node/src/run_loop/nakamoto.rs | 339 +----------------- 4 files changed, 41 insertions(+), 368 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a90cb48536..b9f59752b4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2354,6 +2354,8 @@ checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" [[package]] name = "p256k1" version = "6.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5afcf536d20c074ef45371ee9a654dcfc46fb2dde18ecc54ec30c936eb850fa2" dependencies = [ "bindgen", "bitvec", @@ -4711,6 +4713,8 @@ dependencies = [ [[package]] name = "wsts" version = "5.0.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c250118354755b4abb091a83cb8d659b511c0ae211ccdb3b1254e3db199cb86" dependencies = [ "aes-gcm 0.10.2", "bs58 0.5.0", diff --git a/stackslib/src/chainstate/nakamoto/miner.rs b/stackslib/src/chainstate/nakamoto/miner.rs index 1f75cd55ac..5b511f6aa2 100644 --- a/stackslib/src/chainstate/nakamoto/miner.rs +++ b/stackslib/src/chainstate/nakamoto/miner.rs @@ -570,9 +570,13 @@ impl NakamotoBlockBuilder { .block_limit() .expect("Failed to obtain block limit from miner's block connection"); - let initial_txs: Vec<_> = - [new_tenure_info.tenure_change_tx.cloned(), - new_tenure_info.coinbase_tx.cloned()].into_iter().filter_map(|x| x).collect(); + let initial_txs: Vec<_> = [ + tenure_info.tenure_change_tx.clone(), + tenure_info.coinbase_tx.clone(), + ] + .into_iter() + .filter_map(|x| x) + .collect(); let (blocked, tx_events) = match StacksBlockBuilder::select_and_apply_transactions( &mut tenure_tx, &mut builder, diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index ae2781ce7b..07efbedaca 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -21,7 +21,7 @@ use clarity::vm::types::PrincipalData; use stacks::burnchains::{Burnchain, BurnchainParameters}; use stacks::chainstate::burn::db::sortdb::SortitionDB; use stacks::chainstate::burn::{BlockSnapshot, ConsensusHash}; -use stacks::chainstate::nakamoto::miner::{NakamotoBlockBuilder, NakamotoTenureStart}; +use stacks::chainstate::nakamoto::miner::{NakamotoBlockBuilder, NakamotoTenureInfo}; use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState}; use stacks::chainstate::stacks::db::{StacksChainState, StacksHeaderInfo}; use stacks::chainstate::stacks::{ @@ -56,9 +56,8 @@ pub enum MinerDirective { } struct ParentTenureInfo { - #[allow(dead_code)] - parent_tenure_start: StacksBlockId, parent_tenure_blocks: u64, + 
parent_tenure_consensus_hash: ConsensusHash, } /// Metadata required for beginning a new tenure @@ -167,12 +166,12 @@ impl BlockMinerThread { self.burnchain.pox_constants.clone(), ) .expect("FATAL: could not open sortition DB"); - let sortition_handle = sort_db.index_handle_at_tip(); + let mut sortition_handle = sort_db.index_handle_at_tip(); let staging_tx = chain_state.staging_db_tx_begin()?; NakamotoChainState::accept_block( &chainstate_config, block, - &sortition_handle, + &mut sortition_handle, &staging_tx, &signer.aggregate_public_key, )?; @@ -194,6 +193,7 @@ impl BlockMinerThread { &mut self, nonce: u64, parent_block_id: StacksBlockId, + parent_tenure_consensus_hash: ConsensusHash, parent_tenure_blocks: u64, miner_pkh: Hash160, ) -> Option { @@ -203,17 +203,18 @@ impl BlockMinerThread { } let is_mainnet = self.config.is_mainnet(); let chain_id = self.config.burnchain.chain_id; - let tenure_change_tx_payload = TransactionPayload::TenureChange( - TenureChangePayload { - previous_tenure_end: parent_block_id, - previous_tenure_blocks: u32::try_from(parent_tenure_blocks) - .expect("FATAL: more than u32 blocks in a tenure"), - cause: TenureChangeCause::BlockFound, - pubkey_hash: miner_pkh, - signers: vec![], - }, - ThresholdSignature::mock(), - ); + let tenure_change_tx_payload = TransactionPayload::TenureChange(TenureChangePayload { + tenure_consensus_hash: self.burn_block.consensus_hash.clone(), + prev_tenure_consensus_hash: parent_tenure_consensus_hash, + burn_view_consensus_hash: self.burn_block.consensus_hash.clone(), + previous_tenure_end: parent_block_id, + previous_tenure_blocks: u32::try_from(parent_tenure_blocks) + .expect("FATAL: more than u32 blocks in a tenure"), + cause: TenureChangeCause::BlockFound, + pubkey_hash: miner_pkh, + signers: vec![], + signature: ThresholdSignature::mock(), + }); let mut tx_auth = self.keychain.get_transaction_auth().unwrap(); tx_auth.set_origin_nonce(nonce); @@ -297,7 +298,7 @@ impl BlockMinerThread { return 
Some(ParentStacksBlockInfo { parent_tenure: Some(ParentTenureInfo { - parent_tenure_start: chain_tip.metadata.index_block_hash(), + parent_tenure_consensus_hash: chain_tip.metadata.consensus_hash, parent_tenure_blocks: 0, }), stacks_parent_header: chain_tip.metadata, @@ -404,6 +405,7 @@ impl BlockMinerThread { let tenure_change_tx = self.generate_tenure_change_tx( current_miner_nonce, parent_block_id, + par_tenure_info.parent_tenure_consensus_hash, par_tenure_info.parent_tenure_blocks, self.keychain.get_nakamoto_pkh(), )?; @@ -412,16 +414,15 @@ impl BlockMinerThread { target_epoch_id, vrf_proof.clone(), ); - Some(NakamotoTenureStart { - coinbase_tx, - // TODO (refactor): the nakamoto block builder doesn't use this VRF proof, - // it has to be included in the coinbase tx, which is an arg to the builder. - // we should probably just remove this from the nakamoto block builder. - vrf_proof: vrf_proof.clone(), - tenure_change_tx, - }) + NakamotoTenureInfo { + coinbase_tx: Some(coinbase_tx), + tenure_change_tx: Some(tenure_change_tx), + } } else { - None + NakamotoTenureInfo { + coinbase_tx: None, + tenure_change_tx: None, + } }; parent_block_info.stacks_parent_header.microblock_tail = None; @@ -584,9 +585,10 @@ impl ParentStacksBlockInfo { } else { 1 }; + let parent_tenure_consensus_hash = parent_tenure_header.consensus_hash.clone(); Some(ParentTenureInfo { - parent_tenure_start: parent_tenure_id.clone(), parent_tenure_blocks, + parent_tenure_consensus_hash, }) } else { None diff --git a/testnet/stacks-node/src/run_loop/nakamoto.rs b/testnet/stacks-node/src/run_loop/nakamoto.rs index e429e79c91..83382f869e 100644 --- a/testnet/stacks-node/src/run_loop/nakamoto.rs +++ b/testnet/stacks-node/src/run_loop/nakamoto.rs @@ -25,9 +25,7 @@ use stacks::chainstate::burn::db::sortdb::SortitionDB; use stacks::chainstate::burn::BlockSnapshot; use stacks::chainstate::coordinator::comm::{CoordinatorChannels, CoordinatorReceivers}; use stacks::chainstate::coordinator::{ - 
static_get_canonical_affirmation_map, static_get_heaviest_affirmation_map, - static_get_stacks_tip_affirmation_map, ChainsCoordinator, ChainsCoordinatorConfig, - CoordinatorCommunication, + ChainsCoordinator, ChainsCoordinatorConfig, CoordinatorCommunication, }; use stacks::chainstate::stacks::db::{ChainStateBootData, StacksChainState}; use stacks::chainstate::stacks::miner::{signal_mining_blocked, signal_mining_ready, MinerStatus}; @@ -35,7 +33,6 @@ use stacks::core::StacksEpochId; use stacks::net::atlas::{AtlasConfig, AtlasDB, Attachment}; use stacks_common::types::PublicKey; use stacks_common::util::hash::Hash160; -use stacks_common::util::{get_epoch_time_secs, sleep_ms}; use stx_genesis::GenesisData; use crate::burnchains::make_bitcoin_indexer; @@ -56,12 +53,6 @@ use crate::{ pub const STDERR: i32 = 2; pub type Globals = GenericGlobals; -#[cfg(test)] -const UNCONDITIONAL_CHAIN_LIVENESS_CHECK: u64 = 30; - -#[cfg(not(test))] -const UNCONDITIONAL_CHAIN_LIVENESS_CHECK: u64 = 300; - /// Coordinating a node running in nakamoto mode. This runloop operates very similarly to the neon runloop. pub struct RunLoop { config: Config, @@ -389,332 +380,6 @@ impl RunLoop { ) } - /// Wake up and drive stacks block processing if there's been a PoX reorg. - /// Be careful not to saturate calls to announce new stacks blocks, because that will disable - /// mining (which would prevent a miner attempting to fix a hidden PoX anchor block from making - /// progress). 
- fn drive_pox_reorg_stacks_block_processing( - globals: &Globals, - config: &Config, - burnchain: &Burnchain, - sortdb: &SortitionDB, - last_stacks_pox_reorg_recover_time: &mut u128, - ) { - let delay = cmp::max( - config.node.chain_liveness_poll_time_secs, - cmp::max( - config.miner.first_attempt_time_ms, - config.miner.subsequent_attempt_time_ms, - ) / 1000, - ); - - if *last_stacks_pox_reorg_recover_time + (delay as u128) >= get_epoch_time_secs().into() { - // too soon - return; - } - - // compare stacks and heaviest AMs - let burnchain_db = burnchain - .open_burnchain_db(false) - .expect("FATAL: failed to open burnchain DB"); - - let sn = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) - .expect("FATAL: could not read sortition DB"); - - let indexer = make_bitcoin_indexer(config, Some(globals.should_keep_running.clone())); - - let heaviest_affirmation_map = match static_get_heaviest_affirmation_map( - &burnchain, - &indexer, - &burnchain_db, - sortdb, - &sn.sortition_id, - ) { - Ok(am) => am, - Err(e) => { - warn!("Failed to find heaviest affirmation map: {:?}", &e); - return; - } - }; - - let highest_sn = SortitionDB::get_highest_known_burn_chain_tip(sortdb.conn()) - .expect("FATAL: could not read sortition DB"); - - let canonical_burnchain_tip = burnchain_db - .get_canonical_chain_tip() - .expect("FATAL: could not read burnchain DB"); - - let sortition_tip_affirmation_map = - match SortitionDB::find_sortition_tip_affirmation_map(sortdb, &sn.sortition_id) { - Ok(am) => am, - Err(e) => { - warn!("Failed to find sortition affirmation map: {:?}", &e); - return; - } - }; - - let stacks_tip_affirmation_map = static_get_stacks_tip_affirmation_map( - &burnchain_db, - sortdb, - &sn.sortition_id, - &sn.canonical_stacks_tip_consensus_hash, - &sn.canonical_stacks_tip_hash, - ) - .expect("FATAL: could not query stacks DB"); - - if stacks_tip_affirmation_map.len() < heaviest_affirmation_map.len() - || stacks_tip_affirmation_map - 
.find_divergence(&heaviest_affirmation_map) - .is_some() - { - // the sortition affirmation map might also be inconsistent, so we'll need to fix that - // (i.e. the underlying sortitions) before we can fix the stacks fork - if sortition_tip_affirmation_map.len() < heaviest_affirmation_map.len() - || sortition_tip_affirmation_map - .find_divergence(&heaviest_affirmation_map) - .is_some() - { - debug!("Drive burn block processing: possible PoX reorg (sortition tip: {}, heaviest: {})", &sortition_tip_affirmation_map, &heaviest_affirmation_map); - globals.coord().announce_new_burn_block(); - } else if highest_sn.block_height == sn.block_height - && sn.block_height == canonical_burnchain_tip.block_height - { - // need to force an affirmation reorg because there will be no more burn block - // announcements. - debug!("Drive burn block processing: possible PoX reorg (sortition tip: {}, heaviest: {}, burn height {})", &sortition_tip_affirmation_map, &heaviest_affirmation_map, sn.block_height); - globals.coord().announce_new_burn_block(); - } - - debug!( - "Drive stacks block processing: possible PoX reorg (stacks tip: {}, heaviest: {})", - &stacks_tip_affirmation_map, &heaviest_affirmation_map - ); - globals.coord().announce_new_stacks_block(); - } else { - debug!( - "Drive stacks block processing: no need (stacks tip: {}, heaviest: {})", - &stacks_tip_affirmation_map, &heaviest_affirmation_map - ); - - // announce a new stacks block to force the chains coordinator - // to wake up anyways. this isn't free, so we have to make sure - // the chain-liveness thread doesn't wake up too often - globals.coord().announce_new_stacks_block(); - } - - *last_stacks_pox_reorg_recover_time = get_epoch_time_secs().into(); - } - - /// Wake up and drive sortition processing if there's been a PoX reorg. 
- /// Be careful not to saturate calls to announce new burn blocks, because that will disable - /// mining (which would prevent a miner attempting to fix a hidden PoX anchor block from making - /// progress). - /// - /// only call if no in ibd - fn drive_pox_reorg_burn_block_processing( - globals: &Globals, - config: &Config, - burnchain: &Burnchain, - sortdb: &SortitionDB, - chain_state_db: &StacksChainState, - last_burn_pox_reorg_recover_time: &mut u128, - last_announce_time: &mut u128, - ) { - let delay = cmp::max( - config.node.chain_liveness_poll_time_secs, - cmp::max( - config.miner.first_attempt_time_ms, - config.miner.subsequent_attempt_time_ms, - ) / 1000, - ); - - if *last_burn_pox_reorg_recover_time + (delay as u128) >= get_epoch_time_secs().into() { - // too soon - return; - } - - // compare sortition and heaviest AMs - let burnchain_db = burnchain - .open_burnchain_db(false) - .expect("FATAL: failed to open burnchain DB"); - - let highest_sn = SortitionDB::get_highest_known_burn_chain_tip(sortdb.conn()) - .expect("FATAL: could not read sortition DB"); - - let canonical_burnchain_tip = burnchain_db - .get_canonical_chain_tip() - .expect("FATAL: could not read burnchain DB"); - - if canonical_burnchain_tip.block_height > highest_sn.block_height { - // still processing sortitions - test_debug!( - "Drive burn block processing: still processing sortitions ({} > {})", - canonical_burnchain_tip.block_height, - highest_sn.block_height - ); - return; - } - - // NOTE: this could be lower than the highest_sn - let sn = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) - .expect("FATAL: could not read sortition DB"); - - let sortition_tip_affirmation_map = - match SortitionDB::find_sortition_tip_affirmation_map(sortdb, &sn.sortition_id) { - Ok(am) => am, - Err(e) => { - warn!("Failed to find sortition affirmation map: {:?}", &e); - return; - } - }; - - let indexer = make_bitcoin_indexer(config, Some(globals.should_keep_running.clone())); - - let 
heaviest_affirmation_map = match static_get_heaviest_affirmation_map( - &burnchain, - &indexer, - &burnchain_db, - sortdb, - &sn.sortition_id, - ) { - Ok(am) => am, - Err(e) => { - warn!("Failed to find heaviest affirmation map: {:?}", &e); - return; - } - }; - - let canonical_affirmation_map = match static_get_canonical_affirmation_map( - &burnchain, - &indexer, - &burnchain_db, - sortdb, - &chain_state_db, - &sn.sortition_id, - ) { - Ok(am) => am, - Err(e) => { - warn!("Failed to find canonical affirmation map: {:?}", &e); - return; - } - }; - - if sortition_tip_affirmation_map.len() < heaviest_affirmation_map.len() - || sortition_tip_affirmation_map - .find_divergence(&heaviest_affirmation_map) - .is_some() - || sn.block_height < highest_sn.block_height - { - debug!("Drive burn block processing: possible PoX reorg (sortition tip: {}, heaviest: {}, {} = heaviest_affirmation_map.len() - && sortition_tip_affirmation_map.len() <= canonical_affirmation_map.len() - { - if let Some(divergence_rc) = - canonical_affirmation_map.find_divergence(&sortition_tip_affirmation_map) - { - if divergence_rc + 1 >= (heaviest_affirmation_map.len() as u64) { - // we have unaffirmed PoX anchor blocks that are not yet processed in the sortition history - debug!("Drive burnchain processing: possible PoX reorg from unprocessed anchor block(s) (sortition tip: {}, heaviest: {}, canonical: {})", &sortition_tip_affirmation_map, &heaviest_affirmation_map, &canonical_affirmation_map); - globals.coord().announce_new_burn_block(); - globals.coord().announce_new_stacks_block(); - *last_announce_time = get_epoch_time_secs().into(); - } - } - } else { - debug!( - "Drive burn block processing: no need (sortition tip: {}, heaviest: {}, {} JoinHandle<()> { - let config = self.config.clone(); - let burnchain = self.get_burnchain(); - let sortdb = burnchain - .open_sortition_db(true) - .expect("FATAL: could not open sortition DB"); - - let (chain_state_db, _) = StacksChainState::open( - 
config.is_mainnet(), - config.burnchain.chain_id, - &config.get_chainstate_path_str(), - Some(config.node.get_marf_opts()), - ) - .unwrap(); - - let liveness_thread_handle = thread::Builder::new() - .name(format!("chain-liveness-{}", config.node.rpc_bind)) - .stack_size(BLOCK_PROCESSOR_STACK_SIZE) - .spawn(move || { - Self::drive_chain_liveness(globals, config, burnchain, sortdb, chain_state_db) - }) - .expect("FATAL: failed to spawn chain liveness thread"); - - liveness_thread_handle - } - /// Starts the node runloop. /// /// This function will block by looping infinitely. @@ -789,7 +454,6 @@ impl RunLoop { // Boot up the p2p network and relayer, and figure out how many sortitions we have so far // (it could be non-zero if the node is resuming from chainstate) let mut node = StacksNode::spawn(self, globals.clone(), relay_recv); - let liveness_thread = self.spawn_chain_liveness_thread(globals.clone()); // Wait for all pending sortitions to process let burnchain_db = burnchain_config @@ -839,7 +503,6 @@ impl RunLoop { globals.coord().stop_chains_coordinator(); coordinator_thread_handle.join().unwrap(); node.join(); - liveness_thread.join().unwrap(); info!("Exiting stacks-node"); break; From 1ec878f6c0f4b8c2c75cc32fb229ff7cbe79babf Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 12 Dec 2023 10:21:33 -0600 Subject: [PATCH 07/16] remove unconfirmed tx handling in nakamoto RelayerThread --- .../stacks-node/src/nakamoto_node/relayer.rs | 21 ++----------------- 1 file changed, 2 insertions(+), 19 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index 68ca5d723a..8c83bb35b9 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -1,4 +1,3 @@ -use core::fmt; // Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation // Copyright (C) 2020-2023 Stacks Open Internet Foundation // @@ -14,6 +13,7 @@ use core::fmt; // 
// You should have received a copy of the GNU General Public License // along with this program. If not, see . +use core::fmt; use std::collections::HashMap; use std::sync::mpsc::{Receiver, RecvTimeoutError}; use std::thread::JoinHandle; @@ -261,7 +261,7 @@ impl RelayerThread { ) .expect("BUG: failure processing network results"); - if net_receipts.num_new_blocks > 0 || net_receipts.num_new_confirmed_microblocks > 0 { + if net_receipts.num_new_blocks > 0 { // if we received any new block data that could invalidate our view of the chain tip, // then stop mining until we process it debug!("Relayer: block mining to process newly-arrived blocks or microblocks"); @@ -274,29 +274,12 @@ impl RelayerThread { .process_new_mempool_txs(net_receipts.mempool_txs_added); } - let num_unconfirmed_microblock_tx_receipts = - net_receipts.processed_unconfirmed_state.receipts.len(); - if num_unconfirmed_microblock_tx_receipts > 0 { - if let Some(unconfirmed_state) = self.chainstate.unconfirmed_state.as_ref() { - let canonical_tip = unconfirmed_state.confirmed_chain_tip.clone(); - self.event_dispatcher.process_new_microblocks( - canonical_tip, - net_receipts.processed_unconfirmed_state, - ); - } else { - warn!("Relayer: oops, unconfirmed state is uninitialized but there are microblock events"); - } - } - // Dispatch retrieved attachments, if any. 
if net_result.has_attachments() { self.event_dispatcher .process_new_attachments(&net_result.attachments); } - // synchronize unconfirmed tx index to p2p thread - self.globals.send_unconfirmed_txs(&self.chainstate); - // resume mining if we blocked it, and if we've done the requisite download // passes self.last_network_download_passes = net_result.num_download_passes; From be055d1a7ba01d730b79d9b90c7bc82a68f21935 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 12 Dec 2023 11:49:11 -0600 Subject: [PATCH 08/16] add epoch-3.0 burnchain configuration assertions --- testnet/stacks-node/src/config.rs | 25 +++++++++++++++++++ .../src/tests/nakamoto_integrations.rs | 6 ++--- 2 files changed, 28 insertions(+), 3 deletions(-) diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index 526c2a90da..8b1f7a8578 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -624,6 +624,31 @@ impl Config { ); burnchain.pox_constants.sunset_end = sunset_end.into(); } + + // check if the Epoch 3.0 burnchain settings as configured are going to be valid. + let epochs = StacksEpoch::get_epochs( + self.burnchain.get_bitcoin_network().1, + self.burnchain.epochs.as_ref(), + ); + let Some(epoch_30) = StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch30) + .map(|epoch_ix| epochs[epoch_ix].clone()) + else { + // no Epoch 3.0, so just return + return; + }; + if burnchain.pox_constants.prepare_length < 3 { + panic!( + "FATAL: Nakamoto rules require a prepare length >= 3. Prepare length set to {}", + burnchain.pox_constants.prepare_length + ); + } + if burnchain.is_in_prepare_phase(epoch_30.start_height) { + panic!( + "FATAL: Epoch 3.0 must start *during* a reward phase, not a prepare phase. Epoch 3.0 start set to: {}. PoX Parameters: {:?}", + epoch_30.start_height, + &burnchain.pox_constants + ); + } } /// Load up a Burnchain and apply config settings to it. 
diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 2b4fdfa540..0b1d79ffa3 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -105,13 +105,13 @@ lazy_static! { StacksEpoch { epoch_id: StacksEpochId::Epoch25, start_height: 6, - end_height: 220, + end_height: 221, block_limit: HELIUM_BLOCK_LIMIT_20.clone(), network_epoch: PEER_VERSION_EPOCH_2_5 }, StacksEpoch { epoch_id: StacksEpochId::Epoch30, - start_height: 220, + start_height: 221, end_height: STACKS_EPOCH_MAX, block_limit: HELIUM_BLOCK_LIMIT_20.clone(), network_epoch: PEER_VERSION_EPOCH_3_0 @@ -226,7 +226,7 @@ fn next_block_and_mine_commit( return Ok(true); } if commits_sent >= commits_before + 1 - && block_processed_time.elapsed() > Duration::from_secs(10) + && block_processed_time.elapsed() > Duration::from_secs(6) { return Ok(true); } From 636230795a167b8ecf5f34a384ce27e815d6f3b1 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 12 Dec 2023 14:58:03 -0600 Subject: [PATCH 09/16] requirements for configuring nakamoto-neon via CLI/toml --- Cargo.lock | 3 +- Cargo.toml | 2 + testnet/stacks-node/Cargo.toml | 3 +- testnet/stacks-node/src/config.rs | 7 ++- testnet/stacks-node/src/keychain.rs | 7 ++- testnet/stacks-node/src/mockamoto/signer.rs | 13 ++++- testnet/stacks-node/src/nakamoto_node.rs | 14 ++++- .../stacks-node/src/nakamoto_node/miner.rs | 52 ++++++++++++------- .../stacks-node/src/nakamoto_node/relayer.rs | 8 ++- testnet/stacks-node/src/run_loop/nakamoto.rs | 3 +- testnet/stacks-node/src/run_loop/neon.rs | 7 +-- 11 files changed, 83 insertions(+), 36 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b9f59752b4..78c3a9e1e6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3551,8 +3551,7 @@ dependencies = [ "libc", "libsigner", "pico-args", - "rand 0.7.3", - "rand_core 0.6.4", + "rand 0.8.5", "regex", "reqwest", "ring", diff --git 
a/Cargo.toml b/Cargo.toml index a861f143e9..3d2d9d066d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -15,6 +15,8 @@ members = [ # Dependencies we want to keep the same between workspace members [workspace.dependencies] wsts = "5.0" +rand_core = "0.6" +rand = "0.8" # Use a bit more than default optimization for # dev builds to speed up test execution diff --git a/testnet/stacks-node/Cargo.toml b/testnet/stacks-node/Cargo.toml index 780b65116e..9e0c8a74e7 100644 --- a/testnet/stacks-node/Cargo.toml +++ b/testnet/stacks-node/Cargo.toml @@ -9,7 +9,6 @@ rust-version = "1.61" [dependencies] lazy_static = "1.4.0" pico-args = "0.3.1" -rand = "0.7.3" serde = "1" serde_derive = "1" serde_json = { version = "1.0", features = ["arbitrary_precision", "raw_value"] } @@ -29,7 +28,7 @@ chrono = "0.4.19" regex = "1" libsigner = { path = "../../libsigner" } wsts = { workspace = true } -rand_core = "0.6" +rand = { workspace = true } [dev-dependencies] ring = "0.16.19" diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index 8b1f7a8578..9018511b5a 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -1150,7 +1150,11 @@ impl Config { .as_ref() .map(|x| Secp256k1PrivateKey::from_hex(x)) .transpose()?, - self_signing_key: None, + self_signing_key: miner + .self_signing_seed + .as_ref() + .map(|x| SelfSigner::from_seed(*x)) + .or(miner_default_config.self_signing_key), }, None => miner_default_config, }; @@ -2300,6 +2304,7 @@ pub struct MinerConfigFile { pub candidate_retry_cache_size: Option, pub unprocessed_block_deadline_secs: Option, pub mining_key: Option, + pub self_signing_seed: Option, } #[derive(Clone, Deserialize, Default, Debug)] diff --git a/testnet/stacks-node/src/keychain.rs b/testnet/stacks-node/src/keychain.rs index 712fa0b662..d2575cb2b9 100644 --- a/testnet/stacks-node/src/keychain.rs +++ b/testnet/stacks-node/src/keychain.rs @@ -51,11 +51,16 @@ impl Keychain { Hash160::from_node_public_key(&pk) } - /// 
Get the secrete key of the nakamoto mining key + /// Get the secret key of the nakamoto mining key pub fn get_nakamoto_sk(&self) -> &Secp256k1PrivateKey { &self.nakamoto_mining_key } + /// Set the secret key of the nakamoto mining key + pub fn set_nakamoto_sk(&mut self, mining_key: Secp256k1PrivateKey) { + self.nakamoto_mining_key = mining_key; + } + /// Create a default keychain from the seed, with a default nakamoto mining key derived /// from the same seed ( pub fn default(seed: Vec) -> Keychain { diff --git a/testnet/stacks-node/src/mockamoto/signer.rs b/testnet/stacks-node/src/mockamoto/signer.rs index c0d4af0b69..7e577b24f2 100644 --- a/testnet/stacks-node/src/mockamoto/signer.rs +++ b/testnet/stacks-node/src/mockamoto/signer.rs @@ -1,3 +1,4 @@ +use rand::{CryptoRng, RngCore, SeedableRng}; use stacks::chainstate::nakamoto::NakamotoBlock; use stacks::chainstate::stacks::ThresholdSignature; use wsts::curve::point::Point; @@ -22,9 +23,17 @@ pub struct SelfSigner { } impl SelfSigner { + pub fn from_seed(seed: u64) -> Self { + let rng = rand::rngs::StdRng::seed_from_u64(seed); + Self::from_rng::(rng) + } + pub fn single_signer() -> Self { - let mut rng = rand_core::OsRng::default(); + let rng = rand::rngs::OsRng::default(); + Self::from_rng::(rng) + } + fn from_rng(mut rng: RNG) -> Self { // Create the parties let mut signer_parties = [wsts::v2::Party::new(0, &[0], 1, 1, 1, &mut rng)]; @@ -54,7 +63,7 @@ impl SelfSigner { } pub fn sign_nakamoto_block(&mut self, block: &mut NakamotoBlock) { - let mut rng = rand_core::OsRng; + let mut rng = rand::rngs::OsRng::default(); let msg = block .header .signer_signature_hash() diff --git a/testnet/stacks-node/src/nakamoto_node.rs b/testnet/stacks-node/src/nakamoto_node.rs index 3584a5d864..cf88877e10 100644 --- a/testnet/stacks-node/src/nakamoto_node.rs +++ b/testnet/stacks-node/src/nakamoto_node.rs @@ -21,6 +21,7 @@ use std::thread::JoinHandle; use stacks::burnchains::{BurnchainSigner, Txid}; use 
stacks::chainstate::burn::db::sortdb::SortitionDB; use stacks::chainstate::burn::BlockSnapshot; +use stacks::chainstate::stacks::Error as ChainstateError; use stacks::monitoring; use stacks::monitoring::update_active_miners_count_gauge; use stacks::net::atlas::AtlasConfig; @@ -88,6 +89,11 @@ pub enum Error { BurnchainSubmissionFailed, /// A new parent has been discovered since mining started NewParentDiscovered, + /// A failure occurred while constructing a VRF Proof + BadVrfConstruction, + CannotSelfSign, + MiningFailure(ChainstateError), + SigningError(&'static str), // The thread that we tried to send to has closed ChannelClosed, } @@ -125,7 +131,10 @@ impl StacksNode { let is_miner = runloop.is_miner(); let burnchain = runloop.get_burnchain(); let atlas_config = config.atlas.clone(); - let keychain = Keychain::default(config.node.seed.clone()); + let mut keychain = Keychain::default(config.node.seed.clone()); + if let Some(mining_key) = config.miner.mining_key.clone() { + keychain.set_nakamoto_sk(mining_key); + } // we can call _open_ here rather than _connect_, since connect is first called in // make_genesis_block @@ -166,7 +175,8 @@ impl StacksNode { }; globals.set_initial_leader_key_registration_state(leader_key_registration_state); - let relayer_thread = RelayerThread::new(runloop, local_peer.clone(), relayer); + let relayer_thread = + RelayerThread::new(runloop, local_peer.clone(), relayer, keychain.clone()); StacksNode::set_monitoring_miner_address(&keychain, &relayer_thread); diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 07efbedaca..b38225f31f 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -132,9 +132,12 @@ impl BlockMinerThread { } // now, actually run this tenure - let Some(new_block) = self.mine_block() else { - warn!("Failed to mine block"); - return; + let new_block = match self.mine_block() { + Ok(x) => x, + 
Err(e) => { + warn!("Failed to mine block: {e:?}"); + return; + } }; if let Some(self_signer) = self.config.self_signing() { @@ -196,10 +199,11 @@ impl BlockMinerThread { parent_tenure_consensus_hash: ConsensusHash, parent_tenure_blocks: u64, miner_pkh: Hash160, - ) -> Option { + ) -> Result { if self.config.self_signing().is_none() { // if we're not self-signing, then we can't generate a tenure change tx: it has to come from the signers. - return None; + warn!("Tried to generate a tenure change transaction, but we aren't self-signing"); + return Err(NakamotoNodeError::CannotSelfSign); } let is_mainnet = self.config.is_mainnet(); let chain_id = self.config.burnchain.chain_id; @@ -232,7 +236,7 @@ impl BlockMinerThread { let mut tx_signer = StacksTransactionSigner::new(&tx); self.keychain.sign_as_origin(&mut tx_signer); - Some(tx_signer.get_tx().unwrap()) + Ok(tx_signer.get_tx().unwrap()) } /// Create a coinbase transaction. @@ -279,7 +283,7 @@ impl BlockMinerThread { &self, burn_db: &mut SortitionDB, chain_state: &mut StacksChainState, - ) -> Option { + ) -> Result { let Some(stacks_tip) = NakamotoChainState::get_canonical_block_header(chain_state.db(), burn_db) .expect("FATAL: could not query chain tip") @@ -296,7 +300,7 @@ impl BlockMinerThread { burnchain_params.first_block_timestamp.into(), ); - return Some(ParentStacksBlockInfo { + return Ok(ParentStacksBlockInfo { parent_tenure: Some(ParentTenureInfo { parent_tenure_consensus_hash: chain_tip.metadata.consensus_hash, parent_tenure_blocks: 0, @@ -319,12 +323,12 @@ impl BlockMinerThread { &self.parent_tenure_id, stacks_tip, ) { - Ok(parent_info) => Some(parent_info), + Ok(parent_info) => Ok(parent_info), Err(NakamotoNodeError::BurnchainTipChanged) => { self.globals.counters.bump_missed_tenures(); - None + Err(NakamotoNodeError::BurnchainTipChanged) } - Err(..) => None, + Err(e) => Err(e), } } @@ -361,7 +365,7 @@ impl BlockMinerThread { /// burnchain block-commit transaction. 
If we succeed, then return the assembled block data as /// well as the microblock private key to use to produce microblocks. /// Return None if we couldn't build a block for whatever reason. - fn mine_block(&mut self) -> Option { + fn mine_block(&mut self) -> Result { debug!("block miner thread ID is {:?}", thread::current().id()); neon_node::fault_injection_long_tenure(); @@ -383,18 +387,20 @@ impl BlockMinerThread { let target_epoch_id = SortitionDB::get_stacks_epoch(burn_db.conn(), self.burn_block.block_height + 1) - .ok()? + .map_err(|_| NakamotoNodeError::SnapshotNotFoundForChainTip)? .expect("FATAL: no epoch defined") .epoch_id; let mut parent_block_info = self.load_block_parent_info(&mut burn_db, &mut chain_state)?; - let vrf_proof = self.make_vrf_proof()?; + let vrf_proof = self + .make_vrf_proof() + .ok_or_else(|| NakamotoNodeError::BadVrfConstruction)?; if self.last_mined_blocks.is_empty() { if parent_block_info.parent_tenure.is_none() { warn!( "Miner should be starting a new tenure, but failed to load parent tenure info" ); - return None; + return Err(NakamotoNodeError::ParentNotFound); } } @@ -452,14 +458,20 @@ impl BlockMinerThread { Ok(block) => block, Err(e) => { error!("Relayer: Failure mining anchored block: {}", e); - return None; + return Err(NakamotoNodeError::MiningFailure(e)); } }; let mining_key = self.keychain.get_nakamoto_sk(); let miner_signature = mining_key - .sign(block.header.signature_hash().ok()?.as_bytes()) - .ok()?; + .sign( + block + .header + .signature_hash() + .map_err(|_| NakamotoNodeError::SigningError("Could not create sighash"))? 
+ .as_bytes(), + ) + .map_err(NakamotoNodeError::SigningError)?; block.header.miner_signature = miner_signature; info!( @@ -483,10 +495,10 @@ impl BlockMinerThread { if cur_burn_chain_tip.consensus_hash != block.header.consensus_hash { info!("Miner: Cancel block assembly; burnchain tip has changed"); self.globals.counters.bump_missed_tenures(); - return None; + return Err(NakamotoNodeError::BurnchainTipChanged); } - Some(block) + Ok(block) } } diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index 8c83bb35b9..b4aac584bb 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -162,7 +162,12 @@ pub struct RelayerThread { impl RelayerThread { /// Instantiate relayer thread. /// Uses `runloop` to obtain globals, config, and `is_miner`` status - pub fn new(runloop: &RunLoop, local_peer: LocalPeer, relayer: Relayer) -> RelayerThread { + pub fn new( + runloop: &RunLoop, + local_peer: LocalPeer, + relayer: Relayer, + keychain: Keychain, + ) -> RelayerThread { let config = runloop.config().clone(); let globals = runloop.get_globals(); let burn_db_path = config.get_burn_db_file_path(); @@ -178,7 +183,6 @@ impl RelayerThread { .connect_mempool_db() .expect("Database failure opening mempool"); - let keychain = Keychain::default(config.node.seed.clone()); let bitcoin_controller = BitcoinRegtestController::new_dummy(config.clone()); RelayerThread { diff --git a/testnet/stacks-node/src/run_loop/nakamoto.rs b/testnet/stacks-node/src/run_loop/nakamoto.rs index 83382f869e..f18f236da6 100644 --- a/testnet/stacks-node/src/run_loop/nakamoto.rs +++ b/testnet/stacks-node/src/run_loop/nakamoto.rs @@ -392,7 +392,8 @@ impl RunLoop { .take() .expect("Run loop already started, can only start once after initialization."); - neon::RunLoop::setup_termination_handler(self.should_keep_running.clone()); + // setup the termination handler, allow it to error if a prior 
runloop already set it + neon::RunLoop::setup_termination_handler(self.should_keep_running.clone(), true); let mut burnchain = neon::RunLoop::instantiate_burnchain_state( &self.config, self.should_keep_running.clone(), diff --git a/testnet/stacks-node/src/run_loop/neon.rs b/testnet/stacks-node/src/run_loop/neon.rs index cffcd1aa10..68e13dc511 100644 --- a/testnet/stacks-node/src/run_loop/neon.rs +++ b/testnet/stacks-node/src/run_loop/neon.rs @@ -295,7 +295,7 @@ impl RunLoop { /// Set up termination handler. Have a signal set the `should_keep_running` atomic bool to /// false. Panics of called more than once. - pub fn setup_termination_handler(keep_running_writer: Arc) { + pub fn setup_termination_handler(keep_running_writer: Arc, allow_err: bool) { let install = termination::set_handler(move |sig_id| match sig_id { SignalId::Bus => { let msg = "Caught SIGBUS; crashing immediately and dumping core\n"; @@ -313,7 +313,8 @@ impl RunLoop { if let Err(e) = install { // integration tests can do this - if cfg!(test) { + if cfg!(test) || allow_err { + info!("Error setting up signal handler, may have already been set"); } else { panic!("FATAL: error setting termination handler - {}", e); } @@ -974,7 +975,7 @@ impl RunLoop { .take() .expect("Run loop already started, can only start once after initialization."); - Self::setup_termination_handler(self.should_keep_running.clone()); + Self::setup_termination_handler(self.should_keep_running.clone(), false); let mut burnchain = Self::instantiate_burnchain_state( &self.config, self.should_keep_running.clone(), From 02c64574834ae87dd441ff8b6826715a04d7f8df Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Wed, 13 Dec 2023 07:58:12 -0600 Subject: [PATCH 10/16] hashmap -> hashset --- testnet/stacks-node/src/nakamoto_node.rs | 4 ++-- testnet/stacks-node/src/nakamoto_node/relayer.rs | 9 ++++----- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node.rs 
b/testnet/stacks-node/src/nakamoto_node.rs index cf88877e10..ddcbc197f7 100644 --- a/testnet/stacks-node/src/nakamoto_node.rs +++ b/testnet/stacks-node/src/nakamoto_node.rs @@ -13,7 +13,7 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use std::collections::HashMap; +use std::collections::HashSet; use std::sync::mpsc::Receiver; use std::thread; use std::thread::JoinHandle; @@ -48,7 +48,7 @@ const VRF_MOCK_MINER_KEY: u64 = 1; pub const BLOCK_PROCESSOR_STACK_SIZE: usize = 32 * 1024 * 1024; // 32 MB -pub type BlockCommits = HashMap; +pub type BlockCommits = HashSet; /// Node implementation for both miners and followers. /// This struct is used to set up the node proper and launch the p2p thread and relayer thread. diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index b4aac584bb..f10a327b60 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
use core::fmt; -use std::collections::HashMap; +use std::collections::HashSet; use std::sync::mpsc::{Receiver, RecvTimeoutError}; use std::thread::JoinHandle; use std::time::{Duration, Instant}; @@ -194,7 +194,7 @@ impl RelayerThread { keychain, burnchain: runloop.get_burnchain(), last_vrf_key_burn_height: None, - last_commits: HashMap::new(), + last_commits: HashSet::new(), bitcoin_controller, event_dispatcher: runloop.get_event_dispatcher(), local_peer, @@ -309,8 +309,7 @@ impl RelayerThread { self.globals.set_last_sortition(sn.clone()); - let won_sortition = - sn.sortition && self.last_commits.remove(&sn.winning_block_txid).is_some(); + let won_sortition = sn.sortition && self.last_commits.remove(&sn.winning_block_txid); info!( "Relayer: Process sortition"; @@ -692,7 +691,7 @@ impl RelayerThread { "txid" => %txid, ); - self.last_commits.insert(txid, ()); + self.last_commits.insert(txid); self.last_committed = Some(( last_committed_at, StacksBlockId::new(&tenure_start_ch, &tenure_start_bh), From 52a5cd353cbe191f0c002a6a4e3c7da0b35dce6a Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Wed, 13 Dec 2023 10:29:41 -0600 Subject: [PATCH 11/16] fix: mockamoto config must pass config assertions --- .../burnchains/bitcoin_regtest_controller.rs | 4 ---- testnet/stacks-node/src/config.rs | 2 +- testnet/stacks-node/src/mockamoto/tests.rs | 21 +++++++++++++++---- .../src/tests/neon_integrations.rs | 18 ++++++++++------ 4 files changed, 30 insertions(+), 15 deletions(-) diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index 0ed1bb0e03..7d1a2aec08 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -8,8 +8,6 @@ use async_h1::client; use async_std::io::ReadExt; use async_std::net::TcpStream; use base64::encode; -#[cfg(test)] -use clarity::vm::types::PrincipalData; use 
http_types::{Method, Request, Url}; use serde::Serialize; use serde_json::json; @@ -52,8 +50,6 @@ use stacks_common::deps_common::bitcoin::network::serialize::deserialize as btc_ use stacks_common::deps_common::bitcoin::network::serialize::RawEncoder; use stacks_common::deps_common::bitcoin::util::hash::Sha256dHash; use stacks_common::types::chainstate::BurnchainHeaderHash; -#[cfg(test)] -use stacks_common::types::chainstate::StacksAddress; use stacks_common::util::hash::{hex_bytes, Hash160}; use stacks_common::util::secp256k1::Secp256k1PublicKey; use stacks_common::util::sleep_ms; diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index 9018511b5a..87c9169676 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -316,7 +316,7 @@ impl ConfigFile { password: Some("blockstacksystem".into()), magic_bytes: Some("M3".into()), epochs: Some(epochs), - pox_prepare_length: Some(2), + pox_prepare_length: Some(3), pox_reward_length: Some(36), ..BurnchainConfigFile::default() }; diff --git a/testnet/stacks-node/src/mockamoto/tests.rs b/testnet/stacks-node/src/mockamoto/tests.rs index b7914dcba8..7d7f65f852 100644 --- a/testnet/stacks-node/src/mockamoto/tests.rs +++ b/testnet/stacks-node/src/mockamoto/tests.rs @@ -6,6 +6,7 @@ use stacks::chainstate::nakamoto::NakamotoChainState; use stacks::chainstate::stacks::db::StacksChainState; use stacks_common::types::chainstate::{StacksAddress, StacksPrivateKey}; use stacks_common::types::StacksEpochId; +use stacks_common::util::get_epoch_time_secs; use stacks_common::util::hash::to_hex; use super::MockamotoNode; @@ -18,6 +19,12 @@ use crate::{Config, ConfigFile}; #[test] fn observe_100_blocks() { let mut conf = Config::from_config_file(ConfigFile::mockamoto()).unwrap(); + conf.node.working_dir = format!( + "/tmp/stacks-node-tests/mock_observe_100_blocks-{}", + get_epoch_time_secs() + ); + conf.node.rpc_bind = "127.0.0.1:19343".into(); + conf.node.p2p_bind = 
"127.0.0.1:19344".into(); conf.node.mockamoto_time_ms = 10; let submitter_sk = StacksPrivateKey::from_seed(&[1]); @@ -25,8 +32,8 @@ fn observe_100_blocks() { conf.add_initial_balance(submitter_addr.to_string(), 1_000_000); let recipient_addr = StacksAddress::burn_address(false).into(); - test_observer::spawn(); - let observer_port = test_observer::EVENT_OBSERVER_PORT; + let observer_port = 19300; + test_observer::spawn_at(observer_port); conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{observer_port}"), events_keys: vec![EventKeyType::AnyEvent], @@ -129,6 +136,12 @@ fn observe_100_blocks() { #[test] fn mempool_rpc_submit() { let mut conf = Config::from_config_file(ConfigFile::mockamoto()).unwrap(); + conf.node.working_dir = format!( + "/tmp/stacks-node-tests/mempool_rpc_submit-{}", + get_epoch_time_secs() + ); + conf.node.rpc_bind = "127.0.0.1:19743".into(); + conf.node.p2p_bind = "127.0.0.1:19744".into(); conf.node.mockamoto_time_ms = 10; let submitter_sk = StacksPrivateKey::from_seed(&[1]); @@ -136,8 +149,8 @@ fn mempool_rpc_submit() { conf.add_initial_balance(submitter_addr.to_string(), 1_000); let recipient_addr = StacksAddress::burn_address(false).into(); - test_observer::spawn(); - let observer_port = test_observer::EVENT_OBSERVER_PORT; + let observer_port = 19800; + test_observer::spawn_at(observer_port); conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{observer_port}"), events_keys: vec![EventKeyType::AnyEvent], diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 455e414208..8915e53020 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -9,7 +9,7 @@ use std::{cmp, env, fs, thread}; use clarity::vm::ast::stack_depth_checker::AST_CALL_STACK_DEPTH_BUFFER; use clarity::vm::ast::ASTRules; use clarity::vm::costs::ExecutionCost; -use 
clarity::vm::types::{PrincipalData, QualifiedContractIdentifier, StandardPrincipalData}; +use clarity::vm::types::PrincipalData; use clarity::vm::{ClarityName, ClarityVersion, ContractName, Value, MAX_CALL_STACK_DEPTH}; use rand::Rng; use rusqlite::types::ToSql; @@ -24,7 +24,6 @@ use stacks::chainstate::burn::operations::{ }; use stacks::chainstate::burn::ConsensusHash; use stacks::chainstate::coordinator::comm::CoordinatorChannels; -use stacks::chainstate::stacks::address::PoxAddress; use stacks::chainstate::stacks::db::StacksChainState; use stacks::chainstate::stacks::miner::{ signal_mining_blocked, signal_mining_ready, TransactionErrorEvent, TransactionEvent, @@ -55,7 +54,6 @@ use stacks::net::atlas::{ }; use stacks::util_lib::boot::boot_code_id; use stacks::util_lib::db::{query_row_columns, query_rows, u64_to_sql}; -use stacks_common::address::C32_ADDRESS_VERSION_TESTNET_SINGLESIG; use stacks_common::codec::StacksMessageCodec; use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, StacksAddress, StacksBlockId, @@ -403,7 +401,7 @@ pub mod test_observer { } /// each path here should correspond to one of the paths listed in `event_dispatcher.rs` - async fn serve() { + async fn serve(port: u16) { let new_blocks = warp::path!("new_block") .and(warp::post()) .and(warp::body::json()) @@ -458,7 +456,7 @@ pub mod test_observer { .or(mined_nakamoto_blocks) .or(new_stackerdb_chunks), ) - .run(([127, 0, 0, 1], EVENT_OBSERVER_PORT)) + .run(([127, 0, 0, 1], port)) .await } @@ -466,7 +464,15 @@ pub mod test_observer { clear(); thread::spawn(|| { let rt = tokio::runtime::Runtime::new().expect("Failed to initialize tokio"); - rt.block_on(serve()); + rt.block_on(serve(EVENT_OBSERVER_PORT)); + }); + } + + pub fn spawn_at(port: u16) { + clear(); + thread::spawn(move || { + let rt = tokio::runtime::Runtime::new().expect("Failed to initialize tokio"); + rt.block_on(serve(port)); }); } From e26c1fe4d799f9c031a3203cb9edd8cc8714098e Mon Sep 17 00:00:00 2001 From: 
Aaron Blankstein Date: Wed, 13 Dec 2023 10:36:13 -0600 Subject: [PATCH 12/16] ci: add tests::nakamoto_integrations::simple_neon_integration to the CI --- .github/workflows/epoch-tests.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/epoch-tests.yml b/.github/workflows/epoch-tests.yml index a50e0d344d..eabd635246 100644 --- a/.github/workflows/epoch-tests.yml +++ b/.github/workflows/epoch-tests.yml @@ -54,6 +54,7 @@ jobs: - tests::epoch_23::trait_invocation_behavior - tests::epoch_24::fix_to_pox_contract - tests::epoch_24::verify_auto_unlock_behavior + - tests::nakamoto_integrations::simple_neon_integration steps: ## Setup test environment - name: Setup Test Environment From d37bf3bda1e9ef9f5655b69eb9e06e2ec421dc54 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Wed, 13 Dec 2023 11:17:18 -0600 Subject: [PATCH 13/16] fix: change PANIC_TIMEOUT_SECS back to original setting, move the nakamoto_integrations test to bitcoin-tests group --- .github/workflows/bitcoin-tests.yml | 1 + testnet/stacks-node/src/tests/neon_integrations.rs | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 4acac1c8a0..069857ed44 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -69,6 +69,7 @@ jobs: - tests::neon_integrations::test_problematic_txs_are_not_stored - tests::neon_integrations::use_latest_tip_integration_test - tests::should_succeed_handling_malformed_and_valid_txs + - tests::nakamoto_integrations::simple_neon_integration steps: ## Setup test environment - name: Setup Test Environment diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 8915e53020..5676d1bb12 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -489,7 +489,7 @@ pub mod test_observer { } } -const PANIC_TIMEOUT_SECS: 
u64 = 30; +const PANIC_TIMEOUT_SECS: u64 = 600; /// Returns `false` on a timeout, true otherwise. pub fn next_block_and_wait( From a7aa3f517b97d73689745eea61555064c551d937 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Wed, 13 Dec 2023 12:35:31 -0600 Subject: [PATCH 14/16] chore: only check config settings in nakamoto-neon, mockamoto --- .github/workflows/epoch-tests.yml | 1 - testnet/stacks-node/src/config.rs | 6 ++++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/.github/workflows/epoch-tests.yml b/.github/workflows/epoch-tests.yml index eabd635246..a50e0d344d 100644 --- a/.github/workflows/epoch-tests.yml +++ b/.github/workflows/epoch-tests.yml @@ -54,7 +54,6 @@ jobs: - tests::epoch_23::trait_invocation_behavior - tests::epoch_24::fix_to_pox_contract - tests::epoch_24::verify_auto_unlock_behavior - - tests::nakamoto_integrations::simple_neon_integration steps: ## Setup test environment - name: Setup Test Environment diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index 87c9169676..1d80c92bf7 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -626,6 +626,12 @@ impl Config { } // check if the Epoch 3.0 burnchain settings as configured are going to be valid. 
+ if self.burnchain.mode == "nakamoto-neon" || self.burnchain.mode == "mockamoto" { + self.check_nakamoto_config(&burnchain); + } + } + + fn check_nakamoto_config(&self, burnchain: &Burnchain) { let epochs = StacksEpoch::get_epochs( self.burnchain.get_bitcoin_network().1, self.burnchain.epochs.as_ref(), From 9f59944add0785819ca44d6e901a736cfc34d4f6 Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Wed, 20 Dec 2023 02:04:45 +0200 Subject: [PATCH 15/16] feat: ready source branch --- clarity/Cargo.toml | 2 +- clarity/src/{libclarity.rs => lib.rs} | 0 libsigner/Cargo.toml | 2 +- libsigner/src/{libsigner.rs => lib.rs} | 0 stacks-common/Cargo.toml | 2 +- stacks-common/src/{libcommon.rs => lib.rs} | 0 6 files changed, 3 insertions(+), 3 deletions(-) rename clarity/src/{libclarity.rs => lib.rs} (100%) rename libsigner/src/{libsigner.rs => lib.rs} (100%) rename stacks-common/src/{libcommon.rs => lib.rs} (100%) diff --git a/clarity/Cargo.toml b/clarity/Cargo.toml index 86089991dc..e83c77f823 100644 --- a/clarity/Cargo.toml +++ b/clarity/Cargo.toml @@ -15,7 +15,7 @@ resolver = "2" [lib] name = "clarity" -path = "./src/libclarity.rs" +path = "./src/lib.rs" [dependencies] rand = "0.7.3" diff --git a/clarity/src/libclarity.rs b/clarity/src/lib.rs similarity index 100% rename from clarity/src/libclarity.rs rename to clarity/src/lib.rs diff --git a/libsigner/Cargo.toml b/libsigner/Cargo.toml index 8500ef55fa..35aaca69f7 100644 --- a/libsigner/Cargo.toml +++ b/libsigner/Cargo.toml @@ -13,7 +13,7 @@ edition = "2021" [lib] name = "libsigner" -path = "./src/libsigner.rs" +path = "./src/lib.rs" [dependencies] clarity = { path = "../clarity" } diff --git a/libsigner/src/libsigner.rs b/libsigner/src/lib.rs similarity index 100% rename from libsigner/src/libsigner.rs rename to libsigner/src/lib.rs diff --git a/stacks-common/Cargo.toml b/stacks-common/Cargo.toml index 1916572cf4..8ba0b64197 100644 --- a/stacks-common/Cargo.toml +++ b/stacks-common/Cargo.toml @@ -15,7 +15,7 @@ edition = "2021" 
[lib] name = "stacks_common" -path = "./src/libcommon.rs" +path = "./src/lib.rs" [dependencies] rand = "0.7.3" diff --git a/stacks-common/src/libcommon.rs b/stacks-common/src/lib.rs similarity index 100% rename from stacks-common/src/libcommon.rs rename to stacks-common/src/lib.rs From b70810fc058f88ae71cebcbc4020cc878eb58f75 Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Fri, 22 Dec 2023 18:45:17 +0200 Subject: [PATCH 16/16] feat: run again with `cargo test --test-threads` arg --- stacks-common/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stacks-common/src/lib.rs b/stacks-common/src/lib.rs index 1448a2f90c..5325aabd65 100644 --- a/stacks-common/src/lib.rs +++ b/stacks-common/src/lib.rs @@ -6,7 +6,7 @@ #![allow(non_upper_case_globals)] #![cfg_attr(test, allow(unused_variables, unused_assignments))] #![allow(clippy::assertions_on_constants)] - +// test to re-run mutants trigger #[macro_use(o, slog_log, slog_trace, slog_debug, slog_info, slog_warn, slog_error)] extern crate slog;