diff --git a/libsigner/src/session.rs b/libsigner/src/session.rs
index d86a3a3d4c..30966f897b 100644
--- a/libsigner/src/session.rs
+++ b/libsigner/src/session.rs
@@ -22,6 +22,7 @@ use libstackerdb::{
stackerdb_get_chunk_path, stackerdb_get_metadata_path, stackerdb_post_chunk_path, SlotMetadata,
StackerDBChunkAckData, StackerDBChunkData,
};
+use stacks_common::codec::StacksMessageCodec;
use crate::error::RPCError;
use crate::http::run_http_request;
@@ -51,7 +52,14 @@ pub trait SignerSession {
/// Returns Ok(None) if the chunk with the given version does not exist
/// Returns Err(..) on transport error
fn get_chunk(&mut self, slot_id: u32, version: u32) -> Result<Option<Vec<u8>>, RPCError> {
- Ok(self.get_chunks(&[(slot_id, version)])?[0].clone())
+ let mut chunks = self.get_chunks(&[(slot_id, version)])?;
+ // check if chunks is empty because [0] and remove(0) panic on out-of-bounds
+ if chunks.is_empty() {
+ return Ok(None);
+ }
+ // swap_remove breaks the ordering of chunks, but we don't care because we
+ // only want the first element anyway.
+ Ok(chunks.swap_remove(0))
}
/// Get a single latest chunk.
@@ -59,7 +67,29 @@ pub trait SignerSession {
/// Returns Ok(None) if not
/// Returns Err(..) on transport error
fn get_latest_chunk(&mut self, slot_id: u32) -> Result<Option<Vec<u8>>, RPCError> {
- Ok(self.get_latest_chunks(&[(slot_id)])?[0].clone())
+ let mut latest_chunks = self.get_latest_chunks(&[slot_id])?;
+ // check if latest_chunks is empty because [0] and remove(0) panic on out-of-bounds
+ if latest_chunks.is_empty() {
+ return Ok(None);
+ }
+ // swap_remove breaks the ordering of latest_chunks, but we don't care because we
+ // only want the first element anyway.
+ Ok(latest_chunks.swap_remove(0))
+ }
+
+ /// Get a single latest chunk from the StackerDB and deserialize into `T` using the
+ /// StacksMessageCodec.
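+ /// Returns Ok(None) if the slot has no chunk yet; returns Err(..) on transport error
+ /// or if the chunk fails to deserialize.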
+ fn get_latest<T: StacksMessageCodec>(&mut self, slot_id: u32) -> Result<Option<T>, RPCError> {
+ let Some(latest_bytes) = self.get_latest_chunk(slot_id)? else {
+ return Ok(None);
+ };
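+ // `transpose` converts Some(Result<T, RPCError>) into Result<Option<T>, RPCError>,
+ // so a failed deserialization surfaces as Err rather than being swallowed.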
+ Some(
+ T::consensus_deserialize(&mut latest_bytes.as_slice()).map_err(|e| {
+ let msg = format!("StacksMessageCodec::consensus_deserialize failure: {e}");
+ RPCError::Deserialize(msg)
+ }),
+ )
+ .transpose()
}
}
diff --git a/stackslib/src/chainstate/stacks/boot/mod.rs b/stackslib/src/chainstate/stacks/boot/mod.rs
index 834d83ed20..7908f3abd7 100644
--- a/stackslib/src/chainstate/stacks/boot/mod.rs
+++ b/stackslib/src/chainstate/stacks/boot/mod.rs
@@ -91,7 +91,7 @@ const POX_4_BODY: &'static str = std::include_str!("pox-4.clar");
pub const SIGNERS_BODY: &'static str = std::include_str!("signers.clar");
pub const SIGNERS_DB_0_BODY: &'static str = std::include_str!("signers-0-xxx.clar");
pub const SIGNERS_DB_1_BODY: &'static str = std::include_str!("signers-1-xxx.clar");
-const SIGNERS_VOTING_BODY: &'static str = std::include_str!("signers-voting.clar");
+pub const SIGNERS_VOTING_BODY: &'static str = std::include_str!("signers-voting.clar");
pub const COSTS_1_NAME: &'static str = "costs";
pub const COSTS_2_NAME: &'static str = "costs-2";
@@ -120,7 +120,6 @@ lazy_static! {
pub static ref POX_3_TESTNET_CODE: String =
format!("{}\n{}", BOOT_CODE_POX_TESTNET_CONSTS, POX_3_BODY);
pub static ref POX_4_CODE: String = format!("{}", POX_4_BODY);
- pub static ref SIGNER_VOTING_CODE: String = format!("{}", SIGNERS_VOTING_BODY);
pub static ref BOOT_CODE_COST_VOTING_TESTNET: String = make_testnet_cost_voting();
pub static ref STACKS_BOOT_CODE_MAINNET: [(&'static str, &'static str); 6] = [
("pox", &BOOT_CODE_POX_MAINNET),
diff --git a/stackslib/src/clarity_vm/clarity.rs b/stackslib/src/clarity_vm/clarity.rs
index 81a421cdef..ac764e0e91 100644
--- a/stackslib/src/clarity_vm/clarity.rs
+++ b/stackslib/src/clarity_vm/clarity.rs
@@ -50,7 +50,7 @@ use crate::chainstate::stacks::boot::{
BOOT_TEST_POX_4_AGG_KEY_CONTRACT, BOOT_TEST_POX_4_AGG_KEY_FNAME, COSTS_2_NAME, COSTS_3_NAME,
MINERS_NAME, POX_2_MAINNET_CODE, POX_2_NAME, POX_2_TESTNET_CODE, POX_3_MAINNET_CODE,
POX_3_NAME, POX_3_TESTNET_CODE, POX_4_CODE, POX_4_NAME, SIGNERS_BODY, SIGNERS_DB_0_BODY,
- SIGNERS_DB_1_BODY, SIGNERS_NAME, SIGNERS_VOTING_NAME, SIGNER_VOTING_CODE,
+ SIGNERS_DB_1_BODY, SIGNERS_NAME, SIGNERS_VOTING_BODY, SIGNERS_VOTING_NAME,
};
use crate::chainstate::stacks::db::{StacksAccount, StacksChainState};
use crate::chainstate::stacks::events::{StacksTransactionEvent, StacksTransactionReceipt};
@@ -1457,59 +1457,12 @@ impl<'a, 'b> ClarityBlockConnection<'a, 'b> {
}
}
- let initialized_agg_key = if !mainnet {
- let agg_key_value_opt = self
- .with_readonly_clarity_env(
- false,
- self.chain_id,
- ClarityVersion::Clarity2,
- StacksAddress::burn_address(false).into(),
- None,
- LimitedCostTracker::Free,
- |vm_env| {
- vm_env.execute_contract_allow_private(
- &boot_code_id(BOOT_TEST_POX_4_AGG_KEY_CONTRACT, false),
- BOOT_TEST_POX_4_AGG_KEY_FNAME,
- &[],
- true,
- )
- },
- )
- .map(|agg_key_value| {
- agg_key_value
- .expect_buff(33)
- .expect("FATAL: test aggregate pub key must be a buffer")
- })
- .ok();
- agg_key_value_opt
- } else {
- None
- };
-
- let mut signers_voting_code = SIGNER_VOTING_CODE.clone();
- if !mainnet {
- if let Some(ref agg_pub_key) = initialized_agg_key {
- let hex_agg_pub_key = to_hex(agg_pub_key);
- for set_in_reward_cycle in 0..pox_4_first_cycle {
- info!(
- "Setting initial aggregate-public-key in PoX-4";
- "agg_pub_key" => &hex_agg_pub_key,
- "reward_cycle" => set_in_reward_cycle,
- "pox_4_first_cycle" => pox_4_first_cycle,
- );
- let set_str = format!("(map-set aggregate-public-keys u{set_in_reward_cycle} 0x{hex_agg_pub_key})");
- signers_voting_code.push_str("\n");
- signers_voting_code.push_str(&set_str);
- }
- }
- }
-
let signers_voting_contract_id = boot_code_id(SIGNERS_VOTING_NAME, mainnet);
let payload = TransactionPayload::SmartContract(
TransactionSmartContract {
name: ContractName::try_from(SIGNERS_VOTING_NAME)
.expect("FATAL: invalid boot-code contract name"),
- code_body: StacksString::from_str(&signers_voting_code)
+ code_body: StacksString::from_str(SIGNERS_VOTING_BODY)
.expect("FATAL: invalid boot code body"),
},
Some(ClarityVersion::Clarity2),
diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs
index cfd864f603..eba7790780 100644
--- a/testnet/stacks-node/src/config.rs
+++ b/testnet/stacks-node/src/config.rs
@@ -12,7 +12,6 @@ use rand::RngCore;
use stacks::burnchains::bitcoin::BitcoinNetworkType;
use stacks::burnchains::{Burnchain, MagicBytes, BLOCKSTACK_MAGIC_MAINNET};
use stacks::chainstate::nakamoto::signer_set::NakamotoSigners;
-use stacks::chainstate::nakamoto::test_signers::TestSigners;
use stacks::chainstate::stacks::boot::MINERS_NAME;
use stacks::chainstate::stacks::index::marf::MARFOpenOpts;
use stacks::chainstate::stacks::index::storage::TrieHashCalculationMode;
@@ -33,7 +32,6 @@ use stacks::net::connection::ConnectionOptions;
use stacks::net::{Neighbor, NeighborKey};
use stacks::util_lib::boot::boot_code_id;
use stacks::util_lib::db::Error as DBError;
-use stacks_common::address::{AddressHashMode, C32_ADDRESS_VERSION_TESTNET_SINGLESIG};
use stacks_common::consts::SIGNER_SLOTS_PER_USER;
use stacks_common::types::chainstate::StacksAddress;
use stacks_common::types::net::PeerAddress;
@@ -294,102 +292,6 @@ impl ConfigFile {
}
}
- pub fn mockamoto() -> ConfigFile {
- let epochs = vec![
- StacksEpochConfigFile {
- epoch_name: "1.0".into(),
- start_height: 0,
- },
- StacksEpochConfigFile {
- epoch_name: "2.0".into(),
- start_height: 0,
- },
- StacksEpochConfigFile {
- epoch_name: "2.05".into(),
- start_height: 1,
- },
- StacksEpochConfigFile {
- epoch_name: "2.1".into(),
- start_height: 2,
- },
- StacksEpochConfigFile {
- epoch_name: "2.2".into(),
- start_height: 3,
- },
- StacksEpochConfigFile {
- epoch_name: "2.3".into(),
- start_height: 4,
- },
- StacksEpochConfigFile {
- epoch_name: "2.4".into(),
- start_height: 5,
- },
- StacksEpochConfigFile {
- epoch_name: "2.5".into(),
- start_height: 6,
- },
- StacksEpochConfigFile {
- epoch_name: "3.0".into(),
- start_height: 7,
- },
- ];
-
- let burnchain = BurnchainConfigFile {
- mode: Some("mockamoto".into()),
- rpc_port: Some(8332),
- peer_port: Some(8333),
- peer_host: Some("localhost".into()),
- username: Some("blockstack".into()),
- password: Some("blockstacksystem".into()),
- magic_bytes: Some("M3".into()),
- epochs: Some(epochs),
- pox_prepare_length: Some(3),
- pox_reward_length: Some(36),
- ..BurnchainConfigFile::default()
- };
-
- let node = NodeConfigFile {
- bootstrap_node: None,
- miner: Some(true),
- stacker: Some(true),
- ..NodeConfigFile::default()
- };
-
- let mining_key = Secp256k1PrivateKey::new();
- let miner = MinerConfigFile {
- mining_key: Some(mining_key.to_hex()),
- ..MinerConfigFile::default()
- };
-
- let mock_private_key = Secp256k1PrivateKey::from_seed(&[0]);
- let mock_public_key = Secp256k1PublicKey::from_private(&mock_private_key);
- let mock_address = StacksAddress::from_public_keys(
- C32_ADDRESS_VERSION_TESTNET_SINGLESIG,
- &AddressHashMode::SerializeP2PKH,
- 1,
- &vec![mock_public_key],
- )
- .unwrap();
-
- info!(
- "Mockamoto starting. Initial balance set to mock_private_key = {}",
- mock_private_key.to_hex()
- );
-
- let ustx_balance = vec![InitialBalanceFile {
- address: mock_address.to_string(),
- amount: 1_000_000_000_000,
- }];
-
- ConfigFile {
- burnchain: Some(burnchain),
- node: Some(node),
- miner: Some(miner),
- ustx_balance: Some(ustx_balance),
- ..ConfigFile::default()
- }
- }
-
pub fn helium() -> ConfigFile {
// ## Settings for local testnet, relying on a local bitcoind server
// ## running with the following bitcoin.conf:
@@ -524,19 +426,6 @@ lazy_static! {
}
impl Config {
- #[cfg(any(test, feature = "testing"))]
- pub fn self_signing(&self) -> Option<TestSigners> {
- if !(self.burnchain.mode == "nakamoto-neon" || self.burnchain.mode == "mockamoto") {
- return None;
- }
- self.miner.self_signing_key.clone()
- }
-
- #[cfg(not(any(test, feature = "testing")))]
- pub fn self_signing(&self) -> Option<TestSigners> {
- return None;
- }
-
/// get the up-to-date burnchain options from the config.
/// If the config file can't be loaded, then return the existing config
pub fn get_burnchain_config(&self) -> BurnchainConfig {
@@ -663,7 +552,7 @@ impl Config {
}
// check if the Epoch 3.0 burnchain settings as configured are going to be valid.
- if self.burnchain.mode == "nakamoto-neon" || self.burnchain.mode == "mockamoto" {
+ if self.burnchain.mode == "nakamoto-neon" {
self.check_nakamoto_config(&burnchain);
}
}
@@ -895,15 +784,7 @@ impl Config {
}
pub fn from_config_file(config_file: ConfigFile) -> Result<Config, String> {
- if config_file.burnchain.as_ref().map(|b| b.mode.clone()) == Some(Some("mockamoto".into()))
- {
- // in the case of mockamoto, use `ConfigFile::mockamoto()` as the default for
- // processing a user-supplied config
- let default = Self::from_config_default(ConfigFile::mockamoto(), Config::default())?;
- Self::from_config_default(config_file, default)
- } else {
- Self::from_config_default(config_file, Config::default())
- }
+ Self::from_config_default(config_file, Config::default())
}
fn from_config_default(config_file: ConfigFile, default: Config) -> Result<Config, String> {
@@ -929,7 +810,6 @@ impl Config {
"krypton",
"xenon",
"mainnet",
- "mockamoto",
"nakamoto-neon",
];
@@ -1368,7 +1248,7 @@ impl BurnchainConfig {
match self.mode.as_str() {
"mainnet" => ("mainnet".to_string(), BitcoinNetworkType::Mainnet),
"xenon" => ("testnet".to_string(), BitcoinNetworkType::Testnet),
- "helium" | "neon" | "argon" | "krypton" | "mocknet" | "mockamoto" | "nakamoto-neon" => {
+ "helium" | "neon" | "argon" | "krypton" | "mocknet" | "nakamoto-neon" => {
("regtest".to_string(), BitcoinNetworkType::Regtest)
}
other => panic!("Invalid stacks-node mode: {other}"),
@@ -1599,9 +1479,6 @@ pub struct NodeConfig {
pub chain_liveness_poll_time_secs: u64,
/// stacker DBs we replicate
pub stacker_dbs: Vec<QualifiedContractIdentifier>,
- /// if running in mockamoto mode, how long to wait between each
- /// simulated bitcoin block
- pub mockamoto_time_ms: u64,
}
#[derive(Clone, Debug)]
@@ -1882,7 +1759,6 @@ impl Default for NodeConfig {
fault_injection_hide_blocks: false,
chain_liveness_poll_time_secs: 300,
stacker_dbs: vec![],
- mockamoto_time_ms: 3_000,
}
}
}
@@ -2052,7 +1928,6 @@ pub struct MinerConfig {
pub candidate_retry_cache_size: u64,
pub unprocessed_block_deadline_secs: u64,
pub mining_key: Option<Secp256k1PrivateKey>,
- pub self_signing_key: Option<TestSigners>,
/// Amount of time while mining in nakamoto to wait in between mining interim blocks
pub wait_on_interim_blocks: Duration,
/// minimum number of transactions that must be in a block if we're going to replace a pending
@@ -2100,7 +1975,6 @@ impl Default for MinerConfig {
candidate_retry_cache_size: 1024 * 1024,
unprocessed_block_deadline_secs: 30,
mining_key: None,
- self_signing_key: None,
wait_on_interim_blocks: Duration::from_millis(2_500),
min_tx_count: 0,
only_increase_tx_count: false,
@@ -2322,9 +2196,6 @@ pub struct NodeConfigFile {
pub chain_liveness_poll_time_secs: Option<u64>,
/// Stacker DBs we replicate
pub stacker_dbs: Option<Vec<String>>,
- /// if running in mockamoto mode, how long to wait between each
- /// simulated bitcoin block
- pub mockamoto_time_ms: Option<u64>,
}
impl NodeConfigFile {
@@ -2400,9 +2271,6 @@ impl NodeConfigFile {
.iter()
.filter_map(|contract_id| QualifiedContractIdentifier::parse(contract_id).ok())
.collect(),
- mockamoto_time_ms: self
- .mockamoto_time_ms
- .unwrap_or(default_node_config.mockamoto_time_ms),
};
Ok(node_config)
}
@@ -2486,7 +2354,6 @@ impl MinerConfigFile {
.as_ref()
.map(|x| Secp256k1PrivateKey::from_hex(x))
.transpose()?,
- self_signing_key: Some(TestSigners::default()),
wait_on_interim_blocks: self
.wait_on_interim_blocks_ms
.map(Duration::from_millis)
diff --git a/testnet/stacks-node/src/main.rs b/testnet/stacks-node/src/main.rs
index 95a1dda4b8..bf54c1601d 100644
--- a/testnet/stacks-node/src/main.rs
+++ b/testnet/stacks-node/src/main.rs
@@ -22,7 +22,6 @@ pub mod event_dispatcher;
pub mod genesis_data;
pub mod globals;
pub mod keychain;
-pub mod mockamoto;
pub mod nakamoto_node;
pub mod neon_node;
pub mod node;
@@ -55,7 +54,6 @@ pub use self::node::{ChainTip, Node};
pub use self::run_loop::{helium, neon};
pub use self::tenure::Tenure;
use crate::chain_data::MinerStats;
-use crate::mockamoto::MockamotoNode;
use crate::neon_node::{BlockMinerThread, TipCandidate};
use crate::run_loop::boot_nakamoto;
@@ -322,10 +320,6 @@ fn main() {
args.finish();
ConfigFile::mainnet()
}
- "mockamoto" => {
- args.finish();
- ConfigFile::mockamoto()
- }
"check-config" => {
let config_path: String = args.value_from_str("--config").unwrap();
args.finish();
@@ -449,9 +443,6 @@ fn main() {
{
let mut run_loop = neon::RunLoop::new(conf);
run_loop.start(None, mine_start.unwrap_or(0));
- } else if conf.burnchain.mode == "mockamoto" {
- let mut mockamoto = MockamotoNode::new(&conf).unwrap();
- mockamoto.run();
} else if conf.burnchain.mode == "nakamoto-neon" {
let mut run_loop = boot_nakamoto::BootRunLoop::new(conf).unwrap();
run_loop.start(None, 0);
diff --git a/testnet/stacks-node/src/mockamoto.rs b/testnet/stacks-node/src/mockamoto.rs
deleted file mode 100644
index 011300bc88..0000000000
--- a/testnet/stacks-node/src/mockamoto.rs
+++ /dev/null
@@ -1,1111 +0,0 @@
-// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation
-// Copyright (C) 2020-2023 Stacks Open Internet Foundation
-//
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-use std::sync::atomic::AtomicBool;
-use std::sync::mpsc::{sync_channel, Receiver, RecvTimeoutError};
-use std::sync::{Arc, Mutex};
-use std::thread;
-use std::thread::{sleep, JoinHandle};
-use std::time::Duration;
-
-use clarity::vm::ast::ASTRules;
-use clarity::vm::Value as ClarityValue;
-use lazy_static::lazy_static;
-use stacks::burnchains::bitcoin::address::{
- BitcoinAddress, LegacyBitcoinAddress, LegacyBitcoinAddressType,
-};
-use stacks::burnchains::bitcoin::{
- BitcoinBlock, BitcoinInputType, BitcoinNetworkType, BitcoinTransaction,
- BitcoinTxInputStructured, BitcoinTxOutput,
-};
-use stacks::burnchains::db::{BurnchainDB, BurnchainHeaderReader};
-use stacks::burnchains::{
- BurnchainBlock, BurnchainBlockHeader, BurnchainSigner, Error as BurnchainError, Txid,
-};
-use stacks::chainstate::burn::db::sortdb::SortitionDB;
-use stacks::chainstate::burn::operations::leader_block_commit::BURN_BLOCK_MINED_AT_MODULUS;
-use stacks::chainstate::burn::operations::{
- BlockstackOperationType, LeaderBlockCommitOp, LeaderKeyRegisterOp,
-};
-use stacks::chainstate::burn::BlockSnapshot;
-use stacks::chainstate::coordinator::comm::CoordinatorReceivers;
-use stacks::chainstate::coordinator::{
- ChainsCoordinator, ChainsCoordinatorConfig, CoordinatorCommunication,
-};
-use stacks::chainstate::nakamoto::test_signers::TestSigners;
-use stacks::chainstate::nakamoto::{
- NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, SetupBlockResult,
-};
-use stacks::chainstate::stacks::address::PoxAddress;
-use stacks::chainstate::stacks::boot::{SIGNERS_VOTING_FUNCTION_NAME, SIGNERS_VOTING_NAME};
-use stacks::chainstate::stacks::db::{ChainStateBootData, ClarityTx, StacksChainState};
-use stacks::chainstate::stacks::miner::{
- BlockBuilder, BlockBuilderSettings, BlockLimitFunction, MinerStatus, TransactionResult,
-};
-use stacks::chainstate::stacks::{
- CoinbasePayload, Error as ChainstateError, StacksBlockBuilder, StacksTransaction,
- StacksTransactionSigner, TenureChangeCause, TenureChangePayload, ThresholdSignature,
- TransactionAuth, TransactionContractCall, TransactionPayload, TransactionVersion,
- MAX_EPOCH_SIZE, MINER_BLOCK_CONSENSUS_HASH, MINER_BLOCK_HEADER_HASH,
-};
-use stacks::core::mempool::MemPoolWalkSettings;
-use stacks::core::{
- MemPoolDB, StacksEpoch, BLOCK_LIMIT_MAINNET_10, HELIUM_BLOCK_LIMIT_20, PEER_VERSION_EPOCH_1_0,
- PEER_VERSION_EPOCH_2_0, PEER_VERSION_EPOCH_2_05, PEER_VERSION_EPOCH_2_1,
- PEER_VERSION_EPOCH_2_2, PEER_VERSION_EPOCH_2_3, PEER_VERSION_EPOCH_2_4, PEER_VERSION_EPOCH_2_5,
- PEER_VERSION_EPOCH_3_0, STACKS_EPOCH_3_0_MARKER, TX_BLOCK_LIMIT_PROPORTION_HEURISTIC,
-};
-use stacks::net::atlas::{AtlasConfig, AtlasDB};
-use stacks::net::relay::Relayer;
-use stacks::net::stackerdb::StackerDBs;
-use stacks::util_lib::boot::boot_code_addr;
-use stacks::util_lib::db::Error as DBError;
-use stacks::util_lib::signed_structured_data::pox4::{
- make_pox_4_signer_key_signature, Pox4SignatureTopic,
-};
-use stacks_common::address::{AddressHashMode, C32_ADDRESS_VERSION_TESTNET_SINGLESIG};
-use stacks_common::bitvec::BitVec;
-use stacks_common::codec::StacksMessageCodec;
-use stacks_common::consts::{
- CHAIN_ID_TESTNET, FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, STACKS_EPOCH_MAX,
-};
-use stacks_common::types::chainstate::{
- BlockHeaderHash, BurnchainHeaderHash, ConsensusHash, StacksAddress, StacksBlockId,
- StacksPrivateKey, VRFSeed,
-};
-use stacks_common::types::{PrivateKey, StacksEpochId};
-use stacks_common::util::hash::{to_hex, Hash160, MerkleTree, Sha512Trunc256Sum};
-use stacks_common::util::secp256k1::{MessageSignature, Secp256k1PrivateKey, Secp256k1PublicKey};
-use stacks_common::util::vrf::{VRFPrivateKey, VRFProof, VRFPublicKey, VRF};
-
-use crate::globals::{NeonGlobals as Globals, RelayerDirective};
-use crate::neon::Counters;
-use crate::neon_node::{PeerThread, StacksNode, BLOCK_PROCESSOR_STACK_SIZE};
-use crate::syncctl::PoxSyncWatchdogComms;
-use crate::{Config, EventDispatcher};
-
-#[cfg(test)]
-mod tests;
-
-lazy_static! {
- pub static ref STACKS_EPOCHS_MOCKAMOTO: [StacksEpoch; 9] = [
- StacksEpoch {
- epoch_id: StacksEpochId::Epoch10,
- start_height: 0,
- end_height: 0,
- block_limit: BLOCK_LIMIT_MAINNET_10.clone(),
- network_epoch: PEER_VERSION_EPOCH_1_0
- },
- StacksEpoch {
- epoch_id: StacksEpochId::Epoch20,
- start_height: 0,
- end_height: 1,
- block_limit: HELIUM_BLOCK_LIMIT_20.clone(),
- network_epoch: PEER_VERSION_EPOCH_2_0
- },
- StacksEpoch {
- epoch_id: StacksEpochId::Epoch2_05,
- start_height: 1,
- end_height: 2,
- block_limit: HELIUM_BLOCK_LIMIT_20.clone(),
- network_epoch: PEER_VERSION_EPOCH_2_05
- },
- StacksEpoch {
- epoch_id: StacksEpochId::Epoch21,
- start_height: 2,
- end_height: 3,
- block_limit: HELIUM_BLOCK_LIMIT_20.clone(),
- network_epoch: PEER_VERSION_EPOCH_2_1
- },
- StacksEpoch {
- epoch_id: StacksEpochId::Epoch22,
- start_height: 3,
- end_height: 4,
- block_limit: HELIUM_BLOCK_LIMIT_20.clone(),
- network_epoch: PEER_VERSION_EPOCH_2_2
- },
- StacksEpoch {
- epoch_id: StacksEpochId::Epoch23,
- start_height: 4,
- end_height: 5,
- block_limit: HELIUM_BLOCK_LIMIT_20.clone(),
- network_epoch: PEER_VERSION_EPOCH_2_3
- },
- StacksEpoch {
- epoch_id: StacksEpochId::Epoch24,
- start_height: 5,
- end_height: 6,
- block_limit: HELIUM_BLOCK_LIMIT_20.clone(),
- network_epoch: PEER_VERSION_EPOCH_2_4
- },
- StacksEpoch {
- epoch_id: StacksEpochId::Epoch25,
- start_height: 6,
- end_height: 7,
- block_limit: HELIUM_BLOCK_LIMIT_20.clone(),
- network_epoch: PEER_VERSION_EPOCH_2_5
- },
- StacksEpoch {
- epoch_id: StacksEpochId::Epoch30,
- start_height: 7,
- end_height: STACKS_EPOCH_MAX,
- block_limit: HELIUM_BLOCK_LIMIT_20.clone(),
- network_epoch: PEER_VERSION_EPOCH_3_0
- },
- ];
-}
-
-/// Produce a mock bitcoin block that is descended from `parent_snapshot` and includes
-/// `ops`. This method uses `miner_pkh` to set the inputs and outputs of any supplied
-/// block commits or leader key registrations
-fn make_burn_block(
- parent_snapshot: &BlockSnapshot,
- miner_pkh: &Hash160,
- ops: Vec<BlockstackOperationType>,
-) -> Result<BitcoinBlock, BurnchainError> {
- let block_height = parent_snapshot.block_height + 1;
- let mut mock_burn_hash_contents = [0u8; 32];
- mock_burn_hash_contents[0..8].copy_from_slice((block_height + 1).to_be_bytes().as_ref());
-
- let txs = ops.into_iter().map(|op| {
- let mut data = match &op {
- BlockstackOperationType::LeaderKeyRegister(op) => op.serialize_to_vec(),
- BlockstackOperationType::LeaderBlockCommit(op) => op.serialize_to_vec(),
- _ => panic!("Attempted to mock unexpected blockstack operation."),
- };
-
- data.remove(0);
-
- let (inputs, outputs) = if let BlockstackOperationType::LeaderBlockCommit(ref op) = op {
- let burn_output = BitcoinTxOutput {
- units: op.burn_fee,
- address: BitcoinAddress::Legacy(LegacyBitcoinAddress {
- addrtype: LegacyBitcoinAddressType::PublicKeyHash,
- network_id: BitcoinNetworkType::Testnet,
- bytes: Hash160([0; 20]),
- }),
- };
-
- let change_output = BitcoinTxOutput {
- units: 1_000_000_000_000,
- address: BitcoinAddress::Legacy(LegacyBitcoinAddress {
- addrtype: LegacyBitcoinAddressType::PublicKeyHash,
- network_id: BitcoinNetworkType::Testnet,
- bytes: miner_pkh.clone(),
- }),
- };
-
- let tx_ref = (parent_snapshot.winning_block_txid.clone(), 3);
-
- let input = BitcoinTxInputStructured {
- keys: vec![],
- num_required: 0,
- in_type: BitcoinInputType::Standard,
- tx_ref,
- };
-
- (
- vec![input.into()],
- vec![burn_output.clone(), burn_output, change_output],
- )
- } else {
- (
- vec![BitcoinTxInputStructured {
- keys: vec![],
- num_required: 0,
- in_type: BitcoinInputType::Standard,
- tx_ref: (Txid([0; 32]), 0),
- }
- .into()],
- vec![BitcoinTxOutput {
- units: 1_000_000_000_000,
- address: BitcoinAddress::Legacy(LegacyBitcoinAddress {
- addrtype: LegacyBitcoinAddressType::PublicKeyHash,
- network_id: BitcoinNetworkType::Testnet,
- bytes: miner_pkh.clone(),
- }),
- }],
- )
- };
-
- BitcoinTransaction {
- txid: op.txid(),
- vtxindex: op.vtxindex(),
- opcode: op.opcode() as u8,
- data,
- data_amt: 0,
- inputs,
- outputs,
- }
- });
-
- Ok(BitcoinBlock {
- block_height,
- block_hash: BurnchainHeaderHash(mock_burn_hash_contents),
- parent_block_hash: parent_snapshot.burn_header_hash.clone(),
- txs: txs.collect(),
- timestamp: 100 * u64::from(block_height + 1),
- })
-}
-
-/// This struct wraps all the state required for operating a
-/// stacks-node in `mockamoto` mode.
-///
-/// This mode of operation is a single-node network in which bitcoin
-/// blocks are simulated: no `bitcoind` is communicated with (either
-/// operating as regtest, testnet or mainnet). This operation mode
-/// is useful for testing the stacks-only operation of Nakamoto.
-///
-/// During operation, the mockamoto node issues `stack-stx` and
-/// `stack-extend` contract-calls to ensure that the miner is a member
-/// of the current stacking set. This ensures nakamoto blocks can be
-/// produced with tenure change txs.
-///
-pub struct MockamotoNode {
- sortdb: SortitionDB,
- mempool: MemPoolDB,
- chainstate: StacksChainState,
- self_signer: TestSigners,
- miner_key: StacksPrivateKey,
- vrf_key: VRFPrivateKey,
- relay_rcv: Option<Receiver<RelayerDirective>>,
- coord_rcv: Option<CoordinatorReceivers>,
- dispatcher: EventDispatcher,
- pub globals: Globals,
- config: Config,
-}
-
-struct MockamotoBlockBuilder {
- txs: Vec<StacksTransaction>,
- bytes_so_far: u64,
-}
-
-/// This struct is used by mockamoto to pass the burnchain indexer
-/// parameter to the `ChainsCoordinator`. It errors on every
-/// invocation except `read_burnchain_headers`.
-///
-/// The `ChainsCoordinator` only uses this indexer for evaluating
-/// affirmation maps, which should never be evaluated in mockamoto.
-/// This is passed to the Burnchain DB block processor, though, which
-/// requires `read_burnchain_headers` (to generate affirmation maps)
-struct MockBurnchainIndexer(BurnchainDB);
-
-impl BurnchainHeaderReader for MockBurnchainIndexer {
- fn read_burnchain_headers(
- &self,
- start_height: u64,
- end_height: u64,
- ) -> Result<Vec<BurnchainBlockHeader>, DBError> {
- let mut output = vec![];
- for i in start_height..end_height {
- let header = BurnchainDB::get_burnchain_header(self.0.conn(), i)
- .map_err(|e| DBError::Other(e.to_string()))?
- .ok_or_else(|| DBError::NotFoundError)?;
- output.push(header);
- }
- Ok(output)
- }
- fn get_burnchain_headers_height(&self) -> Result<u64, DBError> {
- Err(DBError::NoDBError)
- }
- fn find_burnchain_header_height(
- &self,
- _header_hash: &BurnchainHeaderHash,
- ) -> Result<Option<u64>, DBError> {
- Err(DBError::NoDBError)
- }
-}
-
-impl BlockBuilder for MockamotoBlockBuilder {
- fn try_mine_tx_with_len(
- &mut self,
- clarity_tx: &mut ClarityTx,
- tx: &StacksTransaction,
- tx_len: u64,
- limit_behavior: &BlockLimitFunction,
- ast_rules: ASTRules,
- ) -> TransactionResult {
- if self.bytes_so_far + tx_len >= MAX_EPOCH_SIZE.into() {
- return TransactionResult::skipped(tx, "BlockSizeLimit".into());
- }
-
- if BlockLimitFunction::NO_LIMIT_HIT != *limit_behavior {
- return TransactionResult::skipped(tx, "LimitReached".into());
- }
-
- let (fee, receipt) = match StacksChainState::process_transaction(
- clarity_tx, tx, true, ast_rules,
- ) {
- Ok(x) => x,
- Err(ChainstateError::CostOverflowError(cost_before, cost_after, total_budget)) => {
- clarity_tx.reset_cost(cost_before.clone());
- if total_budget.proportion_largest_dimension(&cost_before)
- < TX_BLOCK_LIMIT_PROPORTION_HEURISTIC
- {
- warn!(
- "Transaction {} consumed over {}% of block budget, marking as invalid; budget was {}",
- tx.txid(),
- 100 - TX_BLOCK_LIMIT_PROPORTION_HEURISTIC,
- &total_budget
- );
- return TransactionResult::error(&tx, ChainstateError::TransactionTooBigError);
- } else {
- warn!(
- "Transaction {} reached block cost {}; budget was {}",
- tx.txid(),
- &cost_after,
- &total_budget
- );
- return TransactionResult::skipped_due_to_error(
- &tx,
- ChainstateError::BlockTooBigError,
- );
- }
- }
- Err(e) => return TransactionResult::error(&tx, e),
- };
-
- info!("Include tx";
- "tx" => %tx.txid(),
- "payload" => tx.payload.name(),
- "origin" => %tx.origin_address());
-
- self.txs.push(tx.clone());
- self.bytes_so_far += tx_len;
-
- TransactionResult::success(tx, fee, receipt)
- }
-}
-
-impl MockamotoNode {
- pub fn new(config: &Config) -> Result<MockamotoNode, String> {
- let miner_key = config
- .miner
- .mining_key
- .clone()
- .ok_or("Mockamoto node must be configured with `miner.mining_key`")?;
- let vrf_key = VRFPrivateKey::new();
-
- let stacker_pk = Secp256k1PublicKey::from_private(&miner_key);
- let stacker_pk_hash = Hash160::from_node_public_key(&stacker_pk);
-
- let stacker = StacksAddress {
- version: C32_ADDRESS_VERSION_TESTNET_SINGLESIG,
- bytes: stacker_pk_hash,
- };
-
- let burnchain = config.get_burnchain();
- let (sortdb, _burndb) = burnchain
- .connect_db(
- true,
- BurnchainHeaderHash([0; 32]),
- 100,
- STACKS_EPOCHS_MOCKAMOTO.to_vec(),
- )
- .map_err(|e| e.to_string())?;
-
- let mut initial_balances: Vec<_> = config
- .initial_balances
- .iter()
- .map(|balance| (balance.address.clone(), balance.amount))
- .collect();
-
- initial_balances.push((stacker.into(), 100_000_000_000_000));
-
- // Create a boot contract to initialize the aggregate public key prior to Pox-4 activation
- let self_signer = TestSigners::default();
- let agg_pub_key = self_signer.aggregate_public_key.clone();
- info!("Mockamoto node setting agg public key"; "agg_pub_key" => %to_hex(&self_signer.aggregate_public_key.compress().data));
- let callback = move |clarity_tx: &mut ClarityTx| {
- NakamotoChainState::aggregate_public_key_bootcode(clarity_tx, &agg_pub_key);
- };
- let mut boot_data =
- ChainStateBootData::new(&burnchain, initial_balances, Some(Box::new(callback)));
- let (chainstate, boot_receipts) = StacksChainState::open_and_exec(
- config.is_mainnet(),
- config.burnchain.chain_id,
- &config.get_chainstate_path_str(),
- Some(&mut boot_data),
- Some(config.node.get_marf_opts()),
- )
- .unwrap();
- let mempool = PeerThread::connect_mempool_db(config);
-
- let (coord_rcv, coord_comms) = CoordinatorCommunication::instantiate();
- let miner_status = Arc::new(Mutex::new(MinerStatus::make_ready(100)));
- let (relay_send, relay_rcv) = sync_channel(10);
- let counters = Counters::new();
- let should_keep_running = Arc::new(AtomicBool::new(true));
- let sync_comms = PoxSyncWatchdogComms::new(should_keep_running.clone());
-
- let globals = Globals::new(
- coord_comms,
- miner_status,
- relay_send,
- counters,
- sync_comms,
- should_keep_running,
- 0,
- );
-
- let mut event_dispatcher = EventDispatcher::new();
- for observer in config.events_observers.iter() {
- event_dispatcher.register_observer(observer);
- }
-
- crate::run_loop::announce_boot_receipts(
- &mut event_dispatcher,
- &chainstate,
- &burnchain.pox_constants,
- &boot_receipts,
- );
-
- Ok(MockamotoNode {
- sortdb,
- self_signer,
- chainstate,
- miner_key,
- vrf_key,
- relay_rcv: Some(relay_rcv),
- coord_rcv: Some(coord_rcv),
- dispatcher: event_dispatcher,
- mempool,
- globals,
- config: config.clone(),
- })
- }
-
- fn spawn_chains_coordinator(&mut self) -> JoinHandle<()> {
- let config = self.config.clone();
- let atlas_config = AtlasConfig::new(false);
-
- let (chainstate, _) = self.chainstate.reopen().unwrap();
- let coord_config = ChainsCoordinatorConfig {
- always_use_affirmation_maps: false,
- require_affirmed_anchor_blocks: false,
- ..ChainsCoordinatorConfig::new()
- };
- let mut dispatcher = self.dispatcher.clone();
- let burnchain = self.config.get_burnchain();
- let burndb = burnchain.open_burnchain_db(true).unwrap();
- let coordinator_indexer = MockBurnchainIndexer(burndb);
- let atlas_db = AtlasDB::connect(
- atlas_config.clone(),
- &self.config.get_atlas_db_file_path(),
- true,
- )
- .unwrap();
- let miner_status = Arc::new(Mutex::new(MinerStatus::make_ready(100)));
- let coordinator_receivers = self.coord_rcv.take().unwrap();
-
- thread::Builder::new()
- .name(format!("chains-coordinator-{}", &config.node.rpc_bind))
- .stack_size(BLOCK_PROCESSOR_STACK_SIZE)
- .spawn(move || {
- debug!(
- "chains-coordinator thread ID is {:?}",
- thread::current().id()
- );
- ChainsCoordinator::run(
- coord_config,
- chainstate,
- burnchain,
- &mut dispatcher,
- coordinator_receivers,
- atlas_config,
- Some(&mut ()),
- Some(&mut ()),
- miner_status,
- coordinator_indexer,
- atlas_db,
- );
- })
- .expect("FATAL: failed to start chains coordinator thread")
- }
-
- pub fn run(&mut self) {
- info!("Starting the mockamoto node by issuing initial empty mock burn blocks");
- let coordinator = self.spawn_chains_coordinator();
-
- self.produce_burnchain_block(true).unwrap();
- self.produce_burnchain_block(true).unwrap();
- self.produce_burnchain_block(true).unwrap();
- self.produce_burnchain_block(true).unwrap();
- self.produce_burnchain_block(true).unwrap();
- self.produce_burnchain_block(true).unwrap();
-
- let mut p2p_net = StacksNode::setup_peer_network(
- &self.config,
- &self.config.atlas,
- self.config.get_burnchain(),
- );
-
- let stackerdbs = StackerDBs::connect(&self.config.get_stacker_db_file_path(), true)
- .expect("FATAL: failed to connect to stacker DB");
-
- let _relayer = Relayer::from_p2p(&mut p2p_net, stackerdbs);
-
- let relayer_rcv = self.relay_rcv.take().unwrap();
- let relayer_globals = self.globals.clone();
- let mock_relayer_thread = thread::Builder::new()
- .name("mock-relayer".into())
- .spawn(move || {
- while relayer_globals.keep_running() {
- match relayer_rcv.recv_timeout(Duration::from_millis(500)) {
- Ok(dir) => {
- if let RelayerDirective::Exit = dir {
- break;
- }
- }
- Err(RecvTimeoutError::Timeout) => continue,
- Err(e) => {
- warn!("Error accepting relayer directive: {e:?}");
- break;
- }
- }
- }
- })
- .expect("FATAL: failed to start mock relayer thread");
-
- let peer_thread = PeerThread::new_all(
- self.globals.clone(),
- &self.config,
- self.config.get_burnchain().pox_constants,
- p2p_net,
- );
-
- let ev_dispatcher = self.dispatcher.clone();
- let peer_thread = thread::Builder::new()
- .stack_size(BLOCK_PROCESSOR_STACK_SIZE)
- .name("p2p".into())
- .spawn(move || {
- StacksNode::p2p_main(peer_thread, ev_dispatcher);
- })
- .expect("FATAL: failed to start p2p thread");
-
- while self.globals.keep_running() {
- self.produce_burnchain_block(false).unwrap();
- let expected_chain_length = self.mine_and_stage_block().unwrap();
- self.globals.coord().announce_new_stacks_block();
- let _ = self.wait_for_stacks_block(expected_chain_length);
- sleep(Duration::from_millis(self.config.node.mockamoto_time_ms));
- }
-
- self.globals.coord().stop_chains_coordinator();
-
- if let Err(e) = coordinator.join() {
- warn!("Error joining coordinator thread during shutdown: {e:?}");
- }
- if let Err(e) = mock_relayer_thread.join() {
- warn!("Error joining coordinator thread during shutdown: {e:?}");
- }
- if let Err(e) = peer_thread.join() {
- warn!("Error joining p2p thread during shutdown: {e:?}");
- }
- }
-
- #[cfg_attr(test, mutants::skip)]
- fn wait_for_stacks_block(&mut self, expected_length: u64) -> Result<(), ChainstateError> {
- while self.globals.keep_running() {
- let chain_length = match NakamotoChainState::get_canonical_block_header(
- self.chainstate.db(),
- &self.sortdb,
- ) {
- Ok(Some(chain_tip)) => chain_tip.stacks_block_height,
- Ok(None) | Err(ChainstateError::NoSuchBlockError) => 0,
- Err(e) => return Err(e),
- };
- if chain_length >= expected_length {
- return Ok(());
- }
- sleep(Duration::from_millis(100));
- }
- Err(ChainstateError::NoSuchBlockError)
- }
-
- fn produce_burnchain_block(&mut self, initializing: bool) -> Result<(), BurnchainError> {
- let miner_pk = Secp256k1PublicKey::from_private(&self.miner_key);
- let miner_pk_hash = Hash160::from_node_public_key(&miner_pk);
-
- let parent_snapshot = SortitionDB::get_canonical_burn_chain_tip(&self.sortdb.conn())?;
- info!("Mocking bitcoin block"; "parent_height" => parent_snapshot.block_height);
- let burn_height = parent_snapshot.block_height + 1;
-
- let mut ops = vec![];
-
- if burn_height == 1 {
- let mut txid = [2u8; 32];
- txid[0..8].copy_from_slice((burn_height + 1).to_be_bytes().as_ref());
- let key_register = LeaderKeyRegisterOp {
- consensus_hash: ConsensusHash([0; 20]),
- public_key: VRFPublicKey::from_private(&self.vrf_key),
- memo: miner_pk_hash.as_bytes().to_vec(),
- txid: Txid(txid),
- vtxindex: 0,
- block_height: burn_height,
- burn_header_hash: BurnchainHeaderHash([0; 32]),
- };
- ops.push(BlockstackOperationType::LeaderKeyRegister(key_register));
- } else if !initializing {
- let mut txid = [1u8; 32];
- txid[0..8].copy_from_slice((burn_height + 1).to_be_bytes().as_ref());
- txid[8..16].copy_from_slice((0u64).to_be_bytes().as_ref());
-
- let (parent_block_ptr, parent_vtxindex) =
- if parent_snapshot.winning_block_txid.as_bytes() == &[0; 32] {
- (0, 0)
- } else {
- (parent_snapshot.block_height.try_into().unwrap(), 0)
- };
-
- let parent_vrf_proof = NakamotoChainState::get_block_vrf_proof(
- self.chainstate.db(),
- &parent_snapshot.consensus_hash,
- )
- .map_err(|_e| BurnchainError::MissingParentBlock)?
- .unwrap_or_else(|| VRFProof::empty());
-
- let vrf_seed = VRFSeed::from_proof(&parent_vrf_proof);
- let parent_block_id = parent_snapshot.get_canonical_stacks_block_id();
-
- let block_commit = LeaderBlockCommitOp {
- block_header_hash: BlockHeaderHash(parent_block_id.0),
- new_seed: vrf_seed,
- parent_block_ptr,
- parent_vtxindex,
- key_block_ptr: 1,
- key_vtxindex: 0,
- memo: vec![STACKS_EPOCH_3_0_MARKER],
- burn_fee: 5000,
- input: (parent_snapshot.winning_block_txid.clone(), 3),
- burn_parent_modulus: u8::try_from(
- parent_snapshot.block_height % BURN_BLOCK_MINED_AT_MODULUS,
- )
- .unwrap(),
- apparent_sender: BurnchainSigner(miner_pk_hash.to_string()),
- commit_outs: vec![
- PoxAddress::Standard(StacksAddress::burn_address(false), None),
- PoxAddress::Standard(StacksAddress::burn_address(false), None),
- ],
- sunset_burn: 0,
- txid: Txid(txid),
- vtxindex: 0,
- block_height: burn_height,
- burn_header_hash: BurnchainHeaderHash([0; 32]),
- };
- ops.push(BlockstackOperationType::LeaderBlockCommit(block_commit))
- }
-
- let new_burn_block = make_burn_block(&parent_snapshot, &miner_pk_hash, ops)?;
-
- let burnchain = self.config.get_burnchain();
- let burndb = burnchain.open_burnchain_db(true).unwrap();
- let indexer = MockBurnchainIndexer(burndb);
- let mut burndb = burnchain.open_burnchain_db(true).unwrap();
-
- burndb.store_new_burnchain_block(
- &burnchain,
- &indexer,
- &BurnchainBlock::Bitcoin(new_burn_block),
- StacksEpochId::Epoch30,
- )?;
-
- self.globals.coord().announce_new_burn_block();
- let mut cur_snapshot = SortitionDB::get_canonical_burn_chain_tip(&self.sortdb.conn())?;
- while cur_snapshot.burn_header_hash == parent_snapshot.burn_header_hash {
- thread::sleep(Duration::from_millis(100));
- cur_snapshot = SortitionDB::get_canonical_burn_chain_tip(&self.sortdb.conn())?;
- }
-
- Ok(())
- }
-
- fn mine_stacks_block(&mut self) -> Result<NakamotoBlock, ChainstateError> {
- let miner_principal = StacksAddress::from_public_keys(
- C32_ADDRESS_VERSION_TESTNET_SINGLESIG,
- &AddressHashMode::SerializeP2PKH,
- 1,
- &vec![Secp256k1PublicKey::from_private(&self.miner_key)],
- )
- .unwrap()
- .into();
- let sortition_tip = SortitionDB::get_canonical_burn_chain_tip(self.sortdb.conn())?;
- let chain_id = self.chainstate.chain_id;
- let (mut chainstate_tx, clarity_instance) = self.chainstate.chainstate_tx_begin().unwrap();
-
- let (is_genesis, chain_tip_bh, chain_tip_ch) =
- match NakamotoChainState::get_canonical_block_header(&chainstate_tx, &self.sortdb) {
- Ok(Some(chain_tip)) => (
- false,
- chain_tip.anchored_header.block_hash(),
- chain_tip.consensus_hash,
- ),
- Ok(None) | Err(ChainstateError::NoSuchBlockError) =>
- // No stacks tip yet, parent should be genesis
- {
- (
- true,
- FIRST_STACKS_BLOCK_HASH,
- FIRST_BURNCHAIN_CONSENSUS_HASH,
- )
- }
- Err(e) => return Err(e),
- };
-
- let parent_block_id = StacksBlockId::new(&chain_tip_ch, &chain_tip_bh);
-
- let (parent_chain_length, parent_burn_height) = if is_genesis {
- (0, 0)
- } else {
- let tip_info = NakamotoChainState::get_block_header(&chainstate_tx, &parent_block_id)?
- .ok_or(ChainstateError::NoSuchBlockError)?;
- (tip_info.stacks_block_height, tip_info.burn_header_height)
- };
-
- let miner_nonce = if is_genesis {
- 0
- } else {
- let sortdb_conn = self.sortdb.index_conn();
- let mut clarity_conn = clarity_instance.read_only_connection_checked(
- &parent_block_id,
- &chainstate_tx,
- &sortdb_conn,
- )?;
- StacksChainState::get_nonce(&mut clarity_conn, &miner_principal)
- };
-
- info!(
- "Mining block"; "parent_chain_length" => parent_chain_length, "chain_tip_bh" => %chain_tip_bh,
- "chain_tip_ch" => %chain_tip_ch, "miner_account" => %miner_principal, "miner_nonce" => %miner_nonce,
- );
-
- let vrf_proof = VRF::prove(&self.vrf_key, sortition_tip.sortition_hash.as_bytes());
- let coinbase_tx_payload =
- TransactionPayload::Coinbase(CoinbasePayload([1; 32]), None, Some(vrf_proof));
- let mut coinbase_tx = StacksTransaction::new(
- TransactionVersion::Testnet,
- TransactionAuth::from_p2pkh(&self.miner_key).unwrap(),
- coinbase_tx_payload,
- );
- coinbase_tx.chain_id = chain_id;
- coinbase_tx.set_origin_nonce(miner_nonce + 1);
- let mut coinbase_tx_signer = StacksTransactionSigner::new(&coinbase_tx);
- coinbase_tx_signer.sign_origin(&self.miner_key).unwrap();
- let coinbase_tx = coinbase_tx_signer.get_tx().unwrap();
-
- let miner_pk = Secp256k1PublicKey::from_private(&self.miner_key);
- let miner_pk_hash = Hash160::from_node_public_key(&miner_pk);
-
- // Add a tenure change transaction to the block:
- // as of now every mockamoto block is a tenure-change.
- // If mockamoto mode changes to support non-tenure-changing blocks, this will have
- // to be gated.
- let tenure_change_tx_payload = TransactionPayload::TenureChange(TenureChangePayload {
- tenure_consensus_hash: sortition_tip.consensus_hash.clone(),
- prev_tenure_consensus_hash: chain_tip_ch.clone(),
- burn_view_consensus_hash: sortition_tip.consensus_hash,
- previous_tenure_end: parent_block_id,
- previous_tenure_blocks: 1,
- cause: TenureChangeCause::BlockFound,
- pubkey_hash: miner_pk_hash,
- });
- let mut tenure_tx = StacksTransaction::new(
- TransactionVersion::Testnet,
- TransactionAuth::from_p2pkh(&self.miner_key).unwrap(),
- tenure_change_tx_payload,
- );
- tenure_tx.chain_id = chain_id;
- tenure_tx.set_origin_nonce(miner_nonce);
- let mut tenure_tx_signer = StacksTransactionSigner::new(&tenure_tx);
- tenure_tx_signer.sign_origin(&self.miner_key).unwrap();
- let tenure_tx = tenure_tx_signer.get_tx().unwrap();
-
- let pox_address = PoxAddress::Standard(
- StacksAddress::burn_address(false),
- Some(AddressHashMode::SerializeP2PKH),
- );
-
- let signer_sk = Secp256k1PrivateKey::from_seed(&[1, 2, 3, 4]);
- let signer_key = Secp256k1PublicKey::from_private(&signer_sk).to_bytes_compressed();
- let signer_addr = StacksAddress::from_public_keys(
- C32_ADDRESS_VERSION_TESTNET_SINGLESIG,
- &AddressHashMode::SerializeP2PKH,
- 1,
- &vec![Secp256k1PublicKey::from_private(&signer_sk)],
- )
- .unwrap()
- .into();
-
- let block_height = sortition_tip.block_height;
- let reward_cycle = self
- .sortdb
- .pox_constants
- .block_height_to_reward_cycle(self.sortdb.first_block_height, block_height)
- .unwrap();
-
- let stack_stx_payload = if parent_chain_length < 2 {
- let signature = make_pox_4_signer_key_signature(
- &pox_address,
- &signer_sk,
- reward_cycle.into(),
- &Pox4SignatureTopic::StackStx,
- CHAIN_ID_TESTNET,
- 12_u128,
- )
- .unwrap()
- .to_rsv();
- TransactionPayload::ContractCall(TransactionContractCall {
- address: StacksAddress::burn_address(false),
- contract_name: "pox-4".try_into().unwrap(),
- function_name: "stack-stx".try_into().unwrap(),
- function_args: vec![
- ClarityValue::UInt(99_000_000_000_000),
- pox_address.as_clarity_tuple().unwrap().into(),
- ClarityValue::UInt(u128::from(parent_burn_height)),
- ClarityValue::UInt(12),
- ClarityValue::some(ClarityValue::buff_from(signature).unwrap()).unwrap(),
- ClarityValue::buff_from(signer_key).unwrap(),
- ],
- })
- } else {
- let signature = make_pox_4_signer_key_signature(
- &pox_address,
- &signer_sk,
- reward_cycle.into(),
- &Pox4SignatureTopic::StackExtend,
- CHAIN_ID_TESTNET,
- 5_u128,
- )
- .unwrap()
- .to_rsv();
- // NOTE: stack-extend doesn't currently work, because the PoX-4 lockup
- // special functions have not been implemented.
- TransactionPayload::ContractCall(TransactionContractCall {
- address: StacksAddress::burn_address(false),
- contract_name: "pox-4".try_into().unwrap(),
- function_name: "stack-extend".try_into().unwrap(),
- function_args: vec![
- ClarityValue::UInt(5),
- pox_address.as_clarity_tuple().unwrap().into(),
- ClarityValue::some(ClarityValue::buff_from(signature).unwrap()).unwrap(),
- ClarityValue::buff_from(signer_key).unwrap(),
- ],
- })
- };
- let mut stack_stx_tx = StacksTransaction::new(
- TransactionVersion::Testnet,
- TransactionAuth::from_p2pkh(&self.miner_key).unwrap(),
- stack_stx_payload,
- );
- stack_stx_tx.chain_id = chain_id;
- stack_stx_tx.set_origin_nonce(miner_nonce + 2);
- let mut stack_stx_tx_signer = StacksTransactionSigner::new(&stack_stx_tx);
- stack_stx_tx_signer.sign_origin(&self.miner_key).unwrap();
- let stacks_stx_tx = stack_stx_tx_signer.get_tx().unwrap();
-
- let signer_nonce = if is_genesis {
- 0
- } else {
- let sortdb_conn = self.sortdb.index_conn();
- let mut clarity_conn = clarity_instance.read_only_connection_checked(
- &parent_block_id,
- &chainstate_tx,
- &sortdb_conn,
- )?;
- StacksChainState::get_nonce(&mut clarity_conn, &signer_addr)
- };
- let mut next_signer = self.self_signer.clone();
- let next_agg_key = next_signer.generate_aggregate_key(reward_cycle + 1);
- let aggregate_public_key_val =
- ClarityValue::buff_from(next_agg_key.compress().data.to_vec())
- .expect("Failed to serialize aggregate public key");
- let vote_payload = TransactionPayload::new_contract_call(
- boot_code_addr(false),
- SIGNERS_VOTING_NAME,
- SIGNERS_VOTING_FUNCTION_NAME,
- vec![
- ClarityValue::UInt(0),
- aggregate_public_key_val,
- ClarityValue::UInt(0),
- ClarityValue::UInt((reward_cycle + 1).into()),
- ],
- )
- .unwrap();
- let mut vote_tx = StacksTransaction::new(
- TransactionVersion::Testnet,
- TransactionAuth::from_p2pkh(&signer_sk).unwrap(),
- vote_payload,
- );
- vote_tx.chain_id = chain_id;
- vote_tx.set_origin_nonce(signer_nonce);
- let mut vote_tx_signer = StacksTransactionSigner::new(&vote_tx);
- vote_tx_signer.sign_origin(&signer_sk).unwrap();
- let vote_tx = vote_tx_signer.get_tx().unwrap();
-
- let sortdb_handle = self.sortdb.index_conn();
- let SetupBlockResult {
- mut clarity_tx,
- matured_miner_rewards_opt,
- ..
- } = NakamotoChainState::setup_block(
- &mut chainstate_tx,
- clarity_instance,
- &sortdb_handle,
- self.sortdb.first_block_height,
- &self.sortdb.pox_constants,
- chain_tip_ch.clone(),
- chain_tip_bh.clone(),
- parent_chain_length,
- parent_burn_height,
- sortition_tip.burn_header_hash.clone(),
- sortition_tip.block_height.try_into().map_err(|_| {
- ChainstateError::InvalidStacksBlock("Burn block height exceeded u32".into())
- })?,
- true,
- parent_chain_length + 1,
- false,
- )?;
-
- let txs = vec![tenure_tx, coinbase_tx, stacks_stx_tx, vote_tx];
-
- let _ = match StacksChainState::process_block_transactions(
- &mut clarity_tx,
- &txs,
- 0,
- ASTRules::PrecheckSize,
- ) {
- Err(e) => {
- let msg = format!("Mined invalid stacks block {e:?}");
- warn!("{msg}");
-
- clarity_tx.rollback_block();
- return Err(ChainstateError::InvalidStacksBlock(msg));
- }
- Ok((block_fees, _block_burns, txs_receipts)) => (block_fees, txs_receipts),
- };
-
- let bytes_so_far = txs.iter().map(|tx| tx.tx_len()).sum();
- let mut builder = MockamotoBlockBuilder { txs, bytes_so_far };
- let _ = match StacksBlockBuilder::select_and_apply_transactions(
- &mut clarity_tx,
- &mut builder,
- &mut self.mempool,
- parent_chain_length,
- &[],
- BlockBuilderSettings {
- max_miner_time_ms: 15_000,
- mempool_settings: MemPoolWalkSettings::default(),
- miner_status: Arc::new(Mutex::new(MinerStatus::make_ready(10000))),
- },
- None,
- ASTRules::PrecheckSize,
- ) {
- Ok(x) => x,
- Err(e) => {
- let msg = format!("Mined invalid stacks block {e:?}");
- warn!("{msg}");
-
- clarity_tx.rollback_block();
- return Err(ChainstateError::InvalidStacksBlock(msg));
- }
- };
-
- let _lockup_events = match NakamotoChainState::finish_block(
- &mut clarity_tx,
- matured_miner_rewards_opt.as_ref(),
- ) {
- Err(ChainstateError::InvalidStacksBlock(e)) => {
- clarity_tx.rollback_block();
- return Err(ChainstateError::InvalidStacksBlock(e));
- }
- Err(e) => return Err(e),
- Ok(lockup_events) => lockup_events,
- };
-
- let state_index_root = clarity_tx.seal();
- let tx_merkle_tree: MerkleTree<Sha512Trunc256Sum> = builder.txs.iter().collect();
- clarity_tx
- .commit_mined_block(&StacksBlockId::new(
- &MINER_BLOCK_CONSENSUS_HASH,
- &MINER_BLOCK_HEADER_HASH,
- ))
- .unwrap();
- chainstate_tx.commit().unwrap();
-
- let mut block = NakamotoBlock {
- header: NakamotoBlockHeader {
- version: 100,
- chain_length: parent_chain_length + 1,
- burn_spent: sortition_tip.total_burn,
- tx_merkle_root: tx_merkle_tree.root(),
- state_index_root,
- signer_signature: ThresholdSignature::empty(),
- miner_signature: MessageSignature::empty(),
- consensus_hash: sortition_tip.consensus_hash.clone(),
- parent_block_id: StacksBlockId::new(&chain_tip_ch, &chain_tip_bh),
- signer_bitvec: BitVec::zeros(1)
- .expect("BUG: bitvec of length-1 failed to construct"),
- },
- txs: builder.txs,
- };
-
- let miner_signature = self
- .miner_key
- .sign(block.header.miner_signature_hash().as_bytes())
- .unwrap();
-
- block.header.miner_signature = miner_signature;
-
- Ok(block)
- }
-
- #[cfg_attr(test, mutants::skip)]
- fn mine_and_stage_block(&mut self) -> Result<u64, ChainstateError> {
- let mut block = self.mine_stacks_block()?;
- let config = self.chainstate.config();
- let chain_length = block.header.chain_length;
- let mut sortition_handle = self.sortdb.index_handle_at_tip();
- let burn_tip = SortitionDB::get_canonical_burn_chain_tip(&self.sortdb.conn())?;
- let cycle = self
- .sortdb
- .pox_constants
- .block_height_to_reward_cycle(self.sortdb.first_block_height, burn_tip.block_height)
- .unwrap();
- self.self_signer.sign_nakamoto_block(&mut block, cycle);
-
- let aggregate_public_key = if chain_length <= 1 {
- self.self_signer.aggregate_public_key
- } else {
- let aggregate_public_key = NakamotoChainState::get_aggregate_public_key(
- &mut self.chainstate,
- &self.sortdb,
- &sortition_handle,
- &block,
- )?;
- aggregate_public_key
- };
- let (headers_conn, staging_tx) = self.chainstate.headers_conn_and_staging_tx_begin()?;
- NakamotoChainState::accept_block(
- &config,
- block,
- &mut sortition_handle,
- &staging_tx,
- headers_conn,
- &aggregate_public_key,
- )?;
- staging_tx.commit()?;
- Ok(chain_length)
- }
-}
diff --git a/testnet/stacks-node/src/mockamoto/tests.rs b/testnet/stacks-node/src/mockamoto/tests.rs
deleted file mode 100644
index cbbed76071..0000000000
--- a/testnet/stacks-node/src/mockamoto/tests.rs
+++ /dev/null
@@ -1,414 +0,0 @@
-use std::thread;
-use std::time::{Duration, Instant};
-
-use clarity::vm::costs::ExecutionCost;
-use stacks::chainstate::burn::db::sortdb::SortitionDB;
-use stacks::chainstate::nakamoto::NakamotoChainState;
-use stacks::chainstate::stacks::db::StacksChainState;
-use stacks_common::types::chainstate::{StacksAddress, StacksPrivateKey};
-use stacks_common::types::net::PeerAddress;
-use stacks_common::types::StacksEpochId;
-use stacks_common::util::get_epoch_time_secs;
-use stacks_common::util::hash::to_hex;
-
-use super::MockamotoNode;
-use crate::config::{EventKeyType, EventObserverConfig};
-use crate::neon_node::PeerThread;
-use crate::tests::neon_integrations::{get_pox_info, submit_tx, test_observer};
-use crate::tests::{make_stacks_transfer, to_addr};
-use crate::{Config, ConfigFile};
-
-#[test]
-fn observe_100_blocks() {
- let mut conf = Config::from_config_file(ConfigFile::mockamoto()).unwrap();
- conf.node.working_dir = format!(
- "/tmp/stacks-node-tests/mock_observe_100_blocks-{}",
- get_epoch_time_secs()
- );
- conf.node.rpc_bind = "127.0.0.1:19343".into();
- conf.node.p2p_bind = "127.0.0.1:19344".into();
- conf.connection_options.public_ip_address = Some((PeerAddress::from_ipv4(127, 0, 0, 1), 20443));
- conf.node.mockamoto_time_ms = 10;
-
- let submitter_sk = StacksPrivateKey::from_seed(&[1]);
- let submitter_addr = to_addr(&submitter_sk);
- conf.add_initial_balance(submitter_addr.to_string(), 1_000_000);
- let recipient_addr = StacksAddress::burn_address(false).into();
-
- let observer_port = 19300;
- test_observer::spawn_at(observer_port);
- conf.events_observers.insert(EventObserverConfig {
- endpoint: format!("localhost:{observer_port}"),
- events_keys: vec![EventKeyType::AnyEvent],
- });
-
- let mut mockamoto = MockamotoNode::new(&conf).unwrap();
- let globals = mockamoto.globals.clone();
-
- let mut mempool = PeerThread::connect_mempool_db(&conf);
- let (mut chainstate, _) = StacksChainState::open(
- conf.is_mainnet(),
- conf.burnchain.chain_id,
- &conf.get_chainstate_path_str(),
- None,
- )
- .unwrap();
- let burnchain = conf.get_burnchain();
- let sortdb = burnchain.open_sortition_db(true).unwrap();
-
- let start = Instant::now();
-
- let node_thread = thread::Builder::new()
- .name("mockamoto-main".into())
- .spawn(move || mockamoto.run())
- .expect("FATAL: failed to start mockamoto main thread");
-
- // make a transfer tx to test that the mockamoto miner picks up txs from the mempool
- let tx_fee = 200;
- let transfer_tx = make_stacks_transfer(&submitter_sk, 0, tx_fee, &recipient_addr, 100);
- let transfer_tx_hex = format!("0x{}", to_hex(&transfer_tx));
-
- let mut sent_tx = false;
-
- // complete within 2 minutes or abort
- let completed = loop {
- if Instant::now().duration_since(start) > Duration::from_secs(120) {
- break false;
- }
- let latest_block = test_observer::get_blocks().pop();
- thread::sleep(Duration::from_secs(1));
- let Some(ref latest_block) = latest_block else {
- info!("No block observed yet!");
- continue;
- };
- let stacks_block_height = latest_block.get("block_height").unwrap().as_u64().unwrap();
- info!("Block height observed: {stacks_block_height}");
-
- if stacks_block_height >= 1 && !sent_tx {
- let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb)
- .unwrap()
- .unwrap();
- // Bypass admission checks
- mempool
- .submit_raw(
- &mut chainstate,
- &sortdb,
- &tip.consensus_hash,
- &tip.anchored_header.block_hash(),
- transfer_tx.clone(),
- &ExecutionCost::max_value(),
- &StacksEpochId::Epoch30,
- )
- .unwrap();
-
- sent_tx = true;
- }
-
- if stacks_block_height >= 100 {
- break true;
- }
- };
-
- globals.signal_stop();
-
- node_thread
- .join()
- .expect("Failed to join node thread to exit");
-
- let transfer_tx_included = test_observer::get_blocks()
- .into_iter()
- .find(|block_json| {
- block_json["transactions"]
- .as_array()
- .unwrap()
- .iter()
- .find(|tx_json| tx_json["raw_tx"].as_str() == Some(&transfer_tx_hex))
- .is_some()
- })
- .is_some();
-
- assert!(
- transfer_tx_included,
- "Mockamoto node failed to include the transfer tx"
- );
-
- assert!(
- completed,
- "Mockamoto node failed to produce and announce 100 blocks before timeout"
- );
-}
-
-#[test]
-fn mempool_rpc_submit() {
- let mut conf = Config::from_config_file(ConfigFile::mockamoto()).unwrap();
- conf.node.working_dir = format!(
- "/tmp/stacks-node-tests/mempool_rpc_submit-{}",
- get_epoch_time_secs()
- );
- conf.node.rpc_bind = "127.0.0.1:19743".into();
- conf.node.p2p_bind = "127.0.0.1:19744".into();
- conf.node.mockamoto_time_ms = 10;
-
- let submitter_sk = StacksPrivateKey::from_seed(&[1]);
- let submitter_addr = to_addr(&submitter_sk);
- conf.add_initial_balance(submitter_addr.to_string(), 1_000);
- let recipient_addr = StacksAddress::burn_address(false).into();
-
- let observer_port = 19800;
- test_observer::spawn_at(observer_port);
- conf.events_observers.insert(EventObserverConfig {
- endpoint: format!("localhost:{observer_port}"),
- events_keys: vec![EventKeyType::AnyEvent],
- });
-
- let mut mockamoto = MockamotoNode::new(&conf).unwrap();
- let globals = mockamoto.globals.clone();
-
- let http_origin = format!("http://{}", &conf.node.rpc_bind);
-
- let start = Instant::now();
-
- let node_thread = thread::Builder::new()
- .name("mockamoto-main".into())
- .spawn(move || mockamoto.run())
- .expect("FATAL: failed to start mockamoto main thread");
-
- // make a transfer tx to test that the mockamoto miner picks up txs from the mempool
- let tx_fee = 200;
- let transfer_tx = make_stacks_transfer(&submitter_sk, 0, tx_fee, &recipient_addr, 100);
- let transfer_tx_hex = format!("0x{}", to_hex(&transfer_tx));
-
- let mut sent_tx = false;
-
- // complete within 2 minutes or abort
- let completed = loop {
- if Instant::now().duration_since(start) > Duration::from_secs(120) {
- break false;
- }
- let latest_block = test_observer::get_blocks().pop();
- thread::sleep(Duration::from_secs(1));
- let Some(ref latest_block) = latest_block else {
- info!("No block observed yet!");
- continue;
- };
- let stacks_block_height = latest_block.get("block_height").unwrap().as_u64().unwrap();
- info!("Block height observed: {stacks_block_height}");
-
- if stacks_block_height >= 1 && !sent_tx {
- // Enforce admission checks by utilizing the RPC endpoint
- submit_tx(&http_origin, &transfer_tx);
- sent_tx = true;
- }
-
- if stacks_block_height >= 100 {
- break true;
- }
- };
-
- globals.signal_stop();
-
- node_thread
- .join()
- .expect("Failed to join node thread to exit");
-
- let transfer_tx_included = test_observer::get_blocks()
- .into_iter()
- .find(|block_json| {
- block_json["transactions"]
- .as_array()
- .unwrap()
- .iter()
- .find(|tx_json| tx_json["raw_tx"].as_str() == Some(&transfer_tx_hex))
- .is_some()
- })
- .is_some();
-
- assert!(
- transfer_tx_included,
- "Mockamoto node failed to include the transfer tx"
- );
-
- assert!(
- completed,
- "Mockamoto node failed to produce and announce 100 blocks before timeout"
- );
-}
-
-#[test]
-fn observe_set_aggregate_key() {
- let mut conf = Config::from_config_file(ConfigFile::mockamoto()).unwrap();
- conf.node.mockamoto_time_ms = 10;
- conf.node.p2p_bind = "127.0.0.1:20443".into();
- conf.node.rpc_bind = "127.0.0.1:20444".into();
- conf.connection_options.public_ip_address = Some((PeerAddress::from_ipv4(127, 0, 0, 1), 20443));
-
- let submitter_sk = StacksPrivateKey::from_seed(&[1]);
- let submitter_addr = to_addr(&submitter_sk);
- conf.add_initial_balance(submitter_addr.to_string(), 1_000);
-
- test_observer::spawn();
- let observer_port = test_observer::EVENT_OBSERVER_PORT;
- conf.events_observers.insert(EventObserverConfig {
- endpoint: format!("localhost:{observer_port}"),
- events_keys: vec![EventKeyType::AnyEvent],
- });
-
- let mut mockamoto = MockamotoNode::new(&conf).unwrap();
- let mut signer = mockamoto.self_signer.clone();
-
- let globals = mockamoto.globals.clone();
-
- StacksChainState::open(
- conf.is_mainnet(),
- conf.burnchain.chain_id,
- &conf.get_chainstate_path_str(),
- None,
- )
- .unwrap();
- let sortition_tip = SortitionDB::get_canonical_burn_chain_tip(mockamoto.sortdb.conn()).unwrap();
-
- let start = Instant::now();
- // Get the reward cycle of the sortition tip
- let reward_cycle = mockamoto
- .sortdb
- .pox_constants
- .block_height_to_reward_cycle(
- mockamoto.sortdb.first_block_height,
- sortition_tip.block_height,
- )
- .unwrap_or_else(|| {
- panic!(
- "Failed to determine reward cycle of block height: {}",
- sortition_tip.block_height
- )
- });
-
- // Get the aggregate public key of the original reward cycle to compare against
- let expected_cur_key = signer.generate_aggregate_key(reward_cycle);
- let expected_next_key = signer.generate_aggregate_key(reward_cycle + 1);
-
- let node_thread = thread::Builder::new()
- .name("mockamoto-main".into())
- .spawn(move || {
- mockamoto.run();
- let aggregate_key_block_header = NakamotoChainState::get_canonical_block_header(
- mockamoto.chainstate.db(),
- &mockamoto.sortdb,
- )
- .unwrap()
- .unwrap();
- // Get the aggregate public key of the original reward cycle
- let orig_aggregate_key = mockamoto
- .chainstate
- .get_aggregate_public_key_pox_4(
- &mockamoto.sortdb,
- &aggregate_key_block_header.index_block_hash(),
- reward_cycle,
- )
- .unwrap();
- // Get the aggregate public key of the next reward cycle that we manually overwrote
- let new_aggregate_key = mockamoto
- .chainstate
- .get_aggregate_public_key_pox_4(
- &mockamoto.sortdb,
- &aggregate_key_block_header.index_block_hash(),
- reward_cycle + 1,
- )
- .unwrap();
- (orig_aggregate_key, new_aggregate_key)
- })
- .expect("FATAL: failed to start mockamoto main thread");
-
- // complete within 5 seconds or abort (we are only observing one block)
- let completed = loop {
- if Instant::now().duration_since(start) > Duration::from_secs(120) {
- break false;
- }
- let latest_block = test_observer::get_blocks().pop();
- thread::sleep(Duration::from_secs(1));
- let Some(ref latest_block) = latest_block else {
- info!("No block observed yet!");
- continue;
- };
- let stacks_block_height = latest_block.get("block_height").unwrap().as_u64().unwrap();
- info!("Block height observed: {stacks_block_height}");
- if stacks_block_height >= 100 {
- break true;
- }
- };
-
- globals.signal_stop();
-
- let (orig_aggregate_key, new_aggregate_key) = node_thread
- .join()
- .expect("Failed to join node thread to exit");
-
- assert!(
- completed,
- "Mockamoto node failed to produce and announce its block before timeout"
- );
-
- // Did we set and retrieve the aggregate key correctly?
- assert_eq!(orig_aggregate_key.unwrap(), expected_cur_key);
- assert_eq!(new_aggregate_key.unwrap(), expected_next_key);
-}
-
-#[test]
-fn rpc_pox_info() {
- let mut conf = Config::from_config_file(ConfigFile::mockamoto()).unwrap();
- conf.node.mockamoto_time_ms = 10;
- conf.node.rpc_bind = "127.0.0.1:19543".into();
- conf.node.p2p_bind = "127.0.0.1:19544".into();
-
- let observer_port = 19500;
- test_observer::spawn_at(observer_port);
- conf.events_observers.insert(EventObserverConfig {
- endpoint: format!("localhost:{observer_port}"),
- events_keys: vec![EventKeyType::AnyEvent],
- });
-
- let mut mockamoto = MockamotoNode::new(&conf).unwrap();
- let globals = mockamoto.globals.clone();
-
- let http_origin = format!("http://{}", &conf.node.rpc_bind);
-
- let start = Instant::now();
-
- let node_thread = thread::Builder::new()
- .name("mockamoto-main".into())
- .spawn(move || mockamoto.run())
- .expect("FATAL: failed to start mockamoto main thread");
-
- // mine 5 blocks
- let completed = loop {
- // complete within 2 minutes or abort
- if Instant::now().duration_since(start) > Duration::from_secs(120) {
- break false;
- }
- let latest_block = test_observer::get_blocks().pop();
- thread::sleep(Duration::from_secs(1));
- let Some(ref latest_block) = latest_block else {
- info!("No block observed yet!");
- continue;
- };
- let stacks_block_height = latest_block.get("block_height").unwrap().as_u64().unwrap();
- info!("Block height observed: {stacks_block_height}");
-
- if stacks_block_height >= 5 {
- break true;
- }
- };
-
- // fetch rpc poxinfo
- let _pox_info = get_pox_info(&http_origin);
-
- globals.signal_stop();
-
- assert!(
- completed,
- "Mockamoto node failed to produce and announce 100 blocks before timeout"
- );
- node_thread
- .join()
- .expect("Failed to join node thread to exit");
-}
diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs
index 5344330bf6..3546e48b93 100644
--- a/testnet/stacks-node/src/nakamoto_node/miner.rs
+++ b/testnet/stacks-node/src/nakamoto_node/miner.rs
@@ -31,7 +31,6 @@ use stacks::chainstate::burn::db::sortdb::SortitionDB;
use stacks::chainstate::burn::{BlockSnapshot, ConsensusHash};
use stacks::chainstate::nakamoto::miner::{NakamotoBlockBuilder, NakamotoTenureInfo};
use stacks::chainstate::nakamoto::signer_set::NakamotoSigners;
-use stacks::chainstate::nakamoto::test_signers::TestSigners;
use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockVote, NakamotoChainState};
use stacks::chainstate::stacks::boot::MINERS_NAME;
use stacks::chainstate::stacks::db::{StacksChainState, StacksHeaderInfo};
@@ -219,21 +218,14 @@ impl BlockMinerThread {
warn!("Failed to propose block to stackerdb: {e:?}");
}
}
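+        // Bump the proposed-blocks counter so integration tests can detect the new proposal.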
+ self.globals.counters.bump_naka_proposed_blocks();
- if let Some(self_signer) = self.config.self_signing() {
- if let Err(e) = self.self_sign_and_broadcast(self_signer, new_block.clone()) {
- warn!("Error self-signing block: {e:?}");
- } else {
- self.globals.coord().announce_new_stacks_block();
- }
+ if let Err(e) =
+ self.wait_for_signer_signature_and_broadcast(&stackerdbs, new_block.clone())
+ {
+ warn!("Error broadcasting block: {e:?}");
} else {
- if let Err(e) =
- self.wait_for_signer_signature_and_broadcast(&stackerdbs, new_block.clone())
- {
- warn!("Error broadcasting block: {e:?}");
- } else {
- self.globals.coord().announce_new_stacks_block();
- }
+ self.globals.coord().announce_new_stacks_block();
}
self.globals.counters.bump_naka_mined_blocks();
@@ -543,54 +535,6 @@ impl BlockMinerThread {
Ok(())
}
- fn self_sign_and_broadcast(
- &self,
- mut signer: TestSigners,
- mut block: NakamotoBlock,
- ) -> Result<(), ChainstateError> {
- let mut chain_state = neon_node::open_chainstate_with_faults(&self.config)
- .expect("FATAL: could not open chainstate DB");
- let chainstate_config = chain_state.config();
- let sort_db = SortitionDB::open(
- &self.config.get_burn_db_file_path(),
- true,
- self.burnchain.pox_constants.clone(),
- )
- .expect("FATAL: could not open sortition DB");
-
- let burn_height = self.burn_block.block_height;
- let cycle = self
- .burnchain
- .block_height_to_reward_cycle(burn_height)
- .expect("FATAL: no reward cycle for burn block");
- signer.sign_nakamoto_block(&mut block, cycle);
-
- let mut sortition_handle = sort_db.index_handle_at_tip();
- let aggregate_public_key = if block.header.chain_length <= 1 {
- signer.aggregate_public_key.clone()
- } else {
- let aggregate_public_key = NakamotoChainState::get_aggregate_public_key(
- &mut chain_state,
- &sort_db,
- &sortition_handle,
- &block,
- )?;
- aggregate_public_key
- };
-
- let (headers_conn, staging_tx) = chain_state.headers_conn_and_staging_tx_begin()?;
- NakamotoChainState::accept_block(
- &chainstate_config,
- block,
- &mut sortition_handle,
- &staging_tx,
- headers_conn,
- &aggregate_public_key,
- )?;
- staging_tx.commit()?;
- Ok(())
- }
-
/// Get the coinbase recipient address, if set in the config and if allowed in this epoch
    fn get_coinbase_recipient(&self, epoch_id: StacksEpochId) -> Option<PrincipalData> {
if epoch_id < StacksEpochId::Epoch21 && self.config.miner.block_reward_recipient.is_some() {
diff --git a/testnet/stacks-node/src/run_loop/neon.rs b/testnet/stacks-node/src/run_loop/neon.rs
index f053c58cff..9a875d1786 100644
--- a/testnet/stacks-node/src/run_loop/neon.rs
+++ b/testnet/stacks-node/src/run_loop/neon.rs
@@ -17,8 +17,7 @@ use stacks::chainstate::coordinator::{
static_get_heaviest_affirmation_map, static_get_stacks_tip_affirmation_map, ChainsCoordinator,
ChainsCoordinatorConfig, CoordinatorCommunication, Error as coord_error,
};
-use stacks::chainstate::nakamoto::NakamotoChainState;
-use stacks::chainstate::stacks::db::{ChainStateBootData, ClarityTx, StacksChainState};
+use stacks::chainstate::stacks::db::{ChainStateBootData, StacksChainState};
use stacks::chainstate::stacks::miner::{signal_mining_blocked, signal_mining_ready, MinerStatus};
use stacks::core::StacksEpochId;
use stacks::net::atlas::{AtlasConfig, AtlasDB, Attachment};
@@ -26,7 +25,7 @@ use stacks::util_lib::db::Error as db_error;
use stacks_common::deps_common::ctrlc as termination;
use stacks_common::deps_common::ctrlc::SignalId;
use stacks_common::types::PublicKey;
-use stacks_common::util::hash::{to_hex, Hash160};
+use stacks_common::util::hash::Hash160;
use stacks_common::util::{get_epoch_time_secs, sleep_ms};
use stx_genesis::GenesisData;
@@ -47,10 +46,12 @@ use crate::{
pub const STDERR: i32 = 2;
#[cfg(test)]
-pub type RunLoopCounter = Arc<AtomicU64>;
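+/// In test builds, a cloneable handle to an `AtomicU64` that integration tests read to track run-loop progress.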
+#[derive(Clone)]
+pub struct RunLoopCounter(pub Arc<AtomicU64>);
#[cfg(not(test))]
-pub type RunLoopCounter = ();
+#[derive(Clone)]
+pub struct RunLoopCounter();
#[cfg(test)]
const UNCONDITIONAL_CHAIN_LIVENESS_CHECK: u64 = 30;
@@ -58,7 +59,27 @@ const UNCONDITIONAL_CHAIN_LIVENESS_CHECK: u64 = 30;
#[cfg(not(test))]
const UNCONDITIONAL_CHAIN_LIVENESS_CHECK: u64 = 300;
-#[derive(Clone)]
+impl Default for RunLoopCounter {
+ #[cfg(test)]
+ fn default() -> Self {
+ RunLoopCounter(Arc::new(AtomicU64::new(0)))
+ }
+ #[cfg(not(test))]
+ fn default() -> Self {
+ Self()
+ }
+}
+
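+// In tests, deref to the inner `Arc<AtomicU64>` so callers can use `.load()` and `.fetch_add()` directly.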
+#[cfg(test)]
+impl std::ops::Deref for RunLoopCounter {
+    type Target = Arc<AtomicU64>;
+
+ fn deref(&self) -> &Self::Target {
+ &self.0
+ }
+}
+
+#[derive(Clone, Default)]
pub struct Counters {
pub blocks_processed: RunLoopCounter,
pub microblocks_processed: RunLoopCounter,
@@ -69,43 +90,18 @@ pub struct Counters {
pub naka_submitted_vrfs: RunLoopCounter,
pub naka_submitted_commits: RunLoopCounter,
pub naka_mined_blocks: RunLoopCounter,
+ pub naka_proposed_blocks: RunLoopCounter,
pub naka_mined_tenures: RunLoopCounter,
}
impl Counters {
- #[cfg(test)]
- pub fn new() -> Counters {
- Counters {
- blocks_processed: RunLoopCounter::new(AtomicU64::new(0)),
- microblocks_processed: RunLoopCounter::new(AtomicU64::new(0)),
- missed_tenures: RunLoopCounter::new(AtomicU64::new(0)),
- missed_microblock_tenures: RunLoopCounter::new(AtomicU64::new(0)),
- cancelled_commits: RunLoopCounter::new(AtomicU64::new(0)),
- naka_submitted_vrfs: RunLoopCounter::new(AtomicU64::new(0)),
- naka_submitted_commits: RunLoopCounter::new(AtomicU64::new(0)),
- naka_mined_blocks: RunLoopCounter::new(AtomicU64::new(0)),
- naka_mined_tenures: RunLoopCounter::new(AtomicU64::new(0)),
- }
- }
-
- #[cfg(not(test))]
- pub fn new() -> Counters {
- Counters {
- blocks_processed: (),
- microblocks_processed: (),
- missed_tenures: (),
- missed_microblock_tenures: (),
- cancelled_commits: (),
- naka_submitted_vrfs: (),
- naka_submitted_commits: (),
- naka_mined_blocks: (),
- naka_mined_tenures: (),
- }
+ pub fn new() -> Self {
+ Self::default()
}
#[cfg(test)]
fn inc(ctr: &RunLoopCounter) {
- ctr.fetch_add(1, Ordering::SeqCst);
+ ctr.0.fetch_add(1, Ordering::SeqCst);
}
#[cfg(not(test))]
@@ -113,7 +109,7 @@ impl Counters {
#[cfg(test)]
fn set(ctr: &RunLoopCounter, value: u64) {
- ctr.store(value, Ordering::SeqCst);
+ ctr.0.store(value, Ordering::SeqCst);
}
#[cfg(not(test))]
@@ -151,6 +147,10 @@ impl Counters {
Counters::inc(&self.naka_mined_blocks);
}
+ pub fn bump_naka_proposed_blocks(&self) {
+ Counters::inc(&self.naka_proposed_blocks);
+ }
+
pub fn bump_naka_mined_tenures(&self) {
Counters::inc(&self.naka_mined_tenures);
}
@@ -217,7 +217,7 @@ impl RunLoop {
globals: None,
coordinator_channels: Some(channels),
callbacks: RunLoopCallbacks::new(),
- counters: Counters::new(),
+ counters: Counters::default(),
should_keep_running,
event_dispatcher,
pox_watchdog: None,
@@ -489,23 +489,10 @@ impl RunLoop {
.map(|e| (e.address.clone(), e.amount))
.collect();
- // TODO: delete this once aggregate public key voting is working
- let agg_pubkey_boot_callback = if let Some(self_signer) = self.config.self_signing() {
- let agg_pub_key = self_signer.aggregate_public_key.clone();
- info!("Neon node setting agg public key"; "agg_pub_key" => %to_hex(&agg_pub_key.compress().data));
- let callback = Box::new(move |clarity_tx: &mut ClarityTx| {
- NakamotoChainState::aggregate_public_key_bootcode(clarity_tx, &agg_pub_key)
-        }) as Box<dyn FnOnce(&mut ClarityTx)>;
- Some(callback)
- } else {
- debug!("Neon node booting with no aggregate public key. Must have signers available to sign blocks.");
- None
- };
-
// instantiate chainstate
let mut boot_data = ChainStateBootData {
initial_balances,
- post_flight_callback: agg_pubkey_boot_callback,
+ post_flight_callback: None,
first_burnchain_block_hash: burnchain_config.first_block_hash,
first_burnchain_block_height: burnchain_config.first_block_height as u32,
first_burnchain_block_timestamp: burnchain_config.first_block_timestamp,
diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs
index d432592352..16b0583b4a 100644
--- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs
+++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs
@@ -13,21 +13,24 @@
//
// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+use std::collections::{HashMap, HashSet};
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::{Arc, Mutex};
+use std::thread::JoinHandle;
use std::time::{Duration, Instant};
use std::{env, thread};
use clarity::vm::ast::ASTRules;
use clarity::vm::costs::ExecutionCost;
-use clarity::vm::types::PrincipalData;
+use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier};
use http_types::headers::AUTHORIZATION;
use lazy_static::lazy_static;
-use libsigner::{SignerSession, StackerDBSession};
+use libsigner::{BlockResponse, SignerMessage, SignerSession, StackerDBSession};
use stacks::burnchains::MagicBytes;
use stacks::chainstate::burn::db::sortdb::SortitionDB;
use stacks::chainstate::coordinator::comm::CoordinatorChannels;
use stacks::chainstate::nakamoto::miner::NakamotoBlockBuilder;
+use stacks::chainstate::nakamoto::signer_set::NakamotoSigners;
use stacks::chainstate::nakamoto::test_signers::TestSigners;
use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState};
use stacks::chainstate::stacks::address::PoxAddress;
@@ -43,6 +46,7 @@ use stacks::core::{
PEER_VERSION_EPOCH_2_1, PEER_VERSION_EPOCH_2_2, PEER_VERSION_EPOCH_2_3, PEER_VERSION_EPOCH_2_4,
PEER_VERSION_EPOCH_2_5, PEER_VERSION_EPOCH_3_0,
};
+use stacks::libstackerdb::{SlotMetadata, StackerDBChunkData};
use stacks::net::api::callreadonly::CallReadOnlyRequestBody;
use stacks::net::api::getstackers::GetStackersResponse;
use stacks::net::api::postblock_proposal::{
@@ -58,8 +62,8 @@ use stacks_common::consts::{CHAIN_ID_TESTNET, STACKS_EPOCH_MAX};
use stacks_common::types::chainstate::{
BlockHeaderHash, StacksAddress, StacksPrivateKey, StacksPublicKey,
};
-use stacks_common::util::hash::to_hex;
-use stacks_common::util::secp256k1::{MessageSignature, Secp256k1PrivateKey};
+use stacks_common::util::hash::{to_hex, Sha512Trunc256Sum};
+use stacks_common::util::secp256k1::{MessageSignature, Secp256k1PrivateKey, Secp256k1PublicKey};
use super::bitcoin_regtest::BitcoinCoreController;
use crate::config::{EventKeyType, EventObserverConfig, InitialBalance};
@@ -157,6 +161,32 @@ pub fn get_stacker_set(http_origin: &str, cycle: u64) -> GetStackersResponse {
res
}
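+/// Query the node's StackerDB metadata endpoint and return the current version of `slot_id`, if the slot exists.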
+pub fn get_stackerdb_slot_version(
+ http_origin: &str,
+ contract: &QualifiedContractIdentifier,
+ slot_id: u64,
+) -> Option<u32> {
+ let client = reqwest::blocking::Client::new();
+ let path = format!(
+ "{http_origin}/v2/stackerdb/{}/{}",
+ &contract.issuer, &contract.name
+ );
+ let res = client
+ .get(&path)
+ .send()
+ .unwrap()
+        .json::<Vec<SlotMetadata>>()
+ .unwrap();
+ debug!("StackerDB metadata response: {res:?}");
+ res.iter().find_map(|slot| {
+ if u64::from(slot.slot_id) == slot_id {
+ Some(slot.slot_version)
+ } else {
+ None
+ }
+ })
+}
+
pub fn add_initial_balances(
conf: &mut Config,
accounts: usize,
@@ -174,6 +204,114 @@ pub fn add_initial_balances(
.collect()
}
+/// Spawn a blind signing thread. `signer` is the private key
+/// of the individual signer who broadcasts the response to the StackerDB
+pub fn blind_signer(
+ conf: &Config,
+ signers: &TestSigners,
+ signer: &Secp256k1PrivateKey,
+ proposals_count: RunLoopCounter,
+) -> JoinHandle<()> {
+ let mut signed_blocks = HashSet::new();
+ let conf = conf.clone();
+ let signers = signers.clone();
+ let signer = signer.clone();
+ let mut last_count = proposals_count.load(Ordering::SeqCst);
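+    // Poll the proposals counter; whenever it advances, fetch and sign the newest proposal.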
+ thread::spawn(move || loop {
+ thread::sleep(Duration::from_millis(100));
+ let cur_count = proposals_count.load(Ordering::SeqCst);
+ if cur_count <= last_count {
+ continue;
+ }
+ last_count = cur_count;
+ match read_and_sign_block_proposal(&conf, &signers, &signer, &signed_blocks) {
+ Ok(signed_block) => {
+ if signed_blocks.contains(&signed_block) {
+ continue;
+ }
+ info!("Signed block"; "signer_sig_hash" => signed_block.to_hex());
+ signed_blocks.insert(signed_block);
+ }
+ Err(e) => {
+ warn!("Error reading and signing block proposal: {e}");
+ }
+ }
+ })
+}
+
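+/// Fetch the miner's latest block proposal from the .miners StackerDB, sign it with the test signer set,
+/// and publish the acceptance to the signers StackerDB. Returns the proposal's signer signature hash.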
+pub fn read_and_sign_block_proposal(
+ conf: &Config,
+ signers: &TestSigners,
+ signer: &Secp256k1PrivateKey,
+    signed_blocks: &HashSet<Sha512Trunc256Sum>,
+) -> Result<Sha512Trunc256Sum, String> {
+ let burnchain = conf.get_burnchain();
+ let sortdb = burnchain.open_sortition_db(true).unwrap();
+ let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap();
+ let miner_pubkey = StacksPublicKey::from_private(&conf.get_miner_config().mining_key.unwrap());
+ let miner_slot_id = NakamotoChainState::get_miner_slot(&sortdb, &tip, &miner_pubkey)
+ .map_err(|_| "Unable to get miner slot")?
+ .ok_or("No miner slot exists")?;
+ let reward_cycle = burnchain
+ .block_height_to_reward_cycle(tip.block_height)
+ .unwrap();
+
+ let mut proposed_block: NakamotoBlock = {
+ let miner_contract_id = boot_code_id(MINERS_NAME, false);
+ let mut miners_stackerdb = StackerDBSession::new(&conf.node.rpc_bind, miner_contract_id);
+ miners_stackerdb
+ .get_latest(miner_slot_id)
+ .map_err(|_| "Failed to get latest chunk from the miner slot ID")?
+ .ok_or("No chunk found")?
+ };
+ let proposed_block_hash = format!("0x{}", proposed_block.header.block_hash());
+ let signer_sig_hash = proposed_block.header.signer_signature_hash();
+ if signed_blocks.contains(&signer_sig_hash) {
+ // already signed off on this block, don't sign again.
+ return Ok(signer_sig_hash);
+ }
+
+ info!(
+ "Fetched proposed block from .miners StackerDB";
+ "proposed_block_hash" => &proposed_block_hash,
+ "signer_sig_hash" => &signer_sig_hash.to_hex(),
+ );
+
+ signers
+ .clone()
+ .sign_nakamoto_block(&mut proposed_block, reward_cycle);
+
+ let signer_message = SignerMessage::BlockResponse(BlockResponse::Accepted((
+ signer_sig_hash.clone(),
+ proposed_block.header.signer_signature.clone(),
+ )));
+
+ let signers_contract_id =
+ NakamotoSigners::make_signers_db_contract_id(reward_cycle, libsigner::BLOCK_MSG_ID, false);
+
+ let http_origin = format!("http://{}", &conf.node.rpc_bind);
+ let signers_info = get_stacker_set(&http_origin, reward_cycle);
+ let signer_index = get_signer_index(&signers_info, &Secp256k1PublicKey::from_private(signer))
+ .unwrap()
+ .try_into()
+ .unwrap();
+
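+    // Write with a version one higher than the slot's current version, or 0 if the slot has never been written.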
+ let next_version = get_stackerdb_slot_version(&http_origin, &signers_contract_id, signer_index)
+ .map(|x| x + 1)
+ .unwrap_or(0);
+ let mut signers_contract_sess = StackerDBSession::new(&conf.node.rpc_bind, signers_contract_id);
+ let mut chunk_to_put = StackerDBChunkData::new(
+ u32::try_from(signer_index).unwrap(),
+ next_version,
+ signer_message.serialize_to_vec(),
+ );
+ chunk_to_put.sign(signer).unwrap();
+ signers_contract_sess
+ .put_chunk(&chunk_to_put)
+ .map_err(|e| e.to_string())?;
+ Ok(signer_sig_hash)
+}
+
/// Return a working nakamoto-neon config and the miner's bitcoin address to fund
pub fn naka_neon_integration_conf(seed: Option<&[u8]>) -> (Config, StacksAddress) {
let mut conf = super::new_test_conf();
@@ -192,7 +330,6 @@ pub fn naka_neon_integration_conf(seed: Option<&[u8]>) -> (Config, StacksAddress
let mining_key = Secp256k1PrivateKey::from_seed(&[1]);
conf.miner.mining_key = Some(mining_key);
- conf.miner.self_signing_key = Some(TestSigners::default());
conf.node.miner = true;
conf.node.wait_time_for_microblocks = 500;
@@ -358,9 +495,10 @@ pub fn setup_stacker(naka_conf: &mut Config) -> Secp256k1PrivateKey {
/// for pox-4 to activate
pub fn boot_to_epoch_3(
naka_conf: &Config,
- blocks_processed: &RunLoopCounter,
+    blocks_processed: &Arc<AtomicU64>,
stacker_sks: &[StacksPrivateKey],
signer_sks: &[StacksPrivateKey],
+ self_signing: Option<&TestSigners>,
btc_regtest_controller: &mut BitcoinRegtestController,
) {
assert_eq!(stacker_sks.len(), signer_sks.len());
@@ -442,25 +580,29 @@ pub fn boot_to_epoch_3(
&naka_conf,
);
- // If we are self-signing, then we need to vote on the aggregate public key
- if let Some(mut signers) = naka_conf.self_signing() {
+ // We need to vote on the aggregate public key if this test is self signing
+ if let Some(signers) = self_signing {
// Get the aggregate key
- let aggregate_key = signers.generate_aggregate_key(reward_cycle + 1);
+ let aggregate_key = signers.clone().generate_aggregate_key(reward_cycle + 1);
let aggregate_public_key =
clarity::vm::Value::buff_from(aggregate_key.compress().data.to_vec())
.expect("Failed to serialize aggregate public key");
-
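+        // Deduplicate signer keys so each distinct signer submits exactly one vote.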
+ let signer_sks_unique: HashMap<_, _> = signer_sks.iter().map(|x| (x.to_hex(), x)).collect();
+ let signer_set = get_stacker_set(&http_origin, reward_cycle + 1);
// Vote on the aggregate public key
- for (i, signer_sk) in signer_sks.iter().enumerate() {
+ for signer_sk in signer_sks_unique.values() {
+ let signer_index =
+ get_signer_index(&signer_set, &Secp256k1PublicKey::from_private(signer_sk))
+ .unwrap();
let voting_tx = tests::make_contract_call(
- &signer_sk,
+ signer_sk,
0,
300,
&StacksAddress::burn_address(false),
SIGNERS_VOTING_NAME,
SIGNERS_VOTING_FUNCTION_NAME,
&[
- clarity::vm::Value::UInt(i as u128),
+ clarity::vm::Value::UInt(u128::try_from(signer_index).unwrap()),
aggregate_public_key.clone(),
clarity::vm::Value::UInt(0),
clarity::vm::Value::UInt(reward_cycle as u128 + 1),
@@ -480,6 +622,32 @@ pub fn boot_to_epoch_3(
info!("Bootstrapped to Epoch-3.0 boundary, Epoch2x miner should stop");
}
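+/// Find the index of `signer_key` within the reward cycle's signer set, erroring if the set is empty or the key is absent.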
+fn get_signer_index(
+ stacker_set: &GetStackersResponse,
+ signer_key: &Secp256k1PublicKey,
+) -> Result<usize, String> {
+ let Some(ref signer_set) = stacker_set.stacker_set.signers else {
+ return Err("Empty signer set for reward cycle".into());
+ };
+ let signer_key_bytes = signer_key.to_bytes_compressed();
+ signer_set
+ .iter()
+ .enumerate()
+ .find_map(|(ix, entry)| {
+ if entry.signing_key.as_slice() == signer_key_bytes.as_slice() {
+ Some(ix)
+ } else {
+ None
+ }
+ })
+ .ok_or_else(|| {
+ format!(
+                "Signing key {} not found in the signer set.",
+ to_hex(&signer_key_bytes)
+ )
+ })
+}
+
fn is_key_set_for_cycle(
reward_cycle: u64,
is_mainnet: bool,
@@ -520,63 +688,62 @@ fn signer_vote_if_needed(
btc_regtest_controller: &BitcoinRegtestController,
naka_conf: &Config,
signer_sks: &[StacksPrivateKey], // TODO: Is there some way to get this from the TestSigners?
+ signers: &TestSigners,
) {
- if let Some(mut signers) = naka_conf.self_signing() {
- // When we reach the next prepare phase, submit new voting transactions
- let block_height = btc_regtest_controller.get_headers_height();
- let reward_cycle = btc_regtest_controller
- .get_burnchain()
- .block_height_to_reward_cycle(block_height)
- .unwrap();
- let prepare_phase_start = btc_regtest_controller
- .get_burnchain()
- .pox_constants
- .prepare_phase_start(
- btc_regtest_controller.get_burnchain().first_block_height,
- reward_cycle,
- );
+ // When we reach the next prepare phase, submit new voting transactions
+ let block_height = btc_regtest_controller.get_headers_height();
+ let reward_cycle = btc_regtest_controller
+ .get_burnchain()
+ .block_height_to_reward_cycle(block_height)
+ .unwrap();
+ let prepare_phase_start = btc_regtest_controller
+ .get_burnchain()
+ .pox_constants
+ .prepare_phase_start(
+ btc_regtest_controller.get_burnchain().first_block_height,
+ reward_cycle,
+ );
- if block_height >= prepare_phase_start {
- // If the key is already set, do nothing.
- if is_key_set_for_cycle(
- reward_cycle + 1,
- naka_conf.is_mainnet(),
- &naka_conf.node.rpc_bind,
- )
- .unwrap_or(false)
- {
- return;
- }
+ if block_height >= prepare_phase_start {
+ // If the key is already set, do nothing.
+ if is_key_set_for_cycle(
+ reward_cycle + 1,
+ naka_conf.is_mainnet(),
+ &naka_conf.node.rpc_bind,
+ )
+ .unwrap_or(false)
+ {
+ return;
+ }
- // If we are self-signing, then we need to vote on the aggregate public key
- let http_origin = format!("http://{}", &naka_conf.node.rpc_bind);
-
- // Get the aggregate key
- let aggregate_key = signers.generate_aggregate_key(reward_cycle + 1);
- let aggregate_public_key =
- clarity::vm::Value::buff_from(aggregate_key.compress().data.to_vec())
- .expect("Failed to serialize aggregate public key");
-
- for (i, signer_sk) in signer_sks.iter().enumerate() {
- let signer_nonce = get_account(&http_origin, &to_addr(signer_sk)).nonce;
-
- // Vote on the aggregate public key
- let voting_tx = tests::make_contract_call(
- &signer_sk,
- signer_nonce,
- 300,
- &StacksAddress::burn_address(false),
- SIGNERS_VOTING_NAME,
- SIGNERS_VOTING_FUNCTION_NAME,
- &[
- clarity::vm::Value::UInt(i as u128),
- aggregate_public_key.clone(),
- clarity::vm::Value::UInt(0),
- clarity::vm::Value::UInt(reward_cycle as u128 + 1),
- ],
- );
- submit_tx(&http_origin, &voting_tx);
- }
+ // If we are self-signing, then we need to vote on the aggregate public key
+ let http_origin = format!("http://{}", &naka_conf.node.rpc_bind);
+        // Self-signing tests vote on the aggregate public key for the next reward cycle
+ // Get the aggregate key
+ let aggregate_key = signers.clone().generate_aggregate_key(reward_cycle + 1);
+ let aggregate_public_key =
+ clarity::vm::Value::buff_from(aggregate_key.compress().data.to_vec())
+ .expect("Failed to serialize aggregate public key");
+
+ for (i, signer_sk) in signer_sks.iter().enumerate() {
+ let signer_nonce = get_account(&http_origin, &to_addr(signer_sk)).nonce;
+
+ // Vote on the aggregate public key
+ let voting_tx = tests::make_contract_call(
+ &signer_sk,
+ signer_nonce,
+ 300,
+ &StacksAddress::burn_address(false),
+ SIGNERS_VOTING_NAME,
+ "vote-for-aggregate-public-key",
+ &[
+ clarity::vm::Value::UInt(i as u128),
+ aggregate_public_key.clone(),
+ clarity::vm::Value::UInt(0),
+ clarity::vm::Value::UInt(reward_cycle as u128 + 1),
+ ],
+ );
+ submit_tx(&http_origin, &voting_tx);
}
}
}
@@ -587,7 +754,7 @@ fn signer_vote_if_needed(
/// * `signer_pks` - must be the same size as `stacker_sks`
pub fn boot_to_epoch_3_reward_set(
naka_conf: &Config,
- blocks_processed: &RunLoopCounter,
+    blocks_processed: &Arc<AtomicU64>,
stacker_sks: &[StacksPrivateKey],
signer_sks: &[StacksPrivateKey],
btc_regtest_controller: &mut BitcoinRegtestController,
@@ -695,6 +862,7 @@ fn simple_neon_integration() {
return;
}
+ let signers = TestSigners::default();
let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None);
let prom_bind = format!("{}:{}", "127.0.0.1", 6000);
naka_conf.node.prometheus_bind = Some(prom_bind.clone());
@@ -737,6 +905,7 @@ fn simple_neon_integration() {
blocks_processed,
naka_submitted_vrfs: vrfs_submitted,
naka_submitted_commits: commits_submitted,
+ naka_proposed_blocks: proposals_submitted,
..
} = run_loop.counters();
@@ -749,6 +918,7 @@ fn simple_neon_integration() {
&blocks_processed,
&[stacker_sk],
&[sender_signer_sk],
+ Some(&signers),
&mut btc_regtest_controller,
);
@@ -786,6 +956,8 @@ fn simple_neon_integration() {
}
info!("Nakamoto miner started...");
+ blind_signer(&naka_conf, &signers, &sender_signer_sk, proposals_submitted);
+
// first block wakes up the run loop, wait until a key registration has been submitted.
next_block_and(&mut btc_regtest_controller, 60, || {
let vrf_count = vrfs_submitted.load(Ordering::SeqCst);
@@ -810,7 +982,12 @@ fn simple_neon_integration() {
)
.unwrap();
- signer_vote_if_needed(&btc_regtest_controller, &naka_conf, &[sender_signer_sk]);
+ signer_vote_if_needed(
+ &btc_regtest_controller,
+ &naka_conf,
+ &[sender_signer_sk],
+ &signers,
+ );
}
// Submit a TX
@@ -847,7 +1024,12 @@ fn simple_neon_integration() {
)
.unwrap();
- signer_vote_if_needed(&btc_regtest_controller, &naka_conf, &[sender_signer_sk]);
+ signer_vote_if_needed(
+ &btc_regtest_controller,
+ &naka_conf,
+ &[sender_signer_sk],
+ &signers,
+ );
}
// load the chain tip, and assert that it is a nakamoto block and at least 30 blocks have advanced in epoch 3
@@ -919,6 +1101,7 @@ fn mine_multiple_per_tenure_integration() {
return;
}
+ let signers = TestSigners::default();
let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None);
let http_origin = format!("http://{}", &naka_conf.node.rpc_bind);
naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1);
@@ -963,6 +1146,7 @@ fn mine_multiple_per_tenure_integration() {
blocks_processed,
naka_submitted_vrfs: vrfs_submitted,
naka_submitted_commits: commits_submitted,
+ naka_proposed_blocks: proposals_submitted,
..
} = run_loop.counters();
@@ -978,6 +1162,7 @@ fn mine_multiple_per_tenure_integration() {
&blocks_processed,
&[stacker_sk],
&[sender_signer_sk],
+ Some(&signers),
&mut btc_regtest_controller,
);
@@ -1000,6 +1185,8 @@ fn mine_multiple_per_tenure_integration() {
.stacks_block_height;
info!("Nakamoto miner started...");
+ blind_signer(&naka_conf, &signers, &sender_signer_sk, proposals_submitted);
+
// first block wakes up the run loop, wait until a key registration has been submitted.
next_block_and(&mut btc_regtest_controller, 60, || {
let vrf_count = vrfs_submitted.load(Ordering::SeqCst);
@@ -1096,6 +1283,7 @@ fn correct_burn_outs() {
return;
}
+ let signers = TestSigners::default();
let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None);
naka_conf.burnchain.pox_reward_length = Some(10);
naka_conf.burnchain.pox_prepare_length = Some(3);
@@ -1152,6 +1340,7 @@ fn correct_burn_outs() {
blocks_processed,
naka_submitted_vrfs: vrfs_submitted,
naka_submitted_commits: commits_submitted,
+ naka_proposed_blocks: proposals_submitted,
..
} = run_loop.counters();
@@ -1283,7 +1472,12 @@ fn correct_burn_outs() {
&naka_conf,
);
- signer_vote_if_needed(&btc_regtest_controller, &naka_conf, &[sender_signer_sk]);
+ signer_vote_if_needed(
+ &btc_regtest_controller,
+ &naka_conf,
+ &[sender_signer_sk],
+ &signers,
+ );
run_until_burnchain_height(
&mut btc_regtest_controller,
@@ -1293,6 +1487,7 @@ fn correct_burn_outs() {
);
info!("Bootstrapped to Epoch-3.0 boundary, Epoch2x miner should stop");
+ blind_signer(&naka_conf, &signers, &sender_signer_sk, proposals_submitted);
// we should already be able to query the stacker set via RPC
let burnchain = naka_conf.get_burnchain();
@@ -1354,7 +1549,12 @@ fn correct_burn_outs() {
"The new burnchain tip must have been processed"
);
- signer_vote_if_needed(&btc_regtest_controller, &naka_conf, &[sender_signer_sk]);
+ signer_vote_if_needed(
+ &btc_regtest_controller,
+ &naka_conf,
+ &[sender_signer_sk],
+ &signers,
+ );
}
coord_channel
@@ -1402,6 +1602,7 @@ fn block_proposal_api_endpoint() {
return;
}
+ let signers = TestSigners::default();
let (mut conf, _miner_account) = naka_neon_integration_conf(None);
let password = "12345".to_string();
conf.connection_options.block_proposal_token = Some(password.clone());
@@ -1435,6 +1636,7 @@ fn block_proposal_api_endpoint() {
blocks_processed,
naka_submitted_vrfs: vrfs_submitted,
naka_submitted_commits: commits_submitted,
+ naka_proposed_blocks: proposals_submitted,
..
} = run_loop.counters();
@@ -1447,10 +1649,12 @@ fn block_proposal_api_endpoint() {
&blocks_processed,
&[stacker_sk],
&[sender_signer_sk],
+ Some(&signers),
&mut btc_regtest_controller,
);
info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner");
+ blind_signer(&conf, &signers, &sender_signer_sk, proposals_submitted);
let burnchain = conf.get_burnchain();
let sortdb = burnchain.open_sortition_db(true).unwrap();
@@ -1498,9 +1702,6 @@ fn block_proposal_api_endpoint() {
// TODO (hack) instantiate the sortdb in the burnchain
_ = btc_regtest_controller.sortdb_mut();
- // Set up test signer
- let signer = conf.miner.self_signing_key.as_mut().unwrap();
-
// ----- Setup boilerplate finished, test block proposal API endpoint -----
let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb)
@@ -1529,19 +1730,13 @@ fn block_proposal_api_endpoint() {
_ => None,
});
- // Apply both miner/stacker signatures
- let mut sign = |mut p: NakamotoBlockProposal| {
+ // Apply miner signature
+ let sign = |p: &NakamotoBlockProposal| {
+ let mut p = p.clone();
p.block
.header
.sign_miner(&privk)
.expect("Miner failed to sign");
- let burn_height = burnchain
- .get_highest_burnchain_block()
- .unwrap()
- .unwrap()
- .block_height;
- let cycle = burnchain.block_height_to_reward_cycle(burn_height).unwrap();
- signer.sign_nakamoto_block(&mut p.block, cycle);
p
};
@@ -1600,15 +1795,15 @@ fn block_proposal_api_endpoint() {
let test_cases = [
(
"Valid Nakamoto block proposal",
- sign(proposal.clone()),
+ sign(&proposal),
HTTP_ACCEPTED,
Some(Ok(())),
),
- ("Must wait", sign(proposal.clone()), HTTP_TOO_MANY, None),
+ ("Must wait", sign(&proposal), HTTP_TOO_MANY, None),
(
"Corrupted (bit flipped after signing)",
(|| {
- let mut sp = sign(proposal.clone());
+ let mut sp = sign(&proposal);
sp.block.header.consensus_hash.0[3] ^= 0x07;
sp
})(),
@@ -1620,7 +1815,7 @@ fn block_proposal_api_endpoint() {
(|| {
let mut p = proposal.clone();
p.chain_id ^= 0xFFFFFFFF;
- sign(p)
+ sign(&p)
})(),
HTTP_ACCEPTED,
Some(Err(ValidateRejectCode::InvalidBlock)),
@@ -1628,19 +1823,14 @@ fn block_proposal_api_endpoint() {
(
"Invalid `miner_signature`",
(|| {
- let mut sp = sign(proposal.clone());
+ let mut sp = sign(&proposal);
sp.block.header.miner_signature.0[1] ^= 0x80;
sp
})(),
HTTP_ACCEPTED,
Some(Err(ValidateRejectCode::ChainstateError)),
),
- (
- "Not authorized",
- sign(proposal.clone()),
- HTTP_NOT_AUTHORIZED,
- None,
- ),
+ ("Not authorized", sign(&proposal), HTTP_NOT_AUTHORIZED, None),
];
// Build HTTP client
@@ -1773,6 +1963,7 @@ fn miner_writes_proposed_block_to_stackerdb() {
return;
}
+ let signers = TestSigners::default();
let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None);
naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1000);
let sender_sk = Secp256k1PrivateKey::new();
@@ -1813,6 +2004,7 @@ fn miner_writes_proposed_block_to_stackerdb() {
blocks_processed,
naka_submitted_vrfs: vrfs_submitted,
naka_submitted_commits: commits_submitted,
+ naka_proposed_blocks: proposals_submitted,
..
} = run_loop.counters();
@@ -1825,10 +2017,12 @@ fn miner_writes_proposed_block_to_stackerdb() {
&blocks_processed,
&[stacker_sk],
&[sender_signer_sk],
+ Some(&signers),
&mut btc_regtest_controller,
);
info!("Nakamoto miner started...");
+ blind_signer(&naka_conf, &signers, &sender_signer_sk, proposals_submitted);
// first block wakes up the run loop, wait until a key registration has been submitted.
next_block_and(&mut btc_regtest_controller, 60, || {
let vrf_count = vrfs_submitted.load(Ordering::SeqCst);
@@ -1860,21 +2054,15 @@ fn miner_writes_proposed_block_to_stackerdb() {
.expect("Unable to get miner slot")
.expect("No miner slot exists");
- let chunk = std::thread::spawn(move || {
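+    // `get_latest` fetches the newest chunk and deserializes it via the StacksMessageCodec.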
+ let proposed_block: NakamotoBlock = {
let miner_contract_id = boot_code_id(MINERS_NAME, false);
let mut miners_stackerdb =
StackerDBSession::new(&naka_conf.node.rpc_bind, miner_contract_id);
miners_stackerdb
- .get_latest_chunk(slot_id)
+ .get_latest(slot_id)
.expect("Failed to get latest chunk from the miner slot ID")
.expect("No chunk found")
- })
- .join()
- .expect("Failed to join chunk handle");
-
- // We should now successfully deserialize a chunk
- let proposed_block = NakamotoBlock::consensus_deserialize(&mut &chunk[..])
- .expect("Failed to deserialize chunk into block");
+ };
let proposed_block_hash = format!("0x{}", proposed_block.header.block_hash());
let mut proposed_zero_block = proposed_block.clone();
diff --git a/testnet/stacks-node/src/tests/signer.rs b/testnet/stacks-node/src/tests/signer.rs
index e0c2d27da9..54e851be9f 100644
--- a/testnet/stacks-node/src/tests/signer.rs
+++ b/testnet/stacks-node/src/tests/signer.rs
@@ -104,7 +104,6 @@ impl SignerTest {
        .collect::<Vec<_>>();
let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None);
- naka_conf.miner.self_signing_key = None;
// So the combination is... one, two, three, four, five? That's the stupidest combination I've ever heard in my life!
// That's the kind of thing an idiot would have on his luggage!
let password = "12345";
@@ -891,9 +890,9 @@ fn setup_stx_btc_node(
btc_regtest_controller,
run_loop_thread,
run_loop_stopper,
- vrfs_submitted,
- commits_submitted,
- blocks_processed,
+ vrfs_submitted: vrfs_submitted.0,
+ commits_submitted: commits_submitted.0,
+ blocks_processed: blocks_processed.0,
coord_channel,
conf: naka_conf,
}