From 58019925e47fb3434f5baf9650563e113bd981ce Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 17 Jan 2024 16:16:42 -0500 Subject: [PATCH 1/2] Merge master to develop --- .github/workflows/bitcoin-tests.yml | 1 + CHANGELOG.md | 21 + CODE_OF_CONDUCT.md | 3 + Cargo.lock | 8 +- README.md | 2 +- clarity/src/vm/docs/mod.rs | 2 +- .../get_unconfirmed_block_commmits.py | 134 ++ stackslib/src/burnchains/burnchain.rs | 4 +- stackslib/src/chainstate/stacks/db/blocks.rs | 22 + stackslib/src/chainstate/stacks/miner.rs | 1 + stackslib/src/chainstate/stacks/mod.rs | 2 +- .../stacks/tests/block_construction.rs | 1 - stackslib/src/core/mempool.rs | 115 +- stackslib/src/core/tests/mod.rs | 174 ++- stackslib/src/cost_estimates/fee_scalar.rs | 24 +- stackslib/src/main.rs | 1 - stackslib/src/net/httpcore.rs | 19 +- stackslib/src/net/rpc.rs | 14 +- testnet/stacks-node/Cargo.toml | 4 +- .../burnchains/bitcoin_regtest_controller.rs | 84 +- testnet/stacks-node/src/chain_data.rs | 1088 +++++++++++++++++ testnet/stacks-node/src/config.rs | 184 ++- testnet/stacks-node/src/main.rs | 252 +++- testnet/stacks-node/src/neon_node.rs | 1038 ++++++++++++++-- testnet/stacks-node/src/run_loop/mod.rs | 2 +- testnet/stacks-node/src/run_loop/neon.rs | 19 +- .../stacks-node/src/tests/bitcoin_regtest.rs | 1 - testnet/stacks-node/src/tests/epoch_205.rs | 1 - testnet/stacks-node/src/tests/epoch_21.rs | 2 - testnet/stacks-node/src/tests/epoch_22.rs | 2 - testnet/stacks-node/src/tests/epoch_23.rs | 1 - testnet/stacks-node/src/tests/epoch_24.rs | 2 - testnet/stacks-node/src/tests/integrations.rs | 1 - testnet/stacks-node/src/tests/mod.rs | 350 +++++- .../src/tests/neon_integrations.rs | 356 +++++- 35 files changed, 3689 insertions(+), 246 deletions(-) create mode 100755 contrib/miner-queries/get_unconfirmed_block_commmits.py create mode 100644 testnet/stacks-node/src/chain_data.rs diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 4acac1c8a0..babcbfda46 100644 --- 
a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -68,6 +68,7 @@ jobs: - tests::neon_integrations::test_problematic_microblocks_are_not_relayed_or_stored - tests::neon_integrations::test_problematic_txs_are_not_stored - tests::neon_integrations::use_latest_tip_integration_test + - tests::neon_integrations::min_txs - tests::should_succeed_handling_malformed_and_valid_txs steps: ## Setup test environment diff --git a/CHANGELOG.md b/CHANGELOG.md index 5ceb41364a..fc21d8eac5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,27 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to the versioning scheme outlined in the [README.md](README.md). +## [2.4.0.0.5] + +This introduces a set of improvements to the Stacks miner behavior. In +particular: +* The VRF public key can be re-used across node restarts. +* Settings that affect mining are hot-reloaded from the config file. They take + effect once the file is updated; there is no longer a need to restart the +node. +* The act of changing the miner settings in the config file automatically + triggers a subsequent block-build attempt, allowing the operator to force the +miner to re-try building blocks. +* This adds a new tip-selection algorithm that minimizes block orphans within a + configurable window of time. +* When configured, the node will automatically stop mining if it is not achieving a + targeted win rate over a configurable window of blocks. +* When configured, the node will selectively mine transactions from only certain + addresses, or only of certain types (STX-transfers, contract-publishes, +contract-calls). +* When configured, the node will optionally only RBF block-commits if it can + produce a block with strictly more transactions. 
+ ## [2.4.0.0.4] This is a high-priority hotfix that addresses a bug in transaction processing which diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index 6d6e5053dd..81f2ed3cc0 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -71,6 +71,9 @@ Community leaders will follow these Community Impact Guidelines in determining t **Consequence**: A permanent ban from any sort of public interaction within the community. +### Secret Code: +The code to the contest is: BITCOINL2 + ## Attribution This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 2.0, diff --git a/Cargo.lock b/Cargo.lock index 58da7992d4..f7e17419fe 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1933,9 +1933,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.149" +version = "0.2.151" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a08173bc88b7955d1b3145aa561539096c421ac8debde8cbc3612ec635fee29b" +checksum = "302d7ab3130588088d277783b1e2d2e10c9e9e4a16dd9050e6ec93fb3e7048f4" [[package]] name = "libflate" @@ -2430,9 +2430,9 @@ dependencies = [ [[package]] name = "pico-args" -version = "0.3.4" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28b9b4df73455c861d7cbf8be42f01d3b373ed7f02e378d55fa84eafc6f638b1" +checksum = "5be167a7af36ee22fe3115051bc51f6e6c7054c9348e28deb4f49bd6f705a315" [[package]] name = "pin-project" diff --git a/README.md b/README.md index 2f1be08873..e61829ff30 100644 --- a/README.md +++ b/README.md @@ -67,7 +67,7 @@ You can observe the state machine in action locally by running: ```bash $ cd testnet/stacks-node -$ cargo run --bin stacks-node -- start --config=./conf/testnet-follower-conf.toml +$ cargo run --bin stacks-node -- start --config ./conf/testnet-follower-conf.toml ``` _On Windows, many tests will fail if the line endings aren't `LF`. 
Please ensure that you are have git's `core.autocrlf` set to `input` when you clone the repository to avoid any potential issues. This is due to the Clarity language currently being sensitive to line endings._ diff --git a/clarity/src/vm/docs/mod.rs b/clarity/src/vm/docs/mod.rs index df117a7bca..034616c0f7 100644 --- a/clarity/src/vm/docs/mod.rs +++ b/clarity/src/vm/docs/mod.rs @@ -2001,7 +2001,7 @@ const DEFINE_TRAIT_API: DefineAPI = DefineAPI { can implement a given trait and then have their contract identifier being passed as a function argument in order to be called dynamically with `contract-call?`. -Traits are defined with a name, and a list functions, defined with a name, a list of argument types, and return type. +Traits are defined with a name, and a list of functions, where each function is defined with a name, a list of argument types, and a return type. In Clarity 1, a trait type can be used to specify the type of a function parameter. A parameter with a trait type can be used as the target of a dynamic `contract-call?`. A principal literal (e.g. `ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM.foo`) diff --git a/contrib/miner-queries/get_unconfirmed_block_commmits.py b/contrib/miner-queries/get_unconfirmed_block_commmits.py new file mode 100755 index 0000000000..c5cee38123 --- /dev/null +++ b/contrib/miner-queries/get_unconfirmed_block_commmits.py @@ -0,0 +1,134 @@ +#!/usr/bin/env python3 +""" +Usage: +This script is designed to be run from the command line. It takes one or more Bitcoin addresses +and outputs the extracted block commit data for these addresses. + +Example command line usage: +python3 get_unconfirmed_block_commmits.py [btcAddress1] [btcAddress2] ... +""" + +import requests +import json +import sys + +def read_api_endpoint(url): + """ + Reads data from the specified API endpoint and returns the response. + + Args: + url (str): The API endpoint URL. + + Returns: + dict: JSON response from the API if successful, otherwise None. 
+ """ + try: + response = requests.get(url) + response.raise_for_status() # Raise an exception for non-200 status codes + return response.json() # Assuming a JSON response + except requests.exceptions.RequestException as e: + return None + +def is_block_commit(txn): + """ + Determines whether a given transaction is a block commit. + + Args: + txn (dict): The transaction data. + + Returns: + bool: True if the transaction is a block commit, otherwise False. + """ + try: + vout = txn['vout'] + + # Verify the number of recipients. + assert(3 <= len(vout) <= 4) + block_commit_txn = vout[0] + to_stacker_txns = vout[1::2] + + # Verify block commit. + # TODO: Add more verification steps if necessary. + assert(block_commit_txn['scriptpubkey_type'] == "op_return") + + # Verify PoX Payouts. + for to_stacker_txn in to_stacker_txns: + # TODO: Add more verification steps if necessary. + assert(to_stacker_txn['scriptpubkey_type'] != "op_return") + + except (Exception, AssertionError): + return False + return True + +MEMPOOL_TXN_API = "https://mempool.space/api/address/{btcAddress}/txs/mempool" +def unconfirmed_block_commit_from_address(btcAddress): + """ + Fetches the first unconfirmed block commit for a given Bitcoin address. + + Args: + btcAddress (str): Bitcoin address. + + Returns: + dict: The first transaction that is a block commit. + """ + url = MEMPOOL_TXN_API.format(btcAddress=btcAddress) + txns = read_api_endpoint(url) + + # Return only the first block commit transaction. This is good enough for now. + for txn in txns: + if is_block_commit(txn): + return txn + +def extracted_block_commit_data(txn): + """ + Extracts data from a block commit transaction. + + Args: + txn (dict): Block commit transaction. + + Returns: + dict: Extracted data from the transaction, or None if extraction fails. 
+ """ + try: + vout_start = 1 + vout_end = len(txn['vout']) - 1 + spent_utxo = txn['vin'][0] + return { + 'txid': txn['txid'], + 'burn': sum(pox_payout['value'] for pox_payout in txn['vout'][vout_start:vout_end]), + 'address': spent_utxo['prevout']['scriptpubkey_address'], + 'pox_addrs': [txn['vout'][i]['scriptpubkey'] for i in range(vout_start,vout_end)], + 'input_txid': spent_utxo['txid'], + 'input_index': spent_utxo['vout'], + } + except Exception as e: + return None + +def block_commit_data(btcAddresses): + """ + Fetches and extracts block commit data for a list of Bitcoin addresses. + + Args: + btcAddresses (list): List of Bitcoin addresses. + + Returns: + list: Extracted block commit data for each address. + """ + return [extracted_block_commit_data(unconfirmed_block_commit_from_address(btcAddress)) \ + for btcAddress in btcAddresses] + +def main(): + """ + Main function to run the script. Takes command line arguments as Bitcoin addresses. + """ + btc_addresses = sys.argv[1:] + if not btc_addresses: + print("No Bitcoin addresses provided. Please provide at least one address.") + return + + # Return the data by printing it to stdout. 
+ data = block_commit_data(btc_addresses) + print(json.dumps([datum for datum in data if datum is not None], indent=1)) + +if __name__ == "__main__": + main() diff --git a/stackslib/src/burnchains/burnchain.rs b/stackslib/src/burnchains/burnchain.rs index 4ba47f804e..babc3da537 100644 --- a/stackslib/src/burnchains/burnchain.rs +++ b/stackslib/src/burnchains/burnchain.rs @@ -297,7 +297,7 @@ impl BurnchainStateTransition { } impl BurnchainSigner { - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn mock_parts( hash_mode: AddressHashMode, num_sigs: usize, @@ -311,7 +311,7 @@ impl BurnchainSigner { BurnchainSigner(repr) } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn new_p2pkh(pubk: &StacksPublicKey) -> BurnchainSigner { BurnchainSigner::mock_parts(AddressHashMode::SerializeP2PKH, 1, vec![pubk.clone()]) } diff --git a/stackslib/src/chainstate/stacks/db/blocks.rs b/stackslib/src/chainstate/stacks/db/blocks.rs index 25dcdc9f33..1bd8188815 100644 --- a/stackslib/src/chainstate/stacks/db/blocks.rs +++ b/stackslib/src/chainstate/stacks/db/blocks.rs @@ -6307,6 +6307,28 @@ impl StacksChainState { query_row(&self.db(), sql, args).map_err(Error::DBError) } + /// Get all possible canonical chain tips + pub fn get_stacks_chain_tips(&self, sortdb: &SortitionDB) -> Result, Error> { + let (consensus_hash, block_bhh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn())?; + let sql = "SELECT * FROM staging_blocks WHERE processed = 1 AND orphaned = 0 AND consensus_hash = ?1 AND anchored_block_hash = ?2"; + let args: &[&dyn ToSql] = &[&consensus_hash, &block_bhh]; + let Some(staging_block): Option = + query_row(&self.db(), sql, args).map_err(Error::DBError)? 
+ else { + return Ok(vec![]); + }; + self.get_stacks_chain_tips_at_height(staging_block.height) + } + + /// Get all Stacks blocks at a given height + pub fn get_stacks_chain_tips_at_height(&self, height: u64) -> Result, Error> { + let sql = + "SELECT * FROM staging_blocks WHERE processed = 1 AND orphaned = 0 AND height = ?1"; + let args: &[&dyn ToSql] = &[&u64_to_sql(height)?]; + query_rows(&self.db(), sql, args).map_err(Error::DBError) + } + /// Get the parent block of `staging_block`. pub fn get_stacks_block_parent( &self, diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index de58760ce6..534f81f725 100644 --- a/stackslib/src/chainstate/stacks/miner.rs +++ b/stackslib/src/chainstate/stacks/miner.rs @@ -105,6 +105,7 @@ impl MinerStatus { pub fn get_spend_amount(&self) -> u64 { return self.spend_amount; } + pub fn set_spend_amount(&mut self, amt: u64) { self.spend_amount = amt; } diff --git a/stackslib/src/chainstate/stacks/mod.rs b/stackslib/src/chainstate/stacks/mod.rs index f7f1243d9c..d0e18721b5 100644 --- a/stackslib/src/chainstate/stacks/mod.rs +++ b/stackslib/src/chainstate/stacks/mod.rs @@ -74,7 +74,7 @@ pub use stacks_common::address::{ }; pub use stacks_common::types::chainstate::{StacksPrivateKey, StacksPublicKey}; -pub const STACKS_BLOCK_VERSION: u8 = 6; +pub const STACKS_BLOCK_VERSION: u8 = 7; pub const STACKS_BLOCK_VERSION_AST_PRECHECK_SIZE: u8 = 1; pub const MAX_BLOCK_LEN: u32 = 2 * 1024 * 1024; diff --git a/stackslib/src/chainstate/stacks/tests/block_construction.rs b/stackslib/src/chainstate/stacks/tests/block_construction.rs index 24de63a676..c81a57b098 100644 --- a/stackslib/src/chainstate/stacks/tests/block_construction.rs +++ b/stackslib/src/chainstate/stacks/tests/block_construction.rs @@ -4697,7 +4697,6 @@ fn paramaterized_mempool_walk_test( let b_2 = make_block(&mut chainstate, ConsensusHash([0x2; 20]), &b_1, 2, 2); let mut mempool_settings = MemPoolWalkSettings::default(); - 
mempool_settings.min_tx_fee = 10; let mut tx_events = Vec::new(); let txs = codec_all_transactions( diff --git a/stackslib/src/core/mempool.rs b/stackslib/src/core/mempool.rs index 24ef7e5485..0146065e63 100644 --- a/stackslib/src/core/mempool.rs +++ b/stackslib/src/core/mempool.rs @@ -22,6 +22,7 @@ use std::ops::{Deref, DerefMut}; use std::path::{Path, PathBuf}; use std::time::Instant; use std::{fs, io}; +use std::str::FromStr; use clarity::vm::types::PrincipalData; use rand::distributions::Uniform; @@ -431,10 +432,51 @@ impl MemPoolTxMetadata { } } +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +pub enum MemPoolWalkTxTypes { + TokenTransfer, + SmartContract, + ContractCall, +} + +impl FromStr for MemPoolWalkTxTypes { + type Err = &'static str; + fn from_str(s: &str) -> Result { + match s { + "TokenTransfer" => { + return Ok(Self::TokenTransfer); + } + "SmartContract" => { + return Ok(Self::SmartContract); + } + "ContractCall" => { + return Ok(Self::ContractCall); + } + _ => { + return Err("Unknown mempool tx walk type"); + } + } + } +} + +impl MemPoolWalkTxTypes { + pub fn all() -> HashSet { + [ + MemPoolWalkTxTypes::TokenTransfer, + MemPoolWalkTxTypes::SmartContract, + MemPoolWalkTxTypes::ContractCall, + ] + .into_iter() + .collect() + } + + pub fn only(selected: &[MemPoolWalkTxTypes]) -> HashSet { + selected.iter().map(|x| x.clone()).collect() + } +} + #[derive(Debug, Clone)] pub struct MemPoolWalkSettings { - /// Minimum transaction fee that will be considered - pub min_tx_fee: u64, /// Maximum amount of time a miner will spend walking through mempool transactions, in /// milliseconds. This is a soft deadline. pub max_walk_time_ms: u64, @@ -447,25 +489,43 @@ pub struct MemPoolWalkSettings { /// Size of the candidate cache. These are the candidates that will be retried after each /// transaction is mined. 
pub candidate_retry_cache_size: u64, + /// Types of transactions we'll consider + pub txs_to_consider: HashSet, + /// Origins for transactions that we'll consider + pub filter_origins: HashSet, } impl MemPoolWalkSettings { pub fn default() -> MemPoolWalkSettings { MemPoolWalkSettings { - min_tx_fee: 1, max_walk_time_ms: u64::MAX, consider_no_estimate_tx_prob: 5, nonce_cache_size: 1024 * 1024, candidate_retry_cache_size: 64 * 1024, + txs_to_consider: [ + MemPoolWalkTxTypes::TokenTransfer, + MemPoolWalkTxTypes::SmartContract, + MemPoolWalkTxTypes::ContractCall, + ] + .into_iter() + .collect(), + filter_origins: HashSet::new(), } } pub fn zero() -> MemPoolWalkSettings { MemPoolWalkSettings { - min_tx_fee: 0, max_walk_time_ms: u64::MAX, consider_no_estimate_tx_prob: 5, nonce_cache_size: 1024 * 1024, candidate_retry_cache_size: 64 * 1024, + txs_to_consider: [ + MemPoolWalkTxTypes::TokenTransfer, + MemPoolWalkTxTypes::SmartContract, + MemPoolWalkTxTypes::ContractCall, + ] + .into_iter() + .collect(), + filter_origins: HashSet::new(), } } } @@ -837,8 +897,8 @@ impl<'a> MemPoolTx<'a> { let evict_txid = { let num_recents = MemPoolDB::get_num_recent_txs(&dbtx)?; if num_recents >= MAX_BLOOM_COUNTER_TXS.into() { - // for now, remove lowest-fee tx in the recent tx set. - // TODO: In the future, do it by lowest fee rate + // remove lowest-fee tx (they're paying the least, so replication is + // deprioritized) let sql = "SELECT a.txid FROM mempool AS a LEFT OUTER JOIN removed_txids AS b ON a.txid = b.txid WHERE b.txid IS NULL AND a.height > ?1 ORDER BY a.tx_fee ASC LIMIT 1"; let args: &[&dyn ToSql] = &[&u64_to_sql( height.saturating_sub(BLOOM_COUNTER_DEPTH as u64), @@ -1693,6 +1753,49 @@ impl MemPoolDB { } }; + let (tx_type, do_consider) = match &tx_info.tx.payload { + TransactionPayload::TokenTransfer(..) => ( + "TokenTransfer".to_string(), + settings + .txs_to_consider + .contains(&MemPoolWalkTxTypes::TokenTransfer), + ), + TransactionPayload::SmartContract(..) 
=> ( + "SmartContract".to_string(), + settings + .txs_to_consider + .contains(&MemPoolWalkTxTypes::SmartContract), + ), + TransactionPayload::ContractCall(..) => ( + "ContractCall".to_string(), + settings + .txs_to_consider + .contains(&MemPoolWalkTxTypes::ContractCall), + ), + _ => ("".to_string(), true), + }; + if !do_consider { + debug!("Will skip mempool tx, since it does not have an acceptable type"; + "txid" => %tx_info.tx.txid(), + "type" => %tx_type); + continue; + } + + let do_consider = if settings.filter_origins.len() > 0 { + settings + .filter_origins + .contains(&tx_info.metadata.origin_address) + } else { + true + }; + + if !do_consider { + debug!("Will skip mempool tx, since it does not have an allowed origin"; + "txid" => %tx_info.tx.txid(), + "origin" => %tx_info.metadata.origin_address); + continue; + } + let consider = ConsiderTransaction { tx: tx_info, update_estimate, diff --git a/stackslib/src/core/tests/mod.rs b/stackslib/src/core/tests/mod.rs index 8902ff4cb8..cfa950f1f5 100644 --- a/stackslib/src/core/tests/mod.rs +++ b/stackslib/src/core/tests/mod.rs @@ -58,7 +58,7 @@ use crate::chainstate::stacks::{ C32_ADDRESS_VERSION_TESTNET_SINGLESIG, }; use crate::core::mempool::{ - db_get_all_nonces, MemPoolSyncData, MemPoolWalkSettings, TxTag, BLOOM_COUNTER_DEPTH, + db_get_all_nonces, MemPoolSyncData, MemPoolWalkSettings, MemPoolWalkTxTypes, TxTag, BLOOM_COUNTER_DEPTH, BLOOM_COUNTER_ERROR_RATE, MAX_BLOOM_COUNTER_TXS, }; use crate::core::{FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH}; @@ -259,8 +259,7 @@ fn mempool_walk_over_fork() { // try to walk at b_4, we should be able to find // the transaction at b_1 - let mut mempool_settings = MemPoolWalkSettings::default(); - mempool_settings.min_tx_fee = 10; + let mempool_settings = MemPoolWalkSettings::default(); let mut tx_events = Vec::new(); chainstate.with_read_only_clarity_tx( &TEST_BURN_STATE_DB, @@ -595,7 +594,6 @@ fn test_iterate_candidates_consider_no_estimate_tx_prob() { let b_2 = 
make_block(&mut chainstate, ConsensusHash([0x2; 20]), &b_1, 2, 2); let mut mempool_settings = MemPoolWalkSettings::default(); - mempool_settings.min_tx_fee = 10; let mut tx_events = Vec::new(); let mut txs = codec_all_transactions( @@ -790,8 +788,7 @@ fn test_iterate_candidates_skipped_transaction() { ); let b_2 = make_block(&mut chainstate, ConsensusHash([0x2; 20]), &b_1, 2, 2); - let mut mempool_settings = MemPoolWalkSettings::default(); - mempool_settings.min_tx_fee = 10; + let mempool_settings = MemPoolWalkSettings::default(); let mut tx_events = Vec::new(); let mut txs = codec_all_transactions( @@ -903,8 +900,7 @@ fn test_iterate_candidates_processing_error_transaction() { ); let b_2 = make_block(&mut chainstate, ConsensusHash([0x2; 20]), &b_1, 2, 2); - let mut mempool_settings = MemPoolWalkSettings::default(); - mempool_settings.min_tx_fee = 10; + let mempool_settings = MemPoolWalkSettings::default(); let mut tx_events = Vec::new(); let mut txs = codec_all_transactions( @@ -1018,8 +1014,7 @@ fn test_iterate_candidates_problematic_transaction() { ); let b_2 = make_block(&mut chainstate, ConsensusHash([0x2; 20]), &b_1, 2, 2); - let mut mempool_settings = MemPoolWalkSettings::default(); - mempool_settings.min_tx_fee = 10; + let mempool_settings = MemPoolWalkSettings::default(); let mut tx_events = Vec::new(); let mut txs = codec_all_transactions( @@ -1134,7 +1129,6 @@ fn test_iterate_candidates_concurrent_write_lock() { let b_2 = make_block(&mut chainstate, ConsensusHash([0x2; 20]), &b_1, 2, 2); let mut mempool_settings = MemPoolWalkSettings::default(); - mempool_settings.min_tx_fee = 10; let mut tx_events = Vec::new(); let mut txs = codec_all_transactions( @@ -2648,3 +2642,161 @@ fn test_drop_and_blacklist_txs_by_size() { assert_eq!(num_blacklisted, 5); } + +#[test] +fn test_filter_txs_by_type() { + let mut chainstate = instantiate_chainstate(false, 0x80000000, function_name!()); + let chainstate_path = chainstate_path(function_name!()); + let mut mempool = 
MemPoolDB::open_test(false, 0x80000000, &chainstate_path).unwrap(); + + let addr = StacksAddress { + version: 1, + bytes: Hash160([0xff; 20]), + }; + let mut txs = vec![]; + let block_height = 10; + let mut total_len = 0; + + let b_1 = make_block( + &mut chainstate, + ConsensusHash([0x1; 20]), + &( + FIRST_BURNCHAIN_CONSENSUS_HASH.clone(), + FIRST_STACKS_BLOCK_HASH.clone(), + ), + 1, + 1, + ); + let b_2 = make_block(&mut chainstate, ConsensusHash([0x2; 20]), &b_1, 2, 2); + + let mut mempool_tx = mempool.tx_begin().unwrap(); + for i in 0..10 { + let pk = StacksPrivateKey::new(); + let mut tx = StacksTransaction { + version: TransactionVersion::Testnet, + chain_id: 0x80000000, + auth: TransactionAuth::from_p2pkh(&pk).unwrap(), + anchor_mode: TransactionAnchorMode::Any, + post_condition_mode: TransactionPostConditionMode::Allow, + post_conditions: vec![], + payload: TransactionPayload::TokenTransfer( + addr.to_account_principal(), + 123, + TokenTransferMemo([0u8; 34]), + ), + }; + tx.set_tx_fee(1000); + tx.set_origin_nonce(0); + + let txid = tx.txid(); + let tx_bytes = tx.serialize_to_vec(); + let origin_addr = tx.origin_address(); + let origin_nonce = tx.get_origin_nonce(); + let sponsor_addr = tx.sponsor_address().unwrap_or(origin_addr.clone()); + let sponsor_nonce = tx.get_sponsor_nonce().unwrap_or(origin_nonce); + let tx_fee = tx.get_tx_fee(); + + total_len += tx_bytes.len(); + + // should succeed + MemPoolDB::try_add_tx( + &mut mempool_tx, + &mut chainstate, + &b_2.0, + &b_2.1, + txid.clone(), + tx_bytes, + tx_fee, + block_height as u64, + &origin_addr, + origin_nonce, + &sponsor_addr, + sponsor_nonce, + None, + ) + .unwrap(); + + eprintln!("Added {} {}", i, &txid); + txs.push(tx); + } + mempool_tx.commit().unwrap(); + + let mut mempool_settings = MemPoolWalkSettings::default(); + let mut tx_events = Vec::new(); + mempool_settings.txs_to_consider = [ + MemPoolWalkTxTypes::SmartContract, + MemPoolWalkTxTypes::ContractCall, + ] + .into_iter() + .collect(); + + 
chainstate.with_read_only_clarity_tx( + &TEST_BURN_STATE_DB, + &StacksBlockHeader::make_index_block_hash(&b_2.0, &b_2.1), + |clarity_conn| { + let mut count_txs = 0; + mempool + .iterate_candidates::<_, ChainstateError, _>( + clarity_conn, + &mut tx_events, + 2, + mempool_settings.clone(), + |_, available_tx, _| { + count_txs += 1; + Ok(Some( + // Generate any success result + TransactionResult::success( + &available_tx.tx.tx, + available_tx.tx.metadata.tx_fee, + StacksTransactionReceipt::from_stx_transfer( + available_tx.tx.tx.clone(), + vec![], + Value::okay(Value::Bool(true)).unwrap(), + ExecutionCost::zero(), + ), + ) + .convert_to_event(), + )) + }, + ) + .unwrap(); + assert_eq!(count_txs, 0); + }, + ); + + mempool_settings.txs_to_consider = [MemPoolWalkTxTypes::TokenTransfer].into_iter().collect(); + + chainstate.with_read_only_clarity_tx( + &TEST_BURN_STATE_DB, + &StacksBlockHeader::make_index_block_hash(&b_2.0, &b_2.1), + |clarity_conn| { + let mut count_txs = 0; + mempool + .iterate_candidates::<_, ChainstateError, _>( + clarity_conn, + &mut tx_events, + 2, + mempool_settings.clone(), + |_, available_tx, _| { + count_txs += 1; + Ok(Some( + // Generate any success result + TransactionResult::success( + &available_tx.tx.tx, + available_tx.tx.metadata.tx_fee, + StacksTransactionReceipt::from_stx_transfer( + available_tx.tx.tx.clone(), + vec![], + Value::okay(Value::Bool(true)).unwrap(), + ExecutionCost::zero(), + ), + ) + .convert_to_event(), + )) + }, + ) + .unwrap(); + assert_eq!(count_txs, 10); + }, + ); +} diff --git a/stackslib/src/cost_estimates/fee_scalar.rs b/stackslib/src/cost_estimates/fee_scalar.rs index 14c4471458..b7fc814ff3 100644 --- a/stackslib/src/cost_estimates/fee_scalar.rs +++ b/stackslib/src/cost_estimates/fee_scalar.rs @@ -12,6 +12,10 @@ use serde_json::Value as JsonValue; use super::metrics::CostMetric; use super::{EstimatorError, FeeEstimator, FeeRateEstimate}; + +use clarity::vm::database::ClaritySerializable; +use 
clarity::vm::database::STXBalance; + use crate::chainstate::stacks::db::StacksEpochReceipt; use crate::chainstate::stacks::events::TransactionOrigin; use crate::chainstate::stacks::TransactionPayload; @@ -163,7 +167,25 @@ impl FeeEstimator for ScalarFeeRateEstimator { let scalar_cost = match payload { TransactionPayload::TokenTransfer(_, _, _) => { // TokenTransfers *only* contribute tx_len, and just have an empty ExecutionCost. - self.metric.from_len(tx_size) + let stx_balance_len = STXBalance::LockedPoxThree { + amount_unlocked: 1, + amount_locked: 1, + unlock_height: 1, + } + .serialize() + .as_bytes() + .len() as u64; + self.metric.from_cost_and_len( + &ExecutionCost { + write_length: stx_balance_len, + write_count: 1, + read_length: 2 * stx_balance_len, + read_count: 2, + runtime: 4640, // taken from .costs-3 + }, + &block_limit, + tx_size, + ) } TransactionPayload::Coinbase(..) => { // Coinbase txs are "free", so they don't factor into the fee market. diff --git a/stackslib/src/main.rs b/stackslib/src/main.rs index c70e5c2e7a..03d2d2edfa 100644 --- a/stackslib/src/main.rs +++ b/stackslib/src/main.rs @@ -771,7 +771,6 @@ simulating a miner. 
let mut settings = BlockBuilderSettings::limited(); settings.max_miner_time_ms = max_time; - settings.mempool_settings.min_tx_fee = min_fee; let result = StacksBlockBuilder::build_anchored_block( &chain_state, diff --git a/stackslib/src/net/httpcore.rs b/stackslib/src/net/httpcore.rs index 8b4c11bb07..017a151af6 100644 --- a/stackslib/src/net/httpcore.rs +++ b/stackslib/src/net/httpcore.rs @@ -33,6 +33,7 @@ use stacks_common::types::net::PeerHost; use stacks_common::types::Address; use stacks_common::util::chunked_encoding::*; use stacks_common::util::retry::{BoundReader, RetryReader}; +use stacks_common::util::get_epoch_time_ms; use url::Url; use crate::burnchains::Txid; @@ -435,11 +436,12 @@ pub trait RPCRequestHandler: HttpRequest + HttpResponse + RPCRequestHandlerClone pub struct StacksHttpRequest { preamble: HttpRequestPreamble, contents: HttpRequestContents, + start_time: u128 } impl StacksHttpRequest { pub fn new(preamble: HttpRequestPreamble, contents: HttpRequestContents) -> Self { - Self { preamble, contents } + Self { preamble, contents, start_time: get_epoch_time_ms() } } /// Instantiate a request to a remote Stacks peer @@ -470,7 +472,7 @@ impl StacksHttpRequest { preamble.path_and_query_str = decoded_path; } - Ok(Self { preamble, contents }) + Ok(Self { preamble, contents, start_time: get_epoch_time_ms() }) } /// Get a reference to the request premable metadata @@ -493,6 +495,17 @@ impl StacksHttpRequest { &self.preamble.path_and_query_str } + /// Get the HTTP verb for this request + pub fn verb(&self) -> &str { + &self.preamble.verb + } + + /// Get the number of milliseconds elapsed since this request was created + pub fn duration_ms(&self) -> u128 { + let now = get_epoch_time_ms(); + now.saturating_sub(self.start_time) + } + /// Write out this message to a Write. /// NOTE: In practice, the Write will be a reply handle endpoint, so writing to it won't block. 
pub fn send(&self, fd: &mut W) -> Result<(), NetError> { @@ -982,7 +995,7 @@ impl StacksHttp { } }; - info!("Handle StacksHttpRequest"; "verb" => %verb, "peer_addr" => %self.peer_addr, "path" => %decoded_path, "query" => %query); + debug!("Handle StacksHttpRequest"; "verb" => %verb, "peer_addr" => %self.peer_addr, "path" => %decoded_path, "query" => %query); let request = StacksHttpRequest::new(preamble.clone(), payload); return Ok(request); } diff --git a/stackslib/src/net/rpc.rs b/stackslib/src/net/rpc.rs index f66e26a71a..49f0aa2479 100644 --- a/stackslib/src/net/rpc.rs +++ b/stackslib/src/net/rpc.rs @@ -546,13 +546,21 @@ impl ConversationHttp { // new request that we can handle self.total_request_count += 1; self.last_request_timestamp = get_epoch_time_secs(); + let latency = req.duration_ms(); let start_time = Instant::now(); - let path = req.request_path().to_string(); + let verb = req.verb().to_string(); + let request_path = req.request_path().to_string(); let msg_opt = monitoring::instrument_http_request_handler(req, |req| { self.handle_request(req, node) })?; - debug!("Processed HTTPRequest"; "path" => %path, "processing_time_ms" => start_time.elapsed().as_millis(), "conn_id" => self.conn_id, "peer_addr" => &self.peer_addr); + info!("Handled StacksHTTPRequest"; + "verb" => %verb, + "path" => %request_path, + "processing_time_ms" => start_time.elapsed().as_millis(), + "latency_ms" => latency, + "conn_id" => self.conn_id, + "peer_addr" => &self.peer_addr); if let Some(msg) = msg_opt { ret.push(msg); @@ -565,7 +573,7 @@ impl ConversationHttp { let start_time = Instant::now(); self.reply_error(resp)?; - debug!("Processed HTTPRequest Error"; "path" => %path, "processing_time_ms" => start_time.elapsed().as_millis(), "conn_id" => self.conn_id, "peer_addr" => &self.peer_addr); + info!("Handled StacksHTTPRequest Error"; "path" => %path, "processing_time_ms" => start_time.elapsed().as_millis(), "conn_id" => self.conn_id, "peer_addr" => &self.peer_addr); } 
StacksHttpMessage::Response(resp) => { // Is there someone else waiting for this message? If so, pass it along. diff --git a/testnet/stacks-node/Cargo.toml b/testnet/stacks-node/Cargo.toml index 82ed994ee4..b50198954b 100644 --- a/testnet/stacks-node/Cargo.toml +++ b/testnet/stacks-node/Cargo.toml @@ -8,7 +8,7 @@ rust-version = "1.61" [dependencies] lazy_static = "1.4.0" -pico-args = "0.3.1" +pico-args = "0.5.0" rand = "0.7.3" serde = "1" serde_derive = "1" @@ -21,7 +21,7 @@ async-std = { version = "1.6", features = ["attributes"] } http-types = "2.12" base64 = "0.12.0" backtrace = "0.3.50" -libc = "0.2" +libc = "0.2.151" slog = { version = "2.5.2", features = [ "max_level_trace" ] } clarity = { path = "../../clarity" } stacks-common = { path = "../../stacks-common" } diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index 30967f5556..d511603ed5 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -155,41 +155,15 @@ pub fn make_bitcoin_indexer( } pub fn get_satoshis_per_byte(config: &Config) -> u64 { - match config.get_burnchain_config() { - Ok(s) => s.satoshis_per_byte, - Err(_) => { - info!("No config found. 
Using previous configuration."); - config.burnchain.satoshis_per_byte - } - } + config.get_burnchain_config().satoshis_per_byte } -#[cfg(test)] -mod tests { - use std::env::temp_dir; - use std::fs::File; - use std::io::Write; - - use super::*; - use crate::config::DEFAULT_SATS_PER_VB; - - #[test] - fn test_get_satoshis_per_byte() { - let dir = temp_dir(); - let file_path = dir.as_path().join("config.toml"); - - let mut config = Config::default(); - - let satoshis_per_byte = get_satoshis_per_byte(&config); - assert_eq!(satoshis_per_byte, DEFAULT_SATS_PER_VB); - - let mut file = File::create(&file_path).unwrap(); - writeln!(file, "[burnchain]").unwrap(); - writeln!(file, "satoshis_per_byte = 51").unwrap(); - config.config_path = Some(file_path.to_str().unwrap().to_string()); +pub fn get_rbf_fee_increment(config: &Config) -> u64 { + config.get_burnchain_config().rbf_fee_increment +} - assert_eq!(get_satoshis_per_byte(&config), 51); - } +pub fn get_max_rbf(config: &Config) -> u64 { + config.get_burnchain_config().max_rbf } impl LeaderBlockCommitFees { @@ -201,7 +175,7 @@ impl LeaderBlockCommitFees { let mut fees = LeaderBlockCommitFees::estimated_fees_from_payload(payload, config); fees.spent_in_attempts = cmp::max(1, self.spent_in_attempts); fees.final_size = self.final_size; - fees.fee_rate = self.fee_rate + config.burnchain.rbf_fee_increment; + fees.fee_rate = self.fee_rate + get_rbf_fee_increment(&config); fees.is_rbf_enabled = true; fees } @@ -835,8 +809,8 @@ impl BitcoinRegtestController { let public_key = signer.get_public_key(); // reload the config to find satoshis_per_byte changes - let satoshis_per_byte = get_satoshis_per_byte(&self.config); - let btc_miner_fee = self.config.burnchain.leader_key_tx_estimated_size * satoshis_per_byte; + let btc_miner_fee = self.config.burnchain.leader_key_tx_estimated_size + * get_satoshis_per_byte(&self.config); let budget_for_outputs = DUST_UTXO_LIMIT; let total_required = btc_miner_fee + budget_for_outputs; @@ -864,7 
+838,7 @@ impl BitcoinRegtestController { tx.output = vec![consensus_output]; - let fee_rate = satoshis_per_byte; + let fee_rate = get_satoshis_per_byte(&self.config); self.finalize_tx( epoch_id, @@ -958,7 +932,6 @@ impl BitcoinRegtestController { ) -> Option { let public_key = signer.get_public_key(); let max_tx_size = 230; - let satoshis_per_byte = get_satoshis_per_byte(&self.config); let (mut tx, mut utxos) = if let Some(utxo) = utxo_to_use { ( Transaction { @@ -976,7 +949,7 @@ impl BitcoinRegtestController { self.prepare_tx( epoch_id, &public_key, - DUST_UTXO_LIMIT + max_tx_size * satoshis_per_byte, + DUST_UTXO_LIMIT + max_tx_size * get_satoshis_per_byte(&self.config), None, None, 0, @@ -1004,14 +977,13 @@ impl BitcoinRegtestController { .to_bitcoin_tx_out(DUST_UTXO_LIMIT), ); - let satoshis_per_byte = get_satoshis_per_byte(&self.config); self.finalize_tx( epoch_id, &mut tx, DUST_UTXO_LIMIT, 0, max_tx_size, - satoshis_per_byte, + get_satoshis_per_byte(&self.config), &mut utxos, signer, )?; @@ -1359,11 +1331,11 @@ impl BitcoinRegtestController { // Stop as soon as the fee_rate is ${self.config.burnchain.max_rbf} percent higher, stop RBF if ongoing_op.fees.fee_rate - > (get_satoshis_per_byte(&self.config) * self.config.burnchain.max_rbf / 100) + > (get_satoshis_per_byte(&self.config) * get_max_rbf(&self.config) / 100) { warn!( "RBF'd block commits reached {}% satoshi per byte fee rate, not resubmitting", - self.config.burnchain.max_rbf + get_max_rbf(&self.config) ); self.ongoing_block_commit = Some(ongoing_op); return None; @@ -2546,3 +2518,31 @@ impl BitcoinRPCRequest { Ok(payload) } } + +#[cfg(test)] +mod tests { + use std::env::temp_dir; + use std::fs::File; + use std::io::Write; + + use super::*; + use crate::config::DEFAULT_SATS_PER_VB; + + #[test] + fn test_get_satoshis_per_byte() { + let dir = temp_dir(); + let file_path = dir.as_path().join("config.toml"); + + let mut config = Config::default(); + + let satoshis_per_byte = get_satoshis_per_byte(&config); 
+ assert_eq!(satoshis_per_byte, DEFAULT_SATS_PER_VB); + + let mut file = File::create(&file_path).unwrap(); + writeln!(file, "[burnchain]").unwrap(); + writeln!(file, "satoshis_per_byte = 51").unwrap(); + config.config_path = Some(file_path.to_str().unwrap().to_string()); + + assert_eq!(get_satoshis_per_byte(&config), 51); + } +} diff --git a/testnet/stacks-node/src/chain_data.rs b/testnet/stacks-node/src/chain_data.rs new file mode 100644 index 0000000000..587fece9bc --- /dev/null +++ b/testnet/stacks-node/src/chain_data.rs @@ -0,0 +1,1088 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +use std::collections::HashMap; +use std::process::{Command, Stdio}; + +use stacks::burnchains::bitcoin::address::BitcoinAddress; +use stacks::burnchains::bitcoin::{BitcoinNetworkType, BitcoinTxOutput}; +use stacks::burnchains::{Burnchain, BurnchainSigner, Error as BurnchainError, Txid}; +use stacks::chainstate::burn::db::sortdb::{SortitionDB, SortitionHandle}; +use stacks::chainstate::burn::distribution::BurnSamplePoint; +use stacks::chainstate::burn::operations::leader_block_commit::{ + MissedBlockCommit, BURN_BLOCK_MINED_AT_MODULUS, +}; +use stacks::chainstate::burn::operations::LeaderBlockCommitOp; +use stacks::chainstate::stacks::address::PoxAddress; +use stacks::core::MINING_COMMITMENT_WINDOW; +use stacks::util_lib::db::Error as DBError; +use stacks_common::types::chainstate::{BlockHeaderHash, BurnchainHeaderHash, VRFSeed}; +use stacks_common::util::hash::hex_bytes; + +pub struct MinerStats { + pub unconfirmed_commits_helper: String, +} + +/// Unconfirmed block-commit transaction as emitted by our helper +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +struct UnconfirmedBlockCommit { + /// burnchain signer + address: String, + /// PoX payouts + pox_addrs: Vec, + /// UTXO spent to create this block-commit + input_index: u32, + input_txid: String, + /// transaction ID + txid: String, + /// amount spent + burn: u64, +} + +const DEADBEEF: [u8; 32] = [ + 0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef, + 0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef, +]; + +impl MinerStats { + /// Find the burn distribution for a single sortition's block-commits and missed-commits + fn get_burn_distribution( + sort_handle: &mut SH, + burnchain: &Burnchain, + burn_block_height: u64, + block_commits: Vec, + missed_commits: Vec, + ) -> Result, BurnchainError> { + // assemble the commit windows + let mut windowed_block_commits = vec![block_commits]; + let mut 
windowed_missed_commits = vec![]; + + if !burnchain.is_in_prepare_phase(burn_block_height) { + // PoX reward-phase is active! + // build a map of intended sortition -> missed commit for the missed commits + // discovered in this block. + let mut missed_commits_map: HashMap<_, Vec<_>> = HashMap::new(); + for missed in missed_commits.iter() { + if let Some(commits_at_sortition) = + missed_commits_map.get_mut(&missed.intended_sortition) + { + commits_at_sortition.push(missed); + } else { + missed_commits_map.insert(missed.intended_sortition.clone(), vec![missed]); + } + } + + for blocks_back in 0..(MINING_COMMITMENT_WINDOW - 1) { + if burn_block_height.saturating_sub(1) < (blocks_back as u64) { + debug!("Mining commitment window shortened because block height is less than window size"; + "block_height" => %burn_block_height.saturating_sub(1), + "window_size" => %MINING_COMMITMENT_WINDOW); + break; + } + let block_height = (burn_block_height.saturating_sub(1)) - (blocks_back as u64); + let sortition_id = match sort_handle.get_block_snapshot_by_height(block_height)? 
{ + Some(sn) => sn.sortition_id, + None => break, + }; + windowed_block_commits.push(SortitionDB::get_block_commits_by_block( + sort_handle.sqlite(), + &sortition_id, + )?); + let mut missed_commits_at_height = SortitionDB::get_missed_commits_by_intended( + sort_handle.sqlite(), + &sortition_id, + )?; + if let Some(missed_commit_in_block) = missed_commits_map.remove(&sortition_id) { + missed_commits_at_height + .extend(missed_commit_in_block.into_iter().map(|x| x.clone())); + } + + windowed_missed_commits.push(missed_commits_at_height); + } + } else { + // PoX reward-phase is not active + debug!( + "Block {} is in a prepare phase or post-PoX sunset, so no windowing will take place", + burn_block_height; + ); + + assert_eq!(windowed_block_commits.len(), 1); + assert_eq!(windowed_missed_commits.len(), 0); + } + + // reverse vecs so that windows are in ascending block height order + windowed_block_commits.reverse(); + windowed_missed_commits.reverse(); + + // figure out if the PoX sunset finished during the window, + // and/or which sortitions must be PoB due to them falling in a prepare phase. 
+ let window_end_height = burn_block_height; + let window_start_height = window_end_height + 1 - (windowed_block_commits.len() as u64); + let mut burn_blocks = vec![false; windowed_block_commits.len()]; + + // set burn_blocks flags to accomodate prepare phases and PoX sunset + for (i, b) in burn_blocks.iter_mut().enumerate() { + if burnchain.is_in_prepare_phase(window_start_height + (i as u64)) { + // must burn + *b = true; + } else { + // must not burn + *b = false; + } + } + + // not all commits in windowed_block_commits have been confirmed, so make sure that they + // are in the right order + let mut block_height_at_index = None; + for (index, commits) in windowed_block_commits.iter_mut().enumerate() { + let index = index as u64; + for commit in commits.iter_mut() { + if let Some((first_block_height, first_index)) = block_height_at_index { + if commit.block_height != first_block_height + (index - first_index) { + commit.block_height = first_block_height + (index - first_index); + } + } else { + block_height_at_index = Some((commit.block_height, index)); + } + } + } + + // calculate the burn distribution from these operations. 
+ // The resulting distribution will contain the user burns that match block commits + let burn_dist = BurnSamplePoint::make_min_median_distribution( + windowed_block_commits, + windowed_missed_commits, + burn_blocks, + ); + + Ok(burn_dist) + } + + fn fmt_bin_args(bin: &str, args: &[&str]) -> String { + let mut all = Vec::with_capacity(1 + args.len()); + all.push(bin); + for arg in args { + all.push(arg); + } + all.join(" ") + } + + /// Returns (exit code, stdout, stderr) + fn run_subprocess( + bin_fullpath: &str, + args: &[&str], + ) -> Result<(i32, Vec, Vec), String> { + let full_args = Self::fmt_bin_args(bin_fullpath, args); + let mut cmd = Command::new(bin_fullpath); + cmd.stdin(Stdio::piped()) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .args(args); + + debug!("Run: `{:?}`", &cmd); + + let output = cmd + .spawn() + .map_err(|e| format!("Failed to run `{}`: {:?}", &full_args, &e))? + .wait_with_output() + .map_err(|ioe| format!("Failed to run `{}`: {:?}", &full_args, &ioe))?; + + let exit_code = match output.status.code() { + Some(code) => code, + None => { + // failed due to signal + return Err(format!("Failed to run `{}`: killed by signal", &full_args)); + } + }; + + Ok((exit_code, output.stdout, output.stderr)) + } + + /// Get the list of all unconfirmed block-commits. + pub fn get_unconfirmed_commits( + &self, + next_block_height: u64, + all_miners: &[&str], + ) -> Result, String> { + let (exit_code, stdout, _stderr) = + Self::run_subprocess(&self.unconfirmed_commits_helper, &all_miners)?; + if exit_code != 0 { + return Err(format!( + "Failed to run `{}`: exit code {}", + &self.unconfirmed_commits_helper, exit_code + )); + } + + // decode stdout to JSON + let unconfirmed_commits: Vec = serde_json::from_slice(&stdout) + .map_err(|e| { + format!( + "Failed to decode output from `{}`: {:?}. 
Output was `{}`", + &self.unconfirmed_commits_helper, + &e, + String::from_utf8_lossy(&stdout) + ) + })?; + + let mut unconfirmed_spends = vec![]; + for unconfirmed_commit in unconfirmed_commits.into_iter() { + let Ok(txid) = Txid::from_hex(&unconfirmed_commit.txid) else { + return Err(format!("Not a valid txid: `{}`", &unconfirmed_commit.txid)); + }; + let Ok(input_txid) = Txid::from_hex(&unconfirmed_commit.input_txid) else { + return Err(format!( + "Not a valid txid: `{}`", + &unconfirmed_commit.input_txid + )); + }; + let mut decoded_pox_addrs = vec![]; + for pox_addr_hex in unconfirmed_commit.pox_addrs.iter() { + let Ok(pox_addr_bytes) = hex_bytes(&pox_addr_hex) else { + return Err(format!("Not a hex string: `{}`", &pox_addr_hex)); + }; + let Some(bitcoin_addr) = + BitcoinAddress::from_scriptpubkey(BitcoinNetworkType::Mainnet, &pox_addr_bytes) + else { + return Err(format!( + "Not a recognized Bitcoin scriptpubkey: {}", + &pox_addr_hex + )); + }; + let Some(pox_addr) = PoxAddress::try_from_bitcoin_output(&BitcoinTxOutput { + address: bitcoin_addr.clone(), + units: 1, + }) else { + return Err(format!("Not a recognized PoX address: {}", &bitcoin_addr)); + }; + decoded_pox_addrs.push(pox_addr); + } + + // mocked commit + let mocked_commit = LeaderBlockCommitOp { + sunset_burn: 0, + block_header_hash: BlockHeaderHash(DEADBEEF.clone()), + new_seed: VRFSeed(DEADBEEF.clone()), + parent_block_ptr: 1, + parent_vtxindex: 1, + key_block_ptr: 1, + key_vtxindex: 1, + memo: vec![], + commit_outs: decoded_pox_addrs, + burn_fee: unconfirmed_commit.burn, + input: (input_txid, unconfirmed_commit.input_index), + apparent_sender: BurnchainSigner(unconfirmed_commit.address), + txid, + vtxindex: 1, + block_height: next_block_height, + burn_parent_modulus: ((next_block_height.saturating_sub(1)) + % BURN_BLOCK_MINED_AT_MODULUS) as u8, + burn_header_hash: BurnchainHeaderHash(DEADBEEF.clone()), + }; + + unconfirmed_spends.push(mocked_commit); + } + Ok(unconfirmed_spends) + } + + /// 
Convert a list of burn sample points into a probability distribution by candidate's + /// apparent sender (e.g. miner address). + pub fn burn_dist_to_prob_dist(burn_dist: &[BurnSamplePoint]) -> HashMap { + if burn_dist.len() == 0 { + return HashMap::new(); + } + if burn_dist.len() == 1 { + let mut ret = HashMap::new(); + ret.insert(burn_dist[0].candidate.apparent_sender.to_string(), 1.0); + return ret; + } + + let mut ret = HashMap::new(); + for pt in burn_dist.iter() { + // take the upper 32 bits + let range_lower_64 = (pt.range_end - pt.range_start) >> 192; + let int_prob = (range_lower_64.low_u64() >> 32) as u32; + + ret.insert( + pt.candidate.apparent_sender.to_string(), + (int_prob as f64) / (u32::MAX as f64), + ); + } + + ret + } + + /// Get the spend distribution and total spend. + /// If the miner has both a confirmed and unconfirmed spend, then take the latter. + pub fn get_spend_distribution( + active_miners_and_commits: &[(String, LeaderBlockCommitOp)], + unconfirmed_block_commits: &[LeaderBlockCommitOp], + expected_pox_addrs: &[PoxAddress], + ) -> (HashMap, u64) { + let unconfirmed_block_commits: Vec<_> = unconfirmed_block_commits + .iter() + .filter(|commit| { + if commit.commit_outs.len() != expected_pox_addrs.len() { + return false; + } + for i in 0..commit.commit_outs.len() { + if commit.commit_outs[i].to_burnchain_repr() + != expected_pox_addrs[i].to_burnchain_repr() + { + info!( + "Skipping invalid unconfirmed block-commit: {:?} != {:?}", + &commit.commit_outs[i].to_burnchain_repr(), + expected_pox_addrs[i].to_burnchain_repr() + ); + return false; + } + } + true + }) + .collect(); + + let mut total_spend = 0; + let mut dist = HashMap::new(); + for commit in unconfirmed_block_commits { + let addr = commit.apparent_sender.to_string(); + dist.insert(addr, commit.burn_fee); + } + + for (_, commit) in active_miners_and_commits.iter() { + let addr = commit.apparent_sender.to_string(); + if dist.contains_key(&addr) { + continue; + } + dist.insert(addr, 
commit.burn_fee); + } + + for (_, spend) in dist.iter() { + total_spend += *spend; + } + + (dist, total_spend) + } + + /// Get the probability distribution for the Bitcoin block 6+ blocks in the future, assuming + /// all block-commit spends remain the same. + pub fn get_future_win_distribution( + active_miners_and_commits: &[(String, LeaderBlockCommitOp)], + unconfirmed_block_commits: &[LeaderBlockCommitOp], + expected_pox_addrs: &[PoxAddress], + ) -> HashMap { + let (dist, total_spend) = Self::get_spend_distribution( + active_miners_and_commits, + unconfirmed_block_commits, + &expected_pox_addrs, + ); + + let mut probs = HashMap::new(); + for (addr, spend) in dist.into_iter() { + if total_spend == 0 { + probs.insert(addr, 0.0); + } else { + probs.insert(addr, (spend as f64) / (total_spend as f64)); + } + } + probs + } + + /// Get the burn distribution for the _next_ Bitcoin block, assuming that the given list of + /// block-commit data will get mined. For miners that are known to the system but who do not + /// have unconfirmed block-commits, infer that they'll just mine the same block-commit value + /// again. + pub fn get_unconfirmed_burn_distribution( + &self, + burnchain: &Burnchain, + sortdb: &SortitionDB, + active_miners_and_commits: &[(String, LeaderBlockCommitOp)], + unconfirmed_block_commits: Vec, + expected_pox_addrs: &[PoxAddress], + at_block: Option, + ) -> Result, BurnchainError> { + let mut commit_table = HashMap::new(); + for commit in unconfirmed_block_commits.iter() { + commit_table.insert(commit.apparent_sender.to_string(), commit.clone()); + } + + let tip = if let Some(at_block) = at_block { + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn())?; + let ih = sortdb.index_handle(&tip.sortition_id); + ih.get_block_snapshot_by_height(at_block)? + .ok_or(BurnchainError::MissingParentBlock)? + } else { + SortitionDB::get_canonical_burn_chain_tip(sortdb.conn())? 
+ }; + + let next_block_height = tip.block_height + 1; + let expected_input_index = if burnchain.is_in_prepare_phase(tip.block_height) { + LeaderBlockCommitOp::expected_chained_utxo(true) + } else { + LeaderBlockCommitOp::expected_chained_utxo(false) + }; + + for (miner, last_commit) in active_miners_and_commits.iter() { + if !commit_table.contains_key(miner) { + let mocked_commit = LeaderBlockCommitOp { + sunset_burn: 0, + block_header_hash: BlockHeaderHash(DEADBEEF.clone()), + new_seed: VRFSeed(DEADBEEF.clone()), + parent_block_ptr: 2, + parent_vtxindex: 2, + key_block_ptr: 2, + key_vtxindex: 2, + memo: vec![], + commit_outs: expected_pox_addrs.to_vec(), + burn_fee: last_commit.burn_fee, + input: (last_commit.txid, expected_input_index), + apparent_sender: last_commit.apparent_sender.clone(), + txid: Txid(DEADBEEF.clone()), + vtxindex: 1, + block_height: next_block_height, + burn_parent_modulus: ((next_block_height.saturating_sub(1)) + % BURN_BLOCK_MINED_AT_MODULUS) + as u8, + burn_header_hash: BurnchainHeaderHash(DEADBEEF.clone()), + }; + commit_table.insert(miner.to_string(), mocked_commit); + } + } + + let unconfirmed_block_commits: Vec<_> = commit_table + .into_values() + .filter(|commit| { + if commit.commit_outs.len() != expected_pox_addrs.len() { + return false; + } + for i in 0..commit.commit_outs.len() { + if commit.commit_outs[i].to_burnchain_repr() + != expected_pox_addrs[i].to_burnchain_repr() + { + info!( + "Skipping invalid unconfirmed block-commit: {:?} != {:?}", + &commit.commit_outs[i].to_burnchain_repr(), + expected_pox_addrs[i].to_burnchain_repr() + ); + return false; + } + } + true + }) + .collect(); + + let mut handle = sortdb.index_handle(&tip.sortition_id); + Self::get_burn_distribution( + &mut handle, + burnchain, + tip.block_height + 1, + unconfirmed_block_commits, + vec![], + ) + } + + /// Given the sortition DB, get the list of all miners in the past MINING_COMMITMENT_WINDOW + /// blocks, as well as their last block-commits + pub fn 
get_active_miners( + sortdb: &SortitionDB, + at_burn_block: Option, + ) -> Result, DBError> { + let mut tip = if let Some(at_burn_block) = at_burn_block { + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn())?; + let ih = sortdb.index_handle(&tip.sortition_id); + ih.get_block_snapshot_by_height(at_burn_block)? + .ok_or(DBError::NotFoundError)? + } else { + SortitionDB::get_canonical_burn_chain_tip(sortdb.conn())? + }; + + let mut miners = HashMap::new(); + for _i in 0..MINING_COMMITMENT_WINDOW { + let commits = + SortitionDB::get_block_commits_by_block(sortdb.conn(), &tip.sortition_id)?; + for commit in commits.into_iter() { + let miner = commit.apparent_sender.to_string(); + if miners.get(&miner).is_none() { + miners.insert(miner, commit); + } + } + tip = SortitionDB::get_block_snapshot(sortdb.conn(), &tip.parent_sortition_id)? + .ok_or(DBError::NotFoundError)?; + } + Ok(miners.into_iter().collect()) + } +} + +#[cfg(test)] +pub mod tests { + use std::fs; + use std::io::Write; + + use stacks::burnchains::{BurnchainSigner, Txid}; + use stacks::chainstate::burn::distribution::BurnSamplePoint; + use stacks::chainstate::burn::operations::leader_block_commit::BURN_BLOCK_MINED_AT_MODULUS; + use stacks::chainstate::burn::operations::LeaderBlockCommitOp; + use stacks::chainstate::stacks::address::{PoxAddress, PoxAddressType20}; + use stacks_common::types::chainstate::{ + BlockHeaderHash, BurnchainHeaderHash, StacksAddress, StacksPublicKey, VRFSeed, + }; + use stacks_common::util::hash::{hex_bytes, Hash160}; + use stacks_common::util::uint::{BitArray, Uint256}; + + use super::MinerStats; + + #[test] + fn test_burn_dist_to_prob_dist() { + let block_commit_1 = LeaderBlockCommitOp { + sunset_burn: 0, + block_header_hash: BlockHeaderHash([0x22; 32]), + new_seed: VRFSeed([0x33; 32]), + parent_block_ptr: 111, + parent_vtxindex: 456, + key_block_ptr: 123, + key_vtxindex: 456, + memo: vec![0x80], + + burn_fee: 12345, + input: (Txid([0; 32]), 0), + apparent_sender: 
BurnchainSigner::new_p2pkh( + &StacksPublicKey::from_hex( + "02d8015134d9db8178ac93acbc43170a2f20febba5087a5b0437058765ad5133d0", + ) + .unwrap(), + ), + + commit_outs: vec![], + + txid: Txid::from_bytes_be( + &hex_bytes("3c07a0a93360bc85047bbaadd49e30c8af770f73a37e10fec400174d2e5f27cf") + .unwrap(), + ) + .unwrap(), + vtxindex: 443, + block_height: 124, + burn_parent_modulus: (123 % BURN_BLOCK_MINED_AT_MODULUS) as u8, + burn_header_hash: BurnchainHeaderHash([0x00; 32]), + }; + + let block_commit_2 = LeaderBlockCommitOp { + sunset_burn: 0, + block_header_hash: BlockHeaderHash([0x22; 32]), + new_seed: VRFSeed([0x33; 32]), + parent_block_ptr: 112, + parent_vtxindex: 111, + key_block_ptr: 122, + key_vtxindex: 457, + memo: vec![0x80], + + burn_fee: 12345, + input: (Txid([0; 32]), 0), + apparent_sender: BurnchainSigner::new_p2pkh( + &StacksPublicKey::from_hex( + "023616a344700c9455bf0b55cc65e404c7b8f82e815da885398a44f6dc70e64045", + ) + .unwrap(), + ), + + commit_outs: vec![], + + txid: Txid::from_bytes_be( + &hex_bytes("3c07a0a93360bc85047bbaadd49e30c8af770f73a37e10fec400174d2e5f27d0") + .unwrap(), + ) + .unwrap(), + vtxindex: 444, + block_height: 124, + burn_parent_modulus: (123 % BURN_BLOCK_MINED_AT_MODULUS) as u8, + burn_header_hash: BurnchainHeaderHash::from_hex( + "0000000000000000000000000000000000000000000000000000000000000004", + ) + .unwrap(), + }; + + let block_commit_3 = LeaderBlockCommitOp { + sunset_burn: 0, + block_header_hash: BlockHeaderHash([0x22; 32]), + new_seed: VRFSeed([0x33; 32]), + parent_block_ptr: 113, + parent_vtxindex: 111, + key_block_ptr: 121, + key_vtxindex: 10, + memo: vec![0x80], + + burn_fee: 23456, + input: (Txid([0; 32]), 0), + apparent_sender: BurnchainSigner::new_p2pkh( + &StacksPublicKey::from_hex( + "020a9b0a938a2226694fe4f867193cf0b78cd6264e4277fd686468a00a9afdc36d", + ) + .unwrap(), + ), + + commit_outs: vec![], + + txid: Txid::from_bytes_be( + &hex_bytes("301dc687a9f06a1ae87a013f27133e9cec0843c2983567be73e185827c7c13de") + 
.unwrap(), + ) + .unwrap(), + vtxindex: 445, + block_height: 124, + burn_parent_modulus: (123 % BURN_BLOCK_MINED_AT_MODULUS) as u8, + burn_header_hash: BurnchainHeaderHash::from_hex( + "0000000000000000000000000000000000000000000000000000000000000004", + ) + .unwrap(), + }; + let burn_dist = vec![ + BurnSamplePoint { + burns: block_commit_1.burn_fee.into(), + median_burn: block_commit_2.burn_fee.into(), + range_start: Uint256::zero(), + range_end: Uint256([ + 0x3ed94d3cb0a84709, + 0x0963dded799a7c1a, + 0x70989faf596c8b65, + 0x41a3ed94d3cb0a84, + ]), + candidate: block_commit_1.clone(), + user_burns: vec![], + }, + BurnSamplePoint { + burns: block_commit_2.burn_fee.into(), + median_burn: block_commit_2.burn_fee.into(), + range_start: Uint256([ + 0x3ed94d3cb0a84709, + 0x0963dded799a7c1a, + 0x70989faf596c8b65, + 0x41a3ed94d3cb0a84, + ]), + range_end: Uint256([ + 0x7db29a7961508e12, + 0x12c7bbdaf334f834, + 0xe1313f5eb2d916ca, + 0x8347db29a7961508, + ]), + candidate: block_commit_2.clone(), + user_burns: vec![], + }, + BurnSamplePoint { + burns: (block_commit_3.burn_fee).into(), + median_burn: block_commit_3.burn_fee.into(), + range_start: Uint256([ + 0x7db29a7961508e12, + 0x12c7bbdaf334f834, + 0xe1313f5eb2d916ca, + 0x8347db29a7961508, + ]), + range_end: Uint256::max(), + candidate: block_commit_3.clone(), + user_burns: vec![], + }, + ]; + + let prob_dist = MinerStats::burn_dist_to_prob_dist(&burn_dist); + assert_eq!(prob_dist.len(), 3); + assert!( + (prob_dist + .get(&format!("{}", &block_commit_1.apparent_sender)) + .unwrap() + - 0.25641) + .abs() + < 0.001 + ); + assert!( + (prob_dist + .get(&format!("{}", &block_commit_2.apparent_sender)) + .unwrap() + - 0.25641) + .abs() + < 0.001 + ); + assert!( + (prob_dist + .get(&format!("{}", &block_commit_3.apparent_sender)) + .unwrap() + - 0.48718) + .abs() + < 0.001 + ); + } + + #[test] + fn test_get_unconfirmed_commits() { + use std::os::unix::fs::PermissionsExt; + let shell_code = r#"#!/bin/bash +echo < { + 
assert_eq!(spend, 2); + } + "miner-2" => { + assert_eq!(spend, 3); + } + "miner-3" => { + assert_eq!(spend, 10); + } + "miner-4" => { + assert_eq!(spend, 10); + } + _ => { + panic!("unknown miner {}", &miner); + } + } + } + + let win_probs = MinerStats::get_future_win_distribution( + &active_miners_and_commits, + &unconfirmed_block_commits, + &[], + ); + for miner in &[ + "miner-1".to_string(), + "miner-2".to_string(), + "miner-3".to_string(), + "miner-4".to_string(), + ] { + let prob = *win_probs + .get(miner) + .expect(&format!("no probability for {}", &miner)); + match miner.as_str() { + "miner-1" => { + assert!((prob - (2.0 / 25.0)).abs() < 0.00001); + } + "miner-2" => { + assert!((prob - (3.0 / 25.0)).abs() < 0.00001); + } + "miner-3" => { + assert!((prob - (10.0 / 25.0)).abs() < 0.00001); + } + "miner-4" => { + assert!((prob - (10.0 / 25.0)).abs() < 0.00001); + } + _ => { + panic!("unknown miner {}", &miner); + } + } + } + } +} diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index bb1c4e91d1..fb4d6d91b0 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -14,7 +14,7 @@ use stacks::chainstate::stacks::index::marf::MARFOpenOpts; use stacks::chainstate::stacks::index::storage::TrieHashCalculationMode; use stacks::chainstate::stacks::miner::{BlockBuilderSettings, MinerStatus}; use stacks::chainstate::stacks::MAX_BLOCK_LEN; -use stacks::core::mempool::MemPoolWalkSettings; +use stacks::core::mempool::{MemPoolWalkSettings, MemPoolWalkTxTypes}; use stacks::core::{ StacksEpoch, StacksEpochExtension, StacksEpochId, CHAIN_ID_MAINNET, CHAIN_ID_TESTNET, PEER_VERSION_MAINNET, PEER_VERSION_TESTNET, @@ -27,11 +27,15 @@ use stacks::cost_estimates::{CostEstimator, FeeEstimator, PessimisticEstimator}; use stacks::net::atlas::AtlasConfig; use stacks::net::connection::ConnectionOptions; use stacks::net::{Neighbor, NeighborKey}; +use stacks_common::types::chainstate::StacksAddress; use 
stacks_common::types::net::PeerAddress; +use stacks_common::types::Address; use stacks_common::util::get_epoch_time_ms; use stacks_common::util::hash::hex_bytes; use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; +use crate::chain_data::MinerStats; + pub const DEFAULT_SATS_PER_VB: u64 = 50; const DEFAULT_MAX_RBF_RATE: u64 = 150; // 1.5x const DEFAULT_RBF_FEE_RATE_INCREMENT: u64 = 5; @@ -393,16 +397,36 @@ lazy_static! { } impl Config { - /// get the up-to-date burnchain from the config - pub fn get_burnchain_config(&self) -> Result { - if let Some(path) = &self.config_path { - let config_file = ConfigFile::from_path(path.as_str())?; - let config = Config::from_config_file(config_file)?; - Ok(config.burnchain) - } else { - Ok(self.burnchain.clone()) - } + /// get the up-to-date burnchain options from the config. + /// If the config file can't be loaded, then return the existing config + pub fn get_burnchain_config(&self) -> BurnchainConfig { + let Some(path) = &self.config_path else { + return self.burnchain.clone(); + }; + let Ok(config_file) = ConfigFile::from_path(path.as_str()) else { + return self.burnchain.clone(); + }; + let Ok(config) = Config::from_config_file(config_file) else { + return self.burnchain.clone(); + }; + config.burnchain } + + /// get the up-to-date miner options from the config + /// If the config can't be loaded for some reason, then return the existing config + pub fn get_miner_config(&self) -> MinerConfig { + let Some(path) = &self.config_path else { + return self.miner.clone(); + }; + let Ok(config_file) = ConfigFile::from_path(path.as_str()) else { + return self.miner.clone(); + }; + let Ok(config) = Config::from_config_file(config_file) else { + return self.miner.clone(); + }; + return config.miner; + } + /// Apply any test settings to this burnchain config struct fn apply_test_settings(&self, burnchain: &mut Burnchain) { if self.burnchain.get_bitcoin_network().1 == BitcoinNetworkType::Mainnet { @@ -898,7 
+922,6 @@ impl Config { let miner_default_config = MinerConfig::default(); let miner = match config_file.miner { Some(ref miner) => MinerConfig { - min_tx_fee: miner.min_tx_fee.unwrap_or(miner_default_config.min_tx_fee), first_attempt_time_ms: miner .first_attempt_time_ms .unwrap_or(miner_default_config.first_attempt_time_ms), @@ -926,6 +949,52 @@ impl Config { unprocessed_block_deadline_secs: miner .unprocessed_block_deadline_secs .unwrap_or(miner_default_config.unprocessed_block_deadline_secs), + min_tx_count: miner.min_tx_count.unwrap_or(0), + only_increase_tx_count: miner.only_increase_tx_count.unwrap_or(false), + unconfirmed_commits_helper: miner.unconfirmed_commits_helper.clone(), + target_win_probability: miner.target_win_probability.unwrap_or(0.0), + activated_vrf_key_path: miner.activated_vrf_key_path.clone(), + fast_rampup: miner.fast_rampup.unwrap_or(true), + underperform_stop_threshold: miner.underperform_stop_threshold, + txs_to_consider: { + if let Some(txs_to_consider) = &miner.txs_to_consider { + txs_to_consider + .split(",") + .map( + |txs_to_consider_str| match str::parse(txs_to_consider_str) { + Ok(txtype) => txtype, + Err(e) => { + panic!( + "could not parse '{}': {}", + &txs_to_consider_str, &e + ); + } + }, + ) + .collect() + } else { + MemPoolWalkTxTypes::all() + } + }, + filter_origins: { + if let Some(filter_origins) = &miner.filter_origins { + filter_origins + .split(",") + .map(|origin_str| match StacksAddress::from_string(origin_str) { + Some(addr) => addr, + None => { + panic!( + "could not parse '{}' into a Stacks address", + origin_str + ); + } + }) + .collect() + } else { + HashSet::new() + } + }, + max_reorg_depth: miner.max_reorg_depth.unwrap_or(3), }, None => miner_default_config, }; @@ -1301,34 +1370,47 @@ impl Config { microblocks: bool, miner_status: Arc>, ) -> BlockBuilderSettings { + let miner_config = self.get_miner_config(); BlockBuilderSettings { max_miner_time_ms: if microblocks { - self.miner.microblock_attempt_time_ms + 
miner_config.microblock_attempt_time_ms } else if attempt <= 1 { // first attempt to mine a block -- do so right away - self.miner.first_attempt_time_ms + miner_config.first_attempt_time_ms } else { // second or later attempt to mine a block -- give it some time - self.miner.subsequent_attempt_time_ms + miner_config.subsequent_attempt_time_ms }, mempool_settings: MemPoolWalkSettings { - min_tx_fee: self.miner.min_tx_fee, max_walk_time_ms: if microblocks { - self.miner.microblock_attempt_time_ms + miner_config.microblock_attempt_time_ms } else if attempt <= 1 { // first attempt to mine a block -- do so right away - self.miner.first_attempt_time_ms + miner_config.first_attempt_time_ms } else { // second or later attempt to mine a block -- give it some time - self.miner.subsequent_attempt_time_ms + miner_config.subsequent_attempt_time_ms }, - consider_no_estimate_tx_prob: self.miner.probability_pick_no_estimate_tx, - nonce_cache_size: self.miner.nonce_cache_size, - candidate_retry_cache_size: self.miner.candidate_retry_cache_size, + consider_no_estimate_tx_prob: miner_config.probability_pick_no_estimate_tx, + nonce_cache_size: miner_config.nonce_cache_size, + candidate_retry_cache_size: miner_config.candidate_retry_cache_size, + txs_to_consider: miner_config.txs_to_consider, + filter_origins: miner_config.filter_origins, }, miner_status, } } + + pub fn get_miner_stats(&self) -> Option { + let miner_config = self.get_miner_config(); + if let Some(unconfirmed_commits_helper) = miner_config.unconfirmed_commits_helper.as_ref() { + let miner_stats = MinerStats { + unconfirmed_commits_helper: unconfirmed_commits_helper.clone(), + }; + return Some(miner_stats); + } + None + } } impl std::default::Default for Config { @@ -1917,9 +1999,8 @@ impl NodeConfig { } } -#[derive(Clone, Debug, Default)] +#[derive(Clone, Debug, Default, PartialEq)] pub struct MinerConfig { - pub min_tx_fee: u64, pub first_attempt_time_ms: u64, pub subsequent_attempt_time_ms: u64, pub 
microblock_attempt_time_ms: u64, @@ -1933,22 +2014,58 @@ pub struct MinerConfig { pub nonce_cache_size: u64, pub candidate_retry_cache_size: u64, pub unprocessed_block_deadline_secs: u64, + /// minimum number of transactions that must be in a block if we're going to replace a pending + /// block-commit with a new block-commit + pub min_tx_count: u64, + /// Only allow a block's tx count to increase across RBFs. + pub only_increase_tx_count: bool, + /// Path to a script that prints out all unconfirmed block-commits for a list of addresses + pub unconfirmed_commits_helper: Option, + /// Targeted win probability for this miner. Used to deduce when to stop trying to mine. + pub target_win_probability: f64, + /// Path to a serialized RegisteredKey struct, which points to an already-registered VRF key + /// (so we don't have to go make a new one) + pub activated_vrf_key_path: Option, + /// When estimating win probability, whether or not to use the assumed win rate 6+ blocks from + /// now (true), or the current win rate (false) + pub fast_rampup: bool, + /// Number of Bitcoin blocks which must pass where the boosted+neutrals are a minority, at which + /// point the miner will stop trying. + pub underperform_stop_threshold: Option, + /// Kinds of transactions to consider from the mempool. This is used by boosted and neutral + /// miners to push past averse fee estimations. + pub txs_to_consider: HashSet, + /// Origin addresses to whitelist when doing a mempool walk. This is used by boosted and + /// neutral miners to push transactions through that are important to them. + pub filter_origins: HashSet, + /// When selecting the "nicest" tip, do not consider tips that are more than this many blocks + /// behind the highest tip. 
+ pub max_reorg_depth: u64, } impl MinerConfig { pub fn default() -> MinerConfig { MinerConfig { - min_tx_fee: 1, - first_attempt_time_ms: 5_000, - subsequent_attempt_time_ms: 30_000, + first_attempt_time_ms: 10, + subsequent_attempt_time_ms: 120_000, microblock_attempt_time_ms: 30_000, probability_pick_no_estimate_tx: 5, block_reward_recipient: None, segwit: false, wait_for_block_download: true, - nonce_cache_size: 10_000, - candidate_retry_cache_size: 10_000, + nonce_cache_size: 1024 * 1024, + candidate_retry_cache_size: 1024 * 1024, unprocessed_block_deadline_secs: 30, + min_tx_count: 0, + only_increase_tx_count: false, + unconfirmed_commits_helper: None, + target_win_probability: 0.0, + activated_vrf_key_path: None, + fast_rampup: false, + underperform_stop_threshold: None, + txs_to_consider: MemPoolWalkTxTypes::all(), + filter_origins: HashSet::new(), + max_reorg_depth: 3, } } } @@ -2043,7 +2160,6 @@ pub struct FeeEstimationConfigFile { #[derive(Clone, Deserialize, Default, Debug)] pub struct MinerConfigFile { - pub min_tx_fee: Option, pub first_attempt_time_ms: Option, pub subsequent_attempt_time_ms: Option, pub microblock_attempt_time_ms: Option, @@ -2053,6 +2169,16 @@ pub struct MinerConfigFile { pub nonce_cache_size: Option, pub candidate_retry_cache_size: Option, pub unprocessed_block_deadline_secs: Option, + pub min_tx_count: Option, + pub only_increase_tx_count: Option, + pub unconfirmed_commits_helper: Option, + pub target_win_probability: Option, + pub activated_vrf_key_path: Option, + pub fast_rampup: Option, + pub underperform_stop_threshold: Option, + pub txs_to_consider: Option, + pub filter_origins: Option, + pub max_reorg_depth: Option, } #[derive(Clone, Deserialize, Default, Debug)] diff --git a/testnet/stacks-node/src/main.rs b/testnet/stacks-node/src/main.rs index 33f1214dc9..6495beab74 100644 --- a/testnet/stacks-node/src/main.rs +++ b/testnet/stacks-node/src/main.rs @@ -24,6 +24,7 @@ use stacks_common::util::hash::hex_bytes; pub mod 
monitoring; pub mod burnchains; +pub mod chain_data; pub mod config; pub mod event_dispatcher; pub mod genesis_data; @@ -35,11 +36,17 @@ pub mod run_loop; pub mod syncctl; pub mod tenure; +use std::collections::HashMap; use std::convert::TryInto; use std::{env, panic, process}; use backtrace::Backtrace; use pico_args::Arguments; +use stacks::chainstate::burn::db::sortdb::SortitionDB; +use stacks::chainstate::burn::operations::leader_block_commit::RewardSetInfo; +use stacks::chainstate::coordinator::{get_next_recipients, OnChainRewardSetProvider}; +use stacks::chainstate::stacks::address::PoxAddress; +use stacks::chainstate::stacks::db::StacksChainState; pub use self::burnchains::{ BitcoinRegtestController, BurnchainController, BurnchainTip, MocknetController, @@ -50,6 +57,212 @@ pub use self::keychain::Keychain; pub use self::node::{ChainTip, Node}; pub use self::run_loop::{helium, neon}; pub use self::tenure::Tenure; +use crate::chain_data::MinerStats; +use crate::neon_node::{BlockMinerThread, TipCandidate}; + +/// Implementation of `pick_best_tip` CLI option +fn cli_pick_best_tip(config_path: &str, at_stacks_height: Option) -> TipCandidate { + info!("Loading config at path {}", config_path); + let config = match ConfigFile::from_path(config_path) { + Ok(config_file) => Config::from_config_file(config_file).unwrap(), + Err(e) => { + warn!("Invalid config file: {}", e); + process::exit(1); + } + }; + let burn_db_path = config.get_burn_db_file_path(); + let stacks_chainstate_path = config.get_chainstate_path_str(); + let burnchain = config.get_burnchain(); + let (mut chainstate, _) = StacksChainState::open( + config.is_mainnet(), + config.burnchain.chain_id, + &stacks_chainstate_path, + Some(config.node.get_marf_opts()), + ) + .unwrap(); + let mut sortdb = + SortitionDB::open(&burn_db_path, false, burnchain.pox_constants.clone()).unwrap(); + + let max_depth = config.miner.max_reorg_depth; + + // There could be more than one possible chain tip. Go find them. 
+ let stacks_tips = BlockMinerThread::load_candidate_tips( + &mut sortdb, + &mut chainstate, + max_depth, + at_stacks_height, + ); + + let best_tip = BlockMinerThread::inner_pick_best_tip(stacks_tips, HashMap::new()).unwrap(); + best_tip +} + +/// Implementation of `get_miner_spend` CLI option +fn cli_get_miner_spend( + config_path: &str, + mine_start: Option, + at_burnchain_height: Option, +) -> u64 { + info!("Loading config at path {}", config_path); + let config = match ConfigFile::from_path(&config_path) { + Ok(config_file) => Config::from_config_file(config_file).unwrap(), + Err(e) => { + warn!("Invalid config file: {}", e); + process::exit(1); + } + }; + let keychain = Keychain::default(config.node.seed.clone()); + let burn_db_path = config.get_burn_db_file_path(); + let stacks_chainstate_path = config.get_chainstate_path_str(); + let burnchain = config.get_burnchain(); + let (mut chainstate, _) = StacksChainState::open( + config.is_mainnet(), + config.burnchain.chain_id, + &stacks_chainstate_path, + Some(config.node.get_marf_opts()), + ) + .unwrap(); + let mut sortdb = + SortitionDB::open(&burn_db_path, true, burnchain.pox_constants.clone()).unwrap(); + let tip = if let Some(at_burnchain_height) = at_burnchain_height { + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + let ih = sortdb.index_handle(&tip.sortition_id); + ih.get_block_snapshot_by_height(at_burnchain_height) + .unwrap() + .unwrap() + } else { + SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap() + }; + + let recipients = get_next_recipients( + &tip, + &mut chainstate, + &mut sortdb, + &burnchain, + &OnChainRewardSetProvider(), + config.node.always_use_affirmation_maps, + ) + .unwrap(); + + let commit_outs = if !burnchain.is_in_prepare_phase(tip.block_height + 1) { + RewardSetInfo::into_commit_outs(recipients, config.is_mainnet()) + } else { + vec![PoxAddress::standard_burn_address(config.is_mainnet())] + }; + + let spend_amount = 
BlockMinerThread::get_mining_spend_amount( + &config, + &keychain, + &burnchain, + &mut sortdb, + &commit_outs, + mine_start.unwrap_or(tip.block_height), + at_burnchain_height, + |burn_block_height| { + let sortdb = + SortitionDB::open(&burn_db_path, true, burnchain.pox_constants.clone()).unwrap(); + let Some(miner_stats) = config.get_miner_stats() else { + return 0.0; + }; + let Ok(active_miners_and_commits) = + MinerStats::get_active_miners(&sortdb, Some(burn_block_height)).map_err(|e| { + warn!("Failed to get active miners: {:?}", &e); + e + }) + else { + return 0.0; + }; + if active_miners_and_commits.len() == 0 { + warn!("No active miners detected; using config file burn_fee_cap"); + return 0.0; + } + + let active_miners: Vec<_> = active_miners_and_commits + .iter() + .map(|(miner, _cmt)| miner.as_str()) + .collect(); + + info!("Active miners: {:?}", &active_miners); + + let Ok(unconfirmed_block_commits) = miner_stats + .get_unconfirmed_commits(burn_block_height + 1, &active_miners) + .map_err(|e| { + warn!("Failed to find unconfirmed block-commits: {}", &e); + e + }) + else { + return 0.0; + }; + + let unconfirmed_miners_and_amounts: Vec<(String, u64)> = unconfirmed_block_commits + .iter() + .map(|cmt| (format!("{}", &cmt.apparent_sender), cmt.burn_fee)) + .collect(); + + info!( + "Found unconfirmed block-commits: {:?}", + &unconfirmed_miners_and_amounts + ); + + let (spend_dist, _total_spend) = MinerStats::get_spend_distribution( + &active_miners_and_commits, + &unconfirmed_block_commits, + &commit_outs, + ); + let win_probs = if config.miner.fast_rampup { + // look at spends 6+ blocks in the future + let win_probs = MinerStats::get_future_win_distribution( + &active_miners_and_commits, + &unconfirmed_block_commits, + &commit_outs, + ); + win_probs + } else { + // look at the current spends + let Ok(unconfirmed_burn_dist) = miner_stats + .get_unconfirmed_burn_distribution( + &burnchain, + &sortdb, + &active_miners_and_commits, + unconfirmed_block_commits, + 
&commit_outs, + at_burnchain_height, + ) + .map_err(|e| { + warn!("Failed to get unconfirmed burn distribution: {:?}", &e); + e + }) + else { + return 0.0; + }; + + let win_probs = MinerStats::burn_dist_to_prob_dist(&unconfirmed_burn_dist); + win_probs + }; + + info!("Unconfirmed spend distribution: {:?}", &spend_dist); + info!( + "Unconfirmed win probabilities (fast_rampup={}): {:?}", + config.miner.fast_rampup, &win_probs + ); + + let miner_addrs = BlockMinerThread::get_miner_addrs(&config, &keychain); + let win_prob = miner_addrs + .iter() + .find_map(|x| win_probs.get(x)) + .copied() + .unwrap_or(0.0); + + info!( + "This miner's win probability at {} is {}", + tip.block_height, &win_prob + ); + win_prob + }, + |_burn_block_height, _win_prob| {}, + ); + spend_amount +} fn main() { panic::set_hook(Box::new(|panic_info| { @@ -91,24 +304,24 @@ fn main() { let config_file = match subcommand.as_str() { "mocknet" => { - args.finish().unwrap(); + args.finish(); ConfigFile::mocknet() } "helium" => { - args.finish().unwrap(); + args.finish(); ConfigFile::helium() } "testnet" => { - args.finish().unwrap(); + args.finish(); ConfigFile::xenon() } "mainnet" => { - args.finish().unwrap(); + args.finish(); ConfigFile::mainnet() } "check-config" => { let config_path: String = args.value_from_str("--config").unwrap(); - args.finish().unwrap(); + args.finish(); info!("Loading config at path {}", config_path); let config_file = match ConfigFile::from_path(&config_path) { Ok(config_file) => { @@ -133,7 +346,7 @@ fn main() { } "start" => { let config_path: String = args.value_from_str("--config").unwrap(); - args.finish().unwrap(); + args.finish(); info!("Loading config at path {}", config_path); match ConfigFile::from_path(&config_path) { Ok(config_file) => config_file, @@ -154,14 +367,15 @@ fn main() { let conf = Config::from_config_file(ConfigFile::from_path(&config_path).unwrap()) .unwrap(); - args.finish().unwrap(); + args.finish(); conf.node.seed } else { - let free_args = 
args.free().unwrap(); + let free_args = args.finish(); let seed_hex = free_args .first() .expect("`wif-for-seed` must be passed either a config file via the `--config` flag or a hex seed string"); - hex_bytes(seed_hex).expect("Seed should be a hex encoded string") + hex_bytes(seed_hex.to_str().unwrap()) + .expect("Seed should be a hex encoded string") } }; let keychain = Keychain::default(seed); @@ -175,6 +389,26 @@ fn main() { ); return; } + "pick-best-tip" => { + let config_path: String = args.value_from_str("--config").unwrap(); + let at_stacks_height: Option = + args.opt_value_from_str("--at-stacks-height").unwrap(); + args.finish(); + + let best_tip = cli_pick_best_tip(&config_path, at_stacks_height); + println!("Best tip is {:?}", &best_tip); + process::exit(0); + } + "get-spend-amount" => { + let config_path: String = args.value_from_str("--config").unwrap(); + let at_burnchain_height: Option = + args.opt_value_from_str("--at-bitcoin-height").unwrap(); + args.finish(); + + let spend_amount = cli_get_miner_spend(&config_path, mine_start, at_burnchain_height); + println!("Will spend {}", spend_amount); + process::exit(0); + } _ => { print_help(); return; diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index 517f080cb6..56f777076e 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -1,5 +1,5 @@ // Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation -// Copyright (C) 2020 Stacks Open Internet Foundation +// Copyright (C) 2020-2024 Stacks Open Internet Foundation // // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by @@ -138,20 +138,23 @@ /// /// This file may be refactored in the future into a full-fledged module. 
use std::cmp; -use std::collections::{HashMap, VecDeque}; +use std::cmp::Ordering as CmpOrdering; +use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; use std::convert::{TryFrom, TryInto}; use std::default::Default; +use std::io::{Read, Write}; use std::net::SocketAddr; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::mpsc::{Receiver, SyncSender, TrySendError}; use std::sync::{Arc, Mutex}; use std::thread::JoinHandle; use std::time::Duration; -use std::{mem, thread}; +use std::{fs, mem, thread}; use clarity::vm::ast::ASTRules; use clarity::vm::costs::ExecutionCost; use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; +use stacks::burnchains::bitcoin::address::{BitcoinAddress, LegacyBitcoinAddressType}; use stacks::burnchains::db::BurnchainHeaderReader; use stacks::burnchains::{Burnchain, BurnchainParameters, BurnchainSigner, Txid}; use stacks::chainstate::burn::db::sortdb::SortitionDB; @@ -165,11 +168,12 @@ use stacks::chainstate::burn::{BlockSnapshot, ConsensusHash}; use stacks::chainstate::coordinator::comm::CoordinatorChannels; use stacks::chainstate::coordinator::{get_next_recipients, OnChainRewardSetProvider}; use stacks::chainstate::stacks::address::PoxAddress; +use stacks::chainstate::stacks::db::blocks::StagingBlock; use stacks::chainstate::stacks::db::unconfirmed::UnconfirmedTxMap; use stacks::chainstate::stacks::db::{StacksChainState, StacksHeaderInfo, MINER_REWARD_MATURITY}; use stacks::chainstate::stacks::miner::{ - get_mining_spend_amount, signal_mining_blocked, signal_mining_ready, BlockBuilderSettings, - MinerStatus, StacksMicroblockBuilder, + signal_mining_blocked, signal_mining_ready, BlockBuilderSettings, MinerStatus, + StacksMicroblockBuilder, }; use stacks::chainstate::stacks::{ CoinbasePayload, Error as ChainstateError, StacksBlock, StacksBlockBuilder, StacksBlockHeader, @@ -198,7 +202,7 @@ use stacks_common::types::chainstate::{ StacksPrivateKey, VRFSeed, }; use stacks_common::types::net::PeerAddress; 
-use stacks_common::types::StacksEpochId; +use stacks_common::types::{PublicKey, StacksEpochId}; use stacks_common::util::hash::{to_hex, Hash160, Sha256Sum}; use stacks_common::util::secp256k1::Secp256k1PrivateKey; use stacks_common::util::vrf::{VRFProof, VRFPublicKey}; @@ -209,6 +213,8 @@ use crate::burnchains::bitcoin_regtest_controller::{ addr2str, BitcoinRegtestController, OngoingBlockCommit, }; use crate::burnchains::make_bitcoin_indexer; +use crate::chain_data::MinerStats; +use crate::config::MinerConfig; use crate::run_loop::neon::{Counters, RunLoop}; use crate::run_loop::RegisteredKey; use crate::syncctl::PoxSyncWatchdogComms; @@ -222,7 +228,7 @@ pub const BLOCK_PROCESSOR_STACK_SIZE: usize = 32 * 1024 * 1024; // 32 MB type MinedBlocks = HashMap; /// Result of running the miner thread. It could produce a Stacks block or a microblock. -enum MinerThreadResult { +pub(crate) enum MinerThreadResult { Block( AssembledAnchorBlock, Secp256k1PrivateKey, @@ -238,7 +244,7 @@ enum MinerThreadResult { /// linked to the burnchain and what view(s) the miner had of the burnchain before and after /// completing the block. 
#[derive(Clone)] -struct AssembledAnchorBlock { +pub struct AssembledAnchorBlock { /// Consensus hash of the parent Stacks block parent_consensus_hash: ConsensusHash, /// Burnchain tip's block hash when we finished mining @@ -291,6 +297,15 @@ pub struct Globals { pub should_keep_running: Arc, /// Status of our VRF key registration state (shared between the main thread and the relayer) leader_key_registration_state: Arc>, + /// Last miner config loaded + last_miner_config: Arc>>, + /// burnchain height at which we start mining + start_mining_height: Arc>, + /// estimated winning probability at given bitcoin block heights + estimated_winning_probs: Arc>>, + /// previously-selected best tips + /// maps stacks height to tip candidate + previous_best_tips: Arc>>, } /// Miner chain tip, on top of which to build microblocks @@ -334,6 +349,7 @@ impl Globals { counters: Counters, sync_comms: PoxSyncWatchdogComms, should_keep_running: Arc, + start_mining_height: u64, ) -> Globals { Globals { last_sortition: Arc::new(Mutex::new(None)), @@ -347,6 +363,10 @@ impl Globals { leader_key_registration_state: Arc::new(Mutex::new( LeaderKeyRegistrationState::Inactive, )), + last_miner_config: Arc::new(Mutex::new(None)), + start_mining_height: Arc::new(Mutex::new(start_mining_height)), + estimated_winning_probs: Arc::new(Mutex::new(HashMap::new())), + previous_best_tips: Arc::new(Mutex::new(BTreeMap::new())), } } @@ -486,8 +506,8 @@ impl Globals { &self, burn_block_height: u64, key_registers: Vec, - ) -> bool { - let mut activated = false; + ) -> Option { + let mut activated_key = None; match self.leader_key_registration_state.lock() { Ok(ref mut leader_key_registration_state) => { for op in key_registers.into_iter() { @@ -499,14 +519,17 @@ impl Globals { burn_block_height, txid ); if txid == op.txid { + let active_key = RegisteredKey { + target_block_height, + vrf_public_key: op.public_key, + block_height: op.block_height as u64, + op_vtxindex: op.vtxindex as u32, + }; + 
**leader_key_registration_state = - LeaderKeyRegistrationState::Active(RegisteredKey { - target_block_height, - vrf_public_key: op.public_key, - block_height: op.block_height as u64, - op_vtxindex: op.vtxindex as u32, - }); - activated = true; + LeaderKeyRegistrationState::Active(active_key.clone()); + + activated_key = Some(active_key); } else { debug!( "key_register_op {} does not match our pending op {}", @@ -521,7 +544,126 @@ impl Globals { panic!(); } } - activated + activated_key + } + + /// Directly set the leader key activation state from a saved key + pub fn resume_leader_key(&self, registered_key: RegisteredKey) { + match self.leader_key_registration_state.lock() { + Ok(ref mut leader_key_registration_state) => { + **leader_key_registration_state = LeaderKeyRegistrationState::Active(registered_key) + } + Err(_e) => { + error!("FATAL: failed to lock leader key registration state mutex"); + panic!(); + } + } + } + + /// Get the last miner config loaded + pub fn get_last_miner_config(&self) -> Option { + match self.last_miner_config.lock() { + Ok(last_miner_config) => (*last_miner_config).clone(), + Err(_e) => { + error!("FATAL; failed to lock last miner config"); + panic!(); + } + } + } + + /// Set the last miner config loaded + pub fn set_last_miner_config(&self, miner_config: MinerConfig) { + match self.last_miner_config.lock() { + Ok(ref mut last_miner_config) => **last_miner_config = Some(miner_config), + Err(_e) => { + error!("FATAL; failed to lock last miner config"); + panic!(); + } + } + } + + /// Get the height at which we should start mining + pub fn get_start_mining_height(&self) -> u64 { + match self.start_mining_height.lock() { + Ok(ht) => *ht, + Err(_e) => { + error!("FATAL: failed to lock start_mining_height"); + panic!(); + } + } + } + + /// Set the height at which we started mining. + /// Only takes effect if the current start mining height is 0. 
+ pub fn set_start_mining_height_if_zero(&self, value: u64) { + match self.start_mining_height.lock() { + Ok(ref mut ht) => { + if **ht == 0 { + **ht = value; + } + } + Err(_e) => { + error!("FATAL: failed to lock start_mining_height"); + panic!(); + } + } + } + + /// Record an estimated winning probability + pub fn add_estimated_win_prob(&self, burn_height: u64, win_prob: f64) { + match self.estimated_winning_probs.lock() { + Ok(mut probs) => { + probs.insert(burn_height, win_prob); + } + Err(_e) => { + error!("FATAL: failed to lock estimated_winning_probs"); + panic!(); + } + } + } + + /// Get the estimated winning probability, if we have one + pub fn get_estimated_win_prob(&self, burn_height: u64) -> Option { + match self.estimated_winning_probs.lock() { + Ok(probs) => probs.get(&burn_height).cloned(), + Err(_e) => { + error!("FATAL: failed to lock estimated_winning_probs"); + panic!(); + } + } + } + + /// Record a best-tip + pub fn add_best_tip(&self, stacks_height: u64, tip_candidate: TipCandidate, max_depth: u64) { + match self.previous_best_tips.lock() { + Ok(mut tips) => { + tips.insert(stacks_height, tip_candidate); + let mut stale = vec![]; + for (prev_height, _) in tips.iter() { + if *prev_height + max_depth < stacks_height { + stale.push(*prev_height); + } + } + for height in stale.into_iter() { + tips.remove(&height); + } + } + Err(_e) => { + error!("FATAL: failed to lock previous_best_tips"); + panic!(); + } + } + } + + /// Get a best-tip at a previous height + pub fn get_best_tip(&self, stacks_height: u64) -> Option { + match self.previous_best_tips.lock() { + Ok(tips) => tips.get(&stacks_height).cloned(), + Err(_e) => { + error!("FATAL: failed to lock previous_best_tips"); + panic!(); + } + } } } @@ -735,7 +877,7 @@ pub struct RelayerThread { mined_stacks_block: bool, } -struct BlockMinerThread { +pub(crate) struct BlockMinerThread { /// node config struct config: Config, /// handle to global state @@ -1053,8 +1195,6 @@ impl MicroblockMinerThread { 
#[cfg(any(test, feature = "testing"))] { - use std::fs; - use std::io::Write; use std::path::Path; if let Ok(path) = std::env::var("STACKS_BAD_BLOCKS_DIR") { // record this microblock somewhere @@ -1206,6 +1346,46 @@ impl MicroblockMinerThread { } } +/// Candidate chain tip +#[derive(Debug, Clone, PartialEq)] +pub struct TipCandidate { + pub stacks_height: u64, + pub consensus_hash: ConsensusHash, + pub anchored_block_hash: BlockHeaderHash, + pub parent_consensus_hash: ConsensusHash, + pub parent_anchored_block_hash: BlockHeaderHash, + /// the block's sortition's burnchain height + pub burn_height: u64, + /// the number of Stacks blocks *at the same height* as this one, but from earlier sortitions + /// than `burn_height` + pub num_earlier_siblings: u64, +} + +impl TipCandidate { + pub fn id(&self) -> StacksBlockId { + StacksBlockId::new(&self.consensus_hash, &self.anchored_block_hash) + } + + pub fn parent_id(&self) -> StacksBlockId { + StacksBlockId::new( + &self.parent_consensus_hash, + &self.parent_anchored_block_hash, + ) + } + + pub fn new(tip: StagingBlock, burn_height: u64) -> Self { + Self { + stacks_height: tip.height, + consensus_hash: tip.consensus_hash, + anchored_block_hash: tip.anchored_block_hash, + parent_consensus_hash: tip.parent_consensus_hash, + parent_anchored_block_hash: tip.parent_anchored_block_hash, + burn_height, + num_earlier_siblings: 0, + } + } +} + impl BlockMinerThread { /// Instantiate the miner thread from its parent RelayerThread pub fn from_relayer_thread( @@ -1228,11 +1408,12 @@ impl BlockMinerThread { /// Get the coinbase recipient address, if set in the config and if allowed in this epoch fn get_coinbase_recipient(&self, epoch_id: StacksEpochId) -> Option { - if epoch_id < StacksEpochId::Epoch21 && self.config.miner.block_reward_recipient.is_some() { + let miner_config = self.config.get_miner_config(); + if epoch_id < StacksEpochId::Epoch21 && miner_config.block_reward_recipient.is_some() { warn!("Coinbase pay-to-contract is 
not supported in the current epoch"); None } else { - self.config.miner.block_reward_recipient.clone() + miner_config.block_reward_recipient.clone() } } @@ -1343,6 +1524,320 @@ impl BlockMinerThread { ret } + /// Load all candidate tips upon which to build. This is all Stacks blocks whose heights are + /// less than or equal to at `at_stacks_height` (or the canonical chain tip height, if not given), + /// but greater than or equal to this end height minus `max_depth`. + /// Returns the list of all Stacks blocks up to max_depth blocks beneath it. + /// The blocks will be sorted first by stacks height, and then by burnchain height + pub(crate) fn load_candidate_tips( + burn_db: &mut SortitionDB, + chain_state: &mut StacksChainState, + max_depth: u64, + at_stacks_height: Option, + ) -> Vec { + let stacks_tips = if let Some(start_height) = at_stacks_height { + chain_state + .get_stacks_chain_tips_at_height(start_height) + .expect("FATAL: could not query chain tips at start height") + } else { + chain_state + .get_stacks_chain_tips(burn_db) + .expect("FATAL: could not query chain tips") + }; + + if stacks_tips.len() == 0 { + return vec![]; + } + + let mut considered = HashSet::new(); + let mut candidates = vec![]; + let end_height = stacks_tips[0].height; + + for cur_height in end_height.saturating_sub(max_depth)..=end_height { + let stacks_tips = chain_state + .get_stacks_chain_tips_at_height(cur_height) + .expect("FATAL: could not query chain tips at height"); + + for tip in stacks_tips { + let index_block_hash = + StacksBlockId::new(&tip.consensus_hash, &tip.anchored_block_hash); + + if !considered.contains(&index_block_hash) { + let burn_height = burn_db + .get_consensus_hash_height(&tip.consensus_hash) + .expect("FATAL: could not query burnchain block height") + .expect("FATAL: no burnchain block height for Stacks tip"); + let candidate = TipCandidate::new(tip, burn_height); + candidates.push(candidate); + considered.insert(index_block_hash); + } + } + } + 
Self::sort_and_populate_candidates(candidates) + } + + /// Put all tip candidates in order by stacks height, breaking ties with burnchain height. + /// Also, count up the number of earliersiblings each tip has -- i.e. the number of stacks + /// blocks that have the same height, but a later burnchain sortition. + pub(crate) fn sort_and_populate_candidates( + mut candidates: Vec, + ) -> Vec { + if candidates.len() == 0 { + return candidates; + } + candidates.sort_by(|tip1, tip2| { + // stacks block height, then burnchain block height + let ord = tip1.stacks_height.cmp(&tip2.stacks_height); + if ord == CmpOrdering::Equal { + return tip1.burn_height.cmp(&tip2.burn_height); + } + ord + }); + + // calculate the number of earlier siblings for each block. + // this is the number of stacks blocks at the same height, but later burnchain heights. + let mut idx = 0; + let mut cur_stacks_height = candidates[idx].stacks_height; + let mut num_siblings = 0; + loop { + idx += 1; + if idx >= candidates.len() { + break; + } + if cur_stacks_height == candidates[idx].stacks_height { + // same stacks height, so this block has one more earlier sibling than the last + num_siblings += 1; + candidates[idx].num_earlier_siblings = num_siblings; + } else { + // new stacks height, so no earlier siblings + num_siblings = 0; + cur_stacks_height = candidates[idx].stacks_height; + candidates[idx].num_earlier_siblings = 0; + } + } + + candidates + } + + /// Select the best tip to mine the next block on. Potential tips are all + /// leaf nodes where the Stacks block height is <= the max height - + /// max_reorg_depth. Each potential tip is then scored based on the amount + /// of orphans that its chain has caused -- that is, the number of orphans + /// that the tip _and all of its ancestors_ (up to `max_depth`) created. + /// The tip with the lowest score is composed of blocks that collectively made the fewest + /// orphans, and is thus the "nicest" chain with the least orphaning. 
This is the tip that is + /// selected. + pub fn pick_best_tip( + globals: &Globals, + config: &Config, + burn_db: &mut SortitionDB, + chain_state: &mut StacksChainState, + at_stacks_height: Option, + ) -> Option { + info!("Picking best Stacks tip"); + let miner_config = config.get_miner_config(); + let max_depth = miner_config.max_reorg_depth; + + // There could be more than one possible chain tip. Go find them. + let stacks_tips = + Self::load_candidate_tips(burn_db, chain_state, max_depth, at_stacks_height); + + let mut previous_best_tips = HashMap::new(); + for tip in stacks_tips.iter() { + let Some(prev_best_tip) = globals.get_best_tip(tip.stacks_height) else { + continue; + }; + previous_best_tips.insert(tip.stacks_height, prev_best_tip); + } + + let best_tip_opt = Self::inner_pick_best_tip(stacks_tips, previous_best_tips); + if let Some(best_tip) = best_tip_opt.as_ref() { + globals.add_best_tip(best_tip.stacks_height, best_tip.clone(), max_depth); + } else { + // no best-tip found; revert to old tie-breaker logic + info!("No best-tips found; using old tie-breaking logic"); + return chain_state + .get_stacks_chain_tip(burn_db) + .expect("FATAL: could not load chain tip") + .map(|staging_block| { + let burn_height = burn_db + .get_consensus_hash_height(&staging_block.consensus_hash) + .expect("FATAL: could not query burnchain block height") + .expect("FATAL: no burnchain block height for Stacks tip"); + TipCandidate::new(staging_block, burn_height) + }); + } + best_tip_opt + } + + /// Given a list of sorted candidate tips, pick the best one. See `Self::pick_best_tip()`. + /// Takes the list of stacks tips that are eligible to be built on, and a map of + /// previously-chosen best tips (so if we chose a tip in the past, we keep confirming it, even + /// if subsequent stacks blocks show up). 
The previous best tips should be from recent Stacks + /// heights; it's important that older best-tips are forgotten in order to ensure that miners + /// will eventually (e.g. after `max_reorg_depth` Stacks blocks pass) stop trying to confirm a + /// now-orphaned previously-chosen best-tip. If there are multiple best-tips that conflict in + /// `previosu_best_tips`, then only the highest one which the leaf could confirm will be + /// considered (since the node updates its understanding of the best-tip on each RunTenure). + pub(crate) fn inner_pick_best_tip( + stacks_tips: Vec, + previous_best_tips: HashMap, + ) -> Option { + // identify leaf tips -- i.e. blocks with no children + let parent_consensus_hashes: HashSet<_> = stacks_tips + .iter() + .map(|x| x.parent_consensus_hash.clone()) + .collect(); + + let mut leaf_tips: Vec<_> = stacks_tips + .iter() + .filter(|x| !parent_consensus_hashes.contains(&x.consensus_hash)) + .collect(); + + if leaf_tips.len() == 0 { + return None; + } + + // Make scoring deterministic in the case of a tie. + // Prefer leafs that were mined earlier on the burnchain, + // but which pass through previously-determined best tips. + leaf_tips.sort_by(|tip1, tip2| { + // stacks block height, then burnchain block height + let ord = tip1.stacks_height.cmp(&tip2.stacks_height); + if ord == CmpOrdering::Equal { + return tip1.burn_height.cmp(&tip2.burn_height); + } + ord + }); + + let mut scores = BTreeMap::new(); + for (i, leaf_tip) in leaf_tips.iter().enumerate() { + let leaf_id = leaf_tip.id(); + // Score each leaf tip as the number of preceding Stacks blocks that are _not_ an + // ancestor. Because stacks_tips are in order by stacks height, a linear scan of this + // list will allow us to match all ancestors in the last max_depth Stacks blocks. + // `ancestor_ptr` tracks the next expected ancestor. 
+ let mut ancestor_ptr = leaf_tip.parent_id(); + let mut score: u64 = 0; + let mut score_summaries = vec![]; + + // find the highest stacks_tip we must confirm + let mut must_confirm = None; + for tip in stacks_tips.iter().rev() { + if let Some(prev_best_tip) = previous_best_tips.get(&tip.stacks_height) { + if leaf_id != prev_best_tip.id() { + // the `ancestor_ptr` must pass through this prior best-tip + must_confirm = Some(prev_best_tip.clone()); + break; + } + } + } + + for tip in stacks_tips.iter().rev() { + if let Some(required_ancestor) = must_confirm.as_ref() { + if tip.stacks_height < required_ancestor.stacks_height + && leaf_tip.stacks_height >= required_ancestor.stacks_height + { + // This leaf does not confirm a previous-best-tip, so assign it the + // worst-possible score. + info!("Tip #{} {}/{} at {}:{} conflicts with a previous best-tip {}/{} at {}:{}", + i, + &leaf_tip.consensus_hash, + &leaf_tip.anchored_block_hash, + leaf_tip.burn_height, + leaf_tip.stacks_height, + &required_ancestor.consensus_hash, + &required_ancestor.anchored_block_hash, + required_ancestor.burn_height, + required_ancestor.stacks_height + ); + score = u64::MAX; + score_summaries.push(format!("{} (best-tip reorged)", u64::MAX)); + break; + } + } + if tip.id() == leaf_id { + // we can't orphan ourselves + continue; + } + if leaf_tip.stacks_height < tip.stacks_height { + // this tip is further along than leaf_tip, so canonicalizing leaf_tip would + // orphan `tip.stacks_height - leaf_tip.stacks_height` blocks. + score = score.saturating_add(tip.stacks_height - leaf_tip.stacks_height); + score_summaries.push(format!( + "{} (stx height diff)", + tip.stacks_height - leaf_tip.stacks_height + )); + } else if leaf_tip.stacks_height == tip.stacks_height + && leaf_tip.burn_height > tip.burn_height + { + // this tip has the same stacks height as the leaf, but its sortition happened + // earlier. 
This means that the leaf is trying to orphan this block and all + // blocks sortition'ed up to this leaf. The miner should have instead tried to + // confirm this existing tip, instead of mine a sibling. + score = score.saturating_add(tip.num_earlier_siblings + 1); + score_summaries.push(format!("{} (uncles)", tip.num_earlier_siblings + 1)); + } + if tip.id() == ancestor_ptr { + // did we confirm a previous best-tip? If so, then clear this + if let Some(required_ancestor) = must_confirm.take() { + if required_ancestor.id() != tip.id() { + // did not confirm, so restoroe + must_confirm = Some(required_ancestor); + } + } + + // this stacks tip is the next ancestor. However, that ancestor may have + // earlier-sortition'ed siblings that confirming this tip would orphan, so count those. + ancestor_ptr = tip.parent_id(); + score = score.saturating_add(tip.num_earlier_siblings); + score_summaries.push(format!("{} (earlier sibs)", tip.num_earlier_siblings)); + } else { + // this stacks tip is not an ancestor, and would be orphaned if leaf_tip is + // canonical. 
+ score = score.saturating_add(1); + score_summaries.push(format!("{} (non-ancestor)", 1)); + } + } + + info!( + "Tip #{} {}/{} at {}:{} has score {} ({})", + i, + &leaf_tip.consensus_hash, + &leaf_tip.anchored_block_hash, + leaf_tip.burn_height, + leaf_tip.stacks_height, + score, + score_summaries.join(" + ").to_string() + ); + if score < u64::MAX { + scores.insert(i, score); + } + } + + if scores.len() == 0 { + // revert to prior tie-breaking scheme + return None; + } + + // The lowest score is the "nicest" tip (least amount of orphaning) + let best_tip_idx = scores + .iter() + .min_by_key(|(_, score)| *score) + .expect("FATAL: candidates should not be empty here") + .0; + + let best_tip = leaf_tips + .get(*best_tip_idx) + .expect("FATAL: candidates should not be empty"); + + info!( + "Best tip is #{} {}/{}", + best_tip_idx, &best_tip.consensus_hash, &best_tip.anchored_block_hash + ); + Some((*best_tip).clone()) + } + /// Load up the parent block info for mining. /// If there's no parent because this is the first block, then return the genesis block's info. /// If we can't find the parent in the DB but we expect one, return None. 
@@ -1350,22 +1845,25 @@ impl BlockMinerThread { &self, burn_db: &mut SortitionDB, chain_state: &mut StacksChainState, - ) -> Option { + ) -> (Option, bool) { if let Some(stacks_tip) = chain_state .get_stacks_chain_tip(burn_db) .expect("FATAL: could not query chain tip") { + let best_stacks_tip = + Self::pick_best_tip(&self.globals, &self.config, burn_db, chain_state, None) + .expect("FATAL: no best chain tip"); let miner_address = self .keychain .origin_address(self.config.is_mainnet()) .unwrap(); - match ParentStacksBlockInfo::lookup( + let parent_info = match ParentStacksBlockInfo::lookup( chain_state, burn_db, &self.burn_block, miner_address, - &stacks_tip.consensus_hash, - &stacks_tip.anchored_block_hash, + &best_stacks_tip.consensus_hash, + &best_stacks_tip.anchored_block_hash, ) { Ok(parent_info) => Some(parent_info), Err(Error::BurnchainTipChanged) => { @@ -1373,7 +1871,16 @@ impl BlockMinerThread { None } Err(..) => None, + }; + if parent_info.is_none() { + warn!( + "No parent for best-tip {}/{}", + &best_stacks_tip.consensus_hash, &best_stacks_tip.anchored_block_hash + ); } + let canonical = best_stacks_tip.consensus_hash == stacks_tip.consensus_hash + && best_stacks_tip.anchored_block_hash == stacks_tip.anchored_block_hash; + (parent_info, canonical) } else { debug!("No Stacks chain tip known, will return a genesis block"); let (network, _) = self.config.burnchain.get_bitcoin_network(); @@ -1387,26 +1894,30 @@ impl BlockMinerThread { burnchain_params.first_block_timestamp.into(), ); - Some(ParentStacksBlockInfo { - stacks_parent_header: chain_tip.metadata, - parent_consensus_hash: FIRST_BURNCHAIN_CONSENSUS_HASH.clone(), - parent_block_burn_height: 0, - parent_block_total_burn: 0, - parent_winning_vtxindex: 0, - coinbase_nonce: 0, - }) + ( + Some(ParentStacksBlockInfo { + stacks_parent_header: chain_tip.metadata, + parent_consensus_hash: FIRST_BURNCHAIN_CONSENSUS_HASH.clone(), + parent_block_burn_height: 0, + parent_block_total_burn: 0, + 
parent_winning_vtxindex: 0, + coinbase_nonce: 0, + }), + true, + ) } } /// Determine which attempt this will be when mining a block, and whether or not an attempt /// should even be made. - /// Returns Some(attempt) if we should attempt to mine (and what attempt it will be) + /// Returns Some(attempt, max-txs) if we should attempt to mine (and what attempt it will be) /// Returns None if we should not mine. fn get_mine_attempt( &self, chain_state: &StacksChainState, parent_block_info: &ParentStacksBlockInfo, - ) -> Option { + force: bool, + ) -> Option<(u64, u64)> { let parent_consensus_hash = &parent_block_info.parent_consensus_hash; let stacks_parent_header = &parent_block_info.stacks_parent_header; let parent_block_burn_height = parent_block_info.parent_block_burn_height; @@ -1415,22 +1926,28 @@ impl BlockMinerThread { Self::find_inflight_mined_blocks(self.burn_block.block_height, &self.last_mined_blocks); // has the tip changed from our previously-mined block for this epoch? - let attempt = if last_mined_blocks.len() <= 1 { + let (attempt, max_txs) = if last_mined_blocks.len() <= 1 { // always mine if we've not mined a block for this epoch yet, or // if we've mined just one attempt, unconditionally try again (so we // can use `subsequent_miner_time_ms` in this attempt) if last_mined_blocks.len() == 1 { - debug!("Have only attempted one block; unconditionally trying again"); + info!("Have only attempted one block; unconditionally trying again"); + } + let attempt = last_mined_blocks.len() as u64 + 1; + let mut max_txs = 0; + for last_mined_block in last_mined_blocks.iter() { + max_txs = cmp::max(max_txs, last_mined_block.anchored_block.txs.len()); } - last_mined_blocks.len() as u64 + 1 + (attempt, max_txs) } else { let mut best_attempt = 0; - debug!( + let mut max_txs = 0; + info!( "Consider {} in-flight Stacks tip(s)", &last_mined_blocks.len() ); for prev_block in last_mined_blocks.iter() { - debug!( + info!( "Consider in-flight block {} on Stacks tip {}/{} in 
{} with {} txs", &prev_block.anchored_block.block_hash(), &prev_block.parent_consensus_hash, @@ -1438,6 +1955,7 @@ impl BlockMinerThread { &prev_block.my_burn_hash, &prev_block.anchored_block.txs.len() ); + max_txs = cmp::max(max_txs, prev_block.anchored_block.txs.len()); if prev_block.anchored_block.txs.len() == 1 && prev_block.attempt == 1 { // Don't let the fact that we've built an empty block during this sortition @@ -1473,47 +1991,51 @@ impl BlockMinerThread { as usize) + 1) { - // the chain tip hasn't changed since we attempted to build a block. Use what we - // already have. - debug!("Relayer: Stacks tip is unchanged since we last tried to mine a block off of {}/{} at height {} with {} txs, in {} at burn height {}, and no new microblocks ({} <= {} + 1)", - &prev_block.parent_consensus_hash, &prev_block.anchored_block.header.parent_block, prev_block.anchored_block.header.total_work.work, - prev_block.anchored_block.txs.len(), prev_block.my_burn_hash, parent_block_burn_height, stream.len(), prev_block.anchored_block.header.parent_microblock_sequence); - - return None; + if !force { + // the chain tip hasn't changed since we attempted to build a block. Use what we + // already have. + info!("Relayer: Stacks tip is unchanged since we last tried to mine a block off of {}/{} at height {} with {} txs, in {} at burn height {}, and no new microblocks ({} <= {} + 1)", + &prev_block.parent_consensus_hash, &prev_block.anchored_block.header.parent_block, prev_block.anchored_block.header.total_work.work, + prev_block.anchored_block.txs.len(), prev_block.my_burn_hash, parent_block_burn_height, stream.len(), prev_block.anchored_block.header.parent_microblock_sequence); + + return None; + } } else { // there are new microblocks! 
// TODO: only consider rebuilding our anchored block if we (a) have // time, and (b) the new microblocks are worth more than the new BTC // fee minus the old BTC fee - debug!("Relayer: Stacks tip is unchanged since we last tried to mine a block off of {}/{} at height {} with {} txs, in {} at burn height {}, but there are new microblocks ({} > {} + 1)", + info!("Relayer: Stacks tip is unchanged since we last tried to mine a block off of {}/{} at height {} with {} txs, in {} at burn height {}, but there are new microblocks ({} > {} + 1)", &prev_block.parent_consensus_hash, &prev_block.anchored_block.header.parent_block, prev_block.anchored_block.header.total_work.work, prev_block.anchored_block.txs.len(), prev_block.my_burn_hash, parent_block_burn_height, stream.len(), prev_block.anchored_block.header.parent_microblock_sequence); best_attempt = cmp::max(best_attempt, prev_block.attempt); } } else { - // no microblock stream to confirm, and the stacks tip hasn't changed - debug!("Relayer: Stacks tip is unchanged since we last tried to mine a block off of {}/{} at height {} with {} txs, in {} at burn height {}, and no microblocks present", - &prev_block.parent_consensus_hash, &prev_block.anchored_block.header.parent_block, prev_block.anchored_block.header.total_work.work, - prev_block.anchored_block.txs.len(), prev_block.my_burn_hash, parent_block_burn_height); + if !force { + // no microblock stream to confirm, and the stacks tip hasn't changed + info!("Relayer: Stacks tip is unchanged since we last tried to mine a block off of {}/{} at height {} with {} txs, in {} at burn height {}, and no microblocks present", + &prev_block.parent_consensus_hash, &prev_block.anchored_block.header.parent_block, prev_block.anchored_block.header.total_work.work, + prev_block.anchored_block.txs.len(), prev_block.my_burn_hash, parent_block_burn_height); - return None; + return None; + } } } else { if self.burn_block.burn_header_hash == prev_block.my_burn_hash { // only try and re-mine if 
there was no sortition since the last chain tip - debug!("Relayer: Stacks tip has changed to {}/{} since we last tried to mine a block in {} at burn height {}; attempt was {} (for Stacks tip {}/{})", + info!("Relayer: Stacks tip has changed to {}/{} since we last tried to mine a block in {} at burn height {}; attempt was {} (for Stacks tip {}/{})", parent_consensus_hash, stacks_parent_header.anchored_header.block_hash(), prev_block.my_burn_hash, parent_block_burn_height, prev_block.attempt, &prev_block.parent_consensus_hash, &prev_block.anchored_block.header.parent_block); best_attempt = cmp::max(best_attempt, prev_block.attempt); } else { - debug!("Relayer: Burn tip has changed to {} ({}) since we last tried to mine a block in {}", + info!("Relayer: Burn tip has changed to {} ({}) since we last tried to mine a block in {}", &self.burn_block.burn_header_hash, self.burn_block.block_height, &prev_block.my_burn_hash); } } } - best_attempt + 1 + (best_attempt + 1, max_txs) }; - Some(attempt) + Some((attempt, u64::try_from(max_txs).expect("too many txs"))) } /// Generate the VRF proof for the block we're going to build. 
@@ -1677,6 +2199,214 @@ impl BlockMinerThread { microblock_info_opt.map(|(stream, _)| stream) } + /// Get the list of possible burn addresses this miner is using + pub fn get_miner_addrs(config: &Config, keychain: &Keychain) -> Vec { + let mut op_signer = keychain.generate_op_signer(); + let mut btc_addrs = vec![ + // legacy + BitcoinAddress::from_bytes_legacy( + config.burnchain.get_bitcoin_network().1, + LegacyBitcoinAddressType::PublicKeyHash, + &Hash160::from_data(&op_signer.get_public_key().to_bytes()).0, + ) + .expect("FATAL: failed to construct legacy bitcoin address"), + ]; + if config.miner.segwit { + btc_addrs.push( + // segwit p2wpkh + BitcoinAddress::from_bytes_segwit_p2wpkh( + config.burnchain.get_bitcoin_network().1, + &Hash160::from_data(&op_signer.get_public_key().to_bytes_compressed()).0, + ) + .expect("FATAL: failed to construct segwit p2wpkh address"), + ); + } + btc_addrs + .into_iter() + .map(|addr| format!("{}", &addr)) + .collect() + } + + /// Obtain the target burn fee cap, when considering how well this miner is performing. 
+ pub fn get_mining_spend_amount( + config: &Config, + keychain: &Keychain, + burnchain: &Burnchain, + sortdb: &SortitionDB, + recipients: &[PoxAddress], + start_mine_height: u64, + at_burn_block: Option, + mut get_prior_winning_prob: F, + mut set_prior_winning_prob: G, + ) -> u64 + where + F: FnMut(u64) -> f64, + G: FnMut(u64, f64), + { + let config_file_burn_fee_cap = config.get_burnchain_config().burn_fee_cap; + let miner_config = config.get_miner_config(); + + if miner_config.target_win_probability < 0.00001 { + // this field is effectively zero + return config_file_burn_fee_cap; + } + let Some(miner_stats) = config.get_miner_stats() else { + return config_file_burn_fee_cap; + }; + + let Ok(tip) = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).map_err(|e| { + warn!("Failed to load canonical burn chain tip: {:?}", &e); + e + }) else { + return config_file_burn_fee_cap; + }; + let tip = if let Some(at_burn_block) = at_burn_block.as_ref() { + let ih = sortdb.index_handle(&tip.sortition_id); + let Ok(Some(ancestor_tip)) = ih.get_block_snapshot_by_height(*at_burn_block) else { + warn!( + "Failed to load ancestor tip at burn height {}", + at_burn_block + ); + return config_file_burn_fee_cap; + }; + ancestor_tip + } else { + tip + }; + + let Ok(active_miners_and_commits) = MinerStats::get_active_miners(sortdb, at_burn_block) + .map_err(|e| { + warn!("Failed to get active miners: {:?}", &e); + e + }) + else { + return config_file_burn_fee_cap; + }; + if active_miners_and_commits.len() == 0 { + warn!("No active miners detected; using config file burn_fee_cap"); + return config_file_burn_fee_cap; + } + + let active_miners: Vec<_> = active_miners_and_commits + .iter() + .map(|(miner, _cmt)| miner.as_str()) + .collect(); + + info!("Active miners: {:?}", &active_miners); + + let Ok(unconfirmed_block_commits) = miner_stats + .get_unconfirmed_commits(tip.block_height + 1, &active_miners) + .map_err(|e| { + warn!("Failed to find unconfirmed block-commits: {}", &e); + 
e + }) + else { + return config_file_burn_fee_cap; + }; + + let unconfirmed_miners_and_amounts: Vec<(String, u64)> = unconfirmed_block_commits + .iter() + .map(|cmt| (cmt.apparent_sender.to_string(), cmt.burn_fee)) + .collect(); + + info!( + "Found unconfirmed block-commits: {:?}", + &unconfirmed_miners_and_amounts + ); + + let (spend_dist, _total_spend) = MinerStats::get_spend_distribution( + &active_miners_and_commits, + &unconfirmed_block_commits, + &recipients, + ); + let win_probs = if miner_config.fast_rampup { + // look at spends 6+ blocks in the future + let win_probs = MinerStats::get_future_win_distribution( + &active_miners_and_commits, + &unconfirmed_block_commits, + &recipients, + ); + win_probs + } else { + // look at the current spends + let Ok(unconfirmed_burn_dist) = miner_stats + .get_unconfirmed_burn_distribution( + burnchain, + sortdb, + &active_miners_and_commits, + unconfirmed_block_commits, + recipients, + at_burn_block, + ) + .map_err(|e| { + warn!("Failed to get unconfirmed burn distribution: {:?}", &e); + e + }) + else { + return config_file_burn_fee_cap; + }; + + let win_probs = MinerStats::burn_dist_to_prob_dist(&unconfirmed_burn_dist); + win_probs + }; + + info!("Unconfirmed spend distribution: {:?}", &spend_dist); + info!( + "Unconfirmed win probabilities (fast_rampup={}): {:?}", + miner_config.fast_rampup, &win_probs + ); + + let miner_addrs = Self::get_miner_addrs(config, keychain); + let win_prob = miner_addrs + .iter() + .find_map(|x| win_probs.get(x)) + .copied() + .unwrap_or(0.0); + + info!( + "This miner's win probability at {} is {}", + tip.block_height, &win_prob + ); + set_prior_winning_prob(tip.block_height, win_prob); + + if win_prob < config.miner.target_win_probability { + // no mining strategy is viable, so just quit. + // Unless we're spinning up, that is. 
+ if start_mine_height + 6 < tip.block_height + && config.miner.underperform_stop_threshold.is_some() + { + let underperform_stop_threshold = + config.miner.underperform_stop_threshold.unwrap_or(0); + info!( + "Miner is spun up, but is not meeting target win probability as of {}", + tip.block_height + ); + // we've spun up and we're underperforming. How long do we tolerate this? + let mut underperformed_count = 0; + for depth in 0..underperform_stop_threshold { + let prior_burn_height = tip.block_height.saturating_sub(depth); + let prior_win_prob = get_prior_winning_prob(prior_burn_height); + if prior_win_prob < config.miner.target_win_probability { + info!( + "Miner underperformed in block {} ({}/{})", + prior_burn_height, underperformed_count, underperform_stop_threshold + ); + underperformed_count += 1; + } + } + if underperformed_count == underperform_stop_threshold { + warn!( + "Miner underperformed since burn height {}; spinning down", + start_mine_height + 6 + underperform_stop_threshold + ); + return 0; + } + } + } + + config_file_burn_fee_cap + } + /// Produce the block-commit for this anchored block, if we can. /// Returns the op on success /// Returns None if we fail somehow. 
@@ -1706,15 +2436,6 @@ impl BlockMinerThread { } }; - // let burn_fee_cap = self.config.burnchain.burn_fee_cap; - let burn_fee_cap = get_mining_spend_amount(self.globals.get_miner_status()); - let sunset_burn = self.burnchain.expected_sunset_burn( - self.burn_block.block_height + 1, - burn_fee_cap, - target_epoch_id, - ); - let rest_commit = burn_fee_cap - sunset_burn; - let commit_outs = if !self .burnchain .pox_constants @@ -1728,6 +2449,32 @@ impl BlockMinerThread { vec![PoxAddress::standard_burn_address(self.config.is_mainnet())] }; + let burn_fee_cap = Self::get_mining_spend_amount( + &self.config, + &self.keychain, + &self.burnchain, + burn_db, + &commit_outs, + self.globals.get_start_mining_height(), + None, + |block_height| { + self.globals + .get_estimated_win_prob(block_height) + .unwrap_or(0.0) + }, + |block_height, win_prob| self.globals.add_estimated_win_prob(block_height, win_prob), + ); + if burn_fee_cap == 0 { + warn!("Calculated burn_fee_cap is 0; will not mine"); + return None; + } + let sunset_burn = self.burnchain.expected_sunset_burn( + self.burn_block.block_height + 1, + burn_fee_cap, + target_epoch_id, + ); + let rest_commit = burn_fee_cap - sunset_burn; + // let's commit, but target the current burnchain tip with our modulus let op = self.inner_generate_block_commit_op( block_hash, @@ -1830,6 +2577,19 @@ impl BlockMinerThread { self.ongoing_commit.clone(), ); + let miner_config = self.config.get_miner_config(); + let last_miner_config_opt = self.globals.get_last_miner_config(); + let force_remine = if let Some(last_miner_config) = last_miner_config_opt { + last_miner_config != miner_config + } else { + false + }; + if force_remine { + info!("Miner config changed; forcing a re-mine attempt"); + } + + self.globals.set_last_miner_config(miner_config); + // NOTE: read-write access is needed in order to be able to query the recipient set. 
// This is an artifact of the way the MARF is built (see #1449) let mut burn_db = @@ -1855,8 +2615,14 @@ impl BlockMinerThread { .ok()? .expect("FATAL: no epoch defined") .epoch_id; - let mut parent_block_info = self.load_block_parent_info(&mut burn_db, &mut chain_state)?; - let attempt = self.get_mine_attempt(&chain_state, &parent_block_info)?; + + let (Some(mut parent_block_info), _) = + self.load_block_parent_info(&mut burn_db, &mut chain_state) + else { + return None; + }; + let (attempt, max_txs) = + self.get_mine_attempt(&chain_state, &parent_block_info, force_remine)?; let vrf_proof = self.make_vrf_proof()?; // Generates a new secret key for signing the trail of microblocks @@ -1969,6 +2735,24 @@ impl BlockMinerThread { } }; + let miner_config = self.config.get_miner_config(); + + if attempt > 1 + && miner_config.min_tx_count > 0 + && u64::try_from(anchored_block.txs.len()).expect("too many txs") + < miner_config.min_tx_count + { + info!("Relayer: Succeeded assembling subsequent block with {} txs, but expected at least {}", anchored_block.txs.len(), miner_config.min_tx_count); + return None; + } + + if miner_config.only_increase_tx_count + && max_txs > u64::try_from(anchored_block.txs.len()).expect("too many txs") + { + info!("Relayer: Succeeded assembling subsequent block with {} txs, but had previously produced a block with {} txs", anchored_block.txs.len(), max_txs); + return None; + } + info!( "Relayer: Succeeded assembling {} block #{}: {}, with {} txs, attempt {}", if parent_block_info.parent_block_total_burn == 0 { @@ -1992,6 +2776,11 @@ impl BlockMinerThread { &vrf_proof, target_epoch_id, )?; + let burn_fee = if let BlockstackOperationType::LeaderBlockCommit(ref op) = &op { + op.burn_fee + } else { + 0 + }; // last chance -- confirm that the stacks tip is unchanged (since it could have taken long // enough to build this block that another block could have arrived), and confirm that all @@ -1999,10 +2788,13 @@ impl BlockMinerThread { let 
cur_burn_chain_tip = SortitionDB::get_canonical_burn_chain_tip(burn_db.conn()) .expect("FATAL: failed to query sortition DB for canonical burn chain tip"); - if let Some(stacks_tip) = chain_state - .get_stacks_chain_tip(&burn_db) - .expect("FATAL: could not query chain tip") - { + if let Some(stacks_tip) = Self::pick_best_tip( + &self.globals, + &self.config, + &mut burn_db, + &mut chain_state, + None, + ) { let is_miner_blocked = self .globals .get_miner_status() @@ -2014,7 +2806,7 @@ impl BlockMinerThread { &self.burnchain, &burn_db, &chain_state, - self.config.miner.unprocessed_block_deadline_secs, + miner_config.unprocessed_block_deadline_secs, ); if stacks_tip.anchored_block_hash != anchored_block.header.parent_block || parent_block_info.parent_consensus_hash != stacks_tip.consensus_hash @@ -2022,7 +2814,7 @@ impl BlockMinerThread { || is_miner_blocked || has_unprocessed { - debug!( + info!( "Relayer: Cancel block-commit; chain tip(s) have changed or cancelled"; "block_hash" => %anchored_block.block_hash(), "tx_count" => anchored_block.txs.len(), @@ -2049,8 +2841,9 @@ impl BlockMinerThread { } let mut op_signer = self.keychain.generate_op_signer(); - debug!( + info!( "Relayer: Submit block-commit"; + "burn_fee" => burn_fee, "block_hash" => %anchored_block.block_hash(), "tx_count" => anchored_block.txs.len(), "target_height" => anchored_block.header.total_work.work, @@ -2369,8 +3162,6 @@ impl RelayerThread { ); #[cfg(any(test, feature = "testing"))] { - use std::fs; - use std::io::Write; use std::path::Path; if let Ok(path) = std::env::var("STACKS_BAD_BLOCKS_DIR") { // record this block somewhere @@ -2976,11 +3767,13 @@ impl RelayerThread { return None; } + let miner_config = self.config.get_miner_config(); + let has_unprocessed = BlockMinerThread::unprocessed_blocks_prevent_mining( &self.burnchain, self.sortdb_ref(), self.chainstate_ref(), - self.config.miner.unprocessed_block_deadline_secs, + miner_config.unprocessed_block_deadline_secs, ); if has_unprocessed 
{ debug!( @@ -3371,6 +4164,36 @@ impl RelayerThread { self.miner_thread.is_none() } + /// Try loading up a saved VRF key + pub(crate) fn load_saved_vrf_key(path: &str) -> Option { + let mut f = match fs::File::open(path) { + Ok(f) => f, + Err(e) => { + warn!("Could not open {}: {:?}", &path, &e); + return None; + } + }; + let mut registered_key_bytes = vec![]; + if let Err(e) = f.read_to_end(&mut registered_key_bytes) { + warn!( + "Failed to read registered key bytes from {}: {:?}", + path, &e + ); + return None; + } + + let Ok(registered_key) = serde_json::from_slice(®istered_key_bytes) else { + warn!( + "Did not load registered key from {}: could not decode JSON", + &path + ); + return None; + }; + + info!("Loaded registered key from {}", &path); + Some(registered_key) + } + /// Top-level dispatcher pub fn handle_directive(&mut self, directive: RelayerDirective) -> bool { debug!("Relayer: received next directive"); @@ -3382,10 +4205,18 @@ impl RelayerThread { true } RelayerDirective::RegisterKey(last_burn_block) => { - debug!("Relayer: directive Register VRF key"); - self.rotate_vrf_and_register(&last_burn_block); + let mut saved_key_opt = None; + if let Some(path) = self.config.miner.activated_vrf_key_path.as_ref() { + saved_key_opt = Self::load_saved_vrf_key(&path); + } + if let Some(saved_key) = saved_key_opt { + self.globals.resume_leader_key(saved_key); + } else { + debug!("Relayer: directive Register VRF key"); + self.rotate_vrf_and_register(&last_burn_block); + debug!("Relayer: directive Registered VRF key"); + } self.globals.counters.bump_blocks_processed(); - debug!("Relayer: directive Registered VRF key"); true } RelayerDirective::ProcessTenure(consensus_hash, burn_hash, block_header_hash) => { @@ -4411,6 +5242,7 @@ impl StacksNode { /// Called from the main thread. 
pub fn process_burnchain_state( &mut self, + config: &Config, sortdb: &SortitionDB, sort_id: &SortitionId, ibd: bool, @@ -4453,18 +5285,46 @@ impl StacksNode { SortitionDB::get_leader_keys_by_block(&ic, &block_snapshot.sortition_id) .expect("Unexpected SortitionDB error fetching key registers"); - let num_key_registers = key_registers.len(); - - self.globals - .try_activate_leader_key_registration(block_height, key_registers); + self.globals.set_last_sortition(block_snapshot); + let ret = last_sortitioned_block.map(|x| x.0); + let num_key_registers = key_registers.len(); debug!( "Processed burnchain state at height {}: {} leader keys, {} block-commits (ibd = {})", block_height, num_key_registers, num_block_commits, ibd ); - self.globals.set_last_sortition(block_snapshot); - last_sortitioned_block.map(|x| x.0) + // save the registered VRF key + let activated_key_opt = self + .globals + .try_activate_leader_key_registration(block_height, key_registers); + + let Some(activated_key) = activated_key_opt else { + return ret; + }; + let Some(path) = config.miner.activated_vrf_key_path.as_ref() else { + return ret; + }; + info!("Activated VRF key; saving to {}", &path); + let Ok(key_json) = serde_json::to_string(&activated_key) else { + warn!("Failed to serialize VRF key"); + return ret; + }; + let mut f = match fs::File::create(&path) { + Ok(f) => f, + Err(e) => { + warn!("Failed to create {}: {:?}", &path, &e); + return ret; + } + }; + + if let Err(e) = f.write_all(key_json.as_str().as_bytes()) { + warn!("Failed to write activated VRF key to {}: {:?}", &path, &e); + return ret; + } + + info!("Saved activated VRF key to {}", &path); + return ret; } /// Join all inner threads diff --git a/testnet/stacks-node/src/run_loop/mod.rs b/testnet/stacks-node/src/run_loop/mod.rs index bc76a128ca..8b264365b0 100644 --- a/testnet/stacks-node/src/run_loop/mod.rs +++ b/testnet/stacks-node/src/run_loop/mod.rs @@ -146,7 +146,7 @@ impl RunLoopCallbacks { } } -#[derive(Clone, Debug)] 
+#[derive(Clone, Debug, Serialize, Deserialize)] pub struct RegisteredKey { /// burn block height we intended this VRF key register to land in pub target_block_height: u64, diff --git a/testnet/stacks-node/src/run_loop/neon.rs b/testnet/stacks-node/src/run_loop/neon.rs index 3688acb153..45055097d1 100644 --- a/testnet/stacks-node/src/run_loop/neon.rs +++ b/testnet/stacks-node/src/run_loop/neon.rs @@ -623,11 +623,12 @@ impl RunLoop { sortdb: &SortitionDB, last_stacks_pox_reorg_recover_time: &mut u128, ) { + let miner_config = config.get_miner_config(); let delay = cmp::max( config.node.chain_liveness_poll_time_secs, cmp::max( - config.miner.first_attempt_time_ms, - config.miner.subsequent_attempt_time_ms, + miner_config.first_attempt_time_ms, + miner_config.subsequent_attempt_time_ms, ) / 1000, ); @@ -743,11 +744,12 @@ impl RunLoop { last_burn_pox_reorg_recover_time: &mut u128, last_announce_time: &mut u128, ) { + let miner_config = config.get_miner_config(); let delay = cmp::max( config.node.chain_liveness_poll_time_secs, cmp::max( - config.miner.first_attempt_time_ms, - config.miner.subsequent_attempt_time_ms, + miner_config.first_attempt_time_ms, + miner_config.subsequent_attempt_time_ms, ) / 1000, ); @@ -972,6 +974,7 @@ impl RunLoop { self.counters.clone(), self.pox_watchdog_comms.clone(), self.should_keep_running.clone(), + mine_start, ); self.set_globals(globals.clone()); @@ -1170,7 +1173,12 @@ impl RunLoop { let sortition_id = &block.sortition_id; // Have the node process the new block, that can include, or not, a sortition. - node.process_burnchain_state(burnchain.sortdb_mut(), sortition_id, ibd); + node.process_burnchain_state( + self.config(), + burnchain.sortdb_mut(), + sortition_id, + ibd, + ); // Now, tell the relayer to check if it won a sortition during this block, // and, if so, to process and advertize the block. This is basically a @@ -1240,6 +1248,7 @@ impl RunLoop { // once we've synced to the chain tip once, don't apply this check again. 
// this prevents a possible corner case in the event of a PoX fork. mine_start = 0; + globals.set_start_mining_height_if_zero(sortition_db_height); // at tip, and not downloading. proceed to mine. if last_tenure_sortition_height != sortition_db_height { diff --git a/testnet/stacks-node/src/tests/bitcoin_regtest.rs b/testnet/stacks-node/src/tests/bitcoin_regtest.rs index fdb09dd22c..75c6ec3666 100644 --- a/testnet/stacks-node/src/tests/bitcoin_regtest.rs +++ b/testnet/stacks-node/src/tests/bitcoin_regtest.rs @@ -141,7 +141,6 @@ fn bitcoind_integration(segwit_flag: bool) { conf.burnchain.password = Some("secret".to_string()); conf.burnchain.local_mining_public_key = Some("04ee0b1602eb18fef7986887a7e8769a30c9df981d33c8380d255edef003abdcd243a0eb74afdf6740e6c423e62aec631519a24cf5b1d62bf8a3e06ddc695dcb77".to_string()); - conf.miner.min_tx_fee = 0; conf.miner.first_attempt_time_ms = i64::max_value() as u64; conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64; conf.miner.segwit = segwit_flag; diff --git a/testnet/stacks-node/src/tests/epoch_205.rs b/testnet/stacks-node/src/tests/epoch_205.rs index 568912feec..844a314bc6 100644 --- a/testnet/stacks-node/src/tests/epoch_205.rs +++ b/testnet/stacks-node/src/tests/epoch_205.rs @@ -982,7 +982,6 @@ fn bigger_microblock_streams_in_2_05() { conf.node.max_microblocks = 65536; conf.burnchain.max_rbf = 1000000; - conf.miner.min_tx_fee = 1; conf.miner.first_attempt_time_ms = i64::max_value() as u64; conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64; diff --git a/testnet/stacks-node/src/tests/epoch_21.rs b/testnet/stacks-node/src/tests/epoch_21.rs index 34ac467bc0..8be3edad0f 100644 --- a/testnet/stacks-node/src/tests/epoch_21.rs +++ b/testnet/stacks-node/src/tests/epoch_21.rs @@ -4956,7 +4956,6 @@ fn test_v1_unlock_height_with_current_stackers() { conf.node.wait_time_for_blocks = 1_000; conf.miner.wait_for_block_download = false; - conf.miner.min_tx_fee = 1; conf.miner.first_attempt_time_ms = i64::max_value() 
as u64; conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64; @@ -5218,7 +5217,6 @@ fn test_v1_unlock_height_with_delay_and_current_stackers() { conf.node.wait_time_for_blocks = 1_000; conf.miner.wait_for_block_download = false; - conf.miner.min_tx_fee = 1; conf.miner.first_attempt_time_ms = i64::max_value() as u64; conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64; diff --git a/testnet/stacks-node/src/tests/epoch_22.rs b/testnet/stacks-node/src/tests/epoch_22.rs index eab6ea5685..99863c95e0 100644 --- a/testnet/stacks-node/src/tests/epoch_22.rs +++ b/testnet/stacks-node/src/tests/epoch_22.rs @@ -130,7 +130,6 @@ fn disable_pox() { conf.node.wait_time_for_blocks = 1_000; conf.miner.wait_for_block_download = false; - conf.miner.min_tx_fee = 1; conf.miner.first_attempt_time_ms = i64::max_value() as u64; conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64; @@ -660,7 +659,6 @@ fn pox_2_unlock_all() { conf.node.wait_time_for_blocks = 1_000; conf.miner.wait_for_block_download = false; - conf.miner.min_tx_fee = 1; conf.miner.first_attempt_time_ms = i64::max_value() as u64; conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64; diff --git a/testnet/stacks-node/src/tests/epoch_23.rs b/testnet/stacks-node/src/tests/epoch_23.rs index 9e13e597dd..40a4dddb47 100644 --- a/testnet/stacks-node/src/tests/epoch_23.rs +++ b/testnet/stacks-node/src/tests/epoch_23.rs @@ -96,7 +96,6 @@ fn trait_invocation_behavior() { conf.node.wait_time_for_blocks = 1_000; conf.miner.wait_for_block_download = false; - conf.miner.min_tx_fee = 1; conf.miner.first_attempt_time_ms = i64::max_value() as u64; conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64; diff --git a/testnet/stacks-node/src/tests/epoch_24.rs b/testnet/stacks-node/src/tests/epoch_24.rs index 9b002f6253..4376da2d41 100644 --- a/testnet/stacks-node/src/tests/epoch_24.rs +++ b/testnet/stacks-node/src/tests/epoch_24.rs @@ -148,7 +148,6 @@ fn fix_to_pox_contract() { 
conf.node.wait_time_for_blocks = 1_000; conf.miner.wait_for_block_download = false; - conf.miner.min_tx_fee = 1; conf.miner.first_attempt_time_ms = i64::max_value() as u64; conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64; @@ -784,7 +783,6 @@ fn verify_auto_unlock_behavior() { conf.node.wait_time_for_blocks = 1_000; conf.miner.wait_for_block_download = false; - conf.miner.min_tx_fee = 1; conf.miner.first_attempt_time_ms = i64::max_value() as u64; conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64; diff --git a/testnet/stacks-node/src/tests/integrations.rs b/testnet/stacks-node/src/tests/integrations.rs index c2057d6430..ffc7873dfc 100644 --- a/testnet/stacks-node/src/tests/integrations.rs +++ b/testnet/stacks-node/src/tests/integrations.rs @@ -181,7 +181,6 @@ fn integration_test_get_info() { }); conf.burnchain.commit_anchor_block_within = 5000; - conf.miner.min_tx_fee = 0; conf.miner.first_attempt_time_ms = i64::max_value() as u64; conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64; diff --git a/testnet/stacks-node/src/tests/mod.rs b/testnet/stacks-node/src/tests/mod.rs index 03f61b5e4c..0630e71387 100644 --- a/testnet/stacks-node/src/tests/mod.rs +++ b/testnet/stacks-node/src/tests/mod.rs @@ -1,3 +1,19 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . +use std::collections::HashMap; use std::convert::TryInto; use std::sync::atomic::AtomicU64; use std::sync::Arc; @@ -23,11 +39,12 @@ use stacks::core::{StacksEpoch, StacksEpochExtension, StacksEpochId, CHAIN_ID_TE use stacks::util_lib::strings::StacksString; use stacks_common::address::AddressHashMode; use stacks_common::codec::StacksMessageCodec; -use stacks_common::types::chainstate::StacksAddress; +use stacks_common::types::chainstate::{BlockHeaderHash, StacksAddress}; use stacks_common::util::get_epoch_time_secs; use stacks_common::util::hash::{hex_bytes, to_hex}; use super::burnchains::bitcoin_regtest_controller::ParsedUTXO; +use super::neon_node::{BlockMinerThread, TipCandidate}; use super::Config; use crate::helium::RunLoop; use crate::tests::neon_integrations::{get_chain_info, next_block_and_wait}; @@ -518,8 +535,6 @@ fn should_succeed_mining_valid_txs() { 100000, ); - conf.miner.min_tx_fee = 0; - let num_rounds = 6; let mut run_loop = RunLoop::new(conf.clone()); @@ -993,3 +1008,332 @@ fn test_btc_to_sat_errors() { assert!(ParsedUTXO::serialized_btc_to_sat("7.4e-7").is_none()); assert!(ParsedUTXO::serialized_btc_to_sat("5.96e-6").is_none()); } + +#[test] +fn test_sort_and_populate_candidates() { + let empty: Vec = vec![]; + assert_eq!( + empty, + BlockMinerThread::sort_and_populate_candidates(vec![]) + ); + let candidates = vec![ + TipCandidate { + stacks_height: 1, + consensus_hash: ConsensusHash([0x01; 20]), + anchored_block_hash: BlockHeaderHash([0x01; 32]), + parent_consensus_hash: ConsensusHash([0x00; 20]), + parent_anchored_block_hash: BlockHeaderHash([0x00; 32]), + burn_height: 100, + num_earlier_siblings: 0, + }, + TipCandidate { + stacks_height: 2, + consensus_hash: ConsensusHash([0x02; 20]), + anchored_block_hash: BlockHeaderHash([0x02; 32]), + parent_consensus_hash: ConsensusHash([0x01; 20]), + parent_anchored_block_hash: 
BlockHeaderHash([0x01; 32]), + burn_height: 102, + num_earlier_siblings: 0, + }, + TipCandidate { + stacks_height: 2, + consensus_hash: ConsensusHash([0x12; 20]), + anchored_block_hash: BlockHeaderHash([0x12; 32]), + parent_consensus_hash: ConsensusHash([0x01; 20]), + parent_anchored_block_hash: BlockHeaderHash([0x01; 32]), + burn_height: 101, + num_earlier_siblings: 0, + }, + TipCandidate { + stacks_height: 2, + consensus_hash: ConsensusHash([0x22; 20]), + anchored_block_hash: BlockHeaderHash([0x22; 32]), + parent_consensus_hash: ConsensusHash([0x01; 20]), + parent_anchored_block_hash: BlockHeaderHash([0x01; 32]), + burn_height: 104, + num_earlier_siblings: 0, + }, + TipCandidate { + stacks_height: 4, + consensus_hash: ConsensusHash([0x04; 20]), + anchored_block_hash: BlockHeaderHash([0x04; 32]), + parent_consensus_hash: ConsensusHash([0x03; 20]), + parent_anchored_block_hash: BlockHeaderHash([0x03; 32]), + burn_height: 105, + num_earlier_siblings: 0, + }, + TipCandidate { + stacks_height: 3, + consensus_hash: ConsensusHash([0x03; 20]), + anchored_block_hash: BlockHeaderHash([0x03; 32]), + parent_consensus_hash: ConsensusHash([0x02; 20]), + parent_anchored_block_hash: BlockHeaderHash([0x02; 32]), + burn_height: 105, + num_earlier_siblings: 0, + }, + ]; + let sorted_candidates = BlockMinerThread::sort_and_populate_candidates(candidates); + assert_eq!( + sorted_candidates, + vec![ + TipCandidate { + stacks_height: 1, + consensus_hash: ConsensusHash([0x01; 20]), + anchored_block_hash: BlockHeaderHash([0x01; 32]), + parent_consensus_hash: ConsensusHash([0x00; 20]), + parent_anchored_block_hash: BlockHeaderHash([0x00; 32]), + burn_height: 100, + num_earlier_siblings: 0 + }, + TipCandidate { + stacks_height: 2, + consensus_hash: ConsensusHash([0x12; 20]), + anchored_block_hash: BlockHeaderHash([0x12; 32]), + parent_consensus_hash: ConsensusHash([0x01; 20]), + parent_anchored_block_hash: BlockHeaderHash([0x01; 32]), + burn_height: 101, + num_earlier_siblings: 0 + }, + 
TipCandidate { + stacks_height: 2, + consensus_hash: ConsensusHash([0x02; 20]), + anchored_block_hash: BlockHeaderHash([0x02; 32]), + parent_consensus_hash: ConsensusHash([0x01; 20]), + parent_anchored_block_hash: BlockHeaderHash([0x01; 32]), + burn_height: 102, + num_earlier_siblings: 1 + }, + TipCandidate { + stacks_height: 2, + consensus_hash: ConsensusHash([0x22; 20]), + anchored_block_hash: BlockHeaderHash([0x22; 32]), + parent_consensus_hash: ConsensusHash([0x01; 20]), + parent_anchored_block_hash: BlockHeaderHash([0x01; 32]), + burn_height: 104, + num_earlier_siblings: 2 + }, + TipCandidate { + stacks_height: 3, + consensus_hash: ConsensusHash([0x03; 20]), + anchored_block_hash: BlockHeaderHash([0x03; 32]), + parent_consensus_hash: ConsensusHash([0x02; 20]), + parent_anchored_block_hash: BlockHeaderHash([0x02; 32]), + burn_height: 105, + num_earlier_siblings: 0 + }, + TipCandidate { + stacks_height: 4, + consensus_hash: ConsensusHash([0x04; 20]), + anchored_block_hash: BlockHeaderHash([0x04; 32]), + parent_consensus_hash: ConsensusHash([0x03; 20]), + parent_anchored_block_hash: BlockHeaderHash([0x03; 32]), + burn_height: 105, + num_earlier_siblings: 0 + } + ] + ); +} + +#[test] +fn test_inner_pick_best_tip() { + // chain structure as folows: + // + // Bitcoin chain + // 100 101 102 103 104 105 106 + // | | | | | | + // Stacks chain | | | + // 1 <- 2 | |.-- 3 <- 4 + // \ | / + // *----- 2 <------*| + // \ | + // *--------------2 + // + // If there are no previous best-tips, then: + // At Bitcoin height 105, the best tip is (4,105) + // At Bitcoin height 104, the best tip is (3,104) + // At Bitcoin height 103, the best tip is (2,101) + // At Bitcoin height 102, the best tip is (2,101) + // At Bitcoin height 101, the best tip is (2,101) + // At Bitcoin height 100, the best tip is (1,100) + // + let candidates = vec![ + TipCandidate { + stacks_height: 1, + consensus_hash: ConsensusHash([0x01; 20]), + anchored_block_hash: BlockHeaderHash([0x01; 32]), + 
parent_consensus_hash: ConsensusHash([0x00; 20]), + parent_anchored_block_hash: BlockHeaderHash([0x00; 32]), + burn_height: 100, + num_earlier_siblings: 0, + }, + TipCandidate { + stacks_height: 2, + consensus_hash: ConsensusHash([0x02; 20]), + anchored_block_hash: BlockHeaderHash([0x02; 32]), + parent_consensus_hash: ConsensusHash([0x01; 20]), + parent_anchored_block_hash: BlockHeaderHash([0x01; 32]), + burn_height: 102, + num_earlier_siblings: 0, + }, + TipCandidate { + stacks_height: 2, + consensus_hash: ConsensusHash([0x12; 20]), + anchored_block_hash: BlockHeaderHash([0x12; 32]), + parent_consensus_hash: ConsensusHash([0x01; 20]), + parent_anchored_block_hash: BlockHeaderHash([0x01; 32]), + burn_height: 101, + num_earlier_siblings: 0, + }, + TipCandidate { + stacks_height: 2, + consensus_hash: ConsensusHash([0x22; 20]), + anchored_block_hash: BlockHeaderHash([0x22; 32]), + parent_consensus_hash: ConsensusHash([0x01; 20]), + parent_anchored_block_hash: BlockHeaderHash([0x01; 32]), + burn_height: 104, + num_earlier_siblings: 0, + }, + TipCandidate { + stacks_height: 4, + consensus_hash: ConsensusHash([0x04; 20]), + anchored_block_hash: BlockHeaderHash([0x04; 32]), + parent_consensus_hash: ConsensusHash([0x03; 20]), + parent_anchored_block_hash: BlockHeaderHash([0x03; 32]), + burn_height: 106, + num_earlier_siblings: 0, + }, + TipCandidate { + stacks_height: 3, + consensus_hash: ConsensusHash([0x03; 20]), + anchored_block_hash: BlockHeaderHash([0x03; 32]), + parent_consensus_hash: ConsensusHash([0x02; 20]), + parent_anchored_block_hash: BlockHeaderHash([0x02; 32]), + burn_height: 105, + num_earlier_siblings: 0, + }, + ]; + + let sorted_candidates = BlockMinerThread::sort_and_populate_candidates(candidates.clone()); + assert_eq!( + None, + BlockMinerThread::inner_pick_best_tip(vec![], HashMap::new()) + ); + assert_eq!( + Some(sorted_candidates[5].clone()), + BlockMinerThread::inner_pick_best_tip(sorted_candidates.clone(), HashMap::new()) + ); + assert_eq!( + 
Some(sorted_candidates[0].clone()), + BlockMinerThread::inner_pick_best_tip(sorted_candidates[0..1].to_vec(), HashMap::new()) + ); + assert_eq!( + Some(sorted_candidates[1].clone()), + BlockMinerThread::inner_pick_best_tip(sorted_candidates[0..2].to_vec(), HashMap::new()) + ); + assert_eq!( + Some(sorted_candidates[1].clone()), + BlockMinerThread::inner_pick_best_tip(sorted_candidates[0..3].to_vec(), HashMap::new()) + ); + assert_eq!( + Some(sorted_candidates[1].clone()), + BlockMinerThread::inner_pick_best_tip(sorted_candidates[0..4].to_vec(), HashMap::new()) + ); + assert_eq!( + Some(sorted_candidates[4].clone()), + BlockMinerThread::inner_pick_best_tip(sorted_candidates[0..5].to_vec(), HashMap::new()) + ); + + // suppose now that we previously picked (2,104) as the best-tip. + // No other tips at Stacks height 2 will be accepted, nor will those at heights 3 and 4 (since + // they descend from the wrong height-2 block). + let mut best_tips = HashMap::new(); + best_tips.insert(2, sorted_candidates[3].clone()); + + assert_eq!( + Some(sorted_candidates[3].clone()), + BlockMinerThread::inner_pick_best_tip(sorted_candidates.clone(), best_tips.clone()) + ); + assert_eq!( + Some(sorted_candidates[0].clone()), + BlockMinerThread::inner_pick_best_tip(sorted_candidates[0..1].to_vec(), best_tips.clone()) + ); + assert_eq!( + None, + BlockMinerThread::inner_pick_best_tip(sorted_candidates[0..2].to_vec(), best_tips.clone()) + ); + assert_eq!( + None, + BlockMinerThread::inner_pick_best_tip(sorted_candidates[0..3].to_vec(), best_tips.clone()) + ); + assert_eq!( + Some(sorted_candidates[3].clone()), + BlockMinerThread::inner_pick_best_tip(sorted_candidates[0..4].to_vec(), best_tips.clone()) + ); + assert_eq!( + Some(sorted_candidates[3].clone()), + BlockMinerThread::inner_pick_best_tip(sorted_candidates[0..5].to_vec(), best_tips.clone()) + ); + + // now suppose that we previously picked (2,102) as the best-tip. 
+ // Conflicting blocks are (2,101) and (2,104) + let mut best_tips = HashMap::new(); + best_tips.insert(2, sorted_candidates[2].clone()); + + assert_eq!( + Some(sorted_candidates[5].clone()), + BlockMinerThread::inner_pick_best_tip(sorted_candidates.clone(), best_tips.clone()) + ); + assert_eq!( + Some(sorted_candidates[0].clone()), + BlockMinerThread::inner_pick_best_tip(sorted_candidates[0..1].to_vec(), best_tips.clone()) + ); + assert_eq!( + None, + BlockMinerThread::inner_pick_best_tip(sorted_candidates[0..2].to_vec(), best_tips.clone()) + ); + assert_eq!( + Some(sorted_candidates[2].clone()), + BlockMinerThread::inner_pick_best_tip(sorted_candidates[0..3].to_vec(), best_tips.clone()) + ); + assert_eq!( + Some(sorted_candidates[2].clone()), + BlockMinerThread::inner_pick_best_tip(sorted_candidates[0..4].to_vec(), best_tips.clone()) + ); + assert_eq!( + Some(sorted_candidates[4].clone()), + BlockMinerThread::inner_pick_best_tip(sorted_candidates[0..5].to_vec(), best_tips.clone()) + ); + + // now suppose that we previously picked both (2,101) and (3,105) as the best-tips. + // these best-tips are in conflict, but that shouldn't prohibit us from choosing (4,106) as the + // best tip even though it doesn't confirm (2,101). However, it would mean that (2,102) and + // (2,104) are in conflict. 
+ let mut best_tips = HashMap::new(); + best_tips.insert(2, sorted_candidates[1].clone()); + best_tips.insert(3, sorted_candidates[4].clone()); + + assert_eq!( + Some(sorted_candidates[5].clone()), + BlockMinerThread::inner_pick_best_tip(sorted_candidates.clone(), best_tips.clone()) + ); + assert_eq!( + Some(sorted_candidates[0].clone()), + BlockMinerThread::inner_pick_best_tip(sorted_candidates[0..1].to_vec(), best_tips.clone()) + ); + assert_eq!( + Some(sorted_candidates[1].clone()), + BlockMinerThread::inner_pick_best_tip(sorted_candidates[0..2].to_vec(), best_tips.clone()) + ); + assert_eq!( + Some(sorted_candidates[1].clone()), + BlockMinerThread::inner_pick_best_tip(sorted_candidates[0..3].to_vec(), best_tips.clone()) + ); + assert_eq!( + Some(sorted_candidates[1].clone()), + BlockMinerThread::inner_pick_best_tip(sorted_candidates[0..4].to_vec(), best_tips.clone()) + ); + assert_eq!( + Some(sorted_candidates[1].clone()), + BlockMinerThread::inner_pick_best_tip(sorted_candidates[0..5].to_vec(), best_tips.clone()) + ); +} diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 02461ce840..52a03b60ed 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -34,6 +34,7 @@ use stacks::chainstate::stacks::{ }; use stacks::clarity_cli::vm_execute as execute; use stacks::core; +use stacks::core::mempool::MemPoolWalkTxTypes; use stacks::core::{ StacksEpoch, StacksEpochId, BLOCK_LIMIT_MAINNET_20, BLOCK_LIMIT_MAINNET_205, BLOCK_LIMIT_MAINNET_21, CHAIN_ID_TESTNET, HELIUM_BLOCK_LIMIT_20, PEER_VERSION_EPOCH_1_0, @@ -69,6 +70,7 @@ use super::{ }; use crate::burnchains::bitcoin_regtest_controller::{BitcoinRPCRequest, UTXO}; use crate::config::{EventKeyType, EventObserverConfig, FeeEstimatorName, InitialBalance}; +use crate::neon_node::RelayerThread; use crate::operations::BurnchainOpSigner; use crate::stacks_common::types::PrivateKey; use 
crate::syncctl::PoxSyncWatchdogComms; @@ -146,7 +148,6 @@ fn inner_neon_integration_test_conf(seed: Option>) -> (Config, StacksAdd conf.burnchain.poll_time_secs = 1; conf.node.pox_sync_sample_secs = 0; - conf.miner.min_tx_fee = 1; conf.miner.first_attempt_time_ms = i64::max_value() as u64; conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64; @@ -2375,7 +2376,6 @@ fn microblock_fork_poison_integration_test() { conf.miner.subsequent_attempt_time_ms = 5_000; conf.node.wait_time_for_blocks = 1_000; - conf.miner.min_tx_fee = 1; conf.miner.first_attempt_time_ms = i64::max_value() as u64; conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64; @@ -3123,9 +3123,6 @@ fn filter_low_fee_tx_integration_test() { }); } - // exclude the first 5 transactions from miner consideration - conf.miner.min_tx_fee = 1500; - let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller .start_bitcoind() @@ -3213,9 +3210,6 @@ fn filter_long_runtime_tx_integration_test() { }); } - // all transactions have high-enough fees... - conf.miner.min_tx_fee = 1; - // ...but none of them will be mined since we allot zero ms to do so conf.miner.first_attempt_time_ms = 0; conf.miner.subsequent_attempt_time_ms = 0; @@ -3294,8 +3288,6 @@ fn miner_submit_twice() { amount: 1049230, }); - // all transactions have high-enough fees... 
- conf.miner.min_tx_fee = 1; conf.node.mine_microblocks = false; // one should be mined in first attempt, and two should be in second attempt conf.miner.first_attempt_time_ms = 20; @@ -3415,7 +3407,6 @@ fn size_check_integration_test() { conf.node.microblock_frequency = 5000; conf.miner.microblock_attempt_time_ms = 120_000; - conf.miner.min_tx_fee = 1; conf.miner.first_attempt_time_ms = i64::max_value() as u64; conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64; @@ -3592,7 +3583,6 @@ fn size_overflow_unconfirmed_microblocks_integration_test() { conf.node.microblock_frequency = 5_000; conf.miner.microblock_attempt_time_ms = 120_000; - conf.miner.min_tx_fee = 1; conf.miner.first_attempt_time_ms = i64::max_value() as u64; conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64; @@ -3789,7 +3779,6 @@ fn size_overflow_unconfirmed_stream_microblocks_integration_test() { conf.node.max_microblocks = 65536; conf.burnchain.max_rbf = 1000000; - conf.miner.min_tx_fee = 1; conf.miner.first_attempt_time_ms = i64::max_value() as u64; conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64; @@ -3984,7 +3973,6 @@ fn size_overflow_unconfirmed_invalid_stream_microblocks_integration_test() { epochs[1].block_limit = core::BLOCK_LIMIT_MAINNET_20; conf.burnchain.epochs = Some(epochs); - conf.miner.min_tx_fee = 1; conf.miner.first_attempt_time_ms = i64::max_value() as u64; conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64; @@ -4247,7 +4235,6 @@ fn runtime_overflow_unconfirmed_microblocks_integration_test() { conf.node.microblock_frequency = 15000; conf.miner.microblock_attempt_time_ms = 120_000; - conf.miner.min_tx_fee = 1; conf.miner.first_attempt_time_ms = i64::max_value() as u64; conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64; @@ -4423,7 +4410,6 @@ fn block_replay_integration_test() { conf.node.wait_time_for_microblocks = 30000; conf.node.microblock_frequency = 5_000; - conf.miner.min_tx_fee = 1; 
conf.miner.first_attempt_time_ms = i64::max_value() as u64; conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64; @@ -4874,7 +4860,6 @@ fn mining_events_integration_test() { conf.node.wait_time_for_microblocks = 1000; conf.node.microblock_frequency = 1000; - conf.miner.min_tx_fee = 1; conf.miner.first_attempt_time_ms = i64::max_value() as u64; conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64; @@ -5122,7 +5107,6 @@ fn block_limit_hit_integration_test() { conf.node.wait_time_for_microblocks = 30000; conf.node.microblock_frequency = 1000; - conf.miner.min_tx_fee = 1; conf.miner.first_attempt_time_ms = i64::max_value() as u64; conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64; @@ -5340,7 +5324,6 @@ fn microblock_limit_hit_integration_test() { conf.burnchain.max_rbf = 10_000_000; conf.node.wait_time_for_blocks = 1_000; - conf.miner.min_tx_fee = 1; conf.miner.first_attempt_time_ms = i64::max_value() as u64; conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64; @@ -5552,7 +5535,6 @@ fn block_large_tx_integration_test() { conf.burnchain.max_rbf = 10_000_000; conf.node.wait_time_for_blocks = 1_000; - conf.miner.min_tx_fee = 1; conf.miner.first_attempt_time_ms = i64::max_value() as u64; conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64; @@ -5687,7 +5669,6 @@ fn microblock_large_tx_integration_test_FLAKY() { conf.node.wait_time_for_microblocks = 30000; conf.node.microblock_frequency = 1000; - conf.miner.min_tx_fee = 1; conf.miner.first_attempt_time_ms = i64::max_value() as u64; conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64; @@ -10836,3 +10817,336 @@ fn microblock_miner_multiple_attempts() { channel.stop_chains_coordinator(); } + +#[test] +#[ignore] +fn min_txs() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let spender_sk = StacksPrivateKey::new(); + let spender_addr = to_addr(&spender_sk); + let spender_princ: PrincipalData = spender_addr.into(); + + let (mut conf, 
_miner_account) = neon_integration_test_conf(); + + test_observer::spawn(); + + conf.events_observers.insert(EventObserverConfig { + endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), + events_keys: vec![EventKeyType::AnyEvent], + }); + + conf.miner.min_tx_count = 4; + conf.miner.first_attempt_time_ms = 0; + conf.miner.activated_vrf_key_path = Some("/tmp/activate_vrf_key.min_txs.json".to_string()); + + if fs::metadata("/tmp/activate_vrf_key.min_txs.json").is_ok() { + fs::remove_file("/tmp/activate_vrf_key.min_txs.json").unwrap(); + } + + let spender_bal = 10_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); + + conf.initial_balances.push(InitialBalance { + address: spender_princ.clone(), + amount: spender_bal, + }); + + let mut btcd_controller = BitcoinCoreController::new(conf.clone()); + btcd_controller + .start_bitcoind() + .map_err(|_e| ()) + .expect("Failed starting bitcoind"); + + let burnchain_config = Burnchain::regtest(&conf.get_burn_db_path()); + + let mut btc_regtest_controller = BitcoinRegtestController::with_burnchain( + conf.clone(), + None, + Some(burnchain_config.clone()), + None, + ); + let http_origin = format!("http://{}", &conf.node.rpc_bind); + + btc_regtest_controller.bootstrap_chain(201); + + eprintln!("Chain bootstrapped..."); + + let mut run_loop = neon::RunLoop::new(conf.clone()); + let blocks_processed = run_loop.get_blocks_processed_arc(); + let _client = reqwest::blocking::Client::new(); + let channel = run_loop.get_coordinator_channel().unwrap(); + + thread::spawn(move || run_loop.start(Some(burnchain_config), 0)); + + // give the run loop some time to start up! 
+ wait_for_runloop(&blocks_processed); + + // first block wakes up the run loop + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + // first block will hold our VRF registration + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + // second block will be the first mined Stacks block + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + let _sort_height = channel.get_sortitions_processed(); + + for i in 0..2 { + let code = format!("(print \"hello world {}\")", i); + let publish = make_contract_publish( + &spender_sk, + i as u64, + 1000, + &format!("test-publish-{}", &i), + &code, + ); + submit_tx(&http_origin, &publish); + + debug!("Try to build too-small a block {}", &i); + next_block_and_wait_with_timeout(&mut btc_regtest_controller, &blocks_processed, 15); + } + + let blocks = test_observer::get_blocks(); + for block in blocks { + let transactions = block.get("transactions").unwrap().as_array().unwrap(); + if transactions.len() > 1 { + debug!("Got block: {:?}", &block); + assert!(transactions.len() >= 4); + } + } + + let saved_vrf_key = RelayerThread::load_saved_vrf_key("/tmp/activate_vrf_key.min_txs.json"); + assert!(saved_vrf_key.is_some()); + + test_observer::clear(); +} + +#[test] +#[ignore] +fn filter_txs_by_type() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let spender_sk = StacksPrivateKey::new(); + let spender_addr = to_addr(&spender_sk); + let spender_princ: PrincipalData = spender_addr.into(); + + let (mut conf, _miner_account) = neon_integration_test_conf(); + + test_observer::spawn(); + + conf.events_observers.insert(EventObserverConfig { + endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), + events_keys: vec![EventKeyType::AnyEvent], + }); + + conf.miner.min_tx_count = 4; + conf.miner.first_attempt_time_ms = 0; + conf.miner.activated_vrf_key_path = Some("/tmp/activate_vrf_key.filter_txs.json".to_string()); + conf.miner.txs_to_consider = 
[MemPoolWalkTxTypes::TokenTransfer].into_iter().collect(); + + if fs::metadata("/tmp/activate_vrf_key.filter_txs.json").is_ok() { + fs::remove_file("/tmp/activate_vrf_key.filter_txs.json").unwrap(); + } + + let spender_bal = 10_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); + + conf.initial_balances.push(InitialBalance { + address: spender_princ.clone(), + amount: spender_bal, + }); + + let mut btcd_controller = BitcoinCoreController::new(conf.clone()); + btcd_controller + .start_bitcoind() + .map_err(|_e| ()) + .expect("Failed starting bitcoind"); + + let burnchain_config = Burnchain::regtest(&conf.get_burn_db_path()); + + let mut btc_regtest_controller = BitcoinRegtestController::with_burnchain( + conf.clone(), + None, + Some(burnchain_config.clone()), + None, + ); + let http_origin = format!("http://{}", &conf.node.rpc_bind); + + btc_regtest_controller.bootstrap_chain(201); + + eprintln!("Chain bootstrapped..."); + + let mut run_loop = neon::RunLoop::new(conf.clone()); + let blocks_processed = run_loop.get_blocks_processed_arc(); + let _client = reqwest::blocking::Client::new(); + let channel = run_loop.get_coordinator_channel().unwrap(); + + thread::spawn(move || run_loop.start(Some(burnchain_config), 0)); + + // give the run loop some time to start up! 
+ wait_for_runloop(&blocks_processed); + + // first block wakes up the run loop + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + // first block will hold our VRF registration + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + // second block will be the first mined Stacks block + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + let _sort_height = channel.get_sortitions_processed(); + let mut sent_txids = HashSet::new(); + for i in 0..2 { + let code = format!("(print \"hello world {}\")", i); + let publish = make_contract_publish( + &spender_sk, + i as u64, + 1000, + &format!("test-publish-{}", &i), + &code, + ); + let parsed = StacksTransaction::consensus_deserialize(&mut &publish[..]).unwrap(); + sent_txids.insert(parsed.txid()); + + submit_tx(&http_origin, &publish); + next_block_and_wait_with_timeout(&mut btc_regtest_controller, &blocks_processed, 15); + } + + let blocks = test_observer::get_blocks(); + for block in blocks { + info!("block: {:?}", &block); + let transactions = block.get("transactions").unwrap().as_array().unwrap(); + for tx in transactions { + let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); + if raw_tx == "0x00" { + continue; + } + let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); + let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); + if sent_txids.contains(&parsed.txid()) { + panic!("Included a smart contract"); + } + } + } + + let saved_vrf_key = RelayerThread::load_saved_vrf_key("/tmp/activate_vrf_key.filter_txs.json"); + assert!(saved_vrf_key.is_some()); + + test_observer::clear(); +} + +#[test] +#[ignore] +fn filter_txs_by_origin() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let spender_sk = StacksPrivateKey::new(); + let spender_addr = to_addr(&spender_sk); + let spender_princ: PrincipalData = spender_addr.into(); + + let (mut conf, _miner_account) = neon_integration_test_conf(); + + 
test_observer::spawn(); + + conf.events_observers.insert(EventObserverConfig { + endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), + events_keys: vec![EventKeyType::AnyEvent], + }); + + conf.miner.min_tx_count = 4; + conf.miner.first_attempt_time_ms = 0; + conf.miner.filter_origins = + [StacksAddress::from_string("STA2MZWV9N67TBYVWTE0PSSKMJ2F6YXW7DX96QAM").unwrap()] + .into_iter() + .collect(); + + let spender_bal = 10_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); + + conf.initial_balances.push(InitialBalance { + address: spender_princ.clone(), + amount: spender_bal, + }); + + let mut btcd_controller = BitcoinCoreController::new(conf.clone()); + btcd_controller + .start_bitcoind() + .map_err(|_e| ()) + .expect("Failed starting bitcoind"); + + let burnchain_config = Burnchain::regtest(&conf.get_burn_db_path()); + + let mut btc_regtest_controller = BitcoinRegtestController::with_burnchain( + conf.clone(), + None, + Some(burnchain_config.clone()), + None, + ); + let http_origin = format!("http://{}", &conf.node.rpc_bind); + + btc_regtest_controller.bootstrap_chain(201); + + eprintln!("Chain bootstrapped..."); + + let mut run_loop = neon::RunLoop::new(conf.clone()); + let blocks_processed = run_loop.get_blocks_processed_arc(); + let _client = reqwest::blocking::Client::new(); + let channel = run_loop.get_coordinator_channel().unwrap(); + + thread::spawn(move || run_loop.start(Some(burnchain_config), 0)); + + // give the run loop some time to start up! 
+ wait_for_runloop(&blocks_processed); + + // first block wakes up the run loop + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + // first block will hold our VRF registration + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + // second block will be the first mined Stacks block + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + let _sort_height = channel.get_sortitions_processed(); + let mut sent_txids = HashSet::new(); + for i in 0..2 { + let code = format!("(print \"hello world {}\")", i); + let publish = make_contract_publish( + &spender_sk, + i as u64, + 1000, + &format!("test-publish-{}", &i), + &code, + ); + let parsed = StacksTransaction::consensus_deserialize(&mut &publish[..]).unwrap(); + sent_txids.insert(parsed.txid()); + + submit_tx(&http_origin, &publish); + next_block_and_wait_with_timeout(&mut btc_regtest_controller, &blocks_processed, 15); + } + + let blocks = test_observer::get_blocks(); + for block in blocks { + info!("block: {:?}", &block); + let transactions = block.get("transactions").unwrap().as_array().unwrap(); + for tx in transactions { + let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); + if raw_tx == "0x00" { + continue; + } + let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); + let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); + if sent_txids.contains(&parsed.txid()) { + panic!("Included a smart contract"); + } + } + } + + test_observer::clear(); +} From 9012cf768a58d97654d6d89bda5a21b5e950224b Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 17 Jan 2024 16:26:03 -0500 Subject: [PATCH 2/2] chore: cargo fmt-stacks --- stackslib/src/core/mempool.rs | 2 +- stackslib/src/core/tests/mod.rs | 4 ++-- stackslib/src/cost_estimates/fee_scalar.rs | 5 +---- stackslib/src/net/httpcore.rs | 16 ++++++++++++---- 4 files changed, 16 insertions(+), 11 deletions(-) diff --git a/stackslib/src/core/mempool.rs b/stackslib/src/core/mempool.rs 
index 0146065e63..36c52fa008 100644 --- a/stackslib/src/core/mempool.rs +++ b/stackslib/src/core/mempool.rs @@ -20,9 +20,9 @@ use std::hash::Hasher; use std::io::{Read, Write}; use std::ops::{Deref, DerefMut}; use std::path::{Path, PathBuf}; +use std::str::FromStr; use std::time::Instant; use std::{fs, io}; -use std::str::FromStr; use clarity::vm::types::PrincipalData; use rand::distributions::Uniform; diff --git a/stackslib/src/core/tests/mod.rs b/stackslib/src/core/tests/mod.rs index cfa950f1f5..35a933045a 100644 --- a/stackslib/src/core/tests/mod.rs +++ b/stackslib/src/core/tests/mod.rs @@ -58,8 +58,8 @@ use crate::chainstate::stacks::{ C32_ADDRESS_VERSION_TESTNET_SINGLESIG, }; use crate::core::mempool::{ - db_get_all_nonces, MemPoolSyncData, MemPoolWalkSettings, MemPoolWalkTxTypes, TxTag, BLOOM_COUNTER_DEPTH, - BLOOM_COUNTER_ERROR_RATE, MAX_BLOOM_COUNTER_TXS, + db_get_all_nonces, MemPoolSyncData, MemPoolWalkSettings, MemPoolWalkTxTypes, TxTag, + BLOOM_COUNTER_DEPTH, BLOOM_COUNTER_ERROR_RATE, MAX_BLOOM_COUNTER_TXS, }; use crate::core::{FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH}; use crate::net::Error as NetError; diff --git a/stackslib/src/cost_estimates/fee_scalar.rs b/stackslib/src/cost_estimates/fee_scalar.rs index b7fc814ff3..2ac4e592ac 100644 --- a/stackslib/src/cost_estimates/fee_scalar.rs +++ b/stackslib/src/cost_estimates/fee_scalar.rs @@ -4,6 +4,7 @@ use std::iter::FromIterator; use std::path::Path; use clarity::vm::costs::ExecutionCost; +use clarity::vm::database::{ClaritySerializable, STXBalance}; use rusqlite::types::{FromSql, FromSqlError}; use rusqlite::{ Connection, Error as SqliteError, OptionalExtension, ToSql, Transaction as SqlTransaction, @@ -12,10 +13,6 @@ use serde_json::Value as JsonValue; use super::metrics::CostMetric; use super::{EstimatorError, FeeEstimator, FeeRateEstimate}; - -use clarity::vm::database::ClaritySerializable; -use clarity::vm::database::STXBalance; - use crate::chainstate::stacks::db::StacksEpochReceipt; 
use crate::chainstate::stacks::events::TransactionOrigin; use crate::chainstate::stacks::TransactionPayload; diff --git a/stackslib/src/net/httpcore.rs b/stackslib/src/net/httpcore.rs index 017a151af6..169677eb8c 100644 --- a/stackslib/src/net/httpcore.rs +++ b/stackslib/src/net/httpcore.rs @@ -32,8 +32,8 @@ use stacks_common::types::chainstate::{ use stacks_common::types::net::PeerHost; use stacks_common::types::Address; use stacks_common::util::chunked_encoding::*; -use stacks_common::util::retry::{BoundReader, RetryReader}; use stacks_common::util::get_epoch_time_ms; +use stacks_common::util::retry::{BoundReader, RetryReader}; use url::Url; use crate::burnchains::Txid; @@ -436,12 +436,16 @@ pub trait RPCRequestHandler: HttpRequest + HttpResponse + RPCRequestHandlerClone pub struct StacksHttpRequest { preamble: HttpRequestPreamble, contents: HttpRequestContents, - start_time: u128 + start_time: u128, } impl StacksHttpRequest { pub fn new(preamble: HttpRequestPreamble, contents: HttpRequestContents) -> Self { - Self { preamble, contents, start_time: get_epoch_time_ms() } + Self { + preamble, + contents, + start_time: get_epoch_time_ms(), + } } /// Instantiate a request to a remote Stacks peer @@ -472,7 +476,11 @@ impl StacksHttpRequest { preamble.path_and_query_str = decoded_path; } - Ok(Self { preamble, contents, start_time: get_epoch_time_ms() }) + Ok(Self { + preamble, + contents, + start_time: get_epoch_time_ms(), + }) } /// Get a reference to the request premable metadata