From ec96e7afd35400d2ec4ff069e475093805db3f5e Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 6 Feb 2024 14:19:00 -0500 Subject: [PATCH 1/9] fix: rc_consensus_hash in the burn view is the stacks tip consensus hash, not the reward cycle consensus hash --- stackslib/src/chainstate/burn/db/sortdb.rs | 32 ++++++++++++---------- 1 file changed, 17 insertions(+), 15 deletions(-) diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index 4c63a28968..c6ebd99a7a 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -3883,26 +3883,13 @@ impl SortitionDB { .unwrap_or(&burnchain.first_block_hash) .clone(); - let rc = burnchain - .block_height_to_reward_cycle(chain_tip.block_height) - .expect("FATAL: block height does not have a reward cycle"); - - let rc_height = burnchain.reward_cycle_to_block_height(rc); - let rc_consensus_hash = SortitionDB::get_ancestor_snapshot( - conn, - cmp::min(chain_tip.block_height, rc_height), - &chain_tip.sortition_id, - )? - .map(|sn| sn.consensus_hash) - .ok_or(db_error::NotFoundError)?; - test_debug!( "Chain view: {},{}-{},{},{}", chain_tip.block_height, chain_tip.burn_header_hash, stable_block_height, &burn_stable_block_hash, - &rc_consensus_hash, + &chain_tip.canonical_stacks_tip_consensus_hash, ); Ok(BurnchainView { burn_block_height: chain_tip.block_height, @@ -3910,7 +3897,7 @@ impl SortitionDB { burn_stable_block_height: stable_block_height, burn_stable_block_hash: burn_stable_block_hash, last_burn_block_hashes: last_burn_block_hashes, - rc_consensus_hash, + rc_consensus_hash: chain_tip.canonical_stacks_tip_consensus_hash, }) } } @@ -4099,6 +4086,21 @@ impl SortitionDB { Ok((consensus_hash, stacks_block_hash)) } + #[cfg(test)] + pub fn set_canonical_stacks_chain_tip( + conn: &Connection, + ch: &ConsensusHash, + bhh: &BlockHeaderHash, + height: u64, + ) -> Result<(), db_error> { + let tip = SortitionDB::get_canonical_burn_chain_tip(conn)?; + let args: &[&dyn ToSql] = &[ch, bhh, &u64_to_sql(height)?, &tip.sortition_id]; + conn.execute("UPDATE snapshots SET canonical_stacks_tip_consensus_hash = ?1, canonical_stacks_tip_hash = ?2, canonical_stacks_tip_height = ?3 + WHERE sortition_id = ?4", args) + .map_err(db_error::SqliteError)?; + Ok(()) + } + /// Get the maximum arrival index for any known snapshot. 
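    /// (Arrival indexes are assigned in the order in which Stacks blocks are processed,
    /// so the maximum is the index of the most recently-arrived Stacks block.)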
    fn get_max_arrival_index(conn: &Connection) -> Result<u64, db_error> {
        match conn

From bc716db03038581abba7099c0b9c9bcdfff147d7 Mon Sep 17 00:00:00 2001
From: Jude Nelson
Date: Tue, 6 Feb 2024 14:19:58 -0500
Subject: [PATCH 2/9] fix: a bad slot signature should be a distinct error

---
 stackslib/src/net/api/poststackerdbchunk.rs | 24 ++++++++++++++++-----
 1 file changed, 19 insertions(+), 5 deletions(-)

diff --git a/stackslib/src/net/api/poststackerdbchunk.rs b/stackslib/src/net/api/poststackerdbchunk.rs
index 190ba1f710..371a75b5a2 100644
--- a/stackslib/src/net/api/poststackerdbchunk.rs
+++ b/stackslib/src/net/api/poststackerdbchunk.rs
@@ -116,6 +116,7 @@ impl HttpRequest for RPCPostStackerDBChunkRequestHandler {
 pub enum StackerDBErrorCodes {
     DataAlreadyExists,
     NoSuchSlot,
+    BadSigner,
 }
 
 impl StackerDBErrorCodes {
@@ -123,6 +124,7 @@ impl StackerDBErrorCodes {
         match self {
             Self::DataAlreadyExists => 0,
             Self::NoSuchSlot => 1,
+            Self::BadSigner => 2,
         }
     }
 
@@ -130,6 +132,7 @@ impl StackerDBErrorCodes {
         match self {
             Self::DataAlreadyExists => "Data for this slot and version already exist",
             Self::NoSuchSlot => "No such StackerDB slot",
+            Self::BadSigner => "Signature does not match slot signer",
         }
     }
 
@@ -183,11 +186,18 @@ impl RPCRequestHandler for RPCPostStackerDBChunkRequestHandler {
                             &HttpNotFound::new("StackerDB not found".to_string()),
                         ));
                     }
-                    if let Err(_e) = tx.try_replace_chunk(
+                    if let Err(e) = tx.try_replace_chunk(
                         &contract_identifier,
                         &stackerdb_chunk.get_slot_metadata(),
                         &stackerdb_chunk.data,
                     ) {
+                        test_debug!(
+                            "Failed to replace chunk {}.{} in {}: {:?}",
+                            stackerdb_chunk.slot_id,
+                            stackerdb_chunk.slot_version,
+                            &contract_identifier,
+                            &e
+                        );
                         let slot_metadata_opt =
                             match tx.get_slot_metadata(&contract_identifier, stackerdb_chunk.slot_id) {
                                 Ok(slot_opt) => slot_opt,
@@ -209,11 +219,15 @@ impl RPCRequestHandler for RPCPostStackerDBChunkRequestHandler {
                         let (reason, slot_metadata_opt) =
                             if let Some(slot_metadata) = slot_metadata_opt {
+                                let code = if let NetError::BadSlotSigner(..) = e {
+                                    StackerDBErrorCodes::BadSigner
+                                } else {
+                                    StackerDBErrorCodes::DataAlreadyExists
+                                };
+
                                 (
-                                    serde_json::to_string(
-                                        &StackerDBErrorCodes::DataAlreadyExists.into_json(),
-                                    )
-                                    .unwrap_or("(unable to encode JSON)".to_string()),
+                                    serde_json::to_string(&code.into_json())
+                                        .unwrap_or("(unable to encode JSON)".to_string()),
                                     Some(slot_metadata),
                                 )
                             } else {

From 99c209c9f18f61521082d38d239a323c2efb262e Mon Sep 17 00:00:00 2001
From: Jude Nelson
Date: Tue, 6 Feb 2024 14:22:46 -0500
Subject: [PATCH 3/9] fix: NACK getchunks and getchunksinv requests with
 NackErrorCodes::StaleView if the rc_consensus_hash doesn't match

---
 stackslib/src/net/chat.rs | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/stackslib/src/net/chat.rs b/stackslib/src/net/chat.rs
index 223b7e1bbd..7b1d4cc7f9 100644
--- a/stackslib/src/net/chat.rs
+++ b/stackslib/src/net/chat.rs
@@ -1339,8 +1339,8 @@ impl ConversationP2P {
             self.update_from_stacker_db_handshake_data(stackerdb_accept);
         } else {
             // remote peer's burnchain view has diverged, so assume no longer replicating (we
-            // can't talk to it anyway). This can happen once per reward cycle for a few
-            // minutes as nodes begin the next reward cycle, but it's harmless -- at worst, it
+            // can't talk to it anyway). This can happen once per burnchain block for a few
+            // seconds as nodes begin processing the next Stacks blocks, but it's harmless -- at worst, it
             // just means that no stacker DB replication happens between this peer and
             // localhost during this time.
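            // (Peers that go on to request chunks in this state receive the StaleView
            // NACK introduced below, so they can tell a transiently-stale view apart
            // from a genuine PoX divergence.)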
self.clear_stacker_db_handshake_data(); @@ -1779,13 +1779,16 @@ impl ConversationP2P { let local_peer = network.get_local_peer(); let burnchain_view = network.get_chain_view(); + // remote peer's Stacks chain tip is different from ours, meaning it might have a different + // stackerdb configuration view (and we won't be able to authenticate their chunks, and + // vice versa) if burnchain_view.rc_consensus_hash != getchunkinv.rc_consensus_hash { debug!( "{:?}: NACK StackerDBGetChunkInv; {} != {}", local_peer, &burnchain_view.rc_consensus_hash, &getchunkinv.rc_consensus_hash ); return Ok(StacksMessageType::Nack(NackData::new( - NackErrorCodes::InvalidPoxFork, + NackErrorCodes::StaleView, ))); } @@ -1827,7 +1830,7 @@ impl ConversationP2P { local_peer, &burnchain_view.rc_consensus_hash, &getchunk.rc_consensus_hash ); return Ok(StacksMessageType::Nack(NackData::new( - NackErrorCodes::InvalidPoxFork, + NackErrorCodes::StaleView, ))); } From b259ba3e85b3ba579ec994f4d03d6210b73d3ef4 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 6 Feb 2024 14:23:15 -0500 Subject: [PATCH 4/9] fix: fix comments on rc_consensus_hash --- stackslib/src/net/mod.rs | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index 2d806e2866..a5d516a3cf 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -975,6 +975,7 @@ pub mod NackErrorCodes { pub const InvalidMessage: u32 = 5; pub const NoSuchDB: u32 = 6; pub const StaleVersion: u32 = 7; + pub const StaleView: u32 = 8; } #[derive(Debug, Clone, PartialEq)] @@ -997,7 +998,9 @@ pub struct NatPunchData { /// Inform the remote peer of (a page of) the list of stacker DB contracts this node supports #[derive(Debug, Clone, PartialEq)] pub struct StackerDBHandshakeData { - /// current reward cycle ID + /// current reward cycle consensus hash (i.e. the consensus hash of the Stacks tip in the + /// current reward cycle, which commits to both the Stacks block tip and the underlying PoX + /// history). pub rc_consensus_hash: ConsensusHash, /// list of smart contracts that we index. /// there can be as many as 256 entries. 
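The same "Stacks tip consensus hash" semantics apply to each of the request/response structs below. As a rough sketch of the check these fields enable -- with stand-in types, not stackslib's real ConsensusHash and NackData -- a responder compares the requester's rc_consensus_hash against its own canonical Stacks tip and NACKs with StaleView on mismatch:

// Sketch only: stand-ins for stackslib's real ConsensusHash and NackData types.
#[derive(Clone, Copy, PartialEq, Eq)]
pub struct ConsensusHash(pub [u8; 20]);

pub const STALE_VIEW: u32 = 8; // mirrors NackErrorCodes::StaleView above

pub enum Reply {
    Proceed,
    Nack(u32),
}

// Both the StackerDBGetChunkInv and StackerDBGetChunk handlers make this check:
// if the requester's canonical Stacks tip differs from ours, neither side can
// validate the other's stackerdb config or chunk signatures, so the request is
// NACKed with the (transient) StaleView code instead of InvalidPoxFork.
pub fn check_view(local_tip: &ConsensusHash, request_tip: &ConsensusHash) -> Reply {
    if local_tip != request_tip {
        return Reply::Nack(STALE_VIEW);
    }
    Reply::Proceed
}

fn main() {
    let ours = ConsensusHash([0x11; 20]);
    let theirs = ConsensusHash([0x22; 20]);
    assert!(matches!(check_view(&ours, &theirs), Reply::Nack(STALE_VIEW)));
    assert!(matches!(check_view(&ours, &ours), Reply::Proceed));
}

Because the Stacks tip advances every block, a StaleView mismatch is expected to be transient, unlike a genuine PoX fork.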
@@ -1009,7 +1012,7 @@ pub struct StackerDBHandshakeData {
 pub struct StackerDBGetChunkInvData {
     /// smart contract being used to determine chunk quantity and order
     pub contract_id: QualifiedContractIdentifier,
-    /// consensus hash of the sortition that started this reward cycle
+    /// consensus hash of the Stacks chain tip in this reward cycle
     pub rc_consensus_hash: ConsensusHash,
 }
 
@@ -1028,7 +1031,7 @@ pub struct StackerDBChunkInvData {
 pub struct StackerDBGetChunkData {
     /// smart contract being used to determine slot quantity and order
     pub contract_id: QualifiedContractIdentifier,
-    /// consensus hash of the sortition that started this reward cycle
+    /// consensus hash of the Stacks chain tip in this reward cycle
     pub rc_consensus_hash: ConsensusHash,
     /// slot ID
     pub slot_id: u32,
@@ -1041,7 +1044,7 @@ pub struct StackerDBGetChunkData {
 pub struct StackerDBPushChunkData {
     /// smart contract being used to determine chunk quantity and order
     pub contract_id: QualifiedContractIdentifier,
-    /// consensus hash of the sortition that started this reward cycle
+    /// consensus hash of the Stacks chain tip in this reward cycle
     pub rc_consensus_hash: ConsensusHash,
     /// the pushed chunk
     pub chunk_data: StackerDBChunkData,

From 8e049789f55740e389107f705ca6bdbe1aac6987 Mon Sep 17 00:00:00 2001
From: Jude Nelson
Date: Tue, 6 Feb 2024 14:23:29 -0500
Subject: [PATCH 5/9] fix: force an initial burnchain view load for the p2p
 network if it hasn't completed a full state-machine pass yet

---
 stackslib/src/net/p2p.rs | 19 +++++++++++++++++--
 1 file changed, 17 insertions(+), 2 deletions(-)

diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs
index 3e182ddf3c..074f3ee4ce 100644
--- a/stackslib/src/net/p2p.rs
+++ b/stackslib/src/net/p2p.rs
@@ -5224,7 +5224,12 @@ impl PeerNetwork {
         // update burnchain snapshot if we need to (careful -- it's expensive)
         let sn = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn())?;
         let mut ret: HashMap<NeighborKey, Vec<StacksMessage>> = HashMap::new();
-        if sn.block_height != self.chain_view.burn_block_height {
+        let mut need_stackerdb_refresh = sn.canonical_stacks_tip_consensus_hash
+            != self.burnchain_tip.canonical_stacks_tip_consensus_hash;
+
+        if sn.block_height != self.chain_view.burn_block_height
+            || self.num_state_machine_passes == 0
+        {
             debug!(
                 "{:?}: load chain view for burn block {}",
                 &self.local_peer, sn.block_height
@@ -5303,7 +5308,17 @@ impl PeerNetwork {
                 .get_last_selected_anchor_block_txid()?
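                // (no anchor block has been selected yet -- fall back to the all-zeros txid)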
                .unwrap_or(Txid([0x00; 32]));
 
-            // refresh stackerdb configs
+            test_debug!(
+                "{:?}: chain view is {:?}",
+                &self.get_local_peer(),
+                &self.chain_view
+            );
+            need_stackerdb_refresh = true;
+        }
+
+        if need_stackerdb_refresh {
+            // refresh stackerdb configs -- canonical stacks tip has changed
+            debug!("{:?}: Refresh all stackerdbs", &self.get_local_peer());
             let mut new_stackerdb_configs = HashMap::new();
             let stacker_db_configs = mem::replace(&mut self.stacker_db_configs, HashMap::new());
             for (stackerdb_contract_id, stackerdb_config) in stacker_db_configs.into_iter() {

From ec918217bfe18806f627ea292a48509d039e31e8 Mon Sep 17 00:00:00 2001
From: Jude Nelson
Date: Tue, 6 Feb 2024 14:23:53 -0500
Subject: [PATCH 6/9] feat: test neighbors with stale views

---
 stackslib/src/net/stackerdb/mod.rs | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/stackslib/src/net/stackerdb/mod.rs b/stackslib/src/net/stackerdb/mod.rs
index b37fde4e10..bdba2aca94 100644
--- a/stackslib/src/net/stackerdb/mod.rs
+++ b/stackslib/src/net/stackerdb/mod.rs
@@ -151,6 +151,8 @@ pub struct StackerDBSyncResult {
     dead: HashSet<NeighborKey>,
     /// neighbors that misbehaved while syncing
     broken: HashSet<NeighborKey>,
+    /// neighbors that have stale views, but are otherwise online
+    pub(crate) stale: HashSet<NeighborAddress>,
 }
 
 /// Settings for the Stacker DB
@@ -262,6 +264,8 @@ pub struct StackerDBSync {
     /// whether or not we should immediately re-fetch chunks because we learned about new chunks
     /// from our peers when they replied to our chunk-pushes with new inventory state
     need_resync: bool,
+    /// Track stale neighbors
+    pub(crate) stale_neighbors: HashSet<NeighborAddress>,
 }
 
 impl StackerDBSyncResult {
@@ -274,6 +278,7 @@ impl StackerDBSyncResult {
             chunks_to_store: vec![chunk.chunk_data],
             dead: HashSet::new(),
             broken: HashSet::new(),
+            stale: HashSet::new(),
         }
     }
 }

From adba5786dc1da70d9f1a815ddd39db7176039a80 Mon Sep 17 00:00:00 2001
From: Jude Nelson
Date: Tue, 6 Feb 2024 14:24:12 -0500
Subject: [PATCH 7/9] feat: track neighbors with stale views

---
 stackslib/src/net/stackerdb/sync.rs | 25 ++++++++++++++++++++++---
 1 file changed, 22 insertions(+), 3 deletions(-)

diff --git a/stackslib/src/net/stackerdb/sync.rs b/stackslib/src/net/stackerdb/sync.rs
index d01d4ff03f..0d10fc3217 100644
--- a/stackslib/src/net/stackerdb/sync.rs
+++ b/stackslib/src/net/stackerdb/sync.rs
@@ -33,9 +33,9 @@ use crate::net::stackerdb::{
     StackerDBConfig, StackerDBSync, StackerDBSyncResult, StackerDBSyncState, StackerDBs,
 };
 use crate::net::{
-    Error as net_error, NackData, Neighbor, NeighborAddress, NeighborKey, StackerDBChunkData,
-    StackerDBChunkInvData, StackerDBGetChunkData, StackerDBGetChunkInvData, StackerDBPushChunkData,
-    StacksMessageType,
+    Error as net_error, NackData, NackErrorCodes, Neighbor, NeighborAddress, NeighborKey,
+    StackerDBChunkData, StackerDBChunkInvData, StackerDBGetChunkData, StackerDBGetChunkInvData,
+    StackerDBPushChunkData, StacksMessageType,
 };
 
 const MAX_CHUNKS_IN_FLIGHT: usize = 6;
@@ -72,6 +72,7 @@ impl StackerDBSync {
             total_pushed: 0,
             last_run_ts: 0,
             need_resync: false,
+            stale_neighbors: HashSet::new(),
         };
         dbsync.reset(None, config);
         dbsync
@@ -178,6 +179,7 @@ impl StackerDBSync {
             chunks_to_store: chunks,
             dead: self.comms.take_dead_neighbors(),
             broken: self.comms.take_broken_neighbors(),
+            stale: std::mem::replace(&mut self.stale_neighbors, HashSet::new()),
         };
 
         // keep all connected replicas, and replenish from config hints and the DB as needed
@@ -677,6 +679,7 @@ impl StackerDBSync {
                         &network.get_chain_view().rc_consensus_hash,
                         &db_data.rc_consensus_hash
                     );
+
self.connected_replicas.remove(&naddr); continue; } db_data @@ -688,6 +691,10 @@ impl StackerDBSync { &naddr, data.error_code ); + self.connected_replicas.remove(&naddr); + if data.error_code == NackErrorCodes::StaleView { + self.stale_neighbors.insert(naddr); + } continue; } x => { @@ -800,10 +807,15 @@ impl StackerDBSync { &naddr, data.error_code ); + self.connected_replicas.remove(&naddr); + if data.error_code == NackErrorCodes::StaleView { + self.stale_neighbors.insert(naddr); + } continue; } x => { info!("Received unexpected message {:?}", &x); + self.connected_replicas.remove(&naddr); continue; } }; @@ -929,10 +941,14 @@ impl StackerDBSync { data.error_code ); self.connected_replicas.remove(&naddr); + if data.error_code == NackErrorCodes::StaleView { + self.stale_neighbors.insert(naddr); + } continue; } x => { info!("Received unexpected message {:?}", &x); + self.connected_replicas.remove(&naddr); continue; } }; @@ -1072,6 +1088,9 @@ impl StackerDBSync { data.error_code ); self.connected_replicas.remove(&naddr); + if data.error_code == NackErrorCodes::StaleView { + self.stale_neighbors.insert(naddr); + } continue; } x => { From 4eb625a8ffa1ea1092b8b38de01b65eea2f75110 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 6 Feb 2024 14:24:25 -0500 Subject: [PATCH 8/9] chore: test that a peer with a stale view will not be acknowledged, but it will once its view converges --- stackslib/src/net/stackerdb/tests/sync.rs | 196 +++++++++++++++++++++- 1 file changed, 195 insertions(+), 1 deletion(-) diff --git a/stackslib/src/net/stackerdb/tests/sync.rs b/stackslib/src/net/stackerdb/tests/sync.rs index 7e1c5f15da..544208bf0f 100644 --- a/stackslib/src/net/stackerdb/tests/sync.rs +++ b/stackslib/src/net/stackerdb/tests/sync.rs @@ -26,11 +26,12 @@ use stacks_common::address::{ AddressHashMode, C32_ADDRESS_VERSION_MAINNET_MULTISIG, C32_ADDRESS_VERSION_MAINNET_SINGLESIG, }; use stacks_common::types::chainstate::{ - ConsensusHash, StacksAddress, StacksPrivateKey, StacksPublicKey, + BlockHeaderHash, ConsensusHash, StacksAddress, StacksPrivateKey, StacksPublicKey, }; use stacks_common::util::hash::{Hash160, Sha512Trunc256Sum}; use stacks_common::util::secp256k1::{MessageSignature, Secp256k1PrivateKey}; +use crate::chainstate::burn::db::sortdb::SortitionDB; use crate::net::relay::Relayer; use crate::net::stackerdb::db::SlotValidation; use crate::net::stackerdb::{StackerDBConfig, StackerDBs}; @@ -280,6 +281,199 @@ fn test_stackerdb_replica_2_neighbors_1_chunk() { }) } +#[test] +fn test_stackerdb_replica_2_neighbors_1_chunk_stale_view() { + with_timeout(600, || { + std::env::set_var("STACKS_TEST_DISABLE_EDGE_TRIGGER_TEST", "1"); + let mut peer_1_config = TestPeerConfig::from_port(BASE_PORT); + let mut peer_2_config = TestPeerConfig::from_port(BASE_PORT + 2); + + peer_1_config.allowed = -1; + peer_2_config.allowed = -1; + + // short-lived walks... 
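+        // (keep each neighbor walk brief so the peers run many handshake and sync
+        // passes over the course of the test)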
+ peer_1_config.connection_opts.walk_max_duration = 10; + peer_2_config.connection_opts.walk_max_duration = 10; + + // peer 1 crawls peer 2, and peer 2 crawls peer 1 + peer_1_config.add_neighbor(&peer_2_config.to_neighbor()); + peer_2_config.add_neighbor(&peer_1_config.to_neighbor()); + + // set up stacker DBs for both peers + let idx_1 = add_stackerdb(&mut peer_1_config, Some(StackerDBConfig::template())); + let idx_2 = add_stackerdb(&mut peer_2_config, Some(StackerDBConfig::template())); + + let mut peer_1 = TestPeer::new(peer_1_config); + let mut peer_2 = TestPeer::new(peer_2_config); + + // peer 1 gets the DB + setup_stackerdb(&mut peer_1, idx_1, true, 1); + setup_stackerdb(&mut peer_2, idx_2, false, 1); + + // verify that peer 1 got the data + let peer_1_db_chunks = load_stackerdb(&peer_1, idx_1); + assert_eq!(peer_1_db_chunks.len(), 1); + assert_eq!(peer_1_db_chunks[0].0.slot_id, 0); + assert_eq!(peer_1_db_chunks[0].0.slot_version, 1); + assert!(peer_1_db_chunks[0].1.len() > 0); + + // verify that peer 2 did NOT get the data + let peer_2_db_chunks = load_stackerdb(&peer_2, idx_2); + assert_eq!(peer_2_db_chunks.len(), 1); + assert_eq!(peer_2_db_chunks[0].0.slot_id, 0); + assert_eq!(peer_2_db_chunks[0].0.slot_version, 0); + assert!(peer_2_db_chunks[0].1.len() == 0); + + let peer_1_db_configs = peer_1.config.get_stacker_db_configs(); + let peer_2_db_configs = peer_2.config.get_stacker_db_configs(); + + // force peer 2 to have a stale view + let (old_tip_ch, old_tip_bh) = { + let sortdb = peer_1.sortdb(); + let (tip_bh, tip_ch) = + SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn()).unwrap(); + SortitionDB::set_canonical_stacks_chain_tip( + sortdb.conn(), + &ConsensusHash([0x22; 20]), + &BlockHeaderHash([0x33; 32]), + 45, + ) + .unwrap(); + (tip_bh, tip_ch) + }; + + let mut i = 0; + let mut peer_1_stale = false; + let mut peer_2_stale = false; + loop { + // run peer network state-machines + peer_1.network.stacker_db_configs = peer_1_db_configs.clone(); + peer_2.network.stacker_db_configs = peer_2_db_configs.clone(); + + let res_1 = peer_1.step_with_ibd(false); + let res_2 = peer_2.step_with_ibd(false); + + if let Ok(mut res) = res_1 { + for sync_res in res.stacker_db_sync_results.iter() { + assert_eq!(sync_res.chunks_to_store.len(), 0); + if sync_res.stale.len() > 0 { + peer_1_stale = true; + } + } + Relayer::process_stacker_db_chunks( + &mut peer_1.network.stackerdbs, + &peer_1_db_configs, + res.stacker_db_sync_results, + None, + ) + .unwrap(); + Relayer::process_pushed_stacker_db_chunks( + &mut peer_1.network.stackerdbs, + &peer_1_db_configs, + &mut res.unhandled_messages, + None, + ) + .unwrap(); + } + + if let Ok(mut res) = res_2 { + for sync_res in res.stacker_db_sync_results.iter() { + assert_eq!(sync_res.chunks_to_store.len(), 0); + if sync_res.stale.len() > 0 { + peer_2_stale = true; + } + } + Relayer::process_stacker_db_chunks( + &mut peer_2.network.stackerdbs, + &peer_2_db_configs, + res.stacker_db_sync_results, + None, + ) + .unwrap(); + Relayer::process_pushed_stacker_db_chunks( + &mut peer_2.network.stackerdbs, + &peer_2_db_configs, + &mut res.unhandled_messages, + None, + ) + .unwrap(); + } + + if peer_1_stale && peer_2_stale { + break; + } + + i += 1; + } + + debug!("Completed stacker DB stale detection in {} step(s)", i); + + // fix and re-run + { + let sortdb = peer_1.sortdb(); + SortitionDB::set_canonical_stacks_chain_tip(sortdb.conn(), &old_tip_ch, &old_tip_bh, 0) + .unwrap(); + + // force chain view refresh + peer_1.network.num_state_machine_passes = 0; + 
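+            // (refresh_burnchain_view treats a zero pass count as "no completed pass
+            // yet", so this forces the chain view -- and with it the stackerdb
+            // configs -- to be reloaded on the next step)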
} + + let mut i = 0; + loop { + // run peer network state-machines + peer_1.network.stacker_db_configs = peer_1_db_configs.clone(); + peer_2.network.stacker_db_configs = peer_2_db_configs.clone(); + + let res_1 = peer_1.step_with_ibd(false); + let res_2 = peer_2.step_with_ibd(false); + + if let Ok(mut res) = res_1 { + Relayer::process_stacker_db_chunks( + &mut peer_1.network.stackerdbs, + &peer_1_db_configs, + res.stacker_db_sync_results, + None, + ) + .unwrap(); + Relayer::process_pushed_stacker_db_chunks( + &mut peer_1.network.stackerdbs, + &peer_1_db_configs, + &mut res.unhandled_messages, + None, + ) + .unwrap(); + } + + if let Ok(mut res) = res_2 { + Relayer::process_stacker_db_chunks( + &mut peer_2.network.stackerdbs, + &peer_2_db_configs, + res.stacker_db_sync_results, + None, + ) + .unwrap(); + Relayer::process_pushed_stacker_db_chunks( + &mut peer_2.network.stackerdbs, + &peer_2_db_configs, + &mut res.unhandled_messages, + None, + ) + .unwrap(); + } + + let db1 = load_stackerdb(&peer_1, idx_1); + let db2 = load_stackerdb(&peer_2, idx_2); + + if db1 == db2 { + break; + } + i += 1; + } + + debug!("Completed stacker DB sync in {} step(s)", i); + }) +} + #[test] #[ignore] fn test_stackerdb_replica_2_neighbors_10_chunks() { From e97b2441efd395216902decc0e28eef2d08bd2be Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 6 Feb 2024 15:17:05 -0500 Subject: [PATCH 9/9] fix: instantiate burnchain DB earlier in the test framework, since the p2p network needs it to exist --- testnet/stacks-node/src/node.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/testnet/stacks-node/src/node.rs b/testnet/stacks-node/src/node.rs index b63b4ddbc1..fd049ee5cc 100644 --- a/testnet/stacks-node/src/node.rs +++ b/testnet/stacks-node/src/node.rs @@ -346,6 +346,15 @@ impl Node { } let burnchain_config = config.get_burnchain(); + + // instantiate DBs + let _burnchain_db = BurnchainDB::connect( + &burnchain_config.get_burnchaindb_path(), + &burnchain_config, + true, + ) + .expect("FATAL: failed to connect to burnchain DB"); + run_loop::announce_boot_receipts( &mut event_dispatcher, &chain_state, @@ -526,6 +535,7 @@ impl Node { let consensus_hash = burnchain_tip.block_snapshot.consensus_hash; let burnchain = self.config.get_burnchain(); + let sortdb = SortitionDB::open( &self.config.get_burn_db_file_path(), true,
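A note on consuming the new error signals: an HTTP client that posts a chunk and is rejected can now distinguish a version conflict from a signature problem. The sketch below is one way such a client might branch on the rejection reason; it assumes the {"code": ..., "message": ...} JSON shape produced by into_json() and the serde_json crate, and the helper name and advice strings are illustrative rather than part of stackslib.

// Hypothetical client-side helper; the codes mirror StackerDBErrorCodes above.
fn classify_rejection(reason_json: &str) -> &'static str {
    let v: serde_json::Value = match serde_json::from_str(reason_json) {
        Ok(v) => v,
        Err(_) => return "unparseable rejection reason",
    };
    match v.get("code").and_then(|c| c.as_u64()) {
        Some(0) => "DataAlreadyExists: fetch the slot metadata, bump the version, re-sign, retry",
        Some(1) => "NoSuchSlot: slot id is outside this DB's configured slots",
        Some(2) => "BadSigner: the chunk signature does not match the slot's eligible signer",
        _ => "unknown stackerdb error code",
    }
}

fn main() {
    let body = r#"{"code": 2, "message": "Signature does not match slot signer"}"#;
    println!("{}", classify_rejection(body));
}

A peer that instead sees NackErrorCodes::StaleView on the p2p side should simply retry later: as patch 7 shows, such neighbors are kept in the stale set rather than being treated as dead or broken.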