From 2f7aedd5bac2778a0cc0da6bd61936c6ba503dbc Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 6 Jun 2024 13:20:40 -0400 Subject: [PATCH 01/20] chore: move existing relayer tests into net/tests/relay/epoch2x.rs, to keep them separate from new Nakamoto functionality --- stackslib/src/net/tests/relay/epoch2x.rs | 3719 ++++++++++++++++++++++ stackslib/src/net/tests/relay/mod.rs | 18 + 2 files changed, 3737 insertions(+) create mode 100644 stackslib/src/net/tests/relay/epoch2x.rs create mode 100644 stackslib/src/net/tests/relay/mod.rs diff --git a/stackslib/src/net/tests/relay/epoch2x.rs b/stackslib/src/net/tests/relay/epoch2x.rs new file mode 100644 index 0000000000..e7409ac779 --- /dev/null +++ b/stackslib/src/net/tests/relay/epoch2x.rs @@ -0,0 +1,3719 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +use std::cell::RefCell; +use std::collections::HashMap; + +use clarity::vm::ast::stack_depth_checker::AST_CALL_STACK_DEPTH_BUFFER; +use clarity::vm::ast::ASTRules; +use clarity::vm::costs::LimitedCostTracker; +use clarity::vm::database::ClarityDatabase; +use clarity::vm::types::{QualifiedContractIdentifier, StacksAddressExtensions}; +use clarity::vm::{ClarityVersion, MAX_CALL_STACK_DEPTH}; +use rand::Rng; +use stacks_common::address::AddressHashMode; +use stacks_common::types::chainstate::{BlockHeaderHash, StacksBlockId, StacksWorkScore, TrieHash}; +use stacks_common::types::Address; +use stacks_common::util::hash::{MerkleTree, Sha512Trunc256Sum}; +use stacks_common::util::sleep_ms; +use stacks_common::util::vrf::VRFProof; + +use crate::burnchains::tests::TestMiner; +use crate::chainstate::stacks::db::blocks::{MINIMUM_TX_FEE, MINIMUM_TX_FEE_RATE_PER_BYTE}; +use crate::chainstate::stacks::miner::{BlockBuilderSettings, StacksMicroblockBuilder}; +use crate::chainstate::stacks::test::codec_all_transactions; +use crate::chainstate::stacks::tests::{ + make_coinbase, make_coinbase_with_nonce, make_smart_contract_with_version, + make_user_stacks_transfer, +}; +use crate::chainstate::stacks::{Error as ChainstateError, *}; +use crate::clarity_vm::clarity::ClarityConnection; +use crate::core::*; +use crate::net::api::getinfo::RPCPeerInfoData; +use crate::net::asn::*; +use crate::net::chat::*; +use crate::net::codec::*; +use crate::net::db::PeerDB; +use crate::net::download::*; +use crate::net::http::{HttpRequestContents, HttpRequestPreamble}; +use crate::net::httpcore::StacksHttpMessage; +use crate::net::inv::inv2x::*; +use crate::net::p2p::*; +use crate::net::relay::*; +use crate::net::test::*; +use crate::net::tests::download::epoch2x::run_get_blocks_and_microblocks; +use crate::net::{Error as net_error, *}; +use crate::util_lib::test::*; + +#[test] +fn test_sample_neighbors() { + let neighbors: Vec<_> = (0..10) + .map(|i| { + let nk = NeighborKey { + peer_version: 
12345, + network_id: 0x80000000, + addrbytes: PeerAddress([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 127, 0, 0, 1]), + port: i, + }; + nk + }) + .collect(); + + let neighbors_set: HashSet<_> = neighbors.clone().into_iter().collect(); + + let empty_distribution: HashMap = HashMap::new(); + + assert_eq!( + RelayerStats::sample_neighbors(empty_distribution.clone(), 0).len(), + 0 + ); + assert_eq!( + RelayerStats::sample_neighbors(empty_distribution.clone(), 1).len(), + 0 + ); + assert_eq!( + RelayerStats::sample_neighbors(empty_distribution.clone(), 5).len(), + 0 + ); + assert_eq!( + RelayerStats::sample_neighbors(empty_distribution.clone(), 10).len(), + 0 + ); + + let flat_distribution: HashMap<_, _> = neighbors.iter().map(|nk| (nk.clone(), 1)).collect(); + + assert_eq!( + RelayerStats::sample_neighbors(flat_distribution.clone(), 0).len(), + 0 + ); + assert_eq!( + RelayerStats::sample_neighbors(flat_distribution.clone(), 1).len(), + 1 + ); + + let flat_full_sample_set: HashSet<_> = + RelayerStats::sample_neighbors(flat_distribution.clone(), 10) + .into_iter() + .collect(); + + assert_eq!(flat_full_sample_set, neighbors_set); + + let flat_partial_sample_set: HashSet<_> = + RelayerStats::sample_neighbors(flat_distribution.clone(), 5) + .into_iter() + .collect(); + + assert_eq!(flat_partial_sample_set.len(), 5); + + let flat_unit_sample_set: HashSet<_> = + RelayerStats::sample_neighbors(flat_distribution.clone(), 1) + .into_iter() + .collect(); + + assert_eq!(flat_unit_sample_set.len(), 1); + + let biased_distribution: HashMap<_, _> = neighbors + .iter() + .enumerate() + .map(|(i, nk)| (nk.clone(), if i == 0 { 10 } else { 1 })) + .collect(); + + assert_eq!( + RelayerStats::sample_neighbors(biased_distribution.clone(), 0).len(), + 0 + ); + assert_eq!( + RelayerStats::sample_neighbors(biased_distribution.clone(), 1).len(), + 1 + ); + + let flat_full_sample_set: HashSet<_> = + RelayerStats::sample_neighbors(biased_distribution.clone(), 10) + .into_iter() + .collect(); + + 
assert_eq!(flat_full_sample_set, neighbors_set); + + let flat_partial_sample_set: HashSet<_> = + RelayerStats::sample_neighbors(biased_distribution.clone(), 5) + .into_iter() + .collect(); + + assert_eq!(flat_partial_sample_set.len(), 5); + + let flat_unit_sample_set: HashSet<_> = + RelayerStats::sample_neighbors(biased_distribution.clone(), 1) + .into_iter() + .collect(); + + assert_eq!(flat_unit_sample_set.len(), 1); +} + +#[test] +fn test_relayer_stats_add_relyed_messages() { + let mut relay_stats = RelayerStats::new(); + + let all_transactions = codec_all_transactions( + &TransactionVersion::Testnet, + 0x80000000, + &TransactionAnchorMode::Any, + &TransactionPostConditionMode::Allow, + ); + assert!(all_transactions.len() > MAX_RECENT_MESSAGES); + + eprintln!("Test with {} transactions", all_transactions.len()); + + let nk = NeighborKey { + peer_version: 12345, + network_id: 0x80000000, + addrbytes: PeerAddress([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 127, 0, 0, 1]), + port: 54321, + }; + + // never overflow recent messages for a neighbor + for (i, tx) in all_transactions.iter().enumerate() { + relay_stats.add_relayed_message(nk.clone(), tx); + + assert_eq!(relay_stats.recent_messages.len(), 1); + assert!(relay_stats.recent_messages.get(&nk).unwrap().len() <= MAX_RECENT_MESSAGES); + + assert_eq!(relay_stats.recent_updates.len(), 1); + } + + assert_eq!( + relay_stats.recent_messages.get(&nk).unwrap().len(), + MAX_RECENT_MESSAGES + ); + + for i in (all_transactions.len() - MAX_RECENT_MESSAGES)..MAX_RECENT_MESSAGES { + let digest = all_transactions[i].get_digest(); + let mut found = false; + for (_, hash) in relay_stats.recent_messages.get(&nk).unwrap().iter() { + found = found || (*hash == digest); + } + if !found { + assert!(false); + } + } + + // never overflow number of neighbors tracked + for i in 0..(MAX_RELAYER_STATS + 1) { + let mut new_nk = nk.clone(); + new_nk.peer_version += i as u32; + + relay_stats.add_relayed_message(new_nk, &all_transactions[0]); + 
+ assert!(relay_stats.recent_updates.len() <= i + 1); + assert!(relay_stats.recent_updates.len() <= MAX_RELAYER_STATS); + } +} + +#[test] +fn test_relayer_merge_stats() { + let mut relayer_stats = RelayerStats::new(); + + let na = NeighborAddress { + addrbytes: PeerAddress([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 127, 0, 0, 1]), + port: 54321, + public_key_hash: Hash160([0u8; 20]), + }; + + let relay_stats = RelayStats { + num_messages: 1, + num_bytes: 1, + last_seen: 1, + }; + + let mut rs = HashMap::new(); + rs.insert(na.clone(), relay_stats.clone()); + + relayer_stats.merge_relay_stats(rs); + assert_eq!(relayer_stats.relay_stats.len(), 1); + assert_eq!(relayer_stats.relay_stats.get(&na).unwrap().num_messages, 1); + assert_eq!(relayer_stats.relay_stats.get(&na).unwrap().num_bytes, 1); + assert_eq!(relayer_stats.relay_stats.get(&na).unwrap().last_seen, 1); + assert_eq!(relayer_stats.relay_updates.len(), 1); + + let now = get_epoch_time_secs() + 60; + + let relay_stats_2 = RelayStats { + num_messages: 2, + num_bytes: 2, + last_seen: now, + }; + + let mut rs = HashMap::new(); + rs.insert(na.clone(), relay_stats_2.clone()); + + relayer_stats.merge_relay_stats(rs); + assert_eq!(relayer_stats.relay_stats.len(), 1); + assert_eq!(relayer_stats.relay_stats.get(&na).unwrap().num_messages, 3); + assert_eq!(relayer_stats.relay_stats.get(&na).unwrap().num_bytes, 3); + assert!( + relayer_stats.relay_stats.get(&na).unwrap().last_seen < now + && relayer_stats.relay_stats.get(&na).unwrap().last_seen >= get_epoch_time_secs() + ); + assert_eq!(relayer_stats.relay_updates.len(), 1); + + let relay_stats_3 = RelayStats { + num_messages: 3, + num_bytes: 3, + last_seen: 0, + }; + + let mut rs = HashMap::new(); + rs.insert(na.clone(), relay_stats_3.clone()); + + relayer_stats.merge_relay_stats(rs); + assert_eq!(relayer_stats.relay_stats.len(), 1); + assert_eq!(relayer_stats.relay_stats.get(&na).unwrap().num_messages, 3); + 
assert_eq!(relayer_stats.relay_stats.get(&na).unwrap().num_bytes, 3); + assert!( + relayer_stats.relay_stats.get(&na).unwrap().last_seen < now + && relayer_stats.relay_stats.get(&na).unwrap().last_seen >= get_epoch_time_secs() + ); + assert_eq!(relayer_stats.relay_updates.len(), 1); + + for i in 0..(MAX_RELAYER_STATS + 1) { + let na = NeighborAddress { + addrbytes: PeerAddress([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 127, 0, 0, 1]), + port: 14321 + (i as u16), + public_key_hash: Hash160([0u8; 20]), + }; + + let now = get_epoch_time_secs() + (i as u64) + 1; + + let relay_stats = RelayStats { + num_messages: 1, + num_bytes: 1, + last_seen: now, + }; + + let mut rs = HashMap::new(); + rs.insert(na.clone(), relay_stats.clone()); + + relayer_stats.merge_relay_stats(rs); + assert!(relayer_stats.relay_stats.len() <= MAX_RELAYER_STATS); + assert_eq!(relayer_stats.relay_stats.get(&na).unwrap().num_messages, 1); + assert_eq!(relayer_stats.relay_stats.get(&na).unwrap().num_bytes, 1); + assert_eq!(relayer_stats.relay_stats.get(&na).unwrap().last_seen, now); + } +} + +#[test] +fn test_relay_inbound_peer_rankings() { + let mut relay_stats = RelayerStats::new(); + + let all_transactions = codec_all_transactions( + &TransactionVersion::Testnet, + 0x80000000, + &TransactionAnchorMode::Any, + &TransactionPostConditionMode::Allow, + ); + assert!(all_transactions.len() > MAX_RECENT_MESSAGES); + + let nk_1 = NeighborKey { + peer_version: 12345, + network_id: 0x80000000, + addrbytes: PeerAddress([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 127, 0, 0, 1]), + port: 54321, + }; + + let nk_2 = NeighborKey { + peer_version: 12345, + network_id: 0x80000000, + addrbytes: PeerAddress([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 127, 0, 0, 1]), + port: 54322, + }; + + let nk_3 = NeighborKey { + peer_version: 12345, + network_id: 0x80000000, + addrbytes: PeerAddress([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 127, 0, 0, 1]), + port: 54323, + }; + + let dups = 
relay_stats.count_relay_dups(&all_transactions[0]); + assert_eq!(dups.len(), 0); + + relay_stats.add_relayed_message(nk_1.clone(), &all_transactions[0]); + relay_stats.add_relayed_message(nk_1.clone(), &all_transactions[0]); + relay_stats.add_relayed_message(nk_1.clone(), &all_transactions[0]); + + let dups = relay_stats.count_relay_dups(&all_transactions[0]); + assert_eq!(dups.len(), 1); + assert_eq!(*dups.get(&nk_1).unwrap(), 3); + + relay_stats.add_relayed_message(nk_2.clone(), &all_transactions[0]); + relay_stats.add_relayed_message(nk_2.clone(), &all_transactions[0]); + relay_stats.add_relayed_message(nk_2.clone(), &all_transactions[0]); + relay_stats.add_relayed_message(nk_2.clone(), &all_transactions[0]); + + let dups = relay_stats.count_relay_dups(&all_transactions[0]); + assert_eq!(dups.len(), 2); + assert_eq!(*dups.get(&nk_1).unwrap(), 3); + assert_eq!(*dups.get(&nk_2).unwrap(), 4); + + // total dups == 7 + let dist = relay_stats.get_inbound_relay_rankings( + &vec![nk_1.clone(), nk_2.clone(), nk_3.clone()], + &all_transactions[0], + 0, + ); + assert_eq!(*dist.get(&nk_1).unwrap(), 7 - 3 + 1); + assert_eq!(*dist.get(&nk_2).unwrap(), 7 - 4 + 1); + assert_eq!(*dist.get(&nk_3).unwrap(), 7 + 1); + + // high warmup period + let dist = relay_stats.get_inbound_relay_rankings( + &vec![nk_1.clone(), nk_2.clone(), nk_3.clone()], + &all_transactions[0], + 100, + ); + assert_eq!(*dist.get(&nk_1).unwrap(), 100 + 1); + assert_eq!(*dist.get(&nk_2).unwrap(), 100 + 1); + assert_eq!(*dist.get(&nk_3).unwrap(), 100 + 1); +} + +#[test] +fn test_relay_outbound_peer_rankings() { + let relay_stats = RelayerStats::new(); + + let asn1 = ASEntry4 { + prefix: 0x10000000, + mask: 8, + asn: 1, + org: 1, + }; + + let asn2 = ASEntry4 { + prefix: 0x20000000, + mask: 8, + asn: 2, + org: 2, + }; + + let nk_1 = NeighborKey { + peer_version: 12345, + network_id: 0x80000000, + addrbytes: PeerAddress([ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 0x10, 0x11, 0x12, 0x13, + ]), + port: 54321, + }; 
+ + let nk_2 = NeighborKey { + peer_version: 12345, + network_id: 0x80000000, + addrbytes: PeerAddress([ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 0x20, 0x21, 0x22, 0x23, + ]), + port: 54322, + }; + + let nk_3 = NeighborKey { + peer_version: 12345, + network_id: 0x80000000, + addrbytes: PeerAddress([ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 0x20, 0x21, 0x22, 0x24, + ]), + port: 54323, + }; + + let n1 = Neighbor { + addr: nk_1.clone(), + public_key: Secp256k1PublicKey::from_hex( + "0260569384baa726f877d47045931e5310383f18d0b243a9b6c095cee6ef19abd6", + ) + .unwrap(), + expire_block: 4302, + last_contact_time: 0, + allowed: 0, + denied: 0, + asn: 1, + org: 1, + in_degree: 0, + out_degree: 0, + }; + + let n2 = Neighbor { + addr: nk_2.clone(), + public_key: Secp256k1PublicKey::from_hex( + "02465f9ff58dfa8e844fec86fa5fc3fd59c75ea807e20d469b0a9f885d2891fbd4", + ) + .unwrap(), + expire_block: 4302, + last_contact_time: 0, + allowed: 0, + denied: 0, + asn: 2, + org: 2, + in_degree: 0, + out_degree: 0, + }; + + let n3 = Neighbor { + addr: nk_3.clone(), + public_key: Secp256k1PublicKey::from_hex( + "032d8a1ea2282c1514fdc1a6f21019561569d02a225cf7c14b4f803b0393cef031", + ) + .unwrap(), + expire_block: 4302, + last_contact_time: 0, + allowed: 0, + denied: 0, + asn: 2, + org: 2, + in_degree: 0, + out_degree: 0, + }; + + let peerdb = PeerDB::connect_memory( + 0x80000000, + 0, + 4032, + UrlString::try_from("http://foo.com").unwrap(), + &vec![asn1, asn2], + &vec![n1.clone(), n2.clone(), n3.clone()], + ) + .unwrap(); + + let asn_count = RelayerStats::count_ASNs( + peerdb.conn(), + &vec![nk_1.clone(), nk_2.clone(), nk_3.clone()], + ) + .unwrap(); + assert_eq!(asn_count.len(), 3); + assert_eq!(*asn_count.get(&nk_1).unwrap(), 1); + assert_eq!(*asn_count.get(&nk_2).unwrap(), 2); + assert_eq!(*asn_count.get(&nk_3).unwrap(), 2); + + let ranking = relay_stats + .get_outbound_relay_rankings(&peerdb, &vec![nk_1.clone(), nk_2.clone(), nk_3.clone()]) + .unwrap(); + 
assert_eq!(ranking.len(), 3); + assert_eq!(*ranking.get(&nk_1).unwrap(), 5 - 1 + 1); + assert_eq!(*ranking.get(&nk_2).unwrap(), 5 - 2 + 1); + assert_eq!(*ranking.get(&nk_3).unwrap(), 5 - 2 + 1); + + let ranking = relay_stats + .get_outbound_relay_rankings(&peerdb, &vec![nk_2.clone(), nk_3.clone()]) + .unwrap(); + assert_eq!(ranking.len(), 2); + assert_eq!(*ranking.get(&nk_2).unwrap(), 4 - 2 + 1); + assert_eq!(*ranking.get(&nk_3).unwrap(), 4 - 2 + 1); +} + +#[test] +#[ignore] +fn test_get_blocks_and_microblocks_3_peers_push_available() { + with_timeout(600, || { + run_get_blocks_and_microblocks( + "test_get_blocks_and_microblocks_3_peers_push_available", + 4200, + 3, + |ref mut peer_configs| { + // build initial network topology. + assert_eq!(peer_configs.len(), 3); + + // peer 0 produces the blocks + peer_configs[0].connection_opts.disable_chat_neighbors = true; + + // peer 1 downloads the blocks from peer 0, and sends + // BlocksAvailable and MicroblocksAvailable messages to + // peer 2. + peer_configs[1].connection_opts.disable_chat_neighbors = true; + + // peer 2 learns about the blocks and microblocks from peer 1's + // BlocksAvaiable and MicroblocksAvailable messages, but + // not from inv syncs. 
+ peer_configs[2].connection_opts.disable_chat_neighbors = true; + peer_configs[2].connection_opts.disable_inv_sync = true; + + // disable nat punches -- disconnect/reconnect + // clears inv state + peer_configs[0].connection_opts.disable_natpunch = true; + peer_configs[1].connection_opts.disable_natpunch = true; + peer_configs[2].connection_opts.disable_natpunch = true; + + // do not push blocks and microblocks; only announce them + peer_configs[0].connection_opts.disable_block_push = true; + peer_configs[1].connection_opts.disable_block_push = true; + peer_configs[2].connection_opts.disable_block_push = true; + + peer_configs[0].connection_opts.disable_microblock_push = true; + peer_configs[1].connection_opts.disable_microblock_push = true; + peer_configs[2].connection_opts.disable_microblock_push = true; + + // generous timeouts + peer_configs[0].connection_opts.connect_timeout = 180; + peer_configs[1].connection_opts.connect_timeout = 180; + peer_configs[2].connection_opts.connect_timeout = 180; + peer_configs[0].connection_opts.timeout = 180; + peer_configs[1].connection_opts.timeout = 180; + peer_configs[2].connection_opts.timeout = 180; + + let peer_0 = peer_configs[0].to_neighbor(); + let peer_1 = peer_configs[1].to_neighbor(); + let peer_2 = peer_configs[2].to_neighbor(); + + peer_configs[0].add_neighbor(&peer_1); + peer_configs[1].add_neighbor(&peer_0); + peer_configs[2].add_neighbor(&peer_1); + }, + |num_blocks, ref mut peers| { + let tip = SortitionDB::get_canonical_burn_chain_tip( + &peers[0].sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + let this_reward_cycle = peers[0] + .config + .burnchain + .block_height_to_reward_cycle(tip.block_height) + .unwrap(); + + // build up block data to replicate + let mut block_data = vec![]; + for _ in 0..num_blocks { + // only produce blocks for a single reward + // cycle, since pushing block/microblock + // announcements in reward cycles the remote + // peer doesn't know about won't work. 
+ let tip = SortitionDB::get_canonical_burn_chain_tip( + &peers[0].sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + if peers[0] + .config + .burnchain + .block_height_to_reward_cycle(tip.block_height) + .unwrap() + != this_reward_cycle + { + continue; + } + + let (mut burn_ops, stacks_block, microblocks) = peers[0].make_default_tenure(); + + let (_, burn_header_hash, consensus_hash) = + peers[0].next_burnchain_block(burn_ops.clone()); + peers[0].process_stacks_epoch_at_tip(&stacks_block, µblocks); + + TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); + + for i in 1..peers.len() { + peers[i].next_burnchain_block_raw(burn_ops.clone()); + } + + let sn = SortitionDB::get_canonical_burn_chain_tip( + &peers[0].sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + block_data.push(( + sn.consensus_hash.clone(), + Some(stacks_block), + Some(microblocks), + )); + } + + assert_eq!(block_data.len(), 5); + + block_data + }, + |ref mut peers| { + // make sure peer 2's inv has an entry for peer 1, even + // though it's not doing an inv sync. 
This is required for the downloader to + // work, and for (Micro)BlocksAvailable messages to be accepted + let peer_1_nk = peers[1].to_neighbor().addr; + let peer_2_nk = peers[2].to_neighbor().addr; + let bc = peers[1].config.burnchain.clone(); + match peers[2].network.inv_state { + Some(ref mut inv_state) => { + if inv_state.get_stats(&peer_1_nk).is_none() { + test_debug!("initialize inv statistics for peer 1 in peer 2"); + inv_state.add_peer(peer_1_nk.clone(), true); + if let Some(ref mut stats) = inv_state.get_stats_mut(&peer_1_nk) { + stats.scans = 1; + stats.inv.merge_pox_inv(&bc, 0, 6, vec![0xff], false); + stats.inv.merge_blocks_inv( + 0, + 30, + vec![0, 0, 0, 0, 0], + vec![0, 0, 0, 0, 0], + false, + ); + } else { + panic!("Unable to instantiate inv stats for {:?}", &peer_1_nk); + } + } else { + test_debug!("peer 2 has inv state for peer 1"); + } + } + None => { + test_debug!("No inv state for peer 1"); + } + } + + let tip = SortitionDB::get_canonical_burn_chain_tip( + &peers[0].sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + let this_reward_cycle = peers[0] + .config + .burnchain + .block_height_to_reward_cycle(tip.block_height) + .unwrap(); + + let peer_1_nk = peers[1].to_neighbor().addr; + match peers[2].network.inv_state { + Some(ref mut inv_state) => { + if inv_state.get_stats(&peer_1_nk).is_none() { + test_debug!("initialize inv statistics for peer 1 in peer 2"); + inv_state.add_peer(peer_1_nk.clone(), true); + + inv_state + .get_stats_mut(&peer_1_nk) + .unwrap() + .inv + .num_reward_cycles = this_reward_cycle; + inv_state.get_stats_mut(&peer_1_nk).unwrap().inv.pox_inv = vec![0x3f]; + } else { + test_debug!("peer 2 has inv state for peer 1"); + } + } + None => { + test_debug!("No inv state for peer 2"); + } + } + + // peer 2 should never see a BlocksInv + // message. 
That would imply it asked for an inv + for (_, convo) in peers[2].network.peers.iter() { + assert_eq!( + convo + .stats + .get_message_recv_count(StacksMessageID::BlocksInv), + 0 + ); + } + }, + |ref peer| { + // check peer health + // TODO + true + }, + |_| true, + ); + }) +} + +fn is_peer_connected(peer: &TestPeer, dest: &NeighborKey) -> bool { + let event_id = match peer.network.events.get(dest) { + Some(evid) => *evid, + None => { + return false; + } + }; + + match peer.network.peers.get(&event_id) { + Some(convo) => { + return convo.is_authenticated(); + } + None => { + return false; + } + } +} + +fn push_message( + peer: &mut TestPeer, + dest: &NeighborKey, + relay_hints: Vec, + msg: StacksMessageType, +) -> bool { + let event_id = match peer.network.events.get(dest) { + Some(evid) => *evid, + None => { + panic!("Unreachable peer: {:?}", dest); + } + }; + + let relay_msg = match peer.network.peers.get_mut(&event_id) { + Some(convo) => convo + .sign_relay_message( + &peer.network.local_peer, + &peer.network.chain_view, + relay_hints, + msg, + ) + .unwrap(), + None => { + panic!("No such event ID {} from neighbor {}", event_id, dest); + } + }; + + match peer.network.relay_signed_message(dest, relay_msg.clone()) { + Ok(_) => { + return true; + } + Err(net_error::OutboxOverflow) => { + test_debug!( + "{:?} outbox overflow; try again later", + &peer.to_neighbor().addr + ); + return false; + } + Err(net_error::SendError(msg)) => { + warn!( + "Failed to send to {:?}: SendError({})", + &peer.to_neighbor().addr, + msg + ); + return false; + } + Err(e) => { + test_debug!( + "{:?} encountered fatal error when forwarding: {:?}", + &peer.to_neighbor().addr, + &e + ); + assert!(false); + unreachable!(); + } + } +} + +fn http_rpc(peer_http: u16, request: StacksHttpRequest) -> Result { + use std::net::TcpStream; + + let mut sock = TcpStream::connect( + &format!("127.0.0.1:{}", peer_http) + .parse::() + .unwrap(), + ) + .unwrap(); + + let request_bytes = 
request.try_serialize().unwrap(); + match sock.write_all(&request_bytes) { + Ok(_) => {} + Err(e) => { + test_debug!("Client failed to write: {:?}", &e); + return Err(net_error::WriteError(e)); + } + } + + let mut resp = vec![]; + match sock.read_to_end(&mut resp) { + Ok(_) => { + if resp.len() == 0 { + test_debug!("Client did not receive any data"); + return Err(net_error::PermanentlyDrained); + } + } + Err(e) => { + test_debug!("Client failed to read: {:?}", &e); + return Err(net_error::ReadError(e)); + } + } + + test_debug!("Client received {} bytes", resp.len()); + let response = StacksHttp::parse_response( + &request.preamble().verb, + &request.preamble().path_and_query_str, + &resp, + ) + .unwrap(); + match response { + StacksHttpMessage::Response(x) => Ok(x), + _ => { + panic!("Did not receive a Response"); + } + } +} + +pub fn broadcast_message( + broadcaster: &mut TestPeer, + relay_hints: Vec, + msg: StacksMessageType, +) -> bool { + let request = NetworkRequest::Broadcast(relay_hints, msg); + match broadcaster.network.dispatch_request(request) { + Ok(_) => true, + Err(e) => { + error!("Failed to broadcast: {:?}", &e); + false + } + } +} + +fn push_block( + peer: &mut TestPeer, + dest: &NeighborKey, + relay_hints: Vec, + consensus_hash: ConsensusHash, + block: StacksBlock, +) -> bool { + test_debug!( + "{:?}: Push block {}/{} to {:?}", + peer.to_neighbor().addr, + &consensus_hash, + block.block_hash(), + dest + ); + + let sn = SortitionDB::get_block_snapshot_consensus( + peer.sortdb.as_ref().unwrap().conn(), + &consensus_hash, + ) + .unwrap() + .unwrap(); + let consensus_hash = sn.consensus_hash; + + let msg = StacksMessageType::Blocks(BlocksData { + blocks: vec![BlocksDatum(consensus_hash, block)], + }); + push_message(peer, dest, relay_hints, msg) +} + +fn broadcast_block( + peer: &mut TestPeer, + relay_hints: Vec, + consensus_hash: ConsensusHash, + block: StacksBlock, +) -> bool { + test_debug!( + "{:?}: Broadcast block {}/{}", + 
peer.to_neighbor().addr, + &consensus_hash, + block.block_hash(), + ); + + let sn = SortitionDB::get_block_snapshot_consensus( + peer.sortdb.as_ref().unwrap().conn(), + &consensus_hash, + ) + .unwrap() + .unwrap(); + let consensus_hash = sn.consensus_hash; + + let msg = StacksMessageType::Blocks(BlocksData { + blocks: vec![BlocksDatum(consensus_hash, block)], + }); + broadcast_message(peer, relay_hints, msg) +} + +fn push_microblocks( + peer: &mut TestPeer, + dest: &NeighborKey, + relay_hints: Vec, + consensus_hash: ConsensusHash, + block_hash: BlockHeaderHash, + microblocks: Vec, +) -> bool { + test_debug!( + "{:?}: Push {} microblocksblock {}/{} to {:?}", + peer.to_neighbor().addr, + microblocks.len(), + &consensus_hash, + &block_hash, + dest + ); + let msg = StacksMessageType::Microblocks(MicroblocksData { + index_anchor_block: StacksBlockHeader::make_index_block_hash(&consensus_hash, &block_hash), + microblocks: microblocks, + }); + push_message(peer, dest, relay_hints, msg) +} + +fn broadcast_microblocks( + peer: &mut TestPeer, + relay_hints: Vec, + consensus_hash: ConsensusHash, + block_hash: BlockHeaderHash, + microblocks: Vec, +) -> bool { + test_debug!( + "{:?}: broadcast {} microblocksblock {}/{}", + peer.to_neighbor().addr, + microblocks.len(), + &consensus_hash, + &block_hash, + ); + let msg = StacksMessageType::Microblocks(MicroblocksData { + index_anchor_block: StacksBlockHeader::make_index_block_hash(&consensus_hash, &block_hash), + microblocks: microblocks, + }); + broadcast_message(peer, relay_hints, msg) +} + +fn push_transaction( + peer: &mut TestPeer, + dest: &NeighborKey, + relay_hints: Vec, + tx: StacksTransaction, +) -> bool { + test_debug!( + "{:?}: Push tx {} to {:?}", + peer.to_neighbor().addr, + tx.txid(), + dest + ); + let msg = StacksMessageType::Transaction(tx); + push_message(peer, dest, relay_hints, msg) +} + +fn broadcast_transaction( + peer: &mut TestPeer, + relay_hints: Vec, + tx: StacksTransaction, +) -> bool { + 
test_debug!("{:?}: broadcast tx {}", peer.to_neighbor().addr, tx.txid(),); + let msg = StacksMessageType::Transaction(tx); + broadcast_message(peer, relay_hints, msg) +} + +fn http_get_info(http_port: u16) -> RPCPeerInfoData { + let mut request = HttpRequestPreamble::new_for_peer( + PeerHost::from_host_port("127.0.0.1".to_string(), http_port), + "GET".to_string(), + "/v2/info".to_string(), + ); + request.keep_alive = false; + let getinfo = StacksHttpRequest::new(request, HttpRequestContents::new()); + let response = http_rpc(http_port, getinfo).unwrap(); + let peer_info = response.decode_peer_info().unwrap(); + peer_info +} + +fn http_post_block(http_port: u16, consensus_hash: &ConsensusHash, block: &StacksBlock) -> bool { + test_debug!( + "upload block {}/{} to localhost:{}", + consensus_hash, + block.block_hash(), + http_port + ); + let mut request = HttpRequestPreamble::new_for_peer( + PeerHost::from_host_port("127.0.0.1".to_string(), http_port), + "POST".to_string(), + "/v2/blocks".to_string(), + ); + request.keep_alive = false; + let post_block = + StacksHttpRequest::new(request, HttpRequestContents::new().payload_stacks(block)); + + let response = http_rpc(http_port, post_block).unwrap(); + let accepted = response.decode_stacks_block_accepted().unwrap(); + accepted.accepted +} + +fn http_post_microblock( + http_port: u16, + consensus_hash: &ConsensusHash, + block_hash: &BlockHeaderHash, + mblock: &StacksMicroblock, +) -> bool { + test_debug!( + "upload microblock {}/{}-{} to localhost:{}", + consensus_hash, + block_hash, + mblock.block_hash(), + http_port + ); + let mut request = HttpRequestPreamble::new_for_peer( + PeerHost::from_host_port("127.0.0.1".to_string(), http_port), + "POST".to_string(), + "/v2/microblocks".to_string(), + ); + request.keep_alive = false; + let tip = StacksBlockHeader::make_index_block_hash(consensus_hash, block_hash); + let post_microblock = StacksHttpRequest::new( + request, + HttpRequestContents::new() + .payload_stacks(mblock) + 
.for_specific_tip(tip), + ); + + let response = http_rpc(http_port, post_microblock).unwrap(); + let payload = response.get_http_payload_ok().unwrap(); + let bhh: BlockHeaderHash = serde_json::from_value(payload.try_into().unwrap()).unwrap(); + return true; +} + +fn test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks( + outbound_test: bool, + disable_push: bool, +) { + with_timeout(600, move || { + let original_blocks_and_microblocks = RefCell::new(vec![]); + let blocks_and_microblocks = RefCell::new(vec![]); + let idx = RefCell::new(0); + let sent_blocks = RefCell::new(false); + let sent_microblocks = RefCell::new(false); + + run_get_blocks_and_microblocks( + "test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks", + 4210, + 2, + |ref mut peer_configs| { + // build initial network topology. + assert_eq!(peer_configs.len(), 2); + + // peer 0 produces the blocks and pushes them to peer 1 + // peer 1 receives the blocks and microblocks. It + // doesn't download them, nor does it try to get invs + peer_configs[0].connection_opts.disable_block_advertisement = true; + + peer_configs[1].connection_opts.disable_inv_sync = true; + peer_configs[1].connection_opts.disable_block_download = true; + peer_configs[1].connection_opts.disable_block_advertisement = true; + + // disable nat punches -- disconnect/reconnect + // clears inv state + peer_configs[0].connection_opts.disable_natpunch = true; + peer_configs[1].connection_opts.disable_natpunch = true; + + // force usage of blocksavailable/microblocksavailable? 
+ if disable_push { + peer_configs[0].connection_opts.disable_block_push = true; + peer_configs[0].connection_opts.disable_microblock_push = true; + peer_configs[1].connection_opts.disable_block_push = true; + peer_configs[1].connection_opts.disable_microblock_push = true; + } + + let peer_0 = peer_configs[0].to_neighbor(); + let peer_1 = peer_configs[1].to_neighbor(); + + peer_configs[0].add_neighbor(&peer_1); + + if outbound_test { + // neighbor relationship is symmetric -- peer 1 has an outbound connection + // to peer 0. + peer_configs[1].add_neighbor(&peer_0); + } + }, + |num_blocks, ref mut peers| { + let tip = SortitionDB::get_canonical_burn_chain_tip( + &peers[0].sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + let this_reward_cycle = peers[0] + .config + .burnchain + .block_height_to_reward_cycle(tip.block_height) + .unwrap(); + + // build up block data to replicate + let mut block_data = vec![]; + for _ in 0..num_blocks { + let tip = SortitionDB::get_canonical_burn_chain_tip( + &peers[0].sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + if peers[0] + .config + .burnchain + .block_height_to_reward_cycle(tip.block_height) + .unwrap() + != this_reward_cycle + { + continue; + } + let (mut burn_ops, stacks_block, microblocks) = peers[0].make_default_tenure(); + + let (_, burn_header_hash, consensus_hash) = + peers[0].next_burnchain_block(burn_ops.clone()); + peers[0].process_stacks_epoch_at_tip(&stacks_block, µblocks); + + TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); + + for i in 1..peers.len() { + peers[i].next_burnchain_block_raw(burn_ops.clone()); + } + + let sn = SortitionDB::get_canonical_burn_chain_tip( + &peers[0].sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + block_data.push(( + sn.consensus_hash.clone(), + Some(stacks_block), + Some(microblocks), + )); + } + let saved_copy: Vec<(ConsensusHash, StacksBlock, Vec)> = + block_data + .clone() + .drain(..) 
+ .map(|(ch, blk_opt, mblocks_opt)| { + (ch, blk_opt.unwrap(), mblocks_opt.unwrap()) + }) + .collect(); + *blocks_and_microblocks.borrow_mut() = saved_copy.clone(); + *original_blocks_and_microblocks.borrow_mut() = saved_copy; + block_data + }, + |ref mut peers| { + if !disable_push { + for peer in peers.iter_mut() { + // force peers to keep trying to process buffered data + peer.network.burnchain_tip.burn_header_hash = + BurnchainHeaderHash([0u8; 32]); + } + } + + // make sure peer 1's inv has an entry for peer 0, even + // though it's not doing an inv sync. This is required for the downloader to + // work + let peer_0_nk = peers[0].to_neighbor().addr; + let peer_1_nk = peers[1].to_neighbor().addr; + match peers[1].network.inv_state { + Some(ref mut inv_state) => { + if inv_state.get_stats(&peer_0_nk).is_none() { + test_debug!("initialize inv statistics for peer 0 in peer 1"); + inv_state.add_peer(peer_0_nk.clone(), true); + } else { + test_debug!("peer 1 has inv state for peer 0"); + } + } + None => { + test_debug!("No inv state for peer 1"); + } + } + + if is_peer_connected(&peers[0], &peer_1_nk) { + // randomly push a block and/or microblocks to peer 1. 
+ let mut block_data = blocks_and_microblocks.borrow_mut(); + let original_block_data = original_blocks_and_microblocks.borrow(); + let mut next_idx = idx.borrow_mut(); + let data_to_push = { + if block_data.len() > 0 { + let (consensus_hash, block, microblocks) = + block_data[*next_idx].clone(); + Some((consensus_hash, block, microblocks)) + } else { + // start over (can happen if a message gets + // dropped due to a timeout) + test_debug!("Reset block transmission (possible timeout)"); + *block_data = (*original_block_data).clone(); + *next_idx = thread_rng().gen::() % block_data.len(); + let (consensus_hash, block, microblocks) = + block_data[*next_idx].clone(); + Some((consensus_hash, block, microblocks)) + } + }; + + if let Some((consensus_hash, block, microblocks)) = data_to_push { + test_debug!( + "Push block {}/{} and microblocks", + &consensus_hash, + block.block_hash() + ); + + let block_hash = block.block_hash(); + let mut sent_blocks = sent_blocks.borrow_mut(); + let mut sent_microblocks = sent_microblocks.borrow_mut(); + + let pushed_block = if !*sent_blocks { + push_block( + &mut peers[0], + &peer_1_nk, + vec![], + consensus_hash.clone(), + block, + ) + } else { + true + }; + + *sent_blocks = pushed_block; + + if pushed_block { + let pushed_microblock = if !*sent_microblocks { + push_microblocks( + &mut peers[0], + &peer_1_nk, + vec![], + consensus_hash, + block_hash, + microblocks, + ) + } else { + true + }; + + *sent_microblocks = pushed_microblock; + + if pushed_block && pushed_microblock { + block_data.remove(*next_idx); + if block_data.len() > 0 { + *next_idx = thread_rng().gen::() % block_data.len(); + } + *sent_blocks = false; + *sent_microblocks = false; + } + } + test_debug!("{} blocks/microblocks remaining", block_data.len()); + } + } + + // peer 0 should never see a GetBlocksInv message. 
+ // peer 1 should never see a BlocksInv message + for (_, convo) in peers[0].network.peers.iter() { + assert_eq!( + convo + .stats + .get_message_recv_count(StacksMessageID::GetBlocksInv), + 0 + ); + } + for (_, convo) in peers[1].network.peers.iter() { + assert_eq!( + convo + .stats + .get_message_recv_count(StacksMessageID::BlocksInv), + 0 + ); + } + }, + |ref peer| { + // check peer health + // nothing should break + // TODO + true + }, + |_| true, + ); + }) +} + +#[test] +#[ignore] +fn test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks_outbound() { + // simulates node 0 pushing blocks to node 1, but node 0 is publicly routable. + // nodes rely on blocksavailable/microblocksavailable to discover blocks + test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks(true, true) +} + +#[test] +#[ignore] +fn test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks_inbound() { + // simulates node 0 pushing blocks to node 1, where node 0 is behind a NAT + // nodes rely on blocksavailable/microblocksavailable to discover blocks + test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks(false, true) +} + +#[test] +#[ignore] +fn test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks_outbound_direct() { + // simulates node 0 pushing blocks to node 1, but node 0 is publicly routable. 
+ // nodes may push blocks and microblocks directly to each other + test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks(true, false) +} + +#[test] +#[ignore] +fn test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks_inbound_direct() { + // simulates node 0 pushing blocks to node 1, where node 0 is behind a NAT + // nodes may push blocks and microblocks directly to each other + test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks(false, false) +} + +#[test] +#[ignore] +fn test_get_blocks_and_microblocks_upload_blocks_http() { + with_timeout(600, || { + let (port_sx, port_rx) = std::sync::mpsc::sync_channel(1); + let (block_sx, block_rx) = std::sync::mpsc::sync_channel(1); + + std::thread::spawn(move || loop { + eprintln!("Get port"); + let remote_port: u16 = port_rx.recv().unwrap(); + eprintln!("Got port {}", remote_port); + + eprintln!("Send getinfo"); + let peer_info = http_get_info(remote_port); + eprintln!("Got getinfo! {:?}", &peer_info); + let idx = peer_info.stacks_tip_height as usize; + + eprintln!("Get blocks and microblocks"); + let blocks_and_microblocks: Vec<( + ConsensusHash, + Option, + Option>, + )> = block_rx.recv().unwrap(); + eprintln!("Got blocks and microblocks!"); + + if idx >= blocks_and_microblocks.len() { + eprintln!("Out of blocks to send!"); + return; + } + + eprintln!( + "Upload block {}", + &blocks_and_microblocks[idx].1.as_ref().unwrap().block_hash() + ); + http_post_block( + remote_port, + &blocks_and_microblocks[idx].0, + blocks_and_microblocks[idx].1.as_ref().unwrap(), + ); + for mblock in blocks_and_microblocks[idx].2.as_ref().unwrap().iter() { + eprintln!("Upload microblock {}", mblock.block_hash()); + http_post_microblock( + remote_port, + &blocks_and_microblocks[idx].0, + &blocks_and_microblocks[idx].1.as_ref().unwrap().block_hash(), + mblock, + ); + } + }); + + let original_blocks_and_microblocks = RefCell::new(vec![]); + let port_sx_cell = RefCell::new(port_sx); + let block_sx_cell = 
RefCell::new(block_sx); + + run_get_blocks_and_microblocks( + "test_get_blocks_and_microblocks_upload_blocks_http", + 4250, + 2, + |ref mut peer_configs| { + // build initial network topology. + assert_eq!(peer_configs.len(), 2); + + // peer 0 produces the blocks + peer_configs[0].connection_opts.disable_chat_neighbors = true; + + // peer 0 sends them to peer 1 + peer_configs[1].connection_opts.disable_chat_neighbors = true; + peer_configs[1].connection_opts.disable_inv_sync = true; + + // disable nat punches -- disconnect/reconnect + // clears inv state + peer_configs[0].connection_opts.disable_natpunch = true; + peer_configs[1].connection_opts.disable_natpunch = true; + + // generous timeouts + peer_configs[0].connection_opts.timeout = 180; + peer_configs[1].connection_opts.timeout = 180; + + let peer_0 = peer_configs[0].to_neighbor(); + let peer_1 = peer_configs[1].to_neighbor(); + }, + |num_blocks, ref mut peers| { + let tip = SortitionDB::get_canonical_burn_chain_tip( + &peers[0].sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + let this_reward_cycle = peers[0] + .config + .burnchain + .block_height_to_reward_cycle(tip.block_height) + .unwrap(); + + // build up block data to replicate + let mut block_data = vec![]; + for _ in 0..num_blocks { + // only produce blocks for a single reward + // cycle, since pushing block/microblock + // announcements in reward cycles the remote + // peer doesn't know about won't work. 
+ let tip = SortitionDB::get_canonical_burn_chain_tip( + &peers[0].sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + if peers[0] + .config + .burnchain + .block_height_to_reward_cycle(tip.block_height) + .unwrap() + != this_reward_cycle + { + continue; + } + + let (mut burn_ops, stacks_block, microblocks) = peers[0].make_default_tenure(); + + let (_, burn_header_hash, consensus_hash) = + peers[0].next_burnchain_block(burn_ops.clone()); + peers[0].process_stacks_epoch_at_tip(&stacks_block, µblocks); + + TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); + + for i in 1..peers.len() { + peers[i].next_burnchain_block_raw(burn_ops.clone()); + } + + let sn = SortitionDB::get_canonical_burn_chain_tip( + &peers[0].sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + block_data.push(( + sn.consensus_hash.clone(), + Some(stacks_block), + Some(microblocks), + )); + } + + assert_eq!(block_data.len(), 5); + + *original_blocks_and_microblocks.borrow_mut() = block_data.clone(); + + block_data + }, + |ref mut peers| { + let blocks_and_microblocks = original_blocks_and_microblocks.borrow().clone(); + let remote_port = peers[1].config.http_port; + + let port_sx = port_sx_cell.borrow_mut(); + let block_sx = block_sx_cell.borrow_mut(); + + let _ = (*port_sx).try_send(remote_port); + let _ = (*block_sx).try_send(blocks_and_microblocks); + }, + |ref peer| { + // check peer health + // TODO + true + }, + |_| true, + ); + }) +} + +fn make_test_smart_contract_transaction( + peer: &mut TestPeer, + name: &str, + consensus_hash: &ConsensusHash, + block_hash: &BlockHeaderHash, +) -> StacksTransaction { + // make a smart contract + let contract = " + (define-data-var bar int 0) + (define-public (get-bar) (ok (var-get bar))) + (define-public (set-bar (x int) (y int)) + (begin (var-set bar (/ x y)) (ok (var-get bar))))"; + + let cost_limits = peer.config.connection_opts.read_only_call_limit.clone(); + + let tx_contract = peer + .with_mining_state( + |ref mut sortdb, ref mut 
miner, ref mut spending_account, ref mut stacks_node| { + let mut tx_contract = StacksTransaction::new( + TransactionVersion::Testnet, + spending_account.as_transaction_auth().unwrap().into(), + TransactionPayload::new_smart_contract( + &name.to_string(), + &contract.to_string(), + None, + ) + .unwrap(), + ); + + let chain_tip = + StacksBlockHeader::make_index_block_hash(consensus_hash, block_hash); + let cur_nonce = stacks_node + .chainstate + .with_read_only_clarity_tx(&sortdb.index_conn(), &chain_tip, |clarity_tx| { + clarity_tx.with_clarity_db_readonly(|clarity_db| { + clarity_db + .get_account_nonce( + &spending_account.origin_address().unwrap().into(), + ) + .unwrap() + }) + }) + .unwrap(); + + test_debug!( + "Nonce of {:?} is {} at {}/{}", + &spending_account.origin_address().unwrap(), + cur_nonce, + consensus_hash, + block_hash + ); + + // spending_account.set_nonce(cur_nonce + 1); + + tx_contract.chain_id = 0x80000000; + tx_contract.auth.set_origin_nonce(cur_nonce); + tx_contract.set_tx_fee(MINIMUM_TX_FEE_RATE_PER_BYTE * 500); + + let mut tx_signer = StacksTransactionSigner::new(&tx_contract); + spending_account.sign_as_origin(&mut tx_signer); + + let tx_contract_signed = tx_signer.get_tx().unwrap(); + + test_debug!( + "make transaction {:?} off of {:?}/{:?}: {:?}", + &tx_contract_signed.txid(), + consensus_hash, + block_hash, + &tx_contract_signed + ); + + Ok(tx_contract_signed) + }, + ) + .unwrap(); + + tx_contract +} + +#[test] +#[ignore] +fn test_get_blocks_and_microblocks_2_peers_push_transactions() { + with_timeout(600, || { + let blocks_and_microblocks = RefCell::new(vec![]); + let blocks_idx = RefCell::new(0); + let sent_txs = RefCell::new(vec![]); + let done = RefCell::new(false); + + let peers = run_get_blocks_and_microblocks( + "test_get_blocks_and_microblocks_2_peers_push_transactions", + 4220, + 2, + |ref mut peer_configs| { + // build initial network topology. 
+ assert_eq!(peer_configs.len(), 2); + + // peer 0 generates blocks and microblocks, and pushes + // them to peer 1. Peer 0 also generates transactions + // and pushes them to peer 1. + peer_configs[0].connection_opts.disable_block_advertisement = true; + + // let peer 0 drive this test, as before, by controlling + // when peer 1 sees blocks. + peer_configs[1].connection_opts.disable_inv_sync = true; + peer_configs[1].connection_opts.disable_block_download = true; + peer_configs[1].connection_opts.disable_block_advertisement = true; + + peer_configs[0].connection_opts.outbox_maxlen = 100; + peer_configs[1].connection_opts.inbox_maxlen = 100; + + // disable nat punches -- disconnect/reconnect + // clears inv state + peer_configs[0].connection_opts.disable_natpunch = true; + peer_configs[1].connection_opts.disable_natpunch = true; + + let initial_balances = vec![ + ( + PrincipalData::from( + peer_configs[0].spending_account.origin_address().unwrap(), + ), + 1000000, + ), + ( + PrincipalData::from( + peer_configs[1].spending_account.origin_address().unwrap(), + ), + 1000000, + ), + ]; + + peer_configs[0].initial_balances = initial_balances.clone(); + peer_configs[1].initial_balances = initial_balances.clone(); + + let peer_0 = peer_configs[0].to_neighbor(); + let peer_1 = peer_configs[1].to_neighbor(); + + peer_configs[0].add_neighbor(&peer_1); + peer_configs[1].add_neighbor(&peer_0); + }, + |num_blocks, ref mut peers| { + let tip = SortitionDB::get_canonical_burn_chain_tip( + &peers[0].sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + let this_reward_cycle = peers[0] + .config + .burnchain + .block_height_to_reward_cycle(tip.block_height) + .unwrap(); + + // build up block data to replicate + let mut block_data = vec![]; + for b in 0..num_blocks { + let tip = SortitionDB::get_canonical_burn_chain_tip( + &peers[0].sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + if peers[0] + .config + .burnchain + .block_height_to_reward_cycle(tip.block_height) + .unwrap() + != 
this_reward_cycle + { + continue; + } + let (mut burn_ops, stacks_block, microblocks) = peers[0].make_default_tenure(); + + let (_, burn_header_hash, consensus_hash) = + peers[0].next_burnchain_block(burn_ops.clone()); + peers[0].process_stacks_epoch_at_tip(&stacks_block, µblocks); + + TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); + + for i in 1..peers.len() { + peers[i].next_burnchain_block_raw(burn_ops.clone()); + if b == 0 { + // prime with first block + peers[i].process_stacks_epoch_at_tip(&stacks_block, &vec![]); + } + } + + let sn = SortitionDB::get_canonical_burn_chain_tip( + &peers[0].sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + block_data.push(( + sn.consensus_hash.clone(), + Some(stacks_block), + Some(microblocks), + )); + } + *blocks_and_microblocks.borrow_mut() = block_data + .clone() + .drain(..) + .map(|(ch, blk_opt, mblocks_opt)| (ch, blk_opt.unwrap(), mblocks_opt.unwrap())) + .collect(); + block_data + }, + |ref mut peers| { + let peer_0_nk = peers[0].to_neighbor().addr; + let peer_1_nk = peers[1].to_neighbor().addr; + + // peers must be connected to each other + let mut peer_0_to_1 = false; + let mut peer_1_to_0 = false; + for (nk, event_id) in peers[0].network.events.iter() { + match peers[0].network.peers.get(event_id) { + Some(convo) => { + if *nk == peer_1_nk { + peer_0_to_1 = true; + } + } + None => {} + } + } + for (nk, event_id) in peers[1].network.events.iter() { + match peers[1].network.peers.get(event_id) { + Some(convo) => { + if *nk == peer_0_nk { + peer_1_to_0 = true; + } + } + None => {} + } + } + + if !peer_0_to_1 || !peer_1_to_0 { + test_debug!( + "Peers not bi-directionally connected: 0->1 = {}, 1->0 = {}", + peer_0_to_1, + peer_1_to_0 + ); + return; + } + + // make sure peer 2's inv has an entry for peer 1, even + // though it's not doing an inv sync. 
+ match peers[1].network.inv_state { + Some(ref mut inv_state) => { + if inv_state.get_stats(&peer_0_nk).is_none() { + test_debug!("initialize inv statistics for peer 0 in peer 1"); + inv_state.add_peer(peer_0_nk, true); + } else { + test_debug!("peer 1 has inv state for peer 0"); + } + } + None => { + test_debug!("No inv state for peer 1"); + } + } + + let done_flag = *done.borrow(); + if is_peer_connected(&peers[0], &peer_1_nk) { + // only submit the next transaction if the previous + // one is accepted + let has_last_transaction = { + let expected_txs: std::cell::Ref<'_, Vec> = + sent_txs.borrow(); + if let Some(tx) = (*expected_txs).last() { + let txid = tx.txid(); + if !peers[1].mempool.as_ref().unwrap().has_tx(&txid) { + debug!("Peer 1 still waiting for transaction {}", &txid); + push_transaction(&mut peers[0], &peer_1_nk, vec![], (*tx).clone()); + false + } else { + true + } + } else { + true + } + }; + + if has_last_transaction { + // push blocks and microblocks in order, and push a + // transaction that can only be validated once the + // block and microblocks are processed. 
+ let ( + ( + block_consensus_hash, + block, + microblocks_consensus_hash, + microblocks_block_hash, + microblocks, + ), + idx, + ) = { + let block_data = blocks_and_microblocks.borrow(); + let mut idx = blocks_idx.borrow_mut(); + + let microblocks = block_data[*idx].2.clone(); + let microblocks_consensus_hash = block_data[*idx].0.clone(); + let microblocks_block_hash = block_data[*idx].1.block_hash(); + + *idx += 1; + if *idx >= block_data.len() { + *idx = 1; + } + + let block = block_data[*idx].1.clone(); + let block_consensus_hash = block_data[*idx].0.clone(); + ( + ( + block_consensus_hash, + block, + microblocks_consensus_hash, + microblocks_block_hash, + microblocks, + ), + *idx, + ) + }; + + if !done_flag { + test_debug!( + "Push microblocks built by {}/{} (idx={})", + µblocks_consensus_hash, + µblocks_block_hash, + idx + ); + + let block_hash = block.block_hash(); + push_microblocks( + &mut peers[0], + &peer_1_nk, + vec![], + microblocks_consensus_hash, + microblocks_block_hash, + microblocks, + ); + + test_debug!( + "Push block {}/{} and microblocks (idx = {})", + &block_consensus_hash, + block.block_hash(), + idx + ); + push_block( + &mut peers[0], + &peer_1_nk, + vec![], + block_consensus_hash.clone(), + block, + ); + + // create a transaction against the resulting + // (anchored) chain tip + let tx = make_test_smart_contract_transaction( + &mut peers[0], + &format!("test-contract-{}", &block_hash.to_hex()[0..10]), + &block_consensus_hash, + &block_hash, + ); + + // push or post + push_transaction(&mut peers[0], &peer_1_nk, vec![], tx.clone()); + + let mut expected_txs = sent_txs.borrow_mut(); + expected_txs.push(tx); + } else { + test_debug!("Done pushing data"); + } + } + } + + // peer 0 should never see a GetBlocksInv message. 
+ // peer 1 should never see a BlocksInv message + for (_, convo) in peers[0].network.peers.iter() { + assert_eq!( + convo + .stats + .get_message_recv_count(StacksMessageID::GetBlocksInv), + 0 + ); + } + for (_, convo) in peers[1].network.peers.iter() { + assert_eq!( + convo + .stats + .get_message_recv_count(StacksMessageID::BlocksInv), + 0 + ); + } + }, + |ref peer| { + // check peer health + // nothing should break + // TODO + true + }, + |ref mut peers| { + // all blocks downloaded. only stop if peer 1 has + // all the transactions + let mut done_flag = done.borrow_mut(); + *done_flag = true; + + let txs = + MemPoolDB::get_all_txs(peers[1].mempool.as_ref().unwrap().conn()).unwrap(); + test_debug!("Peer 1 has {} txs", txs.len()); + txs.len() == sent_txs.borrow().len() + }, + ); + + // peer 1 should have all the transactions + let blocks_and_microblocks = blocks_and_microblocks.into_inner(); + + let txs = MemPoolDB::get_all_txs(peers[1].mempool.as_ref().unwrap().conn()).unwrap(); + let expected_txs = sent_txs.into_inner(); + for tx in txs.iter() { + let mut found = false; + for expected_tx in expected_txs.iter() { + if tx.tx.txid() == expected_tx.txid() { + found = true; + break; + } + } + if !found { + panic!("Transaction not found: {:?}", &tx.tx); + } + } + + // peer 1 should have 1 tx per chain tip + for ((consensus_hash, block, _), sent_tx) in + blocks_and_microblocks.iter().zip(expected_txs.iter()) + { + let block_hash = block.block_hash(); + let tx_infos = MemPoolDB::get_txs_after( + peers[1].mempool.as_ref().unwrap().conn(), + consensus_hash, + &block_hash, + 0, + 1000, + ) + .unwrap(); + test_debug!( + "Check {}/{} (height {}): expect {}", + &consensus_hash, + &block_hash, + block.header.total_work.work, + &sent_tx.txid() + ); + assert_eq!(tx_infos.len(), 1); + assert_eq!(tx_infos[0].tx.txid(), sent_tx.txid()); + } + }) +} + +#[test] +#[ignore] +fn test_get_blocks_and_microblocks_peers_broadcast() { + with_timeout(600, || { + let blocks_and_microblocks = 
RefCell::new(vec![]); + let blocks_idx = RefCell::new(0); + let sent_txs = RefCell::new(vec![]); + let done = RefCell::new(false); + let num_peers = 3; + let privk = StacksPrivateKey::new(); + + let peers = run_get_blocks_and_microblocks( + "test_get_blocks_and_microblocks_peers_broadcast", + 4230, + num_peers, + |ref mut peer_configs| { + // build initial network topology. + assert_eq!(peer_configs.len(), num_peers); + + // peer 0 generates blocks and microblocks, and pushes + // them to peers 1..n. Peer 0 also generates transactions + // and broadcasts them to the network. + + peer_configs[0].connection_opts.disable_inv_sync = true; + peer_configs[0].connection_opts.disable_inv_chat = true; + + // disable nat punches -- disconnect/reconnect + // clears inv state. + for i in 0..peer_configs.len() { + peer_configs[i].connection_opts.disable_natpunch = true; + peer_configs[i].connection_opts.disable_network_prune = true; + peer_configs[i].connection_opts.timeout = 600; + peer_configs[i].connection_opts.connect_timeout = 600; + + // do one walk + peer_configs[i].connection_opts.num_initial_walks = 0; + peer_configs[i].connection_opts.walk_retry_count = 0; + peer_configs[i].connection_opts.walk_interval = 600; + + // don't throttle downloads + peer_configs[i].connection_opts.download_interval = 0; + peer_configs[i].connection_opts.inv_sync_interval = 0; + + let max_inflight = peer_configs[i].connection_opts.max_inflight_blocks; + peer_configs[i].connection_opts.max_clients_per_host = + ((num_peers + 1) as u64) * max_inflight; + peer_configs[i].connection_opts.soft_max_clients_per_host = + ((num_peers + 1) as u64) * max_inflight; + peer_configs[i].connection_opts.num_neighbors = (num_peers + 1) as u64; + peer_configs[i].connection_opts.soft_num_neighbors = (num_peers + 1) as u64; + } + + let initial_balances = vec![( + PrincipalData::from(peer_configs[0].spending_account.origin_address().unwrap()), + 1000000, + )]; + + for i in 0..peer_configs.len() { + 
peer_configs[i].initial_balances = initial_balances.clone(); + } + + // connectivity + let peer_0 = peer_configs[0].to_neighbor(); + for i in 1..peer_configs.len() { + peer_configs[i].add_neighbor(&peer_0); + let peer_i = peer_configs[i].to_neighbor(); + peer_configs[0].add_neighbor(&peer_i); + } + }, + |num_blocks, ref mut peers| { + let tip = SortitionDB::get_canonical_burn_chain_tip( + &peers[0].sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + let this_reward_cycle = peers[0] + .config + .burnchain + .block_height_to_reward_cycle(tip.block_height) + .unwrap(); + + // build up block data to replicate + let mut block_data = vec![]; + for _ in 0..num_blocks { + let tip = SortitionDB::get_canonical_burn_chain_tip( + &peers[0].sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + if peers[0] + .config + .burnchain + .block_height_to_reward_cycle(tip.block_height) + .unwrap() + != this_reward_cycle + { + continue; + } + let (mut burn_ops, stacks_block, microblocks) = peers[0].make_default_tenure(); + + let (_, burn_header_hash, consensus_hash) = + peers[0].next_burnchain_block(burn_ops.clone()); + peers[0].process_stacks_epoch_at_tip(&stacks_block, µblocks); + + TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); + + for i in 1..peers.len() { + peers[i].next_burnchain_block_raw(burn_ops.clone()); + } + + let sn = SortitionDB::get_canonical_burn_chain_tip( + &peers[0].sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + + block_data.push(( + sn.consensus_hash.clone(), + Some(stacks_block), + Some(microblocks), + )); + } + *blocks_and_microblocks.borrow_mut() = block_data + .clone() + .drain(..) 
+ .map(|(ch, blk_opt, mblocks_opt)| (ch, blk_opt.unwrap(), mblocks_opt.unwrap())) + .collect(); + block_data + }, + |ref mut peers| { + for peer in peers.iter_mut() { + // force peers to keep trying to process buffered data + peer.network.burnchain_tip.burn_header_hash = BurnchainHeaderHash([0u8; 32]); + } + + let done_flag = *done.borrow(); + + let mut connectivity_0_to_n = HashSet::new(); + let mut connectivity_n_to_0 = HashSet::new(); + + let peer_0_nk = peers[0].to_neighbor().addr; + + for (nk, event_id) in peers[0].network.events.iter() { + if let Some(convo) = peers[0].network.peers.get(event_id) { + if convo.is_authenticated() { + connectivity_0_to_n.insert(nk.clone()); + } + } + } + for i in 1..peers.len() { + for (nk, event_id) in peers[i].network.events.iter() { + if *nk != peer_0_nk { + continue; + } + + if let Some(convo) = peers[i].network.peers.get(event_id) { + if convo.is_authenticated() { + if let Some(inv_state) = &peers[i].network.inv_state { + if let Some(inv_stats) = inv_state.block_stats.get(&peer_0_nk) { + if inv_stats.inv.num_reward_cycles >= 5 { + connectivity_n_to_0.insert(peers[i].to_neighbor().addr); + } + } + } + } + } + } + } + + if connectivity_0_to_n.len() < peers.len() - 1 + || connectivity_n_to_0.len() < peers.len() - 1 + { + test_debug!( + "Network not connected: 0 --> N = {}, N --> 0 = {}", + connectivity_0_to_n.len(), + connectivity_n_to_0.len() + ); + return; + } + + let ((tip_consensus_hash, tip_block, _), idx) = { + let block_data = blocks_and_microblocks.borrow(); + let idx = blocks_idx.borrow(); + (block_data[(*idx as usize).saturating_sub(1)].clone(), *idx) + }; + + if idx > 0 { + let mut caught_up = true; + for i in 1..peers.len() { + peers[i] + .with_db_state(|sortdb, chainstate, relayer, mempool| { + let (canonical_consensus_hash, canonical_block_hash) = + SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn()) + .unwrap(); + + if canonical_consensus_hash != tip_consensus_hash + || canonical_block_hash != 
tip_block.block_hash() + { + debug!( + "Peer {} is not caught up yet (at {}/{}, need {}/{})", + i + 1, + &canonical_consensus_hash, + &canonical_block_hash, + &tip_consensus_hash, + &tip_block.block_hash() + ); + caught_up = false; + } + Ok(()) + }) + .unwrap(); + } + if !caught_up { + return; + } + } + + // caught up! + // find next block + let ((consensus_hash, block, microblocks), idx) = { + let block_data = blocks_and_microblocks.borrow(); + let mut idx = blocks_idx.borrow_mut(); + if *idx >= block_data.len() { + test_debug!("Out of blocks and microblocks to push"); + return; + } + + let ret = block_data[*idx].clone(); + *idx += 1; + (ret, *idx) + }; + + if !done_flag { + test_debug!( + "Broadcast block {}/{} and microblocks (idx = {})", + &consensus_hash, + block.block_hash(), + idx + ); + + let block_hash = block.block_hash(); + + // create a transaction against the current + // (anchored) chain tip + let tx = make_test_smart_contract_transaction( + &mut peers[0], + &format!("test-contract-{}", &block_hash.to_hex()[0..10]), + &tip_consensus_hash, + &tip_block.block_hash(), + ); + + let mut expected_txs = sent_txs.borrow_mut(); + expected_txs.push(tx.clone()); + + test_debug!( + "Broadcast {}/{} and its microblocks", + &consensus_hash, + &block.block_hash() + ); + // next block + broadcast_block(&mut peers[0], vec![], consensus_hash.clone(), block); + broadcast_microblocks( + &mut peers[0], + vec![], + consensus_hash, + block_hash, + microblocks, + ); + + // NOTE: first transaction will be dropped since the other nodes haven't + // processed the first-ever Stacks block when their relayer code gets + // around to considering it. + broadcast_transaction(&mut peers[0], vec![], tx); + } else { + test_debug!("Done pushing data"); + } + }, + |ref peer| { + // check peer health -- no message errors + // (i.e. 
no relay cycles) + for (_, convo) in peer.network.peers.iter() { + assert_eq!(convo.stats.msgs_err, 0); + } + true + }, + |ref mut peers| { + // all blocks downloaded. only stop if peer 1 has + // all the transactions + let mut done_flag = done.borrow_mut(); + *done_flag = true; + + let mut ret = true; + for i in 1..peers.len() { + let txs = + MemPoolDB::get_all_txs(peers[1].mempool.as_ref().unwrap().conn()).unwrap(); + test_debug!("Peer {} has {} txs", i + 1, txs.len()); + ret = ret && txs.len() == sent_txs.borrow().len() - 1; + } + ret + }, + ); + + // peers 1..n should have all the transactions + let blocks_and_microblocks = blocks_and_microblocks.into_inner(); + let expected_txs = sent_txs.into_inner(); + + for i in 1..peers.len() { + let txs = MemPoolDB::get_all_txs(peers[i].mempool.as_ref().unwrap().conn()).unwrap(); + for tx in txs.iter() { + let mut found = false; + for expected_tx in expected_txs.iter() { + if tx.tx.txid() == expected_tx.txid() { + found = true; + break; + } + } + if !found { + panic!("Transaction not found: {:?}", &tx.tx); + } + } + + // peers 1..n should have 1 tx per chain tip (except for the first block) + for ((consensus_hash, block, _), sent_tx) in + blocks_and_microblocks.iter().zip(expected_txs[1..].iter()) + { + let block_hash = block.block_hash(); + let tx_infos = MemPoolDB::get_txs_after( + peers[i].mempool.as_ref().unwrap().conn(), + consensus_hash, + &block_hash, + 0, + 1000, + ) + .unwrap(); + assert_eq!(tx_infos.len(), 1); + assert_eq!(tx_infos[0].tx.txid(), sent_tx.txid()); + } + } + }) +} + +#[test] +#[ignore] +fn test_get_blocks_and_microblocks_2_peers_antientropy() { + with_timeout(600, move || { + run_get_blocks_and_microblocks( + "test_get_blocks_and_microblocks_2_peers_antientropy", + 4240, + 2, + |ref mut peer_configs| { + // build initial network topology. + assert_eq!(peer_configs.len(), 2); + + // peer 0 mines blocks, but does not advertize them nor announce them as + // available via its inventory. 
It only uses its anti-entropy protocol to + // discover that peer 1 doesn't have them, and sends them to peer 1 that way. + peer_configs[0].connection_opts.disable_block_advertisement = true; + peer_configs[0].connection_opts.disable_block_download = true; + + peer_configs[1].connection_opts.disable_block_download = true; + peer_configs[1].connection_opts.disable_block_advertisement = true; + + // disable nat punches -- disconnect/reconnect + // clears inv state + peer_configs[0].connection_opts.disable_natpunch = true; + peer_configs[1].connection_opts.disable_natpunch = true; + + // permit anti-entropy protocol even if nat'ed + peer_configs[0].connection_opts.antientropy_public = true; + peer_configs[1].connection_opts.antientropy_public = true; + peer_configs[0].connection_opts.antientropy_retry = 1; + peer_configs[1].connection_opts.antientropy_retry = 1; + + // make peer 0 go slowly + peer_configs[0].connection_opts.max_block_push = 2; + peer_configs[0].connection_opts.max_microblock_push = 2; + + let peer_0 = peer_configs[0].to_neighbor(); + let peer_1 = peer_configs[1].to_neighbor(); + + // peer 0 is inbound to peer 1 + peer_configs[0].add_neighbor(&peer_1); + peer_configs[1].add_neighbor(&peer_0); + }, + |num_blocks, ref mut peers| { + let tip = SortitionDB::get_canonical_burn_chain_tip( + &peers[0].sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + let this_reward_cycle = peers[0] + .config + .burnchain + .block_height_to_reward_cycle(tip.block_height) + .unwrap(); + + // build up block data to replicate + let mut block_data = vec![]; + for _ in 0..num_blocks { + let tip = SortitionDB::get_canonical_burn_chain_tip( + &peers[0].sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + if peers[0] + .config + .burnchain + .block_height_to_reward_cycle(tip.block_height) + .unwrap() + != this_reward_cycle + { + continue; + } + let (mut burn_ops, stacks_block, microblocks) = peers[0].make_default_tenure(); + + let (_, burn_header_hash, consensus_hash) = + 
peers[0].next_burnchain_block(burn_ops.clone()); + peers[0].process_stacks_epoch_at_tip(&stacks_block, µblocks); + + TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); + + for i in 1..peers.len() { + peers[i].next_burnchain_block_raw(burn_ops.clone()); + } + + let sn = SortitionDB::get_canonical_burn_chain_tip( + &peers[0].sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + block_data.push(( + sn.consensus_hash.clone(), + Some(stacks_block), + Some(microblocks), + )); + } + + // cap with an empty sortition, so the antientropy protocol picks up all stacks + // blocks + let (_, burn_header_hash, consensus_hash) = peers[0].next_burnchain_block(vec![]); + for i in 1..peers.len() { + peers[i].next_burnchain_block_raw(vec![]); + } + let sn = SortitionDB::get_canonical_burn_chain_tip( + &peers[0].sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + block_data.push((sn.consensus_hash.clone(), None, None)); + + block_data + }, + |ref mut peers| { + for peer in peers.iter_mut() { + // force peers to keep trying to process buffered data + peer.network.burnchain_tip.burn_header_hash = BurnchainHeaderHash([0u8; 32]); + } + + let tip_opt = peers[1] + .with_db_state(|sortdb, chainstate, _, _| { + let tip_opt = + NakamotoChainState::get_canonical_block_header(chainstate.db(), sortdb) + .unwrap(); + Ok(tip_opt) + }) + .unwrap(); + }, + |ref peer| { + // check peer health + // nothing should break + // TODO + true + }, + |_| true, + ); + }) +} + +#[test] +#[ignore] +fn test_get_blocks_and_microblocks_2_peers_buffered_messages() { + with_timeout(600, move || { + let sortitions = RefCell::new(vec![]); + let blocks_and_microblocks = RefCell::new(vec![]); + let idx = RefCell::new(0usize); + let pushed_idx = RefCell::new(0usize); + run_get_blocks_and_microblocks( + "test_get_blocks_and_microblocks_2_peers_buffered_messages", + 4242, + 2, + |ref mut peer_configs| { + // build initial network topology. 
+ assert_eq!(peer_configs.len(), 2); + + // peer 0 mines blocks, but it does not present its inventory. + peer_configs[0].connection_opts.disable_inv_chat = true; + peer_configs[0].connection_opts.disable_block_download = true; + + peer_configs[1].connection_opts.disable_block_download = true; + peer_configs[1].connection_opts.disable_block_advertisement = true; + + // disable nat punches -- disconnect/reconnect + // clears inv state + peer_configs[0].connection_opts.disable_natpunch = true; + peer_configs[1].connection_opts.disable_natpunch = true; + + // peer 0 ignores peer 1's handshakes + peer_configs[0].connection_opts.disable_inbound_handshakes = true; + + // disable anti-entropy + peer_configs[0].connection_opts.max_block_push = 0; + peer_configs[0].connection_opts.max_microblock_push = 0; + + let peer_0 = peer_configs[0].to_neighbor(); + let peer_1 = peer_configs[1].to_neighbor(); + + // peer 0 is inbound to peer 1 + peer_configs[0].add_neighbor(&peer_1); + peer_configs[1].add_neighbor(&peer_0); + }, + |num_blocks, ref mut peers| { + let tip = SortitionDB::get_canonical_burn_chain_tip( + &peers[0].sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + let this_reward_cycle = peers[0] + .config + .burnchain + .block_height_to_reward_cycle(tip.block_height) + .unwrap(); + + // build up block data to replicate + let mut block_data = vec![]; + for block_num in 0..num_blocks { + let tip = SortitionDB::get_canonical_burn_chain_tip( + &peers[0].sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + let (mut burn_ops, stacks_block, microblocks) = peers[0].make_default_tenure(); + + let (_, burn_header_hash, consensus_hash) = + peers[0].next_burnchain_block(burn_ops.clone()); + peers[0].process_stacks_epoch_at_tip(&stacks_block, µblocks); + + TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); + + if block_num == 0 { + for i in 1..peers.len() { + peers[i].next_burnchain_block_raw(burn_ops.clone()); + peers[i].process_stacks_epoch_at_tip(&stacks_block, 
µblocks); + } + } else { + let mut all_sortitions = sortitions.borrow_mut(); + all_sortitions.push(burn_ops.clone()); + } + + let sn = SortitionDB::get_canonical_burn_chain_tip( + &peers[0].sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + block_data.push(( + sn.consensus_hash.clone(), + Some(stacks_block), + Some(microblocks), + )); + } + *blocks_and_microblocks.borrow_mut() = block_data.clone()[1..] + .to_vec() + .drain(..) + .map(|(ch, blk_opt, mblocks_opt)| (ch, blk_opt.unwrap(), mblocks_opt.unwrap())) + .collect(); + block_data + }, + |ref mut peers| { + for peer in peers.iter_mut() { + // force peers to keep trying to process buffered data + peer.network.burnchain_tip.burn_header_hash = BurnchainHeaderHash([0u8; 32]); + } + + let mut i = idx.borrow_mut(); + let mut pushed_i = pushed_idx.borrow_mut(); + let all_sortitions = sortitions.borrow(); + let all_blocks_and_microblocks = blocks_and_microblocks.borrow(); + let peer_0_nk = peers[0].to_neighbor().addr; + let peer_1_nk = peers[1].to_neighbor().addr; + + let tip_opt = peers[1] + .with_db_state(|sortdb, chainstate, _, _| { + let tip_opt = + NakamotoChainState::get_canonical_block_header(chainstate.db(), sortdb) + .unwrap(); + Ok(tip_opt) + }) + .unwrap(); + + if !is_peer_connected(&peers[0], &peer_1_nk) { + debug!("Peer 0 not connected to peer 1"); + return; + } + + if let Some(tip) = tip_opt { + debug!( + "Push at {}, need {}", + tip.anchored_header.height() + - peers[1].config.burnchain.first_block_height + - 1, + *pushed_i + ); + if tip.anchored_header.height() + - peers[1].config.burnchain.first_block_height + - 1 + == *pushed_i as u64 + { + // next block + push_block( + &mut peers[0], + &peer_1_nk, + vec![], + (*all_blocks_and_microblocks)[*pushed_i].0.clone(), + (*all_blocks_and_microblocks)[*pushed_i].1.clone(), + ); + push_microblocks( + &mut peers[0], + &peer_1_nk, + vec![], + (*all_blocks_and_microblocks)[*pushed_i].0.clone(), + (*all_blocks_and_microblocks)[*pushed_i].1.block_hash(), + 
(*all_blocks_and_microblocks)[*pushed_i].2.clone(), + ); + *pushed_i += 1; + } + debug!( + "Sortition at {}, need {}", + tip.anchored_header.height() + - peers[1].config.burnchain.first_block_height + - 1, + *i + ); + if tip.anchored_header.height() + - peers[1].config.burnchain.first_block_height + - 1 + == *i as u64 + { + let event_id = { + let mut ret = 0; + for (nk, event_id) in peers[1].network.events.iter() { + ret = *event_id; + break; + } + if ret == 0 { + return; + } + ret + }; + let mut update_sortition = false; + for (event_id, pending) in peers[1].network.pending_messages.iter() { + debug!("Pending at {} is ({}, {})", *i, event_id, pending.len()); + if pending.len() >= 1 { + update_sortition = true; + } + } + if update_sortition { + debug!("Advance sortition!"); + peers[1].next_burnchain_block_raw((*all_sortitions)[*i].clone()); + *i += 1; + } + } + } + }, + |ref peer| { + // check peer health + // nothing should break + // TODO + true + }, + |_| true, + ); + }) +} + +pub fn make_contract_tx( + sender: &StacksPrivateKey, + cur_nonce: u64, + tx_fee: u64, + name: &str, + contract: &str, +) -> StacksTransaction { + let sender_spending_condition = + TransactionSpendingCondition::new_singlesig_p2pkh(StacksPublicKey::from_private(sender)) + .expect("Failed to create p2pkh spending condition from public key."); + + let spending_auth = TransactionAuth::Standard(sender_spending_condition); + + let mut tx_contract = StacksTransaction::new( + TransactionVersion::Testnet, + spending_auth.clone(), + TransactionPayload::new_smart_contract(&name.to_string(), &contract.to_string(), None) + .unwrap(), + ); + + tx_contract.chain_id = 0x80000000; + tx_contract.auth.set_origin_nonce(cur_nonce); + tx_contract.set_tx_fee(tx_fee); + + let mut tx_signer = StacksTransactionSigner::new(&tx_contract); + tx_signer.sign_origin(sender).unwrap(); + + let tx_contract_signed = tx_signer.get_tx().unwrap(); + tx_contract_signed +} + +#[test] +fn test_static_problematic_tests() { + let 
spender_sk_1 = StacksPrivateKey::new(); + let spender_sk_2 = StacksPrivateKey::new(); + let spender_sk_3 = StacksPrivateKey::new(); + + let edge_repeat_factor = AST_CALL_STACK_DEPTH_BUFFER + (MAX_CALL_STACK_DEPTH as u64) - 1; + let tx_edge_body_start = "{ a : ".repeat(edge_repeat_factor as usize); + let tx_edge_body_end = "} ".repeat(edge_repeat_factor as usize); + let tx_edge_body = format!("{}u1 {}", tx_edge_body_start, tx_edge_body_end); + + let tx_edge = make_contract_tx( + &spender_sk_1, + 0, + (tx_edge_body.len() * 100) as u64, + "test-edge", + &tx_edge_body, + ); + + // something just over the limit of the expression depth + let exceeds_repeat_factor = edge_repeat_factor + 1; + let tx_exceeds_body_start = "{ a : ".repeat(exceeds_repeat_factor as usize); + let tx_exceeds_body_end = "} ".repeat(exceeds_repeat_factor as usize); + let tx_exceeds_body = format!("{}u1 {}", tx_exceeds_body_start, tx_exceeds_body_end); + + let tx_exceeds = make_contract_tx( + &spender_sk_2, + 0, + (tx_exceeds_body.len() * 100) as u64, + "test-exceeds", + &tx_exceeds_body, + ); + + // something stupidly high over the expression depth + let high_repeat_factor = 128 * 1024; + let tx_high_body_start = "{ a : ".repeat(high_repeat_factor as usize); + let tx_high_body_end = "} ".repeat(high_repeat_factor as usize); + let tx_high_body = format!("{}u1 {}", tx_high_body_start, tx_high_body_end); + + let tx_high = make_contract_tx( + &spender_sk_3, + 0, + (tx_high_body.len() * 100) as u64, + "test-high", + &tx_high_body, + ); + assert!(Relayer::static_check_problematic_relayed_tx( + false, + StacksEpochId::Epoch2_05, + &tx_edge, + ASTRules::Typical + ) + .is_ok()); + assert!(Relayer::static_check_problematic_relayed_tx( + false, + StacksEpochId::Epoch2_05, + &tx_exceeds, + ASTRules::Typical + ) + .is_ok()); + assert!(Relayer::static_check_problematic_relayed_tx( + false, + StacksEpochId::Epoch2_05, + &tx_high, + ASTRules::Typical + ) + .is_ok()); + + 
assert!(Relayer::static_check_problematic_relayed_tx( + false, + StacksEpochId::Epoch2_05, + &tx_edge, + ASTRules::Typical + ) + .is_ok()); + assert!(!Relayer::static_check_problematic_relayed_tx( + false, + StacksEpochId::Epoch2_05, + &tx_exceeds, + ASTRules::PrecheckSize + ) + .is_ok()); + assert!(!Relayer::static_check_problematic_relayed_tx( + false, + StacksEpochId::Epoch2_05, + &tx_high, + ASTRules::PrecheckSize + ) + .is_ok()); +} + +#[test] +fn process_new_blocks_rejects_problematic_asts() { + let privk = StacksPrivateKey::from_hex( + "42faca653724860da7a41bfcef7e6ba78db55146f6900de8cb2a9f760ffac70c01", + ) + .unwrap(); + let addr = StacksAddress::from_public_keys( + C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + &AddressHashMode::SerializeP2PKH, + 1, + &vec![StacksPublicKey::from_private(&privk)], + ) + .unwrap(); + + let initial_balances = vec![(addr.to_account_principal(), 100000000000)]; + + let mut peer_config = TestPeerConfig::new(function_name!(), 32019, 32020); + peer_config.initial_balances = initial_balances; + peer_config.epochs = Some(vec![ + StacksEpoch { + epoch_id: StacksEpochId::Epoch20, + start_height: 0, + end_height: 1, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_0, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch2_05, + start_height: 1, + end_height: i64::MAX as u64, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_05, + }, + ]); + let burnchain = peer_config.burnchain.clone(); + + // activate new AST rules right away + let mut peer = TestPeer::new(peer_config); + let mut sortdb = peer.sortdb.take().unwrap(); + { + let mut tx = sortdb + .tx_begin() + .expect("FATAL: failed to begin tx on sortition DB"); + SortitionDB::override_ast_rule_height(&mut tx, ASTRules::PrecheckSize, 1) + .expect("FATAL: failed to override AST PrecheckSize rule height"); + tx.commit() + .expect("FATAL: failed to commit sortition DB transaction"); + } + peer.sortdb = Some(sortdb); + + let 
chainstate_path = peer.chainstate_path.clone(); + + let first_stacks_block_height = { + let sn = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + .unwrap(); + sn.block_height + }; + + let recipient_addr_str = "ST1RFD5Q2QPK3E0F08HG9XDX7SSC7CNRS0QR0SGEV"; + let recipient = StacksAddress::from_string(recipient_addr_str).unwrap(); + + let high_repeat_factor = 128 * 1024; + let tx_high_body_start = "{ a : ".repeat(high_repeat_factor as usize); + let tx_high_body_end = "} ".repeat(high_repeat_factor as usize); + let tx_high_body = format!("{}u1 {}", tx_high_body_start, tx_high_body_end); + + let bad_tx = make_contract_tx( + &privk, + 0, + (tx_high_body.len() * 100) as u64, + "test-high", + &tx_high_body, + ); + let bad_txid = bad_tx.txid(); + let bad_tx_len = { + let mut bytes = vec![]; + bad_tx.consensus_serialize(&mut bytes).unwrap(); + bytes.len() as u64 + }; + + let tip = + SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()).unwrap(); + + let mblock_privk = StacksPrivateKey::new(); + + // make one tenure with a valid block, but problematic microblocks + let (burn_ops, block, microblocks) = peer.make_tenure( + |ref mut miner, + ref mut sortdb, + ref mut chainstate, + vrf_proof, + ref parent_opt, + ref parent_microblock_header_opt| { + let parent_tip = match parent_opt { + None => StacksChainState::get_genesis_header_info(chainstate.db()).unwrap(), + Some(block) => { + let ic = sortdb.index_conn(); + let snapshot = SortitionDB::get_block_snapshot_for_winning_stacks_block( + &ic, + &tip.sortition_id, + &block.block_hash(), + ) + .unwrap() + .unwrap(); // succeeds because we don't fork + StacksChainState::get_anchored_block_header_info( + chainstate.db(), + &snapshot.consensus_hash, + &snapshot.winning_stacks_block_hash, + ) + .unwrap() + .unwrap() + } + }; + + let parent_header_hash = parent_tip.anchored_header.block_hash(); + let parent_consensus_hash = parent_tip.consensus_hash.clone(); + let coinbase_tx = 
make_coinbase(miner, 0); + + let block_builder = StacksBlockBuilder::make_regtest_block_builder( + &burnchain, + &parent_tip, + vrf_proof.clone(), + tip.total_burn, + Hash160::from_node_public_key(&StacksPublicKey::from_private(&mblock_privk)), + ) + .unwrap(); + + let block = StacksBlockBuilder::make_anchored_block_from_txs( + block_builder, + chainstate, + &sortdb.index_conn(), + vec![coinbase_tx.clone()], + ) + .unwrap() + .0; + + (block, vec![]) + }, + ); + + let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); + peer.process_stacks_epoch(&block, &consensus_hash, &vec![]); + + let tip = + SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()).unwrap(); + + let (burn_ops, bad_block, mut microblocks) = peer.make_tenure( + |ref mut miner, + ref mut sortdb, + ref mut chainstate, + vrf_proof, + ref parent_opt, + ref parent_microblock_header_opt| { + let parent_tip = match parent_opt { + None => StacksChainState::get_genesis_header_info(chainstate.db()).unwrap(), + Some(block) => { + let ic = sortdb.index_conn(); + let snapshot = SortitionDB::get_block_snapshot_for_winning_stacks_block( + &ic, + &tip.sortition_id, + &block.block_hash(), + ) + .unwrap() + .unwrap(); // succeeds because we don't fork + StacksChainState::get_anchored_block_header_info( + chainstate.db(), + &snapshot.consensus_hash, + &snapshot.winning_stacks_block_hash, + ) + .unwrap() + .unwrap() + } + }; + + let parent_header_hash = parent_tip.anchored_header.block_hash(); + let parent_consensus_hash = parent_tip.consensus_hash.clone(); + let parent_index_hash = StacksBlockHeader::make_index_block_hash( + &parent_consensus_hash, + &parent_header_hash, + ); + let coinbase_tx = make_coinbase(miner, 0); + + let mblock_privk = miner.next_microblock_privkey(); + let block_builder = StacksBlockBuilder::make_regtest_block_builder( + &burnchain, + &parent_tip, + vrf_proof.clone(), + tip.total_burn, + 
Hash160::from_node_public_key(&StacksPublicKey::from_private(&mblock_privk)), + ) + .unwrap(); + + // this tx would be problematic without our checks + if let Err(ChainstateError::ProblematicTransaction(txid)) = + StacksBlockBuilder::make_anchored_block_from_txs( + block_builder, + chainstate, + &sortdb.index_conn(), + vec![coinbase_tx.clone(), bad_tx.clone()], + ) + { + assert_eq!(txid, bad_txid); + } else { + panic!("Did not get Error::ProblematicTransaction"); + } + + // make a bad block anyway + // don't worry about the state root + let block_builder = StacksBlockBuilder::make_regtest_block_builder( + &burnchain, + &parent_tip, + vrf_proof.clone(), + tip.total_burn, + Hash160::from_node_public_key(&StacksPublicKey::from_private(&mblock_privk)), + ) + .unwrap(); + let bad_block = StacksBlockBuilder::make_anchored_block_from_txs( + block_builder, + chainstate, + &sortdb.index_conn(), + vec![coinbase_tx.clone()], + ) + .unwrap(); + + let mut bad_block = bad_block.0; + bad_block.txs.push(bad_tx.clone()); + + let txid_vecs = bad_block + .txs + .iter() + .map(|tx| tx.txid().as_bytes().to_vec()) + .collect(); + + let merkle_tree = MerkleTree::::new(&txid_vecs); + bad_block.header.tx_merkle_root = merkle_tree.root(); + + let sort_ic = sortdb.index_conn(); + chainstate + .reload_unconfirmed_state(&sort_ic, parent_index_hash.clone()) + .unwrap(); + + // make a bad microblock + let mut microblock_builder = StacksMicroblockBuilder::new( + parent_header_hash.clone(), + parent_consensus_hash.clone(), + chainstate, + &sort_ic, + BlockBuilderSettings::max_value(), + ) + .unwrap(); + + // miner should fail with just the bad tx, since it's problematic + let mblock_err = microblock_builder + .mine_next_microblock_from_txs(vec![(bad_tx.clone(), bad_tx_len)], &mblock_privk) + .unwrap_err(); + if let ChainstateError::NoTransactionsToMine = mblock_err { + } else { + panic!("Did not get NoTransactionsToMine"); + } + + let token_transfer = + make_user_stacks_transfer(&privk, 0, 200, 
&recipient.to_account_principal(), 123); + let tt_len = { + let mut bytes = vec![]; + token_transfer.consensus_serialize(&mut bytes).unwrap(); + bytes.len() as u64 + }; + + let mut bad_mblock = microblock_builder + .mine_next_microblock_from_txs( + vec![(token_transfer, tt_len), (bad_tx.clone(), bad_tx_len)], + &mblock_privk, + ) + .unwrap(); + + // miner shouldn't include the bad tx, since it's problematic + assert_eq!(bad_mblock.txs.len(), 1); + bad_mblock.txs.push(bad_tx.clone()); + + // force it in anyway + let txid_vecs = bad_mblock + .txs + .iter() + .map(|tx| tx.txid().as_bytes().to_vec()) + .collect(); + + let merkle_tree = MerkleTree::::new(&txid_vecs); + bad_mblock.header.tx_merkle_root = merkle_tree.root(); + bad_mblock.sign(&mblock_privk).unwrap(); + + (bad_block, vec![bad_mblock]) + }, + ); + + let bad_mblock = microblocks.pop().unwrap(); + let (_, _, new_consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); + peer.process_stacks_epoch(&bad_block, &new_consensus_hash, &vec![]); + + // stuff them all into each possible field of NetworkResult + // p2p messages + let nk = NeighborKey { + peer_version: 1, + network_id: 2, + addrbytes: PeerAddress([3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18]), + port: 19, + }; + let preamble = Preamble { + peer_version: 1, + network_id: 2, + seq: 3, + burn_block_height: 4, + burn_block_hash: BurnchainHeaderHash([5u8; 32]), + burn_stable_block_height: 6, + burn_stable_block_hash: BurnchainHeaderHash([7u8; 32]), + additional_data: 8, + signature: MessageSignature([9u8; 65]), + payload_len: 10, + }; + let bad_msgs = vec![ + StacksMessage { + preamble: preamble.clone(), + relayers: vec![], + payload: StacksMessageType::Blocks(BlocksData { + blocks: vec![BlocksDatum(new_consensus_hash.clone(), bad_block.clone())], + }), + }, + StacksMessage { + preamble: preamble.clone(), + relayers: vec![], + payload: StacksMessageType::Microblocks(MicroblocksData { + index_anchor_block: StacksBlockId::new( + 
&new_consensus_hash, + &bad_block.block_hash(), + ), + microblocks: vec![bad_mblock.clone()], + }), + }, + StacksMessage { + preamble: preamble.clone(), + relayers: vec![], + payload: StacksMessageType::Transaction(bad_tx.clone()), + }, + ]; + let mut unsolicited = HashMap::new(); + unsolicited.insert(nk.clone(), bad_msgs.clone()); + + let mut network_result = + NetworkResult::new(0, 0, 0, 0, 0, ConsensusHash([0x01; 20]), HashMap::new()); + network_result.consume_unsolicited(unsolicited); + + assert!(network_result.has_blocks()); + assert!(network_result.has_microblocks()); + assert!(network_result.has_transactions()); + + network_result.consume_http_uploads( + bad_msgs + .into_iter() + .map(|msg| msg.payload) + .collect::>(), + ); + + assert!(network_result.has_blocks()); + assert!(network_result.has_microblocks()); + assert!(network_result.has_transactions()); + + assert_eq!(network_result.uploaded_transactions.len(), 1); + assert_eq!(network_result.uploaded_blocks.len(), 1); + assert_eq!(network_result.uploaded_microblocks.len(), 1); + assert_eq!(network_result.pushed_transactions.len(), 1); + assert_eq!(network_result.pushed_blocks.len(), 1); + assert_eq!(network_result.pushed_microblocks.len(), 1); + + network_result + .blocks + .push((new_consensus_hash.clone(), bad_block.clone(), 123)); + network_result.confirmed_microblocks.push(( + new_consensus_hash.clone(), + vec![bad_mblock.clone()], + 234, + )); + + let mut sortdb = peer.sortdb.take().unwrap(); + let (processed_blocks, processed_mblocks, relay_mblocks, bad_neighbors) = + Relayer::process_new_blocks( + &mut network_result, + &mut sortdb, + &mut peer.stacks_node.as_mut().unwrap().chainstate, + None, + ) + .unwrap(); + + // despite this data showing up in all aspects of the network result, none of it actually + // gets relayed + assert_eq!(processed_blocks.len(), 0); + assert_eq!(processed_mblocks.len(), 0); + assert_eq!(relay_mblocks.len(), 0); + assert_eq!(bad_neighbors.len(), 0); + + let txs_relayed = 
Relayer::process_transactions( + &mut network_result, + &sortdb, + &mut peer.stacks_node.as_mut().unwrap().chainstate, + &mut peer.mempool.as_mut().unwrap(), + None, + ) + .unwrap(); + assert_eq!(txs_relayed.len(), 0); +} + +#[test] +fn test_block_pay_to_contract_gated_at_v210() { + let mut peer_config = TestPeerConfig::new(function_name!(), 4246, 4247); + let epochs = vec![ + StacksEpoch { + epoch_id: StacksEpochId::Epoch10, + start_height: 0, + end_height: 0, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_1_0, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch20, + start_height: 0, + end_height: 0, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_0, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch2_05, + start_height: 0, + end_height: 28, // NOTE: the first 25 burnchain blocks have no sortition + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_05, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch21, + start_height: 28, + end_height: STACKS_EPOCH_MAX, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_1, + }, + ]; + peer_config.epochs = Some(epochs); + let burnchain = peer_config.burnchain.clone(); + + let mut peer = TestPeer::new(peer_config); + + let mut make_tenure = + |miner: &mut TestMiner, + sortdb: &mut SortitionDB, + chainstate: &mut StacksChainState, + vrfproof: VRFProof, + parent_opt: Option<&StacksBlock>, + microblock_parent_opt: Option<&StacksMicroblockHeader>| { + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + + let stacks_tip_opt = + NakamotoChainState::get_canonical_block_header(chainstate.db(), sortdb).unwrap(); + let parent_tip = match stacks_tip_opt { + None => StacksChainState::get_genesis_header_info(chainstate.db()).unwrap(), + Some(header_tip) => { + let ic = sortdb.index_conn(); + let snapshot = SortitionDB::get_block_snapshot_for_winning_stacks_block( + &ic, + 
&tip.sortition_id, + &header_tip.anchored_header.block_hash(), + ) + .unwrap() + .unwrap(); // succeeds because we don't fork + StacksChainState::get_anchored_block_header_info( + chainstate.db(), + &snapshot.consensus_hash, + &snapshot.winning_stacks_block_hash, + ) + .unwrap() + .unwrap() + } + }; + + let parent_header_hash = parent_tip.anchored_header.block_hash(); + let parent_consensus_hash = parent_tip.consensus_hash.clone(); + let parent_index_hash = StacksBlockHeader::make_index_block_hash( + &parent_consensus_hash, + &parent_header_hash, + ); + + let coinbase_tx = make_coinbase_with_nonce( + miner, + parent_tip.stacks_block_height as usize, + 0, + Some(PrincipalData::Contract( + QualifiedContractIdentifier::parse("ST000000000000000000002AMW42H.bns") + .unwrap(), + )), + ); + + let mut mblock_pubkey_hash_bytes = [0u8; 20]; + mblock_pubkey_hash_bytes.copy_from_slice(&coinbase_tx.txid()[0..20]); + + let builder = StacksBlockBuilder::make_block_builder( + &burnchain, + chainstate.mainnet, + &parent_tip, + vrfproof, + tip.total_burn, + Hash160(mblock_pubkey_hash_bytes), + ) + .unwrap(); + + let anchored_block = StacksBlockBuilder::make_anchored_block_from_txs( + builder, + chainstate, + &sortdb.index_conn(), + vec![coinbase_tx], + ) + .unwrap(); + + (anchored_block.0, vec![]) + }; + + // tenures 26 and 27 should fail, since the block is a pay-to-contract block + // Pay-to-contract should only be supported if the block is in epoch 2.1, which + // activates at tenure 27. 
+ for i in 0..2 { + let (burn_ops, stacks_block, microblocks) = peer.make_tenure(&mut make_tenure); + let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); + + let sortdb = peer.sortdb.take().unwrap(); + let mut node = peer.stacks_node.take().unwrap(); + match Relayer::process_new_anchored_block( + &sortdb.index_conn(), + &mut node.chainstate, + &consensus_hash, + &stacks_block, + 123, + ) { + Ok(x) => { + panic!("Stored pay-to-contract stacks block before epoch 2.1"); + } + Err(chainstate_error::InvalidStacksBlock(_)) => {} + Err(e) => { + panic!("Got unexpected error {:?}", &e); + } + }; + peer.sortdb = Some(sortdb); + peer.stacks_node = Some(node); + } + + // *now* it should succeed, since tenure 28 was in epoch 2.1 + let (burn_ops, stacks_block, microblocks) = peer.make_tenure(&mut make_tenure); + + let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); + + let sortdb = peer.sortdb.take().unwrap(); + let mut node = peer.stacks_node.take().unwrap(); + match Relayer::process_new_anchored_block( + &sortdb.index_conn(), + &mut node.chainstate, + &consensus_hash, + &stacks_block, + 123, + ) { + Ok(x) => { + assert!(x, "Failed to process valid pay-to-contract block"); + } + Err(e) => { + panic!("Got unexpected error {:?}", &e); + } + }; + peer.sortdb = Some(sortdb); + peer.stacks_node = Some(node); +} + +#[test] +fn test_block_versioned_smart_contract_gated_at_v210() { + let mut peer_config = TestPeerConfig::new(function_name!(), 4248, 4249); + + let initial_balances = vec![( + PrincipalData::from(peer_config.spending_account.origin_address().unwrap()), + 1000000, + )]; + + let epochs = vec![ + StacksEpoch { + epoch_id: StacksEpochId::Epoch10, + start_height: 0, + end_height: 0, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_1_0, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch20, + start_height: 0, + end_height: 0, + block_limit: ExecutionCost::max_value(), + network_epoch: 
PEER_VERSION_EPOCH_2_0, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch2_05, + start_height: 0, + end_height: 28, // NOTE: the first 25 burnchain blocks have no sortition + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_05, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch21, + start_height: 28, + end_height: STACKS_EPOCH_MAX, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_1, + }, + ]; + + peer_config.epochs = Some(epochs); + peer_config.initial_balances = initial_balances; + let burnchain = peer_config.burnchain.clone(); + + let mut peer = TestPeer::new(peer_config); + + let mut make_tenure = + |miner: &mut TestMiner, + sortdb: &mut SortitionDB, + chainstate: &mut StacksChainState, + vrfproof: VRFProof, + parent_opt: Option<&StacksBlock>, + microblock_parent_opt: Option<&StacksMicroblockHeader>| { + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + + let stacks_tip_opt = + NakamotoChainState::get_canonical_block_header(chainstate.db(), sortdb).unwrap(); + let parent_tip = match stacks_tip_opt { + None => StacksChainState::get_genesis_header_info(chainstate.db()).unwrap(), + Some(header_tip) => { + let ic = sortdb.index_conn(); + let snapshot = SortitionDB::get_block_snapshot_for_winning_stacks_block( + &ic, + &tip.sortition_id, + &header_tip.anchored_header.block_hash(), + ) + .unwrap() + .unwrap(); // succeeds because we don't fork + StacksChainState::get_anchored_block_header_info( + chainstate.db(), + &snapshot.consensus_hash, + &snapshot.winning_stacks_block_hash, + ) + .unwrap() + .unwrap() + } + }; + + let parent_header_hash = parent_tip.anchored_header.block_hash(); + let parent_consensus_hash = parent_tip.consensus_hash.clone(); + let parent_index_hash = StacksBlockHeader::make_index_block_hash( + &parent_consensus_hash, + &parent_header_hash, + ); + + let coinbase_tx = + make_coinbase_with_nonce(miner, parent_tip.stacks_block_height as usize, 0, None); 
+ + let versioned_contract = make_smart_contract_with_version( + miner, + 1, + tip.block_height.try_into().unwrap(), + 0, + Some(ClarityVersion::Clarity1), + Some(1000), + ); + + let mut mblock_pubkey_hash_bytes = [0u8; 20]; + mblock_pubkey_hash_bytes.copy_from_slice(&coinbase_tx.txid()[0..20]); + + let builder = StacksBlockBuilder::make_block_builder( + &burnchain, + chainstate.mainnet, + &parent_tip, + vrfproof, + tip.total_burn, + Hash160(mblock_pubkey_hash_bytes), + ) + .unwrap(); + + let anchored_block = StacksBlockBuilder::make_anchored_block_from_txs( + builder, + chainstate, + &sortdb.index_conn(), + vec![coinbase_tx, versioned_contract], + ) + .unwrap(); + + eprintln!("{:?}", &anchored_block.0); + (anchored_block.0, vec![]) + }; + + // tenures 26 and 27 should fail, since the block contains a versioned smart contract. + // Versioned smart contracts should only be supported if the block is in epoch 2.1, which + // activates at tenure 27. + for i in 0..2 { + let (burn_ops, stacks_block, microblocks) = peer.make_tenure(&mut make_tenure); + let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); + + let sortdb = peer.sortdb.take().unwrap(); + let mut node = peer.stacks_node.take().unwrap(); + match Relayer::process_new_anchored_block( + &sortdb.index_conn(), + &mut node.chainstate, + &consensus_hash, + &stacks_block, + 123, + ) { + Ok(x) => { + eprintln!("{:?}", &stacks_block); + panic!("Stored pay-to-contract stacks block before epoch 2.1"); + } + Err(chainstate_error::InvalidStacksBlock(_)) => {} + Err(e) => { + panic!("Got unexpected error {:?}", &e); + } + }; + peer.sortdb = Some(sortdb); + peer.stacks_node = Some(node); + } + + // *now* it should succeed, since tenure 28 was in epoch 2.1 + let (burn_ops, stacks_block, microblocks) = peer.make_tenure(&mut make_tenure); + + let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); + + let sortdb = peer.sortdb.take().unwrap(); + let mut node = 
peer.stacks_node.take().unwrap(); + match Relayer::process_new_anchored_block( + &sortdb.index_conn(), + &mut node.chainstate, + &consensus_hash, + &stacks_block, + 123, + ) { + Ok(x) => { + assert!(x, "Failed to process valid versioned smart contract block"); + } + Err(e) => { + panic!("Got unexpected error {:?}", &e); + } + }; + peer.sortdb = Some(sortdb); + peer.stacks_node = Some(node); +} + +#[test] +fn test_block_versioned_smart_contract_mempool_rejection_until_v210() { + let mut peer_config = TestPeerConfig::new(function_name!(), 4250, 4251); + + let initial_balances = vec![( + PrincipalData::from(peer_config.spending_account.origin_address().unwrap()), + 1000000, + )]; + + let epochs = vec![ + StacksEpoch { + epoch_id: StacksEpochId::Epoch10, + start_height: 0, + end_height: 0, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_1_0, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch20, + start_height: 0, + end_height: 0, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_0, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch2_05, + start_height: 0, + end_height: 28, // NOTE: the first 25 burnchain blocks have no sortition + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_05, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch21, + start_height: 28, + end_height: STACKS_EPOCH_MAX, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_1, + }, + ]; + + peer_config.epochs = Some(epochs); + peer_config.initial_balances = initial_balances; + let burnchain = peer_config.burnchain.clone(); + + let mut peer = TestPeer::new(peer_config); + let versioned_contract_opt: RefCell> = RefCell::new(None); + let nonce: RefCell = RefCell::new(0); + + let mut make_tenure = + |miner: &mut TestMiner, + sortdb: &mut SortitionDB, + chainstate: &mut StacksChainState, + vrfproof: VRFProof, + parent_opt: Option<&StacksBlock>, + microblock_parent_opt: 
Option<&StacksMicroblockHeader>| { + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + + let stacks_tip_opt = + NakamotoChainState::get_canonical_block_header(chainstate.db(), sortdb).unwrap(); + let parent_tip = match stacks_tip_opt { + None => StacksChainState::get_genesis_header_info(chainstate.db()).unwrap(), + Some(header_tip) => { + let ic = sortdb.index_conn(); + let snapshot = SortitionDB::get_block_snapshot_for_winning_stacks_block( + &ic, + &tip.sortition_id, + &header_tip.anchored_header.block_hash(), + ) + .unwrap() + .unwrap(); // succeeds because we don't fork + StacksChainState::get_anchored_block_header_info( + chainstate.db(), + &snapshot.consensus_hash, + &snapshot.winning_stacks_block_hash, + ) + .unwrap() + .unwrap() + } + }; + + let parent_header_hash = parent_tip.anchored_header.block_hash(); + let parent_consensus_hash = parent_tip.consensus_hash.clone(); + let parent_index_hash = StacksBlockHeader::make_index_block_hash( + &parent_consensus_hash, + &parent_header_hash, + ); + + let next_nonce = *nonce.borrow(); + let coinbase_tx = make_coinbase_with_nonce( + miner, + parent_tip.stacks_block_height as usize, + next_nonce, + None, + ); + + let versioned_contract = make_smart_contract_with_version( + miner, + next_nonce + 1, + tip.block_height.try_into().unwrap(), + 0, + Some(ClarityVersion::Clarity1), + Some(1000), + ); + + *versioned_contract_opt.borrow_mut() = Some(versioned_contract); + *nonce.borrow_mut() = next_nonce + 1; + + let mut mblock_pubkey_hash_bytes = [0u8; 20]; + mblock_pubkey_hash_bytes.copy_from_slice(&coinbase_tx.txid()[0..20]); + + let builder = StacksBlockBuilder::make_block_builder( + &burnchain, + chainstate.mainnet, + &parent_tip, + vrfproof, + tip.total_burn, + Hash160(mblock_pubkey_hash_bytes), + ) + .unwrap(); + + let anchored_block = StacksBlockBuilder::make_anchored_block_from_txs( + builder, + chainstate, + &sortdb.index_conn(), + vec![coinbase_tx], + ) + .unwrap(); + + 
eprintln!("{:?}", &anchored_block.0); + (anchored_block.0, vec![]) + }; + + for i in 0..2 { + let (burn_ops, stacks_block, microblocks) = peer.make_tenure(&mut make_tenure); + let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); + + let sortdb = peer.sortdb.take().unwrap(); + let mut node = peer.stacks_node.take().unwrap(); + + // the empty block should be accepted + match Relayer::process_new_anchored_block( + &sortdb.index_conn(), + &mut node.chainstate, + &consensus_hash, + &stacks_block, + 123, + ) { + Ok(x) => { + assert!(x, "Did not accept valid block"); + } + Err(e) => { + panic!("Got unexpected error {:?}", &e); + } + }; + + // process it + peer.coord.handle_new_stacks_block().unwrap(); + + // the mempool would reject a versioned contract transaction, since we're not yet at + // tenure 28 + let versioned_contract = (*versioned_contract_opt.borrow()).clone().unwrap(); + let versioned_contract_len = versioned_contract.serialize_to_vec().len(); + match node.chainstate.will_admit_mempool_tx( + &sortdb.index_conn(), + &consensus_hash, + &stacks_block.block_hash(), + &versioned_contract, + versioned_contract_len as u64, + ) { + Err(MemPoolRejection::Other(msg)) => { + assert!(msg.find("not supported in this epoch").is_some()); + } + Err(e) => { + panic!("will_admit_mempool_tx {:?}", &e); + } + Ok(_) => { + panic!("will_admit_mempool_tx succeeded"); + } + }; + + peer.sortdb = Some(sortdb); + peer.stacks_node = Some(node); + } + + // *now* it should succeed, since tenure 28 was in epoch 2.1 + let (burn_ops, stacks_block, microblocks) = peer.make_tenure(&mut make_tenure); + let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); + + let sortdb = peer.sortdb.take().unwrap(); + let mut node = peer.stacks_node.take().unwrap(); + match Relayer::process_new_anchored_block( + &sortdb.index_conn(), + &mut node.chainstate, + &consensus_hash, + &stacks_block, + 123, + ) { + Ok(x) => { + assert!(x, "Failed to process valid versioned 
smart contract block");
+        }
+        Err(e) => {
+            panic!("Got unexpected error {:?}", &e);
+        }
+    };
+
+    // process it
+    peer.coord.handle_new_stacks_block().unwrap();
+
+    // the mempool will now accept a versioned contract transaction, since tenure 28
+    // (the start of epoch 2.1) has been reached
+    let versioned_contract = (*versioned_contract_opt.borrow()).clone().unwrap();
+    let versioned_contract_len = versioned_contract.serialize_to_vec().len();
+    match node.chainstate.will_admit_mempool_tx(
+        &sortdb.index_conn(),
+        &consensus_hash,
+        &stacks_block.block_hash(),
+        &versioned_contract,
+        versioned_contract_len as u64,
+    ) {
+        Err(e) => {
+            panic!("will_admit_mempool_tx {:?}", &e);
+        }
+        Ok(_) => {}
+    };
+
+    peer.sortdb = Some(sortdb);
+    peer.stacks_node = Some(node);
+}
+
+// TODO: process bans
+// TODO: test sending invalid blocks-available and microblocks-available (should result in a ban)
+// TODO: test sending invalid transactions (should result in a ban)
+// TODO: test bandwidth limits (sending too much should result in a nack, and then a ban)
diff --git a/stackslib/src/net/tests/relay/mod.rs b/stackslib/src/net/tests/relay/mod.rs
new file mode 100644
index 0000000000..04e8e0fd4f
--- /dev/null
+++ b/stackslib/src/net/tests/relay/mod.rs
@@ -0,0 +1,18 @@
+// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation
+// Copyright (C) 2020-2023 Stacks Open Internet Foundation
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program.
If not, see . + +pub mod epoch2x; +pub mod nakamoto; From b25685817fb194e5d8c89c5cb866d1e97f55269f Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 6 Jun 2024 13:21:29 -0400 Subject: [PATCH 02/20] chore: move unsolicited message handling logic into its own file (net/unsolicited.rs), and implement unsolicited NakamotoBlock validation and handling --- stackslib/src/net/p2p.rs | 778 +---------------------- stackslib/src/net/unsolicited.rs | 1000 ++++++++++++++++++++++++++++++ 2 files changed, 1020 insertions(+), 758 deletions(-) create mode 100644 stackslib/src/net/unsolicited.rs diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index 4e358128ff..6c82950b55 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -284,7 +284,7 @@ pub struct PeerNetwork { // work state -- we can be walking, fetching block inventories, fetching blocks, pruning, etc. pub work_state: PeerNetworkWorkState, pub nakamoto_work_state: PeerNetworkWorkState, - have_data_to_download: bool, + pub(crate) have_data_to_download: bool, // neighbor walk state pub walk: Option>, @@ -960,13 +960,15 @@ impl PeerNetwork { })? } - /// Broadcast a message to a list of neighbors + /// Broadcast a message to a list of neighbors. + /// Neighbors in the `relay_hints` vec will *not* receive data, since they were the one(s) that + /// sent this peer the message in the first place. 
pub fn broadcast_message( &mut self, mut neighbor_keys: Vec, relay_hints: Vec, message_payload: StacksMessageType, - ) -> () { + ) { debug!( "{:?}: Will broadcast '{}' to up to {} neighbors; relayed by {:?}", &self.local_peer, @@ -1292,7 +1294,7 @@ impl PeerNetwork { match request { NetworkRequest::Ban(neighbor_keys) => { for neighbor_key in neighbor_keys.iter() { - debug!("Request to ban {:?}", neighbor_key); + info!("Request to ban {:?}", neighbor_key); match self.events.get(neighbor_key) { Some(event_id) => { debug!("Will ban {:?} (event {})", neighbor_key, event_id); @@ -1344,6 +1346,18 @@ impl PeerNetwork { } Ok(all_neighbors.into_iter().collect()) } + StacksMessageType::NakamotoBlocks(ref data) => { + // send to each neighbor that needs one + let mut all_neighbors = HashSet::new(); + for nakamoto_block in data.blocks.iter() { + let mut neighbors = + self.sample_broadcast_peers(&relay_hints, nakamoto_block)?; + for nk in neighbors.drain(..) { + all_neighbors.insert(nk); + } + } + Ok(all_neighbors.into_iter().collect()) + } StacksMessageType::Transaction(ref data) => { self.sample_broadcast_peers(&relay_hints, data) } @@ -4366,759 +4380,6 @@ impl PeerNetwork { Some(outbound_neighbor_key) } - /// Update a peer's inventory state to indicate that the given block is available. - /// If updated, return the sortition height of the bit in the inv that was set. 
- /// Only valid for epoch 2.x - fn handle_unsolicited_inv_update_epoch2x( - &mut self, - sortdb: &SortitionDB, - event_id: usize, - outbound_neighbor_key: &NeighborKey, - consensus_hash: &ConsensusHash, - microblocks: bool, - ) -> Result, net_error> { - let epoch = self.get_current_epoch(); - if epoch.epoch_id >= StacksEpochId::Epoch30 { - info!( - "{:?}: Ban peer event {} for sending an inv 2.x update for {} in epoch 3.x", - event_id, - self.get_local_peer(), - consensus_hash - ); - self.bans.insert(event_id); - - if let Some(outbound_event_id) = self.events.get(&outbound_neighbor_key) { - self.bans.insert(*outbound_event_id); - } - return Ok(None); - } - - let block_sortition_height = match self.inv_state { - Some(ref mut inv) => { - let res = if microblocks { - inv.set_microblocks_available( - &self.burnchain, - outbound_neighbor_key, - sortdb, - consensus_hash, - ) - } else { - inv.set_block_available( - &self.burnchain, - outbound_neighbor_key, - sortdb, - consensus_hash, - ) - }; - - match res { - Ok(Some(block_height)) => block_height, - Ok(None) => { - debug!( - "{:?}: We already know the inventory state in {} for {}", - &self.local_peer, outbound_neighbor_key, consensus_hash - ); - return Ok(None); - } - Err(net_error::NotFoundError) => { - // is this remote node simply ahead of us? 
- if let Some(convo) = self.peers.get(&event_id) { - if self.chain_view.burn_block_height < convo.burnchain_tip_height { - debug!("{:?}: Unrecognized consensus hash {}; it is possible that {} is ahead of us", &self.local_peer, consensus_hash, outbound_neighbor_key); - return Err(net_error::NotFoundError); - } - } - // not ahead of us -- it's a bad consensus hash - debug!("{:?}: Unrecognized consensus hash {}; assuming that {} has a different chain view", &self.local_peer, consensus_hash, outbound_neighbor_key); - return Ok(None); - } - Err(net_error::InvalidMessage) => { - // punish this peer - info!( - "Peer {:?} sent an invalid update for {}", - &outbound_neighbor_key, - if microblocks { - "streamed microblocks" - } else { - "blocks" - } - ); - self.bans.insert(event_id); - - if let Some(outbound_event_id) = self.events.get(&outbound_neighbor_key) { - self.bans.insert(*outbound_event_id); - } - return Ok(None); - } - Err(e) => { - warn!( - "Failed to update inv state for {:?}: {:?}", - &outbound_neighbor_key, &e - ); - return Ok(None); - } - } - } - None => { - return Ok(None); - } - }; - Ok(Some(block_sortition_height)) - } - - /// Buffer a message for re-processing once the burnchain view updates - fn buffer_data_message(&mut self, event_id: usize, msg: StacksMessage) -> () { - if let Some(msgs) = self.pending_messages.get_mut(&event_id) { - // check limits: - // at most 1 BlocksAvailable - // at most 1 MicroblocksAvailable - // at most 1 BlocksData - // at most $self.connection_opts.max_buffered_microblocks MicroblocksDatas - let mut blocks_available = 0; - let mut microblocks_available = 0; - let mut blocks_data = 0; - let mut microblocks_data = 0; - for msg in msgs.iter() { - match &msg.payload { - StacksMessageType::BlocksAvailable(_) => { - blocks_available += 1; - } - StacksMessageType::MicroblocksAvailable(_) => { - microblocks_available += 1; - } - StacksMessageType::Blocks(_) => { - blocks_data += 1; - } - StacksMessageType::Microblocks(_) => { - 
microblocks_data += 1; - } - _ => {} - } - } - - if let StacksMessageType::BlocksAvailable(_) = &msg.payload { - if blocks_available >= self.connection_opts.max_buffered_blocks_available { - debug!( - "{:?}: Drop BlocksAvailable from event {} -- already have {} buffered", - &self.local_peer, event_id, blocks_available - ); - return; - } - } - if let StacksMessageType::MicroblocksAvailable(_) = &msg.payload { - if microblocks_available >= self.connection_opts.max_buffered_microblocks_available - { - debug!( - "{:?}: Drop MicroblocksAvailable from event {} -- already have {} buffered", - &self.local_peer, event_id, microblocks_available - ); - return; - } - } - if let StacksMessageType::Blocks(_) = &msg.payload { - if blocks_data >= self.connection_opts.max_buffered_blocks { - debug!( - "{:?}: Drop BlocksData from event {} -- already have {} buffered", - &self.local_peer, event_id, blocks_data - ); - return; - } - } - if let StacksMessageType::Microblocks(_) = &msg.payload { - if microblocks_data >= self.connection_opts.max_buffered_microblocks { - debug!( - "{:?}: Drop MicroblocksData from event {} -- already have {} buffered", - &self.local_peer, event_id, microblocks_data - ); - return; - } - } - msgs.push(msg); - debug!( - "{:?}: Event {} has {} messages buffered", - &self.local_peer, - event_id, - msgs.len() - ); - } else { - self.pending_messages.insert(event_id, vec![msg]); - debug!( - "{:?}: Event {} has 1 messages buffered", - &self.local_peer, event_id - ); - } - } - - /// Do we need a block or microblock stream, given its sortition's consensus hash? - fn need_block_or_microblock_stream( - sortdb: &SortitionDB, - chainstate: &StacksChainState, - consensus_hash: &ConsensusHash, - is_microblock: bool, - ) -> Result { - let sn = SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &consensus_hash)? 
- .ok_or(chainstate_error::NoSuchBlockError)?; - let block_hash_opt = if sn.sortition { - Some(sn.winning_stacks_block_hash) - } else { - None - }; - - let inv = chainstate.get_blocks_inventory(&[(consensus_hash.clone(), block_hash_opt)])?; - if is_microblock { - // checking for microblock absence - Ok(inv.microblocks_bitvec[0] == 0) - } else { - // checking for block absence - Ok(inv.block_bitvec[0] == 0) - } - } - - /// Handle unsolicited BlocksAvailable. - /// Update our inv for this peer. - /// Mask errors. - /// Return whether or not we need to buffer this message - fn handle_unsolicited_BlocksAvailable( - &mut self, - sortdb: &SortitionDB, - chainstate: &StacksChainState, - event_id: usize, - new_blocks: &BlocksAvailableData, - ibd: bool, - buffer: bool, - ) -> bool { - let outbound_neighbor_key = match self.find_outbound_neighbor(event_id) { - Some(onk) => onk, - None => { - return false; - } - }; - - debug!( - "{:?}: Process BlocksAvailable from {:?} with {} entries", - &self.local_peer, - outbound_neighbor_key, - new_blocks.available.len() - ); - - let mut to_buffer = false; - for (consensus_hash, block_hash) in new_blocks.available.iter() { - let block_sortition_height = match self.handle_unsolicited_inv_update_epoch2x( - sortdb, - event_id, - &outbound_neighbor_key, - consensus_hash, - false, - ) { - Ok(Some(bsh)) => bsh, - Ok(None) => { - continue; - } - Err(net_error::NotFoundError) => { - if buffer { - debug!("{:?}: Will buffer BlocksAvailable for {} until the next burnchain view update", &self.local_peer, &consensus_hash); - to_buffer = true; - } - continue; - } - Err(e) => { - info!( - "{:?}: Failed to handle BlocksAvailable({}/{}) from {}: {:?}", - &self.local_peer, &consensus_hash, &block_hash, &outbound_neighbor_key, &e - ); - continue; - } - }; - - let need_block = match PeerNetwork::need_block_or_microblock_stream( - sortdb, - chainstate, - &consensus_hash, - false, - ) { - Ok(x) => x, - Err(e) => { - warn!( - "Failed to determine if we need 
block for consensus hash {}: {:?}", - &consensus_hash, &e - ); - false - } - }; - - debug!( - "Need block {}/{}? {}", - &consensus_hash, &block_hash, need_block - ); - - if need_block { - // have the downloader request this block if it's new and we don't have it - match self.block_downloader { - Some(ref mut downloader) => { - downloader.hint_block_sortition_height_available( - block_sortition_height, - ibd, - need_block, - ); - - // advance straight to download state if we're in inv state - if self.work_state == PeerNetworkWorkState::BlockInvSync { - debug!("{:?}: advance directly to block download with knowledge of block sortition {}", &self.local_peer, block_sortition_height); - } - self.have_data_to_download = true; - } - None => {} - } - } - } - - to_buffer - } - - /// Handle unsolicited MicroblocksAvailable. - /// Update our inv for this peer. - /// Mask errors. - /// Return whether or not we need to buffer this message - fn handle_unsolicited_MicroblocksAvailable( - &mut self, - sortdb: &SortitionDB, - chainstate: &StacksChainState, - event_id: usize, - new_mblocks: &BlocksAvailableData, - ibd: bool, - buffer: bool, - ) -> bool { - let outbound_neighbor_key = match self.find_outbound_neighbor(event_id) { - Some(onk) => onk, - None => { - return false; - } - }; - - debug!( - "{:?}: Process MicroblocksAvailable from {:?} with {} entries", - &self.local_peer, - outbound_neighbor_key, - new_mblocks.available.len() - ); - - let mut to_buffer = false; - for (consensus_hash, block_hash) in new_mblocks.available.iter() { - let mblock_sortition_height = match self.handle_unsolicited_inv_update_epoch2x( - sortdb, - event_id, - &outbound_neighbor_key, - consensus_hash, - true, - ) { - Ok(Some(bsh)) => bsh, - Ok(None) => { - continue; - } - Err(net_error::NotFoundError) => { - if buffer { - debug!("{:?}: Will buffer MicroblocksAvailable for {} until the next burnchain view update", &self.local_peer, &consensus_hash); - to_buffer = true; - } - continue; - } - Err(e) => { 
- info!( - "{:?}: Failed to handle MicroblocksAvailable({}/{}) from {}: {:?}", - &self.local_peer, &consensus_hash, &block_hash, &outbound_neighbor_key, &e - ); - continue; - } - }; - - let need_microblock_stream = match PeerNetwork::need_block_or_microblock_stream( - sortdb, - chainstate, - &consensus_hash, - true, - ) { - Ok(x) => x, - Err(e) => { - warn!("Failed to determine if we need microblock stream for consensus hash {}: {:?}", &consensus_hash, &e); - false - } - }; - - debug!( - "Need microblock stream {}/{}? {}", - &consensus_hash, &block_hash, need_microblock_stream - ); - - if need_microblock_stream { - // have the downloader request this microblock stream if it's new to us - match self.block_downloader { - Some(ref mut downloader) => { - downloader.hint_microblock_sortition_height_available( - mblock_sortition_height, - ibd, - need_microblock_stream, - ); - - // advance straight to download state if we're in inv state - if self.work_state == PeerNetworkWorkState::BlockInvSync { - debug!("{:?}: advance directly to block download with knowledge of microblock stream {}", &self.local_peer, mblock_sortition_height); - } - self.have_data_to_download = true; - } - None => {} - } - } - } - to_buffer - } - - /// Handle unsolicited BlocksData. - /// Don't (yet) validate the data, but do update our inv for the peer that sent it, if we have - /// an outbound connection to that peer. Accept the blocks data either way if it corresponds - /// to a winning sortition -- this will cause the blocks data to be fed into the relayer, which - /// will then decide whether or not it needs to be stored and/or forwarded. - /// Mask errors. 
- fn handle_unsolicited_BlocksData( - &mut self, - sortdb: &SortitionDB, - event_id: usize, - new_blocks: &BlocksData, - buffer: bool, - ) -> bool { - let (remote_neighbor_key, remote_is_authenticated) = match self.peers.get(&event_id) { - Some(convo) => (convo.to_neighbor_key(), convo.is_authenticated()), - None => { - test_debug!( - "{:?}: No such neighbor event={}", - &self.local_peer, - event_id - ); - return false; - } - }; - - if !remote_is_authenticated { - // drop -- a correct peer will have authenticated before sending this message - test_debug!( - "{:?}: Drop unauthenticated BlocksData from {:?}", - &self.local_peer, - &remote_neighbor_key - ); - return false; - } - - let outbound_neighbor_key_opt = self.find_outbound_neighbor(event_id); - - debug!( - "{:?}: Process BlocksData from {:?} with {} entries", - &self.local_peer, - outbound_neighbor_key_opt - .as_ref() - .unwrap_or(&remote_neighbor_key), - new_blocks.blocks.len() - ); - - let mut to_buffer = false; - - for BlocksDatum(consensus_hash, block) in new_blocks.blocks.iter() { - let sn = match SortitionDB::get_block_snapshot_consensus( - &sortdb.conn(), - &consensus_hash, - ) { - Ok(Some(sn)) => sn, - Ok(None) => { - if buffer { - debug!( - "{:?}: Will buffer unsolicited BlocksData({}/{}) ({}) -- consensus hash not (yet) recognized", - &self.local_peer, - &consensus_hash, - &block.block_hash(), - StacksBlockHeader::make_index_block_hash( - &consensus_hash, - &block.block_hash() - ) - ); - to_buffer = true; - } else { - debug!( - "{:?}: Will drop unsolicited BlocksData({}/{}) ({}) -- consensus hash not (yet) recognized", - &self.local_peer, - &consensus_hash, - &block.block_hash(), - StacksBlockHeader::make_index_block_hash( - &consensus_hash, - &block.block_hash() - ) - ); - } - continue; - } - Err(e) => { - info!( - "{:?}: Failed to query block snapshot for {}: {:?}", - &self.local_peer, consensus_hash, &e - ); - continue; - } - }; - - if !sn.pox_valid { - info!( - "{:?}: Failed to query snapshot for 
{}: not on the valid PoX fork", - &self.local_peer, consensus_hash - ); - continue; - } - - if sn.winning_stacks_block_hash != block.block_hash() { - info!( - "{:?}: Ignoring block {} -- winning block was {} (sortition: {})", - &self.local_peer, - block.block_hash(), - sn.winning_stacks_block_hash, - sn.sortition - ); - continue; - } - - // only bother updating the inventory for this event's peer if we have an outbound - // connection to it. - if let Some(outbound_neighbor_key) = outbound_neighbor_key_opt.as_ref() { - let _ = self.handle_unsolicited_inv_update_epoch2x( - sortdb, - event_id, - &outbound_neighbor_key, - &sn.consensus_hash, - false, - ); - } - } - - to_buffer - } - - /// Handle unsolicited MicroblocksData. - /// Returns whether or not to buffer (if buffer is true) - /// Returns whether or not to pass to the relayer (if buffer is false). - fn handle_unsolicited_MicroblocksData( - &mut self, - chainstate: &StacksChainState, - event_id: usize, - new_microblocks: &MicroblocksData, - buffer: bool, - ) -> bool { - let (remote_neighbor_key, remote_is_authenticated) = match self.peers.get(&event_id) { - Some(convo) => (convo.to_neighbor_key(), convo.is_authenticated()), - None => { - test_debug!( - "{:?}: No such neighbor event={}", - &self.local_peer, - event_id - ); - return false; - } - }; - - if !remote_is_authenticated { - // drop -- a correct peer will have authenticated before sending this message - test_debug!( - "{:?}: Drop unauthenticated MicroblocksData from {:?}", - &self.local_peer, - &remote_neighbor_key - ); - return false; - } - - let outbound_neighbor_key_opt = self.find_outbound_neighbor(event_id); - - debug!( - "{:?}: Process MicroblocksData from {:?} for {} with {} entries", - &self.local_peer, - outbound_neighbor_key_opt - .as_ref() - .unwrap_or(&remote_neighbor_key), - &new_microblocks.index_anchor_block, - new_microblocks.microblocks.len() - ); - - // do we have the associated anchored block? 
- match chainstate.get_block_header_hashes(&new_microblocks.index_anchor_block) { - Ok(Some(_)) => { - // yup; can process now - debug!("{:?}: have microblock parent anchored block {}, so can process its microblocks", &self.local_peer, &new_microblocks.index_anchor_block); - !buffer - } - Ok(None) => { - if buffer { - debug!( - "{:?}: Will buffer unsolicited MicroblocksData({})", - &self.local_peer, &new_microblocks.index_anchor_block - ); - true - } else { - debug!( - "{:?}: Will not buffer unsolicited MicroblocksData({})", - &self.local_peer, &new_microblocks.index_anchor_block - ); - false - } - } - Err(e) => { - warn!( - "{:?}: Failed to get header hashes for {:?}: {:?}", - &self.local_peer, &new_microblocks.index_anchor_block, &e - ); - false - } - } - } - - /// Returns (true, x) if we should buffer the message and try again - /// Returns (x, true) if the relayer should receive the message - fn handle_unsolicited_message( - &mut self, - sortdb: &SortitionDB, - chainstate: &StacksChainState, - event_id: usize, - preamble: &Preamble, - payload: &StacksMessageType, - ibd: bool, - buffer: bool, - ) -> (bool, bool) { - match payload { - // Update our inv state for this peer, but only do so if we have an - // outbound connection to it and it's authenticated (we don't synchronize inv - // state with inbound peers). Since we will have received this message - // from an _inbound_ conversation, we need to find the reciprocal _outbound_ - // conversation and use _that_ conversation's neighbor key to identify - // which inventory we need to update. 
- StacksMessageType::BlocksAvailable(ref new_blocks) => { - let to_buffer = self.handle_unsolicited_BlocksAvailable( - sortdb, chainstate, event_id, new_blocks, ibd, buffer, - ); - (to_buffer, false) - } - StacksMessageType::MicroblocksAvailable(ref new_mblocks) => { - let to_buffer = self.handle_unsolicited_MicroblocksAvailable( - sortdb, - chainstate, - event_id, - new_mblocks, - ibd, - buffer, - ); - (to_buffer, false) - } - StacksMessageType::Blocks(ref new_blocks) => { - // update inv state for this peer - let to_buffer = - self.handle_unsolicited_BlocksData(sortdb, event_id, new_blocks, buffer); - - // forward to relayer for processing - (to_buffer, true) - } - StacksMessageType::Microblocks(ref new_mblocks) => { - let to_buffer = self.handle_unsolicited_MicroblocksData( - chainstate, - event_id, - new_mblocks, - buffer, - ); - - // only forward to the relayer if we don't need to buffer it. - (to_buffer, true) - } - StacksMessageType::StackerDBPushChunk(ref data) => { - match self.handle_unsolicited_StackerDBPushChunk(event_id, preamble, data) { - Ok(x) => { - // don't buffer, but do reject if invalid - (false, x) - } - Err(e) => { - info!( - "{:?}: failed to handle unsolicited {:?}: {:?}", - &self.local_peer, payload, &e - ); - (false, false) - } - } - } - _ => (false, true), - } - } - - /// Handle unsolicited messages propagated up to us from our ongoing ConversationP2Ps. - /// Return messages that we couldn't handle here, but key them by neighbor, not event. - /// Drop invalid messages. - /// If buffer is true, then re-try handling this message once the burnchain view advances. 
- fn handle_unsolicited_messages( - &mut self, - sortdb: &SortitionDB, - chainstate: &StacksChainState, - unsolicited: HashMap>, - ibd: bool, - buffer: bool, - ) -> HashMap> { - let mut unhandled: HashMap> = HashMap::new(); - for (event_id, messages) in unsolicited.into_iter() { - if messages.len() == 0 { - // no messages for this event - continue; - } - - let neighbor_key = if let Some(convo) = self.peers.get(&event_id) { - convo.to_neighbor_key() - } else { - debug!( - "{:?}: No longer such neighbor event={}, dropping {} unsolicited messages", - &self.local_peer, - event_id, - messages.len() - ); - continue; - }; - - debug!("{:?}: Process {} unsolicited messages from {:?}", &self.local_peer, messages.len(), &neighbor_key; "buffer" => %buffer); - - for message in messages.into_iter() { - if !buffer { - debug!( - "{:?}: Re-try handling buffered message {} from {:?}", - &self.local_peer, - &message.payload.get_message_description(), - &neighbor_key - ); - } - let (to_buffer, relay) = self.handle_unsolicited_message( - sortdb, - chainstate, - event_id, - &message.preamble, - &message.payload, - ibd, - buffer, - ); - if buffer && to_buffer { - self.buffer_data_message(event_id, message); - } else if relay { - // forward to relayer for processing - debug!( - "{:?}: Will forward message {} from {:?} to relayer", - &self.local_peer, - &message.payload.get_message_description(), - &neighbor_key - ); - if let Some(msgs) = unhandled.get_mut(&neighbor_key) { - msgs.push(message); - } else { - unhandled.insert(neighbor_key.clone(), vec![message]); - } - } - } - } - unhandled - } - /// Find unauthenticated inbound conversations fn find_unauthenticated_inbound_convos(&self) -> Vec { let mut ret = vec![]; @@ -6047,6 +5308,7 @@ impl PeerNetwork { self.num_state_machine_passes, self.num_inv_sync_passes, self.num_downloader_passes, + self.peers.len(), self.chain_view.burn_block_height, self.chain_view.rc_consensus_hash.clone(), self.get_stacker_db_configs_owned(), @@ -6126,8 
+5388,8 @@ mod test { use crate::net::atlas::*; use crate::net::codec::*; use crate::net::db::*; - use crate::net::relay::test::make_contract_tx; use crate::net::test::*; + use crate::net::tests::relay::epoch2x::make_contract_tx; use crate::net::*; use crate::util_lib::test::*; diff --git a/stackslib/src/net/unsolicited.rs b/stackslib/src/net/unsolicited.rs new file mode 100644 index 0000000000..29d9009f6f --- /dev/null +++ b/stackslib/src/net/unsolicited.rs @@ -0,0 +1,1000 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::collections::HashMap; + +use stacks_common::types::chainstate::{BlockHeaderHash, ConsensusHash}; + +use crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::chainstate::nakamoto::NakamotoBlock; +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::{Error as ChainstateError, StacksBlockHeader}; +use crate::net::p2p::{PeerNetwork, PeerNetworkWorkState}; +use crate::net::{ + BlocksAvailableData, BlocksData, BlocksDatum, Error as NetError, MicroblocksData, + NakamotoBlocksData, NeighborKey, Preamble, StacksMessage, StacksMessageType, +}; + +/// This module contains all of the code needed to handle unsolicited messages -- that is, messages +/// that get pushed to us. 
These include:
+///
+/// * BlocksAvailable (epoch 2.x)
+/// * MicroblocksAvailable (epoch 2.x)
+/// * BlocksData (epoch 2.x)
+/// * NakamotoBlocksData (epoch 3.x)
+///
+/// Normally, the PeerNetwork will attempt to validate each message and pass it to the Relayer via
+/// a NetworkResult. However, some kinds of messages (such as these) cannot always be
+/// validated, because validation depends on chainstate data that is not yet available. For
+/// example, if this node is behind the burnchain chain tip, it will be unable to verify blocks
+/// pushed to it for sortitions that have yet to be processed locally.
+///
+/// In the event that a message cannot be validated, the PeerNetwork will instead store these
+/// messages internally (in `self.pending_messages`), and try to validate them again once the
+/// burnchain view changes.
+///
+/// Transactions are not considered here, but are handled separately with the mempool
+/// synchronization state machine.
+
+impl PeerNetwork {
+    /// Check that the sender is authenticated.
+    /// Returns Some(remote sender address) if so
+    /// Returns None otherwise
+    fn check_peer_authenticated(&self, event_id: usize) -> Option<NeighborKey> {
+        let Some((remote_neighbor_key, remote_is_authenticated)) = self
+            .peers
+            .get(&event_id)
+            .map(|convo| (convo.to_neighbor_key(), convo.is_authenticated()))
+        else {
+            test_debug!(
+                "{:?}: No such neighbor event={}",
+                &self.local_peer,
+                event_id
+            );
+            return None;
+        };
+
+        if !remote_is_authenticated {
+            // drop -- a correct peer will have authenticated before sending this message
+            test_debug!(
+                "{:?}: Unauthenticated neighbor {:?}",
+                &self.local_peer,
+                &remote_neighbor_key
+            );
+            return None;
+        }
+        Some(remote_neighbor_key)
+    }
+
+    /// Update a peer's inventory state to indicate that the given block is available.
+    /// If updated, return the sortition height of the bit in the inv that was set.
+ /// Only valid for epoch 2.x + fn handle_unsolicited_inv_update_epoch2x( + &mut self, + sortdb: &SortitionDB, + event_id: usize, + outbound_neighbor_key: &NeighborKey, + consensus_hash: &ConsensusHash, + microblocks: bool, + ) -> Result, NetError> { + let Some(inv) = self.inv_state.as_mut() else { + return Ok(None); + }; + + let res = if microblocks { + inv.set_microblocks_available( + &self.burnchain, + outbound_neighbor_key, + sortdb, + consensus_hash, + ) + } else { + inv.set_block_available( + &self.burnchain, + outbound_neighbor_key, + sortdb, + consensus_hash, + ) + }; + + let block_sortition_height = match res { + Ok(Some(block_height)) => block_height, + Ok(None) => { + debug!( + "{:?}: We already know the inventory state in {} for {}", + &self.local_peer, outbound_neighbor_key, consensus_hash + ); + return Ok(None); + } + Err(NetError::NotFoundError) => { + // is this remote node simply ahead of us? + if let Some(convo) = self.peers.get(&event_id) { + if self.chain_view.burn_block_height < convo.burnchain_tip_height { + debug!("{:?}: Unrecognized consensus hash {}; it is possible that {} is ahead of us", &self.local_peer, consensus_hash, outbound_neighbor_key); + return Err(NetError::NotFoundError); + } + } + // not ahead of us -- it's a bad consensus hash + debug!("{:?}: Unrecognized consensus hash {}; assuming that {} has a different chain view", &self.local_peer, consensus_hash, outbound_neighbor_key); + return Ok(None); + } + Err(NetError::InvalidMessage) => { + // punish this peer + info!( + "Peer {:?} sent an invalid update for {}", + &outbound_neighbor_key, + if microblocks { + "streamed microblocks" + } else { + "blocks" + } + ); + self.bans.insert(event_id); + + if let Some(outbound_event_id) = self.events.get(&outbound_neighbor_key) { + self.bans.insert(*outbound_event_id); + } + return Ok(None); + } + Err(e) => { + warn!( + "Failed to update inv state for {:?}: {:?}", + &outbound_neighbor_key, &e + ); + return Ok(None); + } + }; + 
Ok(Some(block_sortition_height)) + } + + /// Buffer a message for re-processing once the burnchain view updates. + /// If there is no space for the message, then silently drop it. + fn buffer_data_message(&mut self, event_id: usize, msg: StacksMessage) { + let Some(msgs) = self.pending_messages.get_mut(&event_id) else { + self.pending_messages.insert(event_id, vec![msg]); + debug!( + "{:?}: Event {} has 1 messages buffered", + &self.local_peer, event_id + ); + return; + }; + + // check limits against connection opts, and if the limit is not met, then buffer up the + // message. + let mut blocks_available = 0; + let mut microblocks_available = 0; + let mut blocks_data = 0; + let mut microblocks_data = 0; + let mut nakamoto_blocks_data = 0; + for msg in msgs.iter() { + match &msg.payload { + StacksMessageType::BlocksAvailable(_) => { + blocks_available += 1; + if blocks_available >= self.connection_opts.max_buffered_blocks_available { + debug!( + "{:?}: Drop BlocksAvailable from event {} -- already have {} buffered", + &self.local_peer, event_id, blocks_available + ); + return; + } + } + StacksMessageType::MicroblocksAvailable(_) => { + microblocks_available += 1; + if microblocks_available + >= self.connection_opts.max_buffered_microblocks_available + { + debug!( + "{:?}: Drop MicroblocksAvailable from event {} -- already have {} buffered", + &self.local_peer, event_id, microblocks_available + ); + return; + } + } + StacksMessageType::Blocks(_) => { + blocks_data += 1; + if blocks_data >= self.connection_opts.max_buffered_blocks { + debug!( + "{:?}: Drop BlocksData from event {} -- already have {} buffered", + &self.local_peer, event_id, blocks_data + ); + return; + } + } + StacksMessageType::Microblocks(_) => { + microblocks_data += 1; + if microblocks_data >= self.connection_opts.max_buffered_microblocks { + debug!( + "{:?}: Drop MicroblocksData from event {} -- already have {} buffered", + &self.local_peer, event_id, microblocks_data + ); + return; + } + } + 
StacksMessageType::NakamotoBlocks(_) => { + nakamoto_blocks_data += 1; + if nakamoto_blocks_data >= self.connection_opts.max_buffered_nakamoto_blocks { + debug!( + "{:?}: Drop NakamotoBlocksData from event {} -- already have {} buffered", + &self.local_peer, event_id, nakamoto_blocks_data + ); + return; + } + } + _ => {} + } + } + + msgs.push(msg); + debug!( + "{:?}: Event {} has {} messages buffered", + &self.local_peer, + event_id, + msgs.len() + ); + } + + /// Do we need a block or microblock stream, given its sortition's consensus hash? + fn need_block_or_microblock_stream( + sortdb: &SortitionDB, + chainstate: &StacksChainState, + consensus_hash: &ConsensusHash, + is_microblock: bool, + ) -> Result { + let sn = SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &consensus_hash)? + .ok_or(ChainstateError::NoSuchBlockError)?; + let block_hash_opt = if sn.sortition { + Some(sn.winning_stacks_block_hash) + } else { + None + }; + + let inv = chainstate.get_blocks_inventory(&[(consensus_hash.clone(), block_hash_opt)])?; + if is_microblock { + // checking for microblock absence + Ok(inv.microblocks_bitvec[0] == 0) + } else { + // checking for block absence + Ok(inv.block_bitvec[0] == 0) + } + } + + /// Handle unsolicited BlocksAvailable. If it is valid, and it represents a block that this + /// peer does not have, then hint to the epoch2x downloader that it needs to go and fetch it. + /// Also, update this peer's copy of the remote sender's inv to indicate that it has the block, + /// so the downloader can eventually request the block regardless of whether or not the hint is + /// effective. + /// + /// This function only accepts BlocksAvailable messages from outbound peers, since we only + /// track inventories for outbound peers. + /// + /// The caller can call this in one of two ways: with `buffer` set to `true` or `false`. If + /// `buffer` is `true`, then the caller is asking to know if the message can be buffered if it + /// cannot be handled. 
If it is instead `false`, then the caller is asking to simply try and + /// handle the given message. In both cases, the blocks' validity will be checked against the + /// sortition DB, and if they correspond to real sortitions, then the remote peer's inventory + /// will be updated and the local peer's downloader will be alerted to this block. + /// + /// Errors pertaining to the validity of the message are logged but not returned. + fn handle_unsolicited_BlocksAvailable( + &mut self, + sortdb: &SortitionDB, + chainstate: &StacksChainState, + event_id: usize, + new_blocks: &BlocksAvailableData, + ibd: bool, + buffer: bool, + ) -> bool { + let Some(outbound_neighbor_key) = self.find_outbound_neighbor(event_id) else { + // we only accept BlocksAvailable from outbound peers, since we only crawl invs from + // outbound peers. + return false; + }; + + debug!( + "{:?}: Process BlocksAvailable from {:?} with {} entries", + &self.local_peer, + &outbound_neighbor_key, + new_blocks.available.len() + ); + + let mut to_buffer = false; + for (consensus_hash, block_hash) in new_blocks.available.iter() { + let block_sortition_height = match self.handle_unsolicited_inv_update_epoch2x( + sortdb, + event_id, + &outbound_neighbor_key, + consensus_hash, + false, + ) { + Ok(Some(bsh)) => bsh, + Ok(None) => { + continue; + } + Err(NetError::NotFoundError) => { + if buffer { + debug!("{:?}: Will buffer BlocksAvailable for {} until the next burnchain view update", &self.local_peer, &consensus_hash); + to_buffer = true; + } + continue; + } + Err(e) => { + info!( + "{:?}: Failed to handle BlocksAvailable({}/{}) from {}: {:?}", + &self.local_peer, &consensus_hash, &block_hash, &outbound_neighbor_key, &e + ); + continue; + } + }; + + let need_block = match PeerNetwork::need_block_or_microblock_stream( + sortdb, + chainstate, + &consensus_hash, + false, + ) { + Ok(x) => x, + Err(e) => { + warn!( + "Failed to determine if we need block for consensus hash {}: {:?}", + &consensus_hash, &e + ); + 
false + } + }; + + debug!( + "Need block {}/{}? {}", + &consensus_hash, &block_hash, need_block + ); + + if need_block { + // have the downloader request this block if it's new and we don't have it + match self.block_downloader { + Some(ref mut downloader) => { + downloader.hint_block_sortition_height_available( + block_sortition_height, + ibd, + need_block, + ); + + // advance straight to download state if we're in inv state + if self.work_state == PeerNetworkWorkState::BlockInvSync { + debug!("{:?}: advance directly to block download with knowledge of block sortition {}", &self.local_peer, block_sortition_height); + } + self.have_data_to_download = true; + } + None => {} + } + } + } + + to_buffer + } + + /// Handle unsolicited MicroblocksAvailable. If it is valid, and it represents a microblock stream that this + /// peer does not have, then hint to the epoch2x downloader that it needs to go and fetch it. + /// Also, update this peer's copy of the remote sender's inv to indicate that it has the stream, + /// so the downloader can eventually request the stream regardless of whether or not the hint is + /// effective. + /// + /// This function only accepts MicroblocksAvailable messages from outbound peers, since we only + /// track inventories for outbound peers. + /// + /// The caller can call this in one of two ways: with `buffer` set to `true` or `false`. If + /// `buffer` is `true`, then the caller is asking to know if the message can be buffered if it + /// cannot be handled. If it is instead `false`, then the caller is asking to simply try and + /// handle the given message. In both cases, the remote peer's inventory will be updated and + /// the local peer's downloader will be alerted to the presence of these microblocks. + /// + /// Errors pertaining to the validity of the message are logged but not returned. + /// + /// Return whether or not we need to buffer this message for subsequent consideration. 
+ fn handle_unsolicited_MicroblocksAvailable( + &mut self, + sortdb: &SortitionDB, + chainstate: &StacksChainState, + event_id: usize, + new_mblocks: &BlocksAvailableData, + ibd: bool, + buffer: bool, + ) -> bool { + let Some(outbound_neighbor_key) = self.find_outbound_neighbor(event_id) else { + return false; + }; + + debug!( + "{:?}: Process MicroblocksAvailable from {:?} with {} entries", + &self.local_peer, + outbound_neighbor_key, + new_mblocks.available.len() + ); + + let mut to_buffer = false; + for (consensus_hash, block_hash) in new_mblocks.available.iter() { + let mblock_sortition_height = match self.handle_unsolicited_inv_update_epoch2x( + sortdb, + event_id, + &outbound_neighbor_key, + consensus_hash, + true, + ) { + Ok(Some(bsh)) => bsh, + Ok(None) => { + continue; + } + Err(NetError::NotFoundError) => { + if buffer { + debug!("{:?}: Will buffer MicroblocksAvailable for {} until the next burnchain view update", &self.local_peer, &consensus_hash); + to_buffer = true; + } + continue; + } + Err(e) => { + info!( + "{:?}: Failed to handle MicroblocksAvailable({}/{}) from {:?}: {:?}", + &self.local_peer, &consensus_hash, &block_hash, &outbound_neighbor_key, &e + ); + continue; + } + }; + + let need_microblock_stream = match PeerNetwork::need_block_or_microblock_stream( + sortdb, + chainstate, + &consensus_hash, + true, + ) { + Ok(x) => x, + Err(e) => { + warn!("Failed to determine if we need microblock stream for consensus hash {}: {:?}", &consensus_hash, &e); + false + } + }; + + debug!( + "Need microblock stream {}/{}? 
{}", + &consensus_hash, &block_hash, need_microblock_stream + ); + + if need_microblock_stream { + // have the downloader request this microblock stream if it's new to us + if let Some(downloader) = self.block_downloader.as_mut() { + downloader.hint_microblock_sortition_height_available( + mblock_sortition_height, + ibd, + need_microblock_stream, + ); + + // advance straight to download state if we're in inv state + if self.work_state == PeerNetworkWorkState::BlockInvSync { + debug!("{:?}: advance directly to block download with knowledge of microblock stream {}", &self.local_peer, mblock_sortition_height); + } + self.have_data_to_download = true; + } + } + } + to_buffer + } + + /// Handle unsolicited BlocksData. + /// + /// Don't (yet) validate the data, but do update our inv for the peer that sent it, if we have + /// an outbound connection to that peer. + /// + /// Log but do nothing with errors in validation. + /// + /// The caller can call this in one of two ways: with `buffer` set to `true` or `false`. If + /// `buffer` is `true`, then the caller is asking to know if the message can be buffered if it + /// cannot be handled. If it is instead `false`, then the caller is asking to simply try and + /// handle the given message. In both cases, the block will be checked against the local + /// sortition DB, and if it corresponds to a sortition, the remote peer's inventory will be + /// updated to reflect that it has it. + /// + /// Returns true if we have to buffer this message; false if not. 
+ fn handle_unsolicited_BlocksData( + &mut self, + sortdb: &SortitionDB, + event_id: usize, + new_blocks: &BlocksData, + buffer: bool, + ) -> bool { + let outbound_neighbor_key_opt = self.find_outbound_neighbor(event_id); + + debug!( + "{:?}: Process BlocksData from {:?} with {} entries", + &self.local_peer, + outbound_neighbor_key_opt + .clone() + .or_else(|| { self.check_peer_authenticated(event_id) }), + new_blocks.blocks.len() + ); + + let mut to_buffer = false; + + for BlocksDatum(consensus_hash, block) in new_blocks.blocks.iter() { + let sn = match SortitionDB::get_block_snapshot_consensus( + &sortdb.conn(), + &consensus_hash, + ) { + Ok(Some(sn)) => sn, + Ok(None) => { + if buffer { + debug!( + "{:?}: Will buffer unsolicited BlocksData({}/{}) ({}) -- consensus hash not (yet) recognized", + &self.local_peer, + &consensus_hash, + &block.block_hash(), + StacksBlockHeader::make_index_block_hash( + &consensus_hash, + &block.block_hash() + ) + ); + to_buffer = true; + } else { + debug!( + "{:?}: Will drop unsolicited BlocksData({}/{}) ({}) -- consensus hash not (yet) recognized", + &self.local_peer, + &consensus_hash, + &block.block_hash(), + StacksBlockHeader::make_index_block_hash( + &consensus_hash, + &block.block_hash() + ) + ); + } + continue; + } + Err(e) => { + info!( + "{:?}: Failed to query block snapshot for {}: {:?}", + &self.local_peer, consensus_hash, &e + ); + continue; + } + }; + + if !sn.pox_valid { + info!( + "{:?}: Failed to query snapshot for {}: not on the valid PoX fork", + &self.local_peer, consensus_hash + ); + continue; + } + + if sn.winning_stacks_block_hash != block.block_hash() { + info!( + "{:?}: Ignoring block {} -- winning block was {} (sortition: {})", + &self.local_peer, + block.block_hash(), + sn.winning_stacks_block_hash, + sn.sortition + ); + continue; + } + + // only bother updating the inventory for this event's peer if we have an outbound + // connection to it. 
+ if let Some(outbound_neighbor_key) = outbound_neighbor_key_opt.as_ref() { + let _ = self.handle_unsolicited_inv_update_epoch2x( + sortdb, + event_id, + &outbound_neighbor_key, + &sn.consensus_hash, + false, + ); + } + } + + to_buffer + } + + /// Handle unsolicited MicroblocksData. + /// + /// Don't (yet) validate the data; just verify that it connects to two existing StacksBlocks, + /// and if so, keep it to be passed on to the relayer. + /// + /// Log but do nothing with errors in validation. + /// + /// The caller can call this in one of two ways: with `buffer` set to `true` or `false`. If + /// `buffer` is `true`, then the caller is asking to know if the message can be buffered if it + /// cannot be handled. If it is instead `false`, then the caller is asking to simply try and + /// handle the given message. In both cases, the microblocks will be checked against the local + /// sortition DB and chainstate DB, and if they correspond to a missing stream between two known + /// StacksBlocks, the remote peer's inventory will be updated to reflect that it has this + /// stream. + /// + /// Returns whether or not to buffer. If the microblocks correspond to existing chain state, + /// then this method will indicate to the opposite of `buffer`, which ensures that the messages + /// will never be buffered but instead processed immediately. Otherwise, no buffering will + /// take place. + fn handle_unsolicited_MicroblocksData( + &mut self, + chainstate: &StacksChainState, + event_id: usize, + new_microblocks: &MicroblocksData, + buffer: bool, + ) -> bool { + let outbound_neighbor_key_opt = self.find_outbound_neighbor(event_id); + + debug!( + "{:?}: Process MicroblocksData from {:?} for {} with {} entries", + &self.local_peer, + outbound_neighbor_key_opt.or_else(|| { self.check_peer_authenticated(event_id) }), + &new_microblocks.index_anchor_block, + new_microblocks.microblocks.len() + ); + + // do we have the associated anchored block? 
+ match chainstate.get_block_header_hashes(&new_microblocks.index_anchor_block) { + Ok(Some(_)) => { + // yup; can process now + debug!("{:?}: have microblock parent anchored block {}, so can process its microblocks", &self.local_peer, &new_microblocks.index_anchor_block); + !buffer + } + Ok(None) => { + if buffer { + debug!( + "{:?}: Will buffer unsolicited MicroblocksData({})", + &self.local_peer, &new_microblocks.index_anchor_block + ); + true + } else { + debug!( + "{:?}: Will not buffer unsolicited MicroblocksData({})", + &self.local_peer, &new_microblocks.index_anchor_block + ); + false + } + } + Err(e) => { + warn!( + "{:?}: Failed to get header hashes for {:?}: {:?}", + &self.local_peer, &new_microblocks.index_anchor_block, &e + ); + false + } + } + } + + /// Determine if an unsolicited NakamotoBlockData message contains data we can potentially + /// buffer + pub(crate) fn is_nakamoto_block_bufferable( + &mut self, + sortdb: &SortitionDB, + chainstate: &StacksChainState, + nakamoto_block: &NakamotoBlock, + ) -> bool { + if chainstate + .nakamoto_blocks_db() + .has_nakamoto_block(&nakamoto_block.block_id()) + .unwrap_or(false) + { + debug!( + "{:?}: Aleady have Nakamoto block {}", + &self.local_peer, + &nakamoto_block.block_id() + ); + return false; + } + + let mut can_process = true; + let sn = match SortitionDB::get_block_snapshot_consensus( + &sortdb.conn(), + &nakamoto_block.header.consensus_hash, + ) { + Ok(Some(sn)) => sn, + Ok(None) => { + debug!( + "No sortition {} for block {}", + &nakamoto_block.header.consensus_hash, + &nakamoto_block.block_id() + ); + // we don't have the sortition for this, so we can't process it yet (i.e. 
we need + // to buffer) + can_process = false; + // load the tip so we can load the current reward set data + self.burnchain_tip.clone() + } + Err(e) => { + info!( + "{:?}: Failed to query block snapshot for {}: {:?}", + &self.local_peer, &nakamoto_block.header.consensus_hash, &e + ); + return false; + } + }; + + if !sn.pox_valid { + info!( + "{:?}: Failed to query snapshot for {}: not on the valid PoX fork", + &self.local_peer, &nakamoto_block.header.consensus_hash + ); + return false; + } + + // block must be signed by reward set signers + // TODO + + // the block is well-formed, but we'd buffer if we can't process it yet + !can_process + } + + /// Handle an unsolicited NakamotoBlocksData message. + /// + /// Unlike Stacks epoch 2.x blocks, no change to the remote peer's inventory will take place. + /// This is because a 1-bit indicates the _entire_ tenure is present for a given sortition, and + /// this is usually impossible to tell here. Instead, this handler will return `true` if the + /// sortition identified by the block's consensus hash is known to this node (in which case, + /// the relayer can store it to staging). 
+ /// + /// Returns true if this message should be buffered and re-processed + pub(crate) fn inner_handle_unsolicited_NakamotoBlocksData( + &mut self, + sortdb: &SortitionDB, + chainstate: &StacksChainState, + remote_neighbor_key_opt: Option, + nakamoto_blocks: &NakamotoBlocksData, + ) -> bool { + debug!( + "{:?}: Process NakamotoBlocksData from {:?} with {} entries", + &self.local_peer, + &remote_neighbor_key_opt, + nakamoto_blocks.blocks.len() + ); + + let mut to_buffer = false; + for nakamoto_block in nakamoto_blocks.blocks.iter() { + if self.is_nakamoto_block_bufferable(sortdb, chainstate, nakamoto_block) { + debug!( + "{:?}: Will buffer unsolicited NakamotoBlocksData({}) ({})", + &self.local_peer, + &nakamoto_block.block_id(), + &nakamoto_block.header.consensus_hash, + ); + to_buffer = true; + }; + } + to_buffer + } + + /// Handle an unsolicited NakamotoBlocksData message. + /// + /// Unlike Stacks epoch 2.x blocks, no change to the remote peer's inventory will take place. + /// This is because a 1-bit indicates the _entire_ tenure is present for a given sortition, and + /// this is usually impossible to tell here. Instead, this handler will return `true` if the + /// sortition identified by the block's consensus hash is known to this node (in which case, + /// the relayer can store it to staging). + /// + /// Returns true if this message should be buffered and re-processed + /// + /// Wraps inner_handle_unsolicited_NakamotoBlocksData by resolving the event_id to the optional + /// neighbor key. 
+ fn handle_unsolicited_NakamotoBlocksData(
+ &mut self,
+ sortdb: &SortitionDB,
+ chainstate: &StacksChainState,
+ event_id: usize,
+ nakamoto_blocks: &NakamotoBlocksData,
+ ) -> bool {
+ let outbound_neighbor_key_opt = self
+ .find_outbound_neighbor(event_id)
+ .or_else(|| self.check_peer_authenticated(event_id));
+ self.inner_handle_unsolicited_NakamotoBlocksData(
+ sortdb,
+ chainstate,
+ outbound_neighbor_key_opt,
+ nakamoto_blocks,
+ )
+ }
+
+ /// Handle an unsolicited message, with either the intention of just processing it (in which
+ /// case, `buffer` will be `false`), or with the intention of not only processing it, but also
+ /// determining if it can be buffered and retried later (in which case, `buffer` will be
+ /// `true`).
+ ///
+ /// Returns (true, x) if we should buffer the message and try processing it again later.
+ /// Returns (false, x) if we should *not* buffer this message, because it *won't* be valid
+ /// later.
+ ///
+ /// Returns (x, true) if we should forward the message to the relayer, so it can be processed.
+ /// Returns (x, false) if we should *not* forward the message to the relayer, because it will
+ /// *not* be processed.
+ fn handle_unsolicited_message(
+ &mut self,
+ sortdb: &SortitionDB,
+ chainstate: &StacksChainState,
+ event_id: usize,
+ preamble: &Preamble,
+ payload: &StacksMessageType,
+ ibd: bool,
+ buffer: bool,
+ ) -> (bool, bool) {
+ match payload {
+ // Update our inv state for this peer, but only do so if we have an
+ // outbound connection to it and it's authenticated (we don't synchronize inv
+ // state with inbound peers). Since we will have received this message
+ // from an _inbound_ conversation, we need to find the reciprocal _outbound_
+ // conversation and use _that_ conversation's neighbor key to identify
+ // which inventory we need to update.
+ StacksMessageType::BlocksAvailable(ref new_blocks) => { + // no need to forward to relayer + let to_buffer = self.handle_unsolicited_BlocksAvailable( + sortdb, chainstate, event_id, new_blocks, ibd, buffer, + ); + (to_buffer, false) + } + StacksMessageType::MicroblocksAvailable(ref new_mblocks) => { + // no need to forward to relayer + let to_buffer = self.handle_unsolicited_MicroblocksAvailable( + sortdb, + chainstate, + event_id, + new_mblocks, + ibd, + buffer, + ); + (to_buffer, false) + } + StacksMessageType::Blocks(ref new_blocks) => { + // update inv state for this peer, and always forward to the relayer + let to_buffer = + self.handle_unsolicited_BlocksData(sortdb, event_id, new_blocks, buffer); + + // forward to relayer for processing + (to_buffer, true) + } + StacksMessageType::Microblocks(ref new_mblocks) => { + // update inv state for this peer, and optionally forward to the relayer. + // Note that if these microblocks can be processed *now*, then they *will not* be + // buffered + let to_buffer = self.handle_unsolicited_MicroblocksData( + chainstate, + event_id, + new_mblocks, + buffer, + ); + + // only forward to the relayer if we don't need to buffer it. + (to_buffer, true) + } + StacksMessageType::NakamotoBlocks(ref new_blocks) => { + let to_buffer = if buffer { + self.handle_unsolicited_NakamotoBlocksData( + sortdb, chainstate, event_id, new_blocks, + ) + } else { + // nothing to do if we're not querying about whether we can buffer this. + false + }; + + (to_buffer, true) + } + StacksMessageType::StackerDBPushChunk(ref data) => { + match self.handle_unsolicited_StackerDBPushChunk(event_id, preamble, data) { + Ok(x) => { + // don't buffer, but do reject if invalid + (false, x) + } + Err(e) => { + info!( + "{:?}: failed to handle unsolicited {:?}: {:?}", + &self.local_peer, payload, &e + ); + (false, false) + } + } + } + _ => (false, true), + } + } + + /// Handle unsolicited messages propagated up to us from our ongoing ConversationP2Ps. 
+ /// Return messages that we couldn't handle here, but key them by neighbor, not event, so the
+ /// relayer can do something useful with them.
+ ///
+ /// Invalid messages are dropped silently, with a log message.
+ ///
+ /// If `buffer` is true, then this message will be buffered up and tried again in a subsequent
+ /// call if the handler for it deems the message valid.
+ ///
+ /// If `buffer` is false, then if the message handler deems the message valid, it will be
+ /// forwarded to the relayer.
+ pub fn handle_unsolicited_messages(
+ &mut self,
+ sortdb: &SortitionDB,
+ chainstate: &StacksChainState,
+ unsolicited: HashMap>,
+ ibd: bool,
+ buffer: bool,
+ ) -> HashMap> {
+ let mut unhandled: HashMap> = HashMap::new();
+ for (event_id, messages) in unsolicited.into_iter() {
+ if messages.len() == 0 {
+ // no messages for this event
+ continue;
+ }
+ if buffer && self.check_peer_authenticated(event_id).is_none() {
+ if cfg!(test)
+ && self
+ .connection_opts
+ .test_disable_unsolicited_message_authentication
+ {
+ test_debug!(
+ "{:?}: skip unsolicited message authentication",
+ &self.local_peer
+ );
+ } else {
+ // do not buffer messages from unknown peers
+ // (but it's fine to process messages that were previously buffered, since the peer
+ // may have since disconnected)
+ debug!("Will not handle unsolicited messages from unauthenticated or dead event {}", event_id);
+ continue;
+ }
+ };
+
+ let neighbor_key = if let Some(convo) = self.peers.get(&event_id) {
+ convo.to_neighbor_key()
+ } else {
+ debug!(
+ "{:?}: No longer such neighbor event={}, dropping {} unsolicited messages",
+ &self.local_peer,
+ event_id,
+ messages.len()
+ );
+ continue;
+ };
+
+ debug!("{:?}: Process {} unsolicited messages from {:?}", &self.local_peer, messages.len(), &neighbor_key; "buffer" => %buffer);
+
+ for message in messages.into_iter() {
+ if !buffer {
+ debug!(
+ "{:?}: Re-try handling buffered message {} from {:?}",
+ &self.local_peer,
+
&message.payload.get_message_description(), + &neighbor_key + ); + } + let (to_buffer, relay) = self.handle_unsolicited_message( + sortdb, + chainstate, + event_id, + &message.preamble, + &message.payload, + ibd, + buffer, + ); + if buffer && to_buffer { + self.buffer_data_message(event_id, message); + } else if relay { + // forward to relayer for processing + debug!( + "{:?}: Will forward message {} from {:?} to relayer", + &self.local_peer, + &message.payload.get_message_description(), + &neighbor_key + ); + if let Some(msgs) = unhandled.get_mut(&neighbor_key) { + msgs.push(message); + } else { + unhandled.insert(neighbor_key.clone(), vec![message]); + } + } + } + } + unhandled + } +} From f6dd43cc9b56f13391fa26db6ddb9bfb1bd80741 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 6 Jun 2024 13:22:06 -0400 Subject: [PATCH 03/20] feat: nakamoto block-push logic for handling newly-received Nakamoto blocks. Also, removes tests that are now in net/tests/relay/epoch2x.rs --- stackslib/src/net/relay.rs | 4343 +++++------------------------------- 1 file changed, 563 insertions(+), 3780 deletions(-) diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index 7236ef76e4..f8c5fae144 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -67,6 +67,8 @@ pub const RELAY_DUPLICATE_INFERENCE_WARMUP: usize = 128; pub struct Relayer { /// Connection to the p2p thread p2p: NetworkHandle, + /// connection options + connection_opts: ConnectionOptions, /// StackerDB connection stacker_dbs: StackerDBs, } @@ -77,12 +79,12 @@ pub struct RelayerStats { /// Note that we key on (addr, port), not the full NeighborAddress. /// (TODO: Nothing is done with this yet, but one day we'll use it to probe for network /// choke-points). 
- relay_stats: HashMap, - relay_updates: BTreeMap, + pub(crate) relay_stats: HashMap, + pub(crate) relay_updates: BTreeMap, /// Messages sent from each neighbor recently (includes duplicates) - recent_messages: HashMap>, - recent_updates: BTreeMap, + pub(crate) recent_messages: HashMap>, + pub(crate) recent_updates: BTreeMap, next_priority: u64, } @@ -93,6 +95,7 @@ pub struct ProcessedNetReceipts { pub num_new_blocks: u64, pub num_new_confirmed_microblocks: u64, pub num_new_unconfirmed_microblocks: u64, + pub num_new_nakamoto_blocks: u64, } /// A trait for implementing both mempool event observer methods and stackerdb methods. @@ -170,6 +173,16 @@ impl RelayPayload for StacksMicroblock { } } +impl RelayPayload for NakamotoBlock { + fn get_digest(&self) -> Sha512Trunc256Sum { + let h = self.block_id(); + Sha512Trunc256Sum(h.0) + } + fn get_id(&self) -> String { + format!("NakamotoBlock({})", self.block_id()) + } +} + impl RelayPayload for StacksTransaction { fn get_digest(&self) -> Sha512Trunc256Sum { let h = self.txid(); @@ -317,7 +330,7 @@ impl RelayerStats { } /// Map neighbors to the frequency of their AS numbers in the given neighbors list - fn count_ASNs( + pub(crate) fn count_ASNs( conn: &DBConn, neighbors: &[NeighborKey], ) -> Result, net_error> { @@ -442,7 +455,7 @@ impl RelayerStats { } for l in 0..count { - if norm <= 1 { + if norm == 0 { // just one option break; } @@ -461,8 +474,8 @@ impl RelayerStats { sampled += 1; // sample without replacement - rankings_vec[i].1 -= 1; - norm -= 1; + norm -= rankings_vec[i].1; + rankings_vec[i].1 = 0; break; } } @@ -475,19 +488,24 @@ impl RelayerStats { } impl Relayer { - pub fn new(handle: NetworkHandle, stacker_dbs: StackerDBs) -> Relayer { + pub fn new( + handle: NetworkHandle, + connection_opts: ConnectionOptions, + stacker_dbs: StackerDBs, + ) -> Relayer { Relayer { p2p: handle, + connection_opts, stacker_dbs, } } pub fn from_p2p(network: &mut PeerNetwork, stacker_dbs: StackerDBs) -> Relayer { let handle = 
network.new_handle(1024); - Relayer::new(handle, stacker_dbs) + Relayer::new(handle, network.connection_opts.clone(), stacker_dbs) } - /// Given blocks pushed to us, verify that they correspond to expected block data. + /// Given Stacks 2.x blocks pushed to us, verify that they correspond to expected block data. pub fn validate_blocks_push( conn: &SortitionDBConn, blocks_data: &BlocksData, @@ -518,10 +536,41 @@ impl Relayer { "No such sortition in block with consensus hash {}", consensus_hash ); + return Err(net_error::InvalidMessage); + } + } + Ok(()) + } + + /// Given Nakamoto blocks pushed to us, verify that they correspond to expected block data. + pub fn validate_nakamoto_blocks_push( + conn: &SortitionDBConn, + nakamoto_blocks_data: &NakamotoBlocksData, + ) -> Result<(), net_error> { + for nakamoto_block in nakamoto_blocks_data.blocks.iter() { + // is this the right Stacks block for this sortition? + let Some(sn) = SortitionDB::get_block_snapshot_consensus( + conn.conn(), + &nakamoto_block.header.consensus_hash, + )? + else { + // don't know this sortition yet + continue; + }; + + if !sn.pox_valid { + info!( + "Pushed block from consensus hash {} corresponds to invalid PoX state", + nakamoto_block.header.consensus_hash + ); + continue; + } - // TODO: once PoX is implemented, this can be permitted if we're missing the reward - // window's anchor block for the reward window in which this block lives. Until - // then, it's never okay -- this peer shall be considered broken. + if !sn.sortition { + info!( + "No such sortition in block with consensus hash {}", + &nakamoto_block.header.consensus_hash + ); return Err(net_error::InvalidMessage); } } @@ -668,7 +717,16 @@ impl Relayer { // do we have this block? don't lock the DB needlessly if so. if chainstate .nakamoto_blocks_db() - .has_nakamoto_block(&block.header.block_id())? 
+ .has_nakamoto_block(&block.header.block_id()) + .map_err(|e| { + debug!( + "Failed to determine if we have Nakamoto block {}/{}: {:?}", + &block.header.consensus_hash, + &block.header.block_hash(), + &e + ); + e + })? { debug!("Already have Nakamoto block {}", &block.header.block_id()); return Ok(false); @@ -676,7 +734,13 @@ impl Relayer { let block_sn = SortitionDB::get_block_snapshot_consensus(sort_handle, &block.header.consensus_hash)? - .ok_or(chainstate_error::DBError(db_error::NotFoundError))?; + .ok_or_else(|| { + debug!( + "Failed to load snapshot for consensus hash {}", + &block.header.consensus_hash + ); + chainstate_error::DBError(db_error::NotFoundError) + })?; // NOTE: it's `+ 1` because the first Nakamoto block is built atop the last epoch 2.x // tenure, right after the last 2.x sortition @@ -757,14 +821,16 @@ impl Relayer { Ok(accepted) } - /// Process nakamoto blocks. + /// Process nakamoto blocks that we downloaded. /// Log errors but do not return them. - pub fn process_nakamoto_blocks( + /// Returns the list of blocks we accepted. 
+ pub fn process_downloaded_nakamoto_blocks( sortdb: &SortitionDB, chainstate: &mut StacksChainState, blocks: impl Iterator, coord_comms: Option<&CoordinatorChannels>, - ) -> Result<(), chainstate_error> { + ) -> Result, chainstate_error> { + let mut accepted = vec![]; let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn())?; let mut sort_handle = sortdb.index_handle(&tip.sortition_id); for block in blocks { @@ -773,13 +839,15 @@ impl Relayer { sortdb, &mut sort_handle, chainstate, - block, + block.clone(), coord_comms, ) { warn!("Failed to process Nakamoto block {}: {:?}", &block_id, &e); + } else { + accepted.push(block); } } - Ok(()) + Ok(accepted) } /// Coalesce a set of microblocks into relayer hints and MicroblocksData messages, as calculated by @@ -1343,6 +1411,91 @@ impl Relayer { Ok((mblock_datas, bad_neighbors)) } + /// Preprocess all pushed Nakamoto blocks + /// Return the Nakamoto blocks we can accept (and who relayed them), as well as the + /// list of peers that served us invalid data. + pub(crate) fn process_pushed_nakamoto_blocks( + network_result: &mut NetworkResult, + sortdb: &mut SortitionDB, + chainstate: &mut StacksChainState, + coord_comms: Option<&CoordinatorChannels>, + ) -> Result<(Vec<(Vec, Vec)>, Vec), net_error> { + let mut new_blocks_and_relayers = vec![]; + let mut bad_neighbors = vec![]; + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn())?; + + // process Nakamoto blocks pushed to us. + // If a neighbor sends us an invalid Nakamoto block, then ban them. 
+ for (neighbor_key, relayers_and_block_data) in network_result.pushed_nakamoto_blocks.iter() + { + for (relayers, nakamoto_blocks_data) in relayers_and_block_data.iter() { + let mut good = true; + let mut accepted_blocks = vec![]; + if let Err(_e) = Relayer::validate_nakamoto_blocks_push( + &sortdb.index_conn(), + nakamoto_blocks_data, + ) { + // punish this peer + bad_neighbors.push((*neighbor_key).clone()); + good = false; + } + + for nakamoto_block in nakamoto_blocks_data.blocks.iter() { + if !good { + break; + } + let block_id = nakamoto_block.block_id(); + debug!( + "Received pushed Nakamoto block {} from {}", + block_id, neighbor_key + ); + let mut sort_handle = sortdb.index_handle(&tip.sortition_id); + match Self::process_new_nakamoto_block( + sortdb, + &mut sort_handle, + chainstate, + nakamoto_block.clone(), + coord_comms, + ) { + Ok(accepted) => { + if accepted { + debug!( + "Accepted Nakamoto block {} from {}", + &block_id, neighbor_key + ); + accepted_blocks.push(nakamoto_block.clone()); + } else { + debug!( + "Rejected Nakamoto block {} from {}", + &block_id, &neighbor_key, + ); + } + } + Err(chainstate_error::InvalidStacksBlock(msg)) => { + warn!("Invalid pushed Nakamoto block {}: {}", &block_id, msg); + bad_neighbors.push((*neighbor_key).clone()); + good = false; + break; + } + Err(e) => { + warn!( + "Could not process pushed Nakamoto block {}: {:?}", + &block_id, &e + ); + good = false; + break; + } + } + } + if good && accepted_blocks.len() > 0 { + new_blocks_and_relayers.push((relayers.clone(), accepted_blocks)); + } + } + } + + Ok((new_blocks_and_relayers, bad_neighbors)) + } + /// Verify that a relayed transaction is not problematic. This is a static check -- we only /// look at the tx contents. /// @@ -1634,6 +1787,55 @@ impl Relayer { )) } + /// Process new Nakamoto blocks, both pushed and downloaded. + /// Returns the list of Nakamoto blocks we stored, as well as the list of bad neighbors that + /// sent us invalid blocks. 
+ pub fn process_new_nakamoto_blocks(
+ network_result: &mut NetworkResult,
+ sortdb: &mut SortitionDB,
+ chainstate: &mut StacksChainState,
+ coord_comms: Option<&CoordinatorChannels>,
+ ) -> Result<(Vec<(Vec, Vec)>, Vec), net_error> {
+ // process downloaded Nakamoto blocks.
+ // We treat them as singleton blocks fetched via zero relayers
+ let nakamoto_blocks =
+ std::mem::replace(&mut network_result.nakamoto_blocks, HashMap::new());
+ let mut accepted_nakamoto_blocks_and_relayers =
+ match Self::process_downloaded_nakamoto_blocks(
+ sortdb,
+ chainstate,
+ nakamoto_blocks.into_values(),
+ coord_comms,
+ ) {
+ Ok(accepted) => accepted
+ .into_iter()
+ .map(|block| (vec![], vec![block]))
+ .collect(),
+ Err(e) => {
+ warn!("Failed to process downloaded Nakamoto blocks: {:?}", &e);
+ vec![]
+ }
+ };
+
+ // process pushed Nakamoto blocks
+ let (mut pushed_blocks_and_relayers, bad_neighbors) =
+ match Self::process_pushed_nakamoto_blocks(
+ network_result,
+ sortdb,
+ chainstate,
+ coord_comms,
+ ) {
+ Ok(x) => x,
+ Err(e) => {
+ warn!("Failed to process pushed Nakamoto blocks: {:?}", &e);
+ (vec![], vec![])
+ }
+ };
+
+ accepted_nakamoto_blocks_and_relayers.append(&mut pushed_blocks_and_relayers);
+ Ok((accepted_nakamoto_blocks_and_relayers, bad_neighbors))
+ }
+
 /// Produce blocks-available messages from blocks we just got.
 pub fn load_blocks_available_data(
 sortdb: &SortitionDB,
@@ -1717,7 +1919,7 @@ impl Relayer {
 
 /// Store all new transactions we received, and return the list of transactions that we need to
 /// forward (as well as their relay hints). Also, garbage-collect the mempool.
- fn process_transactions(
+ pub(crate) fn process_transactions(
 network_result: &mut NetworkResult,
 sortdb: &SortitionDB,
 chainstate: &mut StacksChainState,
@@ -2005,31 +2207,87 @@ impl Relayer {
 )
 }
 
- /// Given a network result, consume and store all data.
- /// * Add all blocks and microblocks to staging. 
- /// * Forward BlocksAvailable messages to neighbors for newly-discovered anchored blocks - /// * Forward MicroblocksAvailable messages to neighbors for newly-discovered confirmed microblock streams - /// * Forward along unconfirmed microblocks that we didn't already have - /// * Add all transactions to the mempool. - /// * Forward transactions we didn't already have. - /// * Reload the unconfirmed state, if necessary. - /// Mask errors from invalid data -- all errors due to invalid blocks and invalid data should be captured, and - /// turned into peer bans. - pub fn process_network_result( + /// Relay epoch2 block data + fn relay_epoch2_blocks( + &mut self, + _local_peer: &LocalPeer, + sortdb: &SortitionDB, + new_blocks: HashMap, + new_confirmed_microblocks: HashMap)>, + new_microblocks: Vec<(Vec, MicroblocksData)>, + ) { + // have the p2p thread tell our neighbors about newly-discovered blocks + let new_block_chs = new_blocks.iter().map(|(ch, _)| ch.clone()).collect(); + let available = Relayer::load_blocks_available_data(sortdb, new_block_chs) + .unwrap_or(BlocksAvailableMap::new()); + if available.len() > 0 { + debug!("{:?}: Blocks available: {}", &_local_peer, available.len()); + if let Err(e) = self.p2p.advertize_blocks(available, new_blocks) { + warn!("Failed to advertize new blocks: {:?}", &e); + } + } + + // have the p2p thread tell our neighbors about newly-discovered confirmed microblock streams + let new_mblock_chs = new_confirmed_microblocks + .iter() + .map(|(ch, _)| ch.clone()) + .collect(); + let mblocks_available = Relayer::load_blocks_available_data(sortdb, new_mblock_chs) + .unwrap_or(BlocksAvailableMap::new()); + if mblocks_available.len() > 0 { + debug!( + "{:?}: Confirmed microblock streams available: {}", + &_local_peer, + mblocks_available.len() + ); + if let Err(e) = self + .p2p + .advertize_microblocks(mblocks_available, new_confirmed_microblocks) + { + warn!("Failed to advertize new confirmed microblocks: {:?}", &e); + } + } + + // have 
the p2p thread forward all new unconfirmed microblocks + if new_microblocks.len() > 0 { + debug!( + "{:?}: Unconfirmed microblocks: {}", + &_local_peer, + new_microblocks.len() + ); + for (relayers, mblocks_msg) in new_microblocks.into_iter() { + debug!( + "{:?}: Send {} microblocks for {}", + &_local_peer, + mblocks_msg.microblocks.len(), + &mblocks_msg.index_anchor_block + ); + let msg = StacksMessageType::Microblocks(mblocks_msg); + if let Err(e) = self.p2p.broadcast_message(relayers, msg) { + warn!("Failed to broadcast microblock: {:?}", &e); + } + } + } + } + + /// Process epoch2 block data. + /// Relays blocks and microblocks as needed + /// Returns (num new blocks, num new confirmed microblocks, num new unconfirmed microblocks) + fn process_new_epoch2_blocks( &mut self, _local_peer: &LocalPeer, network_result: &mut NetworkResult, sortdb: &mut SortitionDB, chainstate: &mut StacksChainState, - mempool: &mut MemPoolDB, ibd: bool, coord_comms: Option<&CoordinatorChannels>, - event_observer: Option<&dyn RelayEventDispatcher>, - ) -> Result { + ) -> (u64, u64, u64) { let mut num_new_blocks = 0; let mut num_new_confirmed_microblocks = 0; let mut num_new_unconfirmed_microblocks = 0; - match Relayer::process_new_blocks(network_result, sortdb, chainstate, coord_comms) { + + // Process epoch2 data + match Self::process_new_blocks(network_result, sortdb, chainstate, coord_comms) { Ok((new_blocks, new_confirmed_microblocks, new_microblocks, bad_block_neighbors)) => { // report quantities of new data in the receipts num_new_blocks = new_blocks.len() as u64; @@ -2051,79 +2309,202 @@ impl Relayer { // only relay if not ibd if !ibd { - // have the p2p thread tell our neighbors about newly-discovered blocks - let new_block_chs = new_blocks.iter().map(|(ch, _)| ch.clone()).collect(); - let available = Relayer::load_blocks_available_data(sortdb, new_block_chs)?; - if available.len() > 0 { - debug!("{:?}: Blocks available: {}", &_local_peer, available.len()); - if let Err(e) = 
self.p2p.advertize_blocks(available, new_blocks) { - warn!("Failed to advertize new blocks: {:?}", &e); - } - } + self.relay_epoch2_blocks( + _local_peer, + sortdb, + new_blocks, + new_confirmed_microblocks, + new_microblocks, + ); + } + } + Err(e) => { + warn!("Failed to process new blocks: {:?}", &e); + } + } + ( + num_new_blocks, + num_new_confirmed_microblocks, + num_new_unconfirmed_microblocks, + ) + } - // have the p2p thread tell our neighbors about newly-discovered confirmed microblock streams - let new_mblock_chs = new_confirmed_microblocks - .iter() - .map(|(ch, _)| ch.clone()) - .collect(); - let mblocks_available = - Relayer::load_blocks_available_data(sortdb, new_mblock_chs)?; - if mblocks_available.len() > 0 { - debug!( - "{:?}: Confirmed microblock streams available: {}", - &_local_peer, - mblocks_available.len() + /// Get the last N sortitions, in order from the sortition tip to the n-1st ancestor + pub fn get_last_n_sortitions( + sortdb: &SortitionDB, + n: u64, + ) -> Result, chainstate_error> { + let mut ret = vec![]; + let sort_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn())?; + ret.push(sort_tip); + + for _i in 0..(n.saturating_sub(1)) { + let last_sn_parent_sortition_id = ret + .last() + .map(|sn| sn.parent_sortition_id.clone()) + .expect("Infallible -- ret is non-empty"); + let sn = SortitionDB::get_block_snapshot(sortdb.conn(), &last_sn_parent_sortition_id)? + .ok_or(db_error::NotFoundError)?; + ret.push(sn); + } + Ok(ret) + } + + /// Relay Nakamoto blocks. + /// By default, only sends them if we don't have them yet. + /// This can be overridden by setting `force_send` to true. 
+ pub fn relay_epoch3_blocks( + &mut self, + _local_peer: &LocalPeer, + sortdb: &SortitionDB, + chainstate: &StacksChainState, + nakamoto_blocks_and_relayers: Vec<(Vec, Vec)>, + force_send: bool, + ) { + debug!( + "{:?}: relay {} sets of Nakamoto blocks", + _local_peer, + nakamoto_blocks_and_relayers.len() + ); + + // the relay strategy is to only send blocks that are within + // `connection_opts.max_nakamoto_block_relay_age`, which is the number of + // burnchain sortitions that have happened since its tenure began. The + // intuition is that nodes that are in IBD will be downloading blocks anyway, + // but nodes that are at or near the chain tip would benefit from having blocks + // pushed to them. + let Ok(relay_sortitions) = + Self::get_last_n_sortitions(sortdb, self.connection_opts.max_nakamoto_block_relay_age) + .map_err(|e| warn!("Failed to load last N sortitions: {:?}", &e)) + else { + return; + }; + + let relay_tenures: HashSet<_> = relay_sortitions + .into_iter() + .map(|sn| sn.consensus_hash) + .collect(); + + for (relayers, blocks) in nakamoto_blocks_and_relayers.into_iter() { + let relay_blocks: Vec<_> = blocks + .into_iter() + .filter(|blk| { + // don't relay blocks for non-recent tenures + if !relay_tenures.contains(&blk.header.consensus_hash) { + test_debug!( + "Do not relay {} -- {} is not recent", + &blk.header.block_id(), + &blk.header.consensus_hash ); - if let Err(e) = self - .p2p - .advertize_microblocks(mblocks_available, new_confirmed_microblocks) - { - warn!("Failed to advertize new confirmed microblocks: {:?}", &e); - } + return false; + } + // don't relay blocks we already have. + // If we have a DB error in figuring this out, then don't relay by + // default (lest a faulty DB cause the node to spam the network). 
+ if !force_send + && chainstate + .nakamoto_blocks_db() + .has_nakamoto_block(&blk.block_id()) + .unwrap_or(true) + { + return false; } + true + }) + .collect(); - // have the p2p thread forward all new unconfirmed microblocks - if new_microblocks.len() > 0 { - debug!( - "{:?}: Unconfirmed microblocks: {}", - &_local_peer, - new_microblocks.len() - ); - for (relayers, mblocks_msg) in new_microblocks.into_iter() { - debug!( - "{:?}: Send {} microblocks for {}", - &_local_peer, - mblocks_msg.microblocks.len(), - &mblocks_msg.index_anchor_block - ); - let msg = StacksMessageType::Microblocks(mblocks_msg); - if let Err(e) = self.p2p.broadcast_message(relayers, msg) { - warn!("Failed to broadcast microblock: {:?}", &e); - } - } + debug!( + "{:?}: Forward {} Nakamoto blocks from {:?}", + _local_peer, + relay_blocks.len(), + &relayers + ); + + if relay_blocks.len() == 0 { + continue; + } + + for _block in relay_blocks.iter() { + test_debug!( + "{:?}: Forward Nakamoto block {}/{}", + _local_peer, + &_block.header.consensus_hash, + &_block.header.block_hash() + ); + } + + let msg = StacksMessageType::NakamotoBlocks(NakamotoBlocksData { + blocks: relay_blocks, + }); + if let Err(e) = self.p2p.broadcast_message(relayers, msg) { + warn!("Failed to broadcast Nakamoto blocks: {:?}", &e); + } + } + } + + /// Process epoch3 data + /// Relay new nakamoto blocks if not in ibd + /// Returns number of new nakamoto blocks + pub fn process_new_epoch3_blocks( + &mut self, + _local_peer: &LocalPeer, + network_result: &mut NetworkResult, + sortdb: &mut SortitionDB, + chainstate: &mut StacksChainState, + ibd: bool, + coord_comms: Option<&CoordinatorChannels>, + ) -> u64 { + let mut num_new_nakamoto_blocks = 0; + + match Self::process_new_nakamoto_blocks(network_result, sortdb, chainstate, coord_comms) { + Ok((nakamoto_blocks_and_relayers, bad_neighbors)) => { + num_new_nakamoto_blocks = nakamoto_blocks_and_relayers + .iter() + .fold(0, |acc, (_relayers, blocks)| acc + blocks.len()) + as 
u64; + + // punish bad peers + if bad_neighbors.len() > 0 { + debug!("{:?}: Ban {} peers", &_local_peer, bad_neighbors.len()); + if let Err(e) = self.p2p.ban_peers(bad_neighbors) { + warn!("Failed to ban bad-block peers: {:?}", &e); } } + + // relay if not IBD + if !ibd && nakamoto_blocks_and_relayers.len() > 0 { + self.relay_epoch3_blocks( + _local_peer, + sortdb, + chainstate, + nakamoto_blocks_and_relayers, + false, + ); + } } Err(e) => { - warn!("Failed to process new blocks: {:?}", &e); + warn!("Failed to process new Nakamoto blocks: {:?}", &e); } - }; - - let nakamoto_blocks = - std::mem::replace(&mut network_result.nakamoto_blocks, HashMap::new()); - if let Err(e) = Relayer::process_nakamoto_blocks( - sortdb, - chainstate, - nakamoto_blocks.into_values(), - coord_comms, - ) { - warn!("Failed to process Nakamoto blocks: {:?}", &e); } + num_new_nakamoto_blocks + } + /// Process new transactions + /// Returns the list of accepted txs + pub fn process_new_transactions( + &mut self, + _local_peer: &LocalPeer, + network_result: &mut NetworkResult, + sortdb: &mut SortitionDB, + chainstate: &mut StacksChainState, + mempool: &mut MemPoolDB, + ibd: bool, + event_observer: Option<&dyn RelayEventDispatcher>, + ) -> Vec { + // process new transactions let mut mempool_txs_added = vec![]; - - // only care about transaction forwarding if not IBD if !ibd { + // only care about transaction forwarding if not IBD. // store all transactions, and forward the novel ones to neighbors test_debug!( "{:?}: Process {} transaction(s)", @@ -2136,7 +2517,8 @@ impl Relayer { chainstate, mempool, event_observer.map(|obs| obs.as_mempool_event_dispatcher()), - )?; + ) + .unwrap_or(vec![]); if new_txs.len() > 0 { debug!( @@ -2155,24 +2537,79 @@ impl Relayer { } } } + mempool_txs_added + } - let mut processed_unconfirmed_state = Default::default(); - - // finally, refresh the unconfirmed chainstate, if need be. 
- // only bother if we're not in IBD; otherwise this is a waste of time - if network_result.has_microblocks() && !ibd { - processed_unconfirmed_state = Relayer::refresh_unconfirmed(chainstate, sortdb); - } - - // push events for HTTP-uploaded stacker DB chunks - Relayer::process_uploaded_stackerdb_chunks( - mem::replace(&mut network_result.uploaded_stackerdb_chunks, vec![]), - event_observer.map(|obs| obs.as_stackerdb_event_dispatcher()), - ); - - // store downloaded stacker DB chunks - Relayer::process_stacker_db_chunks( - &mut self.stacker_dbs, + /// Given a network result, consume and store all data. + /// * Add all blocks and microblocks to staging. + /// * Forward BlocksAvailable messages to neighbors for newly-discovered anchored blocks + /// * Forward MicroblocksAvailable messages to neighbors for newly-discovered confirmed microblock streams + /// * Forward along unconfirmed microblocks that we didn't already have + /// * Add all transactions to the mempool. + /// * Forward transactions we didn't already have. + /// * Reload the unconfirmed state, if necessary. + /// Mask errors from invalid data -- all errors due to invalid blocks and invalid data should be captured, and + /// turned into peer bans. 
+ pub fn process_network_result( + &mut self, + _local_peer: &LocalPeer, + network_result: &mut NetworkResult, + sortdb: &mut SortitionDB, + chainstate: &mut StacksChainState, + mempool: &mut MemPoolDB, + ibd: bool, + coord_comms: Option<&CoordinatorChannels>, + event_observer: Option<&dyn RelayEventDispatcher>, + ) -> Result { + // process epoch2 data + let (num_new_blocks, num_new_confirmed_microblocks, num_new_unconfirmed_microblocks) = self + .process_new_epoch2_blocks( + _local_peer, + network_result, + sortdb, + chainstate, + ibd, + coord_comms, + ); + + // process epoch3 data + let num_new_nakamoto_blocks = self.process_new_epoch3_blocks( + _local_peer, + network_result, + sortdb, + chainstate, + ibd, + coord_comms, + ); + + // process transactions + let mempool_txs_added = self.process_new_transactions( + _local_peer, + network_result, + sortdb, + chainstate, + mempool, + ibd, + event_observer, + ); + + // finally, refresh the unconfirmed chainstate, if need be. + // only bother if we're not in IBD; otherwise this is a waste of time + let processed_unconfirmed_state = if network_result.has_microblocks() && !ibd { + Relayer::refresh_unconfirmed(chainstate, sortdb) + } else { + Default::default() + }; + + // push events for HTTP-uploaded stacker DB chunks + Relayer::process_uploaded_stackerdb_chunks( + mem::replace(&mut network_result.uploaded_stackerdb_chunks, vec![]), + event_observer.map(|obs| obs.as_stackerdb_event_dispatcher()), + ); + + // store downloaded stacker DB chunks + Relayer::process_stacker_db_chunks( + &mut self.stacker_dbs, &network_result.stacker_db_configs, mem::replace(&mut network_result.stacker_db_sync_results, vec![]), event_observer.map(|obs| obs.as_stackerdb_event_dispatcher()), @@ -2192,6 +2629,7 @@ impl Relayer { num_new_blocks, num_new_confirmed_microblocks, num_new_unconfirmed_microblocks, + num_new_nakamoto_blocks, }; Ok(receipts) @@ -2609,6 +3047,15 @@ impl PeerNetwork { } } + for (nk, nakamoto_data) in 
network_result.pushed_nakamoto_blocks.iter() { + for (_, nakamoto_msg) in nakamoto_data.iter() { + for nakamoto_block in nakamoto_msg.blocks.iter() { + self.relayer_stats + .add_relayed_message((*nk).clone(), nakamoto_block); + } + } + } + for (nk, txs) in network_result.pushed_transactions.iter() { for (_, tx) in txs.iter() { self.relayer_stats.add_relayed_message((*nk).clone(), tx); @@ -2618,3668 +3065,4 @@ impl PeerNetwork { } #[cfg(test)] -pub mod test { - use std::cell::RefCell; - use std::collections::HashMap; - - use clarity::vm::ast::stack_depth_checker::AST_CALL_STACK_DEPTH_BUFFER; - use clarity::vm::ast::ASTRules; - use clarity::vm::costs::LimitedCostTracker; - use clarity::vm::database::ClarityDatabase; - use clarity::vm::types::QualifiedContractIdentifier; - use clarity::vm::{ClarityVersion, MAX_CALL_STACK_DEPTH}; - use stacks_common::address::AddressHashMode; - use stacks_common::types::chainstate::{ - BlockHeaderHash, StacksBlockId, StacksWorkScore, TrieHash, - }; - use stacks_common::types::Address; - use stacks_common::util::hash::MerkleTree; - use stacks_common::util::sleep_ms; - use stacks_common::util::vrf::VRFProof; - - use super::*; - use crate::burnchains::tests::TestMiner; - use crate::chainstate::stacks::db::blocks::{MINIMUM_TX_FEE, MINIMUM_TX_FEE_RATE_PER_BYTE}; - use crate::chainstate::stacks::miner::{BlockBuilderSettings, StacksMicroblockBuilder}; - use crate::chainstate::stacks::test::codec_all_transactions; - use crate::chainstate::stacks::tests::{ - make_coinbase, make_coinbase_with_nonce, make_smart_contract_with_version, - make_user_stacks_transfer, - }; - use crate::chainstate::stacks::{Error as ChainstateError, *}; - use crate::clarity_vm::clarity::ClarityConnection; - use crate::core::*; - use crate::net::api::getinfo::RPCPeerInfoData; - use crate::net::asn::*; - use crate::net::chat::*; - use crate::net::codec::*; - use crate::net::download::*; - use crate::net::http::{HttpRequestContents, HttpRequestPreamble}; - use 
crate::net::httpcore::StacksHttpMessage; - use crate::net::inv::inv2x::*; - use crate::net::test::*; - use crate::net::tests::download::epoch2x::run_get_blocks_and_microblocks; - use crate::net::*; - use crate::util_lib::test::*; - - #[test] - fn test_relayer_stats_add_relyed_messages() { - let mut relay_stats = RelayerStats::new(); - - let all_transactions = codec_all_transactions( - &TransactionVersion::Testnet, - 0x80000000, - &TransactionAnchorMode::Any, - &TransactionPostConditionMode::Allow, - ); - assert!(all_transactions.len() > MAX_RECENT_MESSAGES); - - eprintln!("Test with {} transactions", all_transactions.len()); - - let nk = NeighborKey { - peer_version: 12345, - network_id: 0x80000000, - addrbytes: PeerAddress([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 127, 0, 0, 1]), - port: 54321, - }; - - // never overflow recent messages for a neighbor - for (i, tx) in all_transactions.iter().enumerate() { - relay_stats.add_relayed_message(nk.clone(), tx); - - assert_eq!(relay_stats.recent_messages.len(), 1); - assert!(relay_stats.recent_messages.get(&nk).unwrap().len() <= MAX_RECENT_MESSAGES); - - assert_eq!(relay_stats.recent_updates.len(), 1); - } - - assert_eq!( - relay_stats.recent_messages.get(&nk).unwrap().len(), - MAX_RECENT_MESSAGES - ); - - for i in (all_transactions.len() - MAX_RECENT_MESSAGES)..MAX_RECENT_MESSAGES { - let digest = all_transactions[i].get_digest(); - let mut found = false; - for (_, hash) in relay_stats.recent_messages.get(&nk).unwrap().iter() { - found = found || (*hash == digest); - } - if !found { - assert!(false); - } - } - - // never overflow number of neighbors tracked - for i in 0..(MAX_RELAYER_STATS + 1) { - let mut new_nk = nk.clone(); - new_nk.peer_version += i as u32; - - relay_stats.add_relayed_message(new_nk, &all_transactions[0]); - - assert!(relay_stats.recent_updates.len() <= i + 1); - assert!(relay_stats.recent_updates.len() <= MAX_RELAYER_STATS); - } - } - - #[test] - fn test_relayer_merge_stats() { - let mut 
relayer_stats = RelayerStats::new(); - - let na = NeighborAddress { - addrbytes: PeerAddress([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 127, 0, 0, 1]), - port: 54321, - public_key_hash: Hash160([0u8; 20]), - }; - - let relay_stats = RelayStats { - num_messages: 1, - num_bytes: 1, - last_seen: 1, - }; - - let mut rs = HashMap::new(); - rs.insert(na.clone(), relay_stats.clone()); - - relayer_stats.merge_relay_stats(rs); - assert_eq!(relayer_stats.relay_stats.len(), 1); - assert_eq!(relayer_stats.relay_stats.get(&na).unwrap().num_messages, 1); - assert_eq!(relayer_stats.relay_stats.get(&na).unwrap().num_bytes, 1); - assert_eq!(relayer_stats.relay_stats.get(&na).unwrap().last_seen, 1); - assert_eq!(relayer_stats.relay_updates.len(), 1); - - let now = get_epoch_time_secs() + 60; - - let relay_stats_2 = RelayStats { - num_messages: 2, - num_bytes: 2, - last_seen: now, - }; - - let mut rs = HashMap::new(); - rs.insert(na.clone(), relay_stats_2.clone()); - - relayer_stats.merge_relay_stats(rs); - assert_eq!(relayer_stats.relay_stats.len(), 1); - assert_eq!(relayer_stats.relay_stats.get(&na).unwrap().num_messages, 3); - assert_eq!(relayer_stats.relay_stats.get(&na).unwrap().num_bytes, 3); - assert!( - relayer_stats.relay_stats.get(&na).unwrap().last_seen < now - && relayer_stats.relay_stats.get(&na).unwrap().last_seen >= get_epoch_time_secs() - ); - assert_eq!(relayer_stats.relay_updates.len(), 1); - - let relay_stats_3 = RelayStats { - num_messages: 3, - num_bytes: 3, - last_seen: 0, - }; - - let mut rs = HashMap::new(); - rs.insert(na.clone(), relay_stats_3.clone()); - - relayer_stats.merge_relay_stats(rs); - assert_eq!(relayer_stats.relay_stats.len(), 1); - assert_eq!(relayer_stats.relay_stats.get(&na).unwrap().num_messages, 3); - assert_eq!(relayer_stats.relay_stats.get(&na).unwrap().num_bytes, 3); - assert!( - relayer_stats.relay_stats.get(&na).unwrap().last_seen < now - && relayer_stats.relay_stats.get(&na).unwrap().last_seen >= get_epoch_time_secs() - ); - 
assert_eq!(relayer_stats.relay_updates.len(), 1); - - for i in 0..(MAX_RELAYER_STATS + 1) { - let na = NeighborAddress { - addrbytes: PeerAddress([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 127, 0, 0, 1]), - port: 14321 + (i as u16), - public_key_hash: Hash160([0u8; 20]), - }; - - let now = get_epoch_time_secs() + (i as u64) + 1; - - let relay_stats = RelayStats { - num_messages: 1, - num_bytes: 1, - last_seen: now, - }; - - let mut rs = HashMap::new(); - rs.insert(na.clone(), relay_stats.clone()); - - relayer_stats.merge_relay_stats(rs); - assert!(relayer_stats.relay_stats.len() <= MAX_RELAYER_STATS); - assert_eq!(relayer_stats.relay_stats.get(&na).unwrap().num_messages, 1); - assert_eq!(relayer_stats.relay_stats.get(&na).unwrap().num_bytes, 1); - assert_eq!(relayer_stats.relay_stats.get(&na).unwrap().last_seen, now); - } - } - - #[test] - fn test_relay_inbound_peer_rankings() { - let mut relay_stats = RelayerStats::new(); - - let all_transactions = codec_all_transactions( - &TransactionVersion::Testnet, - 0x80000000, - &TransactionAnchorMode::Any, - &TransactionPostConditionMode::Allow, - ); - assert!(all_transactions.len() > MAX_RECENT_MESSAGES); - - let nk_1 = NeighborKey { - peer_version: 12345, - network_id: 0x80000000, - addrbytes: PeerAddress([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 127, 0, 0, 1]), - port: 54321, - }; - - let nk_2 = NeighborKey { - peer_version: 12345, - network_id: 0x80000000, - addrbytes: PeerAddress([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 127, 0, 0, 1]), - port: 54322, - }; - - let nk_3 = NeighborKey { - peer_version: 12345, - network_id: 0x80000000, - addrbytes: PeerAddress([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 127, 0, 0, 1]), - port: 54323, - }; - - let dups = relay_stats.count_relay_dups(&all_transactions[0]); - assert_eq!(dups.len(), 0); - - relay_stats.add_relayed_message(nk_1.clone(), &all_transactions[0]); - relay_stats.add_relayed_message(nk_1.clone(), &all_transactions[0]); - 
relay_stats.add_relayed_message(nk_1.clone(), &all_transactions[0]); - - let dups = relay_stats.count_relay_dups(&all_transactions[0]); - assert_eq!(dups.len(), 1); - assert_eq!(*dups.get(&nk_1).unwrap(), 3); - - relay_stats.add_relayed_message(nk_2.clone(), &all_transactions[0]); - relay_stats.add_relayed_message(nk_2.clone(), &all_transactions[0]); - relay_stats.add_relayed_message(nk_2.clone(), &all_transactions[0]); - relay_stats.add_relayed_message(nk_2.clone(), &all_transactions[0]); - - let dups = relay_stats.count_relay_dups(&all_transactions[0]); - assert_eq!(dups.len(), 2); - assert_eq!(*dups.get(&nk_1).unwrap(), 3); - assert_eq!(*dups.get(&nk_2).unwrap(), 4); - - // total dups == 7 - let dist = relay_stats.get_inbound_relay_rankings( - &vec![nk_1.clone(), nk_2.clone(), nk_3.clone()], - &all_transactions[0], - 0, - ); - assert_eq!(*dist.get(&nk_1).unwrap(), 7 - 3 + 1); - assert_eq!(*dist.get(&nk_2).unwrap(), 7 - 4 + 1); - assert_eq!(*dist.get(&nk_3).unwrap(), 7 + 1); - - // high warmup period - let dist = relay_stats.get_inbound_relay_rankings( - &vec![nk_1.clone(), nk_2.clone(), nk_3.clone()], - &all_transactions[0], - 100, - ); - assert_eq!(*dist.get(&nk_1).unwrap(), 100 + 1); - assert_eq!(*dist.get(&nk_2).unwrap(), 100 + 1); - assert_eq!(*dist.get(&nk_3).unwrap(), 100 + 1); - } - - #[test] - fn test_relay_outbound_peer_rankings() { - let relay_stats = RelayerStats::new(); - - let asn1 = ASEntry4 { - prefix: 0x10000000, - mask: 8, - asn: 1, - org: 1, - }; - - let asn2 = ASEntry4 { - prefix: 0x20000000, - mask: 8, - asn: 2, - org: 2, - }; - - let nk_1 = NeighborKey { - peer_version: 12345, - network_id: 0x80000000, - addrbytes: PeerAddress([ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 0x10, 0x11, 0x12, 0x13, - ]), - port: 54321, - }; - - let nk_2 = NeighborKey { - peer_version: 12345, - network_id: 0x80000000, - addrbytes: PeerAddress([ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 0x20, 0x21, 0x22, 0x23, - ]), - port: 54322, - }; - - let nk_3 = 
NeighborKey { - peer_version: 12345, - network_id: 0x80000000, - addrbytes: PeerAddress([ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 0x20, 0x21, 0x22, 0x24, - ]), - port: 54323, - }; - - let n1 = Neighbor { - addr: nk_1.clone(), - public_key: Secp256k1PublicKey::from_hex( - "0260569384baa726f877d47045931e5310383f18d0b243a9b6c095cee6ef19abd6", - ) - .unwrap(), - expire_block: 4302, - last_contact_time: 0, - allowed: 0, - denied: 0, - asn: 1, - org: 1, - in_degree: 0, - out_degree: 0, - }; - - let n2 = Neighbor { - addr: nk_2.clone(), - public_key: Secp256k1PublicKey::from_hex( - "02465f9ff58dfa8e844fec86fa5fc3fd59c75ea807e20d469b0a9f885d2891fbd4", - ) - .unwrap(), - expire_block: 4302, - last_contact_time: 0, - allowed: 0, - denied: 0, - asn: 2, - org: 2, - in_degree: 0, - out_degree: 0, - }; - - let n3 = Neighbor { - addr: nk_3.clone(), - public_key: Secp256k1PublicKey::from_hex( - "032d8a1ea2282c1514fdc1a6f21019561569d02a225cf7c14b4f803b0393cef031", - ) - .unwrap(), - expire_block: 4302, - last_contact_time: 0, - allowed: 0, - denied: 0, - asn: 2, - org: 2, - in_degree: 0, - out_degree: 0, - }; - - let peerdb = PeerDB::connect_memory( - 0x80000000, - 0, - 4032, - UrlString::try_from("http://foo.com").unwrap(), - &vec![asn1, asn2], - &vec![n1.clone(), n2.clone(), n3.clone()], - ) - .unwrap(); - - let asn_count = RelayerStats::count_ASNs( - peerdb.conn(), - &vec![nk_1.clone(), nk_2.clone(), nk_3.clone()], - ) - .unwrap(); - assert_eq!(asn_count.len(), 3); - assert_eq!(*asn_count.get(&nk_1).unwrap(), 1); - assert_eq!(*asn_count.get(&nk_2).unwrap(), 2); - assert_eq!(*asn_count.get(&nk_3).unwrap(), 2); - - let ranking = relay_stats - .get_outbound_relay_rankings(&peerdb, &vec![nk_1.clone(), nk_2.clone(), nk_3.clone()]) - .unwrap(); - assert_eq!(ranking.len(), 3); - assert_eq!(*ranking.get(&nk_1).unwrap(), 5 - 1 + 1); - assert_eq!(*ranking.get(&nk_2).unwrap(), 5 - 2 + 1); - assert_eq!(*ranking.get(&nk_3).unwrap(), 5 - 2 + 1); - - let ranking = relay_stats - 
.get_outbound_relay_rankings(&peerdb, &vec![nk_2.clone(), nk_3.clone()]) - .unwrap(); - assert_eq!(ranking.len(), 2); - assert_eq!(*ranking.get(&nk_2).unwrap(), 4 - 2 + 1); - assert_eq!(*ranking.get(&nk_3).unwrap(), 4 - 2 + 1); - } - - #[test] - #[ignore] - fn test_get_blocks_and_microblocks_3_peers_push_available() { - with_timeout(600, || { - run_get_blocks_and_microblocks( - "test_get_blocks_and_microblocks_3_peers_push_available", - 4200, - 3, - |ref mut peer_configs| { - // build initial network topology. - assert_eq!(peer_configs.len(), 3); - - // peer 0 produces the blocks - peer_configs[0].connection_opts.disable_chat_neighbors = true; - - // peer 1 downloads the blocks from peer 0, and sends - // BlocksAvailable and MicroblocksAvailable messages to - // peer 2. - peer_configs[1].connection_opts.disable_chat_neighbors = true; - - // peer 2 learns about the blocks and microblocks from peer 1's - // BlocksAvaiable and MicroblocksAvailable messages, but - // not from inv syncs. - peer_configs[2].connection_opts.disable_chat_neighbors = true; - peer_configs[2].connection_opts.disable_inv_sync = true; - - // disable nat punches -- disconnect/reconnect - // clears inv state - peer_configs[0].connection_opts.disable_natpunch = true; - peer_configs[1].connection_opts.disable_natpunch = true; - peer_configs[2].connection_opts.disable_natpunch = true; - - // do not push blocks and microblocks; only announce them - peer_configs[0].connection_opts.disable_block_push = true; - peer_configs[1].connection_opts.disable_block_push = true; - peer_configs[2].connection_opts.disable_block_push = true; - - peer_configs[0].connection_opts.disable_microblock_push = true; - peer_configs[1].connection_opts.disable_microblock_push = true; - peer_configs[2].connection_opts.disable_microblock_push = true; - - // generous timeouts - peer_configs[0].connection_opts.connect_timeout = 180; - peer_configs[1].connection_opts.connect_timeout = 180; - 
peer_configs[2].connection_opts.connect_timeout = 180; - peer_configs[0].connection_opts.timeout = 180; - peer_configs[1].connection_opts.timeout = 180; - peer_configs[2].connection_opts.timeout = 180; - - let peer_0 = peer_configs[0].to_neighbor(); - let peer_1 = peer_configs[1].to_neighbor(); - let peer_2 = peer_configs[2].to_neighbor(); - - peer_configs[0].add_neighbor(&peer_1); - peer_configs[1].add_neighbor(&peer_0); - peer_configs[2].add_neighbor(&peer_1); - }, - |num_blocks, ref mut peers| { - let tip = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - let this_reward_cycle = peers[0] - .config - .burnchain - .block_height_to_reward_cycle(tip.block_height) - .unwrap(); - - // build up block data to replicate - let mut block_data = vec![]; - for _ in 0..num_blocks { - // only produce blocks for a single reward - // cycle, since pushing block/microblock - // announcements in reward cycles the remote - // peer doesn't know about won't work. 
- let tip = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - if peers[0] - .config - .burnchain - .block_height_to_reward_cycle(tip.block_height) - .unwrap() - != this_reward_cycle - { - continue; - } - - let (mut burn_ops, stacks_block, microblocks) = - peers[0].make_default_tenure(); - - let (_, burn_header_hash, consensus_hash) = - peers[0].next_burnchain_block(burn_ops.clone()); - peers[0].process_stacks_epoch_at_tip(&stacks_block, µblocks); - - TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); - - for i in 1..peers.len() { - peers[i].next_burnchain_block_raw(burn_ops.clone()); - } - - let sn = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - block_data.push(( - sn.consensus_hash.clone(), - Some(stacks_block), - Some(microblocks), - )); - } - - assert_eq!(block_data.len(), 5); - - block_data - }, - |ref mut peers| { - // make sure peer 2's inv has an entry for peer 1, even - // though it's not doing an inv sync. 
This is required for the downloader to - // work, and for (Micro)BlocksAvailable messages to be accepted - let peer_1_nk = peers[1].to_neighbor().addr; - let peer_2_nk = peers[2].to_neighbor().addr; - let bc = peers[1].config.burnchain.clone(); - match peers[2].network.inv_state { - Some(ref mut inv_state) => { - if inv_state.get_stats(&peer_1_nk).is_none() { - test_debug!("initialize inv statistics for peer 1 in peer 2"); - inv_state.add_peer(peer_1_nk.clone(), true); - if let Some(ref mut stats) = inv_state.get_stats_mut(&peer_1_nk) { - stats.scans = 1; - stats.inv.merge_pox_inv(&bc, 0, 6, vec![0xff], false); - stats.inv.merge_blocks_inv( - 0, - 30, - vec![0, 0, 0, 0, 0], - vec![0, 0, 0, 0, 0], - false, - ); - } else { - panic!("Unable to instantiate inv stats for {:?}", &peer_1_nk); - } - } else { - test_debug!("peer 2 has inv state for peer 1"); - } - } - None => { - test_debug!("No inv state for peer 1"); - } - } - - let tip = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - let this_reward_cycle = peers[0] - .config - .burnchain - .block_height_to_reward_cycle(tip.block_height) - .unwrap(); - - let peer_1_nk = peers[1].to_neighbor().addr; - match peers[2].network.inv_state { - Some(ref mut inv_state) => { - if inv_state.get_stats(&peer_1_nk).is_none() { - test_debug!("initialize inv statistics for peer 1 in peer 2"); - inv_state.add_peer(peer_1_nk.clone(), true); - - inv_state - .get_stats_mut(&peer_1_nk) - .unwrap() - .inv - .num_reward_cycles = this_reward_cycle; - inv_state.get_stats_mut(&peer_1_nk).unwrap().inv.pox_inv = - vec![0x3f]; - } else { - test_debug!("peer 2 has inv state for peer 1"); - } - } - None => { - test_debug!("No inv state for peer 2"); - } - } - - // peer 2 should never see a BlocksInv - // message. 
That would imply it asked for an inv - for (_, convo) in peers[2].network.peers.iter() { - assert_eq!( - convo - .stats - .get_message_recv_count(StacksMessageID::BlocksInv), - 0 - ); - } - }, - |ref peer| { - // check peer health - // TODO - true - }, - |_| true, - ); - }) - } - - fn is_peer_connected(peer: &TestPeer, dest: &NeighborKey) -> bool { - let event_id = match peer.network.events.get(dest) { - Some(evid) => *evid, - None => { - return false; - } - }; - - match peer.network.peers.get(&event_id) { - Some(convo) => { - return convo.is_authenticated(); - } - None => { - return false; - } - } - } - - fn push_message( - peer: &mut TestPeer, - dest: &NeighborKey, - relay_hints: Vec, - msg: StacksMessageType, - ) -> bool { - let event_id = match peer.network.events.get(dest) { - Some(evid) => *evid, - None => { - panic!("Unreachable peer: {:?}", dest); - } - }; - - let relay_msg = match peer.network.peers.get_mut(&event_id) { - Some(convo) => convo - .sign_relay_message( - &peer.network.local_peer, - &peer.network.chain_view, - relay_hints, - msg, - ) - .unwrap(), - None => { - panic!("No such event ID {} from neighbor {}", event_id, dest); - } - }; - - match peer.network.relay_signed_message(dest, relay_msg.clone()) { - Ok(_) => { - return true; - } - Err(net_error::OutboxOverflow) => { - test_debug!( - "{:?} outbox overflow; try again later", - &peer.to_neighbor().addr - ); - return false; - } - Err(net_error::SendError(msg)) => { - warn!( - "Failed to send to {:?}: SendError({})", - &peer.to_neighbor().addr, - msg - ); - return false; - } - Err(e) => { - test_debug!( - "{:?} encountered fatal error when forwarding: {:?}", - &peer.to_neighbor().addr, - &e - ); - assert!(false); - unreachable!(); - } - } - } - - fn http_rpc( - peer_http: u16, - request: StacksHttpRequest, - ) -> Result { - use std::net::TcpStream; - - let mut sock = TcpStream::connect( - &format!("127.0.0.1:{}", peer_http) - .parse::() - .unwrap(), - ) - .unwrap(); - - let request_bytes = 
request.try_serialize().unwrap(); - match sock.write_all(&request_bytes) { - Ok(_) => {} - Err(e) => { - test_debug!("Client failed to write: {:?}", &e); - return Err(net_error::WriteError(e)); - } - } - - let mut resp = vec![]; - match sock.read_to_end(&mut resp) { - Ok(_) => { - if resp.len() == 0 { - test_debug!("Client did not receive any data"); - return Err(net_error::PermanentlyDrained); - } - } - Err(e) => { - test_debug!("Client failed to read: {:?}", &e); - return Err(net_error::ReadError(e)); - } - } - - test_debug!("Client received {} bytes", resp.len()); - let response = StacksHttp::parse_response( - &request.preamble().verb, - &request.preamble().path_and_query_str, - &resp, - ) - .unwrap(); - match response { - StacksHttpMessage::Response(x) => Ok(x), - _ => { - panic!("Did not receive a Response"); - } - } - } - - fn broadcast_message( - broadcaster: &mut TestPeer, - relay_hints: Vec, - msg: StacksMessageType, - ) -> bool { - let request = NetworkRequest::Broadcast(relay_hints, msg); - match broadcaster.network.dispatch_request(request) { - Ok(_) => true, - Err(e) => { - error!("Failed to broadcast: {:?}", &e); - false - } - } - } - - fn push_block( - peer: &mut TestPeer, - dest: &NeighborKey, - relay_hints: Vec, - consensus_hash: ConsensusHash, - block: StacksBlock, - ) -> bool { - test_debug!( - "{:?}: Push block {}/{} to {:?}", - peer.to_neighbor().addr, - &consensus_hash, - block.block_hash(), - dest - ); - - let sn = SortitionDB::get_block_snapshot_consensus( - peer.sortdb.as_ref().unwrap().conn(), - &consensus_hash, - ) - .unwrap() - .unwrap(); - let consensus_hash = sn.consensus_hash; - - let msg = StacksMessageType::Blocks(BlocksData { - blocks: vec![BlocksDatum(consensus_hash, block)], - }); - push_message(peer, dest, relay_hints, msg) - } - - fn broadcast_block( - peer: &mut TestPeer, - relay_hints: Vec, - consensus_hash: ConsensusHash, - block: StacksBlock, - ) -> bool { - test_debug!( - "{:?}: Broadcast block {}/{}", - 
peer.to_neighbor().addr, - &consensus_hash, - block.block_hash(), - ); - - let sn = SortitionDB::get_block_snapshot_consensus( - peer.sortdb.as_ref().unwrap().conn(), - &consensus_hash, - ) - .unwrap() - .unwrap(); - let consensus_hash = sn.consensus_hash; - - let msg = StacksMessageType::Blocks(BlocksData { - blocks: vec![BlocksDatum(consensus_hash, block)], - }); - broadcast_message(peer, relay_hints, msg) - } - - fn push_microblocks( - peer: &mut TestPeer, - dest: &NeighborKey, - relay_hints: Vec, - consensus_hash: ConsensusHash, - block_hash: BlockHeaderHash, - microblocks: Vec, - ) -> bool { - test_debug!( - "{:?}: Push {} microblocksblock {}/{} to {:?}", - peer.to_neighbor().addr, - microblocks.len(), - &consensus_hash, - &block_hash, - dest - ); - let msg = StacksMessageType::Microblocks(MicroblocksData { - index_anchor_block: StacksBlockHeader::make_index_block_hash( - &consensus_hash, - &block_hash, - ), - microblocks: microblocks, - }); - push_message(peer, dest, relay_hints, msg) - } - - fn broadcast_microblocks( - peer: &mut TestPeer, - relay_hints: Vec, - consensus_hash: ConsensusHash, - block_hash: BlockHeaderHash, - microblocks: Vec, - ) -> bool { - test_debug!( - "{:?}: broadcast {} microblocksblock {}/{}", - peer.to_neighbor().addr, - microblocks.len(), - &consensus_hash, - &block_hash, - ); - let msg = StacksMessageType::Microblocks(MicroblocksData { - index_anchor_block: StacksBlockHeader::make_index_block_hash( - &consensus_hash, - &block_hash, - ), - microblocks: microblocks, - }); - broadcast_message(peer, relay_hints, msg) - } - - fn push_transaction( - peer: &mut TestPeer, - dest: &NeighborKey, - relay_hints: Vec, - tx: StacksTransaction, - ) -> bool { - test_debug!( - "{:?}: Push tx {} to {:?}", - peer.to_neighbor().addr, - tx.txid(), - dest - ); - let msg = StacksMessageType::Transaction(tx); - push_message(peer, dest, relay_hints, msg) - } - - fn broadcast_transaction( - peer: &mut TestPeer, - relay_hints: Vec, - tx: StacksTransaction, - 
) -> bool { - test_debug!("{:?}: broadcast tx {}", peer.to_neighbor().addr, tx.txid(),); - let msg = StacksMessageType::Transaction(tx); - broadcast_message(peer, relay_hints, msg) - } - - fn http_get_info(http_port: u16) -> RPCPeerInfoData { - let mut request = HttpRequestPreamble::new_for_peer( - PeerHost::from_host_port("127.0.0.1".to_string(), http_port), - "GET".to_string(), - "/v2/info".to_string(), - ); - request.keep_alive = false; - let getinfo = StacksHttpRequest::new(request, HttpRequestContents::new()); - let response = http_rpc(http_port, getinfo).unwrap(); - let peer_info = response.decode_peer_info().unwrap(); - peer_info - } - - fn http_post_block( - http_port: u16, - consensus_hash: &ConsensusHash, - block: &StacksBlock, - ) -> bool { - test_debug!( - "upload block {}/{} to localhost:{}", - consensus_hash, - block.block_hash(), - http_port - ); - let mut request = HttpRequestPreamble::new_for_peer( - PeerHost::from_host_port("127.0.0.1".to_string(), http_port), - "POST".to_string(), - "/v2/blocks".to_string(), - ); - request.keep_alive = false; - let post_block = - StacksHttpRequest::new(request, HttpRequestContents::new().payload_stacks(block)); - - let response = http_rpc(http_port, post_block).unwrap(); - let accepted = response.decode_stacks_block_accepted().unwrap(); - accepted.accepted - } - - fn http_post_microblock( - http_port: u16, - consensus_hash: &ConsensusHash, - block_hash: &BlockHeaderHash, - mblock: &StacksMicroblock, - ) -> bool { - test_debug!( - "upload microblock {}/{}-{} to localhost:{}", - consensus_hash, - block_hash, - mblock.block_hash(), - http_port - ); - let mut request = HttpRequestPreamble::new_for_peer( - PeerHost::from_host_port("127.0.0.1".to_string(), http_port), - "POST".to_string(), - "/v2/microblocks".to_string(), - ); - request.keep_alive = false; - let tip = StacksBlockHeader::make_index_block_hash(consensus_hash, block_hash); - let post_microblock = StacksHttpRequest::new( - request, - 
HttpRequestContents::new() - .payload_stacks(mblock) - .for_specific_tip(tip), - ); - - let response = http_rpc(http_port, post_microblock).unwrap(); - let payload = response.get_http_payload_ok().unwrap(); - let bhh: BlockHeaderHash = serde_json::from_value(payload.try_into().unwrap()).unwrap(); - return true; - } - - fn test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks( - outbound_test: bool, - disable_push: bool, - ) { - with_timeout(600, move || { - let original_blocks_and_microblocks = RefCell::new(vec![]); - let blocks_and_microblocks = RefCell::new(vec![]); - let idx = RefCell::new(0); - let sent_blocks = RefCell::new(false); - let sent_microblocks = RefCell::new(false); - - run_get_blocks_and_microblocks( - "test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks", - 4210, - 2, - |ref mut peer_configs| { - // build initial network topology. - assert_eq!(peer_configs.len(), 2); - - // peer 0 produces the blocks and pushes them to peer 1 - // peer 1 receives the blocks and microblocks. It - // doesn't download them, nor does it try to get invs - peer_configs[0].connection_opts.disable_block_advertisement = true; - - peer_configs[1].connection_opts.disable_inv_sync = true; - peer_configs[1].connection_opts.disable_block_download = true; - peer_configs[1].connection_opts.disable_block_advertisement = true; - - // disable nat punches -- disconnect/reconnect - // clears inv state - peer_configs[0].connection_opts.disable_natpunch = true; - peer_configs[1].connection_opts.disable_natpunch = true; - - // force usage of blocksavailable/microblocksavailable? 
- if disable_push { - peer_configs[0].connection_opts.disable_block_push = true; - peer_configs[0].connection_opts.disable_microblock_push = true; - peer_configs[1].connection_opts.disable_block_push = true; - peer_configs[1].connection_opts.disable_microblock_push = true; - } - - let peer_0 = peer_configs[0].to_neighbor(); - let peer_1 = peer_configs[1].to_neighbor(); - - peer_configs[0].add_neighbor(&peer_1); - - if outbound_test { - // neighbor relationship is symmetric -- peer 1 has an outbound connection - // to peer 0. - peer_configs[1].add_neighbor(&peer_0); - } - }, - |num_blocks, ref mut peers| { - let tip = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - let this_reward_cycle = peers[0] - .config - .burnchain - .block_height_to_reward_cycle(tip.block_height) - .unwrap(); - - // build up block data to replicate - let mut block_data = vec![]; - for _ in 0..num_blocks { - let tip = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - if peers[0] - .config - .burnchain - .block_height_to_reward_cycle(tip.block_height) - .unwrap() - != this_reward_cycle - { - continue; - } - let (mut burn_ops, stacks_block, microblocks) = - peers[0].make_default_tenure(); - - let (_, burn_header_hash, consensus_hash) = - peers[0].next_burnchain_block(burn_ops.clone()); - peers[0].process_stacks_epoch_at_tip(&stacks_block, µblocks); - - TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); - - for i in 1..peers.len() { - peers[i].next_burnchain_block_raw(burn_ops.clone()); - } - - let sn = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - block_data.push(( - sn.consensus_hash.clone(), - Some(stacks_block), - Some(microblocks), - )); - } - let saved_copy: Vec<(ConsensusHash, StacksBlock, Vec)> = - block_data - .clone() - .drain(..) 
- .map(|(ch, blk_opt, mblocks_opt)| { - (ch, blk_opt.unwrap(), mblocks_opt.unwrap()) - }) - .collect(); - *blocks_and_microblocks.borrow_mut() = saved_copy.clone(); - *original_blocks_and_microblocks.borrow_mut() = saved_copy; - block_data - }, - |ref mut peers| { - if !disable_push { - for peer in peers.iter_mut() { - // force peers to keep trying to process buffered data - peer.network.burnchain_tip.burn_header_hash = - BurnchainHeaderHash([0u8; 32]); - } - } - - // make sure peer 1's inv has an entry for peer 0, even - // though it's not doing an inv sync. This is required for the downloader to - // work - let peer_0_nk = peers[0].to_neighbor().addr; - let peer_1_nk = peers[1].to_neighbor().addr; - match peers[1].network.inv_state { - Some(ref mut inv_state) => { - if inv_state.get_stats(&peer_0_nk).is_none() { - test_debug!("initialize inv statistics for peer 0 in peer 1"); - inv_state.add_peer(peer_0_nk.clone(), true); - } else { - test_debug!("peer 1 has inv state for peer 0"); - } - } - None => { - test_debug!("No inv state for peer 1"); - } - } - - if is_peer_connected(&peers[0], &peer_1_nk) { - // randomly push a block and/or microblocks to peer 1. 
- let mut block_data = blocks_and_microblocks.borrow_mut(); - let original_block_data = original_blocks_and_microblocks.borrow(); - let mut next_idx = idx.borrow_mut(); - let data_to_push = { - if block_data.len() > 0 { - let (consensus_hash, block, microblocks) = - block_data[*next_idx].clone(); - Some((consensus_hash, block, microblocks)) - } else { - // start over (can happen if a message gets - // dropped due to a timeout) - test_debug!("Reset block transmission (possible timeout)"); - *block_data = (*original_block_data).clone(); - *next_idx = thread_rng().gen::() % block_data.len(); - let (consensus_hash, block, microblocks) = - block_data[*next_idx].clone(); - Some((consensus_hash, block, microblocks)) - } - }; - - if let Some((consensus_hash, block, microblocks)) = data_to_push { - test_debug!( - "Push block {}/{} and microblocks", - &consensus_hash, - block.block_hash() - ); - - let block_hash = block.block_hash(); - let mut sent_blocks = sent_blocks.borrow_mut(); - let mut sent_microblocks = sent_microblocks.borrow_mut(); - - let pushed_block = if !*sent_blocks { - push_block( - &mut peers[0], - &peer_1_nk, - vec![], - consensus_hash.clone(), - block, - ) - } else { - true - }; - - *sent_blocks = pushed_block; - - if pushed_block { - let pushed_microblock = if !*sent_microblocks { - push_microblocks( - &mut peers[0], - &peer_1_nk, - vec![], - consensus_hash, - block_hash, - microblocks, - ) - } else { - true - }; - - *sent_microblocks = pushed_microblock; - - if pushed_block && pushed_microblock { - block_data.remove(*next_idx); - if block_data.len() > 0 { - *next_idx = thread_rng().gen::() % block_data.len(); - } - *sent_blocks = false; - *sent_microblocks = false; - } - } - test_debug!("{} blocks/microblocks remaining", block_data.len()); - } - } - - // peer 0 should never see a GetBlocksInv message. 
- // peer 1 should never see a BlocksInv message - for (_, convo) in peers[0].network.peers.iter() { - assert_eq!( - convo - .stats - .get_message_recv_count(StacksMessageID::GetBlocksInv), - 0 - ); - } - for (_, convo) in peers[1].network.peers.iter() { - assert_eq!( - convo - .stats - .get_message_recv_count(StacksMessageID::BlocksInv), - 0 - ); - } - }, - |ref peer| { - // check peer health - // nothing should break - // TODO - true - }, - |_| true, - ); - }) - } - - #[test] - #[ignore] - fn test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks_outbound() { - // simulates node 0 pushing blocks to node 1, but node 0 is publicly routable. - // nodes rely on blocksavailable/microblocksavailable to discover blocks - test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks(true, true) - } - - #[test] - #[ignore] - fn test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks_inbound() { - // simulates node 0 pushing blocks to node 1, where node 0 is behind a NAT - // nodes rely on blocksavailable/microblocksavailable to discover blocks - test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks(false, true) - } - - #[test] - #[ignore] - fn test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks_outbound_direct() { - // simulates node 0 pushing blocks to node 1, but node 0 is publicly routable. 
- // nodes may push blocks and microblocks directly to each other - test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks(true, false) - } - - #[test] - #[ignore] - fn test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks_inbound_direct() { - // simulates node 0 pushing blocks to node 1, where node 0 is behind a NAT - // nodes may push blocks and microblocks directly to each other - test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks(false, false) - } - - #[test] - #[ignore] - fn test_get_blocks_and_microblocks_upload_blocks_http() { - with_timeout(600, || { - let (port_sx, port_rx) = std::sync::mpsc::sync_channel(1); - let (block_sx, block_rx) = std::sync::mpsc::sync_channel(1); - - std::thread::spawn(move || loop { - eprintln!("Get port"); - let remote_port: u16 = port_rx.recv().unwrap(); - eprintln!("Got port {}", remote_port); - - eprintln!("Send getinfo"); - let peer_info = http_get_info(remote_port); - eprintln!("Got getinfo! {:?}", &peer_info); - let idx = peer_info.stacks_tip_height as usize; - - eprintln!("Get blocks and microblocks"); - let blocks_and_microblocks: Vec<( - ConsensusHash, - Option, - Option>, - )> = block_rx.recv().unwrap(); - eprintln!("Got blocks and microblocks!"); - - if idx >= blocks_and_microblocks.len() { - eprintln!("Out of blocks to send!"); - return; - } - - eprintln!( - "Upload block {}", - &blocks_and_microblocks[idx].1.as_ref().unwrap().block_hash() - ); - http_post_block( - remote_port, - &blocks_and_microblocks[idx].0, - blocks_and_microblocks[idx].1.as_ref().unwrap(), - ); - for mblock in blocks_and_microblocks[idx].2.as_ref().unwrap().iter() { - eprintln!("Upload microblock {}", mblock.block_hash()); - http_post_microblock( - remote_port, - &blocks_and_microblocks[idx].0, - &blocks_and_microblocks[idx].1.as_ref().unwrap().block_hash(), - mblock, - ); - } - }); - - let original_blocks_and_microblocks = RefCell::new(vec![]); - let port_sx_cell = RefCell::new(port_sx); - let 
block_sx_cell = RefCell::new(block_sx); - - run_get_blocks_and_microblocks( - "test_get_blocks_and_microblocks_upload_blocks_http", - 4250, - 2, - |ref mut peer_configs| { - // build initial network topology. - assert_eq!(peer_configs.len(), 2); - - // peer 0 produces the blocks - peer_configs[0].connection_opts.disable_chat_neighbors = true; - - // peer 0 sends them to peer 1 - peer_configs[1].connection_opts.disable_chat_neighbors = true; - peer_configs[1].connection_opts.disable_inv_sync = true; - - // disable nat punches -- disconnect/reconnect - // clears inv state - peer_configs[0].connection_opts.disable_natpunch = true; - peer_configs[1].connection_opts.disable_natpunch = true; - - // generous timeouts - peer_configs[0].connection_opts.timeout = 180; - peer_configs[1].connection_opts.timeout = 180; - - let peer_0 = peer_configs[0].to_neighbor(); - let peer_1 = peer_configs[1].to_neighbor(); - }, - |num_blocks, ref mut peers| { - let tip = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - let this_reward_cycle = peers[0] - .config - .burnchain - .block_height_to_reward_cycle(tip.block_height) - .unwrap(); - - // build up block data to replicate - let mut block_data = vec![]; - for _ in 0..num_blocks { - // only produce blocks for a single reward - // cycle, since pushing block/microblock - // announcements in reward cycles the remote - // peer doesn't know about won't work. 
- let tip = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - if peers[0] - .config - .burnchain - .block_height_to_reward_cycle(tip.block_height) - .unwrap() - != this_reward_cycle - { - continue; - } - - let (mut burn_ops, stacks_block, microblocks) = - peers[0].make_default_tenure(); - - let (_, burn_header_hash, consensus_hash) = - peers[0].next_burnchain_block(burn_ops.clone()); - peers[0].process_stacks_epoch_at_tip(&stacks_block, µblocks); - - TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); - - for i in 1..peers.len() { - peers[i].next_burnchain_block_raw(burn_ops.clone()); - } - - let sn = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - block_data.push(( - sn.consensus_hash.clone(), - Some(stacks_block), - Some(microblocks), - )); - } - - assert_eq!(block_data.len(), 5); - - *original_blocks_and_microblocks.borrow_mut() = block_data.clone(); - - block_data - }, - |ref mut peers| { - let blocks_and_microblocks = original_blocks_and_microblocks.borrow().clone(); - let remote_port = peers[1].config.http_port; - - let port_sx = port_sx_cell.borrow_mut(); - let block_sx = block_sx_cell.borrow_mut(); - - let _ = (*port_sx).try_send(remote_port); - let _ = (*block_sx).try_send(blocks_and_microblocks); - }, - |ref peer| { - // check peer health - // TODO - true - }, - |_| true, - ); - }) - } - - fn make_test_smart_contract_transaction( - peer: &mut TestPeer, - name: &str, - consensus_hash: &ConsensusHash, - block_hash: &BlockHeaderHash, - ) -> StacksTransaction { - // make a smart contract - let contract = " - (define-data-var bar int 0) - (define-public (get-bar) (ok (var-get bar))) - (define-public (set-bar (x int) (y int)) - (begin (var-set bar (/ x y)) (ok (var-get bar))))"; - - let cost_limits = peer.config.connection_opts.read_only_call_limit.clone(); - - let tx_contract = peer - .with_mining_state( - |ref mut sortdb, ref 
mut miner, ref mut spending_account, ref mut stacks_node| { - let mut tx_contract = StacksTransaction::new( - TransactionVersion::Testnet, - spending_account.as_transaction_auth().unwrap().into(), - TransactionPayload::new_smart_contract( - &name.to_string(), - &contract.to_string(), - None, - ) - .unwrap(), - ); - - let chain_tip = - StacksBlockHeader::make_index_block_hash(consensus_hash, block_hash); - let cur_nonce = stacks_node - .chainstate - .with_read_only_clarity_tx(&sortdb.index_conn(), &chain_tip, |clarity_tx| { - clarity_tx.with_clarity_db_readonly(|clarity_db| { - clarity_db - .get_account_nonce( - &spending_account.origin_address().unwrap().into(), - ) - .unwrap() - }) - }) - .unwrap(); - - test_debug!( - "Nonce of {:?} is {} at {}/{}", - &spending_account.origin_address().unwrap(), - cur_nonce, - consensus_hash, - block_hash - ); - - // spending_account.set_nonce(cur_nonce + 1); - - tx_contract.chain_id = 0x80000000; - tx_contract.auth.set_origin_nonce(cur_nonce); - tx_contract.set_tx_fee(MINIMUM_TX_FEE_RATE_PER_BYTE * 500); - - let mut tx_signer = StacksTransactionSigner::new(&tx_contract); - spending_account.sign_as_origin(&mut tx_signer); - - let tx_contract_signed = tx_signer.get_tx().unwrap(); - - test_debug!( - "make transaction {:?} off of {:?}/{:?}: {:?}", - &tx_contract_signed.txid(), - consensus_hash, - block_hash, - &tx_contract_signed - ); - - Ok(tx_contract_signed) - }, - ) - .unwrap(); - - tx_contract - } - - #[test] - #[ignore] - fn test_get_blocks_and_microblocks_2_peers_push_transactions() { - with_timeout(600, || { - let blocks_and_microblocks = RefCell::new(vec![]); - let blocks_idx = RefCell::new(0); - let sent_txs = RefCell::new(vec![]); - let done = RefCell::new(false); - - let peers = run_get_blocks_and_microblocks( - "test_get_blocks_and_microblocks_2_peers_push_transactions", - 4220, - 2, - |ref mut peer_configs| { - // build initial network topology. 
- assert_eq!(peer_configs.len(), 2); - - // peer 0 generates blocks and microblocks, and pushes - // them to peer 1. Peer 0 also generates transactions - // and pushes them to peer 1. - peer_configs[0].connection_opts.disable_block_advertisement = true; - - // let peer 0 drive this test, as before, by controlling - // when peer 1 sees blocks. - peer_configs[1].connection_opts.disable_inv_sync = true; - peer_configs[1].connection_opts.disable_block_download = true; - peer_configs[1].connection_opts.disable_block_advertisement = true; - - peer_configs[0].connection_opts.outbox_maxlen = 100; - peer_configs[1].connection_opts.inbox_maxlen = 100; - - // disable nat punches -- disconnect/reconnect - // clears inv state - peer_configs[0].connection_opts.disable_natpunch = true; - peer_configs[1].connection_opts.disable_natpunch = true; - - let initial_balances = vec![ - ( - PrincipalData::from( - peer_configs[0].spending_account.origin_address().unwrap(), - ), - 1000000, - ), - ( - PrincipalData::from( - peer_configs[1].spending_account.origin_address().unwrap(), - ), - 1000000, - ), - ]; - - peer_configs[0].initial_balances = initial_balances.clone(); - peer_configs[1].initial_balances = initial_balances.clone(); - - let peer_0 = peer_configs[0].to_neighbor(); - let peer_1 = peer_configs[1].to_neighbor(); - - peer_configs[0].add_neighbor(&peer_1); - peer_configs[1].add_neighbor(&peer_0); - }, - |num_blocks, ref mut peers| { - let tip = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - let this_reward_cycle = peers[0] - .config - .burnchain - .block_height_to_reward_cycle(tip.block_height) - .unwrap(); - - // build up block data to replicate - let mut block_data = vec![]; - for b in 0..num_blocks { - let tip = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - if peers[0] - .config - .burnchain - .block_height_to_reward_cycle(tip.block_height) - .unwrap() - != 
this_reward_cycle - { - continue; - } - let (mut burn_ops, stacks_block, microblocks) = - peers[0].make_default_tenure(); - - let (_, burn_header_hash, consensus_hash) = - peers[0].next_burnchain_block(burn_ops.clone()); - peers[0].process_stacks_epoch_at_tip(&stacks_block, µblocks); - - TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); - - for i in 1..peers.len() { - peers[i].next_burnchain_block_raw(burn_ops.clone()); - if b == 0 { - // prime with first block - peers[i].process_stacks_epoch_at_tip(&stacks_block, &vec![]); - } - } - - let sn = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - block_data.push(( - sn.consensus_hash.clone(), - Some(stacks_block), - Some(microblocks), - )); - } - *blocks_and_microblocks.borrow_mut() = block_data - .clone() - .drain(..) - .map(|(ch, blk_opt, mblocks_opt)| { - (ch, blk_opt.unwrap(), mblocks_opt.unwrap()) - }) - .collect(); - block_data - }, - |ref mut peers| { - let peer_0_nk = peers[0].to_neighbor().addr; - let peer_1_nk = peers[1].to_neighbor().addr; - - // peers must be connected to each other - let mut peer_0_to_1 = false; - let mut peer_1_to_0 = false; - for (nk, event_id) in peers[0].network.events.iter() { - match peers[0].network.peers.get(event_id) { - Some(convo) => { - if *nk == peer_1_nk { - peer_0_to_1 = true; - } - } - None => {} - } - } - for (nk, event_id) in peers[1].network.events.iter() { - match peers[1].network.peers.get(event_id) { - Some(convo) => { - if *nk == peer_0_nk { - peer_1_to_0 = true; - } - } - None => {} - } - } - - if !peer_0_to_1 || !peer_1_to_0 { - test_debug!( - "Peers not bi-directionally connected: 0->1 = {}, 1->0 = {}", - peer_0_to_1, - peer_1_to_0 - ); - return; - } - - // make sure peer 2's inv has an entry for peer 1, even - // though it's not doing an inv sync. 
- match peers[1].network.inv_state { - Some(ref mut inv_state) => { - if inv_state.get_stats(&peer_0_nk).is_none() { - test_debug!("initialize inv statistics for peer 0 in peer 1"); - inv_state.add_peer(peer_0_nk, true); - } else { - test_debug!("peer 1 has inv state for peer 0"); - } - } - None => { - test_debug!("No inv state for peer 1"); - } - } - - let done_flag = *done.borrow(); - if is_peer_connected(&peers[0], &peer_1_nk) { - // only submit the next transaction if the previous - // one is accepted - let has_last_transaction = { - let expected_txs: std::cell::Ref<'_, Vec> = - sent_txs.borrow(); - if let Some(tx) = (*expected_txs).last() { - let txid = tx.txid(); - if !peers[1].mempool.as_ref().unwrap().has_tx(&txid) { - debug!("Peer 1 still waiting for transaction {}", &txid); - push_transaction( - &mut peers[0], - &peer_1_nk, - vec![], - (*tx).clone(), - ); - false - } else { - true - } - } else { - true - } - }; - - if has_last_transaction { - // push blocks and microblocks in order, and push a - // transaction that can only be validated once the - // block and microblocks are processed. 
- let ( - ( - block_consensus_hash, - block, - microblocks_consensus_hash, - microblocks_block_hash, - microblocks, - ), - idx, - ) = { - let block_data = blocks_and_microblocks.borrow(); - let mut idx = blocks_idx.borrow_mut(); - - let microblocks = block_data[*idx].2.clone(); - let microblocks_consensus_hash = block_data[*idx].0.clone(); - let microblocks_block_hash = block_data[*idx].1.block_hash(); - - *idx += 1; - if *idx >= block_data.len() { - *idx = 1; - } - - let block = block_data[*idx].1.clone(); - let block_consensus_hash = block_data[*idx].0.clone(); - ( - ( - block_consensus_hash, - block, - microblocks_consensus_hash, - microblocks_block_hash, - microblocks, - ), - *idx, - ) - }; - - if !done_flag { - test_debug!( - "Push microblocks built by {}/{} (idx={})", - µblocks_consensus_hash, - µblocks_block_hash, - idx - ); - - let block_hash = block.block_hash(); - push_microblocks( - &mut peers[0], - &peer_1_nk, - vec![], - microblocks_consensus_hash, - microblocks_block_hash, - microblocks, - ); - - test_debug!( - "Push block {}/{} and microblocks (idx = {})", - &block_consensus_hash, - block.block_hash(), - idx - ); - push_block( - &mut peers[0], - &peer_1_nk, - vec![], - block_consensus_hash.clone(), - block, - ); - - // create a transaction against the resulting - // (anchored) chain tip - let tx = make_test_smart_contract_transaction( - &mut peers[0], - &format!("test-contract-{}", &block_hash.to_hex()[0..10]), - &block_consensus_hash, - &block_hash, - ); - - // push or post - push_transaction(&mut peers[0], &peer_1_nk, vec![], tx.clone()); - - let mut expected_txs = sent_txs.borrow_mut(); - expected_txs.push(tx); - } else { - test_debug!("Done pushing data"); - } - } - } - - // peer 0 should never see a GetBlocksInv message. 
- // peer 1 should never see a BlocksInv message - for (_, convo) in peers[0].network.peers.iter() { - assert_eq!( - convo - .stats - .get_message_recv_count(StacksMessageID::GetBlocksInv), - 0 - ); - } - for (_, convo) in peers[1].network.peers.iter() { - assert_eq!( - convo - .stats - .get_message_recv_count(StacksMessageID::BlocksInv), - 0 - ); - } - }, - |ref peer| { - // check peer health - // nothing should break - // TODO - true - }, - |ref mut peers| { - // all blocks downloaded. only stop if peer 1 has - // all the transactions - let mut done_flag = done.borrow_mut(); - *done_flag = true; - - let txs = - MemPoolDB::get_all_txs(peers[1].mempool.as_ref().unwrap().conn()).unwrap(); - test_debug!("Peer 1 has {} txs", txs.len()); - txs.len() == sent_txs.borrow().len() - }, - ); - - // peer 1 should have all the transactions - let blocks_and_microblocks = blocks_and_microblocks.into_inner(); - - let txs = MemPoolDB::get_all_txs(peers[1].mempool.as_ref().unwrap().conn()).unwrap(); - let expected_txs = sent_txs.into_inner(); - for tx in txs.iter() { - let mut found = false; - for expected_tx in expected_txs.iter() { - if tx.tx.txid() == expected_tx.txid() { - found = true; - break; - } - } - if !found { - panic!("Transaction not found: {:?}", &tx.tx); - } - } - - // peer 1 should have 1 tx per chain tip - for ((consensus_hash, block, _), sent_tx) in - blocks_and_microblocks.iter().zip(expected_txs.iter()) - { - let block_hash = block.block_hash(); - let tx_infos = MemPoolDB::get_txs_after( - peers[1].mempool.as_ref().unwrap().conn(), - consensus_hash, - &block_hash, - 0, - 1000, - ) - .unwrap(); - test_debug!( - "Check {}/{} (height {}): expect {}", - &consensus_hash, - &block_hash, - block.header.total_work.work, - &sent_tx.txid() - ); - assert_eq!(tx_infos.len(), 1); - assert_eq!(tx_infos[0].tx.txid(), sent_tx.txid()); - } - }) - } - - #[test] - #[ignore] - fn test_get_blocks_and_microblocks_peers_broadcast() { - with_timeout(600, || { - let 
blocks_and_microblocks = RefCell::new(vec![]); - let blocks_idx = RefCell::new(0); - let sent_txs = RefCell::new(vec![]); - let done = RefCell::new(false); - let num_peers = 3; - let privk = StacksPrivateKey::new(); - - let peers = run_get_blocks_and_microblocks( - "test_get_blocks_and_microblocks_peers_broadcast", - 4230, - num_peers, - |ref mut peer_configs| { - // build initial network topology. - assert_eq!(peer_configs.len(), num_peers); - - // peer 0 generates blocks and microblocks, and pushes - // them to peers 1..n. Peer 0 also generates transactions - // and broadcasts them to the network. - - peer_configs[0].connection_opts.disable_inv_sync = true; - peer_configs[0].connection_opts.disable_inv_chat = true; - - // disable nat punches -- disconnect/reconnect - // clears inv state. - for i in 0..peer_configs.len() { - peer_configs[i].connection_opts.disable_natpunch = true; - peer_configs[i].connection_opts.disable_network_prune = true; - peer_configs[i].connection_opts.timeout = 600; - peer_configs[i].connection_opts.connect_timeout = 600; - - // do one walk - peer_configs[i].connection_opts.num_initial_walks = 0; - peer_configs[i].connection_opts.walk_retry_count = 0; - peer_configs[i].connection_opts.walk_interval = 600; - - // don't throttle downloads - peer_configs[i].connection_opts.download_interval = 0; - peer_configs[i].connection_opts.inv_sync_interval = 0; - - let max_inflight = peer_configs[i].connection_opts.max_inflight_blocks; - peer_configs[i].connection_opts.max_clients_per_host = - ((num_peers + 1) as u64) * max_inflight; - peer_configs[i].connection_opts.soft_max_clients_per_host = - ((num_peers + 1) as u64) * max_inflight; - peer_configs[i].connection_opts.num_neighbors = (num_peers + 1) as u64; - peer_configs[i].connection_opts.soft_num_neighbors = (num_peers + 1) as u64; - } - - let initial_balances = vec![( - PrincipalData::from( - peer_configs[0].spending_account.origin_address().unwrap(), - ), - 1000000, - )]; - - for i in 
0..peer_configs.len() { - peer_configs[i].initial_balances = initial_balances.clone(); - } - - // connectivity - let peer_0 = peer_configs[0].to_neighbor(); - for i in 1..peer_configs.len() { - peer_configs[i].add_neighbor(&peer_0); - let peer_i = peer_configs[i].to_neighbor(); - peer_configs[0].add_neighbor(&peer_i); - } - }, - |num_blocks, ref mut peers| { - let tip = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - let this_reward_cycle = peers[0] - .config - .burnchain - .block_height_to_reward_cycle(tip.block_height) - .unwrap(); - - // build up block data to replicate - let mut block_data = vec![]; - for _ in 0..num_blocks { - let tip = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - if peers[0] - .config - .burnchain - .block_height_to_reward_cycle(tip.block_height) - .unwrap() - != this_reward_cycle - { - continue; - } - let (mut burn_ops, stacks_block, microblocks) = - peers[0].make_default_tenure(); - - let (_, burn_header_hash, consensus_hash) = - peers[0].next_burnchain_block(burn_ops.clone()); - peers[0].process_stacks_epoch_at_tip(&stacks_block, µblocks); - - TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); - - for i in 1..peers.len() { - peers[i].next_burnchain_block_raw(burn_ops.clone()); - } - - let sn = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - - block_data.push(( - sn.consensus_hash.clone(), - Some(stacks_block), - Some(microblocks), - )); - } - *blocks_and_microblocks.borrow_mut() = block_data - .clone() - .drain(..) 
- .map(|(ch, blk_opt, mblocks_opt)| { - (ch, blk_opt.unwrap(), mblocks_opt.unwrap()) - }) - .collect(); - block_data - }, - |ref mut peers| { - for peer in peers.iter_mut() { - // force peers to keep trying to process buffered data - peer.network.burnchain_tip.burn_header_hash = - BurnchainHeaderHash([0u8; 32]); - } - - let done_flag = *done.borrow(); - - let mut connectivity_0_to_n = HashSet::new(); - let mut connectivity_n_to_0 = HashSet::new(); - - let peer_0_nk = peers[0].to_neighbor().addr; - - for (nk, event_id) in peers[0].network.events.iter() { - if let Some(convo) = peers[0].network.peers.get(event_id) { - if convo.is_authenticated() { - connectivity_0_to_n.insert(nk.clone()); - } - } - } - for i in 1..peers.len() { - for (nk, event_id) in peers[i].network.events.iter() { - if *nk != peer_0_nk { - continue; - } - - if let Some(convo) = peers[i].network.peers.get(event_id) { - if convo.is_authenticated() { - if let Some(inv_state) = &peers[i].network.inv_state { - if let Some(inv_stats) = - inv_state.block_stats.get(&peer_0_nk) - { - if inv_stats.inv.num_reward_cycles >= 5 { - connectivity_n_to_0 - .insert(peers[i].to_neighbor().addr); - } - } - } - } - } - } - } - - if connectivity_0_to_n.len() < peers.len() - 1 - || connectivity_n_to_0.len() < peers.len() - 1 - { - test_debug!( - "Network not connected: 0 --> N = {}, N --> 0 = {}", - connectivity_0_to_n.len(), - connectivity_n_to_0.len() - ); - return; - } - - let ((tip_consensus_hash, tip_block, _), idx) = { - let block_data = blocks_and_microblocks.borrow(); - let idx = blocks_idx.borrow(); - (block_data[(*idx as usize).saturating_sub(1)].clone(), *idx) - }; - - if idx > 0 { - let mut caught_up = true; - for i in 1..peers.len() { - peers[i] - .with_db_state(|sortdb, chainstate, relayer, mempool| { - let (canonical_consensus_hash, canonical_block_hash) = - SortitionDB::get_canonical_stacks_chain_tip_hash( - sortdb.conn(), - ) - .unwrap(); - - if canonical_consensus_hash != tip_consensus_hash - || 
canonical_block_hash != tip_block.block_hash() - { - debug!( - "Peer {} is not caught up yet (at {}/{}, need {}/{})", - i + 1, - &canonical_consensus_hash, - &canonical_block_hash, - &tip_consensus_hash, - &tip_block.block_hash() - ); - caught_up = false; - } - Ok(()) - }) - .unwrap(); - } - if !caught_up { - return; - } - } - - // caught up! - // find next block - let ((consensus_hash, block, microblocks), idx) = { - let block_data = blocks_and_microblocks.borrow(); - let mut idx = blocks_idx.borrow_mut(); - if *idx >= block_data.len() { - test_debug!("Out of blocks and microblocks to push"); - return; - } - - let ret = block_data[*idx].clone(); - *idx += 1; - (ret, *idx) - }; - - if !done_flag { - test_debug!( - "Broadcast block {}/{} and microblocks (idx = {})", - &consensus_hash, - block.block_hash(), - idx - ); - - let block_hash = block.block_hash(); - - // create a transaction against the current - // (anchored) chain tip - let tx = make_test_smart_contract_transaction( - &mut peers[0], - &format!("test-contract-{}", &block_hash.to_hex()[0..10]), - &tip_consensus_hash, - &tip_block.block_hash(), - ); - - let mut expected_txs = sent_txs.borrow_mut(); - expected_txs.push(tx.clone()); - - test_debug!( - "Broadcast {}/{} and its microblocks", - &consensus_hash, - &block.block_hash() - ); - // next block - broadcast_block(&mut peers[0], vec![], consensus_hash.clone(), block); - broadcast_microblocks( - &mut peers[0], - vec![], - consensus_hash, - block_hash, - microblocks, - ); - - // NOTE: first transaction will be dropped since the other nodes haven't - // processed the first-ever Stacks block when their relayer code gets - // around to considering it. - broadcast_transaction(&mut peers[0], vec![], tx); - } else { - test_debug!("Done pushing data"); - } - }, - |ref peer| { - // check peer health -- no message errors - // (i.e. 
no relay cycles) - for (_, convo) in peer.network.peers.iter() { - assert_eq!(convo.stats.msgs_err, 0); - } - true - }, - |ref mut peers| { - // all blocks downloaded. only stop if peer 1 has - // all the transactions - let mut done_flag = done.borrow_mut(); - *done_flag = true; - - let mut ret = true; - for i in 1..peers.len() { - let txs = MemPoolDB::get_all_txs(peers[1].mempool.as_ref().unwrap().conn()) - .unwrap(); - test_debug!("Peer {} has {} txs", i + 1, txs.len()); - ret = ret && txs.len() == sent_txs.borrow().len() - 1; - } - ret - }, - ); - - // peers 1..n should have all the transactions - let blocks_and_microblocks = blocks_and_microblocks.into_inner(); - let expected_txs = sent_txs.into_inner(); - - for i in 1..peers.len() { - let txs = - MemPoolDB::get_all_txs(peers[i].mempool.as_ref().unwrap().conn()).unwrap(); - for tx in txs.iter() { - let mut found = false; - for expected_tx in expected_txs.iter() { - if tx.tx.txid() == expected_tx.txid() { - found = true; - break; - } - } - if !found { - panic!("Transaction not found: {:?}", &tx.tx); - } - } - - // peers 1..n should have 1 tx per chain tip (except for the first block) - for ((consensus_hash, block, _), sent_tx) in - blocks_and_microblocks.iter().zip(expected_txs[1..].iter()) - { - let block_hash = block.block_hash(); - let tx_infos = MemPoolDB::get_txs_after( - peers[i].mempool.as_ref().unwrap().conn(), - consensus_hash, - &block_hash, - 0, - 1000, - ) - .unwrap(); - assert_eq!(tx_infos.len(), 1); - assert_eq!(tx_infos[0].tx.txid(), sent_tx.txid()); - } - } - }) - } - - #[test] - #[ignore] - fn test_get_blocks_and_microblocks_2_peers_antientropy() { - with_timeout(600, move || { - run_get_blocks_and_microblocks( - "test_get_blocks_and_microblocks_2_peers_antientropy", - 4240, - 2, - |ref mut peer_configs| { - // build initial network topology. - assert_eq!(peer_configs.len(), 2); - - // peer 0 mines blocks, but does not advertize them nor announce them as - // available via its inventory. 
It only uses its anti-entropy protocol to - // discover that peer 1 doesn't have them, and sends them to peer 1 that way. - peer_configs[0].connection_opts.disable_block_advertisement = true; - peer_configs[0].connection_opts.disable_block_download = true; - - peer_configs[1].connection_opts.disable_block_download = true; - peer_configs[1].connection_opts.disable_block_advertisement = true; - - // disable nat punches -- disconnect/reconnect - // clears inv state - peer_configs[0].connection_opts.disable_natpunch = true; - peer_configs[1].connection_opts.disable_natpunch = true; - - // permit anti-entropy protocol even if nat'ed - peer_configs[0].connection_opts.antientropy_public = true; - peer_configs[1].connection_opts.antientropy_public = true; - peer_configs[0].connection_opts.antientropy_retry = 1; - peer_configs[1].connection_opts.antientropy_retry = 1; - - // make peer 0 go slowly - peer_configs[0].connection_opts.max_block_push = 2; - peer_configs[0].connection_opts.max_microblock_push = 2; - - let peer_0 = peer_configs[0].to_neighbor(); - let peer_1 = peer_configs[1].to_neighbor(); - - // peer 0 is inbound to peer 1 - peer_configs[0].add_neighbor(&peer_1); - peer_configs[1].add_neighbor(&peer_0); - }, - |num_blocks, ref mut peers| { - let tip = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - let this_reward_cycle = peers[0] - .config - .burnchain - .block_height_to_reward_cycle(tip.block_height) - .unwrap(); - - // build up block data to replicate - let mut block_data = vec![]; - for _ in 0..num_blocks { - let tip = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - if peers[0] - .config - .burnchain - .block_height_to_reward_cycle(tip.block_height) - .unwrap() - != this_reward_cycle - { - continue; - } - let (mut burn_ops, stacks_block, microblocks) = - peers[0].make_default_tenure(); - - let (_, burn_header_hash, consensus_hash) = - 
peers[0].next_burnchain_block(burn_ops.clone()); - peers[0].process_stacks_epoch_at_tip(&stacks_block, µblocks); - - TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); - - for i in 1..peers.len() { - peers[i].next_burnchain_block_raw(burn_ops.clone()); - } - - let sn = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - block_data.push(( - sn.consensus_hash.clone(), - Some(stacks_block), - Some(microblocks), - )); - } - - // cap with an empty sortition, so the antientropy protocol picks up all stacks - // blocks - let (_, burn_header_hash, consensus_hash) = - peers[0].next_burnchain_block(vec![]); - for i in 1..peers.len() { - peers[i].next_burnchain_block_raw(vec![]); - } - let sn = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - block_data.push((sn.consensus_hash.clone(), None, None)); - - block_data - }, - |ref mut peers| { - for peer in peers.iter_mut() { - // force peers to keep trying to process buffered data - peer.network.burnchain_tip.burn_header_hash = - BurnchainHeaderHash([0u8; 32]); - } - - let tip_opt = peers[1] - .with_db_state(|sortdb, chainstate, _, _| { - let tip_opt = NakamotoChainState::get_canonical_block_header( - chainstate.db(), - sortdb, - ) - .unwrap(); - Ok(tip_opt) - }) - .unwrap(); - }, - |ref peer| { - // check peer health - // nothing should break - // TODO - true - }, - |_| true, - ); - }) - } - - #[test] - #[ignore] - fn test_get_blocks_and_microblocks_2_peers_buffered_messages() { - with_timeout(600, move || { - let sortitions = RefCell::new(vec![]); - let blocks_and_microblocks = RefCell::new(vec![]); - let idx = RefCell::new(0usize); - let pushed_idx = RefCell::new(0usize); - run_get_blocks_and_microblocks( - "test_get_blocks_and_microblocks_2_peers_buffered_messages", - 4242, - 2, - |ref mut peer_configs| { - // build initial network topology. 
- assert_eq!(peer_configs.len(), 2); - - // peer 0 mines blocks, but it does not present its inventory. - peer_configs[0].connection_opts.disable_inv_chat = true; - peer_configs[0].connection_opts.disable_block_download = true; - - peer_configs[1].connection_opts.disable_block_download = true; - peer_configs[1].connection_opts.disable_block_advertisement = true; - - // disable nat punches -- disconnect/reconnect - // clears inv state - peer_configs[0].connection_opts.disable_natpunch = true; - peer_configs[1].connection_opts.disable_natpunch = true; - - // peer 0 ignores peer 1's handshakes - peer_configs[0].connection_opts.disable_inbound_handshakes = true; - - // disable anti-entropy - peer_configs[0].connection_opts.max_block_push = 0; - peer_configs[0].connection_opts.max_microblock_push = 0; - - let peer_0 = peer_configs[0].to_neighbor(); - let peer_1 = peer_configs[1].to_neighbor(); - - // peer 0 is inbound to peer 1 - peer_configs[0].add_neighbor(&peer_1); - peer_configs[1].add_neighbor(&peer_0); - }, - |num_blocks, ref mut peers| { - let tip = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - let this_reward_cycle = peers[0] - .config - .burnchain - .block_height_to_reward_cycle(tip.block_height) - .unwrap(); - - // build up block data to replicate - let mut block_data = vec![]; - for block_num in 0..num_blocks { - let tip = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - let (mut burn_ops, stacks_block, microblocks) = - peers[0].make_default_tenure(); - - let (_, burn_header_hash, consensus_hash) = - peers[0].next_burnchain_block(burn_ops.clone()); - peers[0].process_stacks_epoch_at_tip(&stacks_block, µblocks); - - TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); - - if block_num == 0 { - for i in 1..peers.len() { - peers[i].next_burnchain_block_raw(burn_ops.clone()); - peers[i].process_stacks_epoch_at_tip(&stacks_block, 
µblocks); - } - } else { - let mut all_sortitions = sortitions.borrow_mut(); - all_sortitions.push(burn_ops.clone()); - } - - let sn = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - block_data.push(( - sn.consensus_hash.clone(), - Some(stacks_block), - Some(microblocks), - )); - } - *blocks_and_microblocks.borrow_mut() = block_data.clone()[1..] - .to_vec() - .drain(..) - .map(|(ch, blk_opt, mblocks_opt)| { - (ch, blk_opt.unwrap(), mblocks_opt.unwrap()) - }) - .collect(); - block_data - }, - |ref mut peers| { - for peer in peers.iter_mut() { - // force peers to keep trying to process buffered data - peer.network.burnchain_tip.burn_header_hash = - BurnchainHeaderHash([0u8; 32]); - } - - let mut i = idx.borrow_mut(); - let mut pushed_i = pushed_idx.borrow_mut(); - let all_sortitions = sortitions.borrow(); - let all_blocks_and_microblocks = blocks_and_microblocks.borrow(); - let peer_0_nk = peers[0].to_neighbor().addr; - let peer_1_nk = peers[1].to_neighbor().addr; - - let tip_opt = peers[1] - .with_db_state(|sortdb, chainstate, _, _| { - let tip_opt = NakamotoChainState::get_canonical_block_header( - chainstate.db(), - sortdb, - ) - .unwrap(); - Ok(tip_opt) - }) - .unwrap(); - - if !is_peer_connected(&peers[0], &peer_1_nk) { - debug!("Peer 0 not connected to peer 1"); - return; - } - - if let Some(tip) = tip_opt { - debug!( - "Push at {}, need {}", - tip.anchored_header.height() - - peers[1].config.burnchain.first_block_height - - 1, - *pushed_i - ); - if tip.anchored_header.height() - - peers[1].config.burnchain.first_block_height - - 1 - == *pushed_i as u64 - { - // next block - push_block( - &mut peers[0], - &peer_1_nk, - vec![], - (*all_blocks_and_microblocks)[*pushed_i].0.clone(), - (*all_blocks_and_microblocks)[*pushed_i].1.clone(), - ); - push_microblocks( - &mut peers[0], - &peer_1_nk, - vec![], - (*all_blocks_and_microblocks)[*pushed_i].0.clone(), - 
(*all_blocks_and_microblocks)[*pushed_i].1.block_hash(), - (*all_blocks_and_microblocks)[*pushed_i].2.clone(), - ); - *pushed_i += 1; - } - debug!( - "Sortition at {}, need {}", - tip.anchored_header.height() - - peers[1].config.burnchain.first_block_height - - 1, - *i - ); - if tip.anchored_header.height() - - peers[1].config.burnchain.first_block_height - - 1 - == *i as u64 - { - let event_id = { - let mut ret = 0; - for (nk, event_id) in peers[1].network.events.iter() { - ret = *event_id; - break; - } - if ret == 0 { - return; - } - ret - }; - let mut update_sortition = false; - for (event_id, pending) in peers[1].network.pending_messages.iter() { - debug!("Pending at {} is ({}, {})", *i, event_id, pending.len()); - if pending.len() >= 1 { - update_sortition = true; - } - } - if update_sortition { - debug!("Advance sortition!"); - peers[1].next_burnchain_block_raw((*all_sortitions)[*i].clone()); - *i += 1; - } - } - } - }, - |ref peer| { - // check peer health - // nothing should break - // TODO - true - }, - |_| true, - ); - }) - } - - pub fn make_contract_tx( - sender: &StacksPrivateKey, - cur_nonce: u64, - tx_fee: u64, - name: &str, - contract: &str, - ) -> StacksTransaction { - let sender_spending_condition = TransactionSpendingCondition::new_singlesig_p2pkh( - StacksPublicKey::from_private(sender), - ) - .expect("Failed to create p2pkh spending condition from public key."); - - let spending_auth = TransactionAuth::Standard(sender_spending_condition); - - let mut tx_contract = StacksTransaction::new( - TransactionVersion::Testnet, - spending_auth.clone(), - TransactionPayload::new_smart_contract(&name.to_string(), &contract.to_string(), None) - .unwrap(), - ); - - tx_contract.chain_id = 0x80000000; - tx_contract.auth.set_origin_nonce(cur_nonce); - tx_contract.set_tx_fee(tx_fee); - - let mut tx_signer = StacksTransactionSigner::new(&tx_contract); - tx_signer.sign_origin(sender).unwrap(); - - let tx_contract_signed = tx_signer.get_tx().unwrap(); - 
tx_contract_signed - } - - #[test] - fn test_static_problematic_tests() { - let spender_sk_1 = StacksPrivateKey::new(); - let spender_sk_2 = StacksPrivateKey::new(); - let spender_sk_3 = StacksPrivateKey::new(); - - let edge_repeat_factor = AST_CALL_STACK_DEPTH_BUFFER + (MAX_CALL_STACK_DEPTH as u64) - 1; - let tx_edge_body_start = "{ a : ".repeat(edge_repeat_factor as usize); - let tx_edge_body_end = "} ".repeat(edge_repeat_factor as usize); - let tx_edge_body = format!("{}u1 {}", tx_edge_body_start, tx_edge_body_end); - - let tx_edge = make_contract_tx( - &spender_sk_1, - 0, - (tx_edge_body.len() * 100) as u64, - "test-edge", - &tx_edge_body, - ); - - // something just over the limit of the expression depth - let exceeds_repeat_factor = edge_repeat_factor + 1; - let tx_exceeds_body_start = "{ a : ".repeat(exceeds_repeat_factor as usize); - let tx_exceeds_body_end = "} ".repeat(exceeds_repeat_factor as usize); - let tx_exceeds_body = format!("{}u1 {}", tx_exceeds_body_start, tx_exceeds_body_end); - - let tx_exceeds = make_contract_tx( - &spender_sk_2, - 0, - (tx_exceeds_body.len() * 100) as u64, - "test-exceeds", - &tx_exceeds_body, - ); - - // something stupidly high over the expression depth - let high_repeat_factor = 128 * 1024; - let tx_high_body_start = "{ a : ".repeat(high_repeat_factor as usize); - let tx_high_body_end = "} ".repeat(high_repeat_factor as usize); - let tx_high_body = format!("{}u1 {}", tx_high_body_start, tx_high_body_end); - - let tx_high = make_contract_tx( - &spender_sk_3, - 0, - (tx_high_body.len() * 100) as u64, - "test-high", - &tx_high_body, - ); - assert!(Relayer::static_check_problematic_relayed_tx( - false, - StacksEpochId::Epoch2_05, - &tx_edge, - ASTRules::Typical - ) - .is_ok()); - assert!(Relayer::static_check_problematic_relayed_tx( - false, - StacksEpochId::Epoch2_05, - &tx_exceeds, - ASTRules::Typical - ) - .is_ok()); - assert!(Relayer::static_check_problematic_relayed_tx( - false, - StacksEpochId::Epoch2_05, - &tx_high, - 
ASTRules::Typical - ) - .is_ok()); - - assert!(Relayer::static_check_problematic_relayed_tx( - false, - StacksEpochId::Epoch2_05, - &tx_edge, - ASTRules::Typical - ) - .is_ok()); - assert!(!Relayer::static_check_problematic_relayed_tx( - false, - StacksEpochId::Epoch2_05, - &tx_exceeds, - ASTRules::PrecheckSize - ) - .is_ok()); - assert!(!Relayer::static_check_problematic_relayed_tx( - false, - StacksEpochId::Epoch2_05, - &tx_high, - ASTRules::PrecheckSize - ) - .is_ok()); - } - - #[test] - fn process_new_blocks_rejects_problematic_asts() { - let privk = StacksPrivateKey::from_hex( - "42faca653724860da7a41bfcef7e6ba78db55146f6900de8cb2a9f760ffac70c01", - ) - .unwrap(); - let addr = StacksAddress::from_public_keys( - C32_ADDRESS_VERSION_TESTNET_SINGLESIG, - &AddressHashMode::SerializeP2PKH, - 1, - &vec![StacksPublicKey::from_private(&privk)], - ) - .unwrap(); - - let initial_balances = vec![(addr.to_account_principal(), 100000000000)]; - - let mut peer_config = TestPeerConfig::new(function_name!(), 32019, 32020); - peer_config.initial_balances = initial_balances; - peer_config.epochs = Some(vec![ - StacksEpoch { - epoch_id: StacksEpochId::Epoch20, - start_height: 0, - end_height: 1, - block_limit: ExecutionCost::max_value(), - network_epoch: PEER_VERSION_EPOCH_2_0, - }, - StacksEpoch { - epoch_id: StacksEpochId::Epoch2_05, - start_height: 1, - end_height: i64::MAX as u64, - block_limit: ExecutionCost::max_value(), - network_epoch: PEER_VERSION_EPOCH_2_05, - }, - ]); - let burnchain = peer_config.burnchain.clone(); - - // activate new AST rules right away - let mut peer = TestPeer::new(peer_config); - let mut sortdb = peer.sortdb.take().unwrap(); - { - let mut tx = sortdb - .tx_begin() - .expect("FATAL: failed to begin tx on sortition DB"); - SortitionDB::override_ast_rule_height(&mut tx, ASTRules::PrecheckSize, 1) - .expect("FATAL: failed to override AST PrecheckSize rule height"); - tx.commit() - .expect("FATAL: failed to commit sortition DB transaction"); - } - 
peer.sortdb = Some(sortdb); - - let chainstate_path = peer.chainstate_path.clone(); - - let first_stacks_block_height = { - let sn = - SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); - sn.block_height - }; - - let recipient_addr_str = "ST1RFD5Q2QPK3E0F08HG9XDX7SSC7CNRS0QR0SGEV"; - let recipient = StacksAddress::from_string(recipient_addr_str).unwrap(); - - let high_repeat_factor = 128 * 1024; - let tx_high_body_start = "{ a : ".repeat(high_repeat_factor as usize); - let tx_high_body_end = "} ".repeat(high_repeat_factor as usize); - let tx_high_body = format!("{}u1 {}", tx_high_body_start, tx_high_body_end); - - let bad_tx = make_contract_tx( - &privk, - 0, - (tx_high_body.len() * 100) as u64, - "test-high", - &tx_high_body, - ); - let bad_txid = bad_tx.txid(); - let bad_tx_len = { - let mut bytes = vec![]; - bad_tx.consensus_serialize(&mut bytes).unwrap(); - bytes.len() as u64 - }; - - let tip = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); - - let mblock_privk = StacksPrivateKey::new(); - - // make one tenure with a valid block, but problematic microblocks - let (burn_ops, block, microblocks) = peer.make_tenure( - |ref mut miner, - ref mut sortdb, - ref mut chainstate, - vrf_proof, - ref parent_opt, - ref parent_microblock_header_opt| { - let parent_tip = match parent_opt { - None => StacksChainState::get_genesis_header_info(chainstate.db()).unwrap(), - Some(block) => { - let ic = sortdb.index_conn(); - let snapshot = SortitionDB::get_block_snapshot_for_winning_stacks_block( - &ic, - &tip.sortition_id, - &block.block_hash(), - ) - .unwrap() - .unwrap(); // succeeds because we don't fork - StacksChainState::get_anchored_block_header_info( - chainstate.db(), - &snapshot.consensus_hash, - &snapshot.winning_stacks_block_hash, - ) - .unwrap() - .unwrap() - } - }; - - let parent_header_hash = parent_tip.anchored_header.block_hash(); - let parent_consensus_hash = 
parent_tip.consensus_hash.clone(); - let coinbase_tx = make_coinbase(miner, 0); - - let block_builder = StacksBlockBuilder::make_regtest_block_builder( - &burnchain, - &parent_tip, - vrf_proof.clone(), - tip.total_burn, - Hash160::from_node_public_key(&StacksPublicKey::from_private(&mblock_privk)), - ) - .unwrap(); - - let block = StacksBlockBuilder::make_anchored_block_from_txs( - block_builder, - chainstate, - &sortdb.index_conn(), - vec![coinbase_tx.clone()], - ) - .unwrap() - .0; - - (block, vec![]) - }, - ); - - let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); - peer.process_stacks_epoch(&block, &consensus_hash, &vec![]); - - let tip = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); - - let (burn_ops, bad_block, mut microblocks) = peer.make_tenure( - |ref mut miner, - ref mut sortdb, - ref mut chainstate, - vrf_proof, - ref parent_opt, - ref parent_microblock_header_opt| { - let parent_tip = match parent_opt { - None => StacksChainState::get_genesis_header_info(chainstate.db()).unwrap(), - Some(block) => { - let ic = sortdb.index_conn(); - let snapshot = SortitionDB::get_block_snapshot_for_winning_stacks_block( - &ic, - &tip.sortition_id, - &block.block_hash(), - ) - .unwrap() - .unwrap(); // succeeds because we don't fork - StacksChainState::get_anchored_block_header_info( - chainstate.db(), - &snapshot.consensus_hash, - &snapshot.winning_stacks_block_hash, - ) - .unwrap() - .unwrap() - } - }; - - let parent_header_hash = parent_tip.anchored_header.block_hash(); - let parent_consensus_hash = parent_tip.consensus_hash.clone(); - let parent_index_hash = StacksBlockHeader::make_index_block_hash( - &parent_consensus_hash, - &parent_header_hash, - ); - let coinbase_tx = make_coinbase(miner, 0); - - let mblock_privk = miner.next_microblock_privkey(); - let block_builder = StacksBlockBuilder::make_regtest_block_builder( - &burnchain, - &parent_tip, - vrf_proof.clone(), - tip.total_burn, - 
Hash160::from_node_public_key(&StacksPublicKey::from_private(&mblock_privk)), - ) - .unwrap(); - - // this tx would be problematic without our checks - if let Err(ChainstateError::ProblematicTransaction(txid)) = - StacksBlockBuilder::make_anchored_block_from_txs( - block_builder, - chainstate, - &sortdb.index_conn(), - vec![coinbase_tx.clone(), bad_tx.clone()], - ) - { - assert_eq!(txid, bad_txid); - } else { - panic!("Did not get Error::ProblematicTransaction"); - } - - // make a bad block anyway - // don't worry about the state root - let block_builder = StacksBlockBuilder::make_regtest_block_builder( - &burnchain, - &parent_tip, - vrf_proof.clone(), - tip.total_burn, - Hash160::from_node_public_key(&StacksPublicKey::from_private(&mblock_privk)), - ) - .unwrap(); - let bad_block = StacksBlockBuilder::make_anchored_block_from_txs( - block_builder, - chainstate, - &sortdb.index_conn(), - vec![coinbase_tx.clone()], - ) - .unwrap(); - - let mut bad_block = bad_block.0; - bad_block.txs.push(bad_tx.clone()); - - let txid_vecs = bad_block - .txs - .iter() - .map(|tx| tx.txid().as_bytes().to_vec()) - .collect(); - - let merkle_tree = MerkleTree::::new(&txid_vecs); - bad_block.header.tx_merkle_root = merkle_tree.root(); - - let sort_ic = sortdb.index_conn(); - chainstate - .reload_unconfirmed_state(&sort_ic, parent_index_hash.clone()) - .unwrap(); - - // make a bad microblock - let mut microblock_builder = StacksMicroblockBuilder::new( - parent_header_hash.clone(), - parent_consensus_hash.clone(), - chainstate, - &sort_ic, - BlockBuilderSettings::max_value(), - ) - .unwrap(); - - // miner should fail with just the bad tx, since it's problematic - let mblock_err = microblock_builder - .mine_next_microblock_from_txs( - vec![(bad_tx.clone(), bad_tx_len)], - &mblock_privk, - ) - .unwrap_err(); - if let ChainstateError::NoTransactionsToMine = mblock_err { - } else { - panic!("Did not get NoTransactionsToMine"); - } - - let token_transfer = make_user_stacks_transfer( - &privk, 
- 0, - 200, - &recipient.to_account_principal(), - 123, - ); - let tt_len = { - let mut bytes = vec![]; - token_transfer.consensus_serialize(&mut bytes).unwrap(); - bytes.len() as u64 - }; - - let mut bad_mblock = microblock_builder - .mine_next_microblock_from_txs( - vec![(token_transfer, tt_len), (bad_tx.clone(), bad_tx_len)], - &mblock_privk, - ) - .unwrap(); - - // miner shouldn't include the bad tx, since it's problematic - assert_eq!(bad_mblock.txs.len(), 1); - bad_mblock.txs.push(bad_tx.clone()); - - // force it in anyway - let txid_vecs = bad_mblock - .txs - .iter() - .map(|tx| tx.txid().as_bytes().to_vec()) - .collect(); - - let merkle_tree = MerkleTree::::new(&txid_vecs); - bad_mblock.header.tx_merkle_root = merkle_tree.root(); - bad_mblock.sign(&mblock_privk).unwrap(); - - (bad_block, vec![bad_mblock]) - }, - ); - - let bad_mblock = microblocks.pop().unwrap(); - let (_, _, new_consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); - peer.process_stacks_epoch(&bad_block, &new_consensus_hash, &vec![]); - - // stuff them all into each possible field of NetworkResult - // p2p messages - let nk = NeighborKey { - peer_version: 1, - network_id: 2, - addrbytes: PeerAddress([3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18]), - port: 19, - }; - let preamble = Preamble { - peer_version: 1, - network_id: 2, - seq: 3, - burn_block_height: 4, - burn_block_hash: BurnchainHeaderHash([5u8; 32]), - burn_stable_block_height: 6, - burn_stable_block_hash: BurnchainHeaderHash([7u8; 32]), - additional_data: 8, - signature: MessageSignature([9u8; 65]), - payload_len: 10, - }; - let bad_msgs = vec![ - StacksMessage { - preamble: preamble.clone(), - relayers: vec![], - payload: StacksMessageType::Blocks(BlocksData { - blocks: vec![BlocksDatum(new_consensus_hash.clone(), bad_block.clone())], - }), - }, - StacksMessage { - preamble: preamble.clone(), - relayers: vec![], - payload: StacksMessageType::Microblocks(MicroblocksData { - index_anchor_block: 
StacksBlockId::new( - &new_consensus_hash, - &bad_block.block_hash(), - ), - microblocks: vec![bad_mblock.clone()], - }), - }, - StacksMessage { - preamble: preamble.clone(), - relayers: vec![], - payload: StacksMessageType::Transaction(bad_tx.clone()), - }, - ]; - let mut unsolicited = HashMap::new(); - unsolicited.insert(nk.clone(), bad_msgs.clone()); - - let mut network_result = - NetworkResult::new(0, 0, 0, 0, ConsensusHash([0x01; 20]), HashMap::new()); - network_result.consume_unsolicited(unsolicited); - - assert!(network_result.has_blocks()); - assert!(network_result.has_microblocks()); - assert!(network_result.has_transactions()); - - network_result.consume_http_uploads( - bad_msgs - .into_iter() - .map(|msg| msg.payload) - .collect::>(), - ); - - assert!(network_result.has_blocks()); - assert!(network_result.has_microblocks()); - assert!(network_result.has_transactions()); - - assert_eq!(network_result.uploaded_transactions.len(), 1); - assert_eq!(network_result.uploaded_blocks.len(), 1); - assert_eq!(network_result.uploaded_microblocks.len(), 1); - assert_eq!(network_result.pushed_transactions.len(), 1); - assert_eq!(network_result.pushed_blocks.len(), 1); - assert_eq!(network_result.pushed_microblocks.len(), 1); - - network_result - .blocks - .push((new_consensus_hash.clone(), bad_block.clone(), 123)); - network_result.confirmed_microblocks.push(( - new_consensus_hash.clone(), - vec![bad_mblock.clone()], - 234, - )); - - let mut sortdb = peer.sortdb.take().unwrap(); - let (processed_blocks, processed_mblocks, relay_mblocks, bad_neighbors) = - Relayer::process_new_blocks( - &mut network_result, - &mut sortdb, - &mut peer.stacks_node.as_mut().unwrap().chainstate, - None, - ) - .unwrap(); - - // despite this data showing up in all aspects of the network result, none of it actually - // gets relayed - assert_eq!(processed_blocks.len(), 0); - assert_eq!(processed_mblocks.len(), 0); - assert_eq!(relay_mblocks.len(), 0); - assert_eq!(bad_neighbors.len(), 0); - - 
let txs_relayed = Relayer::process_transactions( - &mut network_result, - &sortdb, - &mut peer.stacks_node.as_mut().unwrap().chainstate, - &mut peer.mempool.as_mut().unwrap(), - None, - ) - .unwrap(); - assert_eq!(txs_relayed.len(), 0); - } - - #[test] - fn test_block_pay_to_contract_gated_at_v210() { - let mut peer_config = TestPeerConfig::new(function_name!(), 4246, 4247); - let epochs = vec![ - StacksEpoch { - epoch_id: StacksEpochId::Epoch10, - start_height: 0, - end_height: 0, - block_limit: ExecutionCost::max_value(), - network_epoch: PEER_VERSION_EPOCH_1_0, - }, - StacksEpoch { - epoch_id: StacksEpochId::Epoch20, - start_height: 0, - end_height: 0, - block_limit: ExecutionCost::max_value(), - network_epoch: PEER_VERSION_EPOCH_2_0, - }, - StacksEpoch { - epoch_id: StacksEpochId::Epoch2_05, - start_height: 0, - end_height: 28, // NOTE: the first 25 burnchain blocks have no sortition - block_limit: ExecutionCost::max_value(), - network_epoch: PEER_VERSION_EPOCH_2_05, - }, - StacksEpoch { - epoch_id: StacksEpochId::Epoch21, - start_height: 28, - end_height: STACKS_EPOCH_MAX, - block_limit: ExecutionCost::max_value(), - network_epoch: PEER_VERSION_EPOCH_2_1, - }, - ]; - peer_config.epochs = Some(epochs); - let burnchain = peer_config.burnchain.clone(); - - let mut peer = TestPeer::new(peer_config); - - let mut make_tenure = - |miner: &mut TestMiner, - sortdb: &mut SortitionDB, - chainstate: &mut StacksChainState, - vrfproof: VRFProof, - parent_opt: Option<&StacksBlock>, - microblock_parent_opt: Option<&StacksMicroblockHeader>| { - let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - - let stacks_tip_opt = - NakamotoChainState::get_canonical_block_header(chainstate.db(), sortdb) - .unwrap(); - let parent_tip = match stacks_tip_opt { - None => StacksChainState::get_genesis_header_info(chainstate.db()).unwrap(), - Some(header_tip) => { - let ic = sortdb.index_conn(); - let snapshot = SortitionDB::get_block_snapshot_for_winning_stacks_block( 
- &ic, - &tip.sortition_id, - &header_tip.anchored_header.block_hash(), - ) - .unwrap() - .unwrap(); // succeeds because we don't fork - StacksChainState::get_anchored_block_header_info( - chainstate.db(), - &snapshot.consensus_hash, - &snapshot.winning_stacks_block_hash, - ) - .unwrap() - .unwrap() - } - }; - - let parent_header_hash = parent_tip.anchored_header.block_hash(); - let parent_consensus_hash = parent_tip.consensus_hash.clone(); - let parent_index_hash = StacksBlockHeader::make_index_block_hash( - &parent_consensus_hash, - &parent_header_hash, - ); - - let coinbase_tx = make_coinbase_with_nonce( - miner, - parent_tip.stacks_block_height as usize, - 0, - Some(PrincipalData::Contract( - QualifiedContractIdentifier::parse("ST000000000000000000002AMW42H.bns") - .unwrap(), - )), - ); - - let mut mblock_pubkey_hash_bytes = [0u8; 20]; - mblock_pubkey_hash_bytes.copy_from_slice(&coinbase_tx.txid()[0..20]); - - let builder = StacksBlockBuilder::make_block_builder( - &burnchain, - chainstate.mainnet, - &parent_tip, - vrfproof, - tip.total_burn, - Hash160(mblock_pubkey_hash_bytes), - ) - .unwrap(); - - let anchored_block = StacksBlockBuilder::make_anchored_block_from_txs( - builder, - chainstate, - &sortdb.index_conn(), - vec![coinbase_tx], - ) - .unwrap(); - - (anchored_block.0, vec![]) - }; - - // tenures 26 and 27 should fail, since the block is a pay-to-contract block - // Pay-to-contract should only be supported if the block is in epoch 2.1, which - // activates at tenure 27. 
- for i in 0..2 { - let (burn_ops, stacks_block, microblocks) = peer.make_tenure(&mut make_tenure); - let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); - - let sortdb = peer.sortdb.take().unwrap(); - let mut node = peer.stacks_node.take().unwrap(); - match Relayer::process_new_anchored_block( - &sortdb.index_conn(), - &mut node.chainstate, - &consensus_hash, - &stacks_block, - 123, - ) { - Ok(x) => { - panic!("Stored pay-to-contract stacks block before epoch 2.1"); - } - Err(chainstate_error::InvalidStacksBlock(_)) => {} - Err(e) => { - panic!("Got unexpected error {:?}", &e); - } - }; - peer.sortdb = Some(sortdb); - peer.stacks_node = Some(node); - } - - // *now* it should succeed, since tenure 28 was in epoch 2.1 - let (burn_ops, stacks_block, microblocks) = peer.make_tenure(&mut make_tenure); - - let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); - - let sortdb = peer.sortdb.take().unwrap(); - let mut node = peer.stacks_node.take().unwrap(); - match Relayer::process_new_anchored_block( - &sortdb.index_conn(), - &mut node.chainstate, - &consensus_hash, - &stacks_block, - 123, - ) { - Ok(x) => { - assert!(x, "Failed to process valid pay-to-contract block"); - } - Err(e) => { - panic!("Got unexpected error {:?}", &e); - } - }; - peer.sortdb = Some(sortdb); - peer.stacks_node = Some(node); - } - - #[test] - fn test_block_versioned_smart_contract_gated_at_v210() { - let mut peer_config = TestPeerConfig::new(function_name!(), 4248, 4249); - - let initial_balances = vec![( - PrincipalData::from(peer_config.spending_account.origin_address().unwrap()), - 1000000, - )]; - - let epochs = vec![ - StacksEpoch { - epoch_id: StacksEpochId::Epoch10, - start_height: 0, - end_height: 0, - block_limit: ExecutionCost::max_value(), - network_epoch: PEER_VERSION_EPOCH_1_0, - }, - StacksEpoch { - epoch_id: StacksEpochId::Epoch20, - start_height: 0, - end_height: 0, - block_limit: ExecutionCost::max_value(), - network_epoch: 
PEER_VERSION_EPOCH_2_0, - }, - StacksEpoch { - epoch_id: StacksEpochId::Epoch2_05, - start_height: 0, - end_height: 28, // NOTE: the first 25 burnchain blocks have no sortition - block_limit: ExecutionCost::max_value(), - network_epoch: PEER_VERSION_EPOCH_2_05, - }, - StacksEpoch { - epoch_id: StacksEpochId::Epoch21, - start_height: 28, - end_height: STACKS_EPOCH_MAX, - block_limit: ExecutionCost::max_value(), - network_epoch: PEER_VERSION_EPOCH_2_1, - }, - ]; - - peer_config.epochs = Some(epochs); - peer_config.initial_balances = initial_balances; - let burnchain = peer_config.burnchain.clone(); - - let mut peer = TestPeer::new(peer_config); - - let mut make_tenure = - |miner: &mut TestMiner, - sortdb: &mut SortitionDB, - chainstate: &mut StacksChainState, - vrfproof: VRFProof, - parent_opt: Option<&StacksBlock>, - microblock_parent_opt: Option<&StacksMicroblockHeader>| { - let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - - let stacks_tip_opt = - NakamotoChainState::get_canonical_block_header(chainstate.db(), sortdb) - .unwrap(); - let parent_tip = match stacks_tip_opt { - None => StacksChainState::get_genesis_header_info(chainstate.db()).unwrap(), - Some(header_tip) => { - let ic = sortdb.index_conn(); - let snapshot = SortitionDB::get_block_snapshot_for_winning_stacks_block( - &ic, - &tip.sortition_id, - &header_tip.anchored_header.block_hash(), - ) - .unwrap() - .unwrap(); // succeeds because we don't fork - StacksChainState::get_anchored_block_header_info( - chainstate.db(), - &snapshot.consensus_hash, - &snapshot.winning_stacks_block_hash, - ) - .unwrap() - .unwrap() - } - }; - - let parent_header_hash = parent_tip.anchored_header.block_hash(); - let parent_consensus_hash = parent_tip.consensus_hash.clone(); - let parent_index_hash = StacksBlockHeader::make_index_block_hash( - &parent_consensus_hash, - &parent_header_hash, - ); - - let coinbase_tx = make_coinbase_with_nonce( - miner, - parent_tip.stacks_block_height as usize, - 
0, - None, - ); - - let versioned_contract = make_smart_contract_with_version( - miner, - 1, - tip.block_height.try_into().unwrap(), - 0, - Some(ClarityVersion::Clarity1), - Some(1000), - ); - - let mut mblock_pubkey_hash_bytes = [0u8; 20]; - mblock_pubkey_hash_bytes.copy_from_slice(&coinbase_tx.txid()[0..20]); - - let builder = StacksBlockBuilder::make_block_builder( - &burnchain, - chainstate.mainnet, - &parent_tip, - vrfproof, - tip.total_burn, - Hash160(mblock_pubkey_hash_bytes), - ) - .unwrap(); - - let anchored_block = StacksBlockBuilder::make_anchored_block_from_txs( - builder, - chainstate, - &sortdb.index_conn(), - vec![coinbase_tx, versioned_contract], - ) - .unwrap(); - - eprintln!("{:?}", &anchored_block.0); - (anchored_block.0, vec![]) - }; - - // tenures 26 and 27 should fail, since the block contains a versioned smart contract. - // Versioned smart contracts should only be supported if the block is in epoch 2.1, which - // activates at tenure 27. - for i in 0..2 { - let (burn_ops, stacks_block, microblocks) = peer.make_tenure(&mut make_tenure); - let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); - - let sortdb = peer.sortdb.take().unwrap(); - let mut node = peer.stacks_node.take().unwrap(); - match Relayer::process_new_anchored_block( - &sortdb.index_conn(), - &mut node.chainstate, - &consensus_hash, - &stacks_block, - 123, - ) { - Ok(x) => { - eprintln!("{:?}", &stacks_block); - panic!("Stored pay-to-contract stacks block before epoch 2.1"); - } - Err(chainstate_error::InvalidStacksBlock(_)) => {} - Err(e) => { - panic!("Got unexpected error {:?}", &e); - } - }; - peer.sortdb = Some(sortdb); - peer.stacks_node = Some(node); - } - - // *now* it should succeed, since tenure 28 was in epoch 2.1 - let (burn_ops, stacks_block, microblocks) = peer.make_tenure(&mut make_tenure); - - let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); - - let sortdb = peer.sortdb.take().unwrap(); - let mut node = 
peer.stacks_node.take().unwrap(); - match Relayer::process_new_anchored_block( - &sortdb.index_conn(), - &mut node.chainstate, - &consensus_hash, - &stacks_block, - 123, - ) { - Ok(x) => { - assert!(x, "Failed to process valid versioned smart contract block"); - } - Err(e) => { - panic!("Got unexpected error {:?}", &e); - } - }; - peer.sortdb = Some(sortdb); - peer.stacks_node = Some(node); - } - - #[test] - fn test_block_versioned_smart_contract_mempool_rejection_until_v210() { - let mut peer_config = TestPeerConfig::new(function_name!(), 4250, 4251); - - let initial_balances = vec![( - PrincipalData::from(peer_config.spending_account.origin_address().unwrap()), - 1000000, - )]; - - let epochs = vec![ - StacksEpoch { - epoch_id: StacksEpochId::Epoch10, - start_height: 0, - end_height: 0, - block_limit: ExecutionCost::max_value(), - network_epoch: PEER_VERSION_EPOCH_1_0, - }, - StacksEpoch { - epoch_id: StacksEpochId::Epoch20, - start_height: 0, - end_height: 0, - block_limit: ExecutionCost::max_value(), - network_epoch: PEER_VERSION_EPOCH_2_0, - }, - StacksEpoch { - epoch_id: StacksEpochId::Epoch2_05, - start_height: 0, - end_height: 28, // NOTE: the first 25 burnchain blocks have no sortition - block_limit: ExecutionCost::max_value(), - network_epoch: PEER_VERSION_EPOCH_2_05, - }, - StacksEpoch { - epoch_id: StacksEpochId::Epoch21, - start_height: 28, - end_height: STACKS_EPOCH_MAX, - block_limit: ExecutionCost::max_value(), - network_epoch: PEER_VERSION_EPOCH_2_1, - }, - ]; - - peer_config.epochs = Some(epochs); - peer_config.initial_balances = initial_balances; - let burnchain = peer_config.burnchain.clone(); - - let mut peer = TestPeer::new(peer_config); - let versioned_contract_opt: RefCell> = RefCell::new(None); - let nonce: RefCell = RefCell::new(0); - - let mut make_tenure = - |miner: &mut TestMiner, - sortdb: &mut SortitionDB, - chainstate: &mut StacksChainState, - vrfproof: VRFProof, - parent_opt: Option<&StacksBlock>, - microblock_parent_opt: 
Option<&StacksMicroblockHeader>| { - let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - - let stacks_tip_opt = - NakamotoChainState::get_canonical_block_header(chainstate.db(), sortdb) - .unwrap(); - let parent_tip = match stacks_tip_opt { - None => StacksChainState::get_genesis_header_info(chainstate.db()).unwrap(), - Some(header_tip) => { - let ic = sortdb.index_conn(); - let snapshot = SortitionDB::get_block_snapshot_for_winning_stacks_block( - &ic, - &tip.sortition_id, - &header_tip.anchored_header.block_hash(), - ) - .unwrap() - .unwrap(); // succeeds because we don't fork - StacksChainState::get_anchored_block_header_info( - chainstate.db(), - &snapshot.consensus_hash, - &snapshot.winning_stacks_block_hash, - ) - .unwrap() - .unwrap() - } - }; - - let parent_header_hash = parent_tip.anchored_header.block_hash(); - let parent_consensus_hash = parent_tip.consensus_hash.clone(); - let parent_index_hash = StacksBlockHeader::make_index_block_hash( - &parent_consensus_hash, - &parent_header_hash, - ); - - let next_nonce = *nonce.borrow(); - let coinbase_tx = make_coinbase_with_nonce( - miner, - parent_tip.stacks_block_height as usize, - next_nonce, - None, - ); - - let versioned_contract = make_smart_contract_with_version( - miner, - next_nonce + 1, - tip.block_height.try_into().unwrap(), - 0, - Some(ClarityVersion::Clarity1), - Some(1000), - ); - - *versioned_contract_opt.borrow_mut() = Some(versioned_contract); - *nonce.borrow_mut() = next_nonce + 1; - - let mut mblock_pubkey_hash_bytes = [0u8; 20]; - mblock_pubkey_hash_bytes.copy_from_slice(&coinbase_tx.txid()[0..20]); - - let builder = StacksBlockBuilder::make_block_builder( - &burnchain, - chainstate.mainnet, - &parent_tip, - vrfproof, - tip.total_burn, - Hash160(mblock_pubkey_hash_bytes), - ) - .unwrap(); - - let anchored_block = StacksBlockBuilder::make_anchored_block_from_txs( - builder, - chainstate, - &sortdb.index_conn(), - vec![coinbase_tx], - ) - .unwrap(); - - 
eprintln!("{:?}", &anchored_block.0); - (anchored_block.0, vec![]) - }; - - for i in 0..2 { - let (burn_ops, stacks_block, microblocks) = peer.make_tenure(&mut make_tenure); - let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); - - let sortdb = peer.sortdb.take().unwrap(); - let mut node = peer.stacks_node.take().unwrap(); - - // the empty block should be accepted - match Relayer::process_new_anchored_block( - &sortdb.index_conn(), - &mut node.chainstate, - &consensus_hash, - &stacks_block, - 123, - ) { - Ok(x) => { - assert!(x, "Did not accept valid block"); - } - Err(e) => { - panic!("Got unexpected error {:?}", &e); - } - }; - - // process it - peer.coord.handle_new_stacks_block().unwrap(); - - // the mempool would reject a versioned contract transaction, since we're not yet at - // tenure 28 - let versioned_contract = (*versioned_contract_opt.borrow()).clone().unwrap(); - let versioned_contract_len = versioned_contract.serialize_to_vec().len(); - match node.chainstate.will_admit_mempool_tx( - &sortdb.index_conn(), - &consensus_hash, - &stacks_block.block_hash(), - &versioned_contract, - versioned_contract_len as u64, - ) { - Err(MemPoolRejection::Other(msg)) => { - assert!(msg.find("not supported in this epoch").is_some()); - } - Err(e) => { - panic!("will_admit_mempool_tx {:?}", &e); - } - Ok(_) => { - panic!("will_admit_mempool_tx succeeded"); - } - }; - - peer.sortdb = Some(sortdb); - peer.stacks_node = Some(node); - } - - // *now* it should succeed, since tenure 28 was in epoch 2.1 - let (burn_ops, stacks_block, microblocks) = peer.make_tenure(&mut make_tenure); - let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); - - let sortdb = peer.sortdb.take().unwrap(); - let mut node = peer.stacks_node.take().unwrap(); - match Relayer::process_new_anchored_block( - &sortdb.index_conn(), - &mut node.chainstate, - &consensus_hash, - &stacks_block, - 123, - ) { - Ok(x) => { - assert!(x, "Failed to process valid versioned 
smart contract block"); - } - Err(e) => { - panic!("Got unexpected error {:?}", &e); - } - }; - - // process it - peer.coord.handle_new_stacks_block().unwrap(); - - // the mempool would accept a versioned contract transaction, since we're not yet at - // tenure 28 - let versioned_contract = (*versioned_contract_opt.borrow()).clone().unwrap(); - let versioned_contract_len = versioned_contract.serialize_to_vec().len(); - match node.chainstate.will_admit_mempool_tx( - &sortdb.index_conn(), - &consensus_hash, - &stacks_block.block_hash(), - &versioned_contract, - versioned_contract_len as u64, - ) { - Err(e) => { - panic!("will_admit_mempool_tx {:?}", &e); - } - Ok(_) => {} - }; - - peer.sortdb = Some(sortdb); - peer.stacks_node = Some(node); - } - - // TODO: process bans - // TODO: test sending invalid blocks-available and microblocks-available (should result in a ban) - // TODO: test sending invalid transactions (should result in a ban) - // TODO: test bandwidth limits (sending too much should result in a nack, and then a ban) -} +pub mod test {} From 4934b345b1a9e7bf57dad357e2e97410fad99396 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 6 Jun 2024 13:22:38 -0400 Subject: [PATCH 04/20] feat: unit test coverage for Nakamoto unsolicited message handling and block-push --- stackslib/src/net/tests/relay/nakamoto.rs | 954 ++++++++++++++++++++++ 1 file changed, 954 insertions(+) create mode 100644 stackslib/src/net/tests/relay/nakamoto.rs diff --git a/stackslib/src/net/tests/relay/nakamoto.rs b/stackslib/src/net/tests/relay/nakamoto.rs new file mode 100644 index 0000000000..2f52654ae9 --- /dev/null +++ b/stackslib/src/net/tests/relay/nakamoto.rs @@ -0,0 +1,954 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software 
Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::collections::{HashMap, VecDeque}; +use std::sync::mpsc::{sync_channel, Receiver, SyncSender, TryRecvError}; +use std::thread; +use std::thread::JoinHandle; + +use clarity::vm::ast::stack_depth_checker::AST_CALL_STACK_DEPTH_BUFFER; +use clarity::vm::ast::ASTRules; +use clarity::vm::costs::LimitedCostTracker; +use clarity::vm::database::ClarityDatabase; +use clarity::vm::types::QualifiedContractIdentifier; +use clarity::vm::{ClarityVersion, MAX_CALL_STACK_DEPTH}; +use rand::Rng; +use stacks_common::address::AddressHashMode; +use stacks_common::types::chainstate::{BlockHeaderHash, StacksBlockId, StacksWorkScore, TrieHash}; +use stacks_common::types::Address; +use stacks_common::util::hash::MerkleTree; +use stacks_common::util::sleep_ms; +use stacks_common::util::vrf::VRFProof; + +use super::*; +use crate::burnchains::bitcoin::indexer::BitcoinIndexer; +use crate::burnchains::tests::TestMiner; +use crate::chainstate::burn::operations::BlockstackOperationType; +use crate::chainstate::nakamoto::coordinator::tests::{ + make_all_signers_vote_for_aggregate_key, make_token_transfer, +}; +use crate::chainstate::nakamoto::tests::get_account; +use crate::chainstate::stacks::boot::test::{ + key_to_stacks_addr, make_pox_4_lockup, make_signer_key_signature, + make_signers_vote_for_aggregate_public_key, make_signers_vote_for_aggregate_public_key_value, + with_sortdb, +}; +use crate::chainstate::stacks::db::blocks::{MINIMUM_TX_FEE, MINIMUM_TX_FEE_RATE_PER_BYTE}; +use crate::chainstate::stacks::miner::{BlockBuilderSettings, 
StacksMicroblockBuilder}; +use crate::chainstate::stacks::test::codec_all_transactions; +use crate::chainstate::stacks::tests::{ + make_coinbase, make_coinbase_with_nonce, make_smart_contract_with_version, + make_user_stacks_transfer, TestStacksNode, +}; +use crate::chainstate::stacks::{Error as ChainstateError, *}; +use crate::clarity_vm::clarity::ClarityConnection; +use crate::core::*; +use crate::net::api::getinfo::RPCPeerInfoData; +use crate::net::asn::*; +use crate::net::chat::*; +use crate::net::codec::*; +use crate::net::download::*; +use crate::net::http::{HttpRequestContents, HttpRequestPreamble}; +use crate::net::httpcore::StacksHttpMessage; +use crate::net::inv::inv2x::*; +use crate::net::relay::{ProcessedNetReceipts, Relayer}; +use crate::net::test::*; +use crate::net::tests::download::epoch2x::run_get_blocks_and_microblocks; +use crate::net::tests::inv::nakamoto::make_nakamoto_peers_from_invs; +use crate::net::tests::relay::epoch2x::broadcast_message; +use crate::net::{Error as NetError, *}; +use crate::util_lib::test::*; + +/// Everything in a TestPeer, except the coordinator (which is encombered by the lifetime of its +/// chains coordinator's event observer) +struct ExitedPeer { + pub config: TestPeerConfig, + pub network: PeerNetwork, + pub sortdb: Option, + pub miner: TestMiner, + pub stacks_node: Option, + pub relayer: Relayer, + pub mempool: Option, + pub chainstate_path: String, + pub indexer: Option, +} + +impl ExitedPeer { + /// Instantiate the exited peer from the TestPeer + fn from_test_peer(peer: TestPeer) -> Self { + Self { + config: peer.config, + network: peer.network, + sortdb: peer.sortdb, + miner: peer.miner, + stacks_node: peer.stacks_node, + relayer: peer.relayer, + mempool: peer.mempool, + chainstate_path: peer.chainstate_path, + indexer: peer.indexer, + } + } + + /// Run the network stack of the exited peer, but no more block processing will take place. 
+ pub fn run_with_ibd( + &mut self, + ibd: bool, + dns_client: Option<&mut DNSClient>, + ) -> Result<(NetworkResult, ProcessedNetReceipts), NetError> { + let mut sortdb = self.sortdb.take().unwrap(); + let mut stacks_node = self.stacks_node.take().unwrap(); + let mut mempool = self.mempool.take().unwrap(); + let indexer = self.indexer.take().unwrap(); + + let net_result = self.network.run( + &indexer, + &mut sortdb, + &mut stacks_node.chainstate, + &mut mempool, + dns_client, + false, + ibd, + 100, + &RPCHandlerArgs::default(), + )?; + let receipts_res = self.relayer.process_network_result( + self.network.get_local_peer(), + &mut net_result.clone(), + &mut sortdb, + &mut stacks_node.chainstate, + &mut mempool, + ibd, + None, + None, + ); + + self.sortdb = Some(sortdb); + self.stacks_node = Some(stacks_node); + self.mempool = Some(mempool); + self.indexer = Some(indexer); + + receipts_res.and_then(|receipts| Ok((net_result, receipts))) + } +} + +/// Messages passed to the unit test from the seed node thread +enum SeedData { + BurnOps(Vec, ConsensusHash), + Blocks(Vec), + Exit(ExitedPeer), +} + +/// Messages passed from the unit test to the seed node thread +#[derive(Clone, Debug, PartialEq)] +enum SeedCommand { + Exit, +} + +/// Communication channels from the unit test to the seed node thread +struct FollowerComms { + data_receiver: Receiver, + command_sender: SyncSender, +} + +impl FollowerComms { + pub fn send_exit(&mut self) { + self.command_sender + .send(SeedCommand::Exit) + .expect("FATAL: seed node hangup"); + } + + pub fn try_recv(&mut self) -> Option { + match self.data_receiver.try_recv() { + Ok(data) => Some(data), + Err(TryRecvError::Empty) => None, + Err(_) => { + panic!("FATAL: seed node hangup"); + } + } + } +} + +/// Communication channels from the seed node thread to the unit test +struct SeedComms { + data_sender: SyncSender, + command_receiver: Receiver, +} + +struct SeedNode {} + +impl SeedNode { + /// Have `peer` produce two reward cycles of 
length `rc_len`, and forward all sortitions and + /// Nakamoto blocks back to the unit test. This consumes `peer`. + /// + /// The `peer` will process its blocks locally, and _push_ them to one or more followers. The + /// `peer` will wait for there to be at least one network conversation open before advancing, + /// thereby ensuring reliable delivery of the Nakamoto blocks to at least one follower. In + /// addition, the blocks and sortitions will be sent to the unit test via `comms`. + /// + /// The contents of `peer` will be sent back to the unit test via an `ExitedPeer` struct, so + /// the unit test can query it or even run its networking stack. + pub fn main(mut peer: TestPeer, rc_len: u64, comms: SeedComms) { + let private_key = StacksPrivateKey::from_seed(&[2]); + let addr = StacksAddress::from_public_keys( + C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + &AddressHashMode::SerializeP2PKH, + 1, + &vec![StacksPublicKey::from_private(&private_key)], + ) + .unwrap(); + + let mut test_signers = peer.config.test_signers.take().unwrap(); + let test_stackers = peer.config.test_stackers.take().unwrap(); + + let mut all_blocks: Vec = vec![]; + let mut all_burn_ops = vec![]; + let mut rc_blocks = vec![]; + let mut rc_burn_ops = vec![]; + + // have the peer mine some blocks for two reward cycles + for i in 0..(2 * rc_len) { + debug!("Tenure {}", i); + let (burn_ops, mut tenure_change, miner_key) = + peer.begin_nakamoto_tenure(TenureChangeCause::BlockFound); + let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); + + // pass along to the follower + if comms + .data_sender + .send(SeedData::BurnOps(burn_ops.clone(), consensus_hash.clone())) + .is_err() + { + warn!("Follower disconnected"); + break; + } + + let vrf_proof = peer.make_nakamoto_vrf_proof(miner_key); + + tenure_change.tenure_consensus_hash = consensus_hash.clone(); + tenure_change.burn_view_consensus_hash = consensus_hash.clone(); + + let tenure_change_tx = peer + .miner + 
.make_nakamoto_tenure_change(tenure_change.clone()); + let coinbase_tx = peer.miner.make_nakamoto_coinbase(None, vrf_proof); + + debug!("Next burnchain block: {}", &consensus_hash); + + let num_blocks: usize = (thread_rng().gen::() % 10) + 1; + + let block_height = peer.get_burn_block_height(); + + // If we are in the prepare phase, check if we need to generate + // aggregate key votes + let txs = if peer.config.burnchain.is_in_prepare_phase(block_height) { + let cycle_id = peer + .config + .burnchain + .block_height_to_reward_cycle(block_height) + .unwrap(); + let next_cycle_id = cycle_id as u128 + 1; + + with_sortdb(&mut peer, |chainstate, sortdb| { + if let Some(tip) = all_blocks.last() { + // TODO: remove once #4796 closes + make_all_signers_vote_for_aggregate_key( + chainstate, + sortdb, + &tip.block_id(), + &mut test_signers, + &test_stackers, + next_cycle_id, + ) + } else { + vec![] + } + }) + } else { + vec![] + }; + + // do a stx transfer in each block to a given recipient + let recipient_addr = + StacksAddress::from_string("ST2YM3J4KQK09V670TD6ZZ1XYNYCNGCWCVTASN5VM").unwrap(); + let blocks_and_sizes = peer.make_nakamoto_tenure( + tenure_change_tx, + coinbase_tx, + &mut test_signers, + |miner, chainstate, sortdb, blocks_so_far| { + // Include the aggregate key voting transactions in the first block. 
+ let mut txs = if blocks_so_far.is_empty() { + txs.clone() + } else { + vec![] + }; + + if blocks_so_far.len() < num_blocks { + debug!("\n\nProduce block {}\n\n", all_blocks.len()); + + let account = get_account(chainstate, sortdb, &addr); + + let stx_transfer = make_token_transfer( + chainstate, + sortdb, + &private_key, + account.nonce, + 100, + 1, + &recipient_addr, + ); + txs.push(stx_transfer); + } + txs + }, + ); + + let mut blocks: Vec = blocks_and_sizes + .into_iter() + .map(|(block, _, _)| block) + .collect(); + + // run network state machine until we have a connection + loop { + let network_result_res = peer.run_with_ibd(false, None); + if let Ok((network_result, _)) = network_result_res { + if network_result.num_connected_peers > 0 { + break; + } + } + } + + // relay these blocks + let local_peer = peer.network.get_local_peer().clone(); + let sortdb = peer.sortdb.take().unwrap(); + let stacks_node = peer.stacks_node.take().unwrap(); + + peer.relayer.relay_epoch3_blocks( + &local_peer, + &sortdb, + &stacks_node.chainstate, + vec![(vec![], blocks.clone())], + true, + ); + + peer.sortdb = Some(sortdb); + peer.stacks_node = Some(stacks_node); + + // send the blocks to the unit test as well + if comms + .data_sender + .send(SeedData::Blocks(blocks.clone())) + .is_err() + { + warn!("Follower disconnected"); + break; + } + + // if we're starting a new reward cycle, then save the current one + let tip = { + let sort_db = peer.sortdb.as_mut().unwrap(); + SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap() + }; + if peer + .config + .burnchain + .is_reward_cycle_start(tip.block_height) + { + rc_blocks.push(all_blocks.clone()); + rc_burn_ops.push(all_burn_ops.clone()); + + all_burn_ops.clear(); + all_blocks.clear(); + } + + all_blocks.append(&mut blocks); + all_burn_ops.push(burn_ops); + } + + peer.config.test_signers = Some(test_signers); + peer.config.test_stackers = Some(test_stackers); + + let exited_peer = ExitedPeer::from_test_peer(peer); + + 
// inform the follower that we're done, and pass along the final state of the peer + if comms.data_sender.send(SeedData::Exit(exited_peer)).is_err() { + panic!("Follower disconnected"); + } + + // wait for request to exit + let Ok(SeedCommand::Exit) = comms.command_receiver.recv() else { + panic!("FATAL: did not receive shutdown request (follower must have crashed)"); + }; + } + + /// Instantiate bidirectional communication channels between the unit test and seed node + pub fn comms() -> (SeedComms, FollowerComms) { + let (data_sender, data_receiver) = sync_channel(1024); + let (command_sender, command_receiver) = sync_channel(1024); + + let seed_comms = SeedComms { + data_sender, + command_receiver, + }; + + let follower_comms = FollowerComms { + data_receiver, + command_sender, + }; + + (seed_comms, follower_comms) + } +} + +/// Verify that Nakmaoto blocks whose sortitions are known will *not* be buffered, but instead +/// forwarded to the relayer for processing. +#[test] +fn test_no_buffer_ready_nakamoto_blocks() { + let observer = TestEventObserver::new(); + let bitvecs = vec![vec![ + true, true, true, true, true, true, true, true, true, true, + ]]; + + let rc_len = 10u64; + let (peer, mut followers) = make_nakamoto_peers_from_invs( + function_name!(), + &observer, + rc_len as u32, + 5, + bitvecs.clone(), + 1, + ); + let peer_nk = peer.to_neighbor().addr; + let mut follower = followers.pop().unwrap(); + + let test_path = TestPeer::make_test_path(&follower.config); + let stackerdb_path = format!("{}/stacker_db.sqlite", &test_path); + let follower_stacker_dbs = StackerDBs::connect(&stackerdb_path, true).unwrap(); + let mut follower_relayer = Relayer::from_p2p(&mut follower.network, follower_stacker_dbs); + + // disable the follower's ability to download blocks from the seed peer + follower.network.connection_opts.disable_block_download = true; + follower.config.connection_opts.disable_block_download = true; + + let (seed_comms, mut follower_comms) = 
SeedNode::comms(); + + thread::scope(|s| { + s.spawn(|| { + SeedNode::main(peer, rc_len, seed_comms); + }); + + let mut seed_exited = false; + let mut exited_peer = None; + let (mut follower_dns_client, follower_dns_thread_handle) = dns_thread_start(100); + + while !seed_exited { + let mut network_result = follower + .step_with_ibd_and_dns(true, Some(&mut follower_dns_client)) + .ok(); + + match follower_comms.try_recv() { + None => {} + Some(SeedData::BurnOps(burn_ops, consensus_hash)) => { + debug!("Follower got {}: {:?}", &consensus_hash, &burn_ops); + let (_, _, follower_consensus_hash) = + follower.next_burnchain_block(burn_ops.clone()); + assert_eq!(follower_consensus_hash, consensus_hash); + } + Some(SeedData::Blocks(blocks)) => { + debug!("Follower got Nakamoto blocks {:?}", &blocks); + + let mut sortdb = follower.sortdb.take().unwrap(); + let mut node = follower.stacks_node.take().unwrap(); + + // no need to buffer this because we can process it right away + let buffer = follower + .network + .inner_handle_unsolicited_NakamotoBlocksData( + &sortdb, + &node.chainstate, + Some(peer_nk.clone()), + &NakamotoBlocksData { + blocks: blocks.clone(), + }, + ); + assert!(!buffer); + + // we need these blocks, but we don't need to buffer them + for block in blocks.iter() { + assert!(!follower.network.is_nakamoto_block_bufferable( + &sortdb, + &node.chainstate, + block + )); + } + + // go process the blocks _as if_ they came from a network result + let mut unsolicited = HashMap::new(); + let msg = StacksMessage::from_chain_view( + follower.network.bound_neighbor_key().peer_version, + follower.network.bound_neighbor_key().network_id, + follower.network.get_chain_view(), + StacksMessageType::NakamotoBlocks(NakamotoBlocksData { + blocks: blocks.clone(), + }), + ); + unsolicited.insert(peer_nk.clone(), vec![msg]); + + if let Some(mut network_result) = network_result.take() { + network_result.consume_unsolicited(unsolicited); + let num_processed = 
follower_relayer.process_new_epoch3_blocks( + follower.network.get_local_peer(), + &mut network_result, + &mut sortdb, + &mut node.chainstate, + true, + None, + ); + + // because we process in order, they should all get processed + assert_eq!(num_processed, blocks.len() as u64); + } + + // no need to buffer if we already have the block + let buffer = follower + .network + .inner_handle_unsolicited_NakamotoBlocksData( + &sortdb, + &node.chainstate, + Some(peer_nk.clone()), + &NakamotoBlocksData { + blocks: blocks.clone(), + }, + ); + assert!(!buffer); + + // we don't need these blocks anymore + for block in blocks.iter() { + assert!(!follower.network.is_nakamoto_block_bufferable( + &sortdb, + &node.chainstate, + block + )); + } + + follower.stacks_node = Some(node); + follower.sortdb = Some(sortdb); + } + Some(SeedData::Exit(exited)) => { + debug!("Follower got seed exit"); + seed_exited = true; + exited_peer = Some(exited); + follower_comms.send_exit(); + } + } + + follower.coord.handle_new_burnchain_block().unwrap(); + follower.coord.handle_new_stacks_block().unwrap(); + follower.coord.handle_new_nakamoto_stacks_block().unwrap(); + } + + // compare chain tips + let sortdb = follower.sortdb.take().unwrap(); + let stacks_node = follower.stacks_node.take().unwrap(); + let follower_burn_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + let follower_stacks_tip = + NakamotoChainState::get_canonical_block_header(stacks_node.chainstate.db(), &sortdb) + .unwrap(); + follower.stacks_node = Some(stacks_node); + follower.sortdb = Some(sortdb); + + let mut exited_peer = exited_peer.unwrap(); + let sortdb = exited_peer.sortdb.take().unwrap(); + let stacks_node = exited_peer.stacks_node.take().unwrap(); + let exited_peer_burn_tip = + SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + let exited_peer_stacks_tip = + NakamotoChainState::get_canonical_block_header(stacks_node.chainstate.db(), &sortdb) + .unwrap(); + exited_peer.stacks_node 
= Some(stacks_node); + exited_peer.sortdb = Some(sortdb); + + assert_eq!(exited_peer_burn_tip, follower_burn_tip); + assert_eq!(exited_peer_stacks_tip, follower_stacks_tip); + }); +} + +/// Verify that Nakamoto blocks whose sortitions are not yet known will be buffered, and sent to +/// the relayer once the burnchain advances. +#[test] +fn test_buffer_nonready_nakamoto_blocks() { + let observer = TestEventObserver::new(); + let bitvecs = vec![vec![ + true, true, true, true, true, true, true, true, true, true, + ]]; + + let rc_len = 10u64; + let (peer, mut followers) = make_nakamoto_peers_from_invs( + function_name!(), + &observer, + rc_len as u32, + 5, + bitvecs.clone(), + 1, + ); + let peer_nk = peer.to_neighbor().addr; + let mut follower = followers.pop().unwrap(); + + let test_path = TestPeer::make_test_path(&follower.config); + let stackerdb_path = format!("{}/stacker_db.sqlite", &test_path); + let follower_stacker_dbs = StackerDBs::connect(&stackerdb_path, true).unwrap(); + let mut follower_relayer = Relayer::from_p2p(&mut follower.network, follower_stacker_dbs); + + // disable the follower's ability to download blocks from the seed peer + follower.network.connection_opts.disable_block_download = true; + follower.config.connection_opts.disable_block_download = true; + + // don't authenticate unsolicited messages, since this test directly pushes them + follower + .network + .connection_opts + .test_disable_unsolicited_message_authentication = true; + follower + .config + .connection_opts + .test_disable_unsolicited_message_authentication = true; + + let (seed_comms, mut follower_comms) = SeedNode::comms(); + + let mut buffered_burn_ops = VecDeque::new(); + let mut all_blocks = vec![]; + + thread::scope(|s| { + s.spawn(|| { + SeedNode::main(peer, rc_len, seed_comms); + }); + + let mut seed_exited = false; + let mut exited_peer = None; + let (mut follower_dns_client, follower_dns_thread_handle) = dns_thread_start(100); + + while !seed_exited { + let mut 
network_result = follower + .step_with_ibd_and_dns(true, Some(&mut follower_dns_client)) + .ok(); + + match follower_comms.try_recv() { + None => {} + Some(SeedData::BurnOps(burn_ops, consensus_hash)) => { + debug!( + "Follower got and will buffer {}: {:?}", + &consensus_hash, &burn_ops + ); + buffered_burn_ops.push_back((burn_ops, consensus_hash)); + if buffered_burn_ops.len() > 1 { + let (buffered_burn_ops, buffered_consensus_hash) = + buffered_burn_ops.pop_front().unwrap(); + debug!( + "Follower will process {}: {:?}", + &buffered_consensus_hash, &buffered_burn_ops + ); + let (_, _, follower_consensus_hash) = + follower.next_burnchain_block(buffered_burn_ops.clone()); + assert_eq!(follower_consensus_hash, buffered_consensus_hash); + } + } + Some(SeedData::Blocks(blocks)) => { + debug!("Follower got Nakamoto blocks {:?}", &blocks); + all_blocks.push(blocks.clone()); + + let mut sortdb = follower.sortdb.take().unwrap(); + let mut node = follower.stacks_node.take().unwrap(); + + // we will need to buffer this since the sortition for these blocks hasn't been + // processed yet + let buffer = follower + .network + .inner_handle_unsolicited_NakamotoBlocksData( + &sortdb, + &node.chainstate, + Some(peer_nk.clone()), + &NakamotoBlocksData { + blocks: blocks.clone(), + }, + ); + assert!(buffer); + + // we need these blocks, but we can't process them yet + for block in blocks.iter() { + assert!(follower.network.is_nakamoto_block_bufferable( + &sortdb, + &node.chainstate, + block + )); + } + + // try to process the blocks _as if_ they came from a network result. + // It should fail. 
+ let mut unsolicited = HashMap::new(); + let msg = StacksMessage::from_chain_view( + follower.network.bound_neighbor_key().peer_version, + follower.network.bound_neighbor_key().network_id, + follower.network.get_chain_view(), + StacksMessageType::NakamotoBlocks(NakamotoBlocksData { + blocks: blocks.clone(), + }), + ); + unsolicited.insert(peer_nk.clone(), vec![msg]); + + if let Some(mut network_result) = network_result.take() { + network_result.consume_unsolicited(unsolicited); + follower_relayer.process_new_epoch3_blocks( + follower.network.get_local_peer(), + &mut network_result, + &mut sortdb, + &mut node.chainstate, + true, + None, + ); + } + + // have the peer network buffer them up + let mut unsolicited_msgs: HashMap> = HashMap::new(); + for (event_id, convo) in follower.network.peers.iter() { + for blks in all_blocks.iter() { + let msg = StacksMessage::from_chain_view( + follower.network.bound_neighbor_key().peer_version, + follower.network.bound_neighbor_key().network_id, + follower.network.get_chain_view(), + StacksMessageType::NakamotoBlocks(NakamotoBlocksData { + blocks: blocks.clone(), + }), + ); + + if let Some(msgs) = unsolicited_msgs.get_mut(event_id) { + msgs.push(msg); + } else { + unsolicited_msgs.insert(*event_id, vec![msg]); + } + } + } + follower.network.handle_unsolicited_messages( + &sortdb, + &node.chainstate, + unsolicited_msgs, + true, + true, + ); + + follower.stacks_node = Some(node); + follower.sortdb = Some(sortdb); + } + Some(SeedData::Exit(exited)) => { + debug!("Follower got seed exit"); + + // process the last burnchain sortitions + while let Some((buffered_burn_ops, buffered_consensus_hash)) = + buffered_burn_ops.pop_front() + { + debug!( + "Follower will process {}: {:?}", + &buffered_consensus_hash, &buffered_burn_ops + ); + let (_, _, follower_consensus_hash) = + follower.next_burnchain_block(buffered_burn_ops.clone()); + assert_eq!(follower_consensus_hash, buffered_consensus_hash); + } + + let mut network_result = follower + 
.step_with_ibd_and_dns(true, Some(&mut follower_dns_client)) + .ok(); + + // process the last buffered messages + let mut sortdb = follower.sortdb.take().unwrap(); + let mut node = follower.stacks_node.take().unwrap(); + + if let Some(mut network_result) = network_result.take() { + follower_relayer.process_new_epoch3_blocks( + follower.network.get_local_peer(), + &mut network_result, + &mut sortdb, + &mut node.chainstate, + true, + None, + ); + } + + follower.stacks_node = Some(node); + follower.sortdb = Some(sortdb); + + seed_exited = true; + exited_peer = Some(exited); + follower_comms.send_exit(); + } + } + + follower.coord.handle_new_burnchain_block().unwrap(); + follower.coord.handle_new_stacks_block().unwrap(); + follower.coord.handle_new_nakamoto_stacks_block().unwrap(); + } + + // compare chain tips + let sortdb = follower.sortdb.take().unwrap(); + let stacks_node = follower.stacks_node.take().unwrap(); + let follower_burn_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + let follower_stacks_tip = + NakamotoChainState::get_canonical_block_header(stacks_node.chainstate.db(), &sortdb) + .unwrap(); + follower.stacks_node = Some(stacks_node); + follower.sortdb = Some(sortdb); + + let mut exited_peer = exited_peer.unwrap(); + let sortdb = exited_peer.sortdb.take().unwrap(); + let stacks_node = exited_peer.stacks_node.take().unwrap(); + let exited_peer_burn_tip = + SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + let exited_peer_stacks_tip = + NakamotoChainState::get_canonical_block_header(stacks_node.chainstate.db(), &sortdb) + .unwrap(); + exited_peer.stacks_node = Some(stacks_node); + exited_peer.sortdb = Some(sortdb); + + assert_eq!(exited_peer_burn_tip, follower_burn_tip); + assert_eq!(exited_peer_stacks_tip, follower_stacks_tip); + }); +} + +/// Boot a follower off of a seed node by having the seed node push its blocks to the follower via +/// the p2p stack. 
The follower will buffer up Nakamoto blocks and forward them to its relayer as +/// needed. +#[test] +fn test_nakamoto_boot_node_from_block_push() { + let observer = TestEventObserver::new(); + let bitvecs = vec![ + // full reward cycle + vec![true, true, true, true, true, true, true, true, true, true], + ]; + + let rc_len = 10u64; + let (peer, mut followers) = make_nakamoto_peers_from_invs( + function_name!(), + &observer, + rc_len as u32, + 5, + bitvecs.clone(), + 1, + ); + let peer_nk = peer.to_neighbor().addr; + let mut follower = followers.pop().unwrap(); + + let test_path = TestPeer::make_test_path(&follower.config); + let stackerdb_path = format!("{}/stacker_db.sqlite", &test_path); + let follower_stacker_dbs = StackerDBs::connect(&stackerdb_path, true).unwrap(); + + // disable the follower's ability to download blocks from the seed peer + follower.network.connection_opts.disable_block_download = true; + follower.config.connection_opts.disable_block_download = true; + + let (seed_comms, mut follower_comms) = SeedNode::comms(); + + thread::scope(|s| { + s.spawn(|| { + SeedNode::main(peer, rc_len, seed_comms); + }); + + let mut seed_exited = false; + let mut exited_peer = None; + let (mut follower_dns_client, follower_dns_thread_handle) = dns_thread_start(100); + + while !seed_exited { + // follower will forward pushed data to its relayer + loop { + let network_result_res = + follower.run_with_ibd(true, Some(&mut follower_dns_client)); + if let Ok((network_result, _)) = network_result_res { + if network_result.num_connected_peers > 0 { + break; + } + } + } + + match follower_comms.try_recv() { + None => {} + Some(SeedData::BurnOps(burn_ops, consensus_hash)) => { + debug!("Follower will process {}: {:?}", &consensus_hash, &burn_ops); + let (_, _, follower_ch) = follower.next_burnchain_block(burn_ops.clone()); + assert_eq!(follower_ch, consensus_hash); + } + Some(SeedData::Blocks(blocks)) => { + debug!("Follower got Nakamoto blocks {:?}", &blocks); + } + 
Some(SeedData::Exit(exited)) => { + debug!("Follower got seed exit"); + + seed_exited = true; + exited_peer = Some(exited); + follower_comms.send_exit(); + } + } + + follower.coord.handle_new_burnchain_block().unwrap(); + follower.coord.handle_new_stacks_block().unwrap(); + follower.coord.handle_new_nakamoto_stacks_block().unwrap(); + } + + // recover exited peer and get its chain tips + let mut exited_peer = exited_peer.unwrap(); + let sortdb = exited_peer.sortdb.take().unwrap(); + let stacks_node = exited_peer.stacks_node.take().unwrap(); + let exited_peer_burn_tip = + SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + let exited_peer_stacks_tip = + NakamotoChainState::get_canonical_block_header(stacks_node.chainstate.db(), &sortdb) + .unwrap(); + exited_peer.stacks_node = Some(stacks_node); + exited_peer.sortdb = Some(sortdb); + + let mut synced = false; + for i in 0..100 { + // let the follower catch up to and keep talking to the exited peer + exited_peer.run_with_ibd(false, None).unwrap(); + follower + .run_with_ibd(true, Some(&mut follower_dns_client)) + .unwrap(); + + // compare chain tips + let sortdb = follower.sortdb.take().unwrap(); + let stacks_node = follower.stacks_node.take().unwrap(); + let follower_burn_tip = + SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + let follower_stacks_tip = NakamotoChainState::get_canonical_block_header( + stacks_node.chainstate.db(), + &sortdb, + ) + .unwrap(); + follower.stacks_node = Some(stacks_node); + follower.sortdb = Some(sortdb); + + debug!("{}: Follower sortition tip: {:?}", i, &follower_burn_tip); + debug!("{}: Seed sortition tip: {:?}", i, &exited_peer_burn_tip); + debug!("{}: Follower stacks tip: {:?}", i, &follower_stacks_tip); + debug!("{}: Seed stacks tip: {:?}", i, &exited_peer_stacks_tip); + + if exited_peer_burn_tip.consensus_hash == follower_burn_tip.consensus_hash + && exited_peer_stacks_tip == follower_stacks_tip + { + synced = true; + break; + } + } + + 
assert!(synced); + }); +} From a794490f252d106446d86e1e41f79688b3362cea Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 6 Jun 2024 13:22:58 -0400 Subject: [PATCH 05/20] fix: log when we process a new nakamoto block --- stackslib/src/chainstate/nakamoto/coordinator/mod.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs index f399615c80..930d47d5fb 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs @@ -567,6 +567,7 @@ impl< /// with Some(pox-anchor-block-hash) until the reward cycle info is processed in the sortition /// DB. pub fn handle_new_nakamoto_stacks_block(&mut self) -> Result, Error> { + debug!("Handle new Nakamoto block"); let canonical_sortition_tip = self.canonical_sortition_tip.clone().expect( "FAIL: processing a new Stacks block, but don't have a canonical sortition tip", ); From b139a421401b7304d4fffe89852c7df5b99aa0e5 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 6 Jun 2024 13:23:11 -0400 Subject: [PATCH 06/20] chore: implement NakamotoBlocks push message for p2p stack --- stackslib/src/net/codec.rs | 48 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) diff --git a/stackslib/src/net/codec.rs b/stackslib/src/net/codec.rs index c0496aa14c..c115a50d82 100644 --- a/stackslib/src/net/codec.rs +++ b/stackslib/src/net/codec.rs @@ -41,6 +41,7 @@ use stacks_common::util::secp256k1::{ use crate::burnchains::{BurnchainView, PrivateKey, PublicKey}; use crate::chainstate::burn::ConsensusHash; +use crate::chainstate::nakamoto::NakamotoBlock; use crate::chainstate::stacks::{ StacksBlock, StacksMicroblock, StacksPublicKey, StacksTransaction, MAX_BLOCK_LEN, }; @@ -353,6 +354,36 @@ impl NakamotoInvData { } } +impl StacksMessageCodec for NakamotoBlocksData { + fn consensus_serialize(&self, fd: &mut W) -> Result<(), codec_error> { + write_next(fd, 
&self.blocks)?; + Ok(()) + } + + fn consensus_deserialize(fd: &mut R) -> Result { + let blocks: Vec = { + // loose upper-bound + let mut bound_read = BoundReader::from_reader(fd, MAX_MESSAGE_LEN as u64); + read_next_at_most::<_, NakamotoBlock>(&mut bound_read, NAKAMOTO_BLOCKS_PUSHED_MAX) + }?; + + // only valid if there are no dups + let mut present = HashSet::new(); + for block in blocks.iter() { + if present.contains(&block.block_id()) { + // no dups allowed + return Err(codec_error::DeserializeError( + "Invalid NakamotoBlocksData: duplicate block".to_string(), + )); + } + + present.insert(block.block_id()); + } + + Ok(NakamotoBlocksData { blocks }) + } +} + impl StacksMessageCodec for GetPoxInv { fn consensus_serialize(&self, fd: &mut W) -> Result<(), codec_error> { write_next(fd, &self.consensus_hash)?; @@ -930,6 +961,7 @@ impl StacksMessageType { StacksMessageType::StackerDBPushChunk(ref _m) => StacksMessageID::StackerDBPushChunk, StacksMessageType::GetNakamotoInv(ref _m) => StacksMessageID::GetNakamotoInv, StacksMessageType::NakamotoInv(ref _m) => StacksMessageID::NakamotoInv, + StacksMessageType::NakamotoBlocks(ref _m) => StacksMessageID::NakamotoBlocks, } } @@ -964,6 +996,7 @@ impl StacksMessageType { StacksMessageType::StackerDBPushChunk(ref _m) => "StackerDBPushChunk", StacksMessageType::GetNakamotoInv(ref _m) => "GetNakamotoInv", StacksMessageType::NakamotoInv(ref _m) => "NakamotoInv", + StacksMessageType::NakamotoBlocks(ref _m) => "NakamotoBlocks", } } @@ -1071,6 +1104,15 @@ impl StacksMessageType { StacksMessageType::NakamotoInv(ref m) => { format!("NakamotoInv({:?})", &m.tenures) } + StacksMessageType::NakamotoBlocks(ref m) => { + format!( + "NakamotoBlocks({:?})", + m.blocks + .iter() + .map(|block| block.block_id()) + .collect::>() + ) + } } } } @@ -1122,6 +1164,7 @@ impl StacksMessageCodec for StacksMessageID { } x if x == StacksMessageID::GetNakamotoInv as u8 => StacksMessageID::GetNakamotoInv, x if x == StacksMessageID::NakamotoInv as u8 => 
StacksMessageID::NakamotoInv, + x if x == StacksMessageID::NakamotoBlocks as u8 => StacksMessageID::NakamotoBlocks, _ => { return Err(codec_error::DeserializeError( "Unknown message ID".to_string(), @@ -1166,6 +1209,7 @@ impl StacksMessageCodec for StacksMessageType { StacksMessageType::StackerDBPushChunk(ref m) => write_next(fd, m)?, StacksMessageType::GetNakamotoInv(ref m) => write_next(fd, m)?, StacksMessageType::NakamotoInv(ref m) => write_next(fd, m)?, + StacksMessageType::NakamotoBlocks(ref m) => write_next(fd, m)?, } Ok(()) } @@ -1276,6 +1320,10 @@ impl StacksMessageCodec for StacksMessageType { let m: NakamotoInvData = read_next(fd)?; StacksMessageType::NakamotoInv(m) } + StacksMessageID::NakamotoBlocks => { + let m: NakamotoBlocksData = read_next(fd)?; + StacksMessageType::NakamotoBlocks(m) + } StacksMessageID::Reserved => { return Err(codec_error::DeserializeError( "Unsupported message ID 'reserved'".to_string(), From 4e2f82c4c81102fcae0386139c7de260f89a2397 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 6 Jun 2024 13:24:14 -0400 Subject: [PATCH 07/20] chore: document all fault-injection flags, and expand the maximum number of buffered messages for each kind of buffered data message we support (including Nakamoto blocks) --- stackslib/src/net/connection.rs | 45 ++++++++++++++++++++++++++++----- 1 file changed, 38 insertions(+), 7 deletions(-) diff --git a/stackslib/src/net/connection.rs b/stackslib/src/net/connection.rs index 878ab04efb..06e3c54f85 100644 --- a/stackslib/src/net/connection.rs +++ b/stackslib/src/net/connection.rs @@ -381,6 +381,7 @@ pub struct ConnectionOptions { pub max_buffered_microblocks_available: u64, pub max_buffered_blocks: u64, pub max_buffered_microblocks: u64, + pub max_buffered_nakamoto_blocks: u64, /// how often to query a remote peer for its mempool, in seconds pub mempool_sync_interval: u64, /// how many transactions to ask for in a mempool query @@ -393,30 +394,55 @@ pub struct ConnectionOptions { pub 
socket_send_buffer_size: u32, /// whether or not to announce or accept neighbors that are behind private networks pub private_neighbors: bool, + /// maximum number of confirmations for a nakamoto block's sortition for which it will be + /// pushed + pub max_nakamoto_block_relay_age: u64, + /// The authorization token to enable the block proposal RPC endpoint + pub block_proposal_token: Option, // fault injection + /// Disable neighbor walk and discovery pub disable_neighbor_walk: bool, + /// Disable sharing neighbors to a remote requester pub disable_chat_neighbors: bool, + /// Disable block inventory sync state machine pub disable_inv_sync: bool, + /// Disable sending inventory messages to a remote requester pub disable_inv_chat: bool, + /// Disable block download state machine pub disable_block_download: bool, + /// Disable network pruning pub disable_network_prune: bool, + /// Disable banning misbehaving peers pub disable_network_bans: bool, + /// Disable block availability advertisement pub disable_block_advertisement: bool, + /// Disable block pushing pub disable_block_push: bool, + /// Disable microblock pushing pub disable_microblock_push: bool, + /// Disable walk pingbacks -- don't attempt to walk to a remote peer even if it contacted us + /// first pub disable_pingbacks: bool, + /// Disable walking to inbound neighbors pub disable_inbound_walks: bool, + /// Disable all attempts to learn our IP address pub disable_natpunch: bool, + /// Disable handshakes from inbound neighbors pub disable_inbound_handshakes: bool, + /// Disable getting chunks from StackerDB (e.g. to test push-only) pub disable_stackerdb_get_chunks: bool, + /// Unconditionally disconnect a peer after this amount of time pub force_disconnect_interval: Option, /// If set to true, this forces the p2p state machine to believe that it is running in /// the reward cycle in which Nakamoto activates, and thus needs to run both the epoch /// 2.x and Nakamoto state machines. 
pub force_nakamoto_epoch_transition: bool, - /// The authorization token to enable the block proposal RPC endpoint - pub block_proposal_token: Option, + + // test facilitation + /// Do not require that an unsolicited message originate from an authenticated, connected + /// neighbor + pub test_disable_unsolicited_message_authentication: bool, } impl std::default::Default for ConnectionOptions { @@ -481,16 +507,19 @@ impl std::default::Default for ConnectionOptions { max_microblock_push: 10, // maximum number of microblocks messages to push out via our anti-entropy protocol antientropy_retry: 60, // retry pushing data once every minute antientropy_public: true, // run antientropy even if we're NOT NAT'ed - max_buffered_blocks_available: 1, - max_buffered_microblocks_available: 1, - max_buffered_blocks: 1, - max_buffered_microblocks: 10, + max_buffered_blocks_available: 5, + max_buffered_microblocks_available: 5, + max_buffered_blocks: 5, + max_buffered_microblocks: 1024, + max_buffered_nakamoto_blocks: 1024, mempool_sync_interval: 30, // number of seconds in-between mempool sync mempool_max_tx_query: 128, // maximum number of transactions to visit per mempool query mempool_sync_timeout: 180, // how long a mempool sync can go for (3 minutes) socket_recv_buffer_size: 131072, // Linux default socket_send_buffer_size: 16384, // Linux default private_neighbors: true, + max_nakamoto_block_relay_age: 6, + block_proposal_token: None, // no faults on by default disable_neighbor_walk: false, @@ -510,7 +539,9 @@ impl std::default::Default for ConnectionOptions { disable_stackerdb_get_chunks: false, force_disconnect_interval: None, force_nakamoto_epoch_transition: false, - block_proposal_token: None, + + // no test facilitations on by default + test_disable_unsolicited_message_authentication: false, } } } From 58173a0ceaa30704126b622ccd2dd106f5925f28 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 6 Jun 2024 13:24:45 -0400 Subject: [PATCH 08/20] chore: fault-injection for 
Nakamoto block download --- stackslib/src/net/download/nakamoto/mod.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/stackslib/src/net/download/nakamoto/mod.rs b/stackslib/src/net/download/nakamoto/mod.rs index ddef979681..b856afab44 100644 --- a/stackslib/src/net/download/nakamoto/mod.rs +++ b/stackslib/src/net/download/nakamoto/mod.rs @@ -217,6 +217,10 @@ impl PeerNetwork { chainstate: &StacksChainState, ibd: bool, ) -> Result>, NetError> { + if self.connection_opts.disable_block_download { + return Ok(HashMap::new()); + } + let res = self.sync_blocks_nakamoto(burnchain_height, sortdb, chainstate, ibd)?; let Some(mut block_downloader) = self.block_downloader_nakamoto.take() else { From 991487c15c3af5efc221e8eb9f74d2e5450715ae Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 6 Jun 2024 13:25:00 -0400 Subject: [PATCH 09/20] chore: track pushed NakamotoBlocks in the NetworkResult struct, so they can be processed by the relayer --- stackslib/src/net/mod.rs | 53 +++++++++++++++++++++++++++++++++------- 1 file changed, 44 insertions(+), 9 deletions(-) diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index bd064774c5..d7d0b663fb 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -137,6 +137,7 @@ pub mod relay; pub mod rpc; pub mod server; pub mod stackerdb; +pub mod unsolicited; pub use crate::net::neighbors::{NeighborComms, PeerNetworkComms}; use crate::net::stackerdb::{StackerDBConfig, StackerDBSync, StackerDBSyncResult, StackerDBs}; @@ -906,15 +907,24 @@ pub struct PoxInvData { pub pox_bitvec: Vec, // a bit will be '1' if the node knows for sure the status of its reward cycle's anchor block; 0 if not. } +/// Stacks epoch 2.x pushed block #[derive(Debug, Clone, PartialEq)] pub struct BlocksDatum(pub ConsensusHash, pub StacksBlock); -/// Blocks pushed +/// Stacks epoch 2.x blocks pushed #[derive(Debug, Clone, PartialEq)] pub struct BlocksData { pub blocks: Vec, } +/// Nakamoto epoch 3.x blocks pushed. 
+/// No need for a separate NakamotoBlocksDatum struct, because the consensus hashes that place this +/// block into the block stream are already embedded within the header +#[derive(Debug, Clone, PartialEq)] +pub struct NakamotoBlocksData { + pub blocks: Vec, +} + /// Microblocks pushed #[derive(Debug, Clone, PartialEq)] pub struct MicroblocksData { @@ -1138,6 +1148,7 @@ pub enum StacksMessageType { // Nakamoto-specific GetNakamotoInv(GetNakamotoInvData), NakamotoInv(NakamotoInvData), + NakamotoBlocks(NakamotoBlocksData), } #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] @@ -1172,6 +1183,7 @@ pub enum StacksMessageID { // nakamoto GetNakamotoInv = 26, NakamotoInv = 27, + NakamotoBlocks = 28, // reserved Reserved = 255, } @@ -1263,11 +1275,16 @@ pub const GETPOXINV_MAX_BITLEN: u64 = 4096; #[cfg(test)] pub const GETPOXINV_MAX_BITLEN: u64 = 8; -// maximum number of blocks that can be pushed at once (even if the entire message is undersized). +// maximum number of Stacks epoch2.x blocks that can be pushed at once (even if the entire message is undersized). // This bound is needed since it bounds the amount of I/O a peer can be asked to do to validate the // message. pub const BLOCKS_PUSHED_MAX: u32 = 32; +// maximum number of Nakamoto blocks that can be pushed at once (even if the entire message is undersized). +// This bound is needed since it bounds the amount of I/O a peer can be asked to do to validate the +// message. 
+pub const NAKAMOTO_BLOCKS_PUSHED_MAX: u32 = 32; + /// neighbor identifier #[derive(Clone, Eq, PartialOrd, Ord)] pub struct NeighborKey { @@ -1423,6 +1440,7 @@ pub const DENY_BAN_DURATION: u64 = 86400; // seconds (1 day) pub const DENY_MIN_BAN_DURATION: u64 = 2; /// Result of doing network work +#[derive(Clone)] pub struct NetworkResult { /// PoX ID as it was when we begin downloading blocks (set if we have downloaded new blocks) pub download_pox_id: Option, @@ -1440,6 +1458,8 @@ pub struct NetworkResult { pub pushed_blocks: HashMap>, /// all Stacks 2.x microblocks pushed to us, and the relay hints from the message pub pushed_microblocks: HashMap, MicroblocksData)>>, + /// all Stacks 3.x blocks pushed to us + pub pushed_nakamoto_blocks: HashMap, NakamotoBlocksData)>>, /// transactions sent to us by the http server pub uploaded_transactions: Vec, /// blocks sent to us via the http server @@ -1460,9 +1480,11 @@ pub struct NetworkResult { pub num_inv_sync_passes: u64, /// Number of times the Stacks 2.x block downloader has completed one pass pub num_download_passes: u64, + /// Number of connected peers + pub num_connected_peers: usize, /// The observed burnchain height pub burn_height: u64, - /// The consensus hash of the start of this reward cycle + /// The consensus hash of the burnchain tip (prefixed `rc_` for historical reasons) pub rc_consensus_hash: ConsensusHash, /// The current StackerDB configs pub stacker_db_configs: HashMap, @@ -1473,6 +1495,7 @@ impl NetworkResult { num_state_machine_passes: u64, num_inv_sync_passes: u64, num_download_passes: u64, + num_connected_peers: usize, burn_height: u64, rc_consensus_hash: ConsensusHash, stacker_db_configs: HashMap, @@ -1486,6 +1509,7 @@ impl NetworkResult { pushed_transactions: HashMap::new(), pushed_blocks: HashMap::new(), pushed_microblocks: HashMap::new(), + pushed_nakamoto_blocks: HashMap::new(), uploaded_transactions: vec![], uploaded_blocks: vec![], uploaded_microblocks: vec![], @@ -1496,6 +1520,7 @@ impl 
NetworkResult { num_state_machine_passes: num_state_machine_passes, num_inv_sync_passes: num_inv_sync_passes, num_download_passes: num_download_passes, + num_connected_peers, burn_height, rc_consensus_hash, stacker_db_configs, @@ -1513,7 +1538,7 @@ impl NetworkResult { } pub fn has_nakamoto_blocks(&self) -> bool { - self.nakamoto_blocks.len() > 0 + self.nakamoto_blocks.len() > 0 || self.pushed_nakamoto_blocks.len() > 0 } pub fn has_transactions(&self) -> bool { @@ -1555,7 +1580,7 @@ impl NetworkResult { pub fn consume_unsolicited( &mut self, unhandled_messages: HashMap>, - ) -> () { + ) { for (neighbor_key, messages) in unhandled_messages.into_iter() { for message in messages.into_iter() { match message.payload { @@ -1585,6 +1610,16 @@ impl NetworkResult { .insert(neighbor_key.clone(), vec![(message.relayers, tx_data)]); } } + StacksMessageType::NakamotoBlocks(block_data) => { + if let Some(nakamoto_blocks_msgs) = + self.pushed_nakamoto_blocks.get_mut(&neighbor_key) + { + nakamoto_blocks_msgs.push((message.relayers, block_data)); + } else { + self.pushed_nakamoto_blocks + .insert(neighbor_key.clone(), vec![(message.relayers, block_data)]); + } + } _ => { // forward along if let Some(messages) = self.unhandled_messages.get_mut(&neighbor_key) { @@ -2745,8 +2780,8 @@ pub mod test { &mut self, ibd: bool, dns_client: Option<&mut DNSClient>, - ) -> Result { - let mut net_result = self.step_with_ibd_and_dns(ibd, dns_client)?; + ) -> Result<(NetworkResult, ProcessedNetReceipts), net_error> { + let net_result = self.step_with_ibd_and_dns(ibd, dns_client)?; let mut sortdb = self.sortdb.take().unwrap(); let mut stacks_node = self.stacks_node.take().unwrap(); let mut mempool = self.mempool.take().unwrap(); @@ -2754,7 +2789,7 @@ pub mod test { let receipts_res = self.relayer.process_network_result( self.network.get_local_peer(), - &mut net_result, + &mut net_result.clone(), &mut sortdb, &mut stacks_node.chainstate, &mut mempool, @@ -2772,7 +2807,7 @@ pub mod test { 
self.coord.handle_new_stacks_block().unwrap(); self.coord.handle_new_nakamoto_stacks_block().unwrap(); - receipts_res + receipts_res.and_then(|receipts| Ok((net_result, receipts))) } pub fn step_dns(&mut self, dns_client: &mut DNSClient) -> Result { From 175f5b80be45de5f1869109a4703d1f49c8e36dc Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 6 Jun 2024 13:25:20 -0400 Subject: [PATCH 10/20] chore: need Clone for StackerDBSyncResult (since we need it for NetworkResult) --- stackslib/src/net/stackerdb/mod.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/stackslib/src/net/stackerdb/mod.rs b/stackslib/src/net/stackerdb/mod.rs index 24265410ee..b6c856a929 100644 --- a/stackslib/src/net/stackerdb/mod.rs +++ b/stackslib/src/net/stackerdb/mod.rs @@ -154,6 +154,7 @@ pub const STACKERDB_CONFIG_FUNCTION: &str = "stackerdb-get-config"; pub const MINER_SLOT_COUNT: u32 = 1; /// Final result of synchronizing state with a remote set of DB replicas +#[derive(Clone)] pub struct StackerDBSyncResult { /// which contract this is a replica for pub contract_id: QualifiedContractIdentifier, From ad7f2555224f3493d746dbb77ea52d5936e2284e Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 6 Jun 2024 13:25:49 -0400 Subject: [PATCH 11/20] chore: relay test module --- stackslib/src/net/tests/mod.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/stackslib/src/net/tests/mod.rs b/stackslib/src/net/tests/mod.rs index 82e1b8b814..57b58f2534 100644 --- a/stackslib/src/net/tests/mod.rs +++ b/stackslib/src/net/tests/mod.rs @@ -18,6 +18,7 @@ pub mod download; pub mod httpcore; pub mod inv; pub mod neighbors; +pub mod relay; use clarity::vm::clarity::ClarityConnection; use clarity::vm::types::PrincipalData; From f2e8b8841c2128c2cf9a47217428c8c218ea2ca0 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 6 Jun 2024 15:02:54 -0400 Subject: [PATCH 12/20] chore: fmt --- stackslib/src/net/relay.rs | 9 +++++++-- stackslib/src/net/tests/relay/epoch2x.rs | 6 ++++-- 
stackslib/src/net/tests/relay/nakamoto.rs | 7 ++----- 3 files changed, 13 insertions(+), 9 deletions(-) diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index 75834cc9c8..a4b7389ad5 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -2500,7 +2500,13 @@ impl Relayer { coord_comms: Option<&CoordinatorChannels>, ) -> u64 { let mut num_new_nakamoto_blocks = 0; - match Self::process_new_nakamoto_blocks(network_result, burnchain, sortdb, chainstate, coord_comms) { + match Self::process_new_nakamoto_blocks( + network_result, + burnchain, + sortdb, + chainstate, + coord_comms, + ) { Ok((nakamoto_blocks_and_relayers, bad_neighbors)) => { num_new_nakamoto_blocks = nakamoto_blocks_and_relayers .iter() @@ -3109,4 +3115,3 @@ impl PeerNetwork { } } } - diff --git a/stackslib/src/net/tests/relay/epoch2x.rs b/stackslib/src/net/tests/relay/epoch2x.rs index 817af95d72..1a383f7f87 100644 --- a/stackslib/src/net/tests/relay/epoch2x.rs +++ b/stackslib/src/net/tests/relay/epoch2x.rs @@ -2932,7 +2932,6 @@ fn process_new_blocks_rejects_problematic_asts() { ) .unwrap(); - // this tx would be problematic without our checks if let Err(ChainstateError::ProblematicTransaction(txid)) = StacksBlockBuilder::make_anchored_block_from_txs( @@ -2978,7 +2977,10 @@ fn process_new_blocks_rejects_problematic_asts() { bad_block.header.tx_merkle_root = merkle_tree.root(); chainstate - .reload_unconfirmed_state(&sortdb.index_handle(&tip.sortition_id), parent_index_hash.clone()) + .reload_unconfirmed_state( + &sortdb.index_handle(&tip.sortition_id), + parent_index_hash.clone(), + ) .unwrap(); // make a bad microblock diff --git a/stackslib/src/net/tests/relay/nakamoto.rs b/stackslib/src/net/tests/relay/nakamoto.rs index f99a2d4efa..bbabf6fc0d 100644 --- a/stackslib/src/net/tests/relay/nakamoto.rs +++ b/stackslib/src/net/tests/relay/nakamoto.rs @@ -37,13 +37,10 @@ use super::*; use crate::burnchains::bitcoin::indexer::BitcoinIndexer; use 
crate::burnchains::tests::TestMiner; use crate::chainstate::burn::operations::BlockstackOperationType; -use crate::chainstate::nakamoto::coordinator::tests::{ - make_token_transfer, -}; +use crate::chainstate::nakamoto::coordinator::tests::make_token_transfer; use crate::chainstate::nakamoto::tests::get_account; use crate::chainstate::stacks::boot::test::{ - key_to_stacks_addr, make_pox_4_lockup, make_signer_key_signature, - with_sortdb, + key_to_stacks_addr, make_pox_4_lockup, make_signer_key_signature, with_sortdb, }; use crate::chainstate::stacks::db::blocks::{MINIMUM_TX_FEE, MINIMUM_TX_FEE_RATE_PER_BYTE}; use crate::chainstate::stacks::miner::{BlockBuilderSettings, StacksMicroblockBuilder}; From a7c5a1f061a3abf9182f69b2e1d3070d15407405 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 6 Jun 2024 16:32:01 -0400 Subject: [PATCH 13/20] fix: verify on receipt of the blocks that it's signed by the signers -- both when we buffer it, and when we relay it --- stackslib/src/net/mod.rs | 4 ++ stackslib/src/net/relay.rs | 92 ++++++++++++++++++++++++++++---- stackslib/src/net/unsolicited.rs | 36 +++++++++++-- 3 files changed, 120 insertions(+), 12 deletions(-) diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index 026b9e026f..e8e52fd137 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -285,6 +285,8 @@ pub enum Error { InvalidState, /// Waiting for DNS resolution WaitingForDNS, + /// No reward set for given reward cycle + NoPoXRewardSet(u64), } impl From for Error { @@ -433,6 +435,7 @@ impl fmt::Display for Error { Error::Http(e) => fmt::Display::fmt(&e, f), Error::InvalidState => write!(f, "Invalid state-machine state reached"), Error::WaitingForDNS => write!(f, "Waiting for DNS resolution"), + Error::NoPoXRewardSet(rc) => write!(f, "No PoX reward set for cycle {}", rc), } } } @@ -506,6 +509,7 @@ impl error::Error for Error { Error::Http(ref e) => Some(e), Error::InvalidState => None, Error::WaitingForDNS => None, + 
Error::NoPoXRewardSet(..) => None, } } } diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index a4b7389ad5..750fba7f6b 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -549,9 +549,15 @@ impl Relayer { /// Given Nakamoto blocks pushed to us, verify that they correspond to expected block data. pub fn validate_nakamoto_blocks_push( + burnchain: &Burnchain, conn: &SortitionDBConn, + sortdb: &SortitionDB, + chainstate: &mut StacksChainState, nakamoto_blocks_data: &NakamotoBlocksData, ) -> Result<(), net_error> { + let mut loaded_reward_sets = HashMap::new(); + let tip_sn = SortitionDB::get_canonical_burn_chain_tip(conn)?; + for nakamoto_block in nakamoto_blocks_data.blocks.iter() { // is this the right Stacks block for this sortition? let Some(sn) = SortitionDB::get_block_snapshot_consensus( @@ -578,6 +584,71 @@ impl Relayer { ); return Err(net_error::InvalidMessage); } + + // is the block signed by the active reward set? + let sn_rc = burnchain + .pox_reward_cycle(sn.block_height) + .expect("FATAL: sortition has no reward cycle"); + let reward_cycle_info = if let Some(rc_info) = loaded_reward_sets.get(&sn_rc) { + rc_info + } else { + let Some((reward_set_info, _)) = load_nakamoto_reward_set( + sn_rc, + &tip_sn.sortition_id, + burnchain, + chainstate, + sortdb, + &OnChainRewardSetProvider::new(), + ) + .map_err(|e| { + error!( + "Failed to load reward cycle info for cycle {}: {:?}", + sn_rc, &e + ); + match e { + CoordinatorError::ChainstateError(e) => { + error!( + "No RewardCycleInfo loaded for tip {}: {:?}", + &sn.consensus_hash, &e + ); + net_error::ChainstateError(format!("{:?}", &e)) + } + CoordinatorError::DBError(e) => { + error!( + "No RewardCycleInfo loaded for tip {}: {:?}", + &sn.consensus_hash, &e + ); + net_error::DBError(e) + } + _ => { + error!( + "Failed to load RewardCycleInfo for tip {}: {:?}", + &sn.consensus_hash, &e + ); + net_error::NoPoXRewardSet(sn_rc) + } + } + })? 
+ else { + error!("No reward set for reward cycle {}", &sn_rc); + return Err(net_error::NoPoXRewardSet(sn_rc)); + }; + + loaded_reward_sets.insert(sn_rc, reward_set_info); + loaded_reward_sets.get(&sn_rc).expect("FATAL: infallible") + }; + + let Some(reward_set) = reward_cycle_info.known_selected_anchor_block() else { + error!("No reward set for reward cycle {}", &sn_rc); + return Err(net_error::NoPoXRewardSet(sn_rc)); + }; + + if let Err(e) = nakamoto_block.header.verify_signer_signatures(reward_set) { + info!( + "Signature verification failrue for Nakamoto block {}/{} in reward cycle {}: {:?}", &nakamoto_block.header.consensus_hash, &nakamoto_block.header.block_hash(), sn_rc, &e + ); + return Err(net_error::InvalidMessage); + } } Ok(()) } @@ -1467,21 +1538,25 @@ impl Relayer { for (neighbor_key, relayers_and_block_data) in network_result.pushed_nakamoto_blocks.iter() { for (relayers, nakamoto_blocks_data) in relayers_and_block_data.iter() { - let mut good = true; let mut accepted_blocks = vec![]; - if let Err(_e) = Relayer::validate_nakamoto_blocks_push( + if let Err(e) = Relayer::validate_nakamoto_blocks_push( + burnchain, &sortdb.index_conn(), + sortdb, + chainstate, nakamoto_blocks_data, ) { + info!( + "Failed to validate Nakamoto blocks pushed from {:?}: {:?}", + neighbor_key, &e + ); + // punish this peer bad_neighbors.push((*neighbor_key).clone()); - good = false; + break; } for nakamoto_block in nakamoto_blocks_data.blocks.iter() { - if !good { - break; - } let block_id = nakamoto_block.block_id(); debug!( "Received pushed Nakamoto block {} from {}", @@ -1513,7 +1588,6 @@ impl Relayer { Err(chainstate_error::InvalidStacksBlock(msg)) => { warn!("Invalid pushed Nakamoto block {}: {}", &block_id, msg); bad_neighbors.push((*neighbor_key).clone()); - good = false; break; } Err(e) => { @@ -1521,12 +1595,12 @@ impl Relayer { "Could not process pushed Nakamoto block {}: {:?}", &block_id, &e ); - good = false; break; } } } - if good && accepted_blocks.len() > 0 { + 
+ if accepted_blocks.len() > 0 { new_blocks_and_relayers.push((relayers.clone(), accepted_blocks)); } } diff --git a/stackslib/src/net/unsolicited.rs b/stackslib/src/net/unsolicited.rs index 29d9009f6f..cf7ef67089 100644 --- a/stackslib/src/net/unsolicited.rs +++ b/stackslib/src/net/unsolicited.rs @@ -711,7 +711,9 @@ impl PeerNetwork { Err(e) => { info!( "{:?}: Failed to query block snapshot for {}: {:?}", - &self.local_peer, &nakamoto_block.header.consensus_hash, &e + self.get_local_peer(), + &nakamoto_block.header.consensus_hash, + &e ); return false; } @@ -720,13 +722,41 @@ impl PeerNetwork { if !sn.pox_valid { info!( "{:?}: Failed to query snapshot for {}: not on the valid PoX fork", - &self.local_peer, &nakamoto_block.header.consensus_hash + self.get_local_peer(), + &nakamoto_block.header.consensus_hash ); return false; } // block must be signed by reward set signers - // TODO + let sn_rc = self + .burnchain + .pox_reward_cycle(sn.block_height) + .expect("FATAL: sortition has no reward cycle"); + let Some(rc_data) = self.current_reward_sets.get(&sn_rc) else { + info!( + "{:?}: Failed to validate Nakamoto block {}/{}: no reward set", + self.get_local_peer(), + &nakamoto_block.header.consensus_hash, + &nakamoto_block.header.block_hash() + ); + return false; + }; + let Some(reward_set) = rc_data.reward_set() else { + info!( + "{:?}: No reward set for reward cycle {}", + self.get_local_peer(), + sn_rc + ); + return false; + }; + + if let Err(e) = nakamoto_block.header.verify_signer_signatures(reward_set) { + info!( + "{:?}: signature verification failrue for Nakamoto block {}/{} in reward cycle {}: {:?}", self.get_local_peer(), &nakamoto_block.header.consensus_hash, &nakamoto_block.header.block_hash(), sn_rc, &e + ); + return false; + } // the block is well-formed, but we'd buffer if we can't process it yet !can_process From ff8cfe71499f2708f55413683cd4531e4c58e677 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 10 Jun 2024 22:39:53 -0400 Subject: [PATCH 
14/20] chore: fix failing tests and expand test coverage --- stackslib/src/net/p2p.rs | 5 +- stackslib/src/net/relay.rs | 13 +- stackslib/src/net/tests/relay/nakamoto.rs | 149 +++++++++++++++++----- stackslib/src/net/unsolicited.rs | 105 +++++++++------ 4 files changed, 192 insertions(+), 80 deletions(-) diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index 34f82b2720..e92200832c 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -409,8 +409,9 @@ pub struct PeerNetwork { antientropy_start_reward_cycle: u64, pub antientropy_last_push_ts: u64, - // pending messages (BlocksAvailable, MicroblocksAvailable, BlocksData, Microblocks) that we - // can't process yet, but might be able to process on the next chain view update + /// Pending messages (BlocksAvailable, MicroblocksAvailable, BlocksData, Microblocks, + /// NakamotoBlocks) that we can't process yet, but might be able to process on a subsequent + /// chain view update. pub pending_messages: HashMap>, // fault injection -- force disconnects diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index 750fba7f6b..6aaff981b2 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -645,7 +645,7 @@ impl Relayer { if let Err(e) = nakamoto_block.header.verify_signer_signatures(reward_set) { info!( - "Signature verification failrue for Nakamoto block {}/{} in reward cycle {}: {:?}", &nakamoto_block.header.consensus_hash, &nakamoto_block.header.block_hash(), sn_rc, &e + "Signature verification failure for Nakamoto block {}/{} in reward cycle {}: {:?}", &nakamoto_block.header.consensus_hash, &nakamoto_block.header.block_hash(), sn_rc, &e ); return Err(net_error::InvalidMessage); } @@ -1574,14 +1574,14 @@ impl Relayer { Ok(accepted) => { if accepted { debug!( - "Accepted Nakamoto block {} from {}", - &block_id, neighbor_key + "Accepted Nakamoto block {} ({}) from {}", + &block_id, &nakamoto_block.header.consensus_hash, neighbor_key ); 
accepted_blocks.push(nakamoto_block.clone()); } else { debug!( - "Rejected Nakamoto block {} from {}", - &block_id, &neighbor_key, + "Rejected Nakamoto block {} ({}) from {}", + &block_id, &nakamoto_block.header.consensus_hash, &neighbor_key, ); } } @@ -1595,7 +1595,6 @@ impl Relayer { "Could not process pushed Nakamoto block {}: {:?}", &block_id, &e ); - break; } } } @@ -1943,7 +1942,7 @@ impl Relayer { ) { Ok(x) => x, Err(e) => { - warn!("Failed to process pushed Nakamoot blocks: {:?}", &e); + warn!("Failed to process pushed Nakamoto blocks: {:?}", &e); (vec![], vec![]) } }; diff --git a/stackslib/src/net/tests/relay/nakamoto.rs b/stackslib/src/net/tests/relay/nakamoto.rs index bbabf6fc0d..2d286e157f 100644 --- a/stackslib/src/net/tests/relay/nakamoto.rs +++ b/stackslib/src/net/tests/relay/nakamoto.rs @@ -434,6 +434,8 @@ fn test_no_buffer_ready_nakamoto_blocks() { let mut sortdb = follower.sortdb.take().unwrap(); let mut node = follower.stacks_node.take().unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + // no need to buffer this because we can process it right away let buffer = follower .network @@ -454,6 +456,87 @@ fn test_no_buffer_ready_nakamoto_blocks() { &node.chainstate, block )); + + // suppose these blocks were invalid -- they would not be bufferable. + // bad signature? 
not bufferable + let mut bad_block = block.clone(); + let block_sn = SortitionDB::get_block_snapshot_consensus( + sortdb.conn(), + &bad_block.header.consensus_hash, + ) + .unwrap() + .unwrap(); + bad_block + .header + .signer_signature + .push(bad_block.header.signer_signature.last().cloned().unwrap()); + assert_eq!( + follower + .network + .find_nakamoto_block_reward_cycle(&sortdb, &bad_block), + ( + Some( + follower + .network + .burnchain + .pox_reward_cycle(block_sn.block_height) + .unwrap() + ), + true + ) + ); + assert!(!follower.network.is_nakamoto_block_bufferable( + &sortdb, + &node.chainstate, + &bad_block + )); + + // unrecognized consensus hash + let mut bad_block = block.clone(); + bad_block.header.consensus_hash = ConsensusHash([0xde; 20]); + assert_eq!( + follower + .network + .find_nakamoto_block_reward_cycle(&sortdb, &bad_block), + ( + Some( + follower + .network + .burnchain + .pox_reward_cycle( + follower.network.burnchain_tip.block_height + ) + .unwrap() + ), + false + ) + ); + + // stale consensus hash + let mut bad_block = block.clone(); + let ancestor_sn = SortitionDB::get_ancestor_snapshot( + &sortdb.index_conn(), + 1, + &tip.sortition_id, + ) + .unwrap() + .unwrap(); + bad_block.header.consensus_hash = ancestor_sn.consensus_hash; + assert_eq!( + follower + .network + .find_nakamoto_block_reward_cycle(&sortdb, &bad_block), + ( + Some( + follower + .network + .burnchain + .pox_reward_cycle(ancestor_sn.block_height) + .unwrap() + ), + true + ) + ); } // go process the blocks _as if_ they came from a network result @@ -631,8 +714,8 @@ fn test_buffer_nonready_nakamoto_blocks() { debug!("Follower got Nakamoto blocks {:?}", &blocks); all_blocks.push(blocks.clone()); - let mut sortdb = follower.sortdb.take().unwrap(); - let mut node = follower.stacks_node.take().unwrap(); + let sortdb = follower.sortdb.take().unwrap(); + let node = follower.stacks_node.take().unwrap(); // we will need to buffer this since the sortition for these blocks hasn't been 
// processed yet @@ -657,33 +740,8 @@ fn test_buffer_nonready_nakamoto_blocks() { )); } - // try to process the blocks _as if_ they came from a network result. - // It should fail. - let mut unsolicited = HashMap::new(); - let msg = StacksMessage::from_chain_view( - follower.network.bound_neighbor_key().peer_version, - follower.network.bound_neighbor_key().network_id, - follower.network.get_chain_view(), - StacksMessageType::NakamotoBlocks(NakamotoBlocksData { - blocks: blocks.clone(), - }), - ); - unsolicited.insert(peer_nk.clone(), vec![msg]); - - if let Some(mut network_result) = network_result.take() { - network_result.consume_unsolicited(unsolicited); - follower_relayer.process_new_epoch3_blocks( - follower.network.get_local_peer(), - &mut network_result, - &follower.network.burnchain, - &mut sortdb, - &mut node.chainstate, - true, - None, - ); - } - - // have the peer network buffer them up + // pass this and other blocks to the p2p network's unsolicited message handler, + // so they can be buffered up and processed. 
let mut unsolicited_msgs: HashMap> = HashMap::new(); for (event_id, convo) in follower.network.peers.iter() { for blks in all_blocks.iter() { @@ -692,7 +750,7 @@ fn test_buffer_nonready_nakamoto_blocks() { follower.network.bound_neighbor_key().network_id, follower.network.get_chain_view(), StacksMessageType::NakamotoBlocks(NakamotoBlocksData { - blocks: blocks.clone(), + blocks: blks.clone(), }), ); @@ -703,6 +761,7 @@ fn test_buffer_nonready_nakamoto_blocks() { } } } + follower.network.handle_unsolicited_messages( &sortdb, &node.chainstate, @@ -730,10 +789,6 @@ fn test_buffer_nonready_nakamoto_blocks() { assert_eq!(follower_consensus_hash, buffered_consensus_hash); } - let mut network_result = follower - .step_with_ibd_and_dns(true, Some(&mut follower_dns_client)) - .ok(); - // process the last buffered messages let mut sortdb = follower.sortdb.take().unwrap(); let mut node = follower.stacks_node.take().unwrap(); @@ -753,12 +808,33 @@ fn test_buffer_nonready_nakamoto_blocks() { follower.stacks_node = Some(node); follower.sortdb = Some(sortdb); + network_result = follower + .step_with_ibd_and_dns(true, Some(&mut follower_dns_client)) + .ok(); + seed_exited = true; exited_peer = Some(exited); follower_comms.send_exit(); } } + if let Some(mut network_result) = network_result.take() { + let mut sortdb = follower.sortdb.take().unwrap(); + let mut node = follower.stacks_node.take().unwrap(); + let num_processed = follower_relayer.process_new_epoch3_blocks( + follower.network.get_local_peer(), + &mut network_result, + &follower.network.burnchain, + &mut sortdb, + &mut node.chainstate, + true, + None, + ); + info!("Processed {} unsolicited Nakamoto blocks", num_processed); + follower.stacks_node = Some(node); + follower.sortdb = Some(sortdb); + } + follower.coord.handle_new_burnchain_block().unwrap(); follower.coord.handle_new_stacks_block().unwrap(); follower.coord.handle_new_nakamoto_stacks_block().unwrap(); @@ -785,7 +861,10 @@ fn test_buffer_nonready_nakamoto_blocks() 
{ exited_peer.stacks_node = Some(stacks_node); exited_peer.sortdb = Some(sortdb); - assert_eq!(exited_peer_burn_tip, follower_burn_tip); + assert_eq!( + exited_peer_burn_tip.sortition_id, + follower_burn_tip.sortition_id + ); assert_eq!(exited_peer_stacks_tip, follower_stacks_tip); }); } diff --git a/stackslib/src/net/unsolicited.rs b/stackslib/src/net/unsolicited.rs index cf7ef67089..8bed8e5312 100644 --- a/stackslib/src/net/unsolicited.rs +++ b/stackslib/src/net/unsolicited.rs @@ -179,8 +179,8 @@ impl PeerNetwork { let mut blocks_data = 0; let mut microblocks_data = 0; let mut nakamoto_blocks_data = 0; - for msg in msgs.iter() { - match &msg.payload { + for stored_msg in msgs.iter() { + match &stored_msg.payload { StacksMessageType::BlocksAvailable(_) => { blocks_available += 1; if blocks_available >= self.connection_opts.max_buffered_blocks_available { @@ -669,27 +669,49 @@ impl PeerNetwork { } } - /// Determine if an unsolicited NakamotoBlockData message contains data we can potentially - /// buffer - pub(crate) fn is_nakamoto_block_bufferable( + /// Check the signature of a NakamotoBlock against its sortition's reward cycle. + /// The reward cycle must be recent. 
+ pub(crate) fn check_nakamoto_block_signer_signature( &mut self, - sortdb: &SortitionDB, - chainstate: &StacksChainState, + reward_cycle: u64, nakamoto_block: &NakamotoBlock, ) -> bool { - if chainstate - .nakamoto_blocks_db() - .has_nakamoto_block(&nakamoto_block.block_id()) - .unwrap_or(false) - { - debug!( - "{:?}: Aleady have Nakamoto block {}", - &self.local_peer, - &nakamoto_block.block_id() + let Some(rc_data) = self.current_reward_sets.get(&reward_cycle) else { + info!( + "{:?}: Failed to validate Nakamoto block {}/{}: no reward set", + self.get_local_peer(), + &nakamoto_block.header.consensus_hash, + &nakamoto_block.header.block_hash() + ); + return false; + }; + let Some(reward_set) = rc_data.reward_set() else { + info!( + "{:?}: No reward set for reward cycle {}", + self.get_local_peer(), + reward_cycle + ); + return false; + }; + + if let Err(e) = nakamoto_block.header.verify_signer_signatures(reward_set) { + info!( + "{:?}: signature verification failrue for Nakamoto block {}/{} in reward cycle {}: {:?}", self.get_local_peer(), &nakamoto_block.header.consensus_hash, &nakamoto_block.header.block_hash(), reward_cycle, &e ); return false; } + true + } + /// Find the reward cycle in which to validate the signature for this block. + /// This may not actually correspond to the sortition for this block's tenure -- for example, + /// it may be for a block whose sortition is about to be processed. As such, return both the + /// reward cycle, and whether or not it corresponds to the sortition. 
+ pub(crate) fn find_nakamoto_block_reward_cycle( + &self, + sortdb: &SortitionDB, + nakamoto_block: &NakamotoBlock, + ) -> (Option, bool) { let mut can_process = true; let sn = match SortitionDB::get_block_snapshot_consensus( &sortdb.conn(), @@ -715,7 +737,7 @@ impl PeerNetwork { &nakamoto_block.header.consensus_hash, &e ); - return false; + return (None, false); } }; @@ -725,36 +747,45 @@ impl PeerNetwork { self.get_local_peer(), &nakamoto_block.header.consensus_hash ); - return false; + return (None, false); } - // block must be signed by reward set signers let sn_rc = self .burnchain .pox_reward_cycle(sn.block_height) .expect("FATAL: sortition has no reward cycle"); - let Some(rc_data) = self.current_reward_sets.get(&sn_rc) else { - info!( - "{:?}: Failed to validate Nakamoto block {}/{}: no reward set", - self.get_local_peer(), - &nakamoto_block.header.consensus_hash, - &nakamoto_block.header.block_hash() + + return (Some(sn_rc), can_process); + } + + /// Determine if an unsolicited NakamotoBlockData message contains data we can potentially + /// buffer. Returns whether or not the block can be buffered. 
+ pub(crate) fn is_nakamoto_block_bufferable( + &mut self, + sortdb: &SortitionDB, + chainstate: &StacksChainState, + nakamoto_block: &NakamotoBlock, + ) -> bool { + if chainstate + .nakamoto_blocks_db() + .has_nakamoto_block(&nakamoto_block.block_id()) + .unwrap_or(false) + { + debug!( + "{:?}: Aleady have Nakamoto block {}", + &self.local_peer, + &nakamoto_block.block_id() ); return false; - }; - let Some(reward_set) = rc_data.reward_set() else { - info!( - "{:?}: No reward set for reward cycle {}", - self.get_local_peer(), - sn_rc - ); + } + + let (sn_rc_opt, can_process) = + self.find_nakamoto_block_reward_cycle(sortdb, nakamoto_block); + let Some(sn_rc) = sn_rc_opt else { return false; }; - if let Err(e) = nakamoto_block.header.verify_signer_signatures(reward_set) { - info!( - "{:?}: signature verification failrue for Nakamoto block {}/{} in reward cycle {}: {:?}", self.get_local_peer(), &nakamoto_block.header.consensus_hash, &nakamoto_block.header.block_hash(), sn_rc, &e - ); + if !self.check_nakamoto_block_signer_signature(sn_rc, nakamoto_block) { return false; } @@ -942,6 +973,8 @@ impl PeerNetwork { /// /// If `buffer` is false, then if the message handler deems the message valid, it will be /// forwraded to the relayer. + /// + /// Returns the messages to be forward to the relayer, keyed by sender. 
pub fn handle_unsolicited_messages( &mut self, sortdb: &SortitionDB, From 442c6fec44df3d73e065eb420813b15ea062ea64 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 11 Jun 2024 17:22:07 -0400 Subject: [PATCH 15/20] chore: resource accounting for pushed nakamoto blocks --- stackslib/src/net/chat.rs | 124 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 124 insertions(+) diff --git a/stackslib/src/net/chat.rs b/stackslib/src/net/chat.rs index 3037ac60d1..b129fab50a 100644 --- a/stackslib/src/net/chat.rs +++ b/stackslib/src/net/chat.rs @@ -128,6 +128,8 @@ pub struct NeighborStats { pub transaction_push_rx_counts: VecDeque<(u64, u64)>, /// (timestamp, num bytes) pub stackerdb_push_rx_counts: VecDeque<(u64, u64)>, + /// (timestamp, num bytes) + pub nakamoto_block_push_rx_counts: VecDeque<(u64, u64)>, pub relayed_messages: HashMap, } @@ -152,6 +154,7 @@ impl NeighborStats { microblocks_push_rx_counts: VecDeque::new(), transaction_push_rx_counts: VecDeque::new(), stackerdb_push_rx_counts: VecDeque::new(), + nakamoto_block_push_rx_counts: VecDeque::new(), relayed_messages: HashMap::new(), } } @@ -214,6 +217,17 @@ impl NeighborStats { } } + /// Record that we recently received a Nakamoto blcok push of the given size. 
+ /// Keeps track of the last `NUM_BANDWIDTH_POINTS` such events, so we can estimate the current + /// bandwidth consumed by Nakamoto block pushes + pub fn add_nakamoto_block_push(&mut self, message_size: u64) -> () { + self.nakamoto_block_push_rx_counts + .push_back((get_epoch_time_secs(), message_size)); + while self.nakamoto_block_push_rx_counts.len() > NUM_BANDWIDTH_POINTS { + self.nakamoto_block_push_rx_counts.pop_front(); + } + } + pub fn add_relayer(&mut self, addr: &NeighborAddress, num_bytes: u64) -> () { if let Some(stats) = self.relayed_messages.get_mut(addr) { stats.num_messages += 1; @@ -298,6 +312,14 @@ impl NeighborStats { NeighborStats::get_bandwidth(&self.stackerdb_push_rx_counts, BANDWIDTH_POINT_LIFETIME) } + /// Get a peer's total nakamoto block bandwidth usage + pub fn get_nakamoto_block_push_bandwidth(&self) -> f64 { + NeighborStats::get_bandwidth( + &self.nakamoto_block_push_rx_counts, + BANDWIDTH_POINT_LIFETIME, + ) + } + /// Determine how many of a particular message this peer has received pub fn get_message_recv_count(&self, msg_id: StacksMessageID) -> u64 { *(self.msg_rx_counts.get(&msg_id).unwrap_or(&0)) @@ -2217,6 +2239,45 @@ impl ConversationP2P { Ok(None) } + /// Validate a pushed Nakamoto block list. + /// Update bandwidth accounting, but forward the blocks along if we can accept them. 
+ /// Possibly return a reply handle for a NACK if we throttle the remote sender + fn validate_nakamoto_block_push( + &mut self, + network: &PeerNetwork, + preamble: &Preamble, + relayers: Vec, + ) -> Result, net_error> { + assert!(preamble.payload_len > 1); // don't count 1-byte type prefix + + let local_peer = network.get_local_peer(); + let chain_view = network.get_chain_view(); + + if !self.process_relayers(local_peer, preamble, &relayers) { + warn!( + "Drop pushed Nakamoto blocks -- invalid relayers {:?}", + &relayers + ); + self.stats.msgs_err += 1; + return Err(net_error::InvalidMessage); + } + + self.stats + .add_nakamoto_block_push((preamble.payload_len as u64) - 1); + + if self.connection.options.max_nakamoto_block_push_bandwidth > 0 + && self.stats.get_nakamoto_block_push_bandwidth() + > (self.connection.options.max_nakamoto_block_push_bandwidth as f64) + { + debug!("Neighbor {:?} exceeded max Nakamoto block push bandwidth of {} bytes/sec (currently at {})", &self.to_neighbor_key(), self.connection.options.max_nakamoto_block_push_bandwidth, self.stats.get_nakamoto_block_push_bandwidth()); + return self + .reply_nack(local_peer, chain_view, preamble, NackErrorCodes::Throttled) + .and_then(|handle| Ok(Some(handle))); + } + + Ok(None) + } + /// Handle an inbound authenticated p2p data-plane message. /// Return the message if not handled fn handle_data_message( @@ -2305,6 +2366,21 @@ impl ConversationP2P { } } } + StacksMessageType::NakamotoBlocks(_) => { + // not handled here, but do some accounting -- we can't receive too many + // Nakamoto blocks per second + match self.validate_nakamoto_block_push( + network, + &msg.preamble, + msg.relayers.clone(), + )? 
{ + Some(handle) => Ok(handle), + None => { + // will forward upstream + return Ok(Some(msg)); + } + } + } _ => { // all else will forward upstream return Ok(Some(msg)); @@ -6603,6 +6679,54 @@ mod test { assert_eq!(bw_stats.get_stackerdb_push_bandwidth(), 110.0); } + #[test] + fn test_neighbor_stats_nakamoto_block_push_bandwidth() { + let mut stats = NeighborStats::new(false); + + assert_eq!(stats.get_nakamoto_block_push_bandwidth(), 0.0); + + stats.add_nakamoto_block_push(100); + assert_eq!(stats.get_nakamoto_block_push_bandwidth(), 0.0); + + // this should all happen in one second + let bw_stats = loop { + let mut bw_stats = stats.clone(); + let start = get_epoch_time_secs(); + + for _ in 0..(NUM_BANDWIDTH_POINTS - 1) { + bw_stats.add_nakamoto_block_push(100); + } + + let end = get_epoch_time_secs(); + if end == start { + break bw_stats; + } + }; + + assert_eq!( + bw_stats.get_nakamoto_block_push_bandwidth(), + (NUM_BANDWIDTH_POINTS as f64) * 100.0 + ); + + // space some out; make sure it takes 11 seconds + let bw_stats = loop { + let mut bw_stats = NeighborStats::new(false); + let start = get_epoch_time_secs(); + for _ in 0..11 { + bw_stats.add_nakamoto_block_push(100); + sleep_ms(1001); + } + + let end = get_epoch_time_secs(); + if end == start + 11 { + break bw_stats; + } + }; + + // 100 bytes/sec + assert_eq!(bw_stats.get_nakamoto_block_push_bandwidth(), 110.0); + } + #[test] fn test_sign_relay_forward_message() { let conn_opts = ConnectionOptions::default(); From c80b2c968075c05ad8c03edee97f30314c704323 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 11 Jun 2024 17:22:23 -0400 Subject: [PATCH 16/20] chore: option for maximum nakamoto block push bandwidth --- stackslib/src/net/connection.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/stackslib/src/net/connection.rs b/stackslib/src/net/connection.rs index 06e3c54f85..6fca681a77 100644 --- a/stackslib/src/net/connection.rs +++ b/stackslib/src/net/connection.rs @@ -368,6 +368,7 @@ pub struct 
ConnectionOptions { pub max_microblocks_push_bandwidth: u64, pub max_transaction_push_bandwidth: u64, pub max_stackerdb_push_bandwidth: u64, + pub max_nakamoto_block_push_bandwidth: u64, pub max_sockets: usize, pub public_ip_address: Option<(PeerAddress, u16)>, pub public_ip_request_timeout: u64, @@ -498,6 +499,7 @@ impl std::default::Default for ConnectionOptions { max_microblocks_push_bandwidth: 0, // infinite upload bandwidth allowed max_transaction_push_bandwidth: 0, // infinite upload bandwidth allowed max_stackerdb_push_bandwidth: 0, // infinite upload bandwidth allowed + max_nakamoto_block_push_bandwidth: 0, // infinite upload bandwidth allowed max_sockets: 800, // maximum number of client sockets we'll ever register public_ip_address: None, // resolve it at runtime by default public_ip_request_timeout: 60, // how often we can attempt to look up our public IP address From 868be4c544337b15879e70089525e7a812d52273 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 11 Jun 2024 17:22:39 -0400 Subject: [PATCH 17/20] chore: take &NakamotoBlock instead of NakamotoBlock --- .../src/chainstate/nakamoto/coordinator/tests.rs | 2 +- stackslib/src/chainstate/nakamoto/mod.rs | 11 +++++------ stackslib/src/chainstate/nakamoto/tests/mod.rs | 6 +++--- stackslib/src/chainstate/nakamoto/tests/node.rs | 4 ++-- 4 files changed, 11 insertions(+), 12 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index 015409a74c..b1a79810ee 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -404,7 +404,7 @@ fn replay_reward_cycle( &sortdb, &mut sort_handle, &mut node.chainstate, - block.clone(), + &block, None, ) .unwrap_or(false); diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 50015dace4..4310c01aa4 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ 
b/stackslib/src/chainstate/nakamoto/mod.rs @@ -1835,7 +1835,7 @@ impl NakamotoChainState { /// Insert a Nakamoto block into the staging blocks DB pub(crate) fn store_block( staging_db_tx: &NakamotoStagingBlocksTx, - block: NakamotoBlock, + block: &NakamotoBlock, burn_attachable: bool, ) -> Result<(), ChainstateError> { let block_id = block.block_id(); @@ -1894,7 +1894,7 @@ impl NakamotoChainState { /// Returns true if we stored the block; false if not. pub fn accept_block( config: &ChainstateConfig, - block: NakamotoBlock, + block: &NakamotoBlock, db_handle: &mut SortitionHandleConn, staging_db_tx: &NakamotoStagingBlocksTx, headers_conn: &Connection, @@ -1927,14 +1927,14 @@ impl NakamotoChainState { // it's okay if this fails because we might not have the parent block yet. It will be // checked on `::append_block()` - let expected_burn_opt = Self::get_expected_burns(db_handle, headers_conn, &block)?; + let expected_burn_opt = Self::get_expected_burns(db_handle, headers_conn, block)?; // this block must be consistent with its miner's leader-key and block-commit, and must // contain only transactions that are valid in this epoch. 
if let Err(e) = Self::validate_nakamoto_block_burnchain( db_handle, expected_burn_opt, - &block, + block, config.mainnet, config.chain_id, ) { @@ -1958,9 +1958,8 @@ impl NakamotoChainState { // same sortition history as `db_handle` (and thus it must be burn_attachable) let burn_attachable = true; - let _block_id = block.block_id(); Self::store_block(staging_db_tx, block, burn_attachable)?; - test_debug!("Stored Nakamoto block {}", &_block_id); + test_debug!("Stored Nakamoto block {}", &block.block_id()); Ok(true) } diff --git a/stackslib/src/chainstate/nakamoto/tests/mod.rs b/stackslib/src/chainstate/nakamoto/tests/mod.rs index 0a2441d388..3fd8b7744e 100644 --- a/stackslib/src/chainstate/nakamoto/tests/mod.rs +++ b/stackslib/src/chainstate/nakamoto/tests/mod.rs @@ -1069,7 +1069,7 @@ pub fn test_load_store_update_nakamoto_blocks() { 300, ) .unwrap(); - NakamotoChainState::store_block(&staging_tx, nakamoto_block.clone(), false).unwrap(); + NakamotoChainState::store_block(&staging_tx, &nakamoto_block, false).unwrap(); // tenure has one block assert_eq!( @@ -1102,7 +1102,7 @@ pub fn test_load_store_update_nakamoto_blocks() { ) .unwrap(); - NakamotoChainState::store_block(&staging_tx, nakamoto_block_2.clone(), false).unwrap(); + NakamotoChainState::store_block(&staging_tx, &nakamoto_block_2, false).unwrap(); // tenure has two blocks assert_eq!( @@ -1123,7 +1123,7 @@ pub fn test_load_store_update_nakamoto_blocks() { ); // store, but do not process, a block - NakamotoChainState::store_block(&staging_tx, nakamoto_block_3.clone(), false).unwrap(); + NakamotoChainState::store_block(&staging_tx, &nakamoto_block_3, false).unwrap(); staging_tx.commit().unwrap(); tx.commit().unwrap(); diff --git a/stackslib/src/chainstate/nakamoto/tests/node.rs b/stackslib/src/chainstate/nakamoto/tests/node.rs index 5736258b11..215eed3cbf 100644 --- a/stackslib/src/chainstate/nakamoto/tests/node.rs +++ b/stackslib/src/chainstate/nakamoto/tests/node.rs @@ -632,7 +632,7 @@ impl TestStacksNode { 
sortdb, &mut sort_handle, chainstate, - nakamoto_block.clone(), + &nakamoto_block, None, ) { Ok(accepted) => accepted, @@ -1159,7 +1159,7 @@ impl<'a> TestPeer<'a> { &sortdb, &mut sort_handle, &mut node.chainstate, - block, + &block, None, ) .unwrap(); From afdbaadb210d88279a3206b2eacca528c9d7317c Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 11 Jun 2024 17:23:02 -0400 Subject: [PATCH 18/20] chore: address PR feedback --- stackslib/src/net/p2p.rs | 12 +- stackslib/src/net/relay.rs | 187 ++++++++++++---------- stackslib/src/net/tests/mod.rs | 2 +- stackslib/src/net/tests/relay/epoch2x.rs | 3 +- stackslib/src/net/tests/relay/mod.rs | 3 +- stackslib/src/net/tests/relay/nakamoto.rs | 150 ++++++++++++++++- stackslib/src/net/unsolicited.rs | 131 +++++++++------ 7 files changed, 339 insertions(+), 149 deletions(-) diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index e92200832c..d5988a27ac 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -209,6 +209,7 @@ pub enum MempoolSyncState { } pub type PeerMap = HashMap; +pub type PendingMessages = HashMap>; pub struct ConnectingPeer { socket: mio_net::TcpStream, @@ -412,7 +413,7 @@ pub struct PeerNetwork { /// Pending messages (BlocksAvailable, MicroblocksAvailable, BlocksData, Microblocks, /// NakamotoBlocks) that we can't process yet, but might be able to process on a subsequent /// chain view update. 
- pub pending_messages: HashMap>, + pub pending_messages: PendingMessages, // fault injection -- force disconnects fault_last_disconnect: u64, @@ -574,7 +575,7 @@ impl PeerNetwork { antientropy_last_push_ts: 0, antientropy_start_reward_cycle: 0, - pending_messages: HashMap::new(), + pending_messages: PendingMessages::new(), fault_last_disconnect: 0, @@ -1408,11 +1409,10 @@ impl PeerNetwork { // send to each neighbor that needs one let mut all_neighbors = HashSet::new(); for nakamoto_block in data.blocks.iter() { - let mut neighbors = + let neighbors = self.sample_broadcast_peers(&relay_hints, nakamoto_block)?; - for nk in neighbors.drain(..) { - all_neighbors.insert(nk); - } + + all_neighbors.extend(neighbors); } Ok(all_neighbors.into_iter().collect()) } diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index 6aaff981b2..5183d8c794 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -479,7 +479,7 @@ impl RelayerStats { sampled += 1; // sample without replacement - norm -= rankings_vec[i].1; + norm = norm.saturating_sub(rankings_vec[i].1); rankings_vec[i].1 = 0; break; } @@ -492,6 +492,12 @@ impl RelayerStats { } } +/// Processed result of pushed Nakamoto blocks +pub struct AcceptedNakamotoBlocks { + pub relayers: Vec, + pub blocks: Vec, +} + impl Relayer { pub fn new( handle: NetworkHandle, @@ -644,8 +650,12 @@ impl Relayer { }; if let Err(e) = nakamoto_block.header.verify_signer_signatures(reward_set) { - info!( - "Signature verification failure for Nakamoto block {}/{} in reward cycle {}: {:?}", &nakamoto_block.header.consensus_hash, &nakamoto_block.header.block_hash(), sn_rc, &e + warn!( + "Signature verification failure for Nakamoto block"; + "consensus_hash" => %nakamoto_block.header.consensus_hash, + "block_hash" => %nakamoto_block.header.block_hash(), + "reward_cycle" => sn_rc, + "error" => %e.to_string() ); return Err(net_error::InvalidMessage); } @@ -782,7 +792,7 @@ impl Relayer { sortdb: &SortitionDB, 
sort_handle: &mut SortitionHandleConn, chainstate: &mut StacksChainState, - block: NakamotoBlock, + block: &NakamotoBlock, coord_comms: Option<&CoordinatorChannels>, ) -> Result { debug!( @@ -796,7 +806,7 @@ impl Relayer { .nakamoto_blocks_db() .has_nakamoto_block(&block.header.block_id()) .map_err(|e| { - debug!( + warn!( "Failed to determine if we have Nakamoto block {}/{}: {:?}", &block.header.consensus_hash, &block.header.block_hash(), @@ -947,7 +957,7 @@ impl Relayer { sortdb, &mut sort_handle, chainstate, - block.clone(), + &block, coord_comms, ) { warn!("Failed to process Nakamoto block {}: {:?}", &block_id, &e); @@ -1528,16 +1538,17 @@ impl Relayer { sortdb: &mut SortitionDB, chainstate: &mut StacksChainState, coord_comms: Option<&CoordinatorChannels>, - ) -> Result<(Vec<(Vec, Vec)>, Vec), net_error> { - let mut new_blocks_and_relayers = vec![]; + ) -> Result<(Vec, Vec), net_error> { + let mut pushed_blocks = vec![]; let mut bad_neighbors = vec![]; let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn())?; // process Nakamoto blocks pushed to us. // If a neighbor sends us an invalid Nakamoto block, then ban them. - for (neighbor_key, relayers_and_block_data) in network_result.pushed_nakamoto_blocks.iter() + for (neighbor_key, relayers_and_block_data) in + network_result.pushed_nakamoto_blocks.iter_mut() { - for (relayers, nakamoto_blocks_data) in relayers_and_block_data.iter() { + for (relayers, nakamoto_blocks_data) in relayers_and_block_data.iter_mut() { let mut accepted_blocks = vec![]; if let Err(e) = Relayer::validate_nakamoto_blocks_push( burnchain, @@ -1556,7 +1567,7 @@ impl Relayer { break; } - for nakamoto_block in nakamoto_blocks_data.blocks.iter() { + for nakamoto_block in nakamoto_blocks_data.blocks.drain(..) 
{ let block_id = nakamoto_block.block_id(); debug!( "Received pushed Nakamoto block {} from {}", @@ -1568,7 +1579,7 @@ impl Relayer { sortdb, &mut sort_handle, chainstate, - nakamoto_block.clone(), + &nakamoto_block, coord_comms, ) { Ok(accepted) => { @@ -1577,9 +1588,9 @@ impl Relayer { "Accepted Nakamoto block {} ({}) from {}", &block_id, &nakamoto_block.header.consensus_hash, neighbor_key ); - accepted_blocks.push(nakamoto_block.clone()); + accepted_blocks.push(nakamoto_block); } else { - debug!( + warn!( "Rejected Nakamoto block {} ({}) from {}", &block_id, &nakamoto_block.header.consensus_hash, &neighbor_key, ); @@ -1600,12 +1611,15 @@ impl Relayer { } if accepted_blocks.len() > 0 { - new_blocks_and_relayers.push((relayers.clone(), accepted_blocks)); + pushed_blocks.push(AcceptedNakamotoBlocks { + relayers: relayers.clone(), + blocks: accepted_blocks, + }); } } } - Ok((new_blocks_and_relayers, bad_neighbors)) + Ok((pushed_blocks, bad_neighbors)) } /// Verify that a relayed transaction is not problematic. This is a static check -- we only @@ -1908,7 +1922,7 @@ impl Relayer { sortdb: &mut SortitionDB, chainstate: &mut StacksChainState, coord_comms: Option<&CoordinatorChannels>, - ) -> Result<(Vec<(Vec, Vec)>, Vec), net_error> { + ) -> Result<(Vec, Vec), net_error> { // process downloaded Nakamoto blocks. 
// We treat them as singleton blocks fetched via zero relayers let nakamoto_blocks = @@ -1921,10 +1935,10 @@ impl Relayer { nakamoto_blocks.into_values(), coord_comms, ) { - Ok(accepted) => accepted - .into_iter() - .map(|block| (vec![], vec![block])) - .collect(), + Ok(accepted) => vec![AcceptedNakamotoBlocks { + relayers: vec![], + blocks: accepted, + }], Err(e) => { warn!("Failed to process downloaded Nakamoto blocks: {:?}", &e); vec![] @@ -2475,13 +2489,13 @@ impl Relayer { _local_peer: &LocalPeer, sortdb: &SortitionDB, chainstate: &StacksChainState, - nakamoto_blocks_and_relayers: Vec<(Vec, Vec)>, + accepted_blocks: Vec, force_send: bool, ) { debug!( "{:?}: relay {} sets of Nakamoto blocks", _local_peer, - nakamoto_blocks_and_relayers.len() + accepted_blocks.len() ); // the relay strategy is to only send blocks that are within @@ -2502,7 +2516,9 @@ impl Relayer { .map(|sn| sn.consensus_hash) .collect(); - for (relayers, blocks) in nakamoto_blocks_and_relayers.into_iter() { + for blocks_and_relayers in accepted_blocks.into_iter() { + let AcceptedNakamotoBlocks { relayers, blocks } = blocks_and_relayers; + let relay_blocks: Vec<_> = blocks .into_iter() .filter(|blk| { @@ -2561,10 +2577,10 @@ impl Relayer { /// Process epoch3 data /// Relay new nakamoto blocks if not in ibd - /// Returns number of new nakamoto blocks + /// Returns number of new nakamoto blocks, up to u64::MAX pub fn process_new_epoch3_blocks( &mut self, - _local_peer: &LocalPeer, + local_peer: &LocalPeer, network_result: &mut NetworkResult, burnchain: &Burnchain, sortdb: &mut SortitionDB, @@ -2572,43 +2588,38 @@ impl Relayer { ibd: bool, coord_comms: Option<&CoordinatorChannels>, ) -> u64 { - let mut num_new_nakamoto_blocks = 0; - match Self::process_new_nakamoto_blocks( + let (accepted_blocks, bad_neighbors) = match Self::process_new_nakamoto_blocks( network_result, burnchain, sortdb, chainstate, coord_comms, ) { - Ok((nakamoto_blocks_and_relayers, bad_neighbors)) => { - num_new_nakamoto_blocks 
= nakamoto_blocks_and_relayers - .iter() - .fold(0, |acc, (_relayers, blocks)| acc + blocks.len()) - as u64; - - // punish bad peers - if bad_neighbors.len() > 0 { - debug!("{:?}: Ban {} peers", &_local_peer, bad_neighbors.len()); - if let Err(e) = self.p2p.ban_peers(bad_neighbors) { - warn!("Failed to ban bad-block peers: {:?}", &e); - } - } - - // relay if not IBD - if !ibd && nakamoto_blocks_and_relayers.len() > 0 { - self.relay_epoch3_blocks( - _local_peer, - sortdb, - chainstate, - nakamoto_blocks_and_relayers, - false, - ); - } - } + Ok(x) => x, Err(e) => { warn!("Failed to process new Nakamoto blocks: {:?}", &e); + return 0; + } + }; + + let num_new_nakamoto_blocks = accepted_blocks + .iter() + .fold(0, |acc, accepted| acc + accepted.blocks.len()) + .try_into() + .unwrap_or(u64::MAX); // don't panic if we somehow receive more than u64::MAX blocks + + // punish bad peers + if bad_neighbors.len() > 0 { + debug!("{:?}: Ban {} peers", &local_peer, bad_neighbors.len()); + if let Err(e) = self.p2p.ban_peers(bad_neighbors) { + warn!("Failed to ban bad-block peers: {:?}", &e); } } + + // relay if not IBD + if !ibd && accepted_blocks.len() > 0 { + self.relay_epoch3_blocks(local_peer, sortdb, chainstate, accepted_blocks, false); + } num_new_nakamoto_blocks } @@ -2624,40 +2635,42 @@ impl Relayer { ibd: bool, event_observer: Option<&dyn RelayEventDispatcher>, ) -> Vec { - // process new transactions + if ibd { + // don't do anything + return vec![]; + } + + // only care about transaction forwarding if not IBD. + // store all transactions, and forward the novel ones to neighbors let mut mempool_txs_added = vec![]; - if !ibd { - // only care about transaction forwarding if not IBD. 
- // store all transactions, and forward the novel ones to neighbors - test_debug!( - "{:?}: Process {} transaction(s)", + test_debug!( + "{:?}: Process {} transaction(s)", + &_local_peer, + network_result.pushed_transactions.len() + ); + let new_txs = Relayer::process_transactions( + network_result, + sortdb, + chainstate, + mempool, + event_observer.map(|obs| obs.as_mempool_event_dispatcher()), + ) + .unwrap_or(vec![]); + + if new_txs.len() > 0 { + debug!( + "{:?}: Send {} transactions to neighbors", &_local_peer, - network_result.pushed_transactions.len() + new_txs.len() ); - let new_txs = Relayer::process_transactions( - network_result, - sortdb, - chainstate, - mempool, - event_observer.map(|obs| obs.as_mempool_event_dispatcher()), - ) - .unwrap_or(vec![]); - - if new_txs.len() > 0 { - debug!( - "{:?}: Send {} transactions to neighbors", - &_local_peer, - new_txs.len() - ); - } + } - for (relayers, tx) in new_txs.into_iter() { - debug!("{:?}: Broadcast tx {}", &_local_peer, &tx.txid()); - mempool_txs_added.push(tx.clone()); - let msg = StacksMessageType::Transaction(tx); - if let Err(e) = self.p2p.broadcast_message(relayers, msg) { - warn!("Failed to broadcast transaction: {:?}", &e); - } + for (relayers, tx) in new_txs.into_iter() { + debug!("{:?}: Broadcast tx {}", &_local_peer, &tx.txid()); + mempool_txs_added.push(tx.clone()); + let msg = StacksMessageType::Transaction(tx); + if let Err(e) = self.p2p.broadcast_message(relayers, msg) { + warn!("Failed to broadcast transaction: {:?}", &e); } } mempool_txs_added @@ -2675,7 +2688,7 @@ impl Relayer { /// turned into peer bans. 
pub fn process_network_result( &mut self, - _local_peer: &LocalPeer, + local_peer: &LocalPeer, network_result: &mut NetworkResult, burnchain: &Burnchain, sortdb: &mut SortitionDB, @@ -2688,7 +2701,7 @@ impl Relayer { // process epoch2 data let (num_new_blocks, num_new_confirmed_microblocks, num_new_unconfirmed_microblocks) = self .process_new_epoch2_blocks( - _local_peer, + local_peer, network_result, sortdb, chainstate, @@ -2698,7 +2711,7 @@ impl Relayer { // process epoch3 data let num_new_nakamoto_blocks = self.process_new_epoch3_blocks( - _local_peer, + local_peer, network_result, burnchain, sortdb, @@ -2709,7 +2722,7 @@ impl Relayer { // process transactions let mempool_txs_added = self.process_new_transactions( - _local_peer, + local_peer, network_result, sortdb, chainstate, diff --git a/stackslib/src/net/tests/mod.rs b/stackslib/src/net/tests/mod.rs index 1cb28e76fb..bd2c674bf1 100644 --- a/stackslib/src/net/tests/mod.rs +++ b/stackslib/src/net/tests/mod.rs @@ -235,7 +235,7 @@ impl NakamotoBootPlan { &sortdb, &mut sort_handle, &mut node.chainstate, - block.clone(), + &block, None, ) .unwrap(); diff --git a/stackslib/src/net/tests/relay/epoch2x.rs b/stackslib/src/net/tests/relay/epoch2x.rs index 1a383f7f87..fe69b6895a 100644 --- a/stackslib/src/net/tests/relay/epoch2x.rs +++ b/stackslib/src/net/tests/relay/epoch2x.rs @@ -1,5 +1,4 @@ -// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation -// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// Copyright (C) 2024 Stacks Open Internet Foundation // // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by diff --git a/stackslib/src/net/tests/relay/mod.rs b/stackslib/src/net/tests/relay/mod.rs index 04e8e0fd4f..c408e9ee60 100644 --- a/stackslib/src/net/tests/relay/mod.rs +++ b/stackslib/src/net/tests/relay/mod.rs @@ -1,5 +1,4 @@ -// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation -// 
Copyright (C) 2020-2023 Stacks Open Internet Foundation +// Copyright (C) 2024 Stacks Open Internet Foundation // // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by diff --git a/stackslib/src/net/tests/relay/nakamoto.rs b/stackslib/src/net/tests/relay/nakamoto.rs index 2d286e157f..dc0c144e5c 100644 --- a/stackslib/src/net/tests/relay/nakamoto.rs +++ b/stackslib/src/net/tests/relay/nakamoto.rs @@ -1,5 +1,4 @@ -// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation -// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// Copyright (C) 2024 Stacks Open Internet Foundation // // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by @@ -29,7 +28,7 @@ use rand::Rng; use stacks_common::address::AddressHashMode; use stacks_common::types::chainstate::{BlockHeaderHash, StacksBlockId, StacksWorkScore, TrieHash}; use stacks_common::types::Address; -use stacks_common::util::hash::MerkleTree; +use stacks_common::util::hash::{MerkleTree, Sha512Trunc256Sum}; use stacks_common::util::sleep_ms; use stacks_common::util::vrf::VRFProof; @@ -39,12 +38,15 @@ use crate::burnchains::tests::TestMiner; use crate::chainstate::burn::operations::BlockstackOperationType; use crate::chainstate::nakamoto::coordinator::tests::make_token_transfer; use crate::chainstate::nakamoto::tests::get_account; +use crate::chainstate::nakamoto::NakamotoBlockHeader; use crate::chainstate::stacks::boot::test::{ key_to_stacks_addr, make_pox_4_lockup, make_signer_key_signature, with_sortdb, }; use crate::chainstate::stacks::db::blocks::{MINIMUM_TX_FEE, MINIMUM_TX_FEE_RATE_PER_BYTE}; use crate::chainstate::stacks::miner::{BlockBuilderSettings, StacksMicroblockBuilder}; -use crate::chainstate::stacks::test::codec_all_transactions; +use crate::chainstate::stacks::test::{ + codec_all_transactions, 
make_codec_test_block, make_codec_test_microblock, +}; use crate::chainstate::stacks::tests::{ make_coinbase, make_coinbase_with_nonce, make_smart_contract_with_version, make_user_stacks_transfer, TestStacksNode, @@ -60,7 +62,7 @@ use crate::net::download::*; use crate::net::http::{HttpRequestContents, HttpRequestPreamble}; use crate::net::httpcore::StacksHttpMessage; use crate::net::inv::inv2x::*; -use crate::net::relay::{ProcessedNetReceipts, Relayer}; +use crate::net::relay::{AcceptedNakamotoBlocks, ProcessedNetReceipts, Relayer}; use crate::net::test::*; use crate::net::tests::download::epoch2x::run_get_blocks_and_microblocks; use crate::net::tests::inv::nakamoto::make_nakamoto_peers_from_invs; @@ -301,7 +303,10 @@ impl SeedNode { &local_peer, &sortdb, &stacks_node.chainstate, - vec![(vec![], blocks.clone())], + vec![AcceptedNakamotoBlocks { + relayers: vec![], + blocks: blocks.clone(), + }], true, ); @@ -374,6 +379,139 @@ impl SeedNode { } } +/// Test buffering limits +#[test] +fn test_buffer_data_message() { + let observer = TestEventObserver::new(); + let bitvecs = vec![vec![ + true, true, true, true, true, true, true, true, true, true, + ]]; + + let (mut peer, _followers) = + make_nakamoto_peers_from_invs(function_name!(), &observer, 10, 5, bitvecs.clone(), 1); + + let nakamoto_block = NakamotoBlock { + header: NakamotoBlockHeader { + version: 1, + chain_length: 457, + burn_spent: 126, + consensus_hash: ConsensusHash([0x55; 20]), + parent_block_id: StacksBlockId([0x03; 32]), + tx_merkle_root: Sha512Trunc256Sum([0x05; 32]), + state_index_root: TrieHash([0x07; 32]), + miner_signature: MessageSignature::empty(), + signer_signature: vec![], + signer_bitvec: BitVec::zeros(1).unwrap(), + }, + txs: vec![], + }; + + let blocks_available = StacksMessage::new( + 1, + 1, + 1, + &BurnchainHeaderHash([0x01; 32]), + 7, + &BurnchainHeaderHash([0x07; 32]), + StacksMessageType::BlocksAvailable(BlocksAvailableData { + available: vec![ + (ConsensusHash([0x11; 20]), 
BurnchainHeaderHash([0x22; 32])), + (ConsensusHash([0x33; 20]), BurnchainHeaderHash([0x44; 32])), + ], + }), + ); + + let microblocks_available = StacksMessage::new( + 1, + 1, + 1, + &BurnchainHeaderHash([0x01; 32]), + 7, + &BurnchainHeaderHash([0x07; 32]), + StacksMessageType::MicroblocksAvailable(BlocksAvailableData { + available: vec![ + (ConsensusHash([0x11; 20]), BurnchainHeaderHash([0x22; 32])), + (ConsensusHash([0x33; 20]), BurnchainHeaderHash([0x44; 32])), + ], + }), + ); + + let block = StacksMessage::new( + 1, + 1, + 1, + &BurnchainHeaderHash([0x01; 32]), + 7, + &BurnchainHeaderHash([0x07; 32]), + StacksMessageType::Blocks(BlocksData { + blocks: vec![BlocksDatum( + ConsensusHash([0x11; 20]), + make_codec_test_block(10, StacksEpochId::Epoch25), + )], + }), + ); + let microblocks = StacksMessage::new( + 1, + 1, + 1, + &BurnchainHeaderHash([0x01; 32]), + 7, + &BurnchainHeaderHash([0x07; 32]), + StacksMessageType::Microblocks(MicroblocksData { + index_anchor_block: StacksBlockId([0x55; 32]), + microblocks: vec![make_codec_test_microblock(10)], + }), + ); + let nakamoto_block = StacksMessage::new( + 1, + 1, + 1, + &BurnchainHeaderHash([0x01; 32]), + 7, + &BurnchainHeaderHash([0x07; 32]), + StacksMessageType::NakamotoBlocks(NakamotoBlocksData { + blocks: vec![nakamoto_block], + }), + ); + + for _ in 0..peer.network.connection_opts.max_buffered_blocks_available { + assert!(peer + .network + .buffer_data_message(0, blocks_available.clone())); + } + assert!(!peer + .network + .buffer_data_message(0, blocks_available.clone())); + + for _ in 0..peer + .network + .connection_opts + .max_buffered_microblocks_available + { + assert!(peer + .network + .buffer_data_message(0, microblocks_available.clone())); + } + assert!(!peer + .network + .buffer_data_message(0, microblocks_available.clone())); + + for _ in 0..peer.network.connection_opts.max_buffered_blocks { + assert!(peer.network.buffer_data_message(0, block.clone())); + } + 
assert!(!peer.network.buffer_data_message(0, block.clone())); + + for _ in 0..peer.network.connection_opts.max_buffered_microblocks { + assert!(peer.network.buffer_data_message(0, microblocks.clone())); + } + assert!(!peer.network.buffer_data_message(0, microblocks.clone())); + + for _ in 0..peer.network.connection_opts.max_buffered_nakamoto_blocks { + assert!(peer.network.buffer_data_message(0, nakamoto_block.clone())); + } + assert!(!peer.network.buffer_data_message(0, nakamoto_block.clone())); +} + /// Verify that Nakmaoto blocks whose sortitions are known will *not* be buffered, but instead /// forwarded to the relayer for processing. #[test] diff --git a/stackslib/src/net/unsolicited.rs b/stackslib/src/net/unsolicited.rs index 8bed8e5312..88f6b5efc3 100644 --- a/stackslib/src/net/unsolicited.rs +++ b/stackslib/src/net/unsolicited.rs @@ -160,18 +160,15 @@ impl PeerNetwork { Ok(Some(block_sortition_height)) } - /// Buffer a message for re-processing once the burnchain view updates. - /// If there is no space for the message, then silently drop it. - fn buffer_data_message(&mut self, event_id: usize, msg: StacksMessage) { - let Some(msgs) = self.pending_messages.get_mut(&event_id) else { - self.pending_messages.insert(event_id, vec![msg]); - debug!( - "{:?}: Event {} has 1 messages buffered", - &self.local_peer, event_id - ); - return; - }; - + /// Determine whether or not the system can buffer up this message, based on site-local + /// configuration options. + /// Return true if so, false if not + pub(crate) fn can_buffer_data_message( + &self, + event_id: usize, + msgs: &[StacksMessage], + msg: &StacksMessage, + ) -> bool { // check limits against connection opts, and if the limit is not met, then buffer up the // message. 
let mut blocks_available = 0; @@ -183,67 +180,103 @@ impl PeerNetwork { match &stored_msg.payload { StacksMessageType::BlocksAvailable(_) => { blocks_available += 1; - if blocks_available >= self.connection_opts.max_buffered_blocks_available { + if matches!(&msg.payload, StacksMessageType::BlocksAvailable(..)) + && blocks_available >= self.connection_opts.max_buffered_blocks_available + { debug!( - "{:?}: Drop BlocksAvailable from event {} -- already have {} buffered", + "{:?}: Cannot buffer BlocksAvailable from event {} -- already have {} buffered", &self.local_peer, event_id, blocks_available ); - return; + return false; } } StacksMessageType::MicroblocksAvailable(_) => { microblocks_available += 1; - if microblocks_available - >= self.connection_opts.max_buffered_microblocks_available + if matches!(&msg.payload, StacksMessageType::MicroblocksAvailable(..)) + && microblocks_available + >= self.connection_opts.max_buffered_microblocks_available { debug!( - "{:?}: Drop MicroblocksAvailable from event {} -- already have {} buffered", + "{:?}: Cannot buffer MicroblocksAvailable from event {} -- already have {} buffered", &self.local_peer, event_id, microblocks_available ); - return; + return false; } } StacksMessageType::Blocks(_) => { blocks_data += 1; - if blocks_data >= self.connection_opts.max_buffered_blocks { + if matches!(&msg.payload, StacksMessageType::Blocks(..)) + && blocks_data >= self.connection_opts.max_buffered_blocks + { debug!( - "{:?}: Drop BlocksData from event {} -- already have {} buffered", + "{:?}: Cannot buffer BlocksData from event {} -- already have {} buffered", &self.local_peer, event_id, blocks_data ); - return; + return false; } } StacksMessageType::Microblocks(_) => { microblocks_data += 1; - if microblocks_data >= self.connection_opts.max_buffered_microblocks { + if matches!(&msg.payload, StacksMessageType::Microblocks(..)) + && microblocks_data >= self.connection_opts.max_buffered_microblocks + { debug!( - "{:?}: Drop MicroblocksData 
from event {} -- already have {} buffered", + "{:?}: Cannot buffer MicroblocksData from event {} -- already have {} buffered", &self.local_peer, event_id, microblocks_data ); - return; + return false; } } StacksMessageType::NakamotoBlocks(_) => { nakamoto_blocks_data += 1; - if nakamoto_blocks_data >= self.connection_opts.max_buffered_nakamoto_blocks { + if matches!(&msg.payload, StacksMessageType::NakamotoBlocks(..)) + && nakamoto_blocks_data >= self.connection_opts.max_buffered_nakamoto_blocks + { debug!( - "{:?}: Drop NakamotoBlocksData from event {} -- already have {} buffered", + "{:?}: Cannot buffer NakamotoBlocksData from event {} -- already have {} buffered", &self.local_peer, event_id, nakamoto_blocks_data ); - return; + return false; } } _ => {} } } - msgs.push(msg); - debug!( - "{:?}: Event {} has {} messages buffered", - &self.local_peer, - event_id, - msgs.len() - ); + true + } + + /// Buffer a message for re-processing once the burnchain view updates. + /// If there is no space for the message, then silently drop it. + /// Returns true if buffered. + /// Returns false if not. + pub(crate) fn buffer_data_message(&mut self, event_id: usize, msg: StacksMessage) -> bool { + let Some(msgs) = self.pending_messages.get(&event_id) else { + self.pending_messages.insert(event_id, vec![msg]); + debug!( + "{:?}: Event {} has 1 messages buffered", + &self.local_peer, event_id + ); + return true; + }; + + // check limits against connection opts, and if the limit is not met, then buffer up the + // message. + if !self.can_buffer_data_message(event_id, msgs, &msg) { + return false; + } + + if let Some(msgs) = self.pending_messages.get_mut(&event_id) { + // should always be reachable + msgs.push(msg); + debug!( + "{:?}: Event {} has {} messages buffered", + &self.local_peer, + event_id, + msgs.len() + ); + } + true } /// Do we need a block or microblock stream, given its sortition's consensus hash? 
@@ -712,12 +745,11 @@ impl PeerNetwork { sortdb: &SortitionDB, nakamoto_block: &NakamotoBlock, ) -> (Option, bool) { - let mut can_process = true; - let sn = match SortitionDB::get_block_snapshot_consensus( + let (reward_set_sn, can_process) = match SortitionDB::get_block_snapshot_consensus( &sortdb.conn(), &nakamoto_block.header.consensus_hash, ) { - Ok(Some(sn)) => sn, + Ok(Some(sn)) => (sn, true), Ok(None) => { debug!( "No sortition {} for block {}", @@ -726,9 +758,8 @@ impl PeerNetwork { ); // we don't have the sortition for this, so we can't process it yet (i.e. we need // to buffer) - can_process = false; // load the tip so we can load the current reward set data - self.burnchain_tip.clone() + (self.burnchain_tip.clone(), false) } Err(e) => { info!( @@ -741,7 +772,7 @@ impl PeerNetwork { } }; - if !sn.pox_valid { + if !reward_set_sn.pox_valid { info!( "{:?}: Failed to query snapshot for {}: not on the valid PoX fork", self.get_local_peer(), @@ -750,12 +781,12 @@ impl PeerNetwork { return (None, false); } - let sn_rc = self + let reward_set_sn_rc = self .burnchain - .pox_reward_cycle(sn.block_height) + .pox_reward_cycle(reward_set_sn.block_height) .expect("FATAL: sortition has no reward cycle"); - return (Some(sn_rc), can_process); + return (Some(reward_set_sn_rc), can_process); } /// Determine if an unsolicited NakamotoBlockData message contains data we can potentially @@ -1007,7 +1038,6 @@ impl PeerNetwork { continue; } }; - let neighbor_key = if let Some(convo) = self.peers.get(&event_id) { convo.to_neighbor_key() } else { @@ -1023,6 +1053,17 @@ impl PeerNetwork { debug!("{:?}: Process {} unsolicited messages from {:?}", &self.local_peer, messages.len(), &neighbor_key; "buffer" => %buffer); for message in messages.into_iter() { + if buffer + && !self.can_buffer_data_message( + event_id, + self.pending_messages.get(&event_id).unwrap_or(&vec![]), + &message, + ) + { + // asked to buffer, but we don't have space + continue; + } + if !buffer { debug!( "{:?}: 
Re-try handling buffered message {} from {:?}", From 93b53dc3949d240304abd348ff53e7bf66805364 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 11 Jun 2024 22:08:15 -0400 Subject: [PATCH 19/20] fix: build error --- testnet/stacks-node/src/nakamoto_node/miner.rs | 15 +-------------- 1 file changed, 1 insertion(+), 14 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 9d20faaead..ffffb60f77 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -308,19 +308,6 @@ impl BlockMinerThread { )) })?; - let reward_cycle = self - .burnchain - .pox_constants - .block_height_to_reward_cycle( - self.burnchain.first_block_height, - self.burn_block.block_height, - ) - .ok_or_else(|| { - NakamotoNodeError::SigningCoordinatorFailure( - "Building on a burn block that is before the first burn block".into(), - ) - })?; - let reward_info = match load_nakamoto_reward_set( self.burnchain .pox_reward_cycle(tip.block_height.saturating_add(1)) @@ -633,7 +620,7 @@ impl BlockMinerThread { let (headers_conn, staging_tx) = chain_state.headers_conn_and_staging_tx_begin()?; NakamotoChainState::accept_block( &chainstate_config, - block, + &block, &mut sortition_handle, &staging_tx, headers_conn, From 04a9f8d9509a43222f3f162a01f2603c5247d63f Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Wed, 12 Jun 2024 18:42:16 +0300 Subject: [PATCH 20/20] skip timeout network mutants stackslib --- stackslib/src/net/codec.rs | 1 + stackslib/src/net/p2p.rs | 1 + stackslib/src/net/relay.rs | 8 ++++++++ stackslib/src/net/unsolicited.rs | 5 +++++ 4 files changed, 15 insertions(+) diff --git a/stackslib/src/net/codec.rs b/stackslib/src/net/codec.rs index c115a50d82..bd8154e414 100644 --- a/stackslib/src/net/codec.rs +++ b/stackslib/src/net/codec.rs @@ -355,6 +355,7 @@ impl NakamotoInvData { } impl StacksMessageCodec for NakamotoBlocksData { + #[cfg_attr(test, mutants::skip)] fn 
consensus_serialize(&self, fd: &mut W) -> Result<(), codec_error> { write_next(fd, &self.blocks)?; Ok(()) diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index d5988a27ac..9aaffcb8de 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -1348,6 +1348,7 @@ impl PeerNetwork { Ok(ret) } + #[cfg_attr(test, mutants::skip)] /// Dispatch a single request from another thread. pub fn dispatch_request(&mut self, request: NetworkRequest) -> Result<(), net_error> { match request { diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index 5183d8c794..6b34914bbb 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -937,6 +937,7 @@ impl Relayer { Ok(accepted) } + #[cfg_attr(test, mutants::skip)] /// Process nakamoto blocks that we downloaded. /// Log errors but do not return them. /// Returns the list of blocks we accepted. @@ -1529,6 +1530,7 @@ impl Relayer { Ok((mblock_datas, bad_neighbors)) } + #[cfg_attr(test, mutants::skip)] /// Preprocess all pushed Nakamoto blocks /// Return the Nakamoto blocks we can accept (and who relayed them), as well as the /// list of peers that served us invalid data. @@ -1913,6 +1915,7 @@ impl Relayer { )) } + #[cfg_attr(test, mutants::skip)] /// Process new Nakamoto blocks, both pushed and downloaded. /// Returns the list of Nakamoto blocks we stored, as well as the list of bad neighbors that /// sent us invalid blocks. @@ -2401,6 +2404,7 @@ impl Relayer { } } + #[cfg_attr(test, mutants::skip)] /// Process epoch2 block data. 
/// Relays blocks and microblocks as needed /// Returns (num new blocks, num new confirmed microblocks, num new unconfirmed microblocks) @@ -2460,6 +2464,7 @@ impl Relayer { ) } + #[cfg_attr(test, mutants::skip)] /// Get the last N sortitions, in order from the sortition tip to the n-1st ancestor pub fn get_last_n_sortitions( sortdb: &SortitionDB, @@ -2481,6 +2486,7 @@ impl Relayer { Ok(ret) } + #[cfg_attr(test, mutants::skip)] /// Relay Nakamoto blocks. /// By default, only sends them if we don't have them yet. /// This can be overridden by setting `force_send` to true. @@ -2575,6 +2581,7 @@ impl Relayer { } } + #[cfg_attr(test, mutants::skip)] /// Process epoch3 data /// Relay new nakamoto blocks if not in ibd /// Returns number of new nakamoto blocks, up to u64::MAX @@ -2623,6 +2630,7 @@ impl Relayer { num_new_nakamoto_blocks } + #[cfg_attr(test, mutants::skip)] /// Process new transactions /// Returns the list of accepted txs pub fn process_new_transactions( diff --git a/stackslib/src/net/unsolicited.rs b/stackslib/src/net/unsolicited.rs index 88f6b5efc3..e444a4f633 100644 --- a/stackslib/src/net/unsolicited.rs +++ b/stackslib/src/net/unsolicited.rs @@ -50,6 +50,7 @@ use crate::net::{ /// synchronization state machine. impl PeerNetwork { + #[cfg_attr(test, mutants::skip)] /// Check that the sender is authenticated. /// Returns Some(remote sender address) if so /// Returns None otherwise @@ -160,6 +161,7 @@ impl PeerNetwork { Ok(Some(block_sortition_height)) } + #[cfg_attr(test, mutants::skip)] /// Determine whether or not the system can buffer up this message, based on site-local /// configuration options. /// Return true if so, false if not @@ -246,6 +248,7 @@ impl PeerNetwork { true } + #[cfg_attr(test, mutants::skip)] /// Buffer a message for re-processing once the burnchain view updates. /// If there is no space for the message, then silently drop it. /// Returns true if buffered. 
@@ -736,6 +739,7 @@ impl PeerNetwork { true } + #[cfg_attr(test, mutants::skip)] /// Find the reward cycle in which to validate the signature for this block. /// This may not actually correspond to the sortition for this block's tenure -- for example, /// it may be for a block whose sortition is about to be processed. As such, return both the @@ -824,6 +828,7 @@ impl PeerNetwork { !can_process } + #[cfg_attr(test, mutants::skip)] /// Handle an unsolicited NakamotoBlocksData message. /// /// Unlike Stacks epoch 2.x blocks, no change to the remote peer's inventory will take place.