Skip to content
This repository has been archived by the owner on Aug 22, 2024. It is now read-only.

Commit

Permalink
Merge pull request stacks-network#4359 from jbencin/test/replay-block
Browse files Browse the repository at this point in the history
test: Add `replay-block` command
  • Loading branch information
jbencin authored Feb 12, 2024
2 parents 606838d + 895bd86 commit 7e51465
Show file tree
Hide file tree
Showing 3 changed files with 271 additions and 5 deletions.
2 changes: 2 additions & 0 deletions stacks-common/src/util/log.rs
Original file line number Diff line number Diff line change
Expand Up @@ -251,6 +251,8 @@ fn inner_get_loglevel() -> slog::Level {
|| env::var("BLOCKSTACK_DEBUG") == Ok("1".into())
{
slog::Level::Debug
} else if env::var("STACKS_LOG_CRITONLY") == Ok("1".into()) {
slog::Level::Critical
} else {
slog::Level::Info
}
Expand Down
32 changes: 27 additions & 5 deletions stackslib/src/chainstate/stacks/db/blocks.rs
Original file line number Diff line number Diff line change
Expand Up @@ -5266,7 +5266,7 @@ impl StacksChainState {
/// necessary so that the Headers database and Clarity database's
/// transactions can commit very close to one another, after the
/// event observer has emitted.
fn append_block<'a>(
pub fn append_block<'a>(
chainstate_tx: &mut ChainstateTx,
clarity_instance: &'a mut ClarityInstance,
burn_dbconn: &mut SortitionHandleTx,
Expand All @@ -5283,6 +5283,7 @@ impl StacksChainState {
burnchain_sortition_burn: u64,
user_burns: &[StagingUserBurnSupport],
affirmation_weight: u64,
do_not_advance: bool,
) -> Result<(StacksEpochReceipt, PreCommitClarityBlock<'a>), Error> {
debug!(
"Process block {:?} with {} transactions",
Expand Down Expand Up @@ -5654,10 +5655,30 @@ impl StacksChainState {
.as_ref()
.map(|(_, _, _, info)| info.clone());

if do_not_advance {
let epoch_receipt = StacksEpochReceipt {
header: StacksHeaderInfo::regtest_genesis(),
tx_receipts,
matured_rewards,
matured_rewards_info,
parent_microblocks_cost: microblock_execution_cost,
anchored_block_cost: block_execution_cost,
parent_burn_block_hash,
parent_burn_block_height,
parent_burn_block_timestamp,
evaluated_epoch,
epoch_transition: applied_epoch_transition,
signers_updated: false,
};

return Ok((epoch_receipt, clarity_commit));
}

let parent_block_header = parent_chain_tip
.anchored_header
.as_stacks_epoch2()
.ok_or_else(|| Error::InvalidChildOfNakomotoBlock)?;

let new_tip = StacksChainState::advance_tip(
&mut chainstate_tx.tx,
parent_block_header,
Expand Down Expand Up @@ -5722,7 +5743,7 @@ impl StacksChainState {
/// Verify that a Stacks anchored block attaches to its parent anchored block.
/// * checks .header.total_work.work
/// * checks .header.parent_block
fn check_block_attachment(
pub fn check_block_attachment(
parent_block_header: &StacksBlockHeader,
block_header: &StacksBlockHeader,
) -> bool {
Expand All @@ -5749,7 +5770,7 @@ impl StacksChainState {
/// The header info will be pulled from the headers DB, so this method only succeeds if the
/// parent block has been processed.
/// If it's not known, return None.
fn get_parent_header_info(
pub fn get_parent_header_info(
chainstate_tx: &mut ChainstateTx,
next_staging_block: &StagingBlock,
) -> Result<Option<StacksHeaderInfo>, Error> {
Expand Down Expand Up @@ -5791,7 +5812,7 @@ impl StacksChainState {
}

/// Extract and parse the block from a loaded staging block, and verify its integrity.
fn extract_stacks_block(next_staging_block: &StagingBlock) -> Result<StacksBlock, Error> {
pub fn extract_stacks_block(next_staging_block: &StagingBlock) -> Result<StacksBlock, Error> {
let block = {
StacksBlock::consensus_deserialize(&mut &next_staging_block.block_data[..])
.map_err(Error::CodecError)?
Expand All @@ -5813,7 +5834,7 @@ impl StacksChainState {
/// header info), determine which branch connects to the given block. If there are multiple
/// branches, punish the parent. Return the portion of the branch that actually connects to
/// the given block.
fn extract_connecting_microblocks(
pub fn extract_connecting_microblocks(
parent_block_header_info: &StacksHeaderInfo,
next_staging_block: &StagingBlock,
block: &StacksBlock,
Expand Down Expand Up @@ -6065,6 +6086,7 @@ impl StacksChainState {
next_staging_block.sortition_burn,
&user_supports,
block_am.weight(),
false,
) {
Ok(next_chain_tip_info) => next_chain_tip_info,
Err(e) => {
Expand Down
242 changes: 242 additions & 0 deletions stackslib/src/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -860,6 +860,73 @@ simulating a miner.
return;
}

if argv[1] == "replay-block" {
let print_help_and_exit = || -> ! {
let n = &argv[0];
eprintln!("Usage:");
eprintln!(" {n} <chainstate_path>");
eprintln!(" {n} <chainstate_path> prefix <index-block-hash-prefix>");
eprintln!(" {n} <chainstate_path> range <start_block> <end_block>");
eprintln!(" {n} <chainstate_path> <first|last> <block_count>");
process::exit(1);
};
if argv.len() < 2 {
print_help_and_exit();
}
let stacks_path = &argv[2];
let mode = argv.get(3).map(String::as_str);
let staging_blocks_db_path = format!("{stacks_path}/mainnet/chainstate/vm/index.sqlite");
let conn =
Connection::open_with_flags(&staging_blocks_db_path, OpenFlags::SQLITE_OPEN_READ_ONLY)
.unwrap();

let query = match mode {
Some("prefix") => format!(
"SELECT index_block_hash FROM staging_blocks WHERE index_block_hash LIKE \"{}%\"",
argv[4]
),
Some("first") => format!(
"SELECT index_block_hash FROM staging_blocks ORDER BY height ASC LIMIT {}",
argv[4]
),
Some("range") => {
let arg4 = argv[4]
.parse::<u64>()
.expect("<start_block> not a valid u64");
let arg5 = argv[5].parse::<u64>().expect("<end_block> not a valid u64");
let start = arg4.saturating_sub(1);
let blocks = arg5.saturating_sub(arg4);
format!("SELECT index_block_hash FROM staging_blocks ORDER BY height ASC LIMIT {start}, {blocks}")
}
Some("last") => format!(
"SELECT index_block_hash FROM staging_blocks ORDER BY height DESC LIMIT {}",
argv[4]
),
Some(_) => print_help_and_exit(),
// Default to ALL blocks
None => "SELECT index_block_hash FROM staging_blocks".into(),
};

let mut stmt = conn.prepare(&query).unwrap();
let mut hashes_set = stmt.query(rusqlite::NO_PARAMS).unwrap();

let mut index_block_hashes: Vec<String> = vec![];
while let Ok(Some(row)) = hashes_set.next() {
index_block_hashes.push(row.get(0).unwrap());
}

let total = index_block_hashes.len();
println!("Will check {total} blocks");
for (i, index_block_hash) in index_block_hashes.iter().enumerate() {
if i % 100 == 0 {
println!("Checked {i}...");
}
replay_block(stacks_path, index_block_hash);
}
println!("Finished!");
process::exit(0);
}

if argv[1] == "deserialize-db" {
if argv.len() < 4 {
eprintln!("Usage: {} clarity_sqlite_db [byte-prefix]", &argv[0]);
Expand Down Expand Up @@ -1482,3 +1549,178 @@ simulating a miner.

process::exit(0);
}

/// Re-run full block validation/processing for a single staging block,
/// identified by its index block hash (hex string), against the chainstate
/// rooted at `stacks_path`.
///
/// On recoverable lookup failures (missing microblock stream, missing parent
/// header) this prints a diagnostic and returns; if `append_block` rejects
/// the block it prints the error and exits the process with status 1.
///
/// NOTE(review): `append_block` is invoked with `do_not_advance = true`, and
/// neither the chainstate tx, the sortition tx, nor the returned clarity
/// pre-commit handle is committed here — so this appears to leave the
/// databases unmodified (pure replay). Confirm against `append_block`.
fn replay_block(stacks_path: &str, index_block_hash_hex: &str) {
    let index_block_hash = StacksBlockId::from_hex(index_block_hash_hex).unwrap();
    // Mainnet directory layout under the node's working dir is assumed.
    let chain_state_path = format!("{stacks_path}/mainnet/chainstate/");
    let sort_db_path = format!("{stacks_path}/mainnet/burnchain/sortition");
    let burn_db_path = format!("{stacks_path}/mainnet/burnchain/burnchain.sqlite");
    let burnchain_blocks_db = BurnchainDB::open(&burn_db_path, false).unwrap();

    let (mut chainstate, _) =
        StacksChainState::open(true, CHAIN_ID_MAINNET, &chain_state_path, None).unwrap();

    let mut sortdb = SortitionDB::connect(
        &sort_db_path,
        BITCOIN_MAINNET_FIRST_BLOCK_HEIGHT,
        &BurnchainHeaderHash::from_hex(BITCOIN_MAINNET_FIRST_BLOCK_HASH).unwrap(),
        BITCOIN_MAINNET_FIRST_BLOCK_TIMESTAMP.into(),
        STACKS_EPOCHS_MAINNET.as_ref(),
        PoxConstants::mainnet_default(),
        true,
    )
    .unwrap();
    // Sortition transaction at the current tip; never committed (see NOTE above).
    let mut sort_tx = sortdb.tx_begin_at_tip();

    let blocks_path = chainstate.blocks_path.clone();
    let (mut chainstate_tx, clarity_instance) = chainstate
        .chainstate_tx_begin()
        .expect("Failed to start chainstate tx");
    let mut next_staging_block =
        StacksChainState::load_staging_block_info(&chainstate_tx.tx, &index_block_hash)
            .expect("Failed to load staging block data")
            .expect("No such index block hash in block database");

    // The staging-block row carries only metadata; re-attach the serialized
    // block bytes from the on-disk blocks directory before deserializing.
    next_staging_block.block_data = StacksChainState::load_block_bytes(
        &blocks_path,
        &next_staging_block.consensus_hash,
        &next_staging_block.anchored_block_hash,
    )
    .unwrap()
    .unwrap_or_default();

    let Some(next_microblocks) =
        StacksChainState::find_parent_microblock_stream(&chainstate_tx.tx, &next_staging_block)
            .unwrap()
    else {
        println!("No microblock stream found for {index_block_hash_hex}");
        return;
    };

    // Resolve the burn-chain snapshot this block was mined against; a missing
    // snapshot for a stored staging block indicates database corruption.
    let (burn_header_hash, burn_header_height, burn_header_timestamp, _winning_block_txid) =
        match SortitionDB::get_block_snapshot_consensus(
            &sort_tx,
            &next_staging_block.consensus_hash,
        )
        .unwrap()
        {
            Some(sn) => (
                sn.burn_header_hash,
                sn.block_height as u32,
                sn.burn_header_timestamp,
                sn.winning_block_txid,
            ),
            None => {
                // shouldn't happen
                panic!(
                    "CORRUPTION: staging block {}/{} does not correspond to a burn block",
                    &next_staging_block.consensus_hash, &next_staging_block.anchored_block_hash
                );
            }
        };

    info!(
        "Process block {}/{} = {} in burn block {}, parent microblock {}",
        next_staging_block.consensus_hash,
        next_staging_block.anchored_block_hash,
        &index_block_hash,
        &burn_header_hash,
        &next_staging_block.parent_microblock_hash,
    );

    let Some(parent_header_info) =
        StacksChainState::get_parent_header_info(&mut chainstate_tx, &next_staging_block).unwrap()
    else {
        println!("Failed to load parent head info for block: {index_block_hash_hex}");
        return;
    };

    let block = StacksChainState::extract_stacks_block(&next_staging_block).unwrap();
    let block_size = next_staging_block.block_data.len() as u64;

    // Only pre-Nakamoto (epoch 2.x) parents are replayable by this tool.
    let parent_block_header = match &parent_header_info.anchored_header {
        StacksBlockHeaderTypes::Epoch2(bh) => bh,
        StacksBlockHeaderTypes::Nakamoto(_) => panic!("Nakamoto blocks not supported yet"),
    };

    if !StacksChainState::check_block_attachment(&parent_block_header, &block.header) {
        let msg = format!(
            "Invalid stacks block {}/{} -- does not attach to parent {}/{}",
            &next_staging_block.consensus_hash,
            block.block_hash(),
            parent_block_header.block_hash(),
            &parent_header_info.consensus_hash
        );
        println!("{msg}");
        return;
    }

    // validation check -- validate parent microblocks and find the ones that connect the
    // block's parent to this block.
    let next_microblocks = StacksChainState::extract_connecting_microblocks(
        &parent_header_info,
        &next_staging_block,
        &block,
        next_microblocks,
    )
    .unwrap();
    // Sanity-check that the connecting stream's tail matches what the staging
    // row recorded as this block's parent microblock.
    let (last_microblock_hash, last_microblock_seq) = match next_microblocks.len() {
        0 => (EMPTY_MICROBLOCK_PARENT_HASH.clone(), 0),
        _ => {
            let l = next_microblocks.len();
            (
                next_microblocks[l - 1].block_hash(),
                next_microblocks[l - 1].header.sequence,
            )
        }
    };
    assert_eq!(
        next_staging_block.parent_microblock_hash,
        last_microblock_hash
    );
    assert_eq!(
        next_staging_block.parent_microblock_seq,
        last_microblock_seq
    );

    // user supports were never activated
    let user_supports = vec![];

    let block_am = StacksChainState::find_stacks_tip_affirmation_map(
        &burnchain_blocks_db,
        sort_tx.tx(),
        &next_staging_block.consensus_hash,
        &next_staging_block.anchored_block_hash,
    )
    .unwrap();

    let pox_constants = sort_tx.context.pox_constants.clone();

    // Replay through the real processing path. The trailing `true` is
    // `do_not_advance`: validate and execute, but do not advance the tip.
    match StacksChainState::append_block(
        &mut chainstate_tx,
        clarity_instance,
        &mut sort_tx,
        &pox_constants,
        &parent_header_info,
        &next_staging_block.consensus_hash,
        &burn_header_hash,
        burn_header_height,
        burn_header_timestamp,
        &block,
        block_size,
        &next_microblocks,
        next_staging_block.commit_burn,
        next_staging_block.sortition_burn,
        &user_supports,
        block_am.weight(),
        true,
    ) {
        Ok((_receipt, _)) => {
            info!("Block processed successfully! block = {index_block_hash}");
        }
        Err(e) => {
            println!("Failed processing block! block = {index_block_hash}, error = {e:?}");
            process::exit(1);
        }
    };
}

0 comments on commit 7e51465

Please sign in to comment.