From ea56f4a853f54ed26c15ceca5025b50b96008813 Mon Sep 17 00:00:00 2001 From: Jack Huang Date: Tue, 8 Oct 2024 21:51:39 +0800 Subject: [PATCH] Pruning logic (#4194) * add parallel code * parallel execution * create chain when the parents are ready in parallel execution * set executing state in sender * add 10000 buffer for parallel execution * add log for saving time * add some test * add false testing case * add more testing code * add check data * add verify blue block in verifier * add some code * add verification * fix some bugs * add yielding after execution for processing the main chain in other service * fmt and clippy * rebase master set the number 4500000 for vega updating * 3300000 will be version 1 in vega * add pruning logic and compatible logic fix some test cases * fix clippy * fix test case * remove some single chain test case * fix clippy * fix flexdag's test case * fix clippy * add rational random to pass the deserialization verification * merge dag-master * fix bugs: blockmeta changes into latest version * add update db code * add dag db update * update dag db * update dag db * uncomment the pruning code * rebase sync parallel3 * fix compiling * db update for dag state * fix connection * add verify pruning * add pruning height * no checking the pruning point if the main header still does not have the pruning point * add is ancestor of command for reachability viewing * add command file * use 850000 * add rpc json new command * add some log for debug * if this is the first pruning point, the previous one will be genesis * save the dag state using the pruning point as the key and if it is 0, use genesis id * merge dag master * get the tips by genesis id if the pruning point is 0 * fix test case as previous version did * use genesis id to get the ghost data * add genesis in registry in test_miner_service * use 1000 as parallel buffer * remove some logs add test case for pruning * add pruning arguments in pruning methods to customize the network config * 
add test case for pruning calculation and pruning * add write lock when saving the tips * add test pruning for chain * add test case * if no fork, execute at once * add test code * add some rpc param in yaml's cmd * use macro to define the parallel count * use 4090000 as pruning beginning * fix the code for robot's comments --- chain/api/src/chain.rs | 4 +- chain/api/src/message.rs | 7 +- chain/api/src/service.rs | 24 +- chain/mock/src/mock_chain.rs | 111 +++++- chain/service/src/chain_service.rs | 6 + chain/src/chain.rs | 161 +++++++-- chain/src/verifier/mod.rs | 44 ++- chain/tests/test_prune.rs | 115 +++++++ cmd/db-exporter/src/force_deploy_output.rs | 5 +- cmd/db-exporter/src/main.rs | 35 +- cmd/generator/src/lib.rs | 4 +- cmd/starcoin/src/chain/is_ancestor_of_cmd.rs | 46 +++ cmd/starcoin/src/chain/mod.rs | 2 + cmd/starcoin/src/lib.rs | 3 +- config/src/genesis_config.rs | 38 +-- flexidag/src/blockdag.rs | 319 ++++++++++-------- flexidag/src/consensusdb/consenses_state.rs | 18 +- flexidag/src/ghostdag/protocol.rs | 3 +- flexidag/src/prune/pruning_point_manager.rs | 55 +-- flexidag/tests/tests.rs | 114 ++++++- genesis/src/lib.rs | 15 + kube/manifest/starcoin-halley.yaml | 16 +- kube/manifest/starcoin-proxima.yaml | 16 +- miner/src/create_block_template/mod.rs | 3 +- miner/tests/miner_test.rs | 1 + network/src/network_p2p_handle.rs | 2 +- node/src/node.rs | 7 +- rpc/api/generated_rpc_schema/chain.json | 51 +++ rpc/api/src/chain/mod.rs | 8 + rpc/client/src/lib.rs | 11 +- rpc/server/src/module/chain_rpc.rs | 12 + .../block_connector_service.rs | 63 +++- sync/src/block_connector/mod.rs | 6 +- .../test_write_dag_block_chain.rs | 16 +- sync/src/block_connector/write_block_chain.rs | 22 +- sync/src/parallel/executor.rs | 6 + sync/src/parallel/sender.rs | 19 +- sync/src/store/sync_dag_store.rs | 1 - sync/src/tasks/block_sync_task.rs | 17 +- sync/src/tasks/test_tools.rs | 3 +- sync/src/tasks/tests_dag.rs | 1 - .../src/fork_chain.rs | 8 + 42 files changed, 1082 
insertions(+), 336 deletions(-) create mode 100644 chain/tests/test_prune.rs create mode 100644 cmd/starcoin/src/chain/is_ancestor_of_cmd.rs diff --git a/chain/api/src/chain.rs b/chain/api/src/chain.rs index 2d801f1d32..53bcdd2fb8 100644 --- a/chain/api/src/chain.rs +++ b/chain/api/src/chain.rs @@ -106,7 +106,7 @@ pub trait ChainReader { access_path: Option, ) -> Result>; - fn current_tips_hash(&self) -> Result>; + fn current_tips_hash(&self, pruning_point: HashValue) -> Result>; fn has_dag_block(&self, header_id: HashValue) -> Result; fn check_chain_type(&self) -> Result; fn verify_and_ghostdata( @@ -114,6 +114,8 @@ pub trait ChainReader { uncles: &[BlockHeader], header: &BlockHeader, ) -> Result; + fn is_dag_ancestor_of(&self, ancestor: HashValue, descendants: Vec) -> Result; + fn get_pruning_height(&self) -> BlockNumber; } pub trait ChainWriter { diff --git a/chain/api/src/message.rs b/chain/api/src/message.rs index 97e5a8d60b..3e28820552 100644 --- a/chain/api/src/message.rs +++ b/chain/api/src/message.rs @@ -4,7 +4,7 @@ use crate::{ChainType, TransactionInfoWithProof}; use anyhow::Result; use starcoin_crypto::HashValue; -use starcoin_dag::consensusdb::consenses_state::DagStateView; +use starcoin_dag::consensusdb::consenses_state::{DagStateView, ReachabilityView}; use starcoin_dag::types::ghostdata::GhostdagData; use starcoin_service_registry::ServiceRequest; use starcoin_types::transaction::RichTransactionInfo; @@ -68,6 +68,10 @@ pub enum ChainRequest { GetDagStateView, CheckChainType, GetGhostdagData(HashValue), + IsAncestorOfCommand { + ancestor: HashValue, + descendants: Vec, + }, } impl ServiceRequest for ChainRequest { @@ -99,4 +103,5 @@ pub enum ChainResponse { DagStateView(Box), CheckChainType(ChainType), GhostdagDataOption(Box>), + IsAncestorOfCommand { reachability_view: ReachabilityView }, } diff --git a/chain/api/src/service.rs b/chain/api/src/service.rs index 4017174a14..4c78839d4a 100644 --- a/chain/api/src/service.rs +++ 
b/chain/api/src/service.rs @@ -5,7 +5,7 @@ use crate::message::{ChainRequest, ChainResponse}; use crate::{ChainType, TransactionInfoWithProof}; use anyhow::{bail, Result}; use starcoin_crypto::HashValue; -use starcoin_dag::consensusdb::consenses_state::DagStateView; +use starcoin_dag::consensusdb::consenses_state::{DagStateView, ReachabilityView}; use starcoin_dag::types::ghostdata::GhostdagData; use starcoin_service_registry::{ActorService, ServiceHandler, ServiceRef}; use starcoin_types::contract_event::{ContractEvent, ContractEventInfo}; @@ -149,6 +149,11 @@ pub trait ChainAsyncService: async fn get_dag_state(&self) -> Result; async fn check_chain_type(&self) -> Result; async fn get_ghostdagdata(&self, id: HashValue) -> Result>; + async fn is_ancestor_of( + &self, + ancestor: HashValue, + descendants: Vec, + ) -> Result; } #[async_trait::async_trait] @@ -486,4 +491,21 @@ where bail!("failed to get ghostdag data") } } + async fn is_ancestor_of( + &self, + ancestor: HashValue, + descendants: Vec, + ) -> Result { + let response = self + .send(ChainRequest::IsAncestorOfCommand { + ancestor, + descendants, + }) + .await??; + if let ChainResponse::IsAncestorOfCommand { reachability_view } = response { + Ok(reachability_view) + } else { + bail!("failed to get ghostdag data") + } + } } diff --git a/chain/mock/src/mock_chain.rs b/chain/mock/src/mock_chain.rs index 492f9b0389..83b2176612 100644 --- a/chain/mock/src/mock_chain.rs +++ b/chain/mock/src/mock_chain.rs @@ -1,17 +1,18 @@ // Copyright (c) The Starcoin Core Contributors // SPDX-License-Identifier: Apache-2.0 -use anyhow::Result; +use anyhow::{format_err, Result}; use starcoin_account_api::AccountInfo; use starcoin_chain::{BlockChain, ChainReader, ChainWriter}; use starcoin_config::ChainNetwork; use starcoin_consensus::Consensus; use starcoin_crypto::HashValue; -use starcoin_dag::blockdag::BlockDAG; +use starcoin_dag::blockdag::{BlockDAG, MineNewDagBlockInfo}; use starcoin_genesis::Genesis; use 
starcoin_logger::prelude::*; use starcoin_storage::Storage; use starcoin_types::block::{Block, BlockHeader}; +use starcoin_types::blockhash::KType; use starcoin_types::startup_info::ChainInfo; use std::sync::Arc; use std::vec; @@ -39,6 +40,26 @@ impl MockChain { Ok(Self::new_inner(net, chain, miner, storage)) } + pub fn new_with_params( + net: ChainNetwork, + k: KType, + pruning_depth: u64, + pruning_finality: u64, + ) -> Result { + let (storage, chain_info, _, dag) = + Genesis::init_storage_for_test_with_param(&net, k, pruning_depth, pruning_finality)?; + + let chain = BlockChain::new( + net.time_service(), + chain_info.head().id(), + storage.clone(), + None, + dag, + )?; + let miner = AccountInfo::random(); + Ok(Self::new_inner(net, chain, miner, storage)) + } + pub fn new_with_storage( net: ChainNetwork, storage: Arc, @@ -178,6 +199,26 @@ impl MockChain { .create_block(template, self.net.time_service().as_ref()) } + pub fn produce_block_by_params( + &mut self, + parent_header: BlockHeader, + tips: Vec, + pruning_point: HashValue, + ) -> Result { + let (template, _) = self.head.create_block_template_by_header( + *self.miner.address(), + parent_header, + vec![], + vec![], + None, + tips, + pruning_point, + )?; + self.head + .consensus() + .create_block(template, self.net.time_service().as_ref()) + } + pub fn produce_block_by_tips( &mut self, parent_header: BlockHeader, @@ -197,6 +238,72 @@ impl MockChain { .create_block(template, self.net.time_service().as_ref()) } + pub fn produce_block_for_pruning(&mut self) -> Result { + let tips = self.head.get_dag_state()?.tips; + let ghostdata = self.head.dag().ghost_dag_manager().ghostdag(&tips)?; + let selected_header = self + .head() + .get_storage() + .get_block_header_by_hash(ghostdata.selected_parent)? 
+ .ok_or_else(|| { + format_err!( + "Cannot find block header by hash: {:?}", + ghostdata.selected_parent + ) + })?; + + let previous_pruning = if selected_header.pruning_point() == HashValue::zero() { + self.head().get_storage().get_genesis()?.unwrap() + } else { + selected_header.pruning_point() + }; + + let prevous_ghostdata = self + .head() + .dag() + .ghostdata_by_hash(previous_pruning)? + .ok_or_else(|| format_err!("Cannot find ghostdata by hash: {:?}", previous_pruning))?; + + let MineNewDagBlockInfo { + tips: pruned_tips, + blue_blocks, + pruning_point, + } = self + .head + .dag() + .calc_mergeset_and_tips(previous_pruning, prevous_ghostdata.as_ref())?; + + debug!( + "tips: {:?}, blue_blocks: {:?}, pruning_point: {:?}", + pruned_tips, blue_blocks, pruning_point + ); + + let (template, _) = self.head.create_block_template_by_header( + *self.miner.address(), + selected_header, + vec![], + blue_blocks + .get(1..) + .unwrap_or(&[]) + .iter() + .map(|block_id| { + self.head() + .get_storage() + .get_block_header_by_hash(*block_id)? 
+ .ok_or_else(|| { + format_err!("Block header not found for hash: {:?}", block_id) + }) + }) + .collect::>>()?, + None, + pruned_tips, + pruning_point, + )?; + self.head + .consensus() + .create_block(template, self.net.time_service().as_ref()) + } + pub fn apply(&mut self, block: Block) -> Result<()> { self.head.apply(block)?; Ok(()) diff --git a/chain/service/src/chain_service.rs b/chain/service/src/chain_service.rs index 422a70130e..99537ffe5a 100644 --- a/chain/service/src/chain_service.rs +++ b/chain/service/src/chain_service.rs @@ -254,6 +254,12 @@ impl ServiceHandler for ChainReaderService { ChainRequest::GetGhostdagData(id) => Ok(ChainResponse::GhostdagDataOption(Box::new( self.inner.get_ghostdagdata(id)?, ))), + ChainRequest::IsAncestorOfCommand { + ancestor, + descendants, + } => Ok(ChainResponse::IsAncestorOfCommand { + reachability_view: self.inner.dag.is_ancestor_of(ancestor, descendants)?, + }), } } } diff --git a/chain/src/chain.rs b/chain/src/chain.rs index d83f16caa9..5262d01b12 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -13,13 +13,13 @@ use starcoin_chain_api::{ ExcludedTxns, ExecutedBlock, MintedUncleNumber, TransactionInfoWithProof, VerifiedBlock, VerifyBlockField, }; -use starcoin_config::genesis_config::G_DAG_TEST_CONFIG; use starcoin_consensus::Consensus; use starcoin_crypto::hash::PlainCryptoHash; use starcoin_crypto::HashValue; use starcoin_dag::blockdag::{BlockDAG, MineNewDagBlockInfo}; use starcoin_dag::consensusdb::consenses_state::DagState; use starcoin_dag::consensusdb::prelude::StoreError; +use starcoin_dag::consensusdb::schemadb::GhostdagStoreReader; use starcoin_executor::VMMetrics; use starcoin_logger::prelude::*; use starcoin_open_block::OpenedBlock; @@ -178,7 +178,7 @@ impl BlockChain { } fn init_dag(mut dag: BlockDAG, genesis_header: BlockHeader) -> Result { - match dag.get_dag_state() { + match dag.get_dag_state(genesis_header.id()) { anyhow::Result::Ok(_dag_state) => (), Err(e) => match e.downcast::()? 
{ StoreError::KeyNotFound(_) => { @@ -307,23 +307,27 @@ impl BlockChain { .unwrap_or(on_chain_block_gas_limit); let strategy = epoch.strategy(); let difficulty = strategy.calculate_next_difficulty(self)?; + + let (ghostdata, tips) = if tips.is_empty() { + let tips = self.get_dag_state()?.tips; + (self.dag().ghostdata(&tips)?, tips) + } else { + (self.dag().ghostdata(&tips)?, tips) + }; + let MineNewDagBlockInfo { tips, blue_blocks, pruning_point: _, - } = if !tips.is_empty() { - let blue_blocks = (*self.dag().ghostdata(&tips)?.mergeset_blues).clone()[1..].to_vec(); + } = { + let blue_blocks = ghostdata.mergeset_blues.clone()[1..].to_vec(); MineNewDagBlockInfo { tips, blue_blocks, pruning_point, // TODO: new test cases will need pass this field if they have some special requirements. } - } else { - self.dag().calc_mergeset_and_tips( - G_DAG_TEST_CONFIG.pruning_depth, - G_DAG_TEST_CONFIG.pruning_finality, - )? }; + debug!( "Blue blocks:{:?} in chain/create_block_template_by_header", blue_blocks @@ -345,9 +349,22 @@ impl BlockChain { uncles }; + let parent_header = if ghostdata.selected_parent != previous_header.id() { + self.storage + .get_block_header_by_hash(ghostdata.selected_parent)? + .ok_or_else(|| { + format_err!( + "Cannot find block header by {:?}", + ghostdata.selected_parent + ) + })? 
+ } else { + previous_header + }; + let mut opened_block = OpenedBlock::new( self.storage.clone(), - previous_header, + parent_header, final_block_gas_limit, author, self.time_service.now_millis(), @@ -358,7 +375,7 @@ impl BlockChain { tips, blue_blocks, 0, - HashValue::zero(), // TODO: this field must be returned by dag + pruning_point, )?; let excluded_txns = opened_block.push_txns(user_txns)?; let template = opened_block.finalize()?; @@ -983,7 +1000,12 @@ impl BlockChain { } pub fn get_dag_state(&self) -> Result { - self.dag.get_dag_state() + let current_pruning_point = self.status().head().pruning_point(); + if current_pruning_point == HashValue::zero() { + self.dag.get_dag_state(self.genesis_hash) + } else { + self.dag.get_dag_state(current_pruning_point) + } } } @@ -1334,8 +1356,10 @@ impl ChainReader for BlockChain { })) } - fn current_tips_hash(&self) -> Result> { - self.dag.get_dag_state().map(|state| state.tips) + fn current_tips_hash(&self, pruning_point: HashValue) -> Result> { + self.dag + .get_dag_state(pruning_point) + .map(|state| state.tips) } fn has_dag_block(&self, header_id: HashValue) -> Result { @@ -1360,7 +1384,43 @@ impl ChainReader for BlockChain { uncles: &[BlockHeader], header: &BlockHeader, ) -> Result { - self.dag().verify_and_ghostdata(uncles, header) + let previous_header = self + .storage + .get_block_header_by_hash(header.parent_hash())? + .ok_or_else(|| format_err!("cannot find parent block header"))?; + let next_ghostdata = self.dag().verify_and_ghostdata(uncles, header)?; + + if self.status().head().pruning_point() != HashValue::zero() { + let previous_ghostdata = if previous_header.pruning_point() == HashValue::zero() { + let genesis = self + .storage + .get_genesis()? + .ok_or_else(|| format_err!("the genesis id is none!"))?; + self.dag().storage.ghost_dag_store.get_data(genesis)? + } else { + self.dag() + .storage + .ghost_dag_store + .get_data(previous_header.pruning_point())? 
+ }; + + self.dag().verify_pruning_point( + previous_header.pruning_point(), + previous_ghostdata.as_ref(), + header.pruning_point(), + &next_ghostdata, + )?; + } + + Ok(next_ghostdata) + } + + fn is_dag_ancestor_of(&self, ancestor: HashValue, descendants: Vec) -> Result { + self.dag().check_ancestor_of(ancestor, descendants) + } + + fn get_pruning_height(&self) -> BlockNumber { + self.get_pruning_height() } } @@ -1469,16 +1529,30 @@ impl BlockChain { fn connect_dag(&mut self, executed_block: ExecutedBlock) -> Result { let dag = self.dag.clone(); let (new_tip_block, _) = (executed_block.block(), executed_block.block_info()); - let mut tips = self.current_tips_hash()?; - let parents = executed_block.block.header.parents_hash(); - if !tips.contains(&new_tip_block.id()) { - for hash in parents { - tips.retain(|x| *x != hash); - } - if !dag.check_ancestor_of(new_tip_block.id(), tips.clone())? { - tips.push(new_tip_block.id()); + let parent_header = self + .storage + .get_block_header_by_hash(new_tip_block.header().parent_hash())? + .ok_or_else(|| { + format_err!( + "Dag block should exist, block id: {:?}", + new_tip_block.header().parent_hash() + ) + })?; + let mut tips = if parent_header.pruning_point() == HashValue::zero() { + self.current_tips_hash(self.genesis_hash)? + } else { + self.current_tips_hash(parent_header.pruning_point())? + }; + + let mut new_tips = vec![]; + for hash in tips { + if !dag.check_ancestor_of(hash, vec![new_tip_block.id()])? { + new_tips.push(hash); } } + tips = new_tips; + tips.push(new_tip_block.id()); + // Caculate the ghostdata of the virutal node created by all tips. // And the ghostdata.selected of the tips will be the latest head. 
let block_hash = dag @@ -1519,9 +1593,47 @@ impl BlockChain { if self.epoch.end_block_number() == block.header().number() { self.epoch = get_epoch_from_statedb(&self.statedb)?; } - self.dag.save_dag_state(DagState { tips })?; + + if parent_header.pruning_point() == block.header().pruning_point() { + info!("pruning point not changed, save dag state without prune. tips are {:?}, pruning point is {:?}", tips, block.header().pruning_point()); + if block.header().pruning_point() == HashValue::zero() { + self.dag + .save_dag_state(self.genesis_hash, DagState { tips })?; + } else { + self.dag + .save_dag_state(block.header().pruning_point(), DagState { tips })?; + } + } else { + let new_tips = dag.pruning_point_manager().prune( + &DagState { tips: tips.clone() }, + parent_header.pruning_point(), + block.header().pruning_point(), + )?; + info!("pruning point changed, previous tips are: {:?}, save dag state with prune. tips are {:?}, previous pruning point is {:?}, current pruning point is {:?}", + tips, new_tips, parent_header.pruning_point(), block.header().pruning_point()); + self.dag + .save_dag_state(block.header().pruning_point(), DagState { tips: new_tips })?; + } + Ok(executed_block) } + + pub fn get_pruning_height(&self) -> BlockNumber { + let chain_id = self.status().head().chain_id(); + if chain_id.is_vega() { + 4000000 + } else if chain_id.is_proxima() { + 850000 + } else if chain_id.is_halley() { + 4090000 + } else if chain_id.is_main() { + 1 + } else if chain_id.is_dag_test() || chain_id.is_test() || chain_id.is_dev() { + BlockNumber::MAX + } else { + 1 + } + } } impl ChainWriter for BlockChain { @@ -1545,7 +1657,6 @@ impl ChainWriter for BlockChain { fn chain_state(&mut self) -> &ChainStateDB { &self.statedb } - fn apply_for_sync(&mut self, block: Block) -> Result { self.apply_with_verifier::(block) } diff --git a/chain/src/verifier/mod.rs b/chain/src/verifier/mod.rs index bd8870685c..a4381b2522 100644 --- a/chain/src/verifier/mod.rs +++ 
b/chain/src/verifier/mod.rs @@ -7,6 +7,7 @@ use starcoin_chain_api::{ verify_block, ChainReader, ConnectBlockError, VerifiedBlock, VerifyBlockField, }; use starcoin_consensus::{Consensus, ConsensusVerifyError}; +use starcoin_crypto::HashValue; use starcoin_dag::types::ghostdata::GhostdagData; use starcoin_logger::prelude::debug; use starcoin_open_block::AddressFilter; @@ -347,7 +348,6 @@ impl BasicDagVerifier { R: ChainReader, { let parents_hash = new_block_header.parents_hash(); - verify_block!( VerifyBlockField::Header, parents_hash.len() == parents_hash.iter().collect::>().len(), @@ -363,6 +363,7 @@ impl BasicDagVerifier { parents_hash, new_block_header.parent_hash() ); + parents_hash.iter().try_for_each(|parent_hash| { verify_block!( VerifyBlockField::Header, @@ -383,6 +384,33 @@ impl BasicDagVerifier { Ok::<(), ConnectBlockError>(()) })?; + // verify the pruning point + let parent_header = current_chain.current_header(); + if parent_header.pruning_point() != HashValue::zero() { + // the chain had pruning point already checking the descendants of the pruning point is a must + // check the parents are the descendants of the pruning point + parents_hash.iter().try_for_each(|parent_hash| { + verify_block!( + VerifyBlockField::Header, + current_chain.is_dag_ancestor_of(new_block_header.pruning_point(), vec![*parent_hash]).map_err(|e| { + ConnectBlockError::VerifyBlockFailed( + VerifyBlockField::Header, + anyhow::anyhow!( + "the block {:?} 's parent: {:?} is not the descendant of pruning point {:?}, error: {:?}", + new_block_header.id(), + parent_hash, + new_block_header.pruning_point(), + e + ), + ) + })?, + "Invalid block: parent {:?} is not the descendant of pruning point: {:?}", + parent_hash, new_block_header.pruning_point() + ); + Ok::<(), ConnectBlockError>(()) + })?; + } + ConsensusVerifier::verify_header(current_chain, new_block_header) } @@ -397,7 +425,7 @@ impl BasicDagVerifier { current_chain.verify_and_ghostdata(uncles, header) } } -//TODO: Implement 
it. + pub struct DagVerifier; impl BlockVerifier for DagVerifier { fn verify_header(current_chain: &R, new_block_header: &BlockHeader) -> Result<()> @@ -408,14 +436,18 @@ impl BlockVerifier for DagVerifier { } fn verify_uncles( - _current_chain: &R, - _uncles: &[BlockHeader], - _header: &BlockHeader, + current_chain: &R, + uncles: &[BlockHeader], + header: &BlockHeader, ) -> Result> where R: ChainReader, { - Ok(None) + Ok(Some(BasicDagVerifier::verify_blue_blocks( + current_chain, + uncles, + header, + )?)) } } diff --git a/chain/tests/test_prune.rs b/chain/tests/test_prune.rs new file mode 100644 index 0000000000..4e8f77078b --- /dev/null +++ b/chain/tests/test_prune.rs @@ -0,0 +1,115 @@ +use starcoin_chain::ChainReader; +use starcoin_chain_mock::MockChain; +use starcoin_config::ChainNetwork; +use starcoin_logger::prelude::debug; +use std::collections::HashSet; + +#[stest::test] +fn test_block_chain_prune() -> anyhow::Result<()> { + let mut mock_chain = MockChain::new_with_params(ChainNetwork::new_test(), 3, 4, 3)?; + let genesis = mock_chain.head().status().head.clone(); + + // blue blocks + let block_blue_1 = mock_chain.produce_and_apply_by_tips(genesis.clone(), vec![genesis.id()])?; + let block_blue_2 = + mock_chain.produce_and_apply_by_tips(block_blue_1.clone(), vec![block_blue_1.id()])?; + let block_blue_3 = + mock_chain.produce_and_apply_by_tips(block_blue_2.clone(), vec![block_blue_2.id()])?; + let block_blue_3_1 = + mock_chain.produce_and_apply_by_tips(block_blue_2.clone(), vec![block_blue_2.id()])?; + let block_blue_4 = mock_chain.produce_and_apply_by_tips( + block_blue_3.clone(), + vec![block_blue_3.id(), block_blue_3_1.id()], + )?; + let block_blue_5 = + mock_chain.produce_and_apply_by_tips(block_blue_4.clone(), vec![block_blue_4.id()])?; + + // red blocks + let block_red_2 = + mock_chain.produce_and_apply_by_tips(block_blue_1.clone(), vec![block_blue_1.id()])?; + let block_red_2_1 = + mock_chain.produce_and_apply_by_tips(block_blue_1.clone(), 
vec![block_blue_1.id()])?; + let block_red_3 = mock_chain.produce_and_apply_by_tips( + block_red_2.clone(), + vec![block_red_2.id(), block_red_2_1.id()], + )?; + + debug!( + "tips: {:?}, pruning point: {:?}", + mock_chain.head().get_dag_state()?, + mock_chain.head().status().head().pruning_point() + ); + assert_eq!( + mock_chain + .head() + .get_dag_state()? + .tips + .into_iter() + .collect::>(), + HashSet::from_iter(vec![block_blue_5.id(), block_red_3.id()]) + ); + + let block_blue_6 = + mock_chain.produce_and_apply_by_tips(block_blue_5.clone(), vec![block_blue_5.id()])?; + let block_blue_6_1 = + mock_chain.produce_and_apply_by_tips(block_blue_5.clone(), vec![block_blue_5.id()])?; + let block_red_4 = + mock_chain.produce_and_apply_by_tips(block_red_3.clone(), vec![block_red_3.id()])?; + + debug!( + "tips: {:?}, pruning point: {:?}", + mock_chain.head().get_dag_state()?, + mock_chain.head().status().head().pruning_point() + ); + assert_eq!( + mock_chain + .head() + .get_dag_state()? + .tips + .into_iter() + .collect::>(), + HashSet::from_iter(vec![ + block_blue_6.id(), + block_blue_6_1.id(), + block_red_4.id() + ]) + ); + + let block_blue_7 = mock_chain.produce_block_for_pruning()?; + mock_chain.apply(block_blue_7.clone())?; + + assert_eq!(block_blue_7.header().pruning_point(), block_blue_2.id()); + assert_eq!( + block_blue_7 + .header() + .parents_hash() + .into_iter() + .collect::>(), + HashSet::from_iter(vec![block_blue_6.id(), block_blue_6_1.id()]) + ); + + let tips = mock_chain.head().get_dag_state()?.tips; + assert_eq!( + tips.iter().cloned().collect::>(), + HashSet::from_iter(vec![block_blue_7.id()]) + ); + + let failure_block = mock_chain.produce_block_by_params( + block_blue_7.header().clone(), + vec![block_red_4.id(), block_blue_7.id()], + block_blue_7.header().pruning_point(), + )?; + assert_eq!( + failure_block + .header() + .parents_hash() + .into_iter() + .collect::>(), + HashSet::from_iter(vec![block_red_4.id(), block_blue_7.id()]) + ); + let result 
= mock_chain.apply(failure_block); + debug!("apply failure block result: {:?}", result); + assert!(result.is_err()); + + Ok(()) +} diff --git a/cmd/db-exporter/src/force_deploy_output.rs b/cmd/db-exporter/src/force_deploy_output.rs index d7325bf7b7..81159bd295 100644 --- a/cmd/db-exporter/src/force_deploy_output.rs +++ b/cmd/db-exporter/src/force_deploy_output.rs @@ -10,7 +10,7 @@ use clap::Parser; use starcoin_chain::{BlockChain, ChainReader, ChainWriter}; use starcoin_cmd::dev::dev_helper; use starcoin_config::{BuiltinNetworkID, ChainNetwork}; -use starcoin_dag::blockdag::{BlockDAG, DEFAULT_GHOSTDAG_K}; +use starcoin_dag::blockdag::BlockDAG; use starcoin_dag::consensusdb::prelude::FlexiDagStorageConfig; use starcoin_genesis::Genesis; use starcoin_state_api::ChainStateWriter; @@ -77,8 +77,7 @@ pub fn force_deploy_output( db_storage, ))?); - let dag = BlockDAG::new( - DEFAULT_GHOSTDAG_K, + let dag = BlockDAG::create_blockdag( starcoin_dag::consensusdb::prelude::FlexiDagStorage::create_from_path( network_path.join("dag/db/starcoindb"), FlexiDagStorageConfig::new(), diff --git a/cmd/db-exporter/src/main.rs b/cmd/db-exporter/src/main.rs index a7d0491b9a..7acb855fbe 100644 --- a/cmd/db-exporter/src/main.rs +++ b/cmd/db-exporter/src/main.rs @@ -21,7 +21,7 @@ use starcoin_chain::{ use starcoin_config::{BuiltinNetworkID, ChainNetwork, RocksdbConfig}; use starcoin_consensus::Consensus; use starcoin_crypto::HashValue; -use starcoin_dag::blockdag::{BlockDAG, DEFAULT_GHOSTDAG_K}; +use starcoin_dag::blockdag::BlockDAG; use starcoin_dag::consensusdb::prelude::FlexiDagStorageConfig; use starcoin_genesis::Genesis; use starcoin_resource_viewer::{AnnotatedMoveStruct, AnnotatedMoveValue, MoveValueAnnotator}; @@ -768,7 +768,7 @@ pub fn export_block_range( from_dir.join("dag/db/starcoindb"), FlexiDagStorageConfig::new(), )?; - let dag = starcoin_dag::blockdag::BlockDAG::new(DEFAULT_GHOSTDAG_K, dag_storage); + let dag = starcoin_dag::blockdag::BlockDAG::create_blockdag(dag_storage); 
let storage = Arc::new(Storage::new(StorageInstance::new_cache_and_db_instance( CacheStorage::new(None), @@ -895,7 +895,7 @@ pub fn apply_block( to_dir.join("dag/db/starcoindb"), FlexiDagStorageConfig::new(), )?; - let dag = starcoin_dag::blockdag::BlockDAG::new(DEFAULT_GHOSTDAG_K, dag_storage); + let dag = starcoin_dag::blockdag::BlockDAG::create_blockdag(dag_storage); // StarcoinVM::set_concurrency_level_once(num_cpus::get()); let (chain_info, _) = Genesis::init_and_check_storage(&net, storage.clone(), dag.clone(), to_dir.as_ref())?; @@ -986,7 +986,7 @@ pub fn startup_info_back( to_dir.join("dag/db/starcoindb"), FlexiDagStorageConfig::new(), )?; - let dag = starcoin_dag::blockdag::BlockDAG::new(DEFAULT_GHOSTDAG_K, dag_storage); + let dag = starcoin_dag::blockdag::BlockDAG::create_blockdag(dag_storage); let (chain_info, _) = Genesis::init_and_check_storage(&net, storage.clone(), dag.clone(), to_dir.as_ref())?; let chain = BlockChain::new( @@ -1038,7 +1038,7 @@ pub fn gen_block_transactions( to_dir.join("dag/db/starcoindb"), FlexiDagStorageConfig::new(), )?; - let dag = starcoin_dag::blockdag::BlockDAG::new(DEFAULT_GHOSTDAG_K, dag_storage); + let dag = starcoin_dag::blockdag::BlockDAG::create_blockdag(dag_storage); let (chain_info, _) = Genesis::init_and_check_storage(&net, storage.clone(), dag.clone(), to_dir.as_ref())?; let mut chain = BlockChain::new( @@ -1564,7 +1564,7 @@ pub fn export_snapshot( from_dir.join("dag/db/starcoindb"), FlexiDagStorageConfig::new(), )?; - let dag = starcoin_dag::blockdag::BlockDAG::new(DEFAULT_GHOSTDAG_K, dag_storage); + let dag = starcoin_dag::blockdag::BlockDAG::create_blockdag(dag_storage); let (chain_info, _) = Genesis::init_and_check_storage(&net, storage.clone(), dag.clone(), from_dir.as_ref())?; let chain = BlockChain::new( @@ -1916,7 +1916,7 @@ pub fn apply_snapshot( to_dir.join("dag/db/starcoindb"), FlexiDagStorageConfig::new(), )?; - let dag = starcoin_dag::blockdag::BlockDAG::new(DEFAULT_GHOSTDAG_K, dag_storage); + let dag 
= starcoin_dag::blockdag::BlockDAG::create_blockdag(dag_storage); let (chain_info, _) = Genesis::init_and_check_storage(&net, storage.clone(), dag.clone(), to_dir.as_ref())?; @@ -2258,7 +2258,7 @@ pub fn gen_turbo_stm_transactions(to_dir: PathBuf, block_num: Option) -> an to_dir.join("dag/db/starcoindb"), FlexiDagStorageConfig::new(), )?; - let dag = starcoin_dag::blockdag::BlockDAG::new(DEFAULT_GHOSTDAG_K, dag_storage); + let dag = starcoin_dag::blockdag::BlockDAG::create_blockdag(dag_storage); let (chain_info, _) = Genesis::init_and_check_storage(&net, storage.clone(), dag.clone(), to_dir.as_ref())?; let mut chain = BlockChain::new( @@ -2290,7 +2290,7 @@ pub fn apply_turbo_stm_block( to_dir.join("dag/db/starcoindb"), FlexiDagStorageConfig::new(), )?; - let dag = starcoin_dag::blockdag::BlockDAG::new(DEFAULT_GHOSTDAG_K, dag_storage); + let dag = starcoin_dag::blockdag::BlockDAG::create_blockdag(dag_storage); let (chain_info_seq, _) = Genesis::init_and_check_storage(&net, storage_seq.clone(), dag.clone(), to_dir.as_ref())?; let mut chain_seq = BlockChain::new( @@ -2354,7 +2354,7 @@ pub fn apply_turbo_stm_block( to_dir.join("dag/db/starcoindb"), FlexiDagStorageConfig::new(), )?; - let dag = starcoin_dag::blockdag::BlockDAG::new(DEFAULT_GHOSTDAG_K, dag_storage); + let dag = starcoin_dag::blockdag::BlockDAG::create_blockdag(dag_storage); let (chain_info_stm, _) = Genesis::init_and_check_storage( &net, storage_stm.clone(), @@ -2412,8 +2412,7 @@ pub fn verify_block( db_storage, ))?); - let dag = BlockDAG::new( - DEFAULT_GHOSTDAG_K, + let dag = BlockDAG::create_blockdag( starcoin_dag::consensusdb::prelude::FlexiDagStorage::create_from_path( from_dir.join("dag/db/starcoindb"), FlexiDagStorageConfig::new(), @@ -2526,8 +2525,7 @@ pub fn block_output( db_storage, ))?); - let dag = BlockDAG::new( - DEFAULT_GHOSTDAG_K, + let dag = BlockDAG::create_blockdag( starcoin_dag::consensusdb::prelude::FlexiDagStorage::create_from_path( from_dir.join("dag/db/starcoindb"), 
FlexiDagStorageConfig::new(), @@ -2571,8 +2569,7 @@ pub fn apply_block_output( CacheStorage::new(None), db_storage, ))?); - let dag = BlockDAG::new( - DEFAULT_GHOSTDAG_K, + let dag = BlockDAG::create_blockdag( starcoin_dag::consensusdb::prelude::FlexiDagStorage::create_from_path( to_dir.join("dag/db/starcoindb"), FlexiDagStorageConfig::new(), @@ -2632,8 +2629,7 @@ fn save_startup_info( CacheStorage::new(None), db_storage, ))?); - let dag = BlockDAG::new( - DEFAULT_GHOSTDAG_K, + let dag = BlockDAG::create_blockdag( starcoin_dag::consensusdb::prelude::FlexiDagStorage::create_from_path( to_dir.join("dag/db/starcoindb"), FlexiDagStorageConfig::new(), @@ -2668,8 +2664,7 @@ fn token_supply( CacheStorage::new(None), db_storage, ))?); - let dag = BlockDAG::new( - DEFAULT_GHOSTDAG_K, + let dag = BlockDAG::create_blockdag( starcoin_dag::consensusdb::prelude::FlexiDagStorage::create_from_path( from_dir.join("dag/db/starcoindb"), FlexiDagStorageConfig::new(), diff --git a/cmd/generator/src/lib.rs b/cmd/generator/src/lib.rs index 125a50a225..93d7696ec9 100644 --- a/cmd/generator/src/lib.rs +++ b/cmd/generator/src/lib.rs @@ -6,7 +6,7 @@ use starcoin_account::account_storage::AccountStorage; use starcoin_account::AccountManager; use starcoin_account_api::AccountInfo; use starcoin_config::{NodeConfig, StarcoinOpt}; -use starcoin_dag::blockdag::{BlockDAG, DEFAULT_GHOSTDAG_K}; +use starcoin_dag::blockdag::BlockDAG; use starcoin_genesis::Genesis; use starcoin_storage::cache_storage::CacheStorage; use starcoin_storage::db_storage::DBStorage; @@ -36,7 +36,7 @@ pub fn init_or_load_data_dir( config.storage.dag_dir(), config.storage.clone().into(), )?; - let dag = starcoin_dag::blockdag::BlockDAG::new(DEFAULT_GHOSTDAG_K, dag_storage); + let dag = starcoin_dag::blockdag::BlockDAG::create_blockdag(dag_storage); let (chain_info, _genesis) = Genesis::init_and_check_storage( config.net(), storage.clone(), diff --git a/cmd/starcoin/src/chain/is_ancestor_of_cmd.rs 
b/cmd/starcoin/src/chain/is_ancestor_of_cmd.rs new file mode 100644 index 0000000000..cfb9d74676 --- /dev/null +++ b/cmd/starcoin/src/chain/is_ancestor_of_cmd.rs @@ -0,0 +1,46 @@ +// Copyright (c) The Starcoin Core Contributors +// SPDX-License-Identifier: Apache-2.0 + +use std::str::FromStr; + +use crate::cli_state::CliState; +use crate::StarcoinOpt; +use anyhow::Result; +use clap::Parser; +use scmd::{CommandAction, ExecContext}; +use starcoin_crypto::HashValue; +use starcoin_dag::consensusdb::consenses_state::ReachabilityView; + +/// Get block info by number +#[derive(Debug, Parser, Clone)] +#[clap(name = "is-ancestor-of", alias = "is_ancestor_of")] +pub struct IsAncestorOfOpt { + #[clap(name = "ancestor", long, short = 'a')] + ancestor: String, + + #[clap(name = "descendants", long, short = 'd')] + descendants: Vec, +} + +pub struct IsAncestorOfCommand; + +impl CommandAction for IsAncestorOfCommand { + type State = CliState; + type GlobalOpt = StarcoinOpt; + type Opt = IsAncestorOfOpt; + type ReturnItem = ReachabilityView; + + fn run( + &self, + ctx: &ExecContext, + ) -> Result { + let opt = ctx.opt().clone(); + ctx.state().client().is_ancestor_of( + HashValue::from_str(&opt.ancestor)?, + opt.descendants + .into_iter() + .map(|id| HashValue::from_str(&id).map_err(|e| anyhow::anyhow!("{:?}", e))) + .collect::>>()?, + ) + } +} diff --git a/cmd/starcoin/src/chain/mod.rs b/cmd/starcoin/src/chain/mod.rs index c004e3c96f..7206ffbad9 100644 --- a/cmd/starcoin/src/chain/mod.rs +++ b/cmd/starcoin/src/chain/mod.rs @@ -12,6 +12,7 @@ mod get_txn_info_list_cmd; mod get_txn_infos_cmd; pub mod get_txn_proof_cmd; mod info_cmd; +mod is_ancestor_of_cmd; mod list_block_cmd; pub use epoch_info::*; @@ -24,4 +25,5 @@ pub use get_txn_info_cmd::*; pub use get_txn_info_list_cmd::*; pub use get_txn_infos_cmd::*; pub use info_cmd::*; +pub use is_ancestor_of_cmd::*; pub use list_block_cmd::*; diff --git a/cmd/starcoin/src/lib.rs b/cmd/starcoin/src/lib.rs index bc2114cc75..4a5ef258fc 100644 
--- a/cmd/starcoin/src/lib.rs +++ b/cmd/starcoin/src/lib.rs @@ -103,7 +103,8 @@ pub fn add_command( .subcommand(chain::GetTransactionInfoListCommand) .subcommand(chain::get_txn_proof_cmd::GetTransactionProofCommand) .subcommand(chain::GetBlockInfoCommand) - .subcommand(chain::GetDagStateCommand), + .subcommand(chain::GetDagStateCommand) + .subcommand(chain::IsAncestorOfCommand), ) .command( CustomCommand::with_name("txpool") diff --git a/config/src/genesis_config.rs b/config/src/genesis_config.rs index 50a1136ffe..69483e1f29 100644 --- a/config/src/genesis_config.rs +++ b/config/src/genesis_config.rs @@ -654,13 +654,10 @@ pub struct GenesisConfig { pub time_service_type: TimeServiceType, /// transaction timeout pub transaction_timeout: u64, - /// pruning depth pub pruning_depth: u64, - /// pruning finality pub pruning_finality: u64, - /// block header version pub block_header_version: starcoin_types::block::Version, } @@ -757,6 +754,9 @@ static G_DEFAULT_BASE_REWARD_PER_BLOCK: Lazy> = pub static G_BASE_BLOCK_GAS_LIMIT: u64 = 50_000_000; //must big than maximum_number_of_gas_units +pub static G_PRUNING_DEPTH: u64 = 17280; +pub static G_PRUNING_FINALITY: u64 = 8640; + static G_EMPTY_BOOT_NODES: Lazy> = Lazy::new(Vec::new); const ONE_DAY: u64 = 86400; @@ -807,8 +807,8 @@ pub static G_DAG_TEST_CONFIG: Lazy = Lazy::new(|| { min_action_delay: 60 * 60 * 1000, // 1h }, transaction_timeout: ONE_DAY, - pruning_depth: 17280, - pruning_finality: 8640, + pruning_depth: G_PRUNING_DEPTH, + pruning_finality: G_PRUNING_FINALITY, block_header_version: 1, } }); @@ -860,8 +860,8 @@ pub static G_TEST_CONFIG: Lazy = Lazy::new(|| { min_action_delay: 60 * 60 * 1000, // 1h }, transaction_timeout: ONE_DAY, - pruning_depth: 17280, - pruning_finality: 8640, + pruning_depth: G_PRUNING_DEPTH, + pruning_finality: G_PRUNING_FINALITY, block_header_version: 1, } }); @@ -916,8 +916,8 @@ pub static G_DEV_CONFIG: Lazy = Lazy::new(|| { min_action_delay: 60 * 60 * 1000, // 1h }, transaction_timeout: 
ONE_DAY, - pruning_depth: 17280, - pruning_finality: 8640, + pruning_depth: G_PRUNING_DEPTH, + pruning_finality: G_PRUNING_FINALITY, block_header_version: 1, } }); @@ -977,8 +977,8 @@ pub static G_HALLEY_CONFIG: Lazy = Lazy::new(|| { min_action_delay: 60 * 60 * 1000, // 1h }, transaction_timeout: ONE_DAY, - pruning_depth: 17280, - pruning_finality: 8640, + pruning_depth: G_PRUNING_DEPTH, + pruning_finality: G_PRUNING_FINALITY, block_header_version: 1, } }); @@ -1039,8 +1039,8 @@ pub static G_PROXIMA_CONFIG: Lazy = Lazy::new(|| { }, transaction_timeout: ONE_DAY, // todo: rollback it to zero and initialize BlockDag properly - pruning_depth: 17280, - pruning_finality: 8640, + pruning_depth: G_PRUNING_DEPTH, + pruning_finality: G_PRUNING_FINALITY, block_header_version: 1, } }); @@ -1099,8 +1099,8 @@ pub static G_BARNARD_CONFIG: Lazy = Lazy::new(|| { min_action_delay: 60 * 60 * 24 * 1000, // 1d }, transaction_timeout: ONE_DAY, - pruning_depth: 17280, - pruning_finality: 8640, + pruning_depth: G_PRUNING_DEPTH, + pruning_finality: G_PRUNING_FINALITY, block_header_version: 1, } }); @@ -1173,8 +1173,8 @@ pub static G_MAIN_CONFIG: Lazy = Lazy::new(|| { min_action_delay: 60 * 60 * 24 * 1000, // 1d }, transaction_timeout: ONE_DAY, - pruning_depth: 17280, - pruning_finality: 8640, + pruning_depth: G_PRUNING_DEPTH, + pruning_finality: G_PRUNING_FINALITY, block_header_version: 1, } }); @@ -1231,8 +1231,8 @@ pub static G_VEGA_CONFIG: Lazy = Lazy::new(|| { min_action_delay: 60 * 60 * 24 * 1000, // 1d }, transaction_timeout: ONE_DAY, - pruning_depth: 17280, - pruning_finality: 8640, + pruning_depth: G_PRUNING_DEPTH, + pruning_finality: G_PRUNING_FINALITY, block_header_version: 1, } }); diff --git a/flexidag/src/blockdag.rs b/flexidag/src/blockdag.rs index 054f01af07..0f77d14934 100644 --- a/flexidag/src/blockdag.rs +++ b/flexidag/src/blockdag.rs @@ -1,6 +1,8 @@ use super::reachability::{inquirer, reachability_service::MTReachabilityService}; use 
super::types::ghostdata::GhostdagData; -use crate::consensusdb::consenses_state::{DagState, DagStateReader, DagStateStore}; +use crate::consensusdb::consenses_state::{ + DagState, DagStateReader, DagStateStore, ReachabilityView, +}; use crate::consensusdb::prelude::{FlexiDagStorageConfig, StoreError}; use crate::consensusdb::schemadb::{GhostdagStoreReader, ReachabilityStore, REINDEX_ROOT_KEY}; use crate::consensusdb::{ @@ -13,14 +15,12 @@ use crate::consensusdb::{ use crate::ghostdag::protocol::GhostdagManager; use crate::prune::pruning_point_manager::PruningPointManagerT; use crate::{process_key_already_error, reachability}; -use anyhow::{bail, ensure, format_err, Ok}; -use starcoin_accumulator::node::AccumulatorStoreType; -use starcoin_accumulator::{Accumulator, MerkleAccumulator}; +use anyhow::{bail, ensure, Ok}; +use starcoin_config::genesis_config::{G_PRUNING_DEPTH, G_PRUNING_FINALITY}; use starcoin_config::temp_dir; use starcoin_crypto::{HashValue as Hash, HashValue}; use starcoin_logger::prelude::{debug, info, warn}; -use starcoin_storage::Store; -use starcoin_types::block::{AccumulatorInfo, BlockHeader}; +use starcoin_types::block::BlockHeader; use starcoin_types::{ blockhash::{BlockHashes, KType}, consensus_header::ConsensusHeader, @@ -56,7 +56,16 @@ pub struct BlockDAG { } impl BlockDAG { - pub fn new(k: KType, db: FlexiDagStorage) -> Self { + pub fn create_blockdag(dag_storage: FlexiDagStorage) -> Self { + Self::new( + DEFAULT_GHOSTDAG_K, + dag_storage, + G_PRUNING_DEPTH, + G_PRUNING_FINALITY, + ) + } + + pub fn new(k: KType, db: FlexiDagStorage, pruning_depth: u64, pruning_finality: u64) -> Self { let ghostdag_store = db.ghost_dag_store.clone(); let header_store = db.header_store.clone(); let relations_store = db.relations_store.clone(); @@ -69,7 +78,12 @@ impl BlockDAG { header_store, reachability_service.clone(), ); - let pruning_point_manager = PruningPointManager::new(reachability_service, ghostdag_store); + let pruning_point_manager = 
PruningPointManager::new( + reachability_service, + ghostdag_store, + pruning_depth, + pruning_finality, + ); Self { ghostdag_manager, @@ -84,13 +98,17 @@ impl BlockDAG { ..Default::default() }; let dag_storage = FlexiDagStorage::create_from_path(temp_dir(), config)?; - Ok(Self::new(DEFAULT_GHOSTDAG_K, dag_storage)) + Ok(Self::create_blockdag(dag_storage)) } - pub fn create_for_testing_with_parameters(k: KType) -> anyhow::Result { + pub fn create_for_testing_with_parameters( + k: KType, + pruning_depth: u64, + pruning_finality: u64, + ) -> anyhow::Result { let dag_storage = FlexiDagStorage::create_from_path(temp_dir(), FlexiDagStorageConfig::default())?; - Ok(Self::new(k, dag_storage)) + Ok(Self::new(k, dag_storage, pruning_depth, pruning_finality)) } pub fn has_dag_block(&self, hash: Hash) -> anyhow::Result { @@ -114,9 +132,12 @@ impl BlockDAG { .insert(origin, BlockHashes::new(vec![]))?; self.commit(genesis, origin)?; - self.save_dag_state(DagState { - tips: vec![genesis_id], - })?; + self.save_dag_state( + genesis_id, + DagState { + tips: vec![genesis_id], + }, + )?; Ok(origin) } pub fn ghostdata(&self, parents: &[HashValue]) -> anyhow::Result { @@ -203,6 +224,7 @@ impl BlockDAG { trusted_ghostdata } }; + // Store ghostdata process_key_already_error( self.storage @@ -275,7 +297,6 @@ impl BlockDAG { bail!("failed to add a block when committing, e: {:?}", e); } } - process_key_already_error( self.storage .relations_store @@ -317,6 +338,7 @@ impl BlockDAG { } Some(ghostdata) => ghostdata, }; + // Store ghostdata process_key_already_error( self.storage @@ -423,12 +445,37 @@ impl BlockDAG { } } - pub fn get_dag_state(&self) -> anyhow::Result { - Ok(self.storage.state_store.read().get_state()?) + pub fn get_dag_state(&self, hash: Hash) -> anyhow::Result { + Ok(self.storage.state_store.read().get_state_by_hash(hash)?) 
} - pub fn save_dag_state(&self, state: DagState) -> anyhow::Result<()> { - self.storage.state_store.write().insert(state)?; + pub fn save_dag_state(&self, hash: Hash, state: DagState) -> anyhow::Result<()> { + let writer = self.storage.state_store.write(); + match writer.get_state_by_hash(hash) { + anyhow::Result::Ok(dag_state) => { + // remove the ancestor tips + let left_tips = dag_state.tips.into_iter().filter(|tip| { + !state.tips.iter().any(|new_tip| { + self.ghost_dag_manager().check_ancestor_of(*tip, vec![*new_tip]).unwrap_or_else(|e| { + warn!("failed to check ancestor of tip: {:?}, new_tip: {:?}, error: {:?}", tip, new_tip, e); + false + }) + }) + }); + let merged_tips = left_tips + .chain(state.tips.clone()) + .collect::>() + .into_iter() + .collect::>(); + writer.insert(hash, DagState { tips: merged_tips })?; + } + Err(_) => { + writer.insert(hash, state)?; + } + } + + drop(writer); + Ok(()) } @@ -438,131 +485,71 @@ impl BlockDAG { pub fn calc_mergeset_and_tips( &self, - _pruning_depth: u64, - _pruning_finality: u64, + previous_pruning_point: HashValue, + previous_ghostdata: &GhostdagData, ) -> anyhow::Result { - let dag_state = self.get_dag_state()?; - let ghostdata = self.ghost_dag_manager().ghostdag(&dag_state.tips)?; - - anyhow::Ok(MineNewDagBlockInfo { - tips: dag_state.tips, - blue_blocks: (*ghostdata.mergeset_blues).clone(), - pruning_point: HashValue::zero(), - }) - - // let next_pruning_point = self.pruning_point_manager().next_pruning_point( - // &dag_state, - // &ghostdata, - // pruning_depth, - // pruning_finality, - // )?; - // if next_pruning_point == dag_state.pruning_point { - // anyhow::Ok(MineNewDagBlockInfo { - // tips: dag_state.tips, - // blue_blocks: (*ghostdata.mergeset_blues).clone(), - // pruning_point: next_pruning_point, - // }) - // } else { - // let pruned_tips = self - // .pruning_point_manager() - // .prune(&dag_state, next_pruning_point)?; - // let mergeset_blues = (*self - // .ghost_dag_manager() - // 
.ghostdag(&pruned_tips)? - // .mergeset_blues) - // .clone(); - // anyhow::Ok(MineNewDagBlockInfo { - // tips: pruned_tips, - // blue_blocks: mergeset_blues, - // pruning_point: next_pruning_point, - // }) - // } - } - - fn verify_pruning_point( - &self, - pruning_depth: u64, - pruning_finality: u64, - block_header: &BlockHeader, - genesis_id: HashValue, - ) -> anyhow::Result<()> { - let ghostdata = self.ghost_dag_manager().ghostdag(&block_header.parents())?; + info!("start to calculate the mergeset and tips, previous pruning point: {:?}, previous ghostdata: {:?}", previous_pruning_point, previous_ghostdata); + let dag_state = self.get_dag_state(previous_pruning_point)?; + let next_ghostdata = self.ghostdata(&dag_state.tips)?; + info!( + "start to calculate the mergeset and tips for tips: {:?}, and last pruning point: {:?} and next ghostdata: {:?}", + dag_state.tips, previous_pruning_point, next_ghostdata, + ); let next_pruning_point = self.pruning_point_manager().next_pruning_point( - block_header.pruning_point(), - &ghostdata, - pruning_depth, - pruning_finality, + previous_pruning_point, + previous_ghostdata, + &next_ghostdata, )?; - - if (block_header.chain_id().is_vega() - || block_header.chain_id().is_proxima() - || block_header.chain_id().is_halley()) - && block_header.pruning_point() == HashValue::zero() - { - if next_pruning_point == genesis_id { - return anyhow::Ok(()); - } else { - bail!( - "pruning point is not correct, it should update the next pruning point: {}", - next_pruning_point - ); - } - } - if next_pruning_point != block_header.pruning_point() { - bail!("pruning point is not correct, the local next pruning point is {}, but the block header pruning point is {}", next_pruning_point, block_header.pruning_point()); + info!( + "the next pruning point is: {:?}, and the previous pruning point is: {:?}", + next_pruning_point, previous_pruning_point + ); + if next_pruning_point == Hash::zero() || next_pruning_point == previous_pruning_point { + 
anyhow::Ok(MineNewDagBlockInfo { + tips: dag_state.tips, + blue_blocks: (*next_ghostdata.mergeset_blues).clone(), + pruning_point: next_pruning_point, + }) + } else { + let pruned_tips = self.pruning_point_manager().prune( + &dag_state, + previous_pruning_point, + next_pruning_point, + )?; + let mergeset_blues = (*self + .ghost_dag_manager() + .ghostdag(&pruned_tips)? + .mergeset_blues) + .clone(); + info!( + "previous tips are: {:?}, the pruned tips are: {:?}, the mergeset blues are: {:?}, the next pruning point is: {:?}", + dag_state.tips, + pruned_tips, mergeset_blues, next_pruning_point + ); + anyhow::Ok(MineNewDagBlockInfo { + tips: pruned_tips, + blue_blocks: mergeset_blues, + pruning_point: next_pruning_point, + }) } - anyhow::Ok(()) - } - - pub fn verify( - &self, - pruning_depth: u64, - pruning_finality: u64, - block_header: &BlockHeader, - genesis_id: HashValue, - ) -> anyhow::Result<()> { - self.verify_pruning_point(pruning_depth, pruning_finality, block_header, genesis_id) } - pub fn check_upgrade( + pub fn verify_pruning_point( &self, - info: AccumulatorInfo, - storage: Arc, + previous_pruning_point: HashValue, + previous_ghostdata: &GhostdagData, + next_pruning_point: HashValue, + next_ghostdata: &GhostdagData, ) -> anyhow::Result<()> { - let accumulator = MerkleAccumulator::new_with_info( - info, - storage.get_accumulator_store(AccumulatorStoreType::Block), - ); - - let read_guard = self.storage.state_store.read(); - - let update_dag_state = match read_guard.get_state_by_hash( - accumulator - .get_leaf(0)? - .ok_or_else(|| format_err!("no leaf when upgrading dag db"))?, - ) { - anyhow::Result::Ok(dag_state) => match read_guard.get_state() { - anyhow::Result::Ok(saved_dag_state) => { - info!("The dag state is {:?}", saved_dag_state); - None - } - Err(_) => Some(dag_state), - }, - Err(_) => { - warn!("Cannot get the dag state by genesis id. Might be it is a new node. 
The dag state will be: {:?}", read_guard.get_state()?); - None - } - }; - - drop(read_guard); + let inside_next_pruning_point = self.pruning_point_manager().next_pruning_point( + previous_pruning_point, + previous_ghostdata, + next_ghostdata, + )?; - if let Some(dag_state) = update_dag_state { - let write_guard = self.storage.state_store.write(); - info!("The dag state will be saved as {:?}", dag_state); - write_guard.insert(dag_state)?; - drop(write_guard); + if next_pruning_point != inside_next_pruning_point { + bail!("pruning point is not correct, the local next pruning point is {}, but the block header pruning point is {}", next_pruning_point, inside_next_pruning_point); } - anyhow::Ok(()) } @@ -580,4 +567,68 @@ impl BlockDAG { self.ghost_dag_manager() .verify_and_ghostdata(blue_blocks, header) } + pub fn check_upgrade(&self, main: &BlockHeader, genesis_id: HashValue) -> anyhow::Result<()> { + // set the state with key 0 + if main.version() == 0 || main.version() == 1 { + let result_dag_state = self + .storage + .state_store + .read() + .get_state_by_hash(genesis_id); + match result_dag_state { + anyhow::Result::Ok(_dag_state) => (), + Err(_) => { + let result_dag_state = self + .storage + .state_store + .read() + .get_state_by_hash(HashValue::zero()); + + match result_dag_state { + anyhow::Result::Ok(dag_state) => { + self.storage + .state_store + .write() + .insert(genesis_id, dag_state)?; + } + Err(_) => { + let dag_state = self + .storage + .state_store + .read() + .get_state_by_hash(main.id())?; + self.storage + .state_store + .write() + .insert(HashValue::zero(), dag_state.clone())?; + self.storage + .state_store + .write() + .insert(genesis_id, dag_state)?; + } + } + } + } + } + + anyhow::Ok(()) + } + + pub fn is_ancestor_of( + &self, + ancestor: Hash, + descendants: Vec, + ) -> anyhow::Result { + let de = descendants + .into_iter() + .filter(|descendant| { + self.check_ancestor_of(ancestor, vec![*descendant]) + .unwrap_or(false) + }) + .collect::>(); + 
anyhow::Ok(ReachabilityView { + ancestor, + descendants: de, + }) + } } diff --git a/flexidag/src/consensusdb/consenses_state.rs b/flexidag/src/consensusdb/consenses_state.rs index 481c415fdb..8dcf852d3b 100644 --- a/flexidag/src/consensusdb/consenses_state.rs +++ b/flexidag/src/consensusdb/consenses_state.rs @@ -34,13 +34,12 @@ impl ValueCodec for DagState { } pub trait DagStateReader { - fn get_state(&self) -> Result; fn get_state_by_hash(&self, hash: Hash) -> Result; } pub trait DagStateStore: DagStateReader { // This is append only - fn insert(&self, state: DagState) -> Result<(), StoreError>; + fn insert(&self, hash: Hash, state: DagState) -> Result<(), StoreError>; } /// A DB + cache implementation of `HeaderStore` trait, with concurrency support. @@ -60,11 +59,6 @@ impl DbDagStateStore { } impl DagStateReader for DbDagStateStore { - fn get_state(&self) -> Result { - let result = self.dag_state_access.read(0.into())?; - Ok(result) - } - fn get_state_by_hash(&self, hash: Hash) -> Result { let result = self.dag_state_access.read(hash)?; Ok(result) @@ -72,9 +66,9 @@ impl DagStateReader for DbDagStateStore { } impl DagStateStore for DbDagStateStore { - fn insert(&self, state: DagState) -> Result<(), StoreError> { + fn insert(&self, hash: Hash, state: DagState) -> Result<(), StoreError> { self.dag_state_access - .write(DirectDbWriter::new(&self.db), 0.into(), state)?; + .write(DirectDbWriter::new(&self.db), hash, state)?; Ok(()) } } @@ -90,3 +84,9 @@ impl DagStateView { DagState { tips: self.tips } } } + +#[derive(Eq, PartialEq, Hash, Deserialize, Serialize, Clone, Debug, JsonSchema)] +pub struct ReachabilityView { + pub ancestor: Hash, + pub descendants: Vec, +} diff --git a/flexidag/src/ghostdag/protocol.rs b/flexidag/src/ghostdag/protocol.rs index c219be7af4..30567d473a 100644 --- a/flexidag/src/ghostdag/protocol.rs +++ b/flexidag/src/ghostdag/protocol.rs @@ -222,6 +222,7 @@ impl< .collect::>() { if header.number() < 10000000 { + // no bail before 10000000 
warn!("The data of blue set is not equal when executing the block: {:?}, for {:?}, checking data: {:?}", header.id(), blue_blocks.iter().map(|header| header.id()).collect::>(), new_block_data.mergeset_blues); } else { bail!("The data of blue set is not equal when executing the block: {:?}, for {:?}, checking data: {:?}", header.id(), blue_blocks.iter().map(|header| header.id()).collect::>(), new_block_data.mergeset_blues); @@ -261,7 +262,6 @@ impl< header.id(), new_block_data ); - Ok(new_block_data) } @@ -501,7 +501,6 @@ impl< blocks: impl IntoIterator, ) -> Result> { let mut sorted_blocks: Vec = blocks.into_iter().collect(); - sorted_blocks.sort_by_cached_key(|block| { let blue_work = self .ghostdag_store diff --git a/flexidag/src/prune/pruning_point_manager.rs b/flexidag/src/prune/pruning_point_manager.rs index b7496e456a..0e1a2dd1b4 100644 --- a/flexidag/src/prune/pruning_point_manager.rs +++ b/flexidag/src/prune/pruning_point_manager.rs @@ -1,6 +1,5 @@ use starcoin_crypto::HashValue; -use starcoin_logger::prelude::debug; -use starcoin_types::blockhash::ORIGIN; +use starcoin_logger::prelude::{debug, info}; use crate::reachability::reachability_service::ReachabilityService; use crate::{ @@ -16,16 +15,22 @@ use crate::{ pub struct PruningPointManagerT { reachability_service: MTReachabilityService, ghost_dag_store: DbGhostdagStore, + pruning_depth: u64, + pruning_finality: u64, } impl PruningPointManagerT { pub fn new( reachability_service: MTReachabilityService, ghost_dag_store: DbGhostdagStore, + pruning_depth: u64, + pruning_finality: u64, ) -> Self { Self { reachability_service, ghost_dag_store, + pruning_depth, + pruning_finality, } } @@ -33,8 +38,8 @@ impl PruningPointManagerT { self.reachability_service.clone() } - pub fn finality_score(&self, blue_score: u64, pruning_finality: u64) -> u64 { - blue_score / pruning_finality + pub fn finality_score(&self, blue_score: u64) -> u64 { + blue_score / self.pruning_finality } pub fn prune( @@ -43,7 +48,7 @@ impl 
PruningPointManagerT { current_pruning_point: HashValue, next_pruning_point: HashValue, ) -> anyhow::Result> { - if current_pruning_point == HashValue::zero() { + if current_pruning_point == next_pruning_point { return Ok(dag_state.tips.clone()); } anyhow::Ok( @@ -61,37 +66,39 @@ impl PruningPointManagerT { pub(crate) fn next_pruning_point( &self, - pruning_point: HashValue, - ghostdata: &GhostdagData, - pruning_depth: u64, - pruning_finality: u64, + previous_pruning_point: HashValue, + previous_ghostdata: &GhostdagData, + next_ghostdata: &GhostdagData, ) -> anyhow::Result { - let pruning_ghostdata = self.ghost_dag_store.get_data(pruning_point)?; let min_required_blue_score_for_next_pruning_point = - (self.finality_score(pruning_ghostdata.blue_score, pruning_finality) + 1) - * pruning_finality; + (self.finality_score(previous_ghostdata.blue_score) + 1) * self.pruning_finality; debug!( "min_required_blue_score_for_next_pruning_point: {:?}", min_required_blue_score_for_next_pruning_point ); - let mut latest_pruning_ghost_data = self.ghost_dag_store.get_compact_data(pruning_point)?; - if min_required_blue_score_for_next_pruning_point + pruning_depth <= ghostdata.blue_score { + + let mut latest_pruning_ghost_data = previous_ghostdata.to_compact(); + if min_required_blue_score_for_next_pruning_point + self.pruning_depth + <= next_ghostdata.blue_score + { for child in self.reachability_service().forward_chain_iterator( - pruning_point, - ghostdata.selected_parent, + previous_pruning_point, + next_ghostdata.selected_parent, true, ) { let next_pruning_ghostdata = self.ghost_dag_store.get_data(child)?; debug!( "child: {:?}, observer2.blue_score: {:?}, next_pruning_ghostdata.blue_score: {:?}", - child, ghostdata.blue_score, next_pruning_ghostdata.blue_score + child, next_ghostdata.blue_score, next_pruning_ghostdata.blue_score ); - if ghostdata.blue_score - next_pruning_ghostdata.blue_score < pruning_depth { + if next_ghostdata.blue_score - next_pruning_ghostdata.blue_score + 
< self.pruning_depth + { break; } - if self.finality_score(next_pruning_ghostdata.blue_score, pruning_finality) - > self.finality_score(latest_pruning_ghost_data.blue_score, pruning_finality) + if self.finality_score(next_pruning_ghostdata.blue_score) + > self.finality_score(latest_pruning_ghost_data.blue_score) { latest_pruning_ghost_data = CompactGhostdagData { blue_score: next_pruning_ghostdata.blue_score, @@ -101,11 +108,13 @@ impl PruningPointManagerT { } } - println!("prune point: {:?}", latest_pruning_ghost_data); + info!("prune point: {:?}", latest_pruning_ghost_data); } - if latest_pruning_ghost_data.selected_parent == HashValue::new(ORIGIN) { - anyhow::Ok(pruning_point) // still genesis + if latest_pruning_ghost_data.selected_parent + == previous_ghostdata.to_compact().selected_parent + { + anyhow::Ok(HashValue::zero()) // still genesis } else { anyhow::Ok(latest_pruning_ghost_data.selected_parent) } diff --git a/flexidag/tests/tests.rs b/flexidag/tests/tests.rs index e3208e7bcc..edb387c9d8 100644 --- a/flexidag/tests/tests.rs +++ b/flexidag/tests/tests.rs @@ -2,6 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 use anyhow::{bail, format_err, Ok, Result}; +use starcoin_config::genesis_config::{G_PRUNING_DEPTH, G_PRUNING_FINALITY}; use starcoin_crypto::HashValue as Hash; use starcoin_dag::{ blockdag::{BlockDAG, MineNewDagBlockInfo}, @@ -22,6 +23,7 @@ use starcoin_types::{ }; use std::{ + collections::HashSet, ops::{Deref, DerefMut}, sync::Arc, time::Instant, @@ -323,14 +325,14 @@ fn test_dag_tips_store() { dag.storage .state_store .write() - .insert(state.clone()) + .insert(Hash::zero(), state.clone()) .expect("failed to store the dag state"); assert_eq!( dag.storage .state_store .read() - .get_state() + .get_state_by_hash(Hash::zero()) .expect("failed to get the dag state"), state ); @@ -736,11 +738,12 @@ fn add_and_print_with_ghostdata( Ok(header) } -fn add_and_print( +fn add_and_print_with_pruning_point( number: BlockNumber, parent: Hash, parents: Vec, 
origin: Hash, + pruning_point: Hash, dag: &mut BlockDAG, ) -> anyhow::Result { let header_builder = BlockHeaderBuilder::random(); @@ -748,6 +751,7 @@ fn add_and_print( .with_parent_hash(parent) .with_parents_hash(parents) .with_number(number) + .with_pruning_point(pruning_point) .build(); let start = Instant::now(); dag.commit(header.to_owned(), origin)?; @@ -766,6 +770,16 @@ fn add_and_print( Ok(header) } +fn add_and_print( + number: BlockNumber, + parent: Hash, + parents: Vec, + origin: Hash, + dag: &mut BlockDAG, +) -> anyhow::Result { + add_and_print_with_pruning_point(number, parent, parents, origin, Hash::zero(), dag) +} + #[test] fn test_dag_mergeset() -> anyhow::Result<()> { // initialzie the dag firstly @@ -869,7 +883,6 @@ fn test_big_data_commit() -> anyhow::Result<()> { anyhow::Result::Ok(()) } -#[ignore = "pruning will be tested in next release"] #[test] fn test_prune() -> anyhow::Result<()> { // initialzie the dag firstly @@ -877,7 +890,8 @@ fn test_prune() -> anyhow::Result<()> { let pruning_depth = 4; let pruning_finality = 3; - let mut dag = BlockDAG::create_for_testing_with_parameters(k).unwrap(); + let mut dag = + BlockDAG::create_for_testing_with_parameters(k, pruning_depth, pruning_finality).unwrap(); let origin = BlockHeaderBuilder::random().with_number(0).build(); let genesis = BlockHeader::dag_genesis_random_with_parent(origin)?; @@ -966,21 +980,85 @@ fn test_prune() -> anyhow::Result<()> { assert_eq!(observer3.blue_score, observer2.blue_score); assert_eq!(observer3.selected_parent, observer2.selected_parent); - // prunning process begins - dag.save_dag_state(DagState { - tips: vec![block_red_3.id(), block_main_5.id()], - })?; + dag.save_dag_state( + genesis.id(), + DagState { + tips: vec![block_red_3.id(), block_main_5.id()], + }, + )?; + // prunning process begins + let (previous_ghostdata, previous_pruning_point) = + if block_main_5.pruning_point() == Hash::zero() { + ( + dag.ghostdata_by_hash(genesis.id())?.ok_or_else(|| { + 
format_err!("failed to get the ghostdata by genesis: {:?}", genesis.id()) + })?, + genesis.id(), + ) + } else { + ( + dag.ghostdata_by_hash(block_main_5.pruning_point())? + .ok_or_else(|| { + format_err!( + "failed to get the ghostdata by pruning point: {:?}", + block_main_5.pruning_point() + ) + })?, + block_main_5.pruning_point(), + ) + }; + + // test the pruning point calculation let MineNewDagBlockInfo { tips, blue_blocks: _, pruning_point, - } = dag.calc_mergeset_and_tips(pruning_depth, pruning_finality)?; + } = dag.calc_mergeset_and_tips(previous_pruning_point, previous_ghostdata.as_ref())?; assert_eq!(pruning_point, block_main_2.id()); assert_eq!(tips.len(), 1); assert_eq!(*tips.last().unwrap(), block_main_5.id()); + // test the pruning logic + + let block_main_6 = add_and_print( + 6, + block_main_5.id(), + tips.clone(), + genesis.parent_hash(), + &mut dag, + )?; + let block_main_6_1 = + add_and_print(6, block_main_5.id(), tips, genesis.parent_hash(), &mut dag)?; + let block_fork = add_and_print( + 4, + block_red_3.id(), + vec![block_red_3.id()], + genesis.parent_hash(), + &mut dag, + )?; + + dag.save_dag_state( + genesis.id(), + DagState { + tips: vec![block_main_6.id(), block_main_6_1.id(), block_fork.id()], + }, + )?; + + let MineNewDagBlockInfo { + tips, + blue_blocks: _, + pruning_point, + } = dag.calc_mergeset_and_tips(previous_pruning_point, previous_ghostdata.as_ref())?; + + assert_eq!(pruning_point, block_main_2.id()); + assert_eq!(tips.len(), 2); + assert_eq!( + tips.into_iter().collect::>(), + HashSet::from_iter(vec![block_main_6.id(), block_main_6_1.id()]) + ); + anyhow::Result::Ok(()) } @@ -989,7 +1067,9 @@ fn test_verification_blue_block() -> anyhow::Result<()> { // initialzie the dag firstly let k = 5; - let mut dag = BlockDAG::create_for_testing_with_parameters(k).unwrap(); + let mut dag = + BlockDAG::create_for_testing_with_parameters(k, G_PRUNING_DEPTH, G_PRUNING_FINALITY) + .unwrap(); let origin = 
BlockHeaderBuilder::random().with_number(0).build(); let genesis = BlockHeader::dag_genesis_random_with_parent(origin)?; @@ -1210,5 +1290,17 @@ fn test_verification_blue_block() -> anyhow::Result<()> { dag.ghost_dag_manager() .check_ghostdata_blue_block(&together_ghost_data)?; + let together_mine = dag.ghostdata(&[block_from_normal.id(), block_from_makeup.id()])?; + let mine_together = add_and_print( + 8, + together_mine.selected_parent, + vec![block_from_normal.id(), block_from_makeup.id()], + genesis.parent_hash(), + &mut dag, + )?; + let together_ghost_data = dag.storage.ghost_dag_store.get_data(mine_together.id())?; + dag.ghost_dag_manager() + .check_ghostdata_blue_block(&together_ghost_data)?; + anyhow::Result::Ok(()) } diff --git a/genesis/src/lib.rs b/genesis/src/lib.rs index dfae64eaa2..e9894b1ef3 100644 --- a/genesis/src/lib.rs +++ b/genesis/src/lib.rs @@ -24,6 +24,7 @@ use starcoin_storage::table_info::TableInfoStore; use starcoin_storage::{BlockStore, Storage, Store}; use starcoin_transaction_builder::build_stdlib_package_with_modules; use starcoin_transaction_builder::{build_stdlib_package, StdLibOptions}; +use starcoin_types::blockhash::KType; use starcoin_types::startup_info::{ChainInfo, StartupInfo}; use starcoin_types::transaction::Package; use starcoin_types::transaction::TransactionInfo; @@ -368,6 +369,20 @@ impl Genesis { let chain_info = genesis.execute_genesis_block(net, storage.clone(), dag.clone())?; Ok((storage, chain_info, genesis, dag)) } + + pub fn init_storage_for_test_with_param( + net: &ChainNetwork, + k: KType, + pruning_depth: u64, + pruning_finality: u64, + ) -> Result<(Arc, ChainInfo, Self, BlockDAG)> { + debug!("init storage by genesis for test. 
{net:?}"); + let storage = Arc::new(Storage::new(StorageInstance::new_cache_instance())?); + let genesis = Self::load_or_build(net)?; + let dag = BlockDAG::create_for_testing_with_parameters(k, pruning_depth, pruning_finality)?; + let chain_info = genesis.execute_genesis_block(net, storage.clone(), dag.clone())?; + Ok((storage, chain_info, genesis, dag)) + } } #[cfg(test)] diff --git a/kube/manifest/starcoin-halley.yaml b/kube/manifest/starcoin-halley.yaml index 12c9aca410..1be303f1c4 100644 --- a/kube/manifest/starcoin-halley.yaml +++ b/kube/manifest/starcoin-halley.yaml @@ -23,7 +23,7 @@ spec: starcoin/node-pool: seed-pool containers: - name: starcoin - image: starcoin/starcoin:dag-master + image: starcoin/starcoin:pruning-point imagePullPolicy: Always command: - bash @@ -36,7 +36,7 @@ spec: if [ ! -z $node_key ]; then node_key_flag="--node-key ${node_key}"; fi; - /starcoin/starcoin -n halley -d /sc-data --discover-local=true $node_key_flag; + /starcoin/starcoin -n halley -d /sc-data --p2prpc-default-global-api-quota 9000/s --p2prpc-custom-user-api-quota get_header_by_hash=9000/s --p2prpc-custom-user-api-quota get_headers_by_hash=9000/s --p2prpc-custom-user-api-quota info=9000/s --p2prpc-custom-user-api-quota get_block_by_hash=9000/s --p2prpc-custom-user-api-quota get_block_ids=9000/s --p2prpc-custom-user-api-quota get_blocks=9000/s --jsonrpc-default-global-api-quota 9000/s --jsonrpc-custom-user-api-quota chain.get_headers_by_hash=9000/s --jsonrpc-custom-user-api-quota chain.get_header_by_hash=9000/s --jsonrpc-custom-user-api-quota chain.info=9000/s --jsonrpc-custom-user-api-quota chain.get_block_by_hash=9000/s --jsonrpc-custom-user-api-quota chain.get_block_ids=9000/s --jsonrpc-custom-user-api-quota chain.get_blocks=9000/s --min-peers-to-propagate 512 --max-peers-to-propagate 1024 --max-outgoing-peers 512 --max-incoming-peers 512 --discover-local=true $node_key_flag; ret=$?; if [ $ret -ne 0 ]; then echo "Node start fail, try to remove config and data."; @@ 
-68,18 +68,6 @@ spec: timeoutSeconds: 2 failureThreshold: 3 successThreshold: 1 - readinessProbe: - exec: - command: - - sh - - -c - - >- - /starcoin/starcoin -n halley -d /sc-data node sync status|grep Synchronized - initialDelaySeconds: 10 - periodSeconds: 5 - timeoutSeconds: 2 - failureThreshold: 3 - successThreshold: 1 volumeClaimTemplates: - metadata: name: starcoin-volume diff --git a/kube/manifest/starcoin-proxima.yaml b/kube/manifest/starcoin-proxima.yaml index 71294b8665..dd8476ae67 100644 --- a/kube/manifest/starcoin-proxima.yaml +++ b/kube/manifest/starcoin-proxima.yaml @@ -23,13 +23,13 @@ spec: starcoin/node-pool: seed-pool containers: - name: starcoin - image: ghcr.io/starcoinorg/starcoin:dag-master + image: ghcr.io/starcoinorg/starcoin:pruning-point imagePullPolicy: Always command: - bash - -c args: - - rm -rf /sc-data/proxima/ /sc-data/proxima/starcoindb/db/starcoindb/LOCK; + - rm -rf /sc-data/proxima/starcoindb/db/starcoindb/LOCK /sc-data/proxima/genesis_config.json; id=$(echo -e $POD_NAME|awk -F'-' '{print $2}') && IFS='; ' read -r -a node_keys <<< $NODE_KEYS && node_key=${node_keys[$id]}; if [ ! 
-z $node_key ]; then @@ -70,18 +70,6 @@ spec: timeoutSeconds: 2 failureThreshold: 3 successThreshold: 1 - readinessProbe: - exec: - command: - - sh - - -c - - >- - /starcoin/starcoin -n proxima -d /sc-data node sync status|grep Synchronized - initialDelaySeconds: 10 - periodSeconds: 5 - timeoutSeconds: 2 - failureThreshold: 3 - successThreshold: 1 volumeClaimTemplates: - metadata: name: starcoin-volume diff --git a/miner/src/create_block_template/mod.rs b/miner/src/create_block_template/mod.rs index 7fd06ac88d..3bae182df7 100644 --- a/miner/src/create_block_template/mod.rs +++ b/miner/src/create_block_template/mod.rs @@ -113,6 +113,7 @@ impl ServiceHandler for BlockBuilderService { .net() .genesis_config() .block_header_version; + self.inner.create_block_template(header_version) } } @@ -188,7 +189,7 @@ where next_difficulty: difficulty, now_milliseconds: mut now_millis, pruning_point, - } = *block_on(self.block_connector_service.send(MinerRequest {}))??; + } = *block_on(self.block_connector_service.send(MinerRequest { version }))??; let block_gas_limit = self .local_block_gas_limit diff --git a/miner/tests/miner_test.rs b/miner/tests/miner_test.rs index f8996def5f..60042ef95b 100644 --- a/miner/tests/miner_test.rs +++ b/miner/tests/miner_test.rs @@ -30,6 +30,7 @@ async fn test_miner_service() { registry.put_shared(dag).await.unwrap(); let genesis_hash = genesis.block().id(); + registry.put_shared(genesis).await.unwrap(); let chain_header = storage .get_block_header_by_hash(genesis_hash) .unwrap() diff --git a/network/src/network_p2p_handle.rs b/network/src/network_p2p_handle.rs index 8a61623fc0..95dc994e0d 100644 --- a/network/src/network_p2p_handle.rs +++ b/network/src/network_p2p_handle.rs @@ -97,7 +97,7 @@ impl BusinessLayerHandle for Networkp2pHandle { match Status::decode(&received_handshake[..]) { Result::Ok(status) => self.inner_handshake(peer_id, status), Err(err) => { - error!(target: "network-p2p", "Couldn't decode handshake packet sent by {}: {:?}: {}", 
peer_id, hex::encode(received_handshake), err); + error!(target: "network-p2p", "Couldn't decode handshake packet sent by {}, err: {}", peer_id, err); Err(rep::BAD_MESSAGE) } } diff --git a/node/src/node.rs b/node/src/node.rs index e9c2891323..a59468e1e5 100644 --- a/node/src/node.rs +++ b/node/src/node.rs @@ -321,6 +321,8 @@ impl NodeService { let dag = starcoin_dag::blockdag::BlockDAG::new( KType::try_from(G_BASE_MAX_UNCLES_PER_BLOCK)?, dag_storage.clone(), + config.base().net().genesis_config().pruning_depth, + config.base().net().genesis_config().pruning_finality, ); registry.put_shared(dag.clone()).await?; let (chain_info, genesis) = Genesis::init_and_check_storage( @@ -336,10 +338,7 @@ impl NodeService { upgrade_time.as_secs() ); - dag.check_upgrade( - chain_info.status().info().block_accumulator_info.clone(), - storage.clone(), - )?; + dag.check_upgrade(chain_info.status().head(), genesis.block().id())?; registry.put_shared(genesis).await?; diff --git a/rpc/api/generated_rpc_schema/chain.json b/rpc/api/generated_rpc_schema/chain.json index 3f6243e7c9..e79576d8c2 100644 --- a/rpc/api/generated_rpc_schema/chain.json +++ b/rpc/api/generated_rpc_schema/chain.json @@ -4173,6 +4173,57 @@ } } } + }, + { + "name": "chain.is_ancestor_of", + "params": [ + { + "name": "ancestor", + "schema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "HashValue", + "type": "string", + "format": "HashValue" + } + }, + { + "name": "descendants", + "schema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Array_of_HashValue", + "type": "array", + "items": { + "type": "string", + "format": "HashValue" + } + } + } + ], + "result": { + "name": "starcoin_dag :: consensusdb :: consenses_state :: ReachabilityView", + "schema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "ReachabilityView", + "type": "object", + "required": [ + "ancestor", + "descendants" + ], + "properties": { + "ancestor": { + "type": "string", + 
"format": "HashValue" + }, + "descendants": { + "type": "array", + "items": { + "type": "string", + "format": "HashValue" + } + } + } + } + } } ] } \ No newline at end of file diff --git a/rpc/api/src/chain/mod.rs b/rpc/api/src/chain/mod.rs index ea4a80afd3..ae95e33f90 100644 --- a/rpc/api/src/chain/mod.rs +++ b/rpc/api/src/chain/mod.rs @@ -132,6 +132,14 @@ pub trait ChainApi { /// Get block ghostdag data #[rpc(name = "chain.get_ghostdagdata")] fn get_ghostdagdata(&self, block_hash: HashValue) -> FutureResult>; + + /// Check the ancestor and descendants' relationship + #[rpc(name = "chain.is_ancestor_of")] + fn is_ancestor_of( + &self, + ancestor: HashValue, + descendants: Vec, + ) -> FutureResult; } #[derive(Copy, Clone, Default, Serialize, Deserialize, JsonSchema)] diff --git a/rpc/client/src/lib.rs b/rpc/client/src/lib.rs index 569ff82333..d8fbc5ad47 100644 --- a/rpc/client/src/lib.rs +++ b/rpc/client/src/lib.rs @@ -21,7 +21,7 @@ use serde_json::Value; use starcoin_abi_types::{FunctionABI, ModuleABI, StructInstantiation}; use starcoin_account_api::AccountInfo; use starcoin_crypto::HashValue; -use starcoin_dag::consensusdb::consenses_state::DagStateView; +use starcoin_dag::consensusdb::consenses_state::{DagStateView, ReachabilityView}; use starcoin_logger::{prelude::*, LogPattern}; use starcoin_rpc_api::chain::{ GetBlockOption, GetBlocksOption, GetEventOption, GetTransactionOption, @@ -790,6 +790,15 @@ impl RpcClient { .map_err(map_err) } + pub fn is_ancestor_of( + &self, + ancestor: HashValue, + descendants: Vec, + ) -> anyhow::Result { + self.call_rpc_blocking(|inner| inner.chain_client.is_ancestor_of(ancestor, descendants)) + .map_err(map_err) + } + pub fn chain_get_blocks_by_number( &self, number: Option, diff --git a/rpc/server/src/module/chain_rpc.rs b/rpc/server/src/module/chain_rpc.rs index dedee3b0e6..ea78017804 100644 --- a/rpc/server/src/module/chain_rpc.rs +++ b/rpc/server/src/module/chain_rpc.rs @@ -485,6 +485,18 @@ where let fut = async move { 
service.get_ghostdagdata(block_hash).await }.map_err(map_err); Box::pin(fut.boxed()) } + + #[doc = " Check the ancestor and descendants\' relationship "] + fn is_ancestor_of( + &self, + ancestor: HashValue, + descendants: Vec, + ) -> FutureResult { + let service = self.service.clone(); + let fut = + async move { service.is_ancestor_of(ancestor, descendants).await }.map_err(map_err); + Box::pin(fut.boxed()) + } } fn try_decode_block_txns(state: &dyn StateView, block: &mut BlockView) -> anyhow::Result<()> { diff --git a/sync/src/block_connector/block_connector_service.rs b/sync/src/block_connector/block_connector_service.rs index 8617bdd39a..1e4d554ca6 100644 --- a/sync/src/block_connector/block_connector_service.rs +++ b/sync/src/block_connector/block_connector_service.rs @@ -14,6 +14,7 @@ use crate::sync::{CheckSyncEvent, SyncService}; use crate::tasks::{BlockConnectedEvent, BlockConnectedFinishEvent, BlockDiskCheckEvent}; use anyhow::{bail, format_err, Ok, Result}; use network_api::PeerProvider; +use starcoin_chain::BlockChain; use starcoin_chain_api::{ChainReader, ConnectBlockError, WriteableChainService}; use starcoin_config::{NodeConfig, G_CRATE_VERSION}; use starcoin_consensus::Consensus; @@ -21,6 +22,7 @@ use starcoin_crypto::HashValue; use starcoin_dag::blockdag::BlockDAG; use starcoin_dag::blockdag::MineNewDagBlockInfo; use starcoin_executor::VMMetrics; +use starcoin_genesis::Genesis; use starcoin_logger::prelude::*; use starcoin_network::NetworkServiceRef; use starcoin_service_registry::{ @@ -378,30 +380,65 @@ where _msg: MinerRequest, ctx: &mut ServiceContext, ) -> ::Response { - let main = self.chain_service.get_main(); + let main_header = self.chain_service.get_main().status().head().clone(); let dag = self.chain_service.get_dag(); - let epoch = main.epoch().clone(); - let strategy = epoch.strategy(); - let on_chain_block_gas_limit = epoch.block_gas_limit(); - let (pruning_depth, pruning_finality) = ctx - .get_shared::>()? 
- .base() - .net() - .pruning_config(); + let MineNewDagBlockInfo { tips, blue_blocks, pruning_point, - } = dag.calc_mergeset_and_tips(pruning_depth, pruning_finality)?; + } = if main_header.number() >= self.chain_service.get_main().get_pruning_height() { + let (previous_ghostdata, pruning_point) = if main_header.pruning_point() + == HashValue::zero() + { + let genesis = ctx.get_shared::()?; + ( + self.chain_service + .get_dag() + .ghostdata_by_hash(genesis.block().id())? + .ok_or_else(|| format_err!("The ghostdata of Genesis block header dose not exist., genesis id: {:?}", genesis.block().id()))?, + genesis.block().id(), + ) + } else { + ( + self.chain_service + .get_dag() + .ghostdata_by_hash(main_header.pruning_point())? + .ok_or_else(|| format_err!("The ghostdata of the pruning point does not exist. pruning point id: {:?}", main_header.pruning_point()))?, + main_header.pruning_point(), + ) + }; + dag.calc_mergeset_and_tips(pruning_point, previous_ghostdata.as_ref())? + } else { + let genesis = ctx.get_shared::()?; + let tips = dag.get_dag_state(genesis.block().id())?.tips; + MineNewDagBlockInfo { + tips: tips.clone(), + blue_blocks: dag.ghostdata(&tips)?.mergeset_blues.as_ref().clone(), + pruning_point: HashValue::zero(), + } + }; + if blue_blocks.is_empty() { bail!("failed to get the blue blocks from the DAG"); } - let selected_parent = blue_blocks.first().expect("the blue block must exist"); + let selected_parent = *blue_blocks + .first() + .ok_or_else(|| format_err!("the blue blocks must be not be 0!"))?; + + let time_service = self.config.net().time_service(); + let storage = ctx.get_shared::>()?; + let vm_metrics = ctx.get_shared_opt::()?; + let main = BlockChain::new(time_service, selected_parent, storage, vm_metrics, dag)?; + + let epoch = main.epoch().clone(); + let strategy = epoch.strategy(); + let on_chain_block_gas_limit = epoch.block_gas_limit(); let previous_header = main .get_storage() - .get_block_header_by_hash(*selected_parent)? 
+ .get_block_header_by_hash(selected_parent)? .ok_or_else(|| format_err!("BlockHeader should exist by hash: {}", selected_parent))?; - let next_difficulty = epoch.strategy().calculate_next_difficulty(main)?; + let next_difficulty = epoch.strategy().calculate_next_difficulty(&main)?; let now_milliseconds = main.time_service().now_millis(); Ok(Box::new(MinerResponse { diff --git a/sync/src/block_connector/mod.rs b/sync/src/block_connector/mod.rs index a1f97a65dc..32f45f26fc 100644 --- a/sync/src/block_connector/mod.rs +++ b/sync/src/block_connector/mod.rs @@ -3,7 +3,7 @@ use starcoin_crypto::HashValue; use starcoin_service_registry::ServiceRequest; -use starcoin_types::block::{Block, ExecutedBlock}; +use starcoin_types::block::{Block, ExecutedBlock, Version}; mod block_connector_service; mod metrics; @@ -48,7 +48,9 @@ impl ServiceRequest for ExecuteRequest { } #[derive(Clone, Debug)] -pub struct MinerRequest {} +pub struct MinerRequest { + pub version: Version, +} #[derive(Clone, Debug)] pub struct MinerResponse { diff --git a/sync/src/block_connector/test_write_dag_block_chain.rs b/sync/src/block_connector/test_write_dag_block_chain.rs index bdc0992aa5..2d93d21eed 100644 --- a/sync/src/block_connector/test_write_dag_block_chain.rs +++ b/sync/src/block_connector/test_write_dag_block_chain.rs @@ -3,7 +3,7 @@ #![allow(clippy::arithmetic_side_effects)] use crate::block_connector::test_write_block_chain::create_writeable_dag_block_chain; use crate::block_connector::WriteBlockChainService; -use anyhow::{bail, Ok}; +use anyhow::{bail, format_err, Ok}; use starcoin_account_api::AccountInfo; use starcoin_chain::{BlockChain, ChainReader}; use starcoin_chain_service::WriteableChainService; @@ -50,7 +50,19 @@ pub fn new_dag_block( let miner_address = *miner.address(); let block_chain = writeable_block_chain_service.get_main(); - let tips = block_chain.current_tips_hash().expect("failed to get tips"); + let tips = if block_chain.status().head().pruning_point() == 
HashValue::zero() { + let genesis_id = block_chain + .get_storage() + .get_genesis()? + .ok_or_else(|| format_err!("Genesis block is none"))?; + block_chain + .current_tips_hash(genesis_id) + .expect("failed to get tips") + } else { + block_chain + .current_tips_hash(block_chain.status().head().pruning_point()) + .expect("failed to get tips") + }; let (block_template, _) = block_chain .create_block_template( miner_address, diff --git a/sync/src/block_connector/write_block_chain.rs b/sync/src/block_connector/write_block_chain.rs index 36817a9eb3..1802b713de 100644 --- a/sync/src/block_connector/write_block_chain.rs +++ b/sync/src/block_connector/write_block_chain.rs @@ -580,23 +580,16 @@ where fn connect_inner(&mut self, block: Block) -> Result { let block_id = block.id(); - if block_id == *starcoin_storage::BARNARD_HARD_FORK_HASH - && block.header().number() == starcoin_storage::BARNARD_HARD_FORK_HEIGHT - { - debug!("barnard hard fork {}", block_id); - return Err(ConnectBlockError::BarnardHardFork(Box::new(block)).into()); - } if self.main.current_header().id() == block_id { debug!("Repeat connect, current header is {} already.", block_id); return Ok(ConnectOk::Duplicate); } - if self.main.check_chain_type()? == ChainType::Dag - && !block - .header() - .parents_hash() - .iter() - .all(|parent_hash| self.main.dag().has_dag_block(*parent_hash).unwrap_or(false)) + if !block + .header() + .parents_hash() + .iter() + .all(|parent_hash| self.main.dag().has_dag_block(*parent_hash).unwrap_or(false)) { debug!( "block: {:?} is a future dag block, trigger sync to pull other dag blocks", @@ -615,7 +608,7 @@ where } let (block_info, fork) = self.find_or_fork(block.header())?; match (block_info, fork) { - //block has been processed in some branch, so just trigger a head selection. + // block has been processed in some branch, so just trigger a head selection. 
(Some(_block_info), Some(branch)) => { debug!( "Block {} has been processed, trigger head selection, total_difficulty: {}", @@ -625,7 +618,7 @@ where self.select_head(branch)?; Ok(ConnectOk::Duplicate) } - //block has been processed, and its parent is main chain, so just connect it to main chain. + // block has been processed, and its parent is main chain, so just connect it to main chain. (Some(block_info), None) => { let executed_block = self.main.connect(ExecutedBlock { block: block.clone(), @@ -638,6 +631,7 @@ where self.do_new_head(executed_block, 1, vec![block], 0, vec![])?; Ok(ConnectOk::Connect) } + // the block is not processed but its parent branch exists (None, Some(mut branch)) => { let _executed_block = branch.apply(block)?; self.select_head(branch)?; diff --git a/sync/src/parallel/executor.rs b/sync/src/parallel/executor.rs index 5279dec192..eb6014504a 100644 --- a/sync/src/parallel/executor.rs +++ b/sync/src/parallel/executor.rs @@ -80,6 +80,12 @@ impl DagBlockExecutor { }; let header = block.header().clone(); + info!( + "sync parallel worker {:p} received block: {:?}", + &self, + block.header().id() + ); + loop { match Self::waiting_for_parents( &self.dag, diff --git a/sync/src/parallel/sender.rs b/sync/src/parallel/sender.rs index f4a5f71cbf..4d30510b01 100644 --- a/sync/src/parallel/sender.rs +++ b/sync/src/parallel/sender.rs @@ -96,6 +96,23 @@ impl<'a> DagBlockSender<'a> { } } + for executor in &mut self.executors { + match &executor.state { + ExecuteState::Executed(_) => { + executor.state = ExecuteState::Executing(block.id()); + executor + .sender_to_executor + .send(Some(block.clone())) + .await?; + return anyhow::Ok(true); + } + + ExecuteState::Executing(_) | ExecuteState::Error(_) | ExecuteState::Closed => { + continue; + } + } + } + anyhow::Ok(false) } @@ -134,13 +151,11 @@ impl<'a> DagBlockSender<'a> { }); sender_to_worker.send(Some(block)).await?; - self.flush_executor_state().await?; } self.wait_for_finish().await?; 
sync_dag_store.delete_all_dag_sync_block()?; - Ok(()) } diff --git a/sync/src/store/sync_dag_store.rs b/sync/src/store/sync_dag_store.rs index 8d3dca1492..4216240f34 100644 --- a/sync/src/store/sync_dag_store.rs +++ b/sync/src/store/sync_dag_store.rs @@ -111,7 +111,6 @@ impl SyncDagStore { block: Some(block.clone()), }]) .map_err(|e| format_err!("Failed to save absent block: {:?}", e))?; - Ok(()) } _ => Err(format_err!( diff --git a/sync/src/tasks/block_sync_task.rs b/sync/src/tasks/block_sync_task.rs index 4c27cecf9d..b218b70d2f 100644 --- a/sync/src/tasks/block_sync_task.rs +++ b/sync/src/tasks/block_sync_task.rs @@ -32,6 +32,8 @@ use stream_task::{CollectorState, TaskError, TaskResultCollector, TaskState}; use super::continue_execute_absent_block::ContinueChainOperator; use super::{BlockConnectAction, BlockConnectedFinishEvent}; +const ASYNC_BLOCK_COUNT: u64 = 1000; + enum ParallelSign { NeedMoreBlocks, Continue, @@ -421,13 +423,15 @@ where Ok(()) } - async fn find_absent_ancestor(&self, mut block_headers: Vec) -> Result<()> { + async fn find_absent_ancestor(&self, mut block_headers: Vec) -> Result { + let mut count: u64 = 0; loop { let mut absent_blocks = vec![]; self.find_absent_parent_dag_blocks_for_blocks(block_headers, &mut absent_blocks)?; if absent_blocks.is_empty() { - return Ok(()); + return Ok(count); } + count = count.saturating_add(absent_blocks.len() as u64); block_headers = self.fetch_blocks(absent_blocks).await?; } } @@ -449,10 +453,15 @@ where block_header.parents_hash() ); let fut = async { - self.find_absent_ancestor(vec![block_header.clone()]) + let count = self + .find_absent_ancestor(vec![block_header.clone()]) .await?; - if block_header.number() % 10000 == 0 + if count == 0 { + return anyhow::Ok(ParallelSign::Continue); + } + + if block_header.number() % ASYNC_BLOCK_COUNT == 0 || block_header.number() >= self.target.target_id.number() { let parallel_execute = DagBlockSender::new( diff --git a/sync/src/tasks/test_tools.rs 
b/sync/src/tasks/test_tools.rs index 6f015367f4..9dcf086764 100644 --- a/sync/src/tasks/test_tools.rs +++ b/sync/src/tasks/test_tools.rs @@ -13,7 +13,6 @@ use starcoin_account_api::AccountInfo; use starcoin_chain_api::ChainReader; use starcoin_chain_service::ChainReaderService; use starcoin_config::{BuiltinNetworkID, ChainNetwork, NodeConfig, RocksdbConfig}; -use starcoin_dag::blockdag::DEFAULT_GHOSTDAG_K; use starcoin_dag::consensusdb::prelude::FlexiDagStorageConfig; use starcoin_genesis::Genesis; use starcoin_logger::prelude::*; @@ -59,7 +58,7 @@ impl SyncTestSystem { FlexiDagStorageConfig::new(), ) .expect("init dag storage fail."); - let dag = starcoin_dag::blockdag::BlockDAG::new(DEFAULT_GHOSTDAG_K, dag_storage); // local dag + let dag = starcoin_dag::blockdag::BlockDAG::create_blockdag(dag_storage); // local dag let chain_info = genesis.execute_genesis_block(config.net(), storage.clone(), dag.clone())?; diff --git a/sync/src/tasks/tests_dag.rs b/sync/src/tasks/tests_dag.rs index b2248cc137..f4fb074658 100644 --- a/sync/src/tasks/tests_dag.rs +++ b/sync/src/tasks/tests_dag.rs @@ -144,7 +144,6 @@ async fn test_continue_sync_dag_blocks() -> Result<()> { .unwrap() .produce_fork_chain(one_fork_count, two_fork_count)?; - ///// let target_dag_genesis_header_id = target_node .chain() .get_storage() diff --git a/vm/starcoin-transactional-test-harness/src/fork_chain.rs b/vm/starcoin-transactional-test-harness/src/fork_chain.rs index bebaaa1d0d..bcb118ca1b 100644 --- a/vm/starcoin-transactional-test-harness/src/fork_chain.rs +++ b/vm/starcoin-transactional-test-harness/src/fork_chain.rs @@ -505,6 +505,14 @@ impl ChainApi for MockChainApi { fn get_ghostdagdata(&self, _block_hash: HashValue) -> FutureResult> { unimplemented!() } + + fn is_ancestor_of( + &self, + _ancestor: HashValue, + _descendants: Vec, + ) -> FutureResult { + unimplemented!() + } } fn try_decode_block_txns(state: &dyn StateView, block: &mut BlockView) -> anyhow::Result<()> {